Compare commits

..

2 Commits

Author        SHA1        Message                        Date
Lluis Agusti  0e62f43652  chore: wip                     2026-01-22 23:47:02 +07:00
Lluis Agusti  49e0fb5f40  chore: improve loading states  2026-01-22 23:12:50 +07:00
76 changed files with 1274 additions and 9681 deletions

View File

@@ -122,24 +122,6 @@ class ConnectionManager:
return len(connections)
async def broadcast_to_all(self, *, method: WSMethod, data: dict) -> int:
"""Broadcast a message to all active websocket connections."""
message = WSMessage(
method=method,
data=data,
).model_dump_json()
connections = tuple(self.active_connections)
if not connections:
return 0
await asyncio.gather(
*(connection.send_text(message) for connection in connections),
return_exceptions=True,
)
return len(connections)
async def _subscribe(self, channel_key: str, websocket: WebSocket) -> str:
if channel_key not in self.subscriptions:
self.subscriptions[channel_key] = set()
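
The removed broadcast_to_all shows a common fan-out pattern: serialize the message once, send it to every connection concurrently, and let return_exceptions=True absorb individual client failures. A minimal standalone sketch of the same pattern, with a hypothetical FakeConnection standing in for a real WebSocket:

import asyncio

class FakeConnection:
    """Stands in for a WebSocket; send_text may fail for some clients."""

    def __init__(self, name: str, fail: bool = False):
        self.name = name
        self.fail = fail

    async def send_text(self, message: str) -> None:
        if self.fail:
            raise ConnectionError(f"{self.name} dropped")
        print(f"{self.name} <- {message}")

async def broadcast(connections: list[FakeConnection], message: str) -> int:
    if not connections:
        return 0
    # return_exceptions=True keeps one dead client from cancelling the rest.
    await asyncio.gather(
        *(conn.send_text(message) for conn in connections),
        return_exceptions=True,
    )
    return len(connections)

asyncio.run(
    broadcast(
        [FakeConnection("a"), FakeConnection("b", fail=True)],
        '{"method": "NOTIFICATION"}',
    )
)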

View File

@@ -176,64 +176,30 @@ async def get_execution_analytics_config(
# Return with provider prefix for clarity
return f"{provider_name}: {model_name}"
# Get all models from the registry (dynamic, not hardcoded enum)
from backend.data import llm_registry
from backend.server.v2.llm import db as llm_db
# Get the recommended model from the database (configurable via admin UI)
recommended_model_slug = await llm_db.get_recommended_model_slug()
# Build the available models list
first_enabled_slug = None
for registry_model in llm_registry.iter_dynamic_models():
# Only include enabled models in the list
if not registry_model.is_enabled:
continue
# Track first enabled model as fallback
if first_enabled_slug is None:
first_enabled_slug = registry_model.slug
model_enum = LlmModel(registry_model.slug) # Create enum instance from slug
label = generate_model_label(model_enum)
# Include all LlmModel values (no more filtering by hardcoded list)
recommended_model = LlmModel.GPT4O_MINI.value
for model in LlmModel:
label = generate_model_label(model)
# Add "(Recommended)" suffix to the recommended model
if registry_model.slug == recommended_model_slug:
if model.value == recommended_model:
label += " (Recommended)"
available_models.append(
ModelInfo(
value=registry_model.slug,
value=model.value,
label=label,
provider=registry_model.metadata.provider,
provider=model.provider,
)
)
# Sort models by provider and name for better UX
available_models.sort(key=lambda x: (x.provider, x.label))
# Handle case where no models are available
if not available_models:
logger.warning(
"No enabled LLM models found in registry. "
"Ensure models are configured and enabled in the LLM Registry."
)
# Provide a placeholder entry so admins see meaningful feedback
available_models.append(
ModelInfo(
value="",
label="No models available - configure in LLM Registry",
provider="none",
)
)
# Use the DB recommended model, or fallback to first enabled model
final_recommended = recommended_model_slug or first_enabled_slug or ""
return ExecutionAnalyticsConfig(
available_models=available_models,
default_system_prompt=DEFAULT_SYSTEM_PROMPT,
default_user_prompt=DEFAULT_USER_PROMPT,
recommended_model=final_recommended,
recommended_model=recommended_model,
)
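
Both sides of this hunk build the same dropdown payload: produce a label for each model, suffix the recommended one, and sort by provider and label. A self-contained sketch of that shape, using a simplified stand-in for the ModelInfo response model and naive slug-based labels:

from dataclasses import dataclass

@dataclass
class ModelInfo:
    # Simplified stand-in for the API's ModelInfo response model.
    value: str
    label: str
    provider: str

def build_model_options(
    models: list[tuple[str, str]],  # (slug, provider) pairs
    recommended: str,
) -> list[ModelInfo]:
    options = []
    for slug, provider in models:
        label = slug  # the real code uses generate_model_label()
        if slug == recommended:
            label += " (Recommended)"
        options.append(ModelInfo(value=slug, label=label, provider=provider))
    # Sort by provider then label for a stable, grouped dropdown.
    options.sort(key=lambda m: (m.provider, m.label))
    return options

print(build_model_options(
    [("gpt-4o", "openai"), ("claude-3-haiku", "anthropic")],
    recommended="gpt-4o",
))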

View File

@@ -1,595 +0,0 @@
import logging
import autogpt_libs.auth
import fastapi
from backend.data import llm_registry
from backend.data.block_cost_config import refresh_llm_costs
from backend.server.v2.llm import db as llm_db
from backend.server.v2.llm import model as llm_model
logger = logging.getLogger(__name__)
router = fastapi.APIRouter(
tags=["llm", "admin"],
dependencies=[fastapi.Security(autogpt_libs.auth.requires_admin_user)],
)
async def _refresh_runtime_state() -> None:
"""Refresh the LLM registry and clear all related caches to ensure real-time updates."""
logger.info("Refreshing LLM registry runtime state...")
try:
# Refresh registry from database
await llm_registry.refresh_llm_registry()
refresh_llm_costs()
# Clear block schema caches so they're regenerated with updated model options
from backend.data.block import BlockSchema
BlockSchema.clear_all_schema_caches()
logger.info("Cleared all block schema caches")
# Clear the /blocks endpoint cache so frontend gets updated schemas
try:
from backend.api.features.v1 import _get_cached_blocks
_get_cached_blocks.cache_clear()
logger.info("Cleared /blocks endpoint cache")
except Exception as e:
logger.warning("Failed to clear /blocks cache: %s", e)
# Clear the v2 builder caches (if they exist)
try:
from backend.api.features.builder import db as builder_db
if hasattr(builder_db, "_get_all_providers"):
builder_db._get_all_providers.cache_clear()
logger.info("Cleared v2 builder providers cache")
if hasattr(builder_db, "_build_cached_search_results"):
builder_db._build_cached_search_results.cache_clear()
logger.info("Cleared v2 builder search results cache")
except Exception as e:
logger.debug("Could not clear v2 builder cache: %s", e)
# Notify all executor services to refresh their registry cache
from backend.data.llm_registry import publish_registry_refresh_notification
await publish_registry_refresh_notification()
logger.info("Published registry refresh notification")
except Exception as exc:
logger.exception(
"LLM runtime state refresh failed; caches may be stale: %s", exc
)
@router.get(
"/providers",
summary="List LLM providers",
response_model=llm_model.LlmProvidersResponse,
)
async def list_llm_providers(include_models: bool = True):
providers = await llm_db.list_providers(include_models=include_models)
return llm_model.LlmProvidersResponse(providers=providers)
@router.post(
"/providers",
summary="Create LLM provider",
response_model=llm_model.LlmProvider,
)
async def create_llm_provider(request: llm_model.UpsertLlmProviderRequest):
provider = await llm_db.upsert_provider(request=request)
await _refresh_runtime_state()
return provider
@router.patch(
"/providers/{provider_id}",
summary="Update LLM provider",
response_model=llm_model.LlmProvider,
)
async def update_llm_provider(
provider_id: str,
request: llm_model.UpsertLlmProviderRequest,
):
provider = await llm_db.upsert_provider(request=request, provider_id=provider_id)
await _refresh_runtime_state()
return provider
@router.delete(
"/providers/{provider_id}",
summary="Delete LLM provider",
response_model=dict,
)
async def delete_llm_provider(provider_id: str):
"""
Delete an LLM provider.
A provider can only be deleted if it has no associated models.
Delete all of the provider's models before deleting the provider itself.
"""
try:
await llm_db.delete_provider(provider_id)
await _refresh_runtime_state()
logger.info("Deleted LLM provider '%s'", provider_id)
return {"success": True, "message": "Provider deleted successfully"}
except ValueError as e:
logger.warning("Failed to delete provider '%s': %s", provider_id, e)
raise fastapi.HTTPException(status_code=400, detail=str(e))
except Exception as e:
logger.exception("Failed to delete provider '%s': %s", provider_id, e)
raise fastapi.HTTPException(status_code=500, detail=str(e))
@router.get(
"/models",
summary="List LLM models",
response_model=llm_model.LlmModelsResponse,
)
async def list_llm_models(
provider_id: str | None = fastapi.Query(default=None),
page: int = fastapi.Query(default=1, ge=1, description="Page number (1-indexed)"),
page_size: int = fastapi.Query(
default=50, ge=1, le=100, description="Number of models per page"
),
):
return await llm_db.list_models(
provider_id=provider_id, page=page, page_size=page_size
)
@router.post(
"/models",
summary="Create LLM model",
response_model=llm_model.LlmModel,
)
async def create_llm_model(request: llm_model.CreateLlmModelRequest):
model = await llm_db.create_model(request=request)
await _refresh_runtime_state()
return model
@router.patch(
"/models/{model_id}",
summary="Update LLM model",
response_model=llm_model.LlmModel,
)
async def update_llm_model(
model_id: str,
request: llm_model.UpdateLlmModelRequest,
):
model = await llm_db.update_model(model_id=model_id, request=request)
await _refresh_runtime_state()
return model
@router.patch(
"/models/{model_id}/toggle",
summary="Toggle LLM model availability",
response_model=llm_model.ToggleLlmModelResponse,
)
async def toggle_llm_model(
model_id: str,
request: llm_model.ToggleLlmModelRequest,
):
"""
Toggle a model's enabled status, optionally migrating workflows when disabling.
If disabling a model and `migrate_to_slug` is provided, all workflows using
this model will be migrated to the specified replacement model before disabling.
A migration record is created, which can later be reverted via the revert endpoint.
Optional fields:
- `migration_reason`: Reason for the migration (e.g., "Provider outage")
- `custom_credit_cost`: Custom pricing override for billing during migration
"""
try:
result = await llm_db.toggle_model(
model_id=model_id,
is_enabled=request.is_enabled,
migrate_to_slug=request.migrate_to_slug,
migration_reason=request.migration_reason,
custom_credit_cost=request.custom_credit_cost,
)
await _refresh_runtime_state()
if result.nodes_migrated > 0:
logger.info(
"Toggled model '%s' to %s and migrated %d nodes to '%s' (migration_id=%s)",
result.model.slug,
"enabled" if request.is_enabled else "disabled",
result.nodes_migrated,
result.migrated_to_slug,
result.migration_id,
)
return result
except ValueError as exc:
logger.warning("Model toggle validation failed: %s", exc)
raise fastapi.HTTPException(status_code=400, detail=str(exc)) from exc
except Exception as exc:
logger.exception("Failed to toggle LLM model %s: %s", model_id, exc)
raise fastapi.HTTPException(
status_code=500,
detail="Failed to toggle model availability",
) from exc
@router.get(
"/models/{model_id}/usage",
summary="Get model usage count",
response_model=llm_model.LlmModelUsageResponse,
)
async def get_llm_model_usage(model_id: str):
"""Get the number of workflow nodes using this model."""
try:
return await llm_db.get_model_usage(model_id=model_id)
except ValueError as exc:
raise fastapi.HTTPException(status_code=404, detail=str(exc)) from exc
except Exception as exc:
logger.exception("Failed to get model usage %s: %s", model_id, exc)
raise fastapi.HTTPException(
status_code=500,
detail="Failed to get model usage",
) from exc
@router.delete(
"/models/{model_id}",
summary="Delete LLM model and migrate workflows",
response_model=llm_model.DeleteLlmModelResponse,
)
async def delete_llm_model(
model_id: str,
replacement_model_slug: str | None = fastapi.Query(
default=None,
description="Slug of the model to migrate existing workflows to (required only if workflows use this model)",
),
):
"""
Delete a model and optionally migrate workflows using it to a replacement model.
If no workflows are using this model, it can be deleted without providing a
replacement. If workflows exist, replacement_model_slug is required.
This endpoint:
1. Counts how many workflow nodes use the model being deleted
2. If nodes exist, validates the replacement model and migrates them
3. Deletes the model record
4. Refreshes all caches and notifies executors
Example: DELETE /admin/llm/models/{id}?replacement_model_slug=gpt-4o
Example (no usage): DELETE /admin/llm/models/{id}
"""
try:
result = await llm_db.delete_model(
model_id=model_id, replacement_model_slug=replacement_model_slug
)
await _refresh_runtime_state()
logger.info(
"Deleted model '%s' and migrated %d nodes to '%s'",
result.deleted_model_slug,
result.nodes_migrated,
result.replacement_model_slug,
)
return result
except ValueError as exc:
# Validation errors (model not found, replacement invalid, etc.)
logger.warning("Model deletion validation failed: %s", exc)
raise fastapi.HTTPException(status_code=400, detail=str(exc)) from exc
except Exception as exc:
logger.exception("Failed to delete LLM model %s: %s", model_id, exc)
raise fastapi.HTTPException(
status_code=500,
detail="Failed to delete model and migrate workflows",
) from exc
# ============================================================================
# Migration Management Endpoints
# ============================================================================
@router.get(
"/migrations",
summary="List model migrations",
response_model=llm_model.LlmMigrationsResponse,
)
async def list_llm_migrations(
include_reverted: bool = fastapi.Query(
default=False, description="Include reverted migrations in the list"
),
):
"""
List all model migrations.
Migrations are created when disabling a model with the migrate_to_slug option.
They can be reverted to restore the original model configuration.
"""
try:
migrations = await llm_db.list_migrations(include_reverted=include_reverted)
return llm_model.LlmMigrationsResponse(migrations=migrations)
except Exception as exc:
logger.exception("Failed to list migrations: %s", exc)
raise fastapi.HTTPException(
status_code=500,
detail="Failed to list migrations",
) from exc
@router.get(
"/migrations/{migration_id}",
summary="Get migration details",
response_model=llm_model.LlmModelMigration,
)
async def get_llm_migration(migration_id: str):
"""Get details of a specific migration."""
try:
migration = await llm_db.get_migration(migration_id)
if not migration:
raise fastapi.HTTPException(
status_code=404, detail=f"Migration '{migration_id}' not found"
)
return migration
except fastapi.HTTPException:
raise
except Exception as exc:
logger.exception("Failed to get migration %s: %s", migration_id, exc)
raise fastapi.HTTPException(
status_code=500,
detail="Failed to get migration",
) from exc
@router.post(
"/migrations/{migration_id}/revert",
summary="Revert a model migration",
response_model=llm_model.RevertMigrationResponse,
)
async def revert_llm_migration(
migration_id: str,
request: llm_model.RevertMigrationRequest | None = None,
):
"""
Revert a model migration, restoring affected workflows to their original model.
This only reverts the specific nodes that were part of the migration.
The source model must exist for the revert to succeed.
Options:
- `re_enable_source_model`: Whether to re-enable the source model if disabled (default: True)
Response includes:
- `nodes_reverted`: Number of nodes successfully reverted
- `nodes_already_changed`: Number of nodes that were modified since migration (not reverted)
- `source_model_re_enabled`: Whether the source model was re-enabled
Requirements:
- Migration must not already be reverted
- Source model must exist
"""
try:
re_enable = request.re_enable_source_model if request else True
result = await llm_db.revert_migration(
migration_id,
re_enable_source_model=re_enable,
)
await _refresh_runtime_state()
logger.info(
"Reverted migration '%s': %d nodes restored from '%s' to '%s' "
"(%d already changed, source re-enabled=%s)",
migration_id,
result.nodes_reverted,
result.target_model_slug,
result.source_model_slug,
result.nodes_already_changed,
result.source_model_re_enabled,
)
return result
except ValueError as exc:
logger.warning("Migration revert validation failed: %s", exc)
raise fastapi.HTTPException(status_code=400, detail=str(exc)) from exc
except Exception as exc:
logger.exception("Failed to revert migration %s: %s", migration_id, exc)
raise fastapi.HTTPException(
status_code=500,
detail="Failed to revert migration",
) from exc
# ============================================================================
# Creator Management Endpoints
# ============================================================================
@router.get(
"/creators",
summary="List model creators",
response_model=llm_model.LlmCreatorsResponse,
)
async def list_llm_creators():
"""
List all model creators.
Creators are organizations that create/train models (e.g., OpenAI, Meta, Anthropic).
This is distinct from providers who host/serve the models (e.g., OpenRouter).
"""
try:
creators = await llm_db.list_creators()
return llm_model.LlmCreatorsResponse(creators=creators)
except Exception as exc:
logger.exception("Failed to list creators: %s", exc)
raise fastapi.HTTPException(
status_code=500,
detail="Failed to list creators",
) from exc
@router.get(
"/creators/{creator_id}",
summary="Get creator details",
response_model=llm_model.LlmModelCreator,
)
async def get_llm_creator(creator_id: str):
"""Get details of a specific model creator."""
try:
creator = await llm_db.get_creator(creator_id)
if not creator:
raise fastapi.HTTPException(
status_code=404, detail=f"Creator '{creator_id}' not found"
)
return creator
except fastapi.HTTPException:
raise
except Exception as exc:
logger.exception("Failed to get creator %s: %s", creator_id, exc)
raise fastapi.HTTPException(
status_code=500,
detail="Failed to get creator",
) from exc
@router.post(
"/creators",
summary="Create model creator",
response_model=llm_model.LlmModelCreator,
)
async def create_llm_creator(request: llm_model.UpsertLlmCreatorRequest):
"""
Create a new model creator.
A creator represents an organization that creates/trains AI models,
such as OpenAI, Anthropic, Meta, or Google.
"""
try:
creator = await llm_db.upsert_creator(request=request)
await _refresh_runtime_state()
logger.info("Created model creator '%s' (%s)", creator.display_name, creator.id)
return creator
except Exception as exc:
logger.exception("Failed to create creator: %s", exc)
raise fastapi.HTTPException(
status_code=500,
detail="Failed to create creator",
) from exc
@router.patch(
"/creators/{creator_id}",
summary="Update model creator",
response_model=llm_model.LlmModelCreator,
)
async def update_llm_creator(
creator_id: str,
request: llm_model.UpsertLlmCreatorRequest,
):
"""Update an existing model creator."""
try:
creator = await llm_db.upsert_creator(request=request, creator_id=creator_id)
await _refresh_runtime_state()
logger.info("Updated model creator '%s' (%s)", creator.display_name, creator_id)
return creator
except Exception as exc:
logger.exception("Failed to update creator %s: %s", creator_id, exc)
raise fastapi.HTTPException(
status_code=500,
detail="Failed to update creator",
) from exc
@router.delete(
"/creators/{creator_id}",
summary="Delete model creator",
response_model=dict,
)
async def delete_llm_creator(creator_id: str):
"""
Delete a model creator.
This will remove the creator association from all models that reference it
(sets creatorId to NULL), but will not delete the models themselves.
"""
try:
await llm_db.delete_creator(creator_id)
await _refresh_runtime_state()
logger.info("Deleted model creator '%s'", creator_id)
return {"success": True, "message": f"Creator '{creator_id}' deleted"}
except ValueError as exc:
logger.warning("Creator deletion validation failed: %s", exc)
raise fastapi.HTTPException(status_code=404, detail=str(exc)) from exc
except Exception as exc:
logger.exception("Failed to delete creator %s: %s", creator_id, exc)
raise fastapi.HTTPException(
status_code=500,
detail="Failed to delete creator",
) from exc
# ============================================================================
# Recommended Model Endpoints
# ============================================================================
@router.get(
"/recommended-model",
summary="Get recommended model",
response_model=llm_model.RecommendedModelResponse,
)
async def get_recommended_model():
"""
Get the currently recommended LLM model.
The recommended model is shown to users as the default/suggested option
in model selection dropdowns.
"""
try:
model = await llm_db.get_recommended_model()
return llm_model.RecommendedModelResponse(
model=model,
slug=model.slug if model else None,
)
except Exception as exc:
logger.exception("Failed to get recommended model: %s", exc)
raise fastapi.HTTPException(
status_code=500,
detail="Failed to get recommended model",
) from exc
@router.post(
"/recommended-model",
summary="Set recommended model",
response_model=llm_model.SetRecommendedModelResponse,
)
async def set_recommended_model(request: llm_model.SetRecommendedModelRequest):
"""
Set a model as the recommended model.
This clears the recommended flag from any other model and sets it on
the specified model. The model must be enabled to be set as recommended.
The recommended model is displayed to users as the default/suggested
option in model selection dropdowns throughout the platform.
"""
try:
model, previous_slug = await llm_db.set_recommended_model(request.model_id)
await _refresh_runtime_state()
logger.info(
"Set recommended model to '%s' (previous: %s)",
model.slug,
previous_slug or "none",
)
return llm_model.SetRecommendedModelResponse(
model=model,
previous_recommended_slug=previous_slug,
message=f"Model '{model.display_name}' is now the recommended model",
)
except ValueError as exc:
logger.warning("Set recommended model validation failed: %s", exc)
raise fastapi.HTTPException(status_code=400, detail=str(exc)) from exc
except Exception as exc:
logger.exception("Failed to set recommended model: %s", exc)
raise fastapi.HTTPException(
status_code=500,
detail="Failed to set recommended model",
) from exc
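
The deleted _refresh_runtime_state relies on the write path always invalidating read caches: mutate the source of truth, then call cache_clear() on every dependent functools-style cache. A minimal sketch of that invalidation pattern (the registry dict here is a hypothetical stand-in for the database):

from functools import lru_cache

_registry = {"models": ["gpt-4o"]}  # hypothetical source of truth

@lru_cache(maxsize=1)
def get_cached_models() -> tuple[str, ...]:
    return tuple(_registry["models"])

def refresh_runtime_state() -> None:
    # Mirror the route handlers: after any write, clear every dependent
    # cache so the next read regenerates from fresh data.
    get_cached_models.cache_clear()

print(get_cached_models())   # ('gpt-4o',)
_registry["models"].append("gpt-4o-mini")
print(get_cached_models())   # still ('gpt-4o',) -- cache is stale
refresh_runtime_state()
print(get_cached_models())   # ('gpt-4o', 'gpt-4o-mini')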

View File

@@ -1,491 +0,0 @@
import json
from unittest.mock import AsyncMock
import fastapi
import fastapi.testclient
import pytest
import pytest_mock
from autogpt_libs.auth.jwt_utils import get_jwt_payload
from pytest_snapshot.plugin import Snapshot
import backend.api.features.admin.llm_routes as llm_routes
from backend.server.v2.llm import model as llm_model
from backend.util.models import Pagination
app = fastapi.FastAPI()
app.include_router(llm_routes.router, prefix="/admin/llm")
client = fastapi.testclient.TestClient(app)
@pytest.fixture(autouse=True)
def setup_app_admin_auth(mock_jwt_admin):
"""Setup admin auth overrides for all tests in this module"""
app.dependency_overrides[get_jwt_payload] = mock_jwt_admin["get_jwt_payload"]
yield
app.dependency_overrides.clear()
def test_list_llm_providers_success(
mocker: pytest_mock.MockFixture,
configured_snapshot: Snapshot,
) -> None:
"""Test successful listing of LLM providers"""
# Mock the database function
mock_providers = [
{
"id": "provider-1",
"name": "openai",
"display_name": "OpenAI",
"description": "OpenAI LLM provider",
"supports_tools": True,
"supports_json_output": True,
"supports_reasoning": False,
"supports_parallel_tool": True,
"metadata": {},
"models": [],
},
{
"id": "provider-2",
"name": "anthropic",
"display_name": "Anthropic",
"description": "Anthropic LLM provider",
"supports_tools": True,
"supports_json_output": True,
"supports_reasoning": False,
"supports_parallel_tool": True,
"metadata": {},
"models": [],
},
]
mocker.patch(
"backend.api.features.admin.llm_routes.llm_db.list_providers",
new=AsyncMock(return_value=mock_providers),
)
response = client.get("/admin/llm/providers")
assert response.status_code == 200
response_data = response.json()
assert len(response_data["providers"]) == 2
assert response_data["providers"][0]["name"] == "openai"
# Snapshot test the response (must be string)
configured_snapshot.assert_match(
json.dumps(response_data, indent=2, sort_keys=True),
"list_llm_providers_success.json",
)
def test_list_llm_models_success(
mocker: pytest_mock.MockFixture,
configured_snapshot: Snapshot,
) -> None:
"""Test successful listing of LLM models with pagination"""
# Mock the database function - now returns LlmModelsResponse
mock_model = llm_model.LlmModel(
id="model-1",
slug="gpt-4o",
display_name="GPT-4o",
description="GPT-4 Optimized",
provider_id="provider-1",
context_window=128000,
max_output_tokens=16384,
is_enabled=True,
capabilities={},
metadata={},
costs=[
llm_model.LlmModelCost(
id="cost-1",
credit_cost=10,
credential_provider="openai",
metadata={},
)
],
)
mock_response = llm_model.LlmModelsResponse(
models=[mock_model],
pagination=Pagination(
total_items=1,
total_pages=1,
current_page=1,
page_size=50,
),
)
mocker.patch(
"backend.api.features.admin.llm_routes.llm_db.list_models",
new=AsyncMock(return_value=mock_response),
)
response = client.get("/admin/llm/models")
assert response.status_code == 200
response_data = response.json()
assert len(response_data["models"]) == 1
assert response_data["models"][0]["slug"] == "gpt-4o"
assert response_data["pagination"]["total_items"] == 1
assert response_data["pagination"]["page_size"] == 50
# Snapshot test the response (must be string)
configured_snapshot.assert_match(
json.dumps(response_data, indent=2, sort_keys=True),
"list_llm_models_success.json",
)
def test_create_llm_provider_success(
mocker: pytest_mock.MockFixture,
configured_snapshot: Snapshot,
) -> None:
"""Test successful creation of LLM provider"""
mock_provider = {
"id": "new-provider-id",
"name": "groq",
"display_name": "Groq",
"description": "Groq LLM provider",
"supports_tools": True,
"supports_json_output": True,
"supports_reasoning": False,
"supports_parallel_tool": False,
"metadata": {},
}
mocker.patch(
"backend.api.features.admin.llm_routes.llm_db.upsert_provider",
new=AsyncMock(return_value=mock_provider),
)
mock_refresh = mocker.patch(
"backend.api.features.admin.llm_routes._refresh_runtime_state",
new=AsyncMock(),
)
request_data = {
"name": "groq",
"display_name": "Groq",
"description": "Groq LLM provider",
"supports_tools": True,
"supports_json_output": True,
"supports_reasoning": False,
"supports_parallel_tool": False,
"metadata": {},
}
response = client.post("/admin/llm/providers", json=request_data)
assert response.status_code == 200
response_data = response.json()
assert response_data["name"] == "groq"
assert response_data["display_name"] == "Groq"
# Verify refresh was called
mock_refresh.assert_called_once()
# Snapshot test the response (must be string)
configured_snapshot.assert_match(
json.dumps(response_data, indent=2, sort_keys=True),
"create_llm_provider_success.json",
)
def test_create_llm_model_success(
mocker: pytest_mock.MockFixture,
configured_snapshot: Snapshot,
) -> None:
"""Test successful creation of LLM model"""
mock_model = {
"id": "new-model-id",
"slug": "gpt-4.1-mini",
"display_name": "GPT-4.1 Mini",
"description": "Latest GPT-4.1 Mini model",
"provider_id": "provider-1",
"context_window": 128000,
"max_output_tokens": 16384,
"is_enabled": True,
"capabilities": {},
"metadata": {},
"costs": [
{
"id": "cost-id",
"credit_cost": 5,
"credential_provider": "openai",
"metadata": {},
}
],
}
mocker.patch(
"backend.api.features.admin.llm_routes.llm_db.create_model",
new=AsyncMock(return_value=mock_model),
)
mock_refresh = mocker.patch(
"backend.api.features.admin.llm_routes._refresh_runtime_state",
new=AsyncMock(),
)
request_data = {
"slug": "gpt-4.1-mini",
"display_name": "GPT-4.1 Mini",
"description": "Latest GPT-4.1 Mini model",
"provider_id": "provider-1",
"context_window": 128000,
"max_output_tokens": 16384,
"is_enabled": True,
"capabilities": {},
"metadata": {},
"costs": [
{
"credit_cost": 5,
"credential_provider": "openai",
"metadata": {},
}
],
}
response = client.post("/admin/llm/models", json=request_data)
assert response.status_code == 200
response_data = response.json()
assert response_data["slug"] == "gpt-4.1-mini"
assert response_data["is_enabled"] is True
# Verify refresh was called
mock_refresh.assert_called_once()
# Snapshot test the response (must be string)
configured_snapshot.assert_match(
json.dumps(response_data, indent=2, sort_keys=True),
"create_llm_model_success.json",
)
def test_update_llm_model_success(
mocker: pytest_mock.MockFixture,
configured_snapshot: Snapshot,
) -> None:
"""Test successful update of LLM model"""
mock_model = {
"id": "model-1",
"slug": "gpt-4o",
"display_name": "GPT-4o Updated",
"description": "Updated description",
"provider_id": "provider-1",
"context_window": 256000,
"max_output_tokens": 32768,
"is_enabled": True,
"capabilities": {},
"metadata": {},
"costs": [
{
"id": "cost-1",
"credit_cost": 15,
"credential_provider": "openai",
"metadata": {},
}
],
}
mocker.patch(
"backend.api.features.admin.llm_routes.llm_db.update_model",
new=AsyncMock(return_value=mock_model),
)
mock_refresh = mocker.patch(
"backend.api.features.admin.llm_routes._refresh_runtime_state",
new=AsyncMock(),
)
request_data = {
"display_name": "GPT-4o Updated",
"description": "Updated description",
"context_window": 256000,
"max_output_tokens": 32768,
}
response = client.patch("/admin/llm/models/model-1", json=request_data)
assert response.status_code == 200
response_data = response.json()
assert response_data["display_name"] == "GPT-4o Updated"
assert response_data["context_window"] == 256000
# Verify refresh was called
mock_refresh.assert_called_once()
# Snapshot test the response (must be string)
configured_snapshot.assert_match(
json.dumps(response_data, indent=2, sort_keys=True),
"update_llm_model_success.json",
)
def test_toggle_llm_model_success(
mocker: pytest_mock.MockFixture,
configured_snapshot: Snapshot,
) -> None:
"""Test successful toggling of LLM model enabled status"""
# Create a proper mock model object
mock_model = llm_model.LlmModel(
id="model-1",
slug="gpt-4o",
display_name="GPT-4o",
description="GPT-4 Optimized",
provider_id="provider-1",
context_window=128000,
max_output_tokens=16384,
is_enabled=False,
capabilities={},
metadata={},
costs=[],
)
# Create a proper ToggleLlmModelResponse
mock_response = llm_model.ToggleLlmModelResponse(
model=mock_model,
nodes_migrated=0,
migrated_to_slug=None,
migration_id=None,
)
mocker.patch(
"backend.api.features.admin.llm_routes.llm_db.toggle_model",
new=AsyncMock(return_value=mock_response),
)
mock_refresh = mocker.patch(
"backend.api.features.admin.llm_routes._refresh_runtime_state",
new=AsyncMock(),
)
request_data = {"is_enabled": False}
response = client.patch("/admin/llm/models/model-1/toggle", json=request_data)
assert response.status_code == 200
response_data = response.json()
assert response_data["model"]["is_enabled"] is False
# Verify refresh was called
mock_refresh.assert_called_once()
# Snapshot test the response (must be string)
configured_snapshot.assert_match(
json.dumps(response_data, indent=2, sort_keys=True),
"toggle_llm_model_success.json",
)
def test_delete_llm_model_success(
mocker: pytest_mock.MockFixture,
configured_snapshot: Snapshot,
) -> None:
"""Test successful deletion of LLM model with migration"""
# Create a proper DeleteLlmModelResponse
mock_response = llm_model.DeleteLlmModelResponse(
deleted_model_slug="gpt-3.5-turbo",
deleted_model_display_name="GPT-3.5 Turbo",
replacement_model_slug="gpt-4o-mini",
nodes_migrated=42,
message="Successfully deleted model 'GPT-3.5 Turbo' (gpt-3.5-turbo) "
"and migrated 42 workflow node(s) to 'gpt-4o-mini'.",
)
mocker.patch(
"backend.api.features.admin.llm_routes.llm_db.delete_model",
new=AsyncMock(return_value=mock_response),
)
mock_refresh = mocker.patch(
"backend.api.features.admin.llm_routes._refresh_runtime_state",
new=AsyncMock(),
)
response = client.delete(
"/admin/llm/models/model-1?replacement_model_slug=gpt-4o-mini"
)
assert response.status_code == 200
response_data = response.json()
assert response_data["deleted_model_slug"] == "gpt-3.5-turbo"
assert response_data["nodes_migrated"] == 42
assert response_data["replacement_model_slug"] == "gpt-4o-mini"
# Verify refresh was called
mock_refresh.assert_called_once()
# Snapshot test the response (must be string)
configured_snapshot.assert_match(
json.dumps(response_data, indent=2, sort_keys=True),
"delete_llm_model_success.json",
)
def test_delete_llm_model_validation_error(
mocker: pytest_mock.MockFixture,
) -> None:
"""Test deletion fails with proper error when validation fails"""
mocker.patch(
"backend.api.features.admin.llm_routes.llm_db.delete_model",
new=AsyncMock(side_effect=ValueError("Replacement model 'invalid' not found")),
)
response = client.delete("/admin/llm/models/model-1?replacement_model_slug=invalid")
assert response.status_code == 400
assert "Replacement model 'invalid' not found" in response.json()["detail"]
def test_delete_llm_model_no_replacement_with_usage(
mocker: pytest_mock.MockFixture,
) -> None:
"""Test deletion fails when nodes exist but no replacement is provided"""
mocker.patch(
"backend.api.features.admin.llm_routes.llm_db.delete_model",
new=AsyncMock(
side_effect=ValueError(
"Cannot delete model 'test-model': 5 workflow node(s) are using it. "
"Please provide a replacement_model_slug to migrate them."
)
),
)
response = client.delete("/admin/llm/models/model-1")
assert response.status_code == 400
assert "workflow node(s) are using it" in response.json()["detail"]
def test_delete_llm_model_no_replacement_no_usage(
mocker: pytest_mock.MockFixture,
) -> None:
"""Test deletion succeeds when no nodes use the model and no replacement is provided"""
mock_response = llm_model.DeleteLlmModelResponse(
deleted_model_slug="unused-model",
deleted_model_display_name="Unused Model",
replacement_model_slug=None,
nodes_migrated=0,
message="Successfully deleted model 'Unused Model' (unused-model). No workflows were using this model.",
)
mocker.patch(
"backend.api.features.admin.llm_routes.llm_db.delete_model",
new=AsyncMock(return_value=mock_response),
)
mock_refresh = mocker.patch(
"backend.api.features.admin.llm_routes._refresh_runtime_state",
new=AsyncMock(),
)
response = client.delete("/admin/llm/models/model-1")
assert response.status_code == 200
response_data = response.json()
assert response_data["deleted_model_slug"] == "unused-model"
assert response_data["nodes_migrated"] == 0
assert response_data["replacement_model_slug"] is None
mock_refresh.assert_called_once()
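
Every deleted test follows one recipe: replace the async DB function with an AsyncMock, call the route through TestClient, and assert on the JSON. A condensed, self-contained version of that pattern (toy app and function, not the real module path):

from unittest.mock import AsyncMock, patch

import fastapi
import fastapi.testclient

app = fastapi.FastAPI()

async def fetch_models() -> list[str]:
    # Stand-in for the real llm_db call; never reached in the test.
    raise RuntimeError("would hit the database")

@app.get("/models")
async def list_models():
    return {"models": await fetch_models()}

def test_list_models() -> None:
    # Patch the name the route resolves at call time with an AsyncMock.
    with patch(f"{__name__}.fetch_models", new=AsyncMock(return_value=["gpt-4o"])):
        client = fastapi.testclient.TestClient(app)
        assert client.get("/models").json() == {"models": ["gpt-4o"]}

test_list_models()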

View File

@@ -15,7 +15,6 @@ from backend.blocks import load_all_blocks
from backend.blocks.llm import LlmModel
from backend.data.block import AnyBlockSchema, BlockCategory, BlockInfo, BlockSchema
from backend.data.db import query_raw_with_schema
from backend.data.llm_registry import get_all_model_slugs_for_validation
from backend.integrations.providers import ProviderName
from backend.util.cache import cached
from backend.util.models import Pagination
@@ -32,14 +31,7 @@ from .model import (
)
logger = logging.getLogger(__name__)
def _get_llm_models() -> list[str]:
"""Get LLM model names for search matching from the registry."""
return [
slug.lower().replace("-", " ") for slug in get_all_model_slugs_for_validation()
]
llm_models = [name.name.lower().replace("_", " ") for name in LlmModel]
MAX_LIBRARY_AGENT_RESULTS = 100
MAX_MARKETPLACE_AGENT_RESULTS = 100
@@ -504,8 +496,8 @@ async def _get_static_counts():
def _matches_llm_model(schema_cls: type[BlockSchema], query: str) -> bool:
for field in schema_cls.model_fields.values():
if field.annotation == LlmModel:
# Check if query matches any value in llm_models from registry
if any(query in name for name in _get_llm_models()):
# Check if query matches any value in llm_models
if any(query in name for name in llm_models):
return True
return False
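
Both versions of the search hook reduce model identifiers to lowercase, space-separated tokens before doing substring matching. A tiny sketch of that normalize-then-match approach:

def normalize(identifier: str) -> str:
    # "GPT4O_MINI" -> "gpt4o mini", "claude-3-haiku" -> "claude 3 haiku"
    return identifier.lower().replace("_", " ").replace("-", " ")

LLM_MODELS = [
    normalize(s) for s in ("GPT4O_MINI", "claude-3-haiku", "llama-3.1-8b-instant")
]

def matches_llm_model(query: str) -> bool:
    q = normalize(query)
    return any(q in name for name in LLM_MODELS)

assert matches_llm_model("haiku")
assert matches_llm_model("4o mini")
assert not matches_llm_model("mistral")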

View File

@@ -393,7 +393,6 @@ async def get_creators(
@router.get(
"/creator/{username}",
summary="Get creator details",
operation_id="getV2GetCreatorDetails",
tags=["store", "public"],
response_model=store_model.CreatorDetails,
)
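
Dropping the explicit operation_id means the OpenAPI operationId falls back to FastAPI's generated name, which downstream client codegen keys method names off. A tiny sketch of how operation_id surfaces in the schema:

import fastapi

app = fastapi.FastAPI()

@app.get("/creator/{username}", operation_id="getV2GetCreatorDetails")
async def get_creator(username: str):
    return {"username": username}

# The explicit operationId lands verbatim in the OpenAPI document:
print(app.openapi()["paths"]["/creator/{username}"]["get"]["operationId"])
# -> getV2GetCreatorDetails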

View File

@@ -18,7 +18,6 @@ from prisma.errors import PrismaError
import backend.api.features.admin.credit_admin_routes
import backend.api.features.admin.execution_analytics_routes
import backend.api.features.admin.llm_routes
import backend.api.features.admin.store_admin_routes
import backend.api.features.builder
import backend.api.features.builder.routes
@@ -38,11 +37,9 @@ import backend.data.db
import backend.data.graph
import backend.data.user
import backend.integrations.webhooks.utils
import backend.server.v2.llm.routes as public_llm_routes
import backend.util.service
import backend.util.settings
from backend.data import llm_registry
from backend.data.block_cost_config import refresh_llm_costs
from backend.blocks.llm import DEFAULT_LLM_MODEL
from backend.data.model import Credentials
from backend.integrations.providers import ProviderName
from backend.monitoring.instrumentation import instrument_fastapi
@@ -112,27 +109,11 @@ async def lifespan_context(app: fastapi.FastAPI):
AutoRegistry.patch_integrations()
# Refresh LLM registry before initializing blocks so blocks can use registry data
await llm_registry.refresh_llm_registry()
refresh_llm_costs()
# Clear block schema caches so they're regenerated with updated discriminator_mapping
from backend.data.block import BlockSchema
BlockSchema.clear_all_schema_caches()
await backend.data.block.initialize_blocks()
await backend.data.user.migrate_and_encrypt_user_integrations()
await backend.data.graph.fix_llm_provider_credentials()
# migrate_llm_models uses registry default model
from backend.blocks.llm import LlmModel
default_model_slug = llm_registry.get_default_model_slug()
if default_model_slug:
await backend.data.graph.migrate_llm_models(LlmModel(default_model_slug))
else:
logger.warning("Skipping LLM model migration: no default model available")
await backend.data.graph.migrate_llm_models(DEFAULT_LLM_MODEL)
await backend.integrations.webhooks.utils.migrate_legacy_triggered_graphs()
with launch_darkly_context():
@@ -317,16 +298,6 @@ app.include_router(
tags=["v2", "executions", "review"],
prefix="/api/review",
)
app.include_router(
backend.api.features.admin.llm_routes.router,
tags=["v2", "admin", "llm"],
prefix="/api/llm/admin",
)
app.include_router(
public_llm_routes.router,
tags=["v2", "llm"],
prefix="/api",
)
app.include_router(
backend.api.features.library.routes.router, tags=["v2"], prefix="/api/library"
)
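
The lifespan hunk above is order-sensitive: the registry refresh has to run before block initialization so that generated schemas see fresh data. A minimal sketch of that startup-ordering idiom (the two step functions are placeholders):

import contextlib

import fastapi

async def refresh_registry() -> None:
    print("1. registry refreshed")

async def initialize_blocks() -> None:
    print("2. blocks initialized (can rely on registry data)")

@contextlib.asynccontextmanager
async def lifespan(app: fastapi.FastAPI):
    # Order matters: anything blocks read at init time must be ready first.
    await refresh_registry()
    await initialize_blocks()
    yield  # the app serves requests here

app = fastapi.FastAPI(lifespan=lifespan)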

View File

@@ -77,39 +77,7 @@ async def event_broadcaster(manager: ConnectionManager):
payload=notification.payload,
)
async def registry_refresh_worker():
"""Listen for LLM registry refresh notifications and broadcast to all clients."""
from backend.data.llm_registry import REGISTRY_REFRESH_CHANNEL
from backend.data.redis_client import connect_async
redis = await connect_async()
pubsub = redis.pubsub()
await pubsub.subscribe(REGISTRY_REFRESH_CHANNEL)
logger.info(
"Subscribed to LLM registry refresh notifications for WebSocket broadcast"
)
async for message in pubsub.listen():
if (
message["type"] == "message"
and message["channel"] == REGISTRY_REFRESH_CHANNEL
):
logger.info(
"Broadcasting LLM registry refresh to all WebSocket clients"
)
await manager.broadcast_to_all(
method=WSMethod.NOTIFICATION,
data={
"type": "LLM_REGISTRY_REFRESH",
"event": "registry_updated",
},
)
await asyncio.gather(
execution_worker(),
notification_worker(),
registry_refresh_worker(),
)
await asyncio.gather(execution_worker(), notification_worker())
async def authenticate_websocket(websocket: WebSocket) -> str:
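
The removed registry_refresh_worker is a standard Redis pub/sub listener loop. A hedged sketch of the same loop using redis.asyncio (channel name and handler are illustrative, not the project's):

import redis.asyncio as redis

CHANNEL = "llm-registry-refresh"  # illustrative channel name

async def refresh_listener() -> None:
    client = redis.Redis()
    pubsub = client.pubsub()
    await pubsub.subscribe(CHANNEL)
    async for message in pubsub.listen():
        # listen() also yields subscribe confirmations; keep real messages only.
        if message["type"] == "message":
            print("registry refresh received:", message["data"])

# import asyncio; asyncio.run(refresh_listener())  # needs a running Redis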

View File

@@ -1,6 +1,7 @@
from typing import Any
from backend.blocks.llm import (
DEFAULT_LLM_MODEL,
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
AIBlockBase,
@@ -9,7 +10,6 @@ from backend.blocks.llm import (
LlmModel,
LLMResponse,
llm_call,
llm_model_schema_extra,
)
from backend.data.block import (
BlockCategory,
@@ -50,10 +50,9 @@ class AIConditionBlock(AIBlockBase):
)
model: LlmModel = SchemaField(
title="LLM Model",
default_factory=LlmModel.default,
default=DEFAULT_LLM_MODEL,
description="The language model to use for evaluating the condition.",
advanced=False,
json_schema_extra=llm_model_schema_extra(),
)
credentials: AICredentials = AICredentialsField()
@@ -83,7 +82,7 @@ class AIConditionBlock(AIBlockBase):
"condition": "the input is an email address",
"yes_value": "Valid email",
"no_value": "Not an email",
"model": "gpt-4o", # Using string value - enum accepts any model slug dynamically
"model": DEFAULT_LLM_MODEL,
"credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
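
The model field swap above trades default_factory (resolved per instantiation, so it tracks the live registry) for a module-level constant (resolved once at import). The difference in one small pydantic sketch:

from pydantic import BaseModel, Field

current_default = "gpt-4o"

def pick_default() -> str:
    return current_default

class WithConstant(BaseModel):
    model: str = Field(default=pick_default())        # frozen at class definition

class WithFactory(BaseModel):
    model: str = Field(default_factory=pick_default)  # resolved per instance

current_default = "gpt-5"
print(WithConstant().model)  # gpt-4o  (stale)
print(WithFactory().model)   # gpt-5   (fresh)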

View File

@@ -4,19 +4,17 @@ import logging
import re
import secrets
from abc import ABC
from enum import Enum
from enum import Enum, EnumMeta
from json import JSONDecodeError
from typing import Any, Iterable, List, Literal, Optional
from typing import Any, Iterable, List, Literal, NamedTuple, Optional
import anthropic
import ollama
import openai
from anthropic.types import ToolParam
from groq import AsyncGroq
from pydantic import BaseModel, GetCoreSchemaHandler, SecretStr
from pydantic_core import CoreSchema, core_schema
from pydantic import BaseModel, SecretStr
from backend.data import llm_registry
from backend.data.block import (
Block,
BlockCategory,
@@ -24,7 +22,6 @@ from backend.data.block import (
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.data.llm_registry import ModelMetadata
from backend.data.model import (
APIKeyCredentials,
CredentialsField,
@@ -69,117 +66,114 @@ TEST_CREDENTIALS_INPUT = {
def AICredentialsField() -> AICredentials:
"""
Returns a CredentialsField for LLM providers.
If the discriminator_mapping is empty, it will be refreshed when the
schema is generated, ensuring the LLM registry is loaded.
"""
# Get the mapping now - it may be empty initially, but will be refreshed
# when the schema is generated via CredentialsMetaInput._add_json_schema_extra
mapping = llm_registry.get_llm_discriminator_mapping()
return CredentialsField(
description="API key for the LLM provider.",
discriminator="model",
discriminator_mapping=mapping, # May be empty initially, refreshed later
discriminator_mapping={
model.value: model.metadata.provider for model in LlmModel
},
)
def llm_model_schema_extra() -> dict[str, Any]:
return {"options": llm_registry.get_llm_model_schema_options()}
class ModelMetadata(NamedTuple):
provider: str
context_window: int
max_output_tokens: int | None
display_name: str
provider_name: str
creator_name: str
price_tier: Literal[1, 2, 3]
class LlmModelMeta(type):
"""
Metaclass for LlmModel that enables attribute-style access to dynamic models.
This allows code like `LlmModel.GPT4O` to work by converting the attribute
name to a slug format:
- GPT4O -> gpt-4o
- GPT4O_MINI -> gpt-4o-mini
- CLAUDE_3_5_SONNET -> claude-3-5-sonnet
"""
def __getattr__(cls, name: str):
# Don't intercept private/dunder attributes
if name.startswith("_"):
raise AttributeError(f"type object 'LlmModel' has no attribute '{name}'")
# Convert attribute name to slug format:
# 1. Lowercase: GPT4O -> gpt4o
# 2. Underscores to hyphens: GPT4O_MINI -> gpt4o-mini
# 3. Insert hyphen between letter and digit: gpt4o -> gpt-4o
slug = name.lower().replace("_", "-")
slug = re.sub(r"([a-z])(\d)", r"\1-\2", slug)
return cls(slug)
def __iter__(cls):
"""Iterate over all models from the registry.
Yields LlmModel instances for each model in the dynamic registry.
Used by __get_pydantic_json_schema__ to build model metadata.
"""
for model in llm_registry.iter_dynamic_models():
yield cls(model.slug)
class LlmModelMeta(EnumMeta):
pass
class LlmModel(str, metaclass=LlmModelMeta):
"""
Dynamic LLM model type that accepts any model slug from the registry.
This is a string subclass (not an Enum) that allows any model slug value.
All models are managed via the LLM Registry in the database.
Usage:
model = LlmModel("gpt-4o") # Direct construction
model = LlmModel.GPT4O # Attribute access (converted to "gpt-4o")
model.value # Returns the slug string
model.provider # Returns the provider from registry
"""
def __new__(cls, value: str):
if isinstance(value, LlmModel):
return value
return str.__new__(cls, value)
@classmethod
def __get_pydantic_core_schema__(
cls, source_type: Any, handler: GetCoreSchemaHandler
) -> CoreSchema:
"""
Tell Pydantic how to validate LlmModel.
Accepts strings and converts them to LlmModel instances.
"""
return core_schema.no_info_after_validator_function(
cls, # The validator function (LlmModel constructor)
core_schema.str_schema(), # Accept string input
serialization=core_schema.to_string_ser_schema(), # Serialize as string
)
@property
def value(self) -> str:
"""Return the model slug (for compatibility with enum-style access)."""
return str(self)
@classmethod
def default(cls) -> "LlmModel":
"""
Get the default model from the registry.
Returns the recommended model if set, otherwise gpt-4o if available
and enabled, otherwise the first enabled model from the registry.
Falls back to "gpt-4o" if registry is empty (e.g., at module import time).
"""
from backend.data.llm_registry import get_default_model_slug
slug = get_default_model_slug()
if slug is None:
# Registry is empty (e.g., at module import time before DB connection).
# Fall back to gpt-4o for backward compatibility.
slug = "gpt-4o"
return cls(slug)
class LlmModel(str, Enum, metaclass=LlmModelMeta):
# OpenAI models
O3_MINI = "o3-mini"
O3 = "o3-2025-04-16"
O1 = "o1"
O1_MINI = "o1-mini"
# GPT-5 models
GPT5_2 = "gpt-5.2-2025-12-11"
GPT5_1 = "gpt-5.1-2025-11-13"
GPT5 = "gpt-5-2025-08-07"
GPT5_MINI = "gpt-5-mini-2025-08-07"
GPT5_NANO = "gpt-5-nano-2025-08-07"
GPT5_CHAT = "gpt-5-chat-latest"
GPT41 = "gpt-4.1-2025-04-14"
GPT41_MINI = "gpt-4.1-mini-2025-04-14"
GPT4O_MINI = "gpt-4o-mini"
GPT4O = "gpt-4o"
GPT4_TURBO = "gpt-4-turbo"
GPT3_5_TURBO = "gpt-3.5-turbo"
# Anthropic models
CLAUDE_4_1_OPUS = "claude-opus-4-1-20250805"
CLAUDE_4_OPUS = "claude-opus-4-20250514"
CLAUDE_4_SONNET = "claude-sonnet-4-20250514"
CLAUDE_4_5_OPUS = "claude-opus-4-5-20251101"
CLAUDE_4_5_SONNET = "claude-sonnet-4-5-20250929"
CLAUDE_4_5_HAIKU = "claude-haiku-4-5-20251001"
CLAUDE_3_7_SONNET = "claude-3-7-sonnet-20250219"
CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
# AI/ML API models
AIML_API_QWEN2_5_72B = "Qwen/Qwen2.5-72B-Instruct-Turbo"
AIML_API_LLAMA3_1_70B = "nvidia/llama-3.1-nemotron-70b-instruct"
AIML_API_LLAMA3_3_70B = "meta-llama/Llama-3.3-70B-Instruct-Turbo"
AIML_API_META_LLAMA_3_1_70B = "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo"
AIML_API_LLAMA_3_2_3B = "meta-llama/Llama-3.2-3B-Instruct-Turbo"
# Groq models
LLAMA3_3_70B = "llama-3.3-70b-versatile"
LLAMA3_1_8B = "llama-3.1-8b-instant"
# Ollama models
OLLAMA_LLAMA3_3 = "llama3.3"
OLLAMA_LLAMA3_2 = "llama3.2"
OLLAMA_LLAMA3_8B = "llama3"
OLLAMA_LLAMA3_405B = "llama3.1:405b"
OLLAMA_DOLPHIN = "dolphin-mistral:latest"
# OpenRouter models
OPENAI_GPT_OSS_120B = "openai/gpt-oss-120b"
OPENAI_GPT_OSS_20B = "openai/gpt-oss-20b"
GEMINI_2_5_PRO = "google/gemini-2.5-pro-preview-03-25"
GEMINI_3_PRO_PREVIEW = "google/gemini-3-pro-preview"
GEMINI_2_5_FLASH = "google/gemini-2.5-flash"
GEMINI_2_0_FLASH = "google/gemini-2.0-flash-001"
GEMINI_2_5_FLASH_LITE_PREVIEW = "google/gemini-2.5-flash-lite-preview-06-17"
GEMINI_2_0_FLASH_LITE = "google/gemini-2.0-flash-lite-001"
MISTRAL_NEMO = "mistralai/mistral-nemo"
COHERE_COMMAND_R_08_2024 = "cohere/command-r-08-2024"
COHERE_COMMAND_R_PLUS_08_2024 = "cohere/command-r-plus-08-2024"
DEEPSEEK_CHAT = "deepseek/deepseek-chat" # Actually: DeepSeek V3
DEEPSEEK_R1_0528 = "deepseek/deepseek-r1-0528"
PERPLEXITY_SONAR = "perplexity/sonar"
PERPLEXITY_SONAR_PRO = "perplexity/sonar-pro"
PERPLEXITY_SONAR_DEEP_RESEARCH = "perplexity/sonar-deep-research"
NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B = "nousresearch/hermes-3-llama-3.1-405b"
NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B = "nousresearch/hermes-3-llama-3.1-70b"
AMAZON_NOVA_LITE_V1 = "amazon/nova-lite-v1"
AMAZON_NOVA_MICRO_V1 = "amazon/nova-micro-v1"
AMAZON_NOVA_PRO_V1 = "amazon/nova-pro-v1"
MICROSOFT_WIZARDLM_2_8X22B = "microsoft/wizardlm-2-8x22b"
GRYPHE_MYTHOMAX_L2_13B = "gryphe/mythomax-l2-13b"
META_LLAMA_4_SCOUT = "meta-llama/llama-4-scout"
META_LLAMA_4_MAVERICK = "meta-llama/llama-4-maverick"
GROK_4 = "x-ai/grok-4"
GROK_4_FAST = "x-ai/grok-4-fast"
GROK_4_1_FAST = "x-ai/grok-4.1-fast"
GROK_CODE_FAST_1 = "x-ai/grok-code-fast-1"
KIMI_K2 = "moonshotai/kimi-k2"
QWEN3_235B_A22B_THINKING = "qwen/qwen3-235b-a22b-thinking-2507"
QWEN3_CODER = "qwen/qwen3-coder"
# Llama API models
LLAMA_API_LLAMA_4_SCOUT = "Llama-4-Scout-17B-16E-Instruct-FP8"
LLAMA_API_LLAMA4_MAVERICK = "Llama-4-Maverick-17B-128E-Instruct-FP8"
LLAMA_API_LLAMA3_3_8B = "Llama-3.3-8B-Instruct"
LLAMA_API_LLAMA3_3_70B = "Llama-3.3-70B-Instruct"
# v0 by Vercel models
V0_1_5_MD = "v0-1.5-md"
V0_1_5_LG = "v0-1.5-lg"
V0_1_0_MD = "v0-1.0-md"
@classmethod
def __get_pydantic_json_schema__(cls, schema, handler):
@@ -187,12 +181,7 @@ class LlmModel(str, metaclass=LlmModelMeta):
llm_model_metadata = {}
for model in cls:
model_name = model.value
# Use registry directly with None check to gracefully handle
# missing metadata during startup/import before registry is populated
metadata = llm_registry.get_llm_model_metadata(model_name)
if metadata is None:
# Skip models without metadata (registry not yet populated)
continue
metadata = model.metadata
llm_model_metadata[model_name] = {
"creator": metadata.creator_name,
"creator_name": metadata.creator_name,
@@ -208,12 +197,7 @@ class LlmModel(str, metaclass=LlmModelMeta):
@property
def metadata(self) -> ModelMetadata:
metadata = llm_registry.get_llm_model_metadata(self.value)
if metadata:
return metadata
raise ValueError(
f"Missing metadata for model: {self.value}. Model not found in LLM registry."
)
return MODEL_METADATA[self]
@property
def provider(self) -> str:
@@ -228,11 +212,300 @@ class LlmModel(str, metaclass=LlmModelMeta):
return self.metadata.max_output_tokens
# MODEL_METADATA removed - all models now come from the database via llm_registry
MODEL_METADATA = {
# https://platform.openai.com/docs/models
LlmModel.O3: ModelMetadata("openai", 200000, 100000, "O3", "OpenAI", "OpenAI", 2),
LlmModel.O3_MINI: ModelMetadata(
"openai", 200000, 100000, "O3 Mini", "OpenAI", "OpenAI", 1
), # o3-mini-2025-01-31
LlmModel.O1: ModelMetadata(
"openai", 200000, 100000, "O1", "OpenAI", "OpenAI", 3
), # o1-2024-12-17
LlmModel.O1_MINI: ModelMetadata(
"openai", 128000, 65536, "O1 Mini", "OpenAI", "OpenAI", 2
), # o1-mini-2024-09-12
# GPT-5 models
LlmModel.GPT5_2: ModelMetadata(
"openai", 400000, 128000, "GPT-5.2", "OpenAI", "OpenAI", 3
),
LlmModel.GPT5_1: ModelMetadata(
"openai", 400000, 128000, "GPT-5.1", "OpenAI", "OpenAI", 2
),
LlmModel.GPT5: ModelMetadata(
"openai", 400000, 128000, "GPT-5", "OpenAI", "OpenAI", 1
),
LlmModel.GPT5_MINI: ModelMetadata(
"openai", 400000, 128000, "GPT-5 Mini", "OpenAI", "OpenAI", 1
),
LlmModel.GPT5_NANO: ModelMetadata(
"openai", 400000, 128000, "GPT-5 Nano", "OpenAI", "OpenAI", 1
),
LlmModel.GPT5_CHAT: ModelMetadata(
"openai", 400000, 16384, "GPT-5 Chat Latest", "OpenAI", "OpenAI", 2
),
LlmModel.GPT41: ModelMetadata(
"openai", 1047576, 32768, "GPT-4.1", "OpenAI", "OpenAI", 1
),
LlmModel.GPT41_MINI: ModelMetadata(
"openai", 1047576, 32768, "GPT-4.1 Mini", "OpenAI", "OpenAI", 1
),
LlmModel.GPT4O_MINI: ModelMetadata(
"openai", 128000, 16384, "GPT-4o Mini", "OpenAI", "OpenAI", 1
), # gpt-4o-mini-2024-07-18
LlmModel.GPT4O: ModelMetadata(
"openai", 128000, 16384, "GPT-4o", "OpenAI", "OpenAI", 2
), # gpt-4o-2024-08-06
LlmModel.GPT4_TURBO: ModelMetadata(
"openai", 128000, 4096, "GPT-4 Turbo", "OpenAI", "OpenAI", 3
), # gpt-4-turbo-2024-04-09
LlmModel.GPT3_5_TURBO: ModelMetadata(
"openai", 16385, 4096, "GPT-3.5 Turbo", "OpenAI", "OpenAI", 1
), # gpt-3.5-turbo-0125
# https://docs.anthropic.com/en/docs/about-claude/models
LlmModel.CLAUDE_4_1_OPUS: ModelMetadata(
"anthropic", 200000, 32000, "Claude Opus 4.1", "Anthropic", "Anthropic", 3
), # claude-opus-4-1-20250805
LlmModel.CLAUDE_4_OPUS: ModelMetadata(
"anthropic", 200000, 32000, "Claude Opus 4", "Anthropic", "Anthropic", 3
), # claude-4-opus-20250514
LlmModel.CLAUDE_4_SONNET: ModelMetadata(
"anthropic", 200000, 64000, "Claude Sonnet 4", "Anthropic", "Anthropic", 2
), # claude-4-sonnet-20250514
LlmModel.CLAUDE_4_5_OPUS: ModelMetadata(
"anthropic", 200000, 64000, "Claude Opus 4.5", "Anthropic", "Anthropic", 3
), # claude-opus-4-5-20251101
LlmModel.CLAUDE_4_5_SONNET: ModelMetadata(
"anthropic", 200000, 64000, "Claude Sonnet 4.5", "Anthropic", "Anthropic", 3
), # claude-sonnet-4-5-20250929
LlmModel.CLAUDE_4_5_HAIKU: ModelMetadata(
"anthropic", 200000, 64000, "Claude Haiku 4.5", "Anthropic", "Anthropic", 2
), # claude-haiku-4-5-20251001
LlmModel.CLAUDE_3_7_SONNET: ModelMetadata(
"anthropic", 200000, 64000, "Claude 3.7 Sonnet", "Anthropic", "Anthropic", 2
), # claude-3-7-sonnet-20250219
LlmModel.CLAUDE_3_HAIKU: ModelMetadata(
"anthropic", 200000, 4096, "Claude 3 Haiku", "Anthropic", "Anthropic", 1
), # claude-3-haiku-20240307
# https://docs.aimlapi.com/api-overview/model-database/text-models
LlmModel.AIML_API_QWEN2_5_72B: ModelMetadata(
"aiml_api", 32000, 8000, "Qwen 2.5 72B Instruct Turbo", "AI/ML", "Qwen", 1
),
LlmModel.AIML_API_LLAMA3_1_70B: ModelMetadata(
"aiml_api",
128000,
40000,
"Llama 3.1 Nemotron 70B Instruct",
"AI/ML",
"Nvidia",
1,
),
LlmModel.AIML_API_LLAMA3_3_70B: ModelMetadata(
"aiml_api", 128000, None, "Llama 3.3 70B Instruct Turbo", "AI/ML", "Meta", 1
),
LlmModel.AIML_API_META_LLAMA_3_1_70B: ModelMetadata(
"aiml_api", 131000, 2000, "Llama 3.1 70B Instruct Turbo", "AI/ML", "Meta", 1
),
LlmModel.AIML_API_LLAMA_3_2_3B: ModelMetadata(
"aiml_api", 128000, None, "Llama 3.2 3B Instruct Turbo", "AI/ML", "Meta", 1
),
# https://console.groq.com/docs/models
LlmModel.LLAMA3_3_70B: ModelMetadata(
"groq", 128000, 32768, "Llama 3.3 70B Versatile", "Groq", "Meta", 1
),
LlmModel.LLAMA3_1_8B: ModelMetadata(
"groq", 128000, 8192, "Llama 3.1 8B Instant", "Groq", "Meta", 1
),
# https://ollama.com/library
LlmModel.OLLAMA_LLAMA3_3: ModelMetadata(
"ollama", 8192, None, "Llama 3.3", "Ollama", "Meta", 1
),
LlmModel.OLLAMA_LLAMA3_2: ModelMetadata(
"ollama", 8192, None, "Llama 3.2", "Ollama", "Meta", 1
),
LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata(
"ollama", 8192, None, "Llama 3", "Ollama", "Meta", 1
),
LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata(
"ollama", 8192, None, "Llama 3.1 405B", "Ollama", "Meta", 1
),
LlmModel.OLLAMA_DOLPHIN: ModelMetadata(
"ollama", 32768, None, "Dolphin Mistral Latest", "Ollama", "Mistral AI", 1
),
# https://openrouter.ai/models
LlmModel.GEMINI_2_5_PRO: ModelMetadata(
"open_router",
1050000,
8192,
"Gemini 2.5 Pro Preview 03.25",
"OpenRouter",
"Google",
2,
),
LlmModel.GEMINI_3_PRO_PREVIEW: ModelMetadata(
"open_router", 1048576, 65535, "Gemini 3 Pro Preview", "OpenRouter", "Google", 2
),
LlmModel.GEMINI_2_5_FLASH: ModelMetadata(
"open_router", 1048576, 65535, "Gemini 2.5 Flash", "OpenRouter", "Google", 1
),
LlmModel.GEMINI_2_0_FLASH: ModelMetadata(
"open_router", 1048576, 8192, "Gemini 2.0 Flash 001", "OpenRouter", "Google", 1
),
LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: ModelMetadata(
"open_router",
1048576,
65535,
"Gemini 2.5 Flash Lite Preview 06.17",
"OpenRouter",
"Google",
1,
),
LlmModel.GEMINI_2_0_FLASH_LITE: ModelMetadata(
"open_router",
1048576,
8192,
"Gemini 2.0 Flash Lite 001",
"OpenRouter",
"Google",
1,
),
LlmModel.MISTRAL_NEMO: ModelMetadata(
"open_router", 128000, 4096, "Mistral Nemo", "OpenRouter", "Mistral AI", 1
),
LlmModel.COHERE_COMMAND_R_08_2024: ModelMetadata(
"open_router", 128000, 4096, "Command R 08.2024", "OpenRouter", "Cohere", 1
),
LlmModel.COHERE_COMMAND_R_PLUS_08_2024: ModelMetadata(
"open_router", 128000, 4096, "Command R Plus 08.2024", "OpenRouter", "Cohere", 2
),
LlmModel.DEEPSEEK_CHAT: ModelMetadata(
"open_router", 64000, 2048, "DeepSeek Chat", "OpenRouter", "DeepSeek", 1
),
LlmModel.DEEPSEEK_R1_0528: ModelMetadata(
"open_router", 163840, 163840, "DeepSeek R1 0528", "OpenRouter", "DeepSeek", 1
),
LlmModel.PERPLEXITY_SONAR: ModelMetadata(
"open_router", 127000, 8000, "Sonar", "OpenRouter", "Perplexity", 1
),
LlmModel.PERPLEXITY_SONAR_PRO: ModelMetadata(
"open_router", 200000, 8000, "Sonar Pro", "OpenRouter", "Perplexity", 2
),
LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: ModelMetadata(
"open_router",
128000,
16000,
"Sonar Deep Research",
"OpenRouter",
"Perplexity",
3,
),
LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B: ModelMetadata(
"open_router",
131000,
4096,
"Hermes 3 Llama 3.1 405B",
"OpenRouter",
"Nous Research",
1,
),
LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B: ModelMetadata(
"open_router",
12288,
12288,
"Hermes 3 Llama 3.1 70B",
"OpenRouter",
"Nous Research",
1,
),
LlmModel.OPENAI_GPT_OSS_120B: ModelMetadata(
"open_router", 131072, 131072, "GPT-OSS 120B", "OpenRouter", "OpenAI", 1
),
LlmModel.OPENAI_GPT_OSS_20B: ModelMetadata(
"open_router", 131072, 32768, "GPT-OSS 20B", "OpenRouter", "OpenAI", 1
),
LlmModel.AMAZON_NOVA_LITE_V1: ModelMetadata(
"open_router", 300000, 5120, "Nova Lite V1", "OpenRouter", "Amazon", 1
),
LlmModel.AMAZON_NOVA_MICRO_V1: ModelMetadata(
"open_router", 128000, 5120, "Nova Micro V1", "OpenRouter", "Amazon", 1
),
LlmModel.AMAZON_NOVA_PRO_V1: ModelMetadata(
"open_router", 300000, 5120, "Nova Pro V1", "OpenRouter", "Amazon", 1
),
LlmModel.MICROSOFT_WIZARDLM_2_8X22B: ModelMetadata(
"open_router", 65536, 4096, "WizardLM 2 8x22B", "OpenRouter", "Microsoft", 1
),
LlmModel.GRYPHE_MYTHOMAX_L2_13B: ModelMetadata(
"open_router", 4096, 4096, "MythoMax L2 13B", "OpenRouter", "Gryphe", 1
),
LlmModel.META_LLAMA_4_SCOUT: ModelMetadata(
"open_router", 131072, 131072, "Llama 4 Scout", "OpenRouter", "Meta", 1
),
LlmModel.META_LLAMA_4_MAVERICK: ModelMetadata(
"open_router", 1048576, 1000000, "Llama 4 Maverick", "OpenRouter", "Meta", 1
),
LlmModel.GROK_4: ModelMetadata(
"open_router", 256000, 256000, "Grok 4", "OpenRouter", "xAI", 3
),
LlmModel.GROK_4_FAST: ModelMetadata(
"open_router", 2000000, 30000, "Grok 4 Fast", "OpenRouter", "xAI", 1
),
LlmModel.GROK_4_1_FAST: ModelMetadata(
"open_router", 2000000, 30000, "Grok 4.1 Fast", "OpenRouter", "xAI", 1
),
LlmModel.GROK_CODE_FAST_1: ModelMetadata(
"open_router", 256000, 10000, "Grok Code Fast 1", "OpenRouter", "xAI", 1
),
LlmModel.KIMI_K2: ModelMetadata(
"open_router", 131000, 131000, "Kimi K2", "OpenRouter", "Moonshot AI", 1
),
LlmModel.QWEN3_235B_A22B_THINKING: ModelMetadata(
"open_router",
262144,
262144,
"Qwen 3 235B A22B Thinking 2507",
"OpenRouter",
"Qwen",
1,
),
LlmModel.QWEN3_CODER: ModelMetadata(
"open_router", 262144, 262144, "Qwen 3 Coder", "OpenRouter", "Qwen", 3
),
# Llama API models
LlmModel.LLAMA_API_LLAMA_4_SCOUT: ModelMetadata(
"llama_api",
128000,
4028,
"Llama 4 Scout 17B 16E Instruct FP8",
"Llama API",
"Meta",
1,
),
LlmModel.LLAMA_API_LLAMA4_MAVERICK: ModelMetadata(
"llama_api",
128000,
4028,
"Llama 4 Maverick 17B 128E Instruct FP8",
"Llama API",
"Meta",
1,
),
LlmModel.LLAMA_API_LLAMA3_3_8B: ModelMetadata(
"llama_api", 128000, 4028, "Llama 3.3 8B Instruct", "Llama API", "Meta", 1
),
LlmModel.LLAMA_API_LLAMA3_3_70B: ModelMetadata(
"llama_api", 128000, 4028, "Llama 3.3 70B Instruct", "Llama API", "Meta", 1
),
# v0 by Vercel models
LlmModel.V0_1_5_MD: ModelMetadata("v0", 128000, 64000, "v0 1.5 MD", "V0", "V0", 1),
LlmModel.V0_1_5_LG: ModelMetadata("v0", 512000, 64000, "v0 1.5 LG", "V0", "V0", 1),
LlmModel.V0_1_0_MD: ModelMetadata("v0", 128000, 64000, "v0 1.0 MD", "V0", "V0", 1),
}
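# The positional fields in each ModelMetadata entry above map, in order, to the
# NamedTuple defined later in this diff: provider, context_window,
# max_output_tokens, display_name, provider_name, creator_name, price_tier.
# Keyword form of one entry, as a readability sketch (not an extra registry row):
_example = ModelMetadata(
    provider="open_router",
    context_window=1_048_576,
    max_output_tokens=65_535,
    display_name="Gemini 2.5 Flash",
    provider_name="OpenRouter",
    creator_name="Google",
    price_tier=1,
)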
# Default model constant for backward compatibility
# Uses the dynamic registry to get the default model
DEFAULT_LLM_MODEL = LlmModel.default()
DEFAULT_LLM_MODEL = LlmModel.GPT5_2
for model in LlmModel:
if model not in MODEL_METADATA:
raise ValueError(f"Missing MODEL_METADATA metadata for model: {model}")
class ToolCall(BaseModel):
@@ -361,98 +634,19 @@ async def llm_call(
- prompt_tokens: The number of tokens used in the prompt.
- completion_tokens: The number of tokens used in the completion.
"""
# Get model metadata and check if enabled - with fallback support
# The model we'll actually use (may differ if original is disabled)
model_to_use = llm_model.value
# Check if model is in registry and if it's enabled
from backend.data.llm_registry import (
get_fallback_model_for_disabled,
get_model_info,
)
model_info = get_model_info(llm_model.value)
if model_info and not model_info.is_enabled:
# Model is disabled - try to find a fallback from the same provider
fallback = get_fallback_model_for_disabled(llm_model.value)
if fallback:
logger.warning(
f"Model '{llm_model.value}' is disabled. Using fallback model '{fallback.slug}' from the same provider ({fallback.metadata.provider})."
)
model_to_use = fallback.slug
# Use fallback model's metadata
provider = fallback.metadata.provider
context_window = fallback.metadata.context_window
model_max_output = fallback.metadata.max_output_tokens or int(2**15)
else:
# No fallback available - raise error
raise ValueError(
f"LLM model '{llm_model.value}' is disabled and no fallback model "
f"from the same provider is available. Please enable the model or "
f"select a different model in the block configuration."
)
else:
# Model is enabled or not in registry (legacy/static model)
try:
provider = llm_model.metadata.provider
context_window = llm_model.context_window
model_max_output = llm_model.max_output_tokens or int(2**15)
except ValueError:
# Model not in cache - try refreshing the registry once if we have DB access
logger.warning(f"Model {llm_model.value} not found in registry cache")
# Try refreshing the registry if we have database access
from backend.data.db import is_connected
if is_connected():
try:
logger.info(
f"Refreshing LLM registry and retrying lookup for {llm_model.value}"
)
await llm_registry.refresh_llm_registry()
# Try again after refresh
try:
provider = llm_model.metadata.provider
context_window = llm_model.context_window
model_max_output = llm_model.max_output_tokens or int(2**15)
logger.info(
f"Successfully loaded model {llm_model.value} metadata after registry refresh"
)
except ValueError:
# Still not found after refresh
raise ValueError(
f"LLM model '{llm_model.value}' not found in registry after refresh. "
"Please ensure the model is added and enabled in the LLM registry via the admin UI."
)
except Exception as refresh_exc:
logger.error(f"Failed to refresh LLM registry: {refresh_exc}")
raise ValueError(
f"LLM model '{llm_model.value}' not found in registry and failed to refresh. "
"Please ensure the model is added to the LLM registry via the admin UI."
) from refresh_exc
else:
# No DB access (e.g., in executor without direct DB connection)
# The registry should have been loaded on startup
raise ValueError(
f"LLM model '{llm_model.value}' not found in registry cache. "
"The registry may need to be refreshed. Please contact support or try again later."
)
# Create effective model for model-specific parameter resolution (e.g., o-series check)
# This uses the resolved model_to_use which may differ from llm_model if fallback occurred
effective_model = LlmModel(model_to_use)
provider = llm_model.metadata.provider
context_window = llm_model.context_window
if compress_prompt_to_fit:
prompt = compress_prompt(
messages=prompt,
target_tokens=context_window // 2,
target_tokens=llm_model.context_window // 2,
lossy_ok=True,
)
# Calculate available tokens based on context window and input length
estimated_input_tokens = estimate_token_count(prompt)
# model_max_output already set above
model_max_output = llm_model.max_output_tokens or int(2**15)
user_max = max_tokens or model_max_output
available_tokens = max(context_window - estimated_input_tokens, 0)
max_tokens = max(min(available_tokens, model_max_output, user_max), 1)
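# A worked sketch of the budgeting above (all numbers invented for illustration):
_context_window = 128_000
_estimated_input = 100_000        # estimate_token_count(prompt)
_model_max_output = 16_384        # provider's completion ceiling
_user_max = 32_000                # caller-supplied max_tokens
_available = max(_context_window - _estimated_input, 0)               # 28_000
assert max(min(_available, _model_max_output, _user_max), 1) == 16_384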
@@ -463,14 +657,14 @@ async def llm_call(
response_format = None
parallel_tool_calls = get_parallel_tool_calls_param(
effective_model, parallel_tool_calls
llm_model, parallel_tool_calls
)
if force_json_output:
response_format = {"type": "json_object"}
response = await oai_client.chat.completions.create(
model=model_to_use,
model=llm_model.value,
messages=prompt, # type: ignore
response_format=response_format, # type: ignore
max_completion_tokens=max_tokens,
@@ -517,7 +711,7 @@ async def llm_call(
)
try:
resp = await client.messages.create(
model=model_to_use,
model=llm_model.value,
system=sysprompt,
messages=messages,
max_tokens=max_tokens,
@@ -581,7 +775,7 @@ async def llm_call(
client = AsyncGroq(api_key=credentials.api_key.get_secret_value())
response_format = {"type": "json_object"} if force_json_output else None
response = await client.chat.completions.create(
model=model_to_use,
model=llm_model.value,
messages=prompt, # type: ignore
response_format=response_format, # type: ignore
max_tokens=max_tokens,
@@ -603,7 +797,7 @@ async def llm_call(
sys_messages = [p["content"] for p in prompt if p["role"] == "system"]
usr_messages = [p["content"] for p in prompt if p["role"] != "system"]
response = await client.generate(
model=model_to_use,
model=llm_model.value,
prompt=f"{sys_messages}\n\n{usr_messages}",
stream=False,
options={"num_ctx": max_tokens},
@@ -625,7 +819,7 @@ async def llm_call(
)
parallel_tool_calls_param = get_parallel_tool_calls_param(
effective_model, parallel_tool_calls
llm_model, parallel_tool_calls
)
response = await client.chat.completions.create(
@@ -633,7 +827,7 @@ async def llm_call(
"HTTP-Referer": "https://agpt.co",
"X-Title": "AutoGPT",
},
model=model_to_use,
model=llm_model.value,
messages=prompt, # type: ignore
max_tokens=max_tokens,
tools=tools_param, # type: ignore
@@ -667,7 +861,7 @@ async def llm_call(
)
parallel_tool_calls_param = get_parallel_tool_calls_param(
effective_model, parallel_tool_calls
llm_model, parallel_tool_calls
)
response = await client.chat.completions.create(
@@ -675,7 +869,7 @@ async def llm_call(
"HTTP-Referer": "https://agpt.co",
"X-Title": "AutoGPT",
},
model=model_to_use,
model=llm_model.value,
messages=prompt, # type: ignore
max_tokens=max_tokens,
tools=tools_param, # type: ignore
@@ -702,7 +896,7 @@ async def llm_call(
reasoning=reasoning,
)
elif provider == "aiml_api":
client = openai.AsyncOpenAI(
client = openai.OpenAI(
base_url="https://api.aimlapi.com/v2",
api_key=credentials.api_key.get_secret_value(),
default_headers={
@@ -712,8 +906,8 @@ async def llm_call(
},
)
completion = await client.chat.completions.create(
model=model_to_use,
completion = client.chat.completions.create(
model=llm_model.value,
messages=prompt, # type: ignore
max_tokens=max_tokens,
)
@@ -741,11 +935,11 @@ async def llm_call(
response_format = {"type": "json_object"}
parallel_tool_calls_param = get_parallel_tool_calls_param(
effective_model, parallel_tool_calls
llm_model, parallel_tool_calls
)
response = await client.chat.completions.create(
model=model_to_use,
model=llm_model.value,
messages=prompt, # type: ignore
response_format=response_format, # type: ignore
max_tokens=max_tokens,
@@ -796,10 +990,9 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
)
model: LlmModel = SchemaField(
title="LLM Model",
default_factory=LlmModel.default,
default=DEFAULT_LLM_MODEL,
description="The language model to use for answering the prompt.",
advanced=False,
json_schema_extra=llm_model_schema_extra(),
)
force_json_output: bool = SchemaField(
title="Restrict LLM to pure JSON output",
@@ -862,7 +1055,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
input_schema=AIStructuredResponseGeneratorBlock.Input,
output_schema=AIStructuredResponseGeneratorBlock.Output,
test_input={
"model": "gpt-4o", # Using string value - enum accepts any model slug dynamically
"model": DEFAULT_LLM_MODEL,
"credentials": TEST_CREDENTIALS_INPUT,
"expected_format": {
"key1": "value1",
@@ -1228,10 +1421,9 @@ class AITextGeneratorBlock(AIBlockBase):
)
model: LlmModel = SchemaField(
title="LLM Model",
default_factory=LlmModel.default,
default=DEFAULT_LLM_MODEL,
description="The language model to use for answering the prompt.",
advanced=False,
json_schema_extra=llm_model_schema_extra(),
)
credentials: AICredentials = AICredentialsField()
sys_prompt: str = SchemaField(
@@ -1325,9 +1517,8 @@ class AITextSummarizerBlock(AIBlockBase):
)
model: LlmModel = SchemaField(
title="LLM Model",
default_factory=LlmModel.default,
default=DEFAULT_LLM_MODEL,
description="The language model to use for summarizing the text.",
json_schema_extra=llm_model_schema_extra(),
)
focus: str = SchemaField(
title="Focus",
@@ -1543,9 +1734,8 @@ class AIConversationBlock(AIBlockBase):
)
model: LlmModel = SchemaField(
title="LLM Model",
default_factory=LlmModel.default,
default=DEFAULT_LLM_MODEL,
description="The language model to use for the conversation.",
json_schema_extra=llm_model_schema_extra(),
)
credentials: AICredentials = AICredentialsField()
max_tokens: int | None = SchemaField(
@@ -1582,7 +1772,7 @@ class AIConversationBlock(AIBlockBase):
},
{"role": "user", "content": "Where was it played?"},
],
"model": "gpt-4o", # Using string value - enum accepts any model slug dynamically
"model": DEFAULT_LLM_MODEL,
"credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
@@ -1645,10 +1835,9 @@ class AIListGeneratorBlock(AIBlockBase):
)
model: LlmModel = SchemaField(
title="LLM Model",
default_factory=LlmModel.default,
default=DEFAULT_LLM_MODEL,
description="The language model to use for generating the list.",
advanced=True,
json_schema_extra=llm_model_schema_extra(),
)
credentials: AICredentials = AICredentialsField()
max_retries: int = SchemaField(
@@ -1703,7 +1892,7 @@ class AIListGeneratorBlock(AIBlockBase):
"drawing explorers to uncover its mysteries. Each planet showcases the limitless possibilities of "
"fictional worlds."
),
"model": "gpt-4o", # Using string value - enum accepts any model slug dynamically
"model": DEFAULT_LLM_MODEL,
"credentials": TEST_CREDENTIALS_INPUT,
"max_retries": 3,
"force_json_output": False,

View File

@@ -226,10 +226,9 @@ class SmartDecisionMakerBlock(Block):
)
model: llm.LlmModel = SchemaField(
title="LLM Model",
default_factory=llm.LlmModel.default,
default=llm.DEFAULT_LLM_MODEL,
description="The language model to use for answering the prompt.",
advanced=False,
json_schema_extra=llm.llm_model_schema_extra(),
)
credentials: llm.AICredentials = llm.AICredentialsField()
multiple_tool_calls: bool = SchemaField(

View File

@@ -10,13 +10,13 @@ import stagehand.main
from stagehand import Stagehand
from backend.blocks.llm import (
MODEL_METADATA,
AICredentials,
AICredentialsField,
LlmModel,
ModelMetadata,
)
from backend.blocks.stagehand._config import stagehand as stagehand_provider
from backend.data import llm_registry
from backend.sdk import (
APIKeyCredentials,
Block,
@@ -91,7 +91,7 @@ class StagehandRecommendedLlmModel(str, Enum):
Returns the provider name for the model in the required format for Stagehand:
provider/model_name
"""
model_metadata = self.metadata
model_metadata = MODEL_METADATA[LlmModel(self.value)]
model_name = self.value
if len(model_name.split("/")) == 1 and not self.value.startswith(
@@ -107,23 +107,19 @@ class StagehandRecommendedLlmModel(str, Enum):
@property
def provider(self) -> str:
return self.metadata.provider
return MODEL_METADATA[LlmModel(self.value)].provider
@property
def metadata(self) -> ModelMetadata:
metadata = llm_registry.get_llm_model_metadata(self.value)
if metadata:
return metadata
# Fallback to LlmModel enum if registry lookup fails
return LlmModel(self.value).metadata
return MODEL_METADATA[LlmModel(self.value)]
@property
def context_window(self) -> int:
return self.metadata.context_window
return MODEL_METADATA[LlmModel(self.value)].context_window
@property
def max_output_tokens(self) -> int | None:
return self.metadata.max_output_tokens
return MODEL_METADATA[LlmModel(self.value)].max_output_tokens
class StagehandObserveBlock(Block):

View File

@@ -25,7 +25,6 @@ from prisma.models import AgentBlock
from prisma.types import AgentBlockCreateInput
from pydantic import BaseModel
from backend.data.llm_registry import update_schema_with_llm_registry
from backend.data.model import NodeExecutionStats
from backend.integrations.providers import ProviderName
from backend.util import json
@@ -144,59 +143,35 @@ class BlockInfo(BaseModel):
class BlockSchema(BaseModel):
cached_jsonschema: ClassVar[dict[str, Any] | None] = None
@classmethod
def clear_schema_cache(cls) -> None:
"""Clear the cached JSON schema for this class."""
# Use None as the cleared-cache sentinel so jsonschema() regenerates the schema
cls.cached_jsonschema = None # type: ignore
@staticmethod
def clear_all_schema_caches() -> None:
"""Clear cached JSON schemas for all BlockSchema subclasses."""
def clear_recursive(cls: type) -> None:
"""Recursively clear cache for class and all subclasses."""
if hasattr(cls, "clear_schema_cache"):
cls.clear_schema_cache()
for subclass in cls.__subclasses__():
clear_recursive(subclass)
clear_recursive(BlockSchema)
cached_jsonschema: ClassVar[dict[str, Any]]
@classmethod
def jsonschema(cls) -> dict[str, Any]:
# Generate schema if not cached
if not cls.cached_jsonschema:
model = jsonref.replace_refs(cls.model_json_schema(), merge_props=True)
if cls.cached_jsonschema:
return cls.cached_jsonschema
def ref_to_dict(obj):
if isinstance(obj, dict):
# OpenAPI <3.1 does not support sibling fields that have a $ref key
# So sometimes, the schema has an "allOf"/"anyOf"/"oneOf" with 1 item.
keys = {"allOf", "anyOf", "oneOf"}
one_key = next(
(k for k in keys if k in obj and len(obj[k]) == 1), None
)
if one_key:
obj.update(obj[one_key][0])
model = jsonref.replace_refs(cls.model_json_schema(), merge_props=True)
return {
key: ref_to_dict(value)
for key, value in obj.items()
if not key.startswith("$") and key != one_key
}
elif isinstance(obj, list):
return [ref_to_dict(item) for item in obj]
def ref_to_dict(obj):
if isinstance(obj, dict):
# OpenAPI <3.1 does not support sibling fields that have a $ref key
# So sometimes, the schema has an "allOf"/"anyOf"/"oneOf" with 1 item.
keys = {"allOf", "anyOf", "oneOf"}
one_key = next((k for k in keys if k in obj and len(obj[k]) == 1), None)
if one_key:
obj.update(obj[one_key][0])
return obj
return {
key: ref_to_dict(value)
for key, value in obj.items()
if not key.startswith("$") and key != one_key
}
elif isinstance(obj, list):
return [ref_to_dict(item) for item in obj]
cls.cached_jsonschema = cast(dict[str, Any], ref_to_dict(model))
return obj
# Always post-process to ensure LLM registry data is up-to-date
# This refreshes model options and discriminator mappings even if schema was cached
update_schema_with_llm_registry(cls.cached_jsonschema, cls)
cls.cached_jsonschema = cast(dict[str, Any], ref_to_dict(model))
return cls.cached_jsonschema
@@ -259,7 +234,7 @@ class BlockSchema(BaseModel):
super().__pydantic_init_subclass__(**kwargs)
# Reset cached JSON schema to prevent inheriting it from parent class
cls.cached_jsonschema = None
cls.cached_jsonschema = {}
credentials_fields = cls.get_credentials_fields()
@@ -896,28 +871,6 @@ def is_block_auth_configured(
async def initialize_blocks() -> None:
# Refresh LLM registry before initializing blocks so blocks can use registry data
# This ensures the registry cache is populated even in executor context
try:
from backend.data import llm_registry
from backend.data.block_cost_config import refresh_llm_costs
# Only refresh if we have DB access (check if Prisma is connected)
from backend.data.db import is_connected
if is_connected():
await llm_registry.refresh_llm_registry()
refresh_llm_costs()
logger.info("LLM registry refreshed during block initialization")
else:
logger.warning(
"Prisma not connected, skipping LLM registry refresh during block initialization"
)
except Exception as exc:
logger.warning(
"Failed to refresh LLM registry during block initialization: %s", exc
)
# First, sync all provider costs to blocks
# Imported here to avoid circular import
from backend.sdk.cost_integration import sync_all_provider_costs

View File

@@ -1,4 +1,3 @@
import logging
from typing import Type
from backend.blocks.ai_image_customizer import AIImageCustomizerBlock, GeminiImageModel
@@ -24,18 +23,19 @@ from backend.blocks.ideogram import IdeogramModelBlock
from backend.blocks.jina.embeddings import JinaEmbeddingBlock
from backend.blocks.jina.search import ExtractWebsiteContentBlock, SearchTheWebBlock
from backend.blocks.llm import (
MODEL_METADATA,
AIConversationBlock,
AIListGeneratorBlock,
AIStructuredResponseGeneratorBlock,
AITextGeneratorBlock,
AITextSummarizerBlock,
LlmModel,
)
from backend.blocks.replicate.flux_advanced import ReplicateFluxAdvancedModelBlock
from backend.blocks.replicate.replicate_block import ReplicateModelBlock
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
from backend.blocks.talking_head import CreateTalkingAvatarVideoBlock
from backend.blocks.text_to_speech_block import UnrealTextToSpeechBlock
from backend.data import llm_registry
from backend.data.block import Block, BlockCost, BlockCostType
from backend.integrations.credentials_store import (
aiml_api_credentials,
@@ -55,63 +55,210 @@ from backend.integrations.credentials_store import (
v0_credentials,
)
logger = logging.getLogger(__name__)
# =============== Configure the cost for each LLM Model call =============== #
PROVIDER_CREDENTIALS = {
"openai": openai_credentials,
"anthropic": anthropic_credentials,
"groq": groq_credentials,
"open_router": open_router_credentials,
"llama_api": llama_api_credentials,
"aiml_api": aiml_api_credentials,
"v0": v0_credentials,
MODEL_COST: dict[LlmModel, int] = {
LlmModel.O3: 4,
LlmModel.O3_MINI: 2,
LlmModel.O1: 16,
LlmModel.O1_MINI: 4,
# GPT-5 models
LlmModel.GPT5_2: 6,
LlmModel.GPT5_1: 5,
LlmModel.GPT5: 2,
LlmModel.GPT5_MINI: 1,
LlmModel.GPT5_NANO: 1,
LlmModel.GPT5_CHAT: 5,
LlmModel.GPT41: 2,
LlmModel.GPT41_MINI: 1,
LlmModel.GPT4O_MINI: 1,
LlmModel.GPT4O: 3,
LlmModel.GPT4_TURBO: 10,
LlmModel.GPT3_5_TURBO: 1,
LlmModel.CLAUDE_4_1_OPUS: 21,
LlmModel.CLAUDE_4_OPUS: 21,
LlmModel.CLAUDE_4_SONNET: 5,
LlmModel.CLAUDE_4_5_HAIKU: 4,
LlmModel.CLAUDE_4_5_OPUS: 14,
LlmModel.CLAUDE_4_5_SONNET: 9,
LlmModel.CLAUDE_3_7_SONNET: 5,
LlmModel.CLAUDE_3_HAIKU: 1,
LlmModel.AIML_API_QWEN2_5_72B: 1,
LlmModel.AIML_API_LLAMA3_1_70B: 1,
LlmModel.AIML_API_LLAMA3_3_70B: 1,
LlmModel.AIML_API_META_LLAMA_3_1_70B: 1,
LlmModel.AIML_API_LLAMA_3_2_3B: 1,
LlmModel.LLAMA3_3_70B: 1,
LlmModel.LLAMA3_1_8B: 1,
LlmModel.OLLAMA_LLAMA3_3: 1,
LlmModel.OLLAMA_LLAMA3_2: 1,
LlmModel.OLLAMA_LLAMA3_8B: 1,
LlmModel.OLLAMA_LLAMA3_405B: 1,
LlmModel.OLLAMA_DOLPHIN: 1,
LlmModel.OPENAI_GPT_OSS_120B: 1,
LlmModel.OPENAI_GPT_OSS_20B: 1,
LlmModel.GEMINI_2_5_PRO: 4,
LlmModel.GEMINI_3_PRO_PREVIEW: 5,
LlmModel.GEMINI_2_5_FLASH: 1,
LlmModel.GEMINI_2_0_FLASH: 1,
LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: 1,
LlmModel.GEMINI_2_0_FLASH_LITE: 1,
LlmModel.MISTRAL_NEMO: 1,
LlmModel.COHERE_COMMAND_R_08_2024: 1,
LlmModel.COHERE_COMMAND_R_PLUS_08_2024: 3,
LlmModel.DEEPSEEK_CHAT: 2,
LlmModel.DEEPSEEK_R1_0528: 1,
LlmModel.PERPLEXITY_SONAR: 1,
LlmModel.PERPLEXITY_SONAR_PRO: 5,
LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: 10,
LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B: 1,
LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B: 1,
LlmModel.AMAZON_NOVA_LITE_V1: 1,
LlmModel.AMAZON_NOVA_MICRO_V1: 1,
LlmModel.AMAZON_NOVA_PRO_V1: 1,
LlmModel.MICROSOFT_WIZARDLM_2_8X22B: 1,
LlmModel.GRYPHE_MYTHOMAX_L2_13B: 1,
LlmModel.META_LLAMA_4_SCOUT: 1,
LlmModel.META_LLAMA_4_MAVERICK: 1,
LlmModel.LLAMA_API_LLAMA_4_SCOUT: 1,
LlmModel.LLAMA_API_LLAMA4_MAVERICK: 1,
LlmModel.LLAMA_API_LLAMA3_3_8B: 1,
LlmModel.LLAMA_API_LLAMA3_3_70B: 1,
LlmModel.GROK_4: 9,
LlmModel.GROK_4_FAST: 1,
LlmModel.GROK_4_1_FAST: 1,
LlmModel.GROK_CODE_FAST_1: 1,
LlmModel.KIMI_K2: 1,
LlmModel.QWEN3_235B_A22B_THINKING: 1,
LlmModel.QWEN3_CODER: 9,
# v0 by Vercel models
LlmModel.V0_1_5_MD: 1,
LlmModel.V0_1_5_LG: 2,
LlmModel.V0_1_0_MD: 1,
}
# =============== Configure the cost for each LLM Model call =============== #
# All LLM costs now come from the database via llm_registry
LLM_COST: list[BlockCost] = []
for model in LlmModel:
if model not in MODEL_COST:
raise ValueError(f"Missing MODEL_COST for model: {model}")
def _build_llm_costs_from_registry() -> list[BlockCost]:
"""Build BlockCost list from all models in the LLM registry."""
costs: list[BlockCost] = []
for model in llm_registry.iter_dynamic_models():
for cost in model.costs:
credentials = PROVIDER_CREDENTIALS.get(cost.credential_provider)
if not credentials:
logger.warning(
"Skipping cost entry for %s due to unknown credentials provider %s",
model.slug,
cost.credential_provider,
)
continue
cost_filter = {
"model": model.slug,
LLM_COST = (
# Anthropic Models
[
BlockCost(
cost_type=BlockCostType.RUN,
cost_filter={
"model": model,
"credentials": {
"id": credentials.id,
"provider": credentials.provider,
"type": credentials.type,
"id": anthropic_credentials.id,
"provider": anthropic_credentials.provider,
"type": anthropic_credentials.type,
},
}
costs.append(
BlockCost(
cost_type=BlockCostType.RUN,
cost_filter=cost_filter,
cost_amount=cost.credit_cost,
)
)
return costs
def refresh_llm_costs() -> None:
"""Refresh LLM costs from the registry. All costs now come from the database."""
LLM_COST.clear()
LLM_COST.extend(_build_llm_costs_from_registry())
# Initial load will happen after registry is refreshed at startup
# Don't call refresh_llm_costs() here - it will be called after registry refresh
},
cost_amount=cost,
)
for model, cost in MODEL_COST.items()
if MODEL_METADATA[model].provider == "anthropic"
]
# OpenAI Models
+ [
BlockCost(
cost_type=BlockCostType.RUN,
cost_filter={
"model": model,
"credentials": {
"id": openai_credentials.id,
"provider": openai_credentials.provider,
"type": openai_credentials.type,
},
},
cost_amount=cost,
)
for model, cost in MODEL_COST.items()
if MODEL_METADATA[model].provider == "openai"
]
# Groq Models
+ [
BlockCost(
cost_type=BlockCostType.RUN,
cost_filter={
"model": model,
"credentials": {"id": groq_credentials.id},
},
cost_amount=cost,
)
for model, cost in MODEL_COST.items()
if MODEL_METADATA[model].provider == "groq"
]
# Open Router Models
+ [
BlockCost(
cost_type=BlockCostType.RUN,
cost_filter={
"model": model,
"credentials": {
"id": open_router_credentials.id,
"provider": open_router_credentials.provider,
"type": open_router_credentials.type,
},
},
cost_amount=cost,
)
for model, cost in MODEL_COST.items()
if MODEL_METADATA[model].provider == "open_router"
]
# Llama API Models
+ [
BlockCost(
cost_type=BlockCostType.RUN,
cost_filter={
"model": model,
"credentials": {
"id": llama_api_credentials.id,
"provider": llama_api_credentials.provider,
"type": llama_api_credentials.type,
},
},
cost_amount=cost,
)
for model, cost in MODEL_COST.items()
if MODEL_METADATA[model].provider == "llama_api"
]
# v0 by Vercel Models
+ [
BlockCost(
cost_type=BlockCostType.RUN,
cost_filter={
"model": model,
"credentials": {
"id": v0_credentials.id,
"provider": v0_credentials.provider,
"type": v0_credentials.type,
},
},
cost_amount=cost,
)
for model, cost in MODEL_COST.items()
if MODEL_METADATA[model].provider == "v0"
]
# AI/ML Api Models
+ [
BlockCost(
cost_type=BlockCostType.RUN,
cost_filter={
"model": model,
"credentials": {
"id": aiml_api_credentials.id,
"provider": aiml_api_credentials.provider,
"type": aiml_api_credentials.type,
},
},
cost_amount=cost,
)
for model, cost in MODEL_COST.items()
if MODEL_METADATA[model].provider == "aiml_api"
]
)
# =============== This is the exhaustive list of cost for each Block =============== #

View File

@@ -1511,10 +1511,8 @@ async def migrate_llm_models(migrate_to: LlmModel):
if field.annotation == LlmModel:
llm_model_fields[block.id] = field_name
# Get all model slugs from the registry (dynamic, not hardcoded enum)
from backend.data import llm_registry
enum_values = list(llm_registry.get_all_model_slugs_for_validation())
# Convert enum values to a list of strings for the SQL query
enum_values = [v.value for v in LlmModel]
escaped_enum_values = repr(tuple(enum_values)) # hack but works
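# What the repr() hack yields, sketched with invented values:
assert repr(tuple(["gpt-4o", "claude-3-haiku"])) == "('gpt-4o', 'claude-3-haiku')"
# The result can be spliced into a SQL "IN (...)" clause as-is; note that a
# single-element list would repr as "('gpt-4o',)", whose trailing comma SQL rejects.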
# Update each block

View File

@@ -1,72 +0,0 @@
"""
LLM Registry module for managing LLM models, providers, and costs dynamically.
This module provides a database-driven registry system for LLM models,
replacing hardcoded model configurations with a flexible admin-managed system.
"""
from backend.data.llm_registry.model import ModelMetadata
# Re-export for backwards compatibility
from backend.data.llm_registry.notifications import (
REGISTRY_REFRESH_CHANNEL,
publish_registry_refresh_notification,
subscribe_to_registry_refresh,
)
from backend.data.llm_registry.registry import (
RegistryModel,
RegistryModelCost,
RegistryModelCreator,
get_all_model_slugs_for_validation,
get_default_model_slug,
get_dynamic_model_slugs,
get_fallback_model_for_disabled,
get_llm_discriminator_mapping,
get_llm_model_cost,
get_llm_model_metadata,
get_llm_model_schema_options,
get_model_info,
is_model_enabled,
iter_dynamic_models,
refresh_llm_registry,
register_static_costs,
register_static_metadata,
)
from backend.data.llm_registry.schema_utils import (
is_llm_model_field,
refresh_llm_discriminator_mapping,
refresh_llm_model_options,
update_schema_with_llm_registry,
)
__all__ = [
# Types
"ModelMetadata",
"RegistryModel",
"RegistryModelCost",
"RegistryModelCreator",
# Registry functions
"get_all_model_slugs_for_validation",
"get_default_model_slug",
"get_dynamic_model_slugs",
"get_fallback_model_for_disabled",
"get_llm_discriminator_mapping",
"get_llm_model_cost",
"get_llm_model_metadata",
"get_llm_model_schema_options",
"get_model_info",
"is_model_enabled",
"iter_dynamic_models",
"refresh_llm_registry",
"register_static_costs",
"register_static_metadata",
# Notifications
"REGISTRY_REFRESH_CHANNEL",
"publish_registry_refresh_notification",
"subscribe_to_registry_refresh",
# Schema utilities
"is_llm_model_field",
"refresh_llm_discriminator_mapping",
"refresh_llm_model_options",
"update_schema_with_llm_registry",
]

View File

@@ -1,25 +0,0 @@
"""Type definitions for LLM model metadata."""
from typing import Literal, NamedTuple
class ModelMetadata(NamedTuple):
"""Metadata for an LLM model.
Attributes:
provider: The provider identifier (e.g., "openai", "anthropic")
context_window: Maximum context window size in tokens
max_output_tokens: Maximum output tokens (None if unlimited)
display_name: Human-readable name for the model
provider_name: Human-readable provider name (e.g., "OpenAI", "Anthropic")
creator_name: Name of the organization that created the model
price_tier: Relative cost tier (1=cheapest, 2=medium, 3=expensive)
"""
provider: str
context_window: int
max_output_tokens: int | None
display_name: str
provider_name: str
creator_name: str
price_tier: Literal[1, 2, 3]

View File

@@ -1,89 +0,0 @@
"""
Redis pub/sub notifications for LLM registry updates.
When models are added/updated/removed via the admin UI, this module
publishes notifications to Redis that all executor services subscribe to,
ensuring they refresh their registry cache in real-time.
"""
import asyncio
import logging
from typing import Awaitable, Callable
from backend.data.redis_client import connect_async
logger = logging.getLogger(__name__)
# Redis channel name for LLM registry refresh notifications
REGISTRY_REFRESH_CHANNEL = "llm_registry:refresh"
async def publish_registry_refresh_notification() -> None:
"""
Publish a notification to Redis that the LLM registry has been updated.
All executor services subscribed to this channel will refresh their registry.
"""
try:
redis = await connect_async()
await redis.publish(REGISTRY_REFRESH_CHANNEL, "refresh")
logger.info("Published LLM registry refresh notification to Redis")
except Exception as exc:
logger.warning(
"Failed to publish LLM registry refresh notification: %s",
exc,
exc_info=True,
)
async def subscribe_to_registry_refresh(
on_refresh: Callable[[], Awaitable[None]],
) -> None:
"""
Subscribe to Redis notifications for LLM registry updates.
This runs in a loop and processes messages as they arrive.
Args:
on_refresh: Async callable to execute when a refresh notification is received
"""
try:
redis = await connect_async()
pubsub = redis.pubsub()
await pubsub.subscribe(REGISTRY_REFRESH_CHANNEL)
logger.info(
"Subscribed to LLM registry refresh notifications on channel: %s",
REGISTRY_REFRESH_CHANNEL,
)
# Process messages in a loop
while True:
try:
message = await pubsub.get_message(
ignore_subscribe_messages=True, timeout=1.0
)
if (
message
and message["type"] == "message"
and message["channel"] == REGISTRY_REFRESH_CHANNEL
):
logger.info("Received LLM registry refresh notification")
try:
await on_refresh()
except Exception as exc:
logger.error(
"Error refreshing LLM registry from notification: %s",
exc,
exc_info=True,
)
except Exception as exc:
logger.warning(
"Error processing registry refresh message: %s", exc, exc_info=True
)
# Continue listening even if one message fails
await asyncio.sleep(1)
except Exception as exc:
logger.error(
"Failed to subscribe to LLM registry refresh notifications: %s",
exc,
exc_info=True,
)
raise
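# A minimal usage sketch for the subscriber above (handler name illustrative);
# the loop runs forever, so callers normally schedule it as a background task:
import asyncio

async def _on_refresh() -> None:
    print("LLM registry changed; refreshing local cache")

if __name__ == "__main__":
    asyncio.run(subscribe_to_registry_refresh(_on_refresh))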

View File

@@ -1,388 +0,0 @@
"""Core LLM registry implementation for managing models dynamically."""
from __future__ import annotations
import asyncio
import logging
from dataclasses import dataclass, field
from typing import Any, Iterable
import prisma.models
from backend.data.llm_registry.model import ModelMetadata
logger = logging.getLogger(__name__)
def _json_to_dict(value: Any) -> dict[str, Any]:
"""Convert Prisma Json type to dict, with fallback to empty dict."""
if value is None:
return {}
if isinstance(value, dict):
return value
# Prisma Json type should always be a dict at runtime
return dict(value) if value else {}
@dataclass(frozen=True)
class RegistryModelCost:
"""Cost configuration for an LLM model."""
credit_cost: int
credential_provider: str
credential_id: str | None
credential_type: str | None
currency: str | None
metadata: dict[str, Any]
@dataclass(frozen=True)
class RegistryModelCreator:
"""Creator information for an LLM model."""
id: str
name: str
display_name: str
description: str | None
website_url: str | None
logo_url: str | None
@dataclass(frozen=True)
class RegistryModel:
"""Represents a model in the LLM registry."""
slug: str
display_name: str
description: str | None
metadata: ModelMetadata
capabilities: dict[str, Any]
extra_metadata: dict[str, Any]
provider_display_name: str
is_enabled: bool
is_recommended: bool = False
costs: tuple[RegistryModelCost, ...] = field(default_factory=tuple)
creator: RegistryModelCreator | None = None
_static_metadata: dict[str, ModelMetadata] = {}
_static_costs: dict[str, int] = {}
_dynamic_models: dict[str, RegistryModel] = {}
_schema_options: list[dict[str, str]] = []
_discriminator_mapping: dict[str, str] = {}
_lock = asyncio.Lock()
def register_static_metadata(metadata: dict[Any, ModelMetadata]) -> None:
"""Register static metadata for legacy models (deprecated)."""
_static_metadata.update({str(key): value for key, value in metadata.items()})
_refresh_cached_schema()
def register_static_costs(costs: dict[Any, int]) -> None:
"""Register static costs for legacy models (deprecated)."""
_static_costs.update({str(key): value for key, value in costs.items()})
def _build_schema_options() -> list[dict[str, str]]:
"""Build schema options for model selection dropdown. Only includes enabled models."""
options: list[dict[str, str]] = []
# Only include enabled models in the dropdown options
for model in sorted(_dynamic_models.values(), key=lambda m: m.display_name.lower()):
if model.is_enabled:
options.append(
{
"label": model.display_name,
"value": model.slug,
"group": model.metadata.provider,
"description": model.description or "",
}
)
for slug, metadata in _static_metadata.items():
if slug in _dynamic_models:
continue
options.append(
{
"label": slug,
"value": slug,
"group": metadata.provider,
"description": "",
}
)
return options
async def refresh_llm_registry() -> None:
"""Refresh the LLM registry from the database. Loads all models (enabled and disabled)."""
async with _lock:
try:
records = await prisma.models.LlmModel.prisma().find_many(
include={
"Provider": True,
"Costs": True,
"Creator": True,
}
)
logger.debug("Found %d LLM model records in database", len(records))
except Exception as exc:
logger.error(
"Failed to refresh LLM registry from DB: %s", exc, exc_info=True
)
return
dynamic: dict[str, RegistryModel] = {}
for record in records:
provider_name = (
record.Provider.name if record.Provider else record.providerId
)
provider_display_name = (
record.Provider.displayName if record.Provider else record.providerId
)
# Creator name: prefer Creator.name, fallback to provider display name
creator_name = (
record.Creator.name if record.Creator else provider_display_name
)
# Price tier: default to 1 (cheapest) if not set
price_tier = getattr(record, "priceTier", 1) or 1
# Clamp to valid range 1-3
price_tier = max(1, min(3, price_tier))
metadata = ModelMetadata(
provider=provider_name,
context_window=record.contextWindow,
max_output_tokens=record.maxOutputTokens,
display_name=record.displayName,
provider_name=provider_display_name,
creator_name=creator_name,
price_tier=price_tier, # type: ignore[arg-type]
)
costs = tuple(
RegistryModelCost(
credit_cost=cost.creditCost,
credential_provider=cost.credentialProvider,
credential_id=cost.credentialId,
credential_type=cost.credentialType,
currency=cost.currency,
metadata=_json_to_dict(cost.metadata),
)
for cost in (record.Costs or [])
)
# Map creator if present
creator = None
if record.Creator:
creator = RegistryModelCreator(
id=record.Creator.id,
name=record.Creator.name,
display_name=record.Creator.displayName,
description=record.Creator.description,
website_url=record.Creator.websiteUrl,
logo_url=record.Creator.logoUrl,
)
dynamic[record.slug] = RegistryModel(
slug=record.slug,
display_name=record.displayName,
description=record.description,
metadata=metadata,
capabilities=_json_to_dict(record.capabilities),
extra_metadata=_json_to_dict(record.metadata),
provider_display_name=(
record.Provider.displayName
if record.Provider
else record.providerId
),
is_enabled=record.isEnabled,
is_recommended=record.isRecommended,
costs=costs,
creator=creator,
)
# Atomic swap - build new structures then replace references
# This ensures readers never see partially updated state
global _dynamic_models
_dynamic_models = dynamic
_refresh_cached_schema()
logger.info(
"LLM registry refreshed with %s dynamic models (enabled: %s, disabled: %s)",
len(dynamic),
sum(1 for m in dynamic.values() if m.is_enabled),
sum(1 for m in dynamic.values() if not m.is_enabled),
)
def _refresh_cached_schema() -> None:
"""Refresh cached schema options and discriminator mapping."""
global _schema_options, _discriminator_mapping
# Build new structures
new_options = _build_schema_options()
new_mapping = {
slug: entry.metadata.provider for slug, entry in _dynamic_models.items()
}
for slug, metadata in _static_metadata.items():
new_mapping.setdefault(slug, metadata.provider)
# Atomic swap - replace references to ensure readers see consistent state
_schema_options = new_options
_discriminator_mapping = new_mapping
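# In miniature: the new structures are built into locals (new_options,
# new_mapping) and the globals are rebound in single assignments, so a
# concurrent reader holds either the old snapshot or the new one, never a
# half-built dict (CPython rebinds a global name in one atomic bytecode op).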
def get_llm_model_metadata(slug: str) -> ModelMetadata | None:
"""Get model metadata by slug. Checks dynamic models first, then static metadata."""
if slug in _dynamic_models:
return _dynamic_models[slug].metadata
return _static_metadata.get(slug)
def get_llm_model_cost(slug: str) -> tuple[RegistryModelCost, ...]:
"""Get model cost configuration by slug."""
if slug in _dynamic_models:
return _dynamic_models[slug].costs
cost_value = _static_costs.get(slug)
if cost_value is None:
return tuple()
return (
RegistryModelCost(
credit_cost=cost_value,
credential_provider="static",
credential_id=None,
credential_type=None,
currency=None,
metadata={},
),
)
def get_llm_model_schema_options() -> list[dict[str, str]]:
"""
Get schema options for LLM model selection dropdown.
Returns a copy of cached schema options that are refreshed when the registry is
updated via refresh_llm_registry() (called on startup and via Redis pub/sub).
"""
# Return a copy to prevent external mutation
return list(_schema_options)
def get_llm_discriminator_mapping() -> dict[str, str]:
"""
Get discriminator mapping for LLM models.
Returns a copy of cached discriminator mapping that is refreshed when the registry
is updated via refresh_llm_registry() (called on startup and via Redis pub/sub).
"""
# Return a copy to prevent external mutation
return dict(_discriminator_mapping)
def get_dynamic_model_slugs() -> set[str]:
"""Get all dynamic model slugs from the registry."""
return set(_dynamic_models.keys())
def get_all_model_slugs_for_validation() -> set[str]:
"""
Get ALL model slugs (both enabled and disabled) for validation purposes.
This is used for JSON schema enum validation - we need to accept any known
model value (even disabled ones) so that existing graphs don't fail validation.
The actual fallback/enforcement happens at runtime in llm_call().
"""
all_slugs = set(_dynamic_models.keys())
all_slugs.update(_static_metadata.keys())
return all_slugs
def iter_dynamic_models() -> Iterable[RegistryModel]:
"""Iterate over all dynamic models in the registry."""
return tuple(_dynamic_models.values())
def get_fallback_model_for_disabled(disabled_model_slug: str) -> RegistryModel | None:
"""
Find a fallback model when the requested model is disabled.
Looks for an enabled model from the same provider, preferring the candidate
with the closest context window and breaking ties by display name.
Args:
disabled_model_slug: The slug of the disabled model
Returns:
An enabled RegistryModel from the same provider, or None if no fallback found
"""
disabled_model = _dynamic_models.get(disabled_model_slug)
if not disabled_model:
return None
provider = disabled_model.metadata.provider
# Find all enabled models from the same provider
candidates = [
model
for model in _dynamic_models.values()
if model.is_enabled and model.metadata.provider == provider
]
if not candidates:
return None
# Sort by: prefer models with similar context window, then by name
candidates.sort(
key=lambda m: (
abs(m.metadata.context_window - disabled_model.metadata.context_window),
m.display_name.lower(),
)
)
return candidates[0]
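# Illustration of the ordering above with invented data: for a disabled model
# whose context window is 131_072, the candidate with the closest window wins,
# ties broken alphabetically.
_ctxs = {"model-a": 32_000, "model-b": 128_000, "model-c": 200_000}
assert min(_ctxs, key=lambda s: (abs(_ctxs[s] - 131_072), s)) == "model-b"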
def is_model_enabled(model_slug: str) -> bool:
"""Check if a model is enabled in the registry."""
model = _dynamic_models.get(model_slug)
if not model:
# Model not in registry - assume it's a static/legacy model and allow it
return True
return model.is_enabled
def get_model_info(model_slug: str) -> RegistryModel | None:
"""Get model info from the registry."""
return _dynamic_models.get(model_slug)
def get_default_model_slug() -> str | None:
"""
Get the default model slug to use for block defaults.
Returns the recommended model if set (configured via admin UI),
otherwise returns the first enabled model alphabetically.
Returns None if no models are available or enabled.
"""
# Return the recommended model if one is set and enabled
for model in _dynamic_models.values():
if model.is_recommended and model.is_enabled:
return model.slug
# No recommended model set - find first enabled model alphabetically
for model in sorted(_dynamic_models.values(), key=lambda m: m.display_name.lower()):
if model.is_enabled:
logger.warning(
"No recommended model set, using '%s' as default",
model.slug,
)
return model.slug
# No enabled models available
if _dynamic_models:
logger.error(
"No enabled models found in registry (%d models registered but all disabled)",
len(_dynamic_models),
)
else:
logger.error("No models registered in LLM registry")
return None

View File

@@ -1,130 +0,0 @@
"""
Helper utilities for LLM registry integration with block schemas.
This module handles the dynamic injection of discriminator mappings
and model options from the LLM registry into block schemas.
"""
import logging
from typing import Any
from backend.data.llm_registry.registry import (
get_all_model_slugs_for_validation,
get_default_model_slug,
get_llm_discriminator_mapping,
get_llm_model_schema_options,
)
logger = logging.getLogger(__name__)
def is_llm_model_field(field_name: str, field_info: Any) -> bool:
"""
Check if a field is an LLM model selection field.
Returns True if the field has 'options' in json_schema_extra
(set by llm_model_schema_extra() in blocks/llm.py).
"""
if not hasattr(field_info, "json_schema_extra"):
return False
extra = field_info.json_schema_extra
if isinstance(extra, dict):
return "options" in extra
return False
def refresh_llm_model_options(field_schema: dict[str, Any]) -> None:
"""
Refresh LLM model options from the registry.
Updates 'options' (for frontend dropdown) to show only enabled models,
but keeps the 'enum' (for validation) inclusive of ALL known models.
This is important because:
- Options: What users see in the dropdown (enabled models only)
- Enum: What values pass validation (all known models, including disabled)
Existing graphs may have disabled models selected - they should pass validation
and the fallback logic in llm_call() will handle using an alternative model.
"""
fresh_options = get_llm_model_schema_options()
if not fresh_options:
return
# Update options array (UI dropdown) - only enabled models
if "options" in field_schema:
field_schema["options"] = fresh_options
all_known_slugs = get_all_model_slugs_for_validation()
if all_known_slugs and "enum" in field_schema:
existing_enum = set(field_schema.get("enum", []))
combined_enum = existing_enum | all_known_slugs
field_schema["enum"] = sorted(combined_enum)
# Set the default value from the registry (the recommended model if set, else the first enabled model)
# This ensures new blocks have a sensible default pre-selected
default_slug = get_default_model_slug()
if default_slug:
field_schema["default"] = default_slug
def refresh_llm_discriminator_mapping(field_schema: dict[str, Any]) -> None:
"""
Refresh discriminator_mapping for fields that use model-based discrimination.
The discriminator is already set when AICredentialsField() creates the field.
We only need to refresh the mapping when models are added/removed.
"""
if field_schema.get("discriminator") != "model":
return
# Always refresh the mapping to get latest models
fresh_mapping = get_llm_discriminator_mapping()
if fresh_mapping is not None:
field_schema["discriminator_mapping"] = fresh_mapping
def update_schema_with_llm_registry(
schema: dict[str, Any], model_class: type | None = None
) -> None:
"""
Update a JSON schema with current LLM registry data.
Refreshes:
1. Model options for LLM model selection fields (dropdown choices)
2. Discriminator mappings for credentials fields (model → provider)
Args:
schema: The JSON schema to update (mutated in-place)
model_class: The Pydantic model class (optional, for field introspection)
"""
properties = schema.get("properties", {})
for field_name, field_schema in properties.items():
if not isinstance(field_schema, dict):
continue
# Refresh model options for LLM model fields
if model_class and hasattr(model_class, "model_fields"):
field_info = model_class.model_fields.get(field_name)
if field_info and is_llm_model_field(field_name, field_info):
try:
refresh_llm_model_options(field_schema)
except Exception as exc:
logger.warning(
"Failed to refresh LLM options for field %s: %s",
field_name,
exc,
)
# Refresh discriminator mapping for fields that use model discrimination
try:
refresh_llm_discriminator_mapping(field_schema)
except Exception as exc:
logger.warning(
"Failed to refresh discriminator mapping for field %s: %s",
field_name,
exc,
)

View File

@@ -40,7 +40,6 @@ from pydantic_core import (
)
from typing_extensions import TypedDict
from backend.data.llm_registry import update_schema_with_llm_registry
from backend.integrations.providers import ProviderName
from backend.util.json import loads as json_loads
from backend.util.settings import Secrets
@@ -545,9 +544,7 @@ class CredentialsMetaInput(BaseModel, Generic[CP, CT]):
else:
schema["credentials_provider"] = allowed_providers
schema["credentials_types"] = model_class.allowed_cred_types()
# Ensure LLM discriminators are populated (delegates to shared helper)
update_schema_with_llm_registry(schema, model_class)
# Do not return anything, just mutate schema in place
model_config = ConfigDict(
json_schema_extra=_add_json_schema_extra, # type: ignore
@@ -696,20 +693,16 @@ def CredentialsField(
This is enforced by the `BlockSchema` base class.
"""
# Build field_schema_extra - always include discriminator and mapping if discriminator is set
field_schema_extra: dict[str, Any] = {}
# Always include discriminator if provided
if discriminator is not None:
field_schema_extra["discriminator"] = discriminator
# Always include discriminator_mapping when discriminator is set (even if empty initially)
field_schema_extra["discriminator_mapping"] = discriminator_mapping or {}
# Include other optional fields (only if not None)
if required_scopes:
field_schema_extra["credentials_scopes"] = list(required_scopes)
if discriminator_values:
field_schema_extra["discriminator_values"] = discriminator_values
field_schema_extra = {
k: v
for k, v in {
"credentials_scopes": list(required_scopes) or None,
"discriminator": discriminator,
"discriminator_mapping": discriminator_mapping,
"discriminator_values": discriminator_values,
}.items()
if v is not None
}
# Merge any json_schema_extra passed in kwargs
if "json_schema_extra" in kwargs:

View File

@@ -1,66 +0,0 @@
"""
Helper functions for LLM registry initialization in executor context.
These functions handle refreshing the LLM registry when the executor starts
and subscribing to real-time updates via Redis pub/sub.
"""
import logging
from backend.data import db, llm_registry
from backend.data.block import BlockSchema, initialize_blocks
from backend.data.block_cost_config import refresh_llm_costs
from backend.data.llm_registry import subscribe_to_registry_refresh
logger = logging.getLogger(__name__)
async def initialize_registry_for_executor() -> None:
"""
Initialize blocks and refresh LLM registry in the executor context.
This must run in the executor's event loop to have access to the database.
"""
try:
# Connect to database if not already connected
if not db.is_connected():
await db.connect()
logger.info("[GraphExecutor] Connected to database for registry refresh")
# Initialize blocks (internally refreshes LLM registry and costs)
await initialize_blocks()
logger.info("[GraphExecutor] Blocks initialized")
except Exception as exc:
logger.warning(
"[GraphExecutor] Failed to refresh LLM registry on startup: %s",
exc,
exc_info=True,
)
async def refresh_registry_on_notification() -> None:
"""Refresh LLM registry when notified via Redis pub/sub."""
try:
# Ensure DB is connected
if not db.is_connected():
await db.connect()
# Refresh registry and costs
await llm_registry.refresh_llm_registry()
refresh_llm_costs()
# Clear block schema caches so they regenerate with new model options
BlockSchema.clear_all_schema_caches()
logger.info("[GraphExecutor] LLM registry refreshed from notification")
except Exception as exc:
logger.error(
"[GraphExecutor] Failed to refresh LLM registry from notification: %s",
exc,
exc_info=True,
)
async def subscribe_to_registry_updates() -> None:
"""Subscribe to Redis pub/sub for LLM registry refresh notifications."""
await subscribe_to_registry_refresh(refresh_registry_on_notification)

View File

@@ -702,20 +702,6 @@ class ExecutionProcessor:
)
self.node_execution_thread.start()
self.node_evaluation_thread.start()
# Initialize LLM registry and subscribe to updates
from backend.executor.llm_registry_init import (
initialize_registry_for_executor,
subscribe_to_registry_updates,
)
asyncio.run_coroutine_threadsafe(
initialize_registry_for_executor(), self.node_execution_loop
)
asyncio.run_coroutine_threadsafe(
subscribe_to_registry_updates(), self.node_execution_loop
)
logger.info(f"[GraphExecutor] {self.tid} started")
@error_logged(swallow=False)

View File

@@ -1,935 +0,0 @@
from __future__ import annotations
from typing import Any, Iterable, Sequence, cast
import prisma
import prisma.models
from backend.data.db import transaction
from backend.server.v2.llm import model as llm_model
from backend.util.models import Pagination
def _json_dict(value: Any | None) -> dict[str, Any]:
if not value:
return {}
if isinstance(value, dict):
return value
return {}
def _map_cost(record: prisma.models.LlmModelCost) -> llm_model.LlmModelCost:
return llm_model.LlmModelCost(
id=record.id,
unit=record.unit,
credit_cost=record.creditCost,
credential_provider=record.credentialProvider,
credential_id=record.credentialId,
credential_type=record.credentialType,
currency=record.currency,
metadata=_json_dict(record.metadata),
)
def _map_creator(
record: prisma.models.LlmModelCreator,
) -> llm_model.LlmModelCreator:
return llm_model.LlmModelCreator(
id=record.id,
name=record.name,
display_name=record.displayName,
description=record.description,
website_url=record.websiteUrl,
logo_url=record.logoUrl,
metadata=_json_dict(record.metadata),
)
def _map_model(record: prisma.models.LlmModel) -> llm_model.LlmModel:
costs = []
if record.Costs:
costs = [_map_cost(cost) for cost in record.Costs]
creator = None
if hasattr(record, "Creator") and record.Creator:
creator = _map_creator(record.Creator)
return llm_model.LlmModel(
id=record.id,
slug=record.slug,
display_name=record.displayName,
description=record.description,
provider_id=record.providerId,
creator_id=record.creatorId,
creator=creator,
context_window=record.contextWindow,
max_output_tokens=record.maxOutputTokens,
is_enabled=record.isEnabled,
is_recommended=record.isRecommended,
capabilities=_json_dict(record.capabilities),
metadata=_json_dict(record.metadata),
costs=costs,
)
def _map_provider(record: prisma.models.LlmProvider) -> llm_model.LlmProvider:
models: list[llm_model.LlmModel] = []
if record.Models:
models = [_map_model(model) for model in record.Models]
return llm_model.LlmProvider(
id=record.id,
name=record.name,
display_name=record.displayName,
description=record.description,
default_credential_provider=record.defaultCredentialProvider,
default_credential_id=record.defaultCredentialId,
default_credential_type=record.defaultCredentialType,
supports_tools=record.supportsTools,
supports_json_output=record.supportsJsonOutput,
supports_reasoning=record.supportsReasoning,
supports_parallel_tool=record.supportsParallelTool,
metadata=_json_dict(record.metadata),
models=models,
)
async def list_providers(
include_models: bool = True, enabled_only: bool = False
) -> list[llm_model.LlmProvider]:
"""
List all LLM providers.
Args:
include_models: Whether to include models for each provider
enabled_only: If True, only include enabled models (for public routes)
"""
include: Any = None
if include_models:
model_where = {"isEnabled": True} if enabled_only else None
include = {
"Models": {
"include": {"Costs": True, "Creator": True},
"where": model_where,
}
}
records = await prisma.models.LlmProvider.prisma().find_many(include=include)
return [_map_provider(record) for record in records]
async def upsert_provider(
request: llm_model.UpsertLlmProviderRequest,
provider_id: str | None = None,
) -> llm_model.LlmProvider:
data: Any = {
"name": request.name,
"displayName": request.display_name,
"description": request.description,
"defaultCredentialProvider": request.default_credential_provider,
"defaultCredentialId": request.default_credential_id,
"defaultCredentialType": request.default_credential_type,
"supportsTools": request.supports_tools,
"supportsJsonOutput": request.supports_json_output,
"supportsReasoning": request.supports_reasoning,
"supportsParallelTool": request.supports_parallel_tool,
"metadata": prisma.Json(request.metadata or {}),
}
include: Any = {"Models": {"include": {"Costs": True, "Creator": True}}}
if provider_id:
record = await prisma.models.LlmProvider.prisma().update(
where={"id": provider_id},
data=data,
include=include,
)
else:
record = await prisma.models.LlmProvider.prisma().create(
data=data,
include=include,
)
if record is None:
raise ValueError("Failed to create/update provider")
return _map_provider(record)
async def delete_provider(provider_id: str) -> bool:
"""
Delete an LLM provider.
A provider can only be deleted if it has no associated models.
Due to onDelete: Restrict on LlmModel.Provider, the database will
block deletion if models exist.
Args:
provider_id: UUID of the provider to delete
Returns:
True if deleted successfully
Raises:
ValueError: If provider not found or has associated models
"""
# Check if provider exists
provider = await prisma.models.LlmProvider.prisma().find_unique(
where={"id": provider_id},
include={"Models": True},
)
if not provider:
raise ValueError(f"Provider with id '{provider_id}' not found")
# Check if provider has any models
model_count = len(provider.Models) if provider.Models else 0
if model_count > 0:
raise ValueError(
f"Cannot delete provider '{provider.displayName}' because it has "
f"{model_count} model(s). Delete all models first."
)
# Safe to delete
await prisma.models.LlmProvider.prisma().delete(where={"id": provider_id})
return True
async def list_models(
provider_id: str | None = None,
enabled_only: bool = False,
page: int = 1,
page_size: int = 50,
) -> llm_model.LlmModelsResponse:
"""
List LLM models with pagination.
Args:
provider_id: Optional filter by provider ID
enabled_only: If True, only return enabled models (for public routes)
page: Page number (1-indexed)
page_size: Number of models per page
"""
where: Any = {}
if provider_id:
where["providerId"] = provider_id
if enabled_only:
where["isEnabled"] = True
# Get total count for pagination
total_items = await prisma.models.LlmModel.prisma().count(
where=where if where else None
)
# Calculate pagination
skip = (page - 1) * page_size
total_pages = (total_items + page_size - 1) // page_size if total_items > 0 else 0
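# Worked example of the arithmetic above (invented): total_items=101,
# page_size=50 -> total_pages = (101 + 50 - 1) // 50 = 3; page=3 -> skip=100.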
records = await prisma.models.LlmModel.prisma().find_many(
where=where if where else None,
include={"Costs": True, "Creator": True},
skip=skip,
take=page_size,
)
models = [_map_model(record) for record in records]
return llm_model.LlmModelsResponse(
models=models,
pagination=Pagination(
total_items=total_items,
total_pages=total_pages,
current_page=page,
page_size=page_size,
),
)
def _cost_create_payload(
costs: Sequence[llm_model.LlmModelCostInput],
) -> dict[str, Iterable[dict[str, Any]]]:
create_items = []
for cost in costs:
item: dict[str, Any] = {
"unit": cost.unit,
"creditCost": cost.credit_cost,
"credentialProvider": cost.credential_provider,
}
# Only include optional fields if they have values
if cost.credential_id:
item["credentialId"] = cost.credential_id
if cost.credential_type:
item["credentialType"] = cost.credential_type
if cost.currency:
item["currency"] = cost.currency
# Handle metadata - use Prisma Json type
if cost.metadata is not None and cost.metadata != {}:
item["metadata"] = prisma.Json(cost.metadata)
create_items.append(item)
return {"create": create_items}
async def create_model(
request: llm_model.CreateLlmModelRequest,
) -> llm_model.LlmModel:
data: Any = {
"slug": request.slug,
"displayName": request.display_name,
"description": request.description,
"Provider": {"connect": {"id": request.provider_id}},
"contextWindow": request.context_window,
"maxOutputTokens": request.max_output_tokens,
"isEnabled": request.is_enabled,
"capabilities": prisma.Json(request.capabilities or {}),
"metadata": prisma.Json(request.metadata or {}),
"Costs": _cost_create_payload(request.costs),
}
if request.creator_id:
data["Creator"] = {"connect": {"id": request.creator_id}}
record = await prisma.models.LlmModel.prisma().create(
data=data,
include={"Costs": True, "Creator": True, "Provider": True},
)
return _map_model(record)
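# Illustrative usage sketch (not part of the original module; the UUID is a
# placeholder). Only slug, display_name, provider_id, context_window, and
# costs are required on the request model; the rest have defaults.
async def _example_create_model() -> llm_model.LlmModel:
    return await create_model(
        llm_model.CreateLlmModelRequest(
            slug="gpt-4o-mini",
            display_name="GPT 4o Mini",
            provider_id="<provider-uuid>",
            context_window=128000,
            costs=[
                llm_model.LlmModelCostInput(
                    credit_cost=1,
                    credential_provider="openai",
                )
            ],
        )
    )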
async def update_model(
model_id: str,
request: llm_model.UpdateLlmModelRequest,
) -> llm_model.LlmModel:
# Build scalar field updates (non-relation fields)
scalar_data: Any = {}
if request.display_name is not None:
scalar_data["displayName"] = request.display_name
if request.description is not None:
scalar_data["description"] = request.description
if request.context_window is not None:
scalar_data["contextWindow"] = request.context_window
if request.max_output_tokens is not None:
scalar_data["maxOutputTokens"] = request.max_output_tokens
if request.is_enabled is not None:
scalar_data["isEnabled"] = request.is_enabled
    if request.capabilities is not None:
        # Json columns must be wrapped for the Prisma client, matching create_model
        scalar_data["capabilities"] = prisma.Json(request.capabilities)
    if request.metadata is not None:
        scalar_data["metadata"] = prisma.Json(request.metadata)
# Foreign keys can be updated directly as scalar fields
if request.provider_id is not None:
scalar_data["providerId"] = request.provider_id
if request.creator_id is not None:
# Empty string means remove the creator
scalar_data["creatorId"] = request.creator_id if request.creator_id else None
# If we have costs to update, we need to handle them separately
# because nested writes have different constraints
if request.costs is not None:
# Wrap cost replacement in a transaction for atomicity
async with transaction() as tx:
# First update scalar fields
if scalar_data:
await tx.llmmodel.update(
where={"id": model_id},
data=scalar_data,
)
# Then handle costs: delete existing and create new
await tx.llmmodelcost.delete_many(where={"llmModelId": model_id})
if request.costs:
cost_payload = _cost_create_payload(request.costs)
for cost_item in cost_payload["create"]:
cost_item["llmModelId"] = model_id
await tx.llmmodelcost.create(data=cast(Any, cost_item))
# Fetch the updated record (outside transaction)
record = await prisma.models.LlmModel.prisma().find_unique(
where={"id": model_id},
include={"Costs": True, "Creator": True},
)
else:
# No costs update - simple update
record = await prisma.models.LlmModel.prisma().update(
where={"id": model_id},
data=scalar_data,
include={"Costs": True, "Creator": True},
)
if not record:
raise ValueError(f"Model with id '{model_id}' not found")
return _map_model(record)
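# Note (added for clarity, not in the original module): `costs=[]` is treated
# as replacement with an empty set, so every existing cost row is deleted in
# the transaction above, while `costs=None` leaves cost rows untouched.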
async def toggle_model(
model_id: str,
is_enabled: bool,
migrate_to_slug: str | None = None,
migration_reason: str | None = None,
custom_credit_cost: int | None = None,
) -> llm_model.ToggleLlmModelResponse:
"""
Toggle a model's enabled status, optionally migrating workflows when disabling.
Args:
model_id: UUID of the model to toggle
is_enabled: New enabled status
migrate_to_slug: If disabling and this is provided, migrate all workflows
using this model to the specified replacement model
migration_reason: Optional reason for the migration (e.g., "Provider outage")
custom_credit_cost: Optional custom pricing override for migrated workflows.
When set, the billing system should use this cost instead
of the target model's cost for affected nodes.
Returns:
ToggleLlmModelResponse with the updated model and optional migration stats
"""
import json
# Get the model being toggled
model = await prisma.models.LlmModel.prisma().find_unique(
where={"id": model_id}, include={"Costs": True}
)
if not model:
raise ValueError(f"Model with id '{model_id}' not found")
nodes_migrated = 0
migration_id: str | None = None
# If disabling with migration, perform migration first
if not is_enabled and migrate_to_slug:
# Validate replacement model exists and is enabled
replacement = await prisma.models.LlmModel.prisma().find_unique(
where={"slug": migrate_to_slug}
)
if not replacement:
raise ValueError(f"Replacement model '{migrate_to_slug}' not found")
if not replacement.isEnabled:
raise ValueError(
f"Replacement model '{migrate_to_slug}' is disabled. "
f"Please enable it before using it as a replacement."
)
# Perform all operations atomically within a single transaction
# This ensures no nodes are missed between query and update
async with transaction() as tx:
# Get the IDs of nodes that will be migrated (inside transaction for consistency)
node_ids_result = await tx.query_raw(
"""
SELECT id
FROM "AgentNode"
WHERE "constantInput"::jsonb->>'model' = $1
FOR UPDATE
""",
model.slug,
)
migrated_node_ids = (
[row["id"] for row in node_ids_result] if node_ids_result else []
)
nodes_migrated = len(migrated_node_ids)
if nodes_migrated > 0:
# Update by IDs to ensure we only update the exact nodes we queried
# Use JSON array and jsonb_array_elements_text for safe parameterization
node_ids_json = json.dumps(migrated_node_ids)
await tx.execute_raw(
"""
UPDATE "AgentNode"
SET "constantInput" = JSONB_SET(
"constantInput"::jsonb,
'{model}',
to_jsonb($1::text)
)
WHERE id::text IN (
SELECT jsonb_array_elements_text($2::jsonb)
)
""",
migrate_to_slug,
node_ids_json,
)
record = await tx.llmmodel.update(
where={"id": model_id},
data={"isEnabled": is_enabled},
include={"Costs": True},
)
# Create migration record for revert capability
if nodes_migrated > 0:
migration_data: Any = {
"sourceModelSlug": model.slug,
"targetModelSlug": migrate_to_slug,
"reason": migration_reason,
"migratedNodeIds": json.dumps(migrated_node_ids),
"nodeCount": nodes_migrated,
"customCreditCost": custom_credit_cost,
}
migration_record = await tx.llmmodelmigration.create(
data=migration_data
)
migration_id = migration_record.id
else:
# Simple toggle without migration
record = await prisma.models.LlmModel.prisma().update(
where={"id": model_id},
data={"isEnabled": is_enabled},
include={"Costs": True},
)
if record is None:
raise ValueError(f"Model with id '{model_id}' not found")
return llm_model.ToggleLlmModelResponse(
model=_map_model(record),
nodes_migrated=nodes_migrated,
migrated_to_slug=migrate_to_slug if nodes_migrated > 0 else None,
migration_id=migration_id,
)
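# Illustrative usage sketch (not part of the original module; IDs are
# placeholders): disable a model and migrate its workflows in one call.
async def _example_disable_with_migration() -> None:
    resp = await toggle_model(
        model_id="<model-uuid>",
        is_enabled=False,
        migrate_to_slug="gpt-4o-mini",
        migration_reason="Provider outage",
    )
    # resp.nodes_migrated counts the rewritten AgentNodes; resp.migration_id
    # can be passed to revert_migration() once the provider recovers.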
async def get_model_usage(model_id: str) -> llm_model.LlmModelUsageResponse:
"""Get usage count for a model."""
import prisma as prisma_module
model = await prisma.models.LlmModel.prisma().find_unique(where={"id": model_id})
if not model:
raise ValueError(f"Model with id '{model_id}' not found")
count_result = await prisma_module.get_client().query_raw(
"""
SELECT COUNT(*) as count
FROM "AgentNode"
WHERE "constantInput"::jsonb->>'model' = $1
""",
model.slug,
)
node_count = int(count_result[0]["count"]) if count_result else 0
return llm_model.LlmModelUsageResponse(model_slug=model.slug, node_count=node_count)
async def delete_model(
model_id: str, replacement_model_slug: str | None = None
) -> llm_model.DeleteLlmModelResponse:
"""
Delete a model and optionally migrate all AgentNodes using it to a replacement model.
This performs an atomic operation within a database transaction:
1. Validates the model exists
2. Counts affected nodes
3. If nodes exist, validates replacement model and migrates them
4. Deletes the LlmModel record (CASCADE deletes costs)
Args:
model_id: UUID of the model to delete
replacement_model_slug: Slug of the model to migrate to (required only if nodes use this model)
Returns:
DeleteLlmModelResponse with migration stats
Raises:
ValueError: If model not found, nodes exist but no replacement provided,
replacement not found, or replacement is disabled
"""
# 1. Get the model being deleted (validation - outside transaction)
model = await prisma.models.LlmModel.prisma().find_unique(
where={"id": model_id}, include={"Costs": True}
)
if not model:
raise ValueError(f"Model with id '{model_id}' not found")
deleted_slug = model.slug
deleted_display_name = model.displayName
# 2. Count affected nodes first to determine if replacement is needed
import prisma as prisma_module
count_result = await prisma_module.get_client().query_raw(
"""
SELECT COUNT(*) as count
FROM "AgentNode"
WHERE "constantInput"::jsonb->>'model' = $1
""",
deleted_slug,
)
nodes_to_migrate = int(count_result[0]["count"]) if count_result else 0
# 3. Validate replacement model only if there are nodes to migrate
if nodes_to_migrate > 0:
if not replacement_model_slug:
raise ValueError(
f"Cannot delete model '{deleted_slug}': {nodes_to_migrate} workflow node(s) "
f"are using it. Please provide a replacement_model_slug to migrate them."
)
replacement = await prisma.models.LlmModel.prisma().find_unique(
where={"slug": replacement_model_slug}
)
if not replacement:
raise ValueError(f"Replacement model '{replacement_model_slug}' not found")
if not replacement.isEnabled:
raise ValueError(
f"Replacement model '{replacement_model_slug}' is disabled. "
f"Please enable it before using it as a replacement."
)
# 4. Perform migration (if needed) and deletion atomically within a transaction
async with transaction() as tx:
# Migrate all AgentNode.constantInput->model to replacement
if nodes_to_migrate > 0 and replacement_model_slug:
await tx.execute_raw(
"""
UPDATE "AgentNode"
SET "constantInput" = JSONB_SET(
"constantInput"::jsonb,
'{model}',
to_jsonb($1::text)
)
WHERE "constantInput"::jsonb->>'model' = $2
""",
replacement_model_slug,
deleted_slug,
)
# Delete the model (CASCADE will delete costs automatically)
await tx.llmmodel.delete(where={"id": model_id})
# Build appropriate message based on whether migration happened
if nodes_to_migrate > 0:
message = (
f"Successfully deleted model '{deleted_display_name}' ({deleted_slug}) "
f"and migrated {nodes_to_migrate} workflow node(s) to '{replacement_model_slug}'."
)
else:
message = (
f"Successfully deleted model '{deleted_display_name}' ({deleted_slug}). "
f"No workflows were using this model."
)
return llm_model.DeleteLlmModelResponse(
deleted_model_slug=deleted_slug,
deleted_model_display_name=deleted_display_name,
replacement_model_slug=replacement_model_slug,
nodes_migrated=nodes_to_migrate,
message=message,
)
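# Illustrative usage sketch (not part of the original module; IDs are
# placeholders). The replacement slug is only required when workflow nodes
# still reference the model being deleted.
async def _example_delete_model() -> None:
    result = await delete_model(
        model_id="<model-uuid>",
        replacement_model_slug="gpt-4o-mini",
    )
    print(result.message)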
def _map_migration(
record: prisma.models.LlmModelMigration,
) -> llm_model.LlmModelMigration:
return llm_model.LlmModelMigration(
id=record.id,
source_model_slug=record.sourceModelSlug,
target_model_slug=record.targetModelSlug,
reason=record.reason,
node_count=record.nodeCount,
custom_credit_cost=record.customCreditCost,
is_reverted=record.isReverted,
created_at=record.createdAt.isoformat(),
reverted_at=record.revertedAt.isoformat() if record.revertedAt else None,
)
async def list_migrations(
include_reverted: bool = False,
) -> list[llm_model.LlmModelMigration]:
"""
List model migrations, optionally including reverted ones.
Args:
include_reverted: If True, include reverted migrations. Default is False.
Returns:
List of LlmModelMigration records
"""
where: Any = None if include_reverted else {"isReverted": False}
records = await prisma.models.LlmModelMigration.prisma().find_many(
where=where,
order={"createdAt": "desc"},
)
return [_map_migration(record) for record in records]
async def get_migration(migration_id: str) -> llm_model.LlmModelMigration | None:
"""Get a specific migration by ID."""
record = await prisma.models.LlmModelMigration.prisma().find_unique(
where={"id": migration_id}
)
return _map_migration(record) if record else None
async def revert_migration(
migration_id: str,
re_enable_source_model: bool = True,
) -> llm_model.RevertMigrationResponse:
"""
Revert a model migration, restoring affected nodes to their original model.
This only reverts the specific nodes that were migrated, not all nodes
currently using the target model.
Args:
migration_id: UUID of the migration to revert
re_enable_source_model: Whether to re-enable the source model if it's disabled
Returns:
RevertMigrationResponse with revert stats
Raises:
ValueError: If migration not found, already reverted, or source model not available
"""
import json
from datetime import datetime, timezone
# Get the migration record
migration = await prisma.models.LlmModelMigration.prisma().find_unique(
where={"id": migration_id}
)
if not migration:
raise ValueError(f"Migration with id '{migration_id}' not found")
if migration.isReverted:
raise ValueError(
f"Migration '{migration_id}' has already been reverted "
f"on {migration.revertedAt.isoformat() if migration.revertedAt else 'unknown date'}"
)
# Check if source model exists
source_model = await prisma.models.LlmModel.prisma().find_unique(
where={"slug": migration.sourceModelSlug}
)
if not source_model:
raise ValueError(
f"Source model '{migration.sourceModelSlug}' no longer exists. "
f"Cannot revert migration."
)
# Get the migrated node IDs (Prisma auto-parses JSONB to list)
migrated_node_ids: list[str] = (
migration.migratedNodeIds
if isinstance(migration.migratedNodeIds, list)
else json.loads(migration.migratedNodeIds) # type: ignore
)
if not migrated_node_ids:
raise ValueError("No nodes to revert in this migration")
# Track if we need to re-enable the source model
source_model_was_disabled = not source_model.isEnabled
should_re_enable = source_model_was_disabled and re_enable_source_model
source_model_re_enabled = False
# Perform revert atomically
async with transaction() as tx:
# Re-enable the source model if requested and it was disabled
if should_re_enable:
await tx.llmmodel.update(
where={"id": source_model.id},
data={"isEnabled": True},
)
source_model_re_enabled = True
# Update only the specific nodes that were migrated
# We need to check that they still have the target model (haven't been changed since)
# Use a single batch update for efficiency
# Use JSON array and jsonb_array_elements_text for safe parameterization
node_ids_json = json.dumps(migrated_node_ids)
result = await tx.execute_raw(
"""
UPDATE "AgentNode"
SET "constantInput" = JSONB_SET(
"constantInput"::jsonb,
'{model}',
to_jsonb($1::text)
)
WHERE id::text IN (
SELECT jsonb_array_elements_text($2::jsonb)
)
AND "constantInput"::jsonb->>'model' = $3
""",
migration.sourceModelSlug,
node_ids_json,
migration.targetModelSlug,
)
nodes_reverted = result if result else 0
# Mark migration as reverted
await tx.llmmodelmigration.update(
where={"id": migration_id},
data={
"isReverted": True,
"revertedAt": datetime.now(timezone.utc),
},
)
# Calculate nodes that were already changed since migration
nodes_already_changed = len(migrated_node_ids) - nodes_reverted
# Build appropriate message
message_parts = [
f"Successfully reverted migration: {nodes_reverted} node(s) restored "
f"from '{migration.targetModelSlug}' to '{migration.sourceModelSlug}'."
]
if nodes_already_changed > 0:
message_parts.append(
f" {nodes_already_changed} node(s) were already changed and not reverted."
)
if source_model_re_enabled:
message_parts.append(
f" Model '{migration.sourceModelSlug}' has been re-enabled."
)
return llm_model.RevertMigrationResponse(
migration_id=migration_id,
source_model_slug=migration.sourceModelSlug,
target_model_slug=migration.targetModelSlug,
nodes_reverted=nodes_reverted,
nodes_already_changed=nodes_already_changed,
source_model_re_enabled=source_model_re_enabled,
message="".join(message_parts),
)
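# Illustrative usage sketch (not part of the original module; the ID is a
# placeholder). Nodes whose model was manually changed after the migration
# are counted in nodes_already_changed and deliberately left untouched.
async def _example_revert_migration() -> None:
    outcome = await revert_migration("<migration-uuid>", re_enable_source_model=True)
    print(outcome.nodes_reverted, outcome.nodes_already_changed)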
# ============================================================================
# Creator CRUD operations
# ============================================================================
async def list_creators() -> list[llm_model.LlmModelCreator]:
"""List all LLM model creators."""
records = await prisma.models.LlmModelCreator.prisma().find_many(
order={"displayName": "asc"}
)
return [_map_creator(record) for record in records]
async def get_creator(creator_id: str) -> llm_model.LlmModelCreator | None:
"""Get a specific creator by ID."""
record = await prisma.models.LlmModelCreator.prisma().find_unique(
where={"id": creator_id}
)
return _map_creator(record) if record else None
async def upsert_creator(
request: llm_model.UpsertLlmCreatorRequest,
creator_id: str | None = None,
) -> llm_model.LlmModelCreator:
"""Create or update a model creator."""
data: Any = {
"name": request.name,
"displayName": request.display_name,
"description": request.description,
"websiteUrl": request.website_url,
"logoUrl": request.logo_url,
"metadata": prisma.Json(request.metadata or {}),
}
if creator_id:
record = await prisma.models.LlmModelCreator.prisma().update(
where={"id": creator_id},
data=data,
)
else:
record = await prisma.models.LlmModelCreator.prisma().create(data=data)
if record is None:
raise ValueError("Failed to create/update creator")
return _map_creator(record)
async def delete_creator(creator_id: str) -> bool:
"""
Delete a model creator.
This will set creatorId to NULL on all associated models (due to onDelete: SetNull).
Args:
creator_id: UUID of the creator to delete
Returns:
True if deleted successfully
Raises:
ValueError: If creator not found
"""
creator = await prisma.models.LlmModelCreator.prisma().find_unique(
where={"id": creator_id}
)
if not creator:
raise ValueError(f"Creator with id '{creator_id}' not found")
await prisma.models.LlmModelCreator.prisma().delete(where={"id": creator_id})
return True
async def get_recommended_model() -> llm_model.LlmModel | None:
"""
Get the currently recommended LLM model.
Returns:
The recommended model, or None if no model is marked as recommended.
"""
record = await prisma.models.LlmModel.prisma().find_first(
where={"isRecommended": True, "isEnabled": True},
include={"Costs": True, "Creator": True},
)
return _map_model(record) if record else None
async def set_recommended_model(
model_id: str,
) -> tuple[llm_model.LlmModel, str | None]:
"""
Set a model as the recommended model.
This will clear the isRecommended flag from any other model and set it
on the specified model. The model must be enabled.
Args:
model_id: UUID of the model to set as recommended
Returns:
Tuple of (the updated model, previous recommended model slug or None)
Raises:
ValueError: If model not found or not enabled
"""
# First, verify the model exists and is enabled
target_model = await prisma.models.LlmModel.prisma().find_unique(
where={"id": model_id}
)
if not target_model:
raise ValueError(f"Model with id '{model_id}' not found")
if not target_model.isEnabled:
raise ValueError(
f"Cannot set disabled model '{target_model.slug}' as recommended"
)
# Get the current recommended model (if any)
current_recommended = await prisma.models.LlmModel.prisma().find_first(
where={"isRecommended": True}
)
previous_slug = current_recommended.slug if current_recommended else None
# Use a transaction to ensure atomicity
async with transaction() as tx:
# Clear isRecommended from all models
await tx.llmmodel.update_many(
where={"isRecommended": True},
data={"isRecommended": False},
)
# Set the new recommended model
await tx.llmmodel.update(
where={"id": model_id},
data={"isRecommended": True},
)
# Fetch and return the updated model
updated_record = await prisma.models.LlmModel.prisma().find_unique(
where={"id": model_id},
include={"Costs": True, "Creator": True},
)
if not updated_record:
raise ValueError("Failed to fetch updated model")
return _map_model(updated_record), previous_slug
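# Illustrative usage sketch (not part of the original module; the ID is a
# placeholder):
async def _example_set_recommended() -> None:
    updated, previous_slug = await set_recommended_model("<model-uuid>")
    # previous_slug is the slug that held the flag before the swap, or None.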
async def get_recommended_model_slug() -> str | None:
"""
Get the slug of the currently recommended LLM model.
Returns:
The slug of the recommended model, or None if no model is marked as recommended.
"""
record = await prisma.models.LlmModel.prisma().find_first(
where={"isRecommended": True, "isEnabled": True},
)
return record.slug if record else None

View File

@@ -1,235 +0,0 @@
from __future__ import annotations
import re
from datetime import datetime
from typing import Any, Optional
import prisma.enums
import pydantic
from backend.util.models import Pagination
# Pattern for valid model slugs: alphanumeric start, then alphanumeric, dots, underscores, slashes, hyphens
SLUG_PATTERN = re.compile(r"^[a-zA-Z0-9][a-zA-Z0-9._/-]*$")
class LlmModelCost(pydantic.BaseModel):
id: str
unit: prisma.enums.LlmCostUnit = prisma.enums.LlmCostUnit.RUN
credit_cost: int
credential_provider: str
credential_id: Optional[str] = None
credential_type: Optional[str] = None
currency: Optional[str] = None
metadata: dict[str, Any] = pydantic.Field(default_factory=dict)
class LlmModelCreator(pydantic.BaseModel):
"""Represents the organization that created/trained the model (e.g., OpenAI, Meta)."""
id: str
name: str
display_name: str
description: Optional[str] = None
website_url: Optional[str] = None
logo_url: Optional[str] = None
metadata: dict[str, Any] = pydantic.Field(default_factory=dict)
class LlmModel(pydantic.BaseModel):
id: str
slug: str
display_name: str
description: Optional[str] = None
provider_id: str
creator_id: Optional[str] = None
creator: Optional[LlmModelCreator] = None
context_window: int
max_output_tokens: Optional[int] = None
is_enabled: bool = True
is_recommended: bool = False
capabilities: dict[str, Any] = pydantic.Field(default_factory=dict)
metadata: dict[str, Any] = pydantic.Field(default_factory=dict)
costs: list[LlmModelCost] = pydantic.Field(default_factory=list)
class LlmProvider(pydantic.BaseModel):
id: str
name: str
display_name: str
description: Optional[str] = None
default_credential_provider: Optional[str] = None
default_credential_id: Optional[str] = None
default_credential_type: Optional[str] = None
supports_tools: bool = True
supports_json_output: bool = True
supports_reasoning: bool = False
supports_parallel_tool: bool = False
metadata: dict[str, Any] = pydantic.Field(default_factory=dict)
models: list[LlmModel] = pydantic.Field(default_factory=list)
class LlmProvidersResponse(pydantic.BaseModel):
providers: list[LlmProvider]
class LlmModelsResponse(pydantic.BaseModel):
models: list[LlmModel]
pagination: Optional[Pagination] = None
class LlmCreatorsResponse(pydantic.BaseModel):
creators: list[LlmModelCreator]
class UpsertLlmProviderRequest(pydantic.BaseModel):
name: str
display_name: str
description: Optional[str] = None
default_credential_provider: Optional[str] = None
default_credential_id: Optional[str] = None
default_credential_type: Optional[str] = "api_key"
supports_tools: bool = True
supports_json_output: bool = True
supports_reasoning: bool = False
supports_parallel_tool: bool = False
metadata: dict[str, Any] = pydantic.Field(default_factory=dict)
class UpsertLlmCreatorRequest(pydantic.BaseModel):
name: str
display_name: str
description: Optional[str] = None
website_url: Optional[str] = None
logo_url: Optional[str] = None
metadata: dict[str, Any] = pydantic.Field(default_factory=dict)
class LlmModelCostInput(pydantic.BaseModel):
unit: prisma.enums.LlmCostUnit = prisma.enums.LlmCostUnit.RUN
credit_cost: int
credential_provider: str
credential_id: Optional[str] = None
credential_type: Optional[str] = "api_key"
currency: Optional[str] = None
metadata: dict[str, Any] = pydantic.Field(default_factory=dict)
class CreateLlmModelRequest(pydantic.BaseModel):
slug: str
display_name: str
description: Optional[str] = None
provider_id: str
creator_id: Optional[str] = None
context_window: int
max_output_tokens: Optional[int] = None
is_enabled: bool = True
capabilities: dict[str, Any] = pydantic.Field(default_factory=dict)
metadata: dict[str, Any] = pydantic.Field(default_factory=dict)
costs: list[LlmModelCostInput]
@pydantic.field_validator("slug")
@classmethod
def validate_slug(cls, v: str) -> str:
if not v or len(v) > 100:
raise ValueError("Slug must be 1-100 characters")
if not SLUG_PATTERN.match(v):
raise ValueError(
"Slug must start with alphanumeric and contain only "
"alphanumeric characters, dots, underscores, slashes, or hyphens"
)
return v
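    # Illustrative examples (added for clarity, not in the original module):
    # "gpt-4o-mini" and "meta-llama/Llama-3.3-70B-Instruct-Turbo" pass this
    # pattern; "-leading-dash" and "has spaces" are rejected. Note the pattern
    # accepts no ":", so colon-bearing seeded slugs such as "llama3.1:405b"
    # would fail validation if resubmitted through this request model.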
class UpdateLlmModelRequest(pydantic.BaseModel):
display_name: Optional[str] = None
description: Optional[str] = None
context_window: Optional[int] = None
max_output_tokens: Optional[int] = None
is_enabled: Optional[bool] = None
capabilities: Optional[dict[str, Any]] = None
metadata: Optional[dict[str, Any]] = None
provider_id: Optional[str] = None
creator_id: Optional[str] = None
costs: Optional[list[LlmModelCostInput]] = None
class ToggleLlmModelRequest(pydantic.BaseModel):
is_enabled: bool
migrate_to_slug: Optional[str] = None
migration_reason: Optional[str] = None # e.g., "Provider outage"
# Custom pricing override for migrated workflows. When set, billing should use
# this cost instead of the target model's cost for affected nodes.
# See LlmModelMigration in schema.prisma for full documentation.
custom_credit_cost: Optional[int] = None
class ToggleLlmModelResponse(pydantic.BaseModel):
model: LlmModel
nodes_migrated: int = 0
migrated_to_slug: Optional[str] = None
migration_id: Optional[str] = None # ID of the migration record for revert
class DeleteLlmModelResponse(pydantic.BaseModel):
deleted_model_slug: str
deleted_model_display_name: str
replacement_model_slug: Optional[str] = None
nodes_migrated: int
message: str
class LlmModelUsageResponse(pydantic.BaseModel):
model_slug: str
node_count: int
# Migration tracking models
class LlmModelMigration(pydantic.BaseModel):
id: str
source_model_slug: str
target_model_slug: str
reason: Optional[str] = None
node_count: int
# Custom pricing override - billing should use this instead of target model's cost
custom_credit_cost: Optional[int] = None
is_reverted: bool = False
created_at: datetime
reverted_at: Optional[datetime] = None
class LlmMigrationsResponse(pydantic.BaseModel):
migrations: list[LlmModelMigration]
class RevertMigrationRequest(pydantic.BaseModel):
re_enable_source_model: bool = (
True # Whether to re-enable the source model if disabled
)
class RevertMigrationResponse(pydantic.BaseModel):
migration_id: str
source_model_slug: str
target_model_slug: str
nodes_reverted: int
nodes_already_changed: int = (
0 # Nodes that were modified since migration (not reverted)
)
source_model_re_enabled: bool = False # Whether the source model was re-enabled
message: str
class SetRecommendedModelRequest(pydantic.BaseModel):
model_id: str
class SetRecommendedModelResponse(pydantic.BaseModel):
model: LlmModel
previous_recommended_slug: Optional[str] = None
message: str
class RecommendedModelResponse(pydantic.BaseModel):
model: Optional[LlmModel] = None
slug: Optional[str] = None

View File

@@ -1,29 +0,0 @@
import autogpt_libs.auth
import fastapi
from backend.server.v2.llm import db as llm_db
from backend.server.v2.llm import model as llm_model
router = fastapi.APIRouter(
prefix="/llm",
tags=["llm"],
dependencies=[fastapi.Security(autogpt_libs.auth.requires_user)],
)
@router.get("/models", response_model=llm_model.LlmModelsResponse)
async def list_models(
page: int = fastapi.Query(default=1, ge=1, description="Page number (1-indexed)"),
page_size: int = fastapi.Query(
default=50, ge=1, le=100, description="Number of models per page"
),
):
"""List all enabled LLM models available to users."""
return await llm_db.list_models(enabled_only=True, page=page, page_size=page_size)
@router.get("/providers", response_model=llm_model.LlmProvidersResponse)
async def list_providers():
"""List all LLM providers with their enabled models."""
providers = await llm_db.list_providers(include_models=True, enabled_only=True)
return llm_model.LlmProvidersResponse(providers=providers)
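# Illustrative client call (not part of the original module; the base URL and
# mount path are deployment-specific assumptions):
# import httpx
# resp = httpx.get(
#     "http://localhost:8000/llm/models", params={"page": 1, "page_size": 50}
# )
# models = resp.json()["models"]  # only enabled models are returned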

View File

@@ -1,81 +0,0 @@
-- CreateEnum
CREATE TYPE "LlmCostUnit" AS ENUM ('RUN', 'TOKENS');
-- CreateTable
CREATE TABLE "LlmProvider" (
"id" TEXT NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"name" TEXT NOT NULL,
"displayName" TEXT NOT NULL,
"description" TEXT,
"defaultCredentialProvider" TEXT,
"defaultCredentialId" TEXT,
"defaultCredentialType" TEXT,
"supportsTools" BOOLEAN NOT NULL DEFAULT TRUE,
"supportsJsonOutput" BOOLEAN NOT NULL DEFAULT TRUE,
"supportsReasoning" BOOLEAN NOT NULL DEFAULT FALSE,
"supportsParallelTool" BOOLEAN NOT NULL DEFAULT FALSE,
"metadata" JSONB NOT NULL DEFAULT '{}'::jsonb,
CONSTRAINT "LlmProvider_pkey" PRIMARY KEY ("id"),
CONSTRAINT "LlmProvider_name_key" UNIQUE ("name")
);
-- CreateTable
CREATE TABLE "LlmModel" (
"id" TEXT NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"slug" TEXT NOT NULL,
"displayName" TEXT NOT NULL,
"description" TEXT,
"providerId" TEXT NOT NULL,
"contextWindow" INTEGER NOT NULL,
"maxOutputTokens" INTEGER,
"isEnabled" BOOLEAN NOT NULL DEFAULT TRUE,
"capabilities" JSONB NOT NULL DEFAULT '{}'::jsonb,
"metadata" JSONB NOT NULL DEFAULT '{}'::jsonb,
CONSTRAINT "LlmModel_pkey" PRIMARY KEY ("id"),
CONSTRAINT "LlmModel_slug_key" UNIQUE ("slug")
);
-- CreateTable
CREATE TABLE "LlmModelCost" (
"id" TEXT NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"unit" "LlmCostUnit" NOT NULL DEFAULT 'RUN',
"creditCost" INTEGER NOT NULL,
"credentialProvider" TEXT NOT NULL,
"credentialId" TEXT,
"credentialType" TEXT,
"currency" TEXT,
"metadata" JSONB NOT NULL DEFAULT '{}'::jsonb,
"llmModelId" TEXT NOT NULL,
CONSTRAINT "LlmModelCost_pkey" PRIMARY KEY ("id")
);
-- CreateIndex
CREATE INDEX "LlmModel_providerId_isEnabled_idx" ON "LlmModel"("providerId", "isEnabled");
-- CreateIndex
CREATE INDEX "LlmModel_slug_idx" ON "LlmModel"("slug");
-- CreateIndex
CREATE INDEX "LlmModelCost_llmModelId_idx" ON "LlmModelCost"("llmModelId");
-- CreateIndex
CREATE INDEX "LlmModelCost_credentialProvider_idx" ON "LlmModelCost"("credentialProvider");
-- CreateIndex
CREATE UNIQUE INDEX "LlmModelCost_llmModelId_credentialProvider_unit_key" ON "LlmModelCost"("llmModelId", "credentialProvider", "unit");
-- AddForeignKey
ALTER TABLE "LlmModel" ADD CONSTRAINT "LlmModel_providerId_fkey" FOREIGN KEY ("providerId") REFERENCES "LlmProvider"("id") ON DELETE RESTRICT ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "LlmModelCost" ADD CONSTRAINT "LlmModelCost_llmModelId_fkey" FOREIGN KEY ("llmModelId") REFERENCES "LlmModel"("id") ON DELETE CASCADE ON UPDATE CASCADE;

View File

@@ -1,226 +0,0 @@
-- Seed LLM Registry from existing hard-coded data
-- This migration populates the LlmProvider, LlmModel, and LlmModelCost tables
-- with data from the existing MODEL_METADATA and MODEL_COST dictionaries
-- Insert Providers
INSERT INTO "LlmProvider" ("id", "name", "displayName", "description", "defaultCredentialProvider", "defaultCredentialType", "supportsTools", "supportsJsonOutput", "supportsReasoning", "supportsParallelTool", "metadata")
VALUES
(gen_random_uuid(), 'openai', 'OpenAI', 'OpenAI language models', 'openai', 'api_key', true, true, true, true, '{}'::jsonb),
(gen_random_uuid(), 'anthropic', 'Anthropic', 'Anthropic Claude models', 'anthropic', 'api_key', true, true, true, false, '{}'::jsonb),
(gen_random_uuid(), 'groq', 'Groq', 'Groq inference API', 'groq', 'api_key', false, true, false, false, '{}'::jsonb),
(gen_random_uuid(), 'open_router', 'OpenRouter', 'OpenRouter unified API', 'open_router', 'api_key', true, true, false, false, '{}'::jsonb),
(gen_random_uuid(), 'aiml_api', 'AI/ML API', 'AI/ML API models', 'aiml_api', 'api_key', false, true, false, false, '{}'::jsonb),
(gen_random_uuid(), 'ollama', 'Ollama', 'Ollama local models', 'ollama', 'api_key', false, true, false, false, '{}'::jsonb),
(gen_random_uuid(), 'llama_api', 'Llama API', 'Llama API models', 'llama_api', 'api_key', false, true, false, false, '{}'::jsonb),
(gen_random_uuid(), 'v0', 'v0', 'v0 by Vercel models', 'v0', 'api_key', true, true, false, false, '{}'::jsonb)
ON CONFLICT ("name") DO NOTHING;
-- Insert Models (using CTEs to reference provider IDs)
WITH provider_ids AS (
SELECT "id", "name" FROM "LlmProvider"
)
INSERT INTO "LlmModel" ("id", "slug", "displayName", "description", "providerId", "contextWindow", "maxOutputTokens", "isEnabled", "capabilities", "metadata")
SELECT
gen_random_uuid(),
model_slug,
model_display_name,
NULL,
p."id",
context_window,
max_output_tokens,
true,
'{}'::jsonb,
'{}'::jsonb
FROM (VALUES
-- OpenAI models
('o3', 'O3', 'openai', 200000, 100000),
('o3-mini', 'O3 Mini', 'openai', 200000, 100000),
('o1', 'O1', 'openai', 200000, 100000),
('o1-mini', 'O1 Mini', 'openai', 128000, 65536),
('gpt-5-2025-08-07', 'GPT 5', 'openai', 400000, 128000),
('gpt-5.1-2025-11-13', 'GPT 5.1', 'openai', 400000, 128000),
('gpt-5-mini-2025-08-07', 'GPT 5 Mini', 'openai', 400000, 128000),
('gpt-5-nano-2025-08-07', 'GPT 5 Nano', 'openai', 400000, 128000),
('gpt-5-chat-latest', 'GPT 5 Chat', 'openai', 400000, 16384),
('gpt-4.1-2025-04-14', 'GPT 4.1', 'openai', 1000000, 32768),
('gpt-4.1-mini-2025-04-14', 'GPT 4.1 Mini', 'openai', 1047576, 32768),
('gpt-4o-mini', 'GPT 4o Mini', 'openai', 128000, 16384),
('gpt-4o', 'GPT 4o', 'openai', 128000, 16384),
('gpt-4-turbo', 'GPT 4 Turbo', 'openai', 128000, 4096),
('gpt-3.5-turbo', 'GPT 3.5 Turbo', 'openai', 16385, 4096),
-- Anthropic models
('claude-opus-4-1-20250805', 'Claude 4.1 Opus', 'anthropic', 200000, 32000),
('claude-opus-4-20250514', 'Claude 4 Opus', 'anthropic', 200000, 32000),
('claude-sonnet-4-20250514', 'Claude 4 Sonnet', 'anthropic', 200000, 64000),
('claude-opus-4-5-20251101', 'Claude 4.5 Opus', 'anthropic', 200000, 64000),
('claude-sonnet-4-5-20250929', 'Claude 4.5 Sonnet', 'anthropic', 200000, 64000),
('claude-haiku-4-5-20251001', 'Claude 4.5 Haiku', 'anthropic', 200000, 64000),
('claude-3-7-sonnet-20250219', 'Claude 3.7 Sonnet', 'anthropic', 200000, 64000),
('claude-3-haiku-20240307', 'Claude 3 Haiku', 'anthropic', 200000, 4096),
-- AI/ML API models
('Qwen/Qwen2.5-72B-Instruct-Turbo', 'Qwen 2.5 72B', 'aiml_api', 32000, 8000),
('nvidia/llama-3.1-nemotron-70b-instruct', 'Llama 3.1 Nemotron 70B', 'aiml_api', 128000, 40000),
('meta-llama/Llama-3.3-70B-Instruct-Turbo', 'Llama 3.3 70B', 'aiml_api', 128000, NULL),
('meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo', 'Meta Llama 3.1 70B', 'aiml_api', 131000, 2000),
('meta-llama/Llama-3.2-3B-Instruct-Turbo', 'Llama 3.2 3B', 'aiml_api', 128000, NULL),
-- Groq models
('llama-3.3-70b-versatile', 'Llama 3.3 70B', 'groq', 128000, 32768),
('llama-3.1-8b-instant', 'Llama 3.1 8B', 'groq', 128000, 8192),
-- Ollama models
('llama3.3', 'Llama 3.3', 'ollama', 8192, NULL),
('llama3.2', 'Llama 3.2', 'ollama', 8192, NULL),
('llama3', 'Llama 3', 'ollama', 8192, NULL),
('llama3.1:405b', 'Llama 3.1 405B', 'ollama', 8192, NULL),
('dolphin-mistral:latest', 'Dolphin Mistral', 'ollama', 32768, NULL),
-- OpenRouter models
('google/gemini-2.5-pro-preview-03-25', 'Gemini 2.5 Pro', 'open_router', 1050000, 8192),
('google/gemini-3-pro-preview', 'Gemini 3 Pro Preview', 'open_router', 1048576, 65535),
('google/gemini-2.5-flash', 'Gemini 2.5 Flash', 'open_router', 1048576, 65535),
('google/gemini-2.0-flash-001', 'Gemini 2.0 Flash', 'open_router', 1048576, 8192),
('google/gemini-2.5-flash-lite-preview-06-17', 'Gemini 2.5 Flash Lite Preview', 'open_router', 1048576, 65535),
('google/gemini-2.0-flash-lite-001', 'Gemini 2.0 Flash Lite', 'open_router', 1048576, 8192),
('mistralai/mistral-nemo', 'Mistral Nemo', 'open_router', 128000, 4096),
('cohere/command-r-08-2024', 'Command R', 'open_router', 128000, 4096),
('cohere/command-r-plus-08-2024', 'Command R Plus', 'open_router', 128000, 4096),
('deepseek/deepseek-chat', 'DeepSeek Chat', 'open_router', 64000, 2048),
('deepseek/deepseek-r1-0528', 'DeepSeek R1', 'open_router', 163840, 163840),
('perplexity/sonar', 'Perplexity Sonar', 'open_router', 127000, 8000),
('perplexity/sonar-pro', 'Perplexity Sonar Pro', 'open_router', 200000, 8000),
('perplexity/sonar-deep-research', 'Perplexity Sonar Deep Research', 'open_router', 128000, 16000),
('nousresearch/hermes-3-llama-3.1-405b', 'Hermes 3 Llama 3.1 405B', 'open_router', 131000, 4096),
('nousresearch/hermes-3-llama-3.1-70b', 'Hermes 3 Llama 3.1 70B', 'open_router', 12288, 12288),
('openai/gpt-oss-120b', 'GPT OSS 120B', 'open_router', 131072, 131072),
('openai/gpt-oss-20b', 'GPT OSS 20B', 'open_router', 131072, 32768),
('amazon/nova-lite-v1', 'Amazon Nova Lite', 'open_router', 300000, 5120),
('amazon/nova-micro-v1', 'Amazon Nova Micro', 'open_router', 128000, 5120),
('amazon/nova-pro-v1', 'Amazon Nova Pro', 'open_router', 300000, 5120),
('microsoft/wizardlm-2-8x22b', 'WizardLM 2 8x22B', 'open_router', 65536, 4096),
('gryphe/mythomax-l2-13b', 'MythoMax L2 13B', 'open_router', 4096, 4096),
('meta-llama/llama-4-scout', 'Llama 4 Scout', 'open_router', 131072, 131072),
('meta-llama/llama-4-maverick', 'Llama 4 Maverick', 'open_router', 1048576, 1000000),
('x-ai/grok-4', 'Grok 4', 'open_router', 256000, 256000),
('x-ai/grok-4-fast', 'Grok 4 Fast', 'open_router', 2000000, 30000),
('x-ai/grok-4.1-fast', 'Grok 4.1 Fast', 'open_router', 2000000, 30000),
('x-ai/grok-code-fast-1', 'Grok Code Fast 1', 'open_router', 256000, 10000),
('moonshotai/kimi-k2', 'Kimi K2', 'open_router', 131000, 131000),
('qwen/qwen3-235b-a22b-thinking-2507', 'Qwen 3 235B Thinking', 'open_router', 262144, 262144),
('qwen/qwen3-coder', 'Qwen 3 Coder', 'open_router', 262144, 262144),
-- Llama API models
('Llama-4-Scout-17B-16E-Instruct-FP8', 'Llama 4 Scout', 'llama_api', 128000, 4028),
('Llama-4-Maverick-17B-128E-Instruct-FP8', 'Llama 4 Maverick', 'llama_api', 128000, 4028),
('Llama-3.3-8B-Instruct', 'Llama 3.3 8B', 'llama_api', 128000, 4028),
('Llama-3.3-70B-Instruct', 'Llama 3.3 70B', 'llama_api', 128000, 4028),
-- v0 models
('v0-1.5-md', 'v0 1.5 MD', 'v0', 128000, 64000),
('v0-1.5-lg', 'v0 1.5 LG', 'v0', 512000, 64000),
('v0-1.0-md', 'v0 1.0 MD', 'v0', 128000, 64000)
) AS models(model_slug, model_display_name, provider_name, context_window, max_output_tokens)
JOIN provider_ids p ON p."name" = models.provider_name
ON CONFLICT ("slug") DO NOTHING;
-- Insert Costs (using CTEs to reference model IDs)
WITH model_ids AS (
SELECT "id", "slug", "providerId" FROM "LlmModel"
),
provider_ids AS (
SELECT "id", "name" FROM "LlmProvider"
)
INSERT INTO "LlmModelCost" ("id", "unit", "creditCost", "credentialProvider", "credentialId", "credentialType", "currency", "metadata", "llmModelId")
SELECT
gen_random_uuid(),
'RUN'::"LlmCostUnit",
cost,
p."name",
NULL,
'api_key',
NULL,
'{}'::jsonb,
m."id"
FROM (VALUES
-- OpenAI costs
('o3', 4),
('o3-mini', 2),
('o1', 16),
('o1-mini', 4),
('gpt-5-2025-08-07', 2),
('gpt-5.1-2025-11-13', 5),
('gpt-5-mini-2025-08-07', 1),
('gpt-5-nano-2025-08-07', 1),
('gpt-5-chat-latest', 5),
('gpt-4.1-2025-04-14', 2),
('gpt-4.1-mini-2025-04-14', 1),
('gpt-4o-mini', 1),
('gpt-4o', 3),
('gpt-4-turbo', 10),
('gpt-3.5-turbo', 1),
-- Anthropic costs
('claude-opus-4-1-20250805', 21),
('claude-opus-4-20250514', 21),
('claude-sonnet-4-20250514', 5),
('claude-haiku-4-5-20251001', 4),
('claude-opus-4-5-20251101', 14),
('claude-sonnet-4-5-20250929', 9),
('claude-3-7-sonnet-20250219', 5),
('claude-3-haiku-20240307', 1),
-- AI/ML API costs
('Qwen/Qwen2.5-72B-Instruct-Turbo', 1),
('nvidia/llama-3.1-nemotron-70b-instruct', 1),
('meta-llama/Llama-3.3-70B-Instruct-Turbo', 1),
('meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo', 1),
('meta-llama/Llama-3.2-3B-Instruct-Turbo', 1),
-- Groq costs
('llama-3.3-70b-versatile', 1),
('llama-3.1-8b-instant', 1),
-- Ollama costs
('llama3.3', 1),
('llama3.2', 1),
('llama3', 1),
('llama3.1:405b', 1),
('dolphin-mistral:latest', 1),
-- OpenRouter costs
('google/gemini-2.5-pro-preview-03-25', 4),
('google/gemini-3-pro-preview', 5),
('mistralai/mistral-nemo', 1),
('cohere/command-r-08-2024', 1),
('cohere/command-r-plus-08-2024', 3),
('deepseek/deepseek-chat', 2),
('perplexity/sonar', 1),
('perplexity/sonar-pro', 5),
('perplexity/sonar-deep-research', 10),
('nousresearch/hermes-3-llama-3.1-405b', 1),
('nousresearch/hermes-3-llama-3.1-70b', 1),
('amazon/nova-lite-v1', 1),
('amazon/nova-micro-v1', 1),
('amazon/nova-pro-v1', 1),
('microsoft/wizardlm-2-8x22b', 1),
('gryphe/mythomax-l2-13b', 1),
('meta-llama/llama-4-scout', 1),
('meta-llama/llama-4-maverick', 1),
('x-ai/grok-4', 9),
('x-ai/grok-4-fast', 1),
('x-ai/grok-4.1-fast', 1),
('x-ai/grok-code-fast-1', 1),
('moonshotai/kimi-k2', 1),
('qwen/qwen3-235b-a22b-thinking-2507', 1),
('qwen/qwen3-coder', 9),
('google/gemini-2.5-flash', 1),
('google/gemini-2.0-flash-001', 1),
('google/gemini-2.5-flash-lite-preview-06-17', 1),
('google/gemini-2.0-flash-lite-001', 1),
('deepseek/deepseek-r1-0528', 1),
('openai/gpt-oss-120b', 1),
('openai/gpt-oss-20b', 1),
-- Llama API costs
('Llama-4-Scout-17B-16E-Instruct-FP8', 1),
('Llama-4-Maverick-17B-128E-Instruct-FP8', 1),
('Llama-3.3-8B-Instruct', 1),
('Llama-3.3-70B-Instruct', 1),
-- v0 costs
('v0-1.5-md', 1),
('v0-1.5-lg', 2),
('v0-1.0-md', 1)
) AS costs(model_slug, cost)
JOIN model_ids m ON m."slug" = costs.model_slug
JOIN provider_ids p ON p."id" = m."providerId"
ON CONFLICT ("llmModelId", "credentialProvider", "unit") DO NOTHING;

View File

@@ -1,25 +0,0 @@
-- CreateTable
CREATE TABLE "LlmModelMigration" (
"id" TEXT NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL,
"sourceModelSlug" TEXT NOT NULL,
"targetModelSlug" TEXT NOT NULL,
"reason" TEXT,
"migratedNodeIds" JSONB NOT NULL DEFAULT '[]',
"nodeCount" INTEGER NOT NULL,
"customCreditCost" INTEGER,
"isReverted" BOOLEAN NOT NULL DEFAULT false,
"revertedAt" TIMESTAMP(3),
CONSTRAINT "LlmModelMigration_pkey" PRIMARY KEY ("id")
);
-- CreateIndex
CREATE INDEX "LlmModelMigration_sourceModelSlug_idx" ON "LlmModelMigration"("sourceModelSlug");
-- CreateIndex
CREATE INDEX "LlmModelMigration_targetModelSlug_idx" ON "LlmModelMigration"("targetModelSlug");
-- CreateIndex
CREATE INDEX "LlmModelMigration_isReverted_idx" ON "LlmModelMigration"("isReverted");

View File

@@ -1,127 +0,0 @@
-- Add LlmModelCreator table
-- Creator represents who made/trained the model (e.g., OpenAI, Meta)
-- This is distinct from Provider who hosts/serves the model (e.g., OpenRouter)
-- Create the LlmModelCreator table
CREATE TABLE "LlmModelCreator" (
"id" TEXT NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL,
"name" TEXT NOT NULL,
"displayName" TEXT NOT NULL,
"description" TEXT,
"websiteUrl" TEXT,
"logoUrl" TEXT,
"metadata" JSONB NOT NULL DEFAULT '{}',
CONSTRAINT "LlmModelCreator_pkey" PRIMARY KEY ("id")
);
-- Create unique index on name
CREATE UNIQUE INDEX "LlmModelCreator_name_key" ON "LlmModelCreator"("name");
-- Add creatorId column to LlmModel
ALTER TABLE "LlmModel" ADD COLUMN "creatorId" TEXT;
-- Add foreign key constraint
ALTER TABLE "LlmModel" ADD CONSTRAINT "LlmModel_creatorId_fkey"
FOREIGN KEY ("creatorId") REFERENCES "LlmModelCreator"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- Create index on creatorId
CREATE INDEX "LlmModel_creatorId_idx" ON "LlmModel"("creatorId");
-- Seed creators based on known model creators
INSERT INTO "LlmModelCreator" ("id", "updatedAt", "name", "displayName", "description", "websiteUrl", "metadata")
VALUES
(gen_random_uuid(), CURRENT_TIMESTAMP, 'openai', 'OpenAI', 'Creator of GPT models', 'https://openai.com', '{}'),
(gen_random_uuid(), CURRENT_TIMESTAMP, 'anthropic', 'Anthropic', 'Creator of Claude models', 'https://anthropic.com', '{}'),
(gen_random_uuid(), CURRENT_TIMESTAMP, 'meta', 'Meta', 'Creator of Llama models', 'https://ai.meta.com', '{}'),
(gen_random_uuid(), CURRENT_TIMESTAMP, 'google', 'Google', 'Creator of Gemini models', 'https://deepmind.google', '{}'),
(gen_random_uuid(), CURRENT_TIMESTAMP, 'mistral', 'Mistral AI', 'Creator of Mistral models', 'https://mistral.ai', '{}'),
(gen_random_uuid(), CURRENT_TIMESTAMP, 'cohere', 'Cohere', 'Creator of Command models', 'https://cohere.com', '{}'),
(gen_random_uuid(), CURRENT_TIMESTAMP, 'deepseek', 'DeepSeek', 'Creator of DeepSeek models', 'https://deepseek.com', '{}'),
(gen_random_uuid(), CURRENT_TIMESTAMP, 'perplexity', 'Perplexity AI', 'Creator of Sonar models', 'https://perplexity.ai', '{}'),
(gen_random_uuid(), CURRENT_TIMESTAMP, 'qwen', 'Qwen (Alibaba)', 'Creator of Qwen models', 'https://qwenlm.github.io', '{}'),
(gen_random_uuid(), CURRENT_TIMESTAMP, 'xai', 'xAI', 'Creator of Grok models', 'https://x.ai', '{}'),
(gen_random_uuid(), CURRENT_TIMESTAMP, 'amazon', 'Amazon', 'Creator of Nova models', 'https://aws.amazon.com/bedrock', '{}'),
(gen_random_uuid(), CURRENT_TIMESTAMP, 'microsoft', 'Microsoft', 'Creator of WizardLM models', 'https://microsoft.com', '{}'),
(gen_random_uuid(), CURRENT_TIMESTAMP, 'moonshot', 'Moonshot AI', 'Creator of Kimi models', 'https://moonshot.cn', '{}'),
(gen_random_uuid(), CURRENT_TIMESTAMP, 'nvidia', 'NVIDIA', 'Creator of Nemotron models', 'https://nvidia.com', '{}'),
(gen_random_uuid(), CURRENT_TIMESTAMP, 'nous_research', 'Nous Research', 'Creator of Hermes models', 'https://nousresearch.com', '{}'),
(gen_random_uuid(), CURRENT_TIMESTAMP, 'vercel', 'Vercel', 'Creator of v0 models', 'https://vercel.com', '{}'),
(gen_random_uuid(), CURRENT_TIMESTAMP, 'cognitive_computations', 'Cognitive Computations', 'Creator of Dolphin models', 'https://erichartford.com', '{}'),
(gen_random_uuid(), CURRENT_TIMESTAMP, 'gryphe', 'Gryphe', 'Creator of MythoMax models', 'https://huggingface.co/Gryphe', '{}')
ON CONFLICT ("name") DO NOTHING;
-- Update existing models with their creators
-- OpenAI models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'openai')
WHERE "slug" LIKE 'gpt-%' OR "slug" LIKE 'o1%' OR "slug" LIKE 'o3%' OR "slug" LIKE 'openai/%';
-- Anthropic models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'anthropic')
WHERE "slug" LIKE 'claude-%';
-- Meta/Llama models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'meta')
WHERE "slug" LIKE 'llama%' OR "slug" LIKE 'Llama%' OR "slug" LIKE 'meta-llama/%' OR "slug" LIKE '%/llama-%';
-- Google models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'google')
WHERE "slug" LIKE 'google/%' OR "slug" LIKE 'gemini%';
-- Mistral models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'mistral')
WHERE "slug" LIKE 'mistral%' OR "slug" LIKE 'mistralai/%';
-- Cohere models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'cohere')
WHERE "slug" LIKE 'cohere/%' OR "slug" LIKE 'command-%';
-- DeepSeek models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'deepseek')
WHERE "slug" LIKE 'deepseek/%' OR "slug" LIKE 'deepseek-%';
-- Perplexity models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'perplexity')
WHERE "slug" LIKE 'perplexity/%' OR "slug" LIKE 'sonar%';
-- Qwen models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'qwen')
WHERE "slug" LIKE 'Qwen/%' OR "slug" LIKE 'qwen/%';
-- xAI/Grok models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'xai')
WHERE "slug" LIKE 'x-ai/%' OR "slug" LIKE 'grok%';
-- Amazon models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'amazon')
WHERE "slug" LIKE 'amazon/%' OR "slug" LIKE 'nova-%';
-- Microsoft models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'microsoft')
WHERE "slug" LIKE 'microsoft/%' OR "slug" LIKE 'wizardlm%';
-- Moonshot models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'moonshot')
WHERE "slug" LIKE 'moonshotai/%' OR "slug" LIKE 'kimi%';
-- NVIDIA models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'nvidia')
WHERE "slug" LIKE 'nvidia/%' OR "slug" LIKE '%nemotron%';
-- Nous Research models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'nous_research')
WHERE "slug" LIKE 'nousresearch/%' OR "slug" LIKE 'hermes%';
-- Vercel/v0 models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'vercel')
WHERE "slug" LIKE 'v0-%';
-- Dolphin models (Cognitive Computations / Eric Hartford)
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'cognitive_computations')
WHERE "slug" LIKE 'dolphin-%';
-- Gryphe models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'gryphe')
WHERE "slug" LIKE 'gryphe/%' OR "slug" LIKE 'mythomax%';

View File

@@ -1,4 +0,0 @@
-- CreateIndex
-- Index for efficient LLM model lookups on AgentNode.constantInput->>'model'
-- This improves performance of model migration queries in the LLM registry
CREATE INDEX "AgentNode_constantInput_model_idx" ON "AgentNode" ((("constantInput"->>'model')));

View File

@@ -1,52 +0,0 @@
-- Add GPT-5.2 model and update O3 slug
-- This migration adds the new GPT-5.2 model added in dev branch
-- Update O3 slug to match dev branch format
UPDATE "LlmModel"
SET "slug" = 'o3-2025-04-16'
WHERE "slug" = 'o3';
-- Update cost reference for O3 if needed
-- (costs are linked by model ID, so no update needed)
-- Add GPT-5.2 model
WITH provider_id AS (
SELECT "id" FROM "LlmProvider" WHERE "name" = 'openai'
)
INSERT INTO "LlmModel" ("id", "slug", "displayName", "description", "providerId", "contextWindow", "maxOutputTokens", "isEnabled", "capabilities", "metadata")
SELECT
gen_random_uuid(),
'gpt-5.2-2025-12-11',
'GPT 5.2',
'OpenAI GPT-5.2 model',
p."id",
400000,
128000,
true,
'{}'::jsonb,
'{}'::jsonb
FROM provider_id p
ON CONFLICT ("slug") DO NOTHING;
-- Add cost for GPT-5.2
WITH model_id AS (
SELECT m."id", p."name" as provider_name
FROM "LlmModel" m
JOIN "LlmProvider" p ON p."id" = m."providerId"
WHERE m."slug" = 'gpt-5.2-2025-12-11'
)
INSERT INTO "LlmModelCost" ("id", "unit", "creditCost", "credentialProvider", "credentialId", "credentialType", "currency", "metadata", "llmModelId")
SELECT
gen_random_uuid(),
'RUN'::"LlmCostUnit",
3, -- Same cost tier as GPT-5.1
m.provider_name,
NULL,
'api_key',
NULL,
'{}'::jsonb,
m."id"
FROM model_id m
WHERE NOT EXISTS (
SELECT 1 FROM "LlmModelCost" c WHERE c."llmModelId" = m."id"
);

View File

@@ -1,11 +0,0 @@
-- Add isRecommended field to LlmModel table
-- This allows admins to mark a model as the recommended default
ALTER TABLE "LlmModel" ADD COLUMN "isRecommended" BOOLEAN NOT NULL DEFAULT false;
-- Set gpt-4o-mini as the default recommended model (if it exists)
UPDATE "LlmModel" SET "isRecommended" = true WHERE "slug" = 'gpt-4o-mini' AND "isEnabled" = true;
-- Create unique partial index to enforce only one recommended model at the database level
-- This prevents multiple rows from having isRecommended = true
CREATE UNIQUE INDEX "LlmModel_single_recommended_idx" ON "LlmModel" ("isRecommended") WHERE "isRecommended" = true;

View File

@@ -1,61 +0,0 @@
-- Add new columns to LlmModel table for extended model metadata
-- These columns support the LLM Picker UI enhancements
-- Add priceTier column: 1=cheapest, 2=medium, 3=expensive
ALTER TABLE "LlmModel" ADD COLUMN IF NOT EXISTS "priceTier" INTEGER NOT NULL DEFAULT 1;
-- Add creatorId column for model creator relationship (if not exists)
ALTER TABLE "LlmModel" ADD COLUMN IF NOT EXISTS "creatorId" TEXT;
-- Add isRecommended column (if not exists)
ALTER TABLE "LlmModel" ADD COLUMN IF NOT EXISTS "isRecommended" BOOLEAN NOT NULL DEFAULT FALSE;
-- Add index on creatorId if not exists
CREATE INDEX IF NOT EXISTS "LlmModel_creatorId_idx" ON "LlmModel"("creatorId");
-- Add foreign key for creatorId if not exists
DO $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = 'LlmModel_creatorId_fkey') THEN
-- Only add FK if LlmModelCreator table exists
IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'LlmModelCreator') THEN
ALTER TABLE "LlmModel" ADD CONSTRAINT "LlmModel_creatorId_fkey"
FOREIGN KEY ("creatorId") REFERENCES "LlmModelCreator"("id") ON DELETE SET NULL ON UPDATE CASCADE;
END IF;
END IF;
END $$;
-- Update priceTier values for existing models based on original MODEL_METADATA
-- Tier 1 = cheapest, Tier 2 = medium, Tier 3 = expensive
-- OpenAI models
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" = 'o3';
UPDATE "LlmModel" SET "priceTier" = 1 WHERE "slug" = 'o3-mini';
UPDATE "LlmModel" SET "priceTier" = 3 WHERE "slug" = 'o1';
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" = 'o1-mini';
UPDATE "LlmModel" SET "priceTier" = 3 WHERE "slug" = 'gpt-5.2';
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" = 'gpt-5.1';
UPDATE "LlmModel" SET "priceTier" = 1 WHERE "slug" = 'gpt-5';
UPDATE "LlmModel" SET "priceTier" = 1 WHERE "slug" = 'gpt-5-mini';
UPDATE "LlmModel" SET "priceTier" = 1 WHERE "slug" = 'gpt-5-nano';
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" = 'gpt-5-chat-latest';
UPDATE "LlmModel" SET "priceTier" = 1 WHERE "slug" LIKE 'gpt-4.1%';
UPDATE "LlmModel" SET "priceTier" = 1 WHERE "slug" = 'gpt-4o-mini';
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" = 'gpt-4o';
UPDATE "LlmModel" SET "priceTier" = 3 WHERE "slug" = 'gpt-4-turbo';
UPDATE "LlmModel" SET "priceTier" = 1 WHERE "slug" = 'gpt-3.5-turbo';
-- Anthropic models
UPDATE "LlmModel" SET "priceTier" = 3 WHERE "slug" LIKE 'claude-opus%';
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" LIKE 'claude-sonnet%';
UPDATE "LlmModel" SET "priceTier" = 3 WHERE "slug" LIKE 'claude%-4-5-sonnet%';
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" LIKE 'claude%-haiku%';
UPDATE "LlmModel" SET "priceTier" = 1 WHERE "slug" = 'claude-3-haiku-20240307';
-- OpenRouter models - Pro/expensive tiers
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" LIKE 'google/gemini%-pro%';
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" LIKE '%command-r-plus%';
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" LIKE '%sonar-pro%';
UPDATE "LlmModel" SET "priceTier" = 3 WHERE "slug" LIKE '%sonar-deep-research%';
UPDATE "LlmModel" SET "priceTier" = 3 WHERE "slug" = 'x-ai/grok-4';
UPDATE "LlmModel" SET "priceTier" = 3 WHERE "slug" LIKE '%qwen3-coder%';

View File

@@ -1096,153 +1096,6 @@ enum APIKeyStatus {
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
///////////// LLM REGISTRY AND BILLING DATA /////////////
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// LlmCostUnit: Defines how LLM MODEL costs are calculated (per run or per token).
// This is distinct from BlockCostType (in backend/data/block.py) which defines
// how BLOCK EXECUTION costs are calculated (per run, per byte, or per second).
// LlmCostUnit is for pricing individual LLM model API calls in the registry,
// while BlockCostType is for billing platform block executions.
enum LlmCostUnit {
RUN
TOKENS
}
model LlmModelCreator {
id String @id @default(uuid())
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
name String @unique // e.g., "openai", "anthropic", "meta"
displayName String // e.g., "OpenAI", "Anthropic", "Meta"
description String?
websiteUrl String? // Link to creator's website
logoUrl String? // URL to creator's logo
metadata Json @default("{}")
Models LlmModel[]
}
model LlmProvider {
id String @id @default(uuid())
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
name String @unique
displayName String
description String?
defaultCredentialProvider String?
defaultCredentialId String?
defaultCredentialType String?
supportsTools Boolean @default(true)
supportsJsonOutput Boolean @default(true)
supportsReasoning Boolean @default(false)
supportsParallelTool Boolean @default(false)
metadata Json @default("{}")
Models LlmModel[]
}
model LlmModel {
id String @id @default(uuid())
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
slug String @unique
displayName String
description String?
providerId String
Provider LlmProvider @relation(fields: [providerId], references: [id], onDelete: Restrict)
// Creator is the organization that created/trained the model (e.g., OpenAI, Meta)
// This is distinct from the provider who hosts/serves the model (e.g., OpenRouter)
creatorId String?
Creator LlmModelCreator? @relation(fields: [creatorId], references: [id], onDelete: SetNull)
contextWindow Int
maxOutputTokens Int?
priceTier Int @default(1) // 1=cheapest, 2=medium, 3=expensive
isEnabled Boolean @default(true)
isRecommended Boolean @default(false)
capabilities Json @default("{}")
metadata Json @default("{}")
Costs LlmModelCost[]
@@index([providerId, isEnabled])
@@index([creatorId])
@@index([slug])
}
model LlmModelCost {
id String @id @default(uuid())
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
unit LlmCostUnit @default(RUN)
creditCost Int
credentialProvider String
credentialId String?
credentialType String?
currency String?
metadata Json @default("{}")
llmModelId String
Model LlmModel @relation(fields: [llmModelId], references: [id], onDelete: Cascade)
@@unique([llmModelId, credentialProvider, unit])
@@index([llmModelId])
@@index([credentialProvider])
}
// Tracks model migrations for revert capability
// When a model is disabled with migration, we record which nodes were affected
// so they can be reverted when the original model is back online
model LlmModelMigration {
id String @id @default(uuid())
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
sourceModelSlug String // The original model that was disabled
targetModelSlug String // The model workflows were migrated to
reason String? // Why the migration happened (e.g., "Provider outage")
// Track affected nodes as JSON array of node IDs
// Format: ["node-uuid-1", "node-uuid-2", ...]
migratedNodeIds Json @default("[]")
nodeCount Int // Number of nodes migrated
// Custom pricing override for migrated workflows during the migration period.
// Use case: When migrating users from an expensive model (e.g., GPT-4) to a cheaper
// one (e.g., GPT-3.5), you may want to temporarily maintain the original pricing
// to avoid billing surprises, or offer a discount during the transition.
//
// IMPORTANT: This field is intended for integration with the billing system.
// When billing calculates costs for nodes affected by this migration, it should
// check if customCreditCost is set and use it instead of the target model's cost.
// If null, the target model's normal cost applies.
//
// TODO: Integrate with billing system to apply this override during cost calculation.
customCreditCost Int?
// Revert tracking
isReverted Boolean @default(false)
revertedAt DateTime?
@@index([sourceModelSlug])
@@index([targetModelSlug])
@@index([isReverted])
}
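// A minimal sketch of the intended billing integration (not yet implemented;
// see the TODO above). Assumes the caller has already looked up the active
// migration, if any, covering the node being billed.
//
//   function effectiveCreditCost(
//     migration: { customCreditCost: number | null; isReverted: boolean } | null,
//     targetModelCost: number,
//   ): number {
//     // The override only applies while the migration is active.
//     if (migration && !migration.isReverted && migration.customCreditCost != null) {
//       return migration.customCreditCost;
//     }
//     return targetModelCost;
//   }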
////////////// OAUTH PROVIDER TABLES //////////////////
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////

View File

@@ -1,8 +1,5 @@
"use client";
import { Sidebar } from "@/components/__legacy__/Sidebar";
import { Users, DollarSign, UserSearch, FileText } from "lucide-react";
import { Cpu } from "@phosphor-icons/react";
import { IconSliders } from "@/components/__legacy__/ui/icons";
@@ -29,11 +26,6 @@ const sidebarLinkGroups = [
href: "/admin/execution-analytics",
icon: <FileText className="h-6 w-6" />,
},
{
text: "LLM Registry",
href: "/admin/llms",
icon: <Cpu size={24} />,
},
{
text: "Admin User Management",
href: "/admin/settings",

View File

@@ -1,493 +0,0 @@
"use server";
import { revalidatePath } from "next/cache";
// Generated API functions
import {
getV2ListLlmProviders,
postV2CreateLlmProvider,
patchV2UpdateLlmProvider,
deleteV2DeleteLlmProvider,
getV2ListLlmModels,
postV2CreateLlmModel,
patchV2UpdateLlmModel,
patchV2ToggleLlmModelAvailability,
deleteV2DeleteLlmModelAndMigrateWorkflows,
getV2GetModelUsageCount,
getV2ListModelMigrations,
postV2RevertAModelMigration,
getV2ListModelCreators,
postV2CreateModelCreator,
patchV2UpdateModelCreator,
deleteV2DeleteModelCreator,
postV2SetRecommendedModel,
} from "@/app/api/__generated__/endpoints/admin/admin";
// Generated types
import type { LlmProvidersResponse } from "@/app/api/__generated__/models/llmProvidersResponse";
import type { LlmModelsResponse } from "@/app/api/__generated__/models/llmModelsResponse";
import type { UpsertLlmProviderRequest } from "@/app/api/__generated__/models/upsertLlmProviderRequest";
import type { CreateLlmModelRequest } from "@/app/api/__generated__/models/createLlmModelRequest";
import type { UpdateLlmModelRequest } from "@/app/api/__generated__/models/updateLlmModelRequest";
import type { ToggleLlmModelRequest } from "@/app/api/__generated__/models/toggleLlmModelRequest";
import type { LlmMigrationsResponse } from "@/app/api/__generated__/models/llmMigrationsResponse";
import type { LlmCreatorsResponse } from "@/app/api/__generated__/models/llmCreatorsResponse";
import type { UpsertLlmCreatorRequest } from "@/app/api/__generated__/models/upsertLlmCreatorRequest";
import type { LlmModelUsageResponse } from "@/app/api/__generated__/models/llmModelUsageResponse";
import { LlmCostUnit } from "@/app/api/__generated__/models/llmCostUnit";
const ADMIN_LLM_PATH = "/admin/llms";
// =============================================================================
// Utilities
// =============================================================================
/**
* Extracts and validates a required string field from FormData.
* Throws an error if the field is missing or empty.
*/
function getRequiredFormField(
formData: FormData,
fieldName: string,
displayName?: string,
): string {
const raw = formData.get(fieldName);
const value = raw ? String(raw).trim() : "";
if (!value) {
throw new Error(`${displayName || fieldName} is required`);
}
return value;
}
/**
* Extracts and validates a required positive number field from FormData.
* Throws an error if the field is missing, empty, or not a positive number.
*/
function getRequiredPositiveNumber(
formData: FormData,
fieldName: string,
displayName?: string,
): number {
const raw = formData.get(fieldName);
const value = Number(raw);
if (raw === null || raw === "" || !Number.isFinite(value) || value <= 0) {
throw new Error(`${displayName || fieldName} must be a positive number`);
}
return value;
}
/**
* Extracts and validates a required number field from FormData.
* Throws an error if the field is missing, empty, or not a finite number.
*/
function getRequiredNumber(
formData: FormData,
fieldName: string,
displayName?: string,
): number {
const raw = formData.get(fieldName);
const value = Number(raw);
if (raw === null || raw === "" || !Number.isFinite(value)) {
throw new Error(`${displayName || fieldName} is required`);
}
return value;
}
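// Taken together, the helpers let an action validate a submitted form in a few
// lines. A sketch with hypothetical field names:
function parseNewModelForm(formData: FormData) {
  return {
    providerId: getRequiredFormField(formData, "provider_id", "Provider"),
    contextWindow: getRequiredPositiveNumber(
      formData,
      "context_window",
      "Context window",
    ),
    creditCost: getRequiredNumber(formData, "credit_cost", "Credit cost"),
  };
}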
// =============================================================================
// Provider Actions
// =============================================================================
export async function fetchLlmProviders(): Promise<LlmProvidersResponse> {
const response = await getV2ListLlmProviders({ include_models: true });
if (response.status !== 200) {
throw new Error("Failed to fetch LLM providers");
}
return response.data;
}
export async function createLlmProviderAction(formData: FormData) {
const payload: UpsertLlmProviderRequest = {
name: String(formData.get("name") || "").trim(),
display_name: String(formData.get("display_name") || "").trim(),
description: formData.get("description")
? String(formData.get("description"))
: undefined,
default_credential_provider: formData.get("default_credential_provider")
? String(formData.get("default_credential_provider")).trim()
: undefined,
default_credential_id: formData.get("default_credential_id")
? String(formData.get("default_credential_id")).trim()
: undefined,
default_credential_type: formData.get("default_credential_type")
? String(formData.get("default_credential_type")).trim()
: "api_key",
supports_tools: formData.getAll("supports_tools").includes("on"),
supports_json_output: formData
.getAll("supports_json_output")
.includes("on"),
supports_reasoning: formData.getAll("supports_reasoning").includes("on"),
supports_parallel_tool: formData
.getAll("supports_parallel_tool")
.includes("on"),
metadata: {},
};
const response = await postV2CreateLlmProvider(payload);
if (response.status !== 200) {
throw new Error("Failed to create LLM provider");
}
revalidatePath(ADMIN_LLM_PATH);
}
export async function deleteLlmProviderAction(
formData: FormData,
): Promise<void> {
const providerId = getRequiredFormField(
formData,
"provider_id",
"Provider id",
);
const response = await deleteV2DeleteLlmProvider(providerId);
if (response.status !== 200) {
const errorData = response.data as { detail?: string };
throw new Error(errorData?.detail || "Failed to delete provider");
}
revalidatePath(ADMIN_LLM_PATH);
}
export async function updateLlmProviderAction(formData: FormData) {
const providerId = getRequiredFormField(
formData,
"provider_id",
"Provider id",
);
const payload: UpsertLlmProviderRequest = {
name: String(formData.get("name") || "").trim(),
display_name: String(formData.get("display_name") || "").trim(),
description: formData.get("description")
? String(formData.get("description"))
: undefined,
default_credential_provider: formData.get("default_credential_provider")
? String(formData.get("default_credential_provider")).trim()
: undefined,
default_credential_id: formData.get("default_credential_id")
? String(formData.get("default_credential_id")).trim()
: undefined,
default_credential_type: formData.get("default_credential_type")
? String(formData.get("default_credential_type")).trim()
: "api_key",
supports_tools: formData.getAll("supports_tools").includes("on"),
supports_json_output: formData
.getAll("supports_json_output")
.includes("on"),
supports_reasoning: formData.getAll("supports_reasoning").includes("on"),
supports_parallel_tool: formData
.getAll("supports_parallel_tool")
.includes("on"),
metadata: {},
};
const response = await patchV2UpdateLlmProvider(providerId, payload);
if (response.status !== 200) {
throw new Error("Failed to update LLM provider");
}
revalidatePath(ADMIN_LLM_PATH);
}
// =============================================================================
// Model Actions
// =============================================================================
export async function fetchLlmModels(): Promise<LlmModelsResponse> {
const response = await getV2ListLlmModels();
if (response.status !== 200) {
throw new Error("Failed to fetch LLM models");
}
return response.data;
}
export async function createLlmModelAction(formData: FormData) {
const providerId = getRequiredFormField(formData, "provider_id", "Provider");
const creatorId = formData.get("creator_id");
const contextWindow = getRequiredPositiveNumber(
formData,
"context_window",
"Context window",
);
const creditCost = getRequiredNumber(formData, "credit_cost", "Credit cost");
// Fetch provider to get default credentials
const providersResponse = await getV2ListLlmProviders({
include_models: false,
});
if (providersResponse.status !== 200) {
throw new Error("Failed to fetch providers");
}
const provider = providersResponse.data.providers.find(
(p) => p.id === providerId,
);
if (!provider) {
throw new Error("Provider not found");
}
const payload: CreateLlmModelRequest = {
slug: String(formData.get("slug") || "").trim(),
display_name: String(formData.get("display_name") || "").trim(),
description: formData.get("description")
? String(formData.get("description"))
: undefined,
provider_id: providerId,
creator_id: creatorId ? String(creatorId) : undefined,
context_window: contextWindow,
max_output_tokens: formData.get("max_output_tokens")
? Number(formData.get("max_output_tokens"))
: undefined,
is_enabled: formData.getAll("is_enabled").includes("on"),
capabilities: {},
metadata: {},
costs: [
{
unit: (formData.get("unit") as LlmCostUnit) || LlmCostUnit.RUN,
credit_cost: creditCost,
credential_provider:
provider.default_credential_provider || provider.name,
credential_id: provider.default_credential_id || undefined,
credential_type: provider.default_credential_type || "api_key",
metadata: {},
},
],
};
const response = await postV2CreateLlmModel(payload);
if (response.status !== 200) {
throw new Error("Failed to create LLM model");
}
revalidatePath(ADMIN_LLM_PATH);
}
export async function updateLlmModelAction(formData: FormData) {
const modelId = getRequiredFormField(formData, "model_id", "Model id");
const creatorId = formData.get("creator_id");
const payload: UpdateLlmModelRequest = {
display_name: formData.get("display_name")
? String(formData.get("display_name"))
: undefined,
description: formData.get("description")
? String(formData.get("description"))
: undefined,
provider_id: formData.get("provider_id")
? String(formData.get("provider_id"))
: undefined,
creator_id: creatorId ? String(creatorId) : undefined,
context_window: formData.get("context_window")
? Number(formData.get("context_window"))
: undefined,
max_output_tokens: formData.get("max_output_tokens")
? Number(formData.get("max_output_tokens"))
: undefined,
is_enabled: formData.has("is_enabled")
? formData.getAll("is_enabled").includes("on")
: undefined,
costs: formData.get("credit_cost")
? [
{
unit: (formData.get("unit") as LlmCostUnit) || LlmCostUnit.RUN,
credit_cost: Number(formData.get("credit_cost")),
credential_provider: String(
formData.get("credential_provider") || "",
).trim(),
credential_id: formData.get("credential_id")
? String(formData.get("credential_id"))
: undefined,
credential_type: formData.get("credential_type")
? String(formData.get("credential_type"))
: undefined,
metadata: {},
},
]
: undefined,
};
const response = await patchV2UpdateLlmModel(modelId, payload);
if (response.status !== 200) {
throw new Error("Failed to update LLM model");
}
revalidatePath(ADMIN_LLM_PATH);
}
export async function toggleLlmModelAction(formData: FormData): Promise<void> {
const modelId = getRequiredFormField(formData, "model_id", "Model id");
const shouldEnable = formData.get("is_enabled") === "true";
const migrateToSlug = formData.get("migrate_to_slug");
const migrationReason = formData.get("migration_reason");
const customCreditCost = formData.get("custom_credit_cost");
const payload: ToggleLlmModelRequest = {
is_enabled: shouldEnable,
migrate_to_slug: migrateToSlug ? String(migrateToSlug) : undefined,
migration_reason: migrationReason ? String(migrationReason) : undefined,
custom_credit_cost: customCreditCost ? Number(customCreditCost) : undefined,
};
const response = await patchV2ToggleLlmModelAvailability(modelId, payload);
if (response.status !== 200) {
throw new Error("Failed to toggle LLM model");
}
revalidatePath(ADMIN_LLM_PATH);
}
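// For reference, a minimal sketch (illustrative slugs only) of the FormData a
// disable-with-migration submission carries; DisableModelModal below assembles
// the same fields through hidden inputs.
async function disableWithMigration(modelId: string) {
  const fd = new FormData();
  fd.set("model_id", modelId);
  fd.set("is_enabled", "false");
  fd.set("migrate_to_slug", "gpt-4o-mini"); // hypothetical replacement slug
  fd.set("migration_reason", "Provider outage");
  await toggleLlmModelAction(fd);
}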
export async function deleteLlmModelAction(formData: FormData): Promise<void> {
const modelId = getRequiredFormField(formData, "model_id", "Model id");
const rawReplacement = formData.get("replacement_model_slug");
const replacementModelSlug =
rawReplacement && String(rawReplacement).trim()
? String(rawReplacement).trim()
: undefined;
const response = await deleteV2DeleteLlmModelAndMigrateWorkflows(modelId, {
replacement_model_slug: replacementModelSlug,
});
if (response.status !== 200) {
throw new Error("Failed to delete model");
}
revalidatePath(ADMIN_LLM_PATH);
}
export async function fetchLlmModelUsage(
modelId: string,
): Promise<LlmModelUsageResponse> {
const response = await getV2GetModelUsageCount(modelId);
if (response.status !== 200) {
throw new Error("Failed to fetch model usage");
}
return response.data;
}
// =============================================================================
// Migration Actions
// =============================================================================
export async function fetchLlmMigrations(
includeReverted: boolean = false,
): Promise<LlmMigrationsResponse> {
const response = await getV2ListModelMigrations({
include_reverted: includeReverted,
});
if (response.status !== 200) {
throw new Error("Failed to fetch migrations");
}
return response.data;
}
export async function revertLlmMigrationAction(
formData: FormData,
): Promise<void> {
const migrationId = getRequiredFormField(
formData,
"migration_id",
"Migration id",
);
const response = await postV2RevertAModelMigration(migrationId, null);
if (response.status !== 200) {
throw new Error("Failed to revert migration");
}
revalidatePath(ADMIN_LLM_PATH);
}
// =============================================================================
// Creator Actions
// =============================================================================
export async function fetchLlmCreators(): Promise<LlmCreatorsResponse> {
const response = await getV2ListModelCreators();
if (response.status !== 200) {
throw new Error("Failed to fetch creators");
}
return response.data;
}
export async function createLlmCreatorAction(
formData: FormData,
): Promise<void> {
const payload: UpsertLlmCreatorRequest = {
name: String(formData.get("name") || "").trim(),
display_name: String(formData.get("display_name") || "").trim(),
description: formData.get("description")
? String(formData.get("description"))
: undefined,
website_url: formData.get("website_url")
? String(formData.get("website_url")).trim()
: undefined,
logo_url: formData.get("logo_url")
? String(formData.get("logo_url")).trim()
: undefined,
metadata: {},
};
const response = await postV2CreateModelCreator(payload);
if (response.status !== 200) {
throw new Error("Failed to create creator");
}
revalidatePath(ADMIN_LLM_PATH);
}
export async function updateLlmCreatorAction(
formData: FormData,
): Promise<void> {
const creatorId = getRequiredFormField(formData, "creator_id", "Creator id");
const payload: UpsertLlmCreatorRequest = {
name: String(formData.get("name") || "").trim(),
display_name: String(formData.get("display_name") || "").trim(),
description: formData.get("description")
? String(formData.get("description"))
: undefined,
website_url: formData.get("website_url")
? String(formData.get("website_url")).trim()
: undefined,
logo_url: formData.get("logo_url")
? String(formData.get("logo_url")).trim()
: undefined,
metadata: {},
};
const response = await patchV2UpdateModelCreator(creatorId, payload);
if (response.status !== 200) {
throw new Error("Failed to update creator");
}
revalidatePath(ADMIN_LLM_PATH);
}
export async function deleteLlmCreatorAction(
formData: FormData,
): Promise<void> {
const creatorId = getRequiredFormField(formData, "creator_id", "Creator id");
const response = await deleteV2DeleteModelCreator(creatorId);
if (response.status !== 200) {
throw new Error("Failed to delete creator");
}
revalidatePath(ADMIN_LLM_PATH);
}
// =============================================================================
// Recommended Model Actions
// =============================================================================
export async function setRecommendedModelAction(
formData: FormData,
): Promise<void> {
const modelId = getRequiredFormField(formData, "model_id", "Model id");
const response = await postV2SetRecommendedModel({ model_id: modelId });
if (response.status !== 200) {
throw new Error("Failed to set recommended model");
}
revalidatePath(ADMIN_LLM_PATH);
}

View File

@@ -1,147 +0,0 @@
"use client";
import { useState } from "react";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import { createLlmCreatorAction } from "../actions";
import { useRouter } from "next/navigation";
export function AddCreatorModal() {
const [open, setOpen] = useState(false);
const [isSubmitting, setIsSubmitting] = useState(false);
const [error, setError] = useState<string | null>(null);
const router = useRouter();
async function handleSubmit(formData: FormData) {
setIsSubmitting(true);
setError(null);
try {
await createLlmCreatorAction(formData);
setOpen(false);
router.refresh();
} catch (err) {
setError(err instanceof Error ? err.message : "Failed to create creator");
} finally {
setIsSubmitting(false);
}
}
return (
<Dialog
title="Add Creator"
controlled={{ isOpen: open, set: setOpen }}
styling={{ maxWidth: "512px" }}
>
<Dialog.Trigger>
<Button variant="primary" size="small">
Add Creator
</Button>
</Dialog.Trigger>
<Dialog.Content>
<div className="mb-4 text-sm text-muted-foreground">
Add a new model creator (the organization that made/trained the
model).
</div>
<form action={handleSubmit} className="space-y-4">
<div className="grid gap-4 sm:grid-cols-2">
<div className="space-y-2">
<label
htmlFor="name"
className="text-sm font-medium text-foreground"
>
Name (slug) <span className="text-destructive">*</span>
</label>
<input
id="name"
required
name="name"
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="openai"
/>
<p className="text-xs text-muted-foreground">
Lowercase identifier (e.g., openai, meta, anthropic)
</p>
</div>
<div className="space-y-2">
<label
htmlFor="display_name"
className="text-sm font-medium text-foreground"
>
Display Name <span className="text-destructive">*</span>
</label>
<input
id="display_name"
required
name="display_name"
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="OpenAI"
/>
</div>
</div>
<div className="space-y-2">
<label
htmlFor="description"
className="text-sm font-medium text-foreground"
>
Description
</label>
<textarea
id="description"
name="description"
rows={2}
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="Creator of GPT models..."
/>
</div>
<div className="space-y-2">
<label
htmlFor="website_url"
className="text-sm font-medium text-foreground"
>
Website URL
</label>
<input
id="website_url"
name="website_url"
type="url"
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="https://openai.com"
/>
</div>
{error && (
<div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
{error}
</div>
)}
<Dialog.Footer>
<Button
variant="ghost"
size="small"
type="button"
onClick={() => {
setOpen(false);
setError(null);
}}
disabled={isSubmitting}
>
Cancel
</Button>
<Button
variant="primary"
size="small"
type="submit"
disabled={isSubmitting}
>
{isSubmitting ? "Creating..." : "Add Creator"}
</Button>
</Dialog.Footer>
</form>
</Dialog.Content>
</Dialog>
);
}

View File

@@ -1,314 +0,0 @@
"use client";
import { useState } from "react";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";
import type { LlmModelCreator } from "@/app/api/__generated__/models/llmModelCreator";
import { createLlmModelAction } from "../actions";
import { useRouter } from "next/navigation";
interface Props {
providers: LlmProvider[];
creators: LlmModelCreator[];
}
export function AddModelModal({ providers, creators }: Props) {
const [open, setOpen] = useState(false);
const [selectedCreatorId, setSelectedCreatorId] = useState("");
const [isSubmitting, setIsSubmitting] = useState(false);
const [error, setError] = useState<string | null>(null);
const router = useRouter();
async function handleSubmit(formData: FormData) {
setIsSubmitting(true);
setError(null);
try {
await createLlmModelAction(formData);
setOpen(false);
router.refresh();
} catch (err) {
setError(err instanceof Error ? err.message : "Failed to create model");
} finally {
setIsSubmitting(false);
}
}
// When provider changes, auto-select matching creator if one exists
function handleProviderChange(providerId: string) {
const provider = providers.find((p) => p.id === providerId);
if (provider) {
// Find creator with same name as provider (e.g., "openai" -> "openai")
const matchingCreator = creators.find((c) => c.name === provider.name);
if (matchingCreator) {
setSelectedCreatorId(matchingCreator.id);
} else {
// No matching creator (e.g., OpenRouter hosts other creators' models)
setSelectedCreatorId("");
}
}
}
return (
<Dialog
title="Add Model"
controlled={{ isOpen: open, set: setOpen }}
styling={{ maxWidth: "768px", maxHeight: "90vh", overflowY: "auto" }}
>
<Dialog.Trigger>
<Button variant="primary" size="small">
Add Model
</Button>
</Dialog.Trigger>
<Dialog.Content>
<div className="mb-4 text-sm text-muted-foreground">
Register a new model slug, metadata, and pricing.
</div>
<form action={handleSubmit} className="space-y-6">
{/* Basic Information */}
<div className="space-y-4">
<div className="space-y-1">
<h3 className="text-sm font-semibold text-foreground">
Basic Information
</h3>
<p className="text-xs text-muted-foreground">
Core model details
</p>
</div>
<div className="grid gap-4 sm:grid-cols-2">
<div className="space-y-2">
<label
htmlFor="slug"
className="text-sm font-medium text-foreground"
>
Model Slug <span className="text-destructive">*</span>
</label>
<input
id="slug"
required
name="slug"
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="gpt-4.1-mini-2025-04-14"
/>
</div>
<div className="space-y-2">
<label
htmlFor="display_name"
className="text-sm font-medium text-foreground"
>
Display Name <span className="text-destructive">*</span>
</label>
<input
id="display_name"
required
name="display_name"
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="GPT 4.1 Mini"
/>
</div>
</div>
<div className="space-y-2">
<label
htmlFor="description"
className="text-sm font-medium text-foreground"
>
Description
</label>
<textarea
id="description"
name="description"
rows={3}
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="Optional description..."
/>
</div>
</div>
{/* Model Configuration */}
<div className="space-y-4 border-t border-border pt-6">
<div className="space-y-1">
<h3 className="text-sm font-semibold text-foreground">
Model Configuration
</h3>
<p className="text-xs text-muted-foreground">
Model capabilities and limits
</p>
</div>
<div className="grid gap-4 sm:grid-cols-2">
<div className="space-y-2">
<label
htmlFor="provider_id"
className="text-sm font-medium text-foreground"
>
Provider <span className="text-destructive">*</span>
</label>
<select
id="provider_id"
required
name="provider_id"
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
defaultValue=""
onChange={(e) => handleProviderChange(e.target.value)}
>
<option value="" disabled>
Select provider
</option>
{providers.map((provider) => (
<option key={provider.id} value={provider.id}>
{provider.display_name} ({provider.name})
</option>
))}
</select>
<p className="text-xs text-muted-foreground">
Who hosts/serves the model
</p>
</div>
<div className="space-y-2">
<label
htmlFor="creator_id"
className="text-sm font-medium text-foreground"
>
Creator
</label>
<select
id="creator_id"
name="creator_id"
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
value={selectedCreatorId}
onChange={(e) => setSelectedCreatorId(e.target.value)}
>
<option value="">No creator selected</option>
{creators.map((creator) => (
<option key={creator.id} value={creator.id}>
{creator.display_name} ({creator.name})
</option>
))}
</select>
<p className="text-xs text-muted-foreground">
Who made/trained the model (e.g., OpenAI, Meta)
</p>
</div>
</div>
<div className="grid gap-4 sm:grid-cols-2">
<div className="space-y-2">
<label
htmlFor="context_window"
className="text-sm font-medium text-foreground"
>
Context Window <span className="text-destructive">*</span>
</label>
<input
id="context_window"
required
type="number"
name="context_window"
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="128000"
min={1}
/>
</div>
<div className="space-y-2">
<label
htmlFor="max_output_tokens"
className="text-sm font-medium text-foreground"
>
Max Output Tokens
</label>
<input
id="max_output_tokens"
type="number"
name="max_output_tokens"
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="16384"
min={1}
/>
</div>
</div>
</div>
{/* Pricing */}
<div className="space-y-4 border-t border-border pt-6">
<div className="space-y-1">
<h3 className="text-sm font-semibold text-foreground">Pricing</h3>
<p className="text-xs text-muted-foreground">
Credit cost per run (credentials are managed via the provider)
</p>
</div>
<div className="grid gap-4 sm:grid-cols-1">
<div className="space-y-2">
<label
htmlFor="credit_cost"
className="text-sm font-medium text-foreground"
>
Credit Cost <span className="text-destructive">*</span>
</label>
<input
id="credit_cost"
required
type="number"
name="credit_cost"
step="1"
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="5"
min={0}
/>
</div>
</div>
<p className="text-xs text-muted-foreground">
Credit cost is always in platform credits. Credentials are
inherited from the selected provider.
</p>
</div>
{/* Enabled Toggle */}
<div className="flex items-center gap-3 border-t border-border pt-6">
<input type="hidden" name="is_enabled" value="off" />
<input
id="is_enabled"
type="checkbox"
name="is_enabled"
defaultChecked
className="h-4 w-4 rounded border-input"
/>
<label
htmlFor="is_enabled"
className="text-sm font-medium text-foreground"
>
Enabled by default
</label>
</div>
{error && (
<div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
{error}
</div>
)}
<Dialog.Footer>
<Button
variant="ghost"
size="small"
type="button"
onClick={() => {
setOpen(false);
setError(null);
}}
disabled={isSubmitting}
>
Cancel
</Button>
<Button
variant="primary"
size="small"
type="submit"
disabled={isSubmitting}
>
{isSubmitting ? "Creating..." : "Save Model"}
</Button>
</Dialog.Footer>
</form>
</Dialog.Content>
</Dialog>
);
}

View File

@@ -1,268 +0,0 @@
"use client";
import { useState } from "react";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import { createLlmProviderAction } from "../actions";
import { useRouter } from "next/navigation";
export function AddProviderModal() {
const [open, setOpen] = useState(false);
const [isSubmitting, setIsSubmitting] = useState(false);
const [error, setError] = useState<string | null>(null);
const router = useRouter();
async function handleSubmit(formData: FormData) {
setIsSubmitting(true);
setError(null);
try {
await createLlmProviderAction(formData);
setOpen(false);
router.refresh();
} catch (err) {
setError(
err instanceof Error ? err.message : "Failed to create provider",
);
} finally {
setIsSubmitting(false);
}
}
return (
<Dialog
title="Add Provider"
controlled={{ isOpen: open, set: setOpen }}
styling={{ maxWidth: "768px", maxHeight: "90vh", overflowY: "auto" }}
>
<Dialog.Trigger>
<Button variant="primary" size="small">
Add Provider
</Button>
</Dialog.Trigger>
<Dialog.Content>
<div className="mb-4 text-sm text-muted-foreground">
Define a new upstream provider and default credential information.
</div>
{/* Setup Instructions */}
<div className="mb-6 rounded-lg border border-primary/30 bg-primary/5 p-4">
<div className="space-y-2">
<h4 className="text-sm font-semibold text-foreground">
Before Adding a Provider
</h4>
<p className="text-xs text-muted-foreground">
To use a new provider, you must first configure its credentials in
the backend:
</p>
<ol className="list-inside list-decimal space-y-1 text-xs text-muted-foreground">
<li>
Add the credential to{" "}
<code className="rounded bg-muted px-1 py-0.5 font-mono">
backend/integrations/credentials_store.py
</code>{" "}
with a UUID, provider name, and settings secret reference
</li>
<li>
Add it to the{" "}
<code className="rounded bg-muted px-1 py-0.5 font-mono">
PROVIDER_CREDENTIALS
</code>{" "}
dictionary in{" "}
<code className="rounded bg-muted px-1 py-0.5 font-mono">
backend/data/block_cost_config.py
</code>
</li>
<li>
In the &quot;Credential Provider&quot; field below, use the{" "}
<strong>exact provider name</strong> that matches the key in{" "}
<code className="rounded bg-muted px-1 py-0.5 font-mono">
PROVIDER_CREDENTIALS
</code>
</li>
</ol>
</div>
</div>
<form action={handleSubmit} className="space-y-6">
{/* Basic Information */}
<div className="space-y-4">
<div className="space-y-1">
<h3 className="text-sm font-semibold text-foreground">
Basic Information
</h3>
<p className="text-xs text-muted-foreground">
Core provider details
</p>
</div>
<div className="grid gap-4 sm:grid-cols-2">
<div className="space-y-2">
<label
htmlFor="name"
className="text-sm font-medium text-foreground"
>
Provider Slug <span className="text-destructive">*</span>
</label>
<input
id="name"
required
name="name"
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="e.g. openai"
/>
</div>
<div className="space-y-2">
<label
htmlFor="display_name"
className="text-sm font-medium text-foreground"
>
Display Name <span className="text-destructive">*</span>
</label>
<input
id="display_name"
required
name="display_name"
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="OpenAI"
/>
</div>
</div>
<div className="space-y-2">
<label
htmlFor="description"
className="text-sm font-medium text-foreground"
>
Description
</label>
<textarea
id="description"
name="description"
rows={3}
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="Optional description..."
/>
</div>
</div>
{/* Default Credentials */}
<div className="space-y-4 border-t border-border pt-6">
<div className="space-y-1">
<h3 className="text-sm font-semibold text-foreground">
Default Credentials
</h3>
<p className="text-xs text-muted-foreground">
Credential provider name that matches the key in{" "}
<code className="rounded bg-muted px-1 py-0.5 font-mono text-xs">
PROVIDER_CREDENTIALS
</code>
</p>
</div>
<div className="space-y-2">
<label
htmlFor="default_credential_provider"
className="text-sm font-medium text-foreground"
>
Credential Provider <span className="text-destructive">*</span>
</label>
<input
id="default_credential_provider"
name="default_credential_provider"
required
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="openai"
/>
<p className="text-xs text-muted-foreground">
<strong>Important:</strong> This must exactly match the key in
the{" "}
<code className="rounded bg-muted px-1 py-0.5 font-mono text-xs">
PROVIDER_CREDENTIALS
</code>{" "}
dictionary in{" "}
<code className="rounded bg-muted px-1 py-0.5 font-mono text-xs">
block_cost_config.py
</code>
. Common values: &quot;openai&quot;, &quot;anthropic&quot;,
&quot;groq&quot;, &quot;open_router&quot;, etc.
</p>
</div>
</div>
{/* Capabilities */}
<div className="space-y-4 border-t border-border pt-6">
<div className="space-y-1">
<h3 className="text-sm font-semibold text-foreground">
Capabilities
</h3>
<p className="text-xs text-muted-foreground">
Provider feature flags
</p>
</div>
<div className="grid gap-3 sm:grid-cols-2">
{[
{ name: "supports_tools", label: "Supports tools" },
{ name: "supports_json_output", label: "Supports JSON output" },
{ name: "supports_reasoning", label: "Supports reasoning" },
{
name: "supports_parallel_tool",
label: "Supports parallel tool calls",
},
].map(({ name, label }) => (
<div
key={name}
className="flex items-center gap-3 rounded-md border border-border bg-muted/30 px-4 py-3 transition-colors hover:bg-muted/50"
>
<input type="hidden" name={name} value="off" />
<input
id={name}
type="checkbox"
name={name}
defaultChecked={
name !== "supports_reasoning" &&
name !== "supports_parallel_tool"
}
className="h-4 w-4 rounded border-input"
/>
<label
htmlFor={name}
className="text-sm font-medium text-foreground"
>
{label}
</label>
</div>
))}
</div>
</div>
{error && (
<div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
{error}
</div>
)}
<Dialog.Footer>
<Button
variant="ghost"
size="small"
type="button"
onClick={() => {
setOpen(false);
setError(null);
}}
disabled={isSubmitting}
>
Cancel
</Button>
<Button
variant="primary"
size="small"
type="submit"
disabled={isSubmitting}
>
{isSubmitting ? "Creating..." : "Save Provider"}
</Button>
</Dialog.Footer>
</form>
</Dialog.Content>
</Dialog>
);
}

View File

@@ -1,195 +0,0 @@
"use client";
import { useState } from "react";
import type { LlmModelCreator } from "@/app/api/__generated__/models/llmModelCreator";
import {
Table,
TableBody,
TableCell,
TableHead,
TableHeader,
TableRow,
} from "@/components/atoms/Table/Table";
import { Button } from "@/components/atoms/Button/Button";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { updateLlmCreatorAction } from "../actions";
import { useRouter } from "next/navigation";
import { DeleteCreatorModal } from "./DeleteCreatorModal";
export function CreatorsTable({ creators }: { creators: LlmModelCreator[] }) {
if (!creators.length) {
return (
<div className="rounded-lg border border-dashed border-border p-6 text-center text-sm text-muted-foreground">
No creators registered yet.
</div>
);
}
return (
<div className="rounded-lg border">
<Table>
<TableHeader>
<TableRow>
<TableHead>Creator</TableHead>
<TableHead>Description</TableHead>
<TableHead>Website</TableHead>
<TableHead>Actions</TableHead>
</TableRow>
</TableHeader>
<TableBody>
{creators.map((creator) => (
<TableRow key={creator.id}>
<TableCell>
<div className="font-medium">{creator.display_name}</div>
<div className="text-xs text-muted-foreground">
{creator.name}
</div>
</TableCell>
<TableCell>
<span className="text-sm text-muted-foreground">
{creator.description || "—"}
</span>
</TableCell>
<TableCell>
{creator.website_url ? (
<a
href={creator.website_url}
target="_blank"
rel="noopener noreferrer"
className="text-sm text-primary hover:underline"
>
{(() => {
try {
return new URL(creator.website_url).hostname;
} catch {
return creator.website_url;
}
})()}
</a>
) : (
<span className="text-muted-foreground">—</span>
)}
</TableCell>
<TableCell>
<div className="flex items-center justify-end gap-2">
<EditCreatorModal creator={creator} />
<DeleteCreatorModal creator={creator} />
</div>
</TableCell>
</TableRow>
))}
</TableBody>
</Table>
</div>
);
}
function EditCreatorModal({ creator }: { creator: LlmModelCreator }) {
const [open, setOpen] = useState(false);
const [isSubmitting, setIsSubmitting] = useState(false);
const [error, setError] = useState<string | null>(null);
const router = useRouter();
async function handleSubmit(formData: FormData) {
setIsSubmitting(true);
setError(null);
try {
await updateLlmCreatorAction(formData);
setOpen(false);
router.refresh();
} catch (err) {
setError(err instanceof Error ? err.message : "Failed to update creator");
} finally {
setIsSubmitting(false);
}
}
return (
<Dialog
title="Edit Creator"
controlled={{ isOpen: open, set: setOpen }}
styling={{ maxWidth: "512px" }}
>
<Dialog.Trigger>
<Button variant="outline" size="small" className="min-w-0">
Edit
</Button>
</Dialog.Trigger>
<Dialog.Content>
<form action={handleSubmit} className="space-y-4">
<input type="hidden" name="creator_id" value={creator.id} />
<div className="grid gap-4 sm:grid-cols-2">
<div className="space-y-2">
<label className="text-sm font-medium">Name (slug)</label>
<input
required
name="name"
defaultValue={creator.name}
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm"
/>
</div>
<div className="space-y-2">
<label className="text-sm font-medium">Display Name</label>
<input
required
name="display_name"
defaultValue={creator.display_name}
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm"
/>
</div>
</div>
<div className="space-y-2">
<label className="text-sm font-medium">Description</label>
<textarea
name="description"
rows={2}
defaultValue={creator.description ?? ""}
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm"
/>
</div>
<div className="space-y-2">
<label className="text-sm font-medium">Website URL</label>
<input
name="website_url"
type="url"
defaultValue={creator.website_url ?? ""}
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm"
/>
</div>
{error && (
<div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
{error}
</div>
)}
<Dialog.Footer>
<Button
variant="ghost"
size="small"
type="button"
onClick={() => {
setOpen(false);
setError(null);
}}
disabled={isSubmitting}
>
Cancel
</Button>
<Button
variant="primary"
size="small"
type="submit"
disabled={isSubmitting}
>
{isSubmitting ? "Updating..." : "Update"}
</Button>
</Dialog.Footer>
</form>
</Dialog.Content>
</Dialog>
);
}

View File

@@ -1,107 +0,0 @@
"use client";
import { useState } from "react";
import { useRouter } from "next/navigation";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import type { LlmModelCreator } from "@/app/api/__generated__/models/llmModelCreator";
import { deleteLlmCreatorAction } from "../actions";
export function DeleteCreatorModal({ creator }: { creator: LlmModelCreator }) {
const [open, setOpen] = useState(false);
const [isDeleting, setIsDeleting] = useState(false);
const [error, setError] = useState<string | null>(null);
const router = useRouter();
async function handleDelete(formData: FormData) {
setIsDeleting(true);
setError(null);
try {
await deleteLlmCreatorAction(formData);
setOpen(false);
router.refresh();
} catch (err) {
setError(err instanceof Error ? err.message : "Failed to delete creator");
} finally {
setIsDeleting(false);
}
}
return (
<Dialog
title="Delete Creator"
controlled={{ isOpen: open, set: setOpen }}
styling={{ maxWidth: "480px" }}
>
<Dialog.Trigger>
<Button
type="button"
variant="outline"
size="small"
className="min-w-0 text-destructive hover:bg-destructive/10"
>
Delete
</Button>
</Dialog.Trigger>
<Dialog.Content>
<div className="space-y-4">
<div className="rounded-lg border border-amber-500/30 bg-amber-500/10 p-4 dark:border-amber-400/30 dark:bg-amber-400/10">
<div className="flex items-start gap-3">
<div className="flex-shrink-0 text-amber-600 dark:text-amber-400">
</div>
<div className="text-sm text-foreground">
<p className="font-semibold">You are about to delete:</p>
<p className="mt-1">
<span className="font-medium">{creator.display_name}</span>{" "}
<span className="text-muted-foreground">
({creator.name})
</span>
</p>
<p className="mt-2 text-muted-foreground">
Models using this creator will have their creator field
cleared. This is safe and won&apos;t affect model
functionality.
</p>
</div>
</div>
</div>
<form action={handleDelete} className="space-y-4">
<input type="hidden" name="creator_id" value={creator.id} />
{error && (
<div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
{error}
</div>
)}
<Dialog.Footer>
<Button
variant="ghost"
size="small"
onClick={() => {
setOpen(false);
setError(null);
}}
disabled={isDeleting}
type="button"
>
Cancel
</Button>
<Button
type="submit"
variant="primary"
size="small"
disabled={isDeleting}
className="bg-destructive text-destructive-foreground hover:bg-destructive/90"
>
{isDeleting ? "Deleting..." : "Delete Creator"}
</Button>
</Dialog.Footer>
</form>
</div>
</Dialog.Content>
</Dialog>
);
}

View File

@@ -1,224 +0,0 @@
"use client";
import { useState } from "react";
import { useRouter } from "next/navigation";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import type { LlmModel } from "@/app/api/__generated__/models/llmModel";
import { deleteLlmModelAction, fetchLlmModelUsage } from "../actions";
export function DeleteModelModal({
model,
availableModels,
}: {
model: LlmModel;
availableModels: LlmModel[];
}) {
const router = useRouter();
const [open, setOpen] = useState(false);
const [selectedReplacement, setSelectedReplacement] = useState<string>("");
const [isDeleting, setIsDeleting] = useState(false);
const [error, setError] = useState<string | null>(null);
const [usageCount, setUsageCount] = useState<number | null>(null);
const [usageLoading, setUsageLoading] = useState(false);
const [usageError, setUsageError] = useState<string | null>(null);
// Filter out the current model and disabled models from replacement options
const replacementOptions = availableModels.filter(
(m) => m.id !== model.id && m.is_enabled,
);
// Check if migration is required (has blocks using this model)
const requiresMigration = usageCount !== null && usageCount > 0;
async function fetchUsage() {
setUsageLoading(true);
setUsageError(null);
try {
const usage = await fetchLlmModelUsage(model.id);
setUsageCount(usage.node_count);
} catch (err) {
console.error("Failed to fetch model usage:", err);
setUsageError("Failed to load usage count");
setUsageCount(null);
} finally {
setUsageLoading(false);
}
}
async function handleDelete(formData: FormData) {
setIsDeleting(true);
setError(null);
try {
await deleteLlmModelAction(formData);
setOpen(false);
router.refresh();
} catch (err) {
setError(err instanceof Error ? err.message : "Failed to delete model");
} finally {
setIsDeleting(false);
}
}
// Determine if delete button should be enabled
const canDelete =
!isDeleting &&
!usageLoading &&
usageCount !== null &&
(requiresMigration
? selectedReplacement && replacementOptions.length > 0
: true);
return (
<Dialog
title="Delete Model"
controlled={{
isOpen: open,
set: async (isOpen) => {
setOpen(isOpen);
if (isOpen) {
setUsageCount(null);
setUsageError(null);
setError(null);
setSelectedReplacement("");
await fetchUsage();
}
},
}}
styling={{ maxWidth: "600px" }}
>
<Dialog.Trigger>
<Button
type="button"
variant="outline"
size="small"
className="min-w-0 text-destructive hover:bg-destructive/10"
>
Delete
</Button>
</Dialog.Trigger>
<Dialog.Content>
<div className="mb-4 text-sm text-muted-foreground">
{requiresMigration
? "This action cannot be undone. All workflows using this model will be migrated to the replacement model you select."
: "This action cannot be undone."}
</div>
<div className="space-y-4">
<div className="rounded-lg border border-amber-500/30 bg-amber-500/10 p-4 dark:border-amber-400/30 dark:bg-amber-400/10">
<div className="flex items-start gap-3">
<div className="flex-shrink-0 text-amber-600 dark:text-amber-400">
</div>
<div className="text-sm text-foreground">
<p className="font-semibold">You are about to delete:</p>
<p className="mt-1">
<span className="font-medium">{model.display_name}</span>{" "}
<span className="text-muted-foreground">({model.slug})</span>
</p>
{usageLoading && (
<p className="mt-2 text-muted-foreground">
Loading usage count...
</p>
)}
{usageError && (
<p className="mt-2 text-destructive">{usageError}</p>
)}
{!usageLoading && !usageError && usageCount !== null && (
<p className="mt-2 font-semibold">
Impact: {usageCount} block{usageCount !== 1 ? "s" : ""}{" "}
currently use this model
</p>
)}
{requiresMigration && (
<p className="mt-2 text-muted-foreground">
All workflows currently using this model will be
automatically updated to use the replacement model you
choose below.
</p>
)}
{!usageLoading && usageCount === 0 && (
<p className="mt-2 text-muted-foreground">
No workflows are using this model. It can be safely deleted.
</p>
)}
</div>
</div>
</div>
<form action={handleDelete} className="space-y-4">
<input type="hidden" name="model_id" value={model.id} />
<input
type="hidden"
name="replacement_model_slug"
value={selectedReplacement}
/>
{requiresMigration && (
<label className="text-sm font-medium">
<span className="mb-2 block">
Select Replacement Model{" "}
<span className="text-destructive">*</span>
</span>
<select
required
value={selectedReplacement}
onChange={(e) => setSelectedReplacement(e.target.value)}
className="w-full rounded border border-input bg-background p-2 text-sm"
>
<option value="">-- Choose a replacement model --</option>
{replacementOptions.map((m) => (
<option key={m.id} value={m.slug}>
{m.display_name} ({m.slug})
</option>
))}
</select>
{replacementOptions.length === 0 && (
<p className="mt-2 text-xs text-destructive">
No replacement models available. You must have at least one
other enabled model before deleting this one.
</p>
)}
</label>
)}
{error && (
<div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
{error}
</div>
)}
<Dialog.Footer>
<Button
variant="ghost"
size="small"
type="button"
onClick={() => {
setOpen(false);
setSelectedReplacement("");
setError(null);
}}
disabled={isDeleting}
>
Cancel
</Button>
<Button
type="submit"
variant="primary"
size="small"
disabled={!canDelete}
className="bg-destructive text-destructive-foreground hover:bg-destructive/90"
>
{isDeleting
? "Deleting..."
: requiresMigration
? "Delete and Migrate"
: "Delete"}
</Button>
</Dialog.Footer>
</form>
</div>
</Dialog.Content>
</Dialog>
);
}

View File

@@ -1,129 +0,0 @@
"use client";
import { useState } from "react";
import { useRouter } from "next/navigation";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";
import { deleteLlmProviderAction } from "../actions";
export function DeleteProviderModal({ provider }: { provider: LlmProvider }) {
const [open, setOpen] = useState(false);
const [isDeleting, setIsDeleting] = useState(false);
const [error, setError] = useState<string | null>(null);
const router = useRouter();
const modelCount = provider.models?.length ?? 0;
const hasModels = modelCount > 0;
async function handleDelete(formData: FormData) {
setIsDeleting(true);
setError(null);
try {
await deleteLlmProviderAction(formData);
setOpen(false);
router.refresh();
} catch (err) {
setError(
err instanceof Error ? err.message : "Failed to delete provider",
);
} finally {
setIsDeleting(false);
}
}
return (
<Dialog
title="Delete Provider"
controlled={{ isOpen: open, set: setOpen }}
styling={{ maxWidth: "480px" }}
>
<Dialog.Trigger>
<Button
type="button"
variant="outline"
size="small"
className="min-w-0 text-destructive hover:bg-destructive/10"
>
Delete
</Button>
</Dialog.Trigger>
<Dialog.Content>
<div className="space-y-4">
<div
className={`rounded-lg border p-4 ${
hasModels
? "border-destructive/30 bg-destructive/10"
: "border-amber-500/30 bg-amber-500/10 dark:border-amber-400/30 dark:bg-amber-400/10"
}`}
>
<div className="flex items-start gap-3">
<div
className={`flex-shrink-0 ${
hasModels
? "text-destructive"
: "text-amber-600 dark:text-amber-400"
}`}
>
{hasModels ? "🚫" : "⚠️"}
</div>
<div className="text-sm text-foreground">
<p className="font-semibold">You are about to delete:</p>
<p className="mt-1">
<span className="font-medium">{provider.display_name}</span>{" "}
<span className="text-muted-foreground">
({provider.name})
</span>
</p>
{hasModels ? (
<p className="mt-2 text-destructive">
This provider has {modelCount} model(s). You must delete all
models before you can delete this provider.
</p>
) : (
<p className="mt-2 text-muted-foreground">
This provider has no models and can be safely deleted.
</p>
)}
</div>
</div>
</div>
<form action={handleDelete} className="space-y-4">
<input type="hidden" name="provider_id" value={provider.id} />
{error && (
<div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
{error}
</div>
)}
<Dialog.Footer>
<Button
variant="ghost"
size="small"
onClick={() => {
setOpen(false);
setError(null);
}}
disabled={isDeleting}
type="button"
>
Cancel
</Button>
<Button
type="submit"
variant="primary"
size="small"
disabled={isDeleting || hasModels}
className="bg-destructive text-destructive-foreground hover:bg-destructive/90 disabled:opacity-50"
>
{isDeleting ? "Deleting..." : "Delete Provider"}
</Button>
</Dialog.Footer>
</form>
</div>
</Dialog.Content>
</Dialog>
);
}

View File

@@ -1,288 +0,0 @@
"use client";
import { useState } from "react";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import type { LlmModel } from "@/app/api/__generated__/models/llmModel";
import { toggleLlmModelAction, fetchLlmModelUsage } from "../actions";
export function DisableModelModal({
model,
availableModels,
}: {
model: LlmModel;
availableModels: LlmModel[];
}) {
const [open, setOpen] = useState(false);
const [isDisabling, setIsDisabling] = useState(false);
const [error, setError] = useState<string | null>(null);
const [usageCount, setUsageCount] = useState<number | null>(null);
const [selectedMigration, setSelectedMigration] = useState<string>("");
const [wantsMigration, setWantsMigration] = useState(false);
const [migrationReason, setMigrationReason] = useState("");
const [customCreditCost, setCustomCreditCost] = useState<string>("");
// Filter out the current model and disabled models from replacement options
const migrationOptions = availableModels.filter(
(m) => m.id !== model.id && m.is_enabled,
);
async function fetchUsage() {
try {
const usage = await fetchLlmModelUsage(model.id);
setUsageCount(usage.node_count);
} catch {
setUsageCount(null);
}
}
async function handleDisable(formData: FormData) {
setIsDisabling(true);
setError(null);
try {
await toggleLlmModelAction(formData);
setOpen(false);
} catch (err) {
setError(err instanceof Error ? err.message : "Failed to disable model");
} finally {
setIsDisabling(false);
}
}
function resetState() {
setError(null);
setSelectedMigration("");
setWantsMigration(false);
setMigrationReason("");
setCustomCreditCost("");
}
const hasUsage = usageCount !== null && usageCount > 0;
return (
<Dialog
title="Disable Model"
controlled={{
isOpen: open,
set: async (isOpen) => {
setOpen(isOpen);
if (isOpen) {
setUsageCount(null);
resetState();
await fetchUsage();
}
},
}}
styling={{ maxWidth: "600px" }}
>
<Dialog.Trigger>
<Button
type="button"
variant="outline"
size="small"
className="min-w-0"
>
Disable
</Button>
</Dialog.Trigger>
<Dialog.Content>
<div className="mb-4 text-sm text-muted-foreground">
Disabling a model will hide it from users when creating new workflows.
</div>
<div className="space-y-4">
<div className="rounded-lg border border-amber-500/30 bg-amber-500/10 p-4 dark:border-amber-400/30 dark:bg-amber-400/10">
<div className="flex items-start gap-3">
<div className="flex-shrink-0 text-amber-600 dark:text-amber-400">
</div>
<div className="text-sm text-foreground">
<p className="font-semibold">You are about to disable:</p>
<p className="mt-1">
<span className="font-medium">{model.display_name}</span>{" "}
<span className="text-muted-foreground">({model.slug})</span>
</p>
{usageCount === null ? (
<p className="mt-2 text-muted-foreground">
Loading usage data...
</p>
) : usageCount > 0 ? (
<p className="mt-2 font-semibold">
Impact: {usageCount} block{usageCount !== 1 ? "s" : ""}{" "}
currently use this model
</p>
) : (
<p className="mt-2 text-muted-foreground">
No workflows are currently using this model.
</p>
)}
</div>
</div>
</div>
{hasUsage && (
<div className="space-y-4 rounded-lg border border-border bg-muted/50 p-4">
<label className="flex items-start gap-3">
<input
type="checkbox"
checked={wantsMigration}
onChange={(e) => {
setWantsMigration(e.target.checked);
if (!e.target.checked) {
setSelectedMigration("");
}
}}
className="mt-1"
/>
<div className="text-sm">
<span className="font-medium">
Migrate existing workflows to another model
</span>
<p className="mt-1 text-muted-foreground">
Creates a revertible migration record. If unchecked,
existing workflows will use automatic fallback to an enabled
model from the same provider.
</p>
</div>
</label>
{wantsMigration && (
<div className="space-y-4 border-t border-border pt-4">
<label className="block text-sm font-medium">
<span className="mb-2 block">
Replacement Model{" "}
<span className="text-destructive">*</span>
</span>
<select
required
value={selectedMigration}
onChange={(e) => setSelectedMigration(e.target.value)}
className="w-full rounded border border-input bg-background p-2 text-sm"
>
<option value="">-- Choose a replacement model --</option>
{migrationOptions.map((m) => (
<option key={m.id} value={m.slug}>
{m.display_name} ({m.slug})
</option>
))}
</select>
{migrationOptions.length === 0 && (
<p className="mt-2 text-xs text-destructive">
No other enabled models available for migration.
</p>
)}
</label>
<label className="block text-sm font-medium">
<span className="mb-2 block">
Migration Reason{" "}
<span className="font-normal text-muted-foreground">
(optional)
</span>
</span>
<input
type="text"
value={migrationReason}
onChange={(e) => setMigrationReason(e.target.value)}
placeholder="e.g., Provider outage, Cost reduction"
className="w-full rounded border border-input bg-background p-2 text-sm"
/>
<p className="mt-1 text-xs text-muted-foreground">
Helps track why the migration was made
</p>
</label>
<label className="block text-sm font-medium">
<span className="mb-2 block">
Custom Credit Cost{" "}
<span className="font-normal text-muted-foreground">
(optional)
</span>
</span>
<input
type="number"
min="0"
value={customCreditCost}
onChange={(e) => setCustomCreditCost(e.target.value)}
placeholder="Leave blank to use target model's cost"
className="w-full rounded border border-input bg-background p-2 text-sm"
/>
<p className="mt-1 text-xs text-muted-foreground">
Override pricing for migrated workflows. When set, billing
will use this cost instead of the target model&apos;s
cost.
</p>
</label>
</div>
)}
</div>
)}
<form action={handleDisable} className="space-y-4">
<input type="hidden" name="model_id" value={model.id} />
<input type="hidden" name="is_enabled" value="false" />
{wantsMigration && selectedMigration && (
<>
<input
type="hidden"
name="migrate_to_slug"
value={selectedMigration}
/>
{migrationReason && (
<input
type="hidden"
name="migration_reason"
value={migrationReason}
/>
)}
{customCreditCost && (
<input
type="hidden"
name="custom_credit_cost"
value={customCreditCost}
/>
)}
</>
)}
{error && (
<div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
{error}
</div>
)}
<Dialog.Footer>
<Button
variant="ghost"
size="small"
onClick={() => {
setOpen(false);
resetState();
}}
disabled={isDisabling}
>
Cancel
</Button>
<Button
type="submit"
variant="primary"
size="small"
disabled={
isDisabling ||
(wantsMigration && !selectedMigration) ||
usageCount === null
}
>
{isDisabling
? "Disabling..."
: wantsMigration && selectedMigration
? "Disable & Migrate"
: "Disable Model"}
</Button>
</Dialog.Footer>
</form>
</div>
</Dialog.Content>
</Dialog>
);
}
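
The dialog never calls the API directly: everything flows through hidden inputs into the toggleLlmModelAction server action. That action's body is not part of this diff, so the sketch below only illustrates the FormData contract the hidden fields imply; updateModel is a hypothetical stand-in for whatever backend call the real action wraps.

"use server";

// Hypothetical sketch of the FormData contract implied by the hidden
// inputs above; the real toggleLlmModelAction is not shown in this diff.
async function updateModel(_input: {
  modelId: string;
  isEnabled: boolean;
  migrateToSlug?: string;
  migrationReason?: string;
  customCreditCost?: number;
}): Promise<void> {
  // e.g. a fetch() against the admin LLM registry endpoint
}

export async function toggleLlmModelActionSketch(formData: FormData) {
  const migrateToSlug = formData.get("migrate_to_slug");
  const migrationReason = formData.get("migration_reason");
  const rawCost = formData.get("custom_credit_cost");
  await updateModel({
    modelId: String(formData.get("model_id")),
    isEnabled: formData.get("is_enabled") === "true",
    // Migration fields are only submitted when the admin opted in.
    migrateToSlug: migrateToSlug ? String(migrateToSlug) : undefined,
    migrationReason: migrationReason ? String(migrationReason) : undefined,
    customCreditCost: rawCost ? Number(rawCost) : undefined,
  });
}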

View File

@@ -1,223 +0,0 @@
"use client";
import { useState } from "react";
import { useRouter } from "next/navigation";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import type { LlmModel } from "@/app/api/__generated__/models/llmModel";
import type { LlmModelCreator } from "@/app/api/__generated__/models/llmModelCreator";
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";
import { updateLlmModelAction } from "../actions";
export function EditModelModal({
model,
providers,
creators,
}: {
model: LlmModel;
providers: LlmProvider[];
creators: LlmModelCreator[];
}) {
const router = useRouter();
const [open, setOpen] = useState(false);
const [isSubmitting, setIsSubmitting] = useState(false);
const [error, setError] = useState<string | null>(null);
const cost = model.costs?.[0];
const provider = providers.find((p) => p.id === model.provider_id);
async function handleSubmit(formData: FormData) {
setIsSubmitting(true);
setError(null);
try {
await updateLlmModelAction(formData);
setOpen(false);
router.refresh();
} catch (err) {
setError(err instanceof Error ? err.message : "Failed to update model");
} finally {
setIsSubmitting(false);
}
}
return (
<Dialog
title="Edit Model"
controlled={{ isOpen: open, set: setOpen }}
styling={{ maxWidth: "768px", maxHeight: "90vh", overflowY: "auto" }}
>
<Dialog.Trigger>
<Button variant="outline" size="small" className="min-w-0">
Edit
</Button>
</Dialog.Trigger>
<Dialog.Content>
<div className="mb-4 text-sm text-muted-foreground">
Update model metadata and pricing information.
</div>
{error && (
<div className="mb-4 rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
{error}
</div>
)}
<form action={handleSubmit} className="space-y-4">
<input type="hidden" name="model_id" value={model.id} />
<div className="grid gap-4 md:grid-cols-2">
<label className="text-sm font-medium">
Display Name
<input
required
name="display_name"
defaultValue={model.display_name}
className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
/>
</label>
<label className="text-sm font-medium">
Provider
<select
required
name="provider_id"
className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
defaultValue={model.provider_id}
>
{providers.map((p) => (
<option key={p.id} value={p.id}>
{p.display_name} ({p.name})
</option>
))}
</select>
<span className="text-xs text-muted-foreground">
Who hosts/serves the model
</span>
</label>
</div>
<div className="grid gap-4 md:grid-cols-2">
<label className="text-sm font-medium">
Creator
<select
name="creator_id"
className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
defaultValue={model.creator_id ?? ""}
>
<option value="">No creator selected</option>
{creators.map((c) => (
<option key={c.id} value={c.id}>
{c.display_name} ({c.name})
</option>
))}
</select>
<span className="text-xs text-muted-foreground">
Who made/trained the model (e.g., OpenAI, Meta)
</span>
</label>
</div>
<label className="text-sm font-medium">
Description
<textarea
name="description"
rows={2}
defaultValue={model.description ?? ""}
className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
placeholder="Optional description..."
/>
</label>
<div className="grid gap-4 md:grid-cols-2">
<label className="text-sm font-medium">
Context Window
<input
required
type="number"
name="context_window"
defaultValue={model.context_window}
className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
min={1}
/>
</label>
<label className="text-sm font-medium">
Max Output Tokens
<input
type="number"
name="max_output_tokens"
defaultValue={model.max_output_tokens ?? undefined}
className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
min={1}
/>
</label>
</div>
<div className="grid gap-4 md:grid-cols-2">
<label className="text-sm font-medium">
Credit Cost
<input
required
type="number"
name="credit_cost"
defaultValue={cost?.credit_cost ?? 0}
className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
min={0}
/>
<span className="text-xs text-muted-foreground">
Credits charged per run
</span>
</label>
<label className="text-sm font-medium">
Credential Provider
<select
required
name="credential_provider"
defaultValue={cost?.credential_provider ?? provider?.name ?? ""}
className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
>
<option value="" disabled>
Select provider
</option>
{providers.map((p) => (
<option key={p.id} value={p.name}>
{p.display_name} ({p.name})
</option>
))}
</select>
<span className="text-xs text-muted-foreground">
Must match a key in PROVIDER_CREDENTIALS
</span>
</label>
</div>
{/* Hidden defaults for credential_type and unit */}
<input
type="hidden"
name="credential_type"
value={
cost?.credential_type ??
provider?.default_credential_type ??
"api_key"
}
/>
<input type="hidden" name="unit" value={cost?.unit ?? "RUN"} />
<Dialog.Footer>
<Button
type="button"
variant="ghost"
size="small"
onClick={() => setOpen(false)}
disabled={isSubmitting}
>
Cancel
</Button>
<Button
variant="primary"
size="small"
type="submit"
disabled={isSubmitting}
>
{isSubmitting ? "Updating..." : "Update Model"}
</Button>
</Dialog.Footer>
</form>
</Dialog.Content>
</Dialog>
);
}

View File

@@ -1,263 +0,0 @@
"use client";
import { useState } from "react";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import { updateLlmProviderAction } from "../actions";
import { useRouter } from "next/navigation";
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";
export function EditProviderModal({ provider }: { provider: LlmProvider }) {
const [open, setOpen] = useState(false);
const [isSubmitting, setIsSubmitting] = useState(false);
const [error, setError] = useState<string | null>(null);
const router = useRouter();
async function handleSubmit(formData: FormData) {
setIsSubmitting(true);
setError(null);
try {
await updateLlmProviderAction(formData);
setOpen(false);
router.refresh();
} catch (err) {
setError(
err instanceof Error ? err.message : "Failed to update provider",
);
} finally {
setIsSubmitting(false);
}
}
return (
<Dialog
title="Edit Provider"
controlled={{ isOpen: open, set: setOpen }}
styling={{ maxWidth: "768px", maxHeight: "90vh", overflowY: "auto" }}
>
<Dialog.Trigger>
<Button variant="outline" size="small">
Edit
</Button>
</Dialog.Trigger>
<Dialog.Content>
<div className="mb-4 text-sm text-muted-foreground">
Update provider configuration and capabilities.
</div>
<form action={handleSubmit} className="space-y-6">
<input type="hidden" name="provider_id" value={provider.id} />
{/* Basic Information */}
<div className="space-y-4">
<div className="space-y-1">
<h3 className="text-sm font-semibold text-foreground">
Basic Information
</h3>
<p className="text-xs text-muted-foreground">
Core provider details
</p>
</div>
<div className="grid gap-4 sm:grid-cols-2">
<div className="space-y-2">
<label
htmlFor="name"
className="text-sm font-medium text-foreground"
>
Provider Slug <span className="text-destructive">*</span>
</label>
<input
id="name"
required
name="name"
defaultValue={provider.name}
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="e.g. openai"
/>
</div>
<div className="space-y-2">
<label
htmlFor="display_name"
className="text-sm font-medium text-foreground"
>
Display Name <span className="text-destructive">*</span>
</label>
<input
id="display_name"
required
name="display_name"
defaultValue={provider.display_name}
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="OpenAI"
/>
</div>
</div>
<div className="space-y-2">
<label
htmlFor="description"
className="text-sm font-medium text-foreground"
>
Description
</label>
<textarea
id="description"
name="description"
rows={3}
defaultValue={provider.description ?? ""}
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="Optional description..."
/>
</div>
</div>
{/* Default Credentials */}
<div className="space-y-4 border-t border-border pt-6">
<div className="space-y-1">
<h3 className="text-sm font-semibold text-foreground">
Default Credentials
</h3>
<p className="text-xs text-muted-foreground">
Credential provider name that matches the key in{" "}
<code className="rounded bg-muted px-1 py-0.5 font-mono text-xs">
PROVIDER_CREDENTIALS
</code>
</p>
</div>
<div className="grid gap-4 sm:grid-cols-2">
<div className="space-y-2">
<label
htmlFor="default_credential_provider"
className="text-sm font-medium text-foreground"
>
Credential Provider
</label>
<input
id="default_credential_provider"
name="default_credential_provider"
defaultValue={provider.default_credential_provider ?? ""}
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="openai"
/>
</div>
<div className="space-y-2">
<label
htmlFor="default_credential_id"
className="text-sm font-medium text-foreground"
>
Credential ID
</label>
<input
id="default_credential_id"
name="default_credential_id"
defaultValue={provider.default_credential_id ?? ""}
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="Optional credential ID"
/>
</div>
</div>
<div className="space-y-2">
<label
htmlFor="default_credential_type"
className="text-sm font-medium text-foreground"
>
Credential Type
</label>
<input
id="default_credential_type"
name="default_credential_type"
defaultValue={provider.default_credential_type ?? "api_key"}
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
placeholder="api_key"
/>
</div>
</div>
{/* Capabilities */}
<div className="space-y-4 border-t border-border pt-6">
<div className="space-y-1">
<h3 className="text-sm font-semibold text-foreground">
Capabilities
</h3>
<p className="text-xs text-muted-foreground">
Provider feature flags
</p>
</div>
<div className="grid gap-3 sm:grid-cols-2">
{[
{
name: "supports_tools",
label: "Supports tools",
checked: provider.supports_tools,
},
{
name: "supports_json_output",
label: "Supports JSON output",
checked: provider.supports_json_output,
},
{
name: "supports_reasoning",
label: "Supports reasoning",
checked: provider.supports_reasoning,
},
{
name: "supports_parallel_tool",
label: "Supports parallel tool calls",
checked: provider.supports_parallel_tool,
},
].map(({ name, label, checked }) => (
<div
key={name}
className="flex items-center gap-3 rounded-md border border-border bg-muted/30 px-4 py-3 transition-colors hover:bg-muted/50"
>
<input type="hidden" name={name} value="off" />
<input
id={name}
type="checkbox"
name={name}
defaultChecked={checked}
className="h-4 w-4 rounded border-input"
/>
<label
htmlFor={name}
className="text-sm font-medium text-foreground"
>
{label}
</label>
</div>
))}
</div>
</div>
{error && (
<div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
{error}
</div>
)}
<Dialog.Footer>
<Button
variant="ghost"
size="small"
type="button"
onClick={() => {
setOpen(false);
setError(null);
}}
disabled={isSubmitting}
>
Cancel
</Button>
<Button
variant="primary"
size="small"
type="submit"
disabled={isSubmitting}
>
{isSubmitting ? "Saving..." : "Save Changes"}
</Button>
</Dialog.Footer>
</form>
</Dialog.Content>
</Dialog>
);
}
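
A detail worth noting in the capabilities grid: each checkbox is preceded by a hidden input with the same name and value "off". Unchecked HTML checkboxes are omitted from form submissions entirely, so the sentinel guarantees every capability key is always present: getAll(name) yields ["off"] when unchecked and ["off", "on"] when checked. A minimal server-side reader for that convention (a sketch only; updateLlmProviderAction itself is not in this diff):

// Sketch of reading the hidden-"off" sentinel pattern server side;
// the checkbox value "on" (the HTML default) wins when present.
function readCapability(formData: FormData, name: string): boolean {
  const values = formData.getAll(name);
  return values[values.length - 1] === "on";
}

// e.g. const supportsTools = readCapability(formData, "supports_tools");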

View File

@@ -1,131 +0,0 @@
"use client";
import type { LlmModel } from "@/app/api/__generated__/models/llmModel";
import type { LlmModelCreator } from "@/app/api/__generated__/models/llmModelCreator";
import type { LlmModelMigration } from "@/app/api/__generated__/models/llmModelMigration";
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";
import { ErrorBoundary } from "@/components/molecules/ErrorBoundary/ErrorBoundary";
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { AddProviderModal } from "./AddProviderModal";
import { AddModelModal } from "./AddModelModal";
import { AddCreatorModal } from "./AddCreatorModal";
import { ProviderList } from "./ProviderList";
import { ModelsTable } from "./ModelsTable";
import { MigrationsTable } from "./MigrationsTable";
import { CreatorsTable } from "./CreatorsTable";
import { RecommendedModelSelector } from "./RecommendedModelSelector";
interface Props {
providers: LlmProvider[];
models: LlmModel[];
migrations: LlmModelMigration[];
creators: LlmModelCreator[];
}
function AdminErrorFallback() {
return (
<div className="mx-auto max-w-xl p-6">
<ErrorCard
responseError={{
message:
"An error occurred while loading the LLM Registry. Please refresh the page.",
}}
context="llm-registry"
onRetry={() => window.location.reload()}
/>
</div>
);
}
export function LlmRegistryDashboard({
providers,
models,
migrations,
creators,
}: Props) {
return (
<ErrorBoundary fallback={<AdminErrorFallback />} context="llm-registry">
<div className="mx-auto p-6">
<div className="flex flex-col gap-6">
{/* Header */}
<div>
<h1 className="text-3xl font-bold">LLM Registry</h1>
<p className="text-muted-foreground">
Manage providers, creators, models, and credit pricing
</p>
</div>
{/* Active Migrations Section - Only show if there are migrations */}
{migrations.length > 0 && (
<div className="rounded-lg border border-primary/30 bg-primary/5 p-6 shadow-sm">
<div className="mb-4">
<h2 className="text-xl font-semibold">Active Migrations</h2>
<p className="mt-1 text-sm text-muted-foreground">
These migrations can be reverted to restore workflows to their
original model
</p>
</div>
<MigrationsTable migrations={migrations} />
</div>
)}
{/* Providers & Creators Section - Side by Side */}
<div className="grid gap-6 lg:grid-cols-2">
{/* Providers */}
<div className="rounded-lg border bg-card p-6 shadow-sm">
<div className="mb-4 flex items-center justify-between">
<div>
<h2 className="text-xl font-semibold">Providers</h2>
<p className="mt-1 text-sm text-muted-foreground">
Who hosts/serves the models
</p>
</div>
<AddProviderModal />
</div>
<ProviderList providers={providers} />
</div>
{/* Creators */}
<div className="rounded-lg border bg-card p-6 shadow-sm">
<div className="mb-4 flex items-center justify-between">
<div>
<h2 className="text-xl font-semibold">Creators</h2>
<p className="mt-1 text-sm text-muted-foreground">
Who made/trained the models
</p>
</div>
<AddCreatorModal />
</div>
<CreatorsTable creators={creators} />
</div>
</div>
{/* Models Section */}
<div className="rounded-lg border bg-card p-6 shadow-sm">
<div className="mb-4 flex items-center justify-between">
<div>
<h2 className="text-xl font-semibold">Models</h2>
<p className="mt-1 text-sm text-muted-foreground">
Toggle availability, adjust context windows, and update credit
pricing
</p>
</div>
<AddModelModal providers={providers} creators={creators} />
</div>
{/* Recommended Model Selector */}
<div className="mb-6">
<RecommendedModelSelector models={models} />
</div>
<ModelsTable
models={models}
providers={providers}
creators={creators}
/>
</div>
</div>
</div>
</ErrorBoundary>
);
}

View File

@@ -1,133 +0,0 @@
"use client";
import { useState } from "react";
import type { LlmModelMigration } from "@/app/api/__generated__/models/llmModelMigration";
import { Button } from "@/components/atoms/Button/Button";
import {
Table,
TableBody,
TableCell,
TableHead,
TableHeader,
TableRow,
} from "@/components/atoms/Table/Table";
import { revertLlmMigrationAction } from "../actions";
export function MigrationsTable({
migrations,
}: {
migrations: LlmModelMigration[];
}) {
if (!migrations.length) {
return (
<div className="rounded-lg border border-dashed border-border p-6 text-center text-sm text-muted-foreground">
No active migrations. Migrations are created when you disable a model
with the &quot;Migrate existing workflows&quot; option.
</div>
);
}
return (
<div className="rounded-lg border">
<Table>
<TableHeader>
<TableRow>
<TableHead>Migration</TableHead>
<TableHead>Reason</TableHead>
<TableHead>Nodes Affected</TableHead>
<TableHead>Custom Cost</TableHead>
<TableHead>Created</TableHead>
<TableHead className="text-right">Actions</TableHead>
</TableRow>
</TableHeader>
<TableBody>
{migrations.map((migration) => (
<MigrationRow key={migration.id} migration={migration} />
))}
</TableBody>
</Table>
</div>
);
}
function MigrationRow({ migration }: { migration: LlmModelMigration }) {
const [isReverting, setIsReverting] = useState(false);
const [error, setError] = useState<string | null>(null);
async function handleRevert(formData: FormData) {
setIsReverting(true);
setError(null);
try {
await revertLlmMigrationAction(formData);
} catch (err) {
setError(
err instanceof Error ? err.message : "Failed to revert migration",
);
} finally {
setIsReverting(false);
}
}
const createdDate = new Date(migration.created_at);
return (
<>
<TableRow>
<TableCell>
<div className="text-sm">
<span className="font-medium">{migration.source_model_slug}</span>
<span className="mx-2 text-muted-foreground"></span>
<span className="font-medium">{migration.target_model_slug}</span>
</div>
</TableCell>
<TableCell>
<div className="text-sm text-muted-foreground">
{migration.reason || "—"}
</div>
</TableCell>
<TableCell>
<div className="text-sm">{migration.node_count}</div>
</TableCell>
<TableCell>
<div className="text-sm">
{migration.custom_credit_cost !== null &&
migration.custom_credit_cost !== undefined
? `${migration.custom_credit_cost} credits`
: "—"}
</div>
</TableCell>
<TableCell>
<div className="text-sm text-muted-foreground">
{createdDate.toLocaleDateString()}{" "}
{createdDate.toLocaleTimeString([], {
hour: "2-digit",
minute: "2-digit",
})}
</div>
</TableCell>
<TableCell className="text-right">
<form action={handleRevert} className="inline">
<input type="hidden" name="migration_id" value={migration.id} />
<Button
type="submit"
variant="outline"
size="small"
disabled={isReverting}
>
{isReverting ? "Reverting..." : "Revert"}
</Button>
</form>
</TableCell>
</TableRow>
{error && (
<TableRow>
<TableCell colSpan={6}>
<div className="rounded border border-destructive/30 bg-destructive/10 p-2 text-sm text-destructive">
{error}
</div>
</TableCell>
</TableRow>
)}
</>
);
}

View File

@@ -1,172 +0,0 @@
"use client";
import type { LlmModel } from "@/app/api/__generated__/models/llmModel";
import type { LlmModelCreator } from "@/app/api/__generated__/models/llmModelCreator";
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";
import {
Table,
TableBody,
TableCell,
TableHead,
TableHeader,
TableRow,
} from "@/components/atoms/Table/Table";
import { Button } from "@/components/atoms/Button/Button";
import { toggleLlmModelAction } from "../actions";
import { DeleteModelModal } from "./DeleteModelModal";
import { DisableModelModal } from "./DisableModelModal";
import { EditModelModal } from "./EditModelModal";
import { Star } from "@phosphor-icons/react";
export function ModelsTable({
models,
providers,
creators,
}: {
models: LlmModel[];
providers: LlmProvider[];
creators: LlmModelCreator[];
}) {
if (!models.length) {
return (
<div className="rounded-lg border border-dashed border-border p-6 text-center text-sm text-muted-foreground">
No models registered yet.
</div>
);
}
const providerLookup = new Map(
providers.map((provider) => [provider.id, provider]),
);
return (
<div className="rounded-lg border">
<Table>
<TableHeader>
<TableRow>
<TableHead>Model</TableHead>
<TableHead>Provider</TableHead>
<TableHead>Creator</TableHead>
<TableHead>Context Window</TableHead>
<TableHead>Max Output</TableHead>
<TableHead>Cost</TableHead>
<TableHead>Status</TableHead>
<TableHead>Actions</TableHead>
</TableRow>
</TableHeader>
<TableBody>
{models.map((model) => {
const cost = model.costs?.[0];
const provider = providerLookup.get(model.provider_id);
return (
<TableRow
key={model.id}
className={model.is_enabled ? "" : "opacity-60"}
>
<TableCell>
<div className="font-medium">{model.display_name}</div>
<div className="text-xs text-muted-foreground">
{model.slug}
</div>
</TableCell>
<TableCell>
{provider ? (
<>
<div>{provider.display_name}</div>
<div className="text-xs text-muted-foreground">
{provider.name}
</div>
</>
) : (
model.provider_id
)}
</TableCell>
<TableCell>
{model.creator ? (
<>
<div>{model.creator.display_name}</div>
<div className="text-xs text-muted-foreground">
{model.creator.name}
</div>
</>
) : (
<span className="text-muted-foreground"></span>
)}
</TableCell>
<TableCell>{model.context_window.toLocaleString()}</TableCell>
<TableCell>
{model.max_output_tokens
? model.max_output_tokens.toLocaleString()
: "—"}
</TableCell>
<TableCell>
{cost ? (
<>
<div className="font-medium">
{cost.credit_cost} credits
</div>
<div className="text-xs text-muted-foreground">
{cost.credential_provider}
</div>
</>
) : (
"—"
)}
</TableCell>
<TableCell>
<div className="flex flex-col gap-1">
<span
className={`inline-flex rounded-full px-2.5 py-1 text-xs font-semibold ${
model.is_enabled
? "bg-primary/10 text-primary"
: "bg-muted text-muted-foreground"
}`}
>
{model.is_enabled ? "Enabled" : "Disabled"}
</span>
{model.is_recommended && (
<span className="inline-flex items-center gap-1 rounded-full bg-amber-500/10 px-2.5 py-1 text-xs font-semibold text-amber-600 dark:text-amber-400">
<Star size={12} weight="fill" />
Recommended
</span>
)}
</div>
</TableCell>
<TableCell>
<div className="flex items-center justify-end gap-2">
{model.is_enabled ? (
<DisableModelModal
model={model}
availableModels={models}
/>
) : (
<EnableModelButton modelId={model.id} />
)}
<EditModelModal
model={model}
providers={providers}
creators={creators}
/>
<DeleteModelModal model={model} availableModels={models} />
</div>
</TableCell>
</TableRow>
);
})}
</TableBody>
</Table>
</div>
);
}
function EnableModelButton({ modelId }: { modelId: string }) {
return (
<form action={toggleLlmModelAction} className="inline">
<input type="hidden" name="model_id" value={modelId} />
<input type="hidden" name="is_enabled" value="true" />
<Button type="submit" variant="outline" size="small" className="min-w-0">
Enable
</Button>
</form>
);
}

View File

@@ -1,94 +0,0 @@
"use client";
import {
Table,
TableBody,
TableCell,
TableHead,
TableHeader,
TableRow,
} from "@/components/atoms/Table/Table";
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";
import { DeleteProviderModal } from "./DeleteProviderModal";
import { EditProviderModal } from "./EditProviderModal";
export function ProviderList({ providers }: { providers: LlmProvider[] }) {
if (!providers.length) {
return (
<div className="rounded-lg border border-dashed border-border p-6 text-center text-sm text-muted-foreground">
No providers configured yet.
</div>
);
}
return (
<div className="rounded-lg border">
<Table>
<TableHeader>
<TableRow>
<TableHead>Name</TableHead>
<TableHead>Display Name</TableHead>
<TableHead>Default Credential</TableHead>
<TableHead>Capabilities</TableHead>
<TableHead>Models</TableHead>
<TableHead className="w-[100px]">Actions</TableHead>
</TableRow>
</TableHeader>
<TableBody>
{providers.map((provider) => (
<TableRow key={provider.id}>
<TableCell className="font-medium">{provider.name}</TableCell>
<TableCell>{provider.display_name}</TableCell>
<TableCell>
{provider.default_credential_provider
? `${provider.default_credential_provider} (${provider.default_credential_id ?? "id?"})`
: "—"}
</TableCell>
<TableCell className="text-sm text-muted-foreground">
<div className="flex flex-wrap gap-2">
{provider.supports_tools && (
<span className="rounded bg-muted px-2 py-0.5 text-xs">
Tools
</span>
)}
{provider.supports_json_output && (
<span className="rounded bg-muted px-2 py-0.5 text-xs">
JSON
</span>
)}
{provider.supports_reasoning && (
<span className="rounded bg-muted px-2 py-0.5 text-xs">
Reasoning
</span>
)}
{provider.supports_parallel_tool && (
<span className="rounded bg-muted px-2 py-0.5 text-xs">
Parallel Tools
</span>
)}
</div>
</TableCell>
<TableCell className="text-sm">
<span
className={
(provider.models?.length ?? 0) > 0
? "text-foreground"
: "text-muted-foreground"
}
>
{provider.models?.length ?? 0}
</span>
</TableCell>
<TableCell>
<div className="flex gap-2">
<EditProviderModal provider={provider} />
<DeleteProviderModal provider={provider} />
</div>
</TableCell>
</TableRow>
))}
</TableBody>
</Table>
</div>
);
}

View File

@@ -1,87 +0,0 @@
"use client";
import { useState } from "react";
import { useRouter } from "next/navigation";
import type { LlmModel } from "@/app/api/__generated__/models/llmModel";
import { Button } from "@/components/atoms/Button/Button";
import { setRecommendedModelAction } from "../actions";
import { Star } from "@phosphor-icons/react";
export function RecommendedModelSelector({ models }: { models: LlmModel[] }) {
const router = useRouter();
const enabledModels = models.filter((m) => m.is_enabled);
const currentRecommended = models.find((m) => m.is_recommended);
const [selectedModelId, setSelectedModelId] = useState<string>(
currentRecommended?.id || "",
);
const [isSaving, setIsSaving] = useState(false);
const [error, setError] = useState<string | null>(null);
const hasChanges = selectedModelId !== (currentRecommended?.id || "");
async function handleSave() {
if (!selectedModelId) return;
setIsSaving(true);
setError(null);
try {
const formData = new FormData();
formData.set("model_id", selectedModelId);
await setRecommendedModelAction(formData);
router.refresh();
} catch (err) {
setError(err instanceof Error ? err.message : "Failed to save");
} finally {
setIsSaving(false);
}
}
return (
<div className="rounded-lg border border-border bg-card p-4">
<div className="mb-3 flex items-center gap-2">
<Star size={20} weight="fill" className="text-amber-500" />
<h3 className="text-sm font-semibold">Recommended Model</h3>
</div>
<p className="mb-3 text-xs text-muted-foreground">
The recommended model is shown as the default suggestion in model
selection dropdowns throughout the platform.
</p>
<div className="flex items-center gap-3">
<select
value={selectedModelId}
onChange={(e) => setSelectedModelId(e.target.value)}
className="flex-1 rounded-md border border-input bg-background px-3 py-2 text-sm"
disabled={isSaving}
>
<option value="">-- Select a model --</option>
{enabledModels.map((model) => (
<option key={model.id} value={model.id}>
{model.display_name} ({model.slug})
</option>
))}
</select>
<Button
type="button"
variant="primary"
size="small"
onClick={handleSave}
disabled={!hasChanges || !selectedModelId || isSaving}
>
{isSaving ? "Saving..." : "Save"}
</Button>
</div>
{error && <p className="mt-2 text-xs text-destructive">{error}</p>}
{currentRecommended && !hasChanges && (
<p className="mt-2 text-xs text-muted-foreground">
Currently set to:{" "}
<span className="font-medium">{currentRecommended.display_name}</span>
</p>
)}
</div>
);
}

View File

@@ -1,46 +0,0 @@
/**
* Server-side data fetching for LLM Registry page.
*/
import {
fetchLlmCreators,
fetchLlmMigrations,
fetchLlmModels,
fetchLlmProviders,
} from "./actions";
export async function getLlmRegistryPageData() {
// Fetch providers and models (required)
const [providersResponse, modelsResponse] = await Promise.all([
fetchLlmProviders(),
fetchLlmModels(),
]);
// Fetch migrations separately with fallback (table might not exist yet)
let migrations: Awaited<ReturnType<typeof fetchLlmMigrations>>["migrations"] =
[];
try {
const migrationsResponse = await fetchLlmMigrations(false);
migrations = migrationsResponse.migrations;
} catch {
// Migrations table might not exist yet - that's ok, just show empty list
console.warn("Could not fetch migrations - table may not exist yet");
}
// Fetch creators separately with fallback (table might not exist yet)
let creators: Awaited<ReturnType<typeof fetchLlmCreators>>["creators"] = [];
try {
const creatorsResponse = await fetchLlmCreators();
creators = creatorsResponse.creators;
} catch {
// Creators table might not exist yet - that's ok, just show empty list
console.warn("Could not fetch creators - table may not exist yet");
}
return {
providers: providersResponse.providers,
models: modelsResponse.models,
migrations,
creators,
};
}
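
getLlmRegistryPageData runs the two required fetches in parallel but awaits each optional fetch sequentially inside its own try/catch. An equivalent shape (an alternative sketch, not what the file does) expresses the same optional-table fallback with Promise.allSettled and also lets the two optional fetches run concurrently:

// Alternative sketch using Promise.allSettled: same fallback behavior,
// but the two optional fetches run concurrently.
const [migrationsResult, creatorsResult] = await Promise.allSettled([
  fetchLlmMigrations(false),
  fetchLlmCreators(),
]);
const migrations =
  migrationsResult.status === "fulfilled"
    ? migrationsResult.value.migrations
    : [];
const creators =
  creatorsResult.status === "fulfilled"
    ? creatorsResult.value.creators
    : [];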

View File

@@ -1,14 +0,0 @@
import { withRoleAccess } from "@/lib/withRoleAccess";
import { getLlmRegistryPageData } from "./getLlmRegistryPage";
import { LlmRegistryDashboard } from "./components/LlmRegistryDashboard";
async function LlmRegistryPage() {
const data = await getLlmRegistryPageData();
return <LlmRegistryDashboard {...data} />;
}
export default async function AdminLlmRegistryPage() {
const withAdminAccess = await withRoleAccess(["admin"]);
const ProtectedLlmRegistryPage = await withAdminAccess(LlmRegistryPage);
return <ProtectedLlmRegistryPage />;
}

View File

@@ -7,9 +7,8 @@ import { BlockCategoryResponse } from "@/app/api/__generated__/models/blockCateg
import { BlockResponse } from "@/app/api/__generated__/models/blockResponse";
import * as Sentry from "@sentry/nextjs";
import { getQueryClient } from "@/lib/react-query/queryClient";
import { useState, useEffect } from "react";
import { useState } from "react";
import { useToast } from "@/components/molecules/Toast/use-toast";
import BackendApi from "@/lib/autogpt-server-api";
export const useAllBlockContent = () => {
const { toast } = useToast();
@@ -94,32 +93,6 @@ export const useAllBlockContent = () => {
const isErrorOnLoadingMore = (categoryName: string) =>
errorLoadingCategories.has(categoryName);
// Listen for LLM registry refresh notifications
useEffect(() => {
const api = new BackendApi();
const queryClient = getQueryClient();
const handleNotification = (notification: any) => {
if (
notification?.type === "LLM_REGISTRY_REFRESH" ||
notification?.event === "registry_updated"
) {
// Invalidate all block-related queries to force refresh
const categoriesQueryKey = getGetV2GetBuilderBlockCategoriesQueryKey();
queryClient.invalidateQueries({ queryKey: categoriesQueryKey });
}
};
const unsubscribe = api.onWebSocketMessage(
"notification",
handleNotification,
);
return () => {
unsubscribe();
};
}, []);
return {
data,
isLoading,

View File

@@ -610,11 +610,8 @@ const NodeOneOfDiscriminatorField: FC<{
return oneOfVariants
.map((variant) => {
const discProperty = variant.properties?.[discriminatorProperty];
const variantDiscValue =
discProperty && "const" in discProperty
? (discProperty.const as string)
: undefined; // NOTE: can discriminators only be strings?
const variantDiscValue = variant.properties?.[discriminatorProperty]
?.const as string; // NOTE: can discriminators only be strings?
return {
value: variantDiscValue,
@@ -1127,47 +1124,9 @@ const NodeStringInput: FC<{
displayName,
}) => {
value ||= schema.default || "";
// Check if we have options with labels (e.g., LLM model picker)
const hasOptions = schema.options && schema.options.length > 0;
const hasEnum = schema.enum && schema.enum.length > 0;
// Helper to get display label for a value
const getDisplayLabel = (val: string) => {
if (hasOptions) {
const option = schema.options!.find((opt) => opt.value === val);
return option?.label || beautifyString(val);
}
return beautifyString(val);
};
return (
<div className={className}>
{hasOptions ? (
// Render options with proper labels (used by LLM model picker)
<Select
defaultValue={value}
onValueChange={(newValue) => handleInputChange(selfKey, newValue)}
>
<SelectTrigger>
<SelectValue placeholder={schema.placeholder || displayName}>
{value ? getDisplayLabel(value) : undefined}
</SelectValue>
</SelectTrigger>
<SelectContent className="nodrag">
{schema.options!.map((option, index) => (
<SelectItem
key={index}
value={option.value}
title={option.description}
>
{option.label || beautifyString(option.value)}
</SelectItem>
))}
</SelectContent>
</Select>
) : hasEnum ? (
// Fallback to enum with beautified strings
{schema.enum && schema.enum.length > 0 ? (
<Select
defaultValue={value}
onValueChange={(newValue) => handleInputChange(selfKey, newValue)}
@@ -1176,8 +1135,8 @@ const NodeStringInput: FC<{
<SelectValue placeholder={schema.placeholder || displayName} />
</SelectTrigger>
<SelectContent className="nodrag">
{schema
.enum!.filter((option) => option)
{schema.enum
.filter((option) => option)
.map((option, index) => (
<SelectItem key={index} value={option}>
{beautifyString(option)}

View File

@@ -0,0 +1,41 @@
"use client";
import { createContext, useContext, useRef, type ReactNode } from "react";
interface NewChatContextValue {
onNewChatClick: () => void;
setOnNewChatClick: (handler?: () => void) => void;
performNewChat?: () => void;
setPerformNewChat: (handler?: () => void) => void;
}
const NewChatContext = createContext<NewChatContextValue | null>(null);
export function NewChatProvider({ children }: { children: ReactNode }) {
const onNewChatRef = useRef<(() => void) | undefined>();
const performNewChatRef = useRef<(() => void) | undefined>();
const contextValueRef = useRef<NewChatContextValue>({
onNewChatClick() {
onNewChatRef.current?.();
},
setOnNewChatClick(handler?: () => void) {
onNewChatRef.current = handler;
},
performNewChat() {
performNewChatRef.current?.();
},
setPerformNewChat(handler?: () => void) {
performNewChatRef.current = handler;
},
});
return (
<NewChatContext.Provider value={contextValueRef.current}>
{children}
</NewChatContext.Provider>
);
}
export function useNewChat() {
return useContext(NewChatContext);
}
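
NewChatProvider deliberately hands out one stable object whose methods read from refs, so registering or swapping handlers never re-renders consumers. A hypothetical consumer (not part of this diff) that relies on that stability:

// Hypothetical consumer: a "New chat" button somewhere in the shell.
// Because the context value is a stable ref, this component does not
// re-render when handlers are registered or swapped.
import { useNewChat } from "./NewChatContext";

export function NewChatButton() {
  const newChat = useNewChat();
  if (!newChat) return null; // rendered outside NewChatProvider
  return (
    <button type="button" onClick={newChat.onNewChatClick}>
      New chat
    </button>
  );
}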

View File

@@ -1,8 +1,10 @@
"use client";
import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
import { ChatLoader } from "@/components/contextual/Chat/components/ChatLoader/ChatLoader";
import { NAVBAR_HEIGHT_PX } from "@/lib/constants";
import type { ReactNode } from "react";
import { useEffect } from "react";
import { useNewChat } from "../../NewChatContext";
import { DesktopSidebar } from "./components/DesktopSidebar/DesktopSidebar";
import { LoadingState } from "./components/LoadingState/LoadingState";
import { MobileDrawer } from "./components/MobileDrawer/MobileDrawer";
@@ -33,10 +35,25 @@ export function CopilotShell({ children }: Props) {
isReadyToShowContent,
} = useCopilotShell();
const newChatContext = useNewChat();
const handleNewChatClickWrapper =
newChatContext?.onNewChatClick || handleNewChat;
useEffect(
function registerNewChatHandler() {
if (!newChatContext) return;
newChatContext.setPerformNewChat(handleNewChat);
return function cleanup() {
newChatContext.setPerformNewChat(undefined);
};
},
[newChatContext, handleNewChat],
);
if (!isLoggedIn) {
return (
<div className="flex h-full items-center justify-center">
<LoadingSpinner size="large" />
<ChatLoader />
</div>
);
}
@@ -55,7 +72,7 @@ export function CopilotShell({ children }: Props) {
isFetchingNextPage={isFetchingNextPage}
onSelectSession={handleSelectSession}
onFetchNextPage={fetchNextPage}
onNewChat={handleNewChat}
onNewChat={handleNewChatClickWrapper}
hasActiveSession={Boolean(hasActiveSession)}
/>
)}
@@ -77,7 +94,7 @@ export function CopilotShell({ children }: Props) {
isFetchingNextPage={isFetchingNextPage}
onSelectSession={handleSelectSession}
onFetchNextPage={fetchNextPage}
onNewChat={handleNewChat}
onNewChat={handleNewChatClickWrapper}
onClose={handleCloseDrawer}
onOpenChange={handleDrawerOpenChange}
hasActiveSession={Boolean(hasActiveSession)}

View File

@@ -115,13 +115,13 @@ export function useCopilotShell() {
const isReadyToShowContent = isOnHomepage
? true
: checkReadyToShowContent(
areAllSessionsLoaded,
paramSessionId,
accumulatedSessions,
isCurrentSessionLoading,
currentSessionData,
hasAutoSelectedSession,
);
areAllSessionsLoaded,
paramSessionId,
accumulatedSessions,
isCurrentSessionLoading,
currentSessionData,
hasAutoSelectedSession,
);
function handleSelectSession(sessionId: string) {
// Navigate using replaceState to avoid full page reload
@@ -148,13 +148,15 @@ export function useCopilotShell() {
setHasAutoSelectedSession(false);
}
const isLoading = isSessionsLoading && accumulatedSessions.length === 0;
return {
isMobile,
isDrawerOpen,
isLoggedIn,
hasActiveSession:
Boolean(currentSessionId) && (!isOnHomepage || Boolean(paramSessionId)),
isLoading: isSessionsLoading || !areAllSessionsLoaded,
isLoading,
sessions: visibleSessions,
currentSessionId: sidebarSelectedSessionId,
handleSelectSession,

View File

@@ -1,5 +1,28 @@
import type { User } from "@supabase/supabase-js";
export type PageState =
| { type: "welcome" }
| { type: "newChat" }
| { type: "creating"; prompt: string }
| { type: "chat"; sessionId: string; initialPrompt?: string };
export function getInitialPromptFromState(
pageState: PageState,
storedInitialPrompt: string | undefined,
) {
if (storedInitialPrompt) return storedInitialPrompt;
if (pageState.type === "creating") return pageState.prompt;
if (pageState.type === "chat") return pageState.initialPrompt;
}
export function shouldResetToWelcome(pageState: PageState) {
return (
pageState.type !== "newChat" &&
pageState.type !== "creating" &&
pageState.type !== "welcome"
);
}
export function getGreetingName(user?: User | null): string {
if (!user) return "there";
const metadata = user.user_metadata as Record<string, unknown> | undefined;

View File

@@ -1,6 +1,11 @@
import type { ReactNode } from "react";
import { NewChatProvider } from "./NewChatContext";
import { CopilotShell } from "./components/CopilotShell/CopilotShell";
export default function CopilotLayout({ children }: { children: ReactNode }) {
return <CopilotShell>{children}</CopilotShell>;
return (
<NewChatProvider>
<CopilotShell>{children}</CopilotShell>
</NewChatProvider>
);
}

View File

@@ -1,142 +1,35 @@
"use client";
import { postV2CreateSession } from "@/app/api/__generated__/endpoints/chat/chat";
import { Skeleton } from "@/components/__legacy__/ui/skeleton";
import { Button } from "@/components/atoms/Button/Button";
import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
import { Text } from "@/components/atoms/Text/Text";
import { Chat } from "@/components/contextual/Chat/Chat";
import { ChatInput } from "@/components/contextual/Chat/components/ChatInput/ChatInput";
import { getHomepageRoute } from "@/lib/constants";
import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
import {
Flag,
type FlagValues,
useGetFlag,
} from "@/services/feature-flags/use-get-flag";
import { useFlags } from "launchdarkly-react-client-sdk";
import { useRouter, useSearchParams } from "next/navigation";
import { useEffect, useMemo, useRef, useState } from "react";
import { getGreetingName, getQuickActions } from "./helpers";
type PageState =
| { type: "welcome" }
| { type: "creating"; prompt: string }
| { type: "chat"; sessionId: string; initialPrompt?: string };
import { ChatLoader } from "@/components/contextual/Chat/components/ChatLoader/ChatLoader";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { useCopilotPage } from "./useCopilotPage";
export default function CopilotPage() {
const router = useRouter();
const searchParams = useSearchParams();
const { user, isLoggedIn, isUserLoading } = useSupabase();
const { state, handlers } = useCopilotPage();
const {
greetingName,
quickActions,
isLoading,
pageState,
isNewChatModalOpen,
isReady,
} = state;
const {
handleQuickAction,
startChatWithPrompt,
handleSessionNotFound,
handleStreamingChange,
handleCancelNewChat,
proceedWithNewChat,
handleNewChatModalOpen,
} = handlers;
const isChatEnabled = useGetFlag(Flag.CHAT);
const flags = useFlags<FlagValues>();
const homepageRoute = getHomepageRoute(isChatEnabled);
const envEnabled = process.env.NEXT_PUBLIC_LAUNCHDARKLY_ENABLED === "true";
const clientId = process.env.NEXT_PUBLIC_LAUNCHDARKLY_CLIENT_ID;
const isLaunchDarklyConfigured = envEnabled && Boolean(clientId);
const isFlagReady =
!isLaunchDarklyConfigured || flags[Flag.CHAT] !== undefined;
const [pageState, setPageState] = useState<PageState>({ type: "welcome" });
const initialPromptRef = useRef<Map<string, string>>(new Map());
const urlSessionId = searchParams.get("sessionId");
// Sync with URL sessionId (preserve initialPrompt from ref)
useEffect(
function syncSessionFromUrl() {
if (urlSessionId) {
// If we're already in chat state with this sessionId, don't overwrite
if (pageState.type === "chat" && pageState.sessionId === urlSessionId) {
return;
}
// Get initialPrompt from ref or current state
const storedInitialPrompt = initialPromptRef.current.get(urlSessionId);
const currentInitialPrompt =
storedInitialPrompt ||
(pageState.type === "creating"
? pageState.prompt
: pageState.type === "chat"
? pageState.initialPrompt
: undefined);
if (currentInitialPrompt) {
initialPromptRef.current.set(urlSessionId, currentInitialPrompt);
}
setPageState({
type: "chat",
sessionId: urlSessionId,
initialPrompt: currentInitialPrompt,
});
} else if (pageState.type === "chat") {
setPageState({ type: "welcome" });
}
},
[urlSessionId],
);
useEffect(
function ensureAccess() {
if (!isFlagReady) return;
if (isChatEnabled === false) {
router.replace(homepageRoute);
}
},
[homepageRoute, isChatEnabled, isFlagReady, router],
);
const greetingName = useMemo(
function getName() {
return getGreetingName(user);
},
[user],
);
const quickActions = useMemo(function getActions() {
return getQuickActions();
}, []);
async function startChatWithPrompt(prompt: string) {
if (!prompt?.trim()) return;
if (pageState.type === "creating") return;
const trimmedPrompt = prompt.trim();
setPageState({ type: "creating", prompt: trimmedPrompt });
try {
// Create session
const sessionResponse = await postV2CreateSession({
body: JSON.stringify({}),
});
if (sessionResponse.status !== 200 || !sessionResponse.data?.id) {
throw new Error("Failed to create session");
}
const sessionId = sessionResponse.data.id;
// Store initialPrompt in ref so it persists across re-renders
initialPromptRef.current.set(sessionId, trimmedPrompt);
// Update URL and show Chat with initial prompt
// Chat will handle sending the message and streaming
window.history.replaceState(null, "", `/copilot?sessionId=${sessionId}`);
setPageState({ type: "chat", sessionId, initialPrompt: trimmedPrompt });
} catch (error) {
console.error("[CopilotPage] Failed to start chat:", error);
setPageState({ type: "welcome" });
}
}
function handleQuickAction(action: string) {
startChatWithPrompt(action);
}
function handleSessionNotFound() {
router.replace("/copilot");
}
if (!isFlagReady || isChatEnabled === false || !isLoggedIn) {
if (!isReady) {
return null;
}
@@ -150,7 +43,55 @@ export default function CopilotPage() {
urlSessionId={pageState.sessionId}
initialPrompt={pageState.initialPrompt}
onSessionNotFound={handleSessionNotFound}
onStreamingChange={handleStreamingChange}
/>
<Dialog
title="Interrupt current chat?"
styling={{ maxWidth: 300, width: "100%" }}
controlled={{
isOpen: isNewChatModalOpen,
set: handleNewChatModalOpen,
}}
onClose={handleCancelNewChat}
>
<Dialog.Content>
<div className="flex flex-col gap-4">
<Text variant="body">
The current chat response will be interrupted. Are you sure you
want to start a new chat?
</Text>
<Dialog.Footer>
<Button
type="button"
variant="outline"
onClick={handleCancelNewChat}
>
Cancel
</Button>
<Button
type="button"
variant="primary"
onClick={proceedWithNewChat}
>
Start new chat
</Button>
</Dialog.Footer>
</div>
</Dialog.Content>
</Dialog>
</div>
);
}
if (pageState.type === "newChat") {
return (
<div className="flex h-full flex-1 flex-col items-center justify-center bg-[#f8f8f9]">
<div className="flex flex-col items-center gap-4">
<ChatLoader />
<Text variant="body" className="text-zinc-500">
Loading your chats...
</Text>
</div>
</div>
);
}
@@ -158,18 +99,18 @@ export default function CopilotPage() {
// Show loading state while creating session and sending first message
if (pageState.type === "creating") {
return (
<div className="flex h-full flex-1 flex-col items-center justify-center bg-[#f8f8f9] px-6 py-10">
<LoadingSpinner size="large" />
<Text variant="body" className="mt-4 text-zinc-500">
Starting your chat...
</Text>
<div className="flex h-full flex-1 flex-col items-center justify-center bg-[#f8f8f9]">
<div className="flex flex-col items-center gap-4">
<ChatLoader />
<Text variant="body" className="text-zinc-500">
Loading your chats...
</Text>
</div>
</div>
);
}
// Show Welcome screen
const isLoading = isUserLoading;
return (
<div className="flex h-full flex-1 items-center justify-center overflow-y-auto bg-[#f8f8f9] px-6 py-10">
<div className="w-full text-center">

View File

@@ -0,0 +1,258 @@
import { postV2CreateSession } from "@/app/api/__generated__/endpoints/chat/chat";
import { getHomepageRoute } from "@/lib/constants";
import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
import {
Flag,
type FlagValues,
useGetFlag,
} from "@/services/feature-flags/use-get-flag";
import { useFlags } from "launchdarkly-react-client-sdk";
import { useRouter } from "next/navigation";
import { useEffect, useReducer } from "react";
import { useNewChat } from "./NewChatContext";
import {
getGreetingName,
getQuickActions,
type PageState,
} from "./helpers";
import { useCopilotURLState } from "./useCopilotURLState";
type CopilotState = {
pageState: PageState;
isStreaming: boolean;
isNewChatModalOpen: boolean;
initialPrompts: Record<string, string>;
previousSessionId: string | null;
};
type CopilotAction =
| { type: "setPageState"; pageState: PageState }
| { type: "setStreaming"; isStreaming: boolean }
| { type: "setNewChatModalOpen"; isOpen: boolean }
| { type: "setInitialPrompt"; sessionId: string; prompt: string }
| { type: "setPreviousSessionId"; sessionId: string | null };
function isSamePageState(next: PageState, current: PageState) {
if (next.type !== current.type) return false;
if (next.type === "creating" && current.type === "creating") {
return next.prompt === current.prompt;
}
if (next.type === "chat" && current.type === "chat") {
return (
next.sessionId === current.sessionId &&
next.initialPrompt === current.initialPrompt
);
}
return true;
}
function copilotReducer(state: CopilotState, action: CopilotAction): CopilotState {
if (action.type === "setPageState") {
if (isSamePageState(action.pageState, state.pageState)) return state;
return { ...state, pageState: action.pageState };
}
if (action.type === "setStreaming") {
if (action.isStreaming === state.isStreaming) return state;
return { ...state, isStreaming: action.isStreaming };
}
if (action.type === "setNewChatModalOpen") {
if (action.isOpen === state.isNewChatModalOpen) return state;
return { ...state, isNewChatModalOpen: action.isOpen };
}
if (action.type === "setInitialPrompt") {
if (state.initialPrompts[action.sessionId] === action.prompt) return state;
return {
...state,
initialPrompts: {
...state.initialPrompts,
[action.sessionId]: action.prompt,
},
};
}
if (action.type === "setPreviousSessionId") {
if (state.previousSessionId === action.sessionId) return state;
return { ...state, previousSessionId: action.sessionId };
}
return state;
}
export function useCopilotPage() {
const router = useRouter();
const { user, isLoggedIn, isUserLoading } = useSupabase();
const isChatEnabled = useGetFlag(Flag.CHAT);
const flags = useFlags<FlagValues>();
const homepageRoute = getHomepageRoute(isChatEnabled);
const envEnabled = process.env.NEXT_PUBLIC_LAUNCHDARKLY_ENABLED === "true";
const clientId = process.env.NEXT_PUBLIC_LAUNCHDARKLY_CLIENT_ID;
const isLaunchDarklyConfigured = envEnabled && Boolean(clientId);
const isFlagReady =
!isLaunchDarklyConfigured || flags[Flag.CHAT] !== undefined;
const [state, dispatch] = useReducer(copilotReducer, {
pageState: { type: "welcome" },
isStreaming: false,
isNewChatModalOpen: false,
initialPrompts: {},
previousSessionId: null,
});
const newChatContext = useNewChat();
const greetingName = getGreetingName(user);
const quickActions = getQuickActions();
function setPageState(pageState: PageState) {
dispatch({ type: "setPageState", pageState });
}
function setInitialPrompt(sessionId: string, prompt: string) {
dispatch({ type: "setInitialPrompt", sessionId, prompt });
}
function setPreviousSessionId(sessionId: string | null) {
dispatch({ type: "setPreviousSessionId", sessionId });
}
const { setUrlSessionId } = useCopilotURLState({
pageState: state.pageState,
initialPrompts: state.initialPrompts,
previousSessionId: state.previousSessionId,
setPageState,
setInitialPrompt,
setPreviousSessionId,
});
useEffect(
function registerNewChatHandler() {
if (!newChatContext) return;
newChatContext.setOnNewChatClick(handleNewChatClick);
return function cleanup() {
newChatContext.setOnNewChatClick(undefined);
};
},
[newChatContext, handleNewChatClick],
);
useEffect(
function transitionNewChatToWelcome() {
if (state.pageState.type === "newChat") {
function setWelcomeState() {
dispatch({ type: "setPageState", pageState: { type: "welcome" } });
}
const timer = setTimeout(setWelcomeState, 300);
return function cleanup() {
clearTimeout(timer);
};
}
},
[state.pageState.type],
);
useEffect(
function ensureAccess() {
if (!isFlagReady) return;
if (isChatEnabled === false) {
router.replace(homepageRoute);
}
},
[homepageRoute, isChatEnabled, isFlagReady, router],
);
async function startChatWithPrompt(prompt: string) {
if (!prompt?.trim()) return;
if (state.pageState.type === "creating") return;
const trimmedPrompt = prompt.trim();
dispatch({
type: "setPageState",
pageState: { type: "creating", prompt: trimmedPrompt },
});
try {
const sessionResponse = await postV2CreateSession({
body: JSON.stringify({}),
});
if (sessionResponse.status !== 200 || !sessionResponse.data?.id) {
throw new Error("Failed to create session");
}
const sessionId = sessionResponse.data.id;
dispatch({
type: "setInitialPrompt",
sessionId,
prompt: trimmedPrompt,
});
await setUrlSessionId(sessionId, { shallow: false });
dispatch({
type: "setPageState",
pageState: { type: "chat", sessionId, initialPrompt: trimmedPrompt },
});
} catch (error) {
console.error("[CopilotPage] Failed to start chat:", error);
dispatch({ type: "setPageState", pageState: { type: "welcome" } });
}
}
function handleQuickAction(action: string) {
startChatWithPrompt(action);
}
function handleSessionNotFound() {
router.replace("/copilot");
}
function handleStreamingChange(isStreamingValue: boolean) {
dispatch({ type: "setStreaming", isStreaming: isStreamingValue });
}
function proceedWithNewChat() {
dispatch({ type: "setNewChatModalOpen", isOpen: false });
if (newChatContext?.performNewChat) {
newChatContext.performNewChat();
return;
}
setUrlSessionId(null, { shallow: false });
router.replace("/copilot");
}
function handleCancelNewChat() {
dispatch({ type: "setNewChatModalOpen", isOpen: false });
}
function handleNewChatModalOpen(isOpen: boolean) {
dispatch({ type: "setNewChatModalOpen", isOpen });
}
function handleNewChatClick() {
if (state.isStreaming) {
dispatch({ type: "setNewChatModalOpen", isOpen: true });
} else {
proceedWithNewChat();
}
}
return {
state: {
greetingName,
quickActions,
isLoading: isUserLoading,
pageState: state.pageState,
isNewChatModalOpen: state.isNewChatModalOpen,
isReady: isFlagReady && isChatEnabled !== false && isLoggedIn,
},
handlers: {
handleQuickAction,
startChatWithPrompt,
handleSessionNotFound,
handleStreamingChange,
handleCancelNewChat,
proceedWithNewChat,
handleNewChatModalOpen,
},
};
}
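
Every branch of copilotReducer returns the existing state object when the action is a no-op. React's useReducer bails out of re-rendering when the reducer returns a state that is Object.is-equal to the previous one, so redundant dispatches such as a repeated setStreaming(false) are free. A quick illustration of the guard:

// Illustration: a no-op action returns the same reference, so React
// skips the re-render for this dispatch.
const prev: CopilotState = {
  pageState: { type: "welcome" },
  isStreaming: false,
  isNewChatModalOpen: false,
  initialPrompts: {},
  previousSessionId: null,
};
const next = copilotReducer(prev, {
  type: "setStreaming",
  isStreaming: false,
});
console.assert(next === prev); // identical reference, render bails out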

View File

@@ -0,0 +1,80 @@
import { parseAsString, useQueryState } from "nuqs";
import { useLayoutEffect } from "react";
import {
getInitialPromptFromState,
type PageState,
shouldResetToWelcome,
} from "./helpers";
interface UseCopilotUrlStateArgs {
pageState: PageState;
initialPrompts: Record<string, string>;
previousSessionId: string | null;
setPageState: (pageState: PageState) => void;
setInitialPrompt: (sessionId: string, prompt: string) => void;
setPreviousSessionId: (sessionId: string | null) => void;
}
export function useCopilotURLState({
pageState,
initialPrompts,
previousSessionId,
setPageState,
setInitialPrompt,
setPreviousSessionId,
}: UseCopilotUrlStateArgs) {
const [urlSessionId, setUrlSessionId] = useQueryState(
"sessionId",
parseAsString,
);
function syncSessionFromUrl() {
if (urlSessionId) {
if (pageState.type === "chat" && pageState.sessionId === urlSessionId) {
setPreviousSessionId(urlSessionId);
return;
}
const storedInitialPrompt = initialPrompts[urlSessionId];
const currentInitialPrompt = getInitialPromptFromState(
pageState,
storedInitialPrompt,
);
if (currentInitialPrompt) {
setInitialPrompt(urlSessionId, currentInitialPrompt);
}
setPageState({
type: "chat",
sessionId: urlSessionId,
initialPrompt: currentInitialPrompt,
});
setPreviousSessionId(urlSessionId);
return;
}
const wasInChat = previousSessionId !== null && pageState.type === "chat";
setPreviousSessionId(null);
if (wasInChat) {
setPageState({ type: "newChat" });
return;
}
if (shouldResetToWelcome(pageState)) {
setPageState({ type: "welcome" });
}
}
useLayoutEffect(syncSessionFromUrl, [
urlSessionId,
pageState.type,
previousSessionId,
initialPrompts,
]);
return {
urlSessionId,
setUrlSessionId,
};
}

File diff suppressed because it is too large

View File

@@ -1,123 +0,0 @@
import * as React from "react";
import { cn } from "@/lib/utils";
const Table = React.forwardRef<
HTMLTableElement,
React.HTMLAttributes<HTMLTableElement>
>(({ className, ...props }, ref) => (
<div className="relative w-full overflow-auto">
<table
ref={ref}
className={cn("w-full caption-bottom text-sm", className)}
{...props}
/>
</div>
));
Table.displayName = "Table";
const TableHeader = React.forwardRef<
HTMLTableSectionElement,
React.HTMLAttributes<HTMLTableSectionElement>
>(({ className, ...props }, ref) => (
<thead ref={ref} className={cn("[&_tr]:border-b", className)} {...props} />
));
TableHeader.displayName = "TableHeader";
const TableBody = React.forwardRef<
HTMLTableSectionElement,
React.HTMLAttributes<HTMLTableSectionElement>
>(({ className, ...props }, ref) => (
<tbody
ref={ref}
className={cn("[&_tr:last-child]:border-0", className)}
{...props}
/>
));
TableBody.displayName = "TableBody";
const TableFooter = React.forwardRef<
HTMLTableSectionElement,
React.HTMLAttributes<HTMLTableSectionElement>
>(({ className, ...props }, ref) => (
<tfoot
ref={ref}
className={cn(
"border-t bg-neutral-100/50 font-medium dark:bg-neutral-800/50 [&>tr]:last:border-b-0",
className,
)}
{...props}
/>
));
TableFooter.displayName = "TableFooter";
const TableRow = React.forwardRef<
HTMLTableRowElement,
React.HTMLAttributes<HTMLTableRowElement>
>(({ className, ...props }, ref) => (
<tr
ref={ref}
className={cn(
"border-b transition-colors data-[state=selected]:bg-neutral-100 hover:bg-neutral-100/50 dark:data-[state=selected]:bg-neutral-800 dark:hover:bg-neutral-800/50",
className,
)}
{...props}
/>
));
TableRow.displayName = "TableRow";
const TableHead = React.forwardRef<
HTMLTableCellElement,
React.ThHTMLAttributes<HTMLTableCellElement>
>(({ className, ...props }, ref) => (
<th
ref={ref}
className={cn(
"h-10 px-2 text-left align-middle font-medium text-neutral-500 dark:text-neutral-400 [&:has([role=checkbox])]:pr-0 [&>[role=checkbox]]:translate-y-[2px]",
className,
)}
{...props}
/>
));
TableHead.displayName = "TableHead";
const TableCell = React.forwardRef<
HTMLTableCellElement,
React.TdHTMLAttributes<HTMLTableCellElement>
>(({ className, ...props }, ref) => (
<td
ref={ref}
className={cn(
"p-2 align-middle [&:has([role=checkbox])]:pr-0 [&>[role=checkbox]]:translate-y-[2px]",
className,
)}
{...props}
/>
));
TableCell.displayName = "TableCell";
const TableCaption = React.forwardRef<
HTMLTableCaptionElement,
React.HTMLAttributes<HTMLTableCaptionElement>
>(({ className, ...props }, ref) => (
<caption
ref={ref}
className={cn(
"mt-4 text-sm text-neutral-500 dark:text-neutral-400",
className,
)}
{...props}
/>
));
TableCaption.displayName = "TableCaption";
export {
Table,
TableHeader,
TableBody,
TableFooter,
TableHead,
TableRow,
TableCell,
TableCaption,
};

View File

@@ -13,6 +13,7 @@ export interface ChatProps {
urlSessionId?: string | null;
initialPrompt?: string;
onSessionNotFound?: () => void;
onStreamingChange?: (isStreaming: boolean) => void;
}
export function Chat({
@@ -20,6 +21,7 @@ export function Chat({
urlSessionId,
initialPrompt,
onSessionNotFound,
onStreamingChange,
}: ChatProps) {
const hasHandledNotFoundRef = useRef(false);
const {
@@ -73,6 +75,7 @@ export function Chat({
initialMessages={messages}
initialPrompt={initialPrompt}
className="flex-1"
onStreamingChange={onStreamingChange}
/>
)}
</main>

View File

@@ -4,6 +4,7 @@ import { Text } from "@/components/atoms/Text/Text";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { useBreakpoint } from "@/lib/hooks/useBreakpoint";
import { cn } from "@/lib/utils";
import { useEffect } from "react";
import { ChatInput } from "../ChatInput/ChatInput";
import { MessageList } from "../MessageList/MessageList";
import { useChatContainer } from "./useChatContainer";
@@ -13,6 +14,7 @@ export interface ChatContainerProps {
initialMessages: SessionDetailResponse["messages"];
initialPrompt?: string;
className?: string;
onStreamingChange?: (isStreaming: boolean) => void;
}
export function ChatContainer({
@@ -20,6 +22,7 @@ export function ChatContainer({
initialMessages,
initialPrompt,
className,
onStreamingChange,
}: ChatContainerProps) {
const {
messages,
@@ -36,6 +39,10 @@ export function ChatContainer({
initialPrompt,
});
useEffect(() => {
onStreamingChange?.(isStreaming);
}, [isStreaming, onStreamingChange]);
const breakpoint = useBreakpoint();
const isMobile =
breakpoint === "base" || breakpoint === "sm" || breakpoint === "md";

View File

@@ -1,12 +1,8 @@
import { Text } from "@/components/atoms/Text/Text";
export function ChatLoader() {
return (
<Text
variant="small"
className="bg-gradient-to-r from-neutral-600 via-neutral-500 to-neutral-600 bg-[length:200%_100%] bg-clip-text text-xs text-transparent [animation:shimmer_2s_ease-in-out_infinite]"
>
Taking a bit more time...
</Text>
<div className="flex items-center gap-2">
<div className="h-5 w-5 rounded-full bg-black animate-loader" />
</div>
);
}

View File

@@ -7,7 +7,6 @@ import {
ArrowsClockwiseIcon,
CheckCircleIcon,
CheckIcon,
CopyIcon,
} from "@phosphor-icons/react";
import { useRouter } from "next/navigation";
import { useCallback, useState } from "react";
@@ -340,11 +339,26 @@ export function ChatMessage({
size="icon"
onClick={handleCopy}
aria-label="Copy message"
className="p-1"
>
{copied ? (
<CheckIcon className="size-4 text-green-600" />
) : (
<CopyIcon className="size-4 text-zinc-600" />
<svg
xmlns="http://www.w3.org/2000/svg"
width="24"
height="24"
viewBox="0 0 24 24"
fill="none"
stroke="currentColor"
strokeWidth="2"
strokeLinecap="round"
strokeLinejoin="round"
className="size-3 text-zinc-600"
>
<rect width="14" height="14" x="8" y="8" rx="2" ry="2" />
<path d="M4 16c-1.1 0-2-.9-2-2V4c0-1.1.9-2 2-2h10c1.1 0 2 .9 2 2" />
</svg>
)}
</Button>
)}

View File

@@ -1,7 +1,6 @@
import { cn } from "@/lib/utils";
import { useEffect, useRef, useState } from "react";
import { AIChatBubble } from "../AIChatBubble/AIChatBubble";
import { ChatLoader } from "../ChatLoader/ChatLoader";
export interface ThinkingMessageProps {
className?: string;
@@ -9,7 +8,9 @@ export interface ThinkingMessageProps {
export function ThinkingMessage({ className }: ThinkingMessageProps) {
const [showSlowLoader, setShowSlowLoader] = useState(false);
const [showCoffeeMessage, setShowCoffeeMessage] = useState(false);
const timerRef = useRef<NodeJS.Timeout | null>(null);
const coffeeTimerRef = useRef<NodeJS.Timeout | null>(null);
useEffect(() => {
if (timerRef.current === null) {
@@ -18,11 +19,21 @@ export function ThinkingMessage({ className }: ThinkingMessageProps) {
}, 8000);
}
if (coffeeTimerRef.current === null) {
coffeeTimerRef.current = setTimeout(() => {
setShowCoffeeMessage(true);
}, 10000);
}
return () => {
if (timerRef.current) {
clearTimeout(timerRef.current);
timerRef.current = null;
}
if (coffeeTimerRef.current) {
clearTimeout(coffeeTimerRef.current);
coffeeTimerRef.current = null;
}
};
}, []);
@@ -37,16 +48,16 @@ export function ThinkingMessage({ className }: ThinkingMessageProps) {
<div className="flex min-w-0 flex-1 flex-col">
<AIChatBubble>
<div className="transition-all duration-500 ease-in-out">
{showSlowLoader ? (
<ChatLoader />
{showCoffeeMessage ? (
<span className="inline-block bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent animate-shimmer">
This could take a few minutes, grab a coffee
</span>
) : showSlowLoader ? (
<span className="inline-block bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent animate-shimmer">
Taking a bit more time...
</span>
) : (
<span
className="inline-block bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-clip-text text-transparent"
style={{
backgroundSize: "200% 100%",
animation: "shimmer 2s ease-in-out infinite",
}}
>
<span className="inline-block bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent animate-shimmer">
Thinking...
</span>
)}
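
The ThinkingMessage change stacks two one-shot timers so the loader copy escalates: "Thinking..." immediately, "Taking a bit more time..." after 8 s, and the coffee message after 10 s. The same pattern factored into a generic hook, shown only for illustration (hypothetical, not part of this change):

import { useEffect, useRef, useState } from "react";

// Returns 0 initially, then 1, 2, ... as each threshold elapses.
export function useEscalatingStage(thresholdsMs: number[]): number {
  const [stage, setStage] = useState(0);
  const timersRef = useRef<NodeJS.Timeout[]>([]);

  useEffect(() => {
    timersRef.current = thresholdsMs.map((ms, i) =>
      setTimeout(() => setStage(i + 1), ms),
    );
    return () => timersRef.current.forEach(clearTimeout);
    // Run once on mount, mirroring the component's empty deps array.
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, []);

  return stage;
}

// useEscalatingStage([8000, 10000]) => 0, then 1 at 8 s, then 2 at 10 s.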

View File

@@ -1,19 +1,8 @@
import { RJSFSchema } from "@rjsf/utils";
/**
* Options type for fields with label/value pairs (e.g., LLM model picker)
*/
type SchemaOption = {
label: string;
value: string;
group?: string;
description?: string;
};
/**
* Pre-processes the input schema to ensure all properties have a type defined.
* If a property doesn't have a type, it assigns a union of all supported JSON Schema types.
* Also converts custom 'options' array to RJSF's enum/enumNames format.
*/
export function preprocessInputSchema(schema: RJSFSchema): RJSFSchema {
@@ -31,20 +20,6 @@ export function preprocessInputSchema(schema: RJSFSchema): RJSFSchema {
if (property && typeof property === "object") {
const processedProperty = { ...property };
// Convert custom 'options' array to RJSF's enum/enumNames format
// This enables proper label display for dropdowns like the LLM model picker
if (
(processedProperty as any).options &&
Array.isArray((processedProperty as any).options) &&
(processedProperty as any).options.length > 0
) {
const options = (processedProperty as any).options as SchemaOption[];
processedProperty.enum = options.map((opt) => opt.value);
(processedProperty as any).enumNames = options.map(
(opt) => opt.label,
);
}
// Only add type if no type is defined AND no anyOf/oneOf/allOf is present
if (
!processedProperty.type &&

View File

@@ -77,45 +77,17 @@ export default function useAgentGraph(
// Load available blocks & flows (stable - only loads once)
useEffect(() => {
const loadBlocks = () => {
api
.getBlocks()
.then((blocks) => {
setAllBlocks(blocks);
})
.catch();
};
api
.getBlocks()
.then((blocks) => {
setAllBlocks(blocks);
})
.catch();
const loadFlows = () => {
api
.listGraphs()
.then((flows) => setAvailableFlows(flows))
.catch();
};
// Initial load
loadBlocks();
loadFlows();
// Listen for LLM registry refresh notifications to reload blocks
const deregisterRegistryRefresh = api.onWebSocketMessage(
"notification",
(notification) => {
if (
notification?.type === "LLM_REGISTRY_REFRESH" ||
notification?.event === "registry_updated"
) {
console.log(
"Received LLM registry refresh notification, reloading blocks...",
);
loadBlocks();
}
},
);
return () => {
deregisterRegistryRefresh();
};
api
.listGraphs()
.then((flows) => setAvailableFlows(flows))
.catch();
}, [api]);
// Subscribe to execution events

View File

@@ -186,7 +186,6 @@ export type BlockIOStringSubSchema = BlockIOSubSchemaMeta & {
default?: string;
format?: string;
maxLength?: number;
options?: { value: string; label: string; description?: string }[];
};
export type BlockIONumberSubSchema = BlockIOSubSchemaMeta & {

View File

@@ -285,20 +285,17 @@ export function fillObjectDefaultsFromSchema(
// Apply simple default values
obj[key] ??= propertySchema.default;
} else if (
"type" in propertySchema &&
propertySchema.type === "object" &&
"properties" in propertySchema
) {
// Recursively fill defaults for nested objects
obj[key] = fillObjectDefaultsFromSchema(obj[key] ?? {}, propertySchema);
} else if ("type" in propertySchema && propertySchema.type === "array") {
} else if (propertySchema.type === "array") {
obj[key] ??= [];
// If the array items are objects, fill their defaults as well
if (
Array.isArray(obj[key]) &&
propertySchema.items &&
"type" in propertySchema.items &&
propertySchema.items.type === "object" &&
propertySchema.items?.type === "object" &&
"properties" in propertySchema.items
) {
for (const item of obj[key]) {
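
To illustrate the simplified branches above, a hypothetical schema and the expected result (example input, not from the codebase):

// Hypothetical input exercising the default/object/array branches.
const schema = {
  type: "object",
  properties: {
    name: { type: "string", default: "untitled" },
    settings: {
      type: "object",
      properties: { retries: { type: "number", default: 3 } },
    },
    tags: { type: "array", items: { type: "string" } },
  },
};

const filled = fillObjectDefaultsFromSchema({}, schema as any);
// Expected: { name: "untitled", settings: { retries: 3 }, tags: [] }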

View File

@@ -157,12 +157,21 @@ const config = {
backgroundPosition: "-200% 0",
},
},
loader: {
"0%": {
boxShadow: "0 0 0 0 rgba(0, 0, 0, 0.25)",
},
"100%": {
boxShadow: "0 0 0 30px rgba(0, 0, 0, 0)",
},
},
},
animation: {
"accordion-down": "accordion-down 0.2s ease-out",
"accordion-up": "accordion-up 0.2s ease-out",
"fade-in": "fade-in 0.2s ease-out",
shimmer: "shimmer 2s ease-in-out infinite",
loader: "loader 1s infinite",
},
transitionDuration: {
"2000": "2000ms",