mirror of https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-01-17 02:58:01 -05:00

Compare commits: 8 commits, add-llm-ma... → fix/fronte...

Commits:
9b98b2df40
e55f05c7a8
4a9b13acb6
5ff669e999
ec03a13e26
b08851f5d7
8b1720e61d
aa5a039c5e
@@ -122,24 +122,6 @@ class ConnectionManager:
         return len(connections)
 
-    async def broadcast_to_all(self, *, method: WSMethod, data: dict) -> int:
-        """Broadcast a message to all active websocket connections."""
-        message = WSMessage(
-            method=method,
-            data=data,
-        ).model_dump_json()
-
-        connections = tuple(self.active_connections)
-        if not connections:
-            return 0
-
-        await asyncio.gather(
-            *(connection.send_text(message) for connection in connections),
-            return_exceptions=True,
-        )
-
-        return len(connections)
-
     async def _subscribe(self, channel_key: str, websocket: WebSocket) -> str:
         if channel_key not in self.subscriptions:
             self.subscriptions[channel_key] = set()
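Note: the removed broadcast_to_all leaned on asyncio.gather(..., return_exceptions=True) so that one dead socket could not abort the whole broadcast. A minimal standalone sketch of that fan-out pattern; the names here are illustrative, not from the codebase:

import asyncio
from typing import Awaitable, Callable, Iterable

async def fan_out(senders: Iterable[Callable[[str], Awaitable[None]]], payload: str) -> int:
    """Send payload to every sink; per-sink failures are collected, not raised."""
    targets = tuple(senders)
    if not targets:
        return 0
    # return_exceptions=True turns individual send errors into return values
    await asyncio.gather(*(send(payload) for send in targets), return_exceptions=True)
    return len(targets)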
@@ -173,64 +173,30 @@ async def get_execution_analytics_config(
         # Return with provider prefix for clarity
         return f"{provider_name}: {model_name}"
 
-    # Get all models from the registry (dynamic, not hardcoded enum)
-    from backend.data import llm_registry
-    from backend.server.v2.llm import db as llm_db
-
-    # Get the recommended model from the database (configurable via admin UI)
-    recommended_model_slug = await llm_db.get_recommended_model_slug()
-
     # Build the available models list
-    first_enabled_slug = None
-    for registry_model in llm_registry.iter_dynamic_models():
-        # Only include enabled models in the list
-        if not registry_model.is_enabled:
-            continue
-
-        # Track first enabled model as fallback
-        if first_enabled_slug is None:
-            first_enabled_slug = registry_model.slug
-
-        model_enum = LlmModel(registry_model.slug)  # Create enum instance from slug
-        label = generate_model_label(model_enum)
+    # Include all LlmModel values (no more filtering by hardcoded list)
+    recommended_model = LlmModel.GPT4O_MINI.value
+    for model in LlmModel:
+        label = generate_model_label(model)
         # Add "(Recommended)" suffix to the recommended model
-        if registry_model.slug == recommended_model_slug:
+        if model.value == recommended_model:
             label += " (Recommended)"
 
         available_models.append(
             ModelInfo(
-                value=registry_model.slug,
+                value=model.value,
                 label=label,
-                provider=registry_model.metadata.provider,
+                provider=model.provider,
             )
         )
 
     # Sort models by provider and name for better UX
     available_models.sort(key=lambda x: (x.provider, x.label))
 
-    # Handle case where no models are available
-    if not available_models:
-        logger.warning(
-            "No enabled LLM models found in registry. "
-            "Ensure models are configured and enabled in the LLM Registry."
-        )
-        # Provide a placeholder entry so admins see meaningful feedback
-        available_models.append(
-            ModelInfo(
-                value="",
-                label="No models available - configure in LLM Registry",
-                provider="none",
-            )
-        )
-
-    # Use the DB recommended model, or fallback to first enabled model
-    final_recommended = recommended_model_slug or first_enabled_slug or ""
-
     return ExecutionAnalyticsConfig(
         available_models=available_models,
         default_system_prompt=DEFAULT_SYSTEM_PROMPT,
         default_user_prompt=DEFAULT_USER_PROMPT,
-        recommended_model=final_recommended,
+        recommended_model=recommended_model,
     )
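Note: after this revert the dropdown is built by iterating the hardcoded LlmModel enum instead of the registry. A rough sketch of that restored loop, assuming ModelInfo, generate_model_label, and the enum's .value/.provider attributes behave as the diff implies:

def build_model_infos(recommended: str) -> list[ModelInfo]:
    """Build dropdown entries the way the restored enum loop does."""
    models: list[ModelInfo] = []
    for model in LlmModel:  # every enum member; no registry filtering
        label = generate_model_label(model)
        if model.value == recommended:
            label += " (Recommended)"
        models.append(ModelInfo(value=model.value, label=label, provider=model.provider))
    models.sort(key=lambda m: (m.provider, m.label))  # group by provider for the UI
    return models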
@@ -1,557 +0,0 @@
import logging

import autogpt_libs.auth
import fastapi

from backend.data import llm_registry
from backend.data.block_cost_config import refresh_llm_costs
from backend.server.v2.llm import db as llm_db
from backend.server.v2.llm import model as llm_model

logger = logging.getLogger(__name__)

router = fastapi.APIRouter(
    tags=["llm", "admin"],
    dependencies=[fastapi.Security(autogpt_libs.auth.requires_admin_user)],
)


async def _refresh_runtime_state() -> None:
    """Refresh the LLM registry and clear all related caches to ensure real-time updates."""
    logger.info("Refreshing LLM registry runtime state...")
    try:
        # Refresh registry from database
        await llm_registry.refresh_llm_registry()
        refresh_llm_costs()

        # Clear block schema caches so they're regenerated with updated model options
        from backend.data.block import BlockSchema

        BlockSchema.clear_all_schema_caches()
        logger.info("Cleared all block schema caches")

        # Clear the /blocks endpoint cache so frontend gets updated schemas
        try:
            from backend.api.features.v1 import _get_cached_blocks

            _get_cached_blocks.cache_clear()
            logger.info("Cleared /blocks endpoint cache")
        except Exception as e:
            logger.warning("Failed to clear /blocks cache: %s", e)

        # Clear the v2 builder providers cache (if it exists)
        try:
            from backend.api.features.builder import db as builder_db

            if hasattr(builder_db, "_get_all_providers"):
                builder_db._get_all_providers.cache_clear()
                logger.info("Cleared v2 builder providers cache")
        except Exception as e:
            logger.debug("Could not clear v2 builder cache: %s", e)

        # Notify all executor services to refresh their registry cache
        from backend.data.llm_registry import publish_registry_refresh_notification

        await publish_registry_refresh_notification()
        logger.info("Published registry refresh notification")
    except Exception as exc:
        logger.exception(
            "LLM runtime state refresh failed; caches may be stale: %s", exc
        )


@router.get(
    "/providers",
    summary="List LLM providers",
    response_model=llm_model.LlmProvidersResponse,
)
async def list_llm_providers(include_models: bool = True):
    providers = await llm_db.list_providers(include_models=include_models)
    return llm_model.LlmProvidersResponse(providers=providers)


@router.post(
    "/providers",
    summary="Create LLM provider",
    response_model=llm_model.LlmProvider,
)
async def create_llm_provider(request: llm_model.UpsertLlmProviderRequest):
    provider = await llm_db.upsert_provider(request=request)
    await _refresh_runtime_state()
    return provider


@router.patch(
    "/providers/{provider_id}",
    summary="Update LLM provider",
    response_model=llm_model.LlmProvider,
)
async def update_llm_provider(
    provider_id: str,
    request: llm_model.UpsertLlmProviderRequest,
):
    provider = await llm_db.upsert_provider(request=request, provider_id=provider_id)
    await _refresh_runtime_state()
    return provider


@router.get(
    "/models",
    summary="List LLM models",
    response_model=llm_model.LlmModelsResponse,
)
async def list_llm_models(provider_id: str | None = fastapi.Query(default=None)):
    models = await llm_db.list_models(provider_id=provider_id)
    return llm_model.LlmModelsResponse(models=models)


@router.post(
    "/models",
    summary="Create LLM model",
    response_model=llm_model.LlmModel,
)
async def create_llm_model(request: llm_model.CreateLlmModelRequest):
    model = await llm_db.create_model(request=request)
    await _refresh_runtime_state()
    return model


@router.patch(
    "/models/{model_id}",
    summary="Update LLM model",
    response_model=llm_model.LlmModel,
)
async def update_llm_model(
    model_id: str,
    request: llm_model.UpdateLlmModelRequest,
):
    model = await llm_db.update_model(model_id=model_id, request=request)
    await _refresh_runtime_state()
    return model


@router.patch(
    "/models/{model_id}/toggle",
    summary="Toggle LLM model availability",
    response_model=llm_model.ToggleLlmModelResponse,
)
async def toggle_llm_model(
    model_id: str,
    request: llm_model.ToggleLlmModelRequest,
):
    """
    Toggle a model's enabled status, optionally migrating workflows when disabling.

    If disabling a model and `migrate_to_slug` is provided, all workflows using
    this model will be migrated to the specified replacement model before disabling.
    A migration record is created which can be reverted later using the revert endpoint.

    Optional fields:
    - `migration_reason`: Reason for the migration (e.g., "Provider outage")
    - `custom_credit_cost`: Custom pricing override for billing during migration
    """
    try:
        result = await llm_db.toggle_model(
            model_id=model_id,
            is_enabled=request.is_enabled,
            migrate_to_slug=request.migrate_to_slug,
            migration_reason=request.migration_reason,
            custom_credit_cost=request.custom_credit_cost,
        )
        await _refresh_runtime_state()
        if result.nodes_migrated > 0:
            logger.info(
                "Toggled model '%s' to %s and migrated %d nodes to '%s' (migration_id=%s)",
                result.model.slug,
                "enabled" if request.is_enabled else "disabled",
                result.nodes_migrated,
                result.migrated_to_slug,
                result.migration_id,
            )
        return result
    except ValueError as exc:
        logger.warning("Model toggle validation failed: %s", exc)
        raise fastapi.HTTPException(status_code=400, detail=str(exc)) from exc
    except Exception as exc:
        logger.exception("Failed to toggle LLM model %s: %s", model_id, exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to toggle model availability",
        ) from exc


@router.get(
    "/models/{model_id}/usage",
    summary="Get model usage count",
    response_model=llm_model.LlmModelUsageResponse,
)
async def get_llm_model_usage(model_id: str):
    """Get the number of workflow nodes using this model."""
    try:
        return await llm_db.get_model_usage(model_id=model_id)
    except ValueError as exc:
        raise fastapi.HTTPException(status_code=404, detail=str(exc)) from exc
    except Exception as exc:
        logger.exception("Failed to get model usage %s: %s", model_id, exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to get model usage",
        ) from exc


@router.delete(
    "/models/{model_id}",
    summary="Delete LLM model and migrate workflows",
    response_model=llm_model.DeleteLlmModelResponse,
)
async def delete_llm_model(
    model_id: str,
    replacement_model_slug: str = fastapi.Query(
        ..., description="Slug of the model to migrate existing workflows to"
    ),
):
    """
    Delete a model and automatically migrate all workflows using it to a replacement model.

    This endpoint:
    1. Validates the replacement model exists and is enabled
    2. Counts how many workflow nodes use the model being deleted
    3. Updates all AgentNode.constantInput->model fields to the replacement
    4. Deletes the model record
    5. Refreshes all caches and notifies executors

    Example: DELETE /admin/llm/models/{id}?replacement_model_slug=gpt-4o
    """
    try:
        result = await llm_db.delete_model(
            model_id=model_id, replacement_model_slug=replacement_model_slug
        )
        await _refresh_runtime_state()
        logger.info(
            "Deleted model '%s' and migrated %d nodes to '%s'",
            result.deleted_model_slug,
            result.nodes_migrated,
            result.replacement_model_slug,
        )
        return result
    except ValueError as exc:
        # Validation errors (model not found, replacement invalid, etc.)
        logger.warning("Model deletion validation failed: %s", exc)
        raise fastapi.HTTPException(status_code=400, detail=str(exc)) from exc
    except Exception as exc:
        logger.exception("Failed to delete LLM model %s: %s", model_id, exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to delete model and migrate workflows",
        ) from exc


# ============================================================================
# Migration Management Endpoints
# ============================================================================


@router.get(
    "/migrations",
    summary="List model migrations",
    response_model=llm_model.LlmMigrationsResponse,
)
async def list_llm_migrations(
    include_reverted: bool = fastapi.Query(
        default=False, description="Include reverted migrations in the list"
    ),
):
    """
    List all model migrations.

    Migrations are created when disabling a model with the migrate_to_slug option.
    They can be reverted to restore the original model configuration.
    """
    try:
        migrations = await llm_db.list_migrations(include_reverted=include_reverted)
        return llm_model.LlmMigrationsResponse(migrations=migrations)
    except Exception as exc:
        logger.exception("Failed to list migrations: %s", exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to list migrations",
        ) from exc


@router.get(
    "/migrations/{migration_id}",
    summary="Get migration details",
    response_model=llm_model.LlmModelMigration,
)
async def get_llm_migration(migration_id: str):
    """Get details of a specific migration."""
    try:
        migration = await llm_db.get_migration(migration_id)
        if not migration:
            raise fastapi.HTTPException(
                status_code=404, detail=f"Migration '{migration_id}' not found"
            )
        return migration
    except fastapi.HTTPException:
        raise
    except Exception as exc:
        logger.exception("Failed to get migration %s: %s", migration_id, exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to get migration",
        ) from exc


@router.post(
    "/migrations/{migration_id}/revert",
    summary="Revert a model migration",
    response_model=llm_model.RevertMigrationResponse,
)
async def revert_llm_migration(
    migration_id: str,
    request: llm_model.RevertMigrationRequest | None = None,
):
    """
    Revert a model migration, restoring affected workflows to their original model.

    This only reverts the specific nodes that were part of the migration.
    The source model must exist for the revert to succeed.

    Options:
    - `re_enable_source_model`: Whether to re-enable the source model if disabled (default: True)

    Response includes:
    - `nodes_reverted`: Number of nodes successfully reverted
    - `nodes_already_changed`: Number of nodes that were modified since migration (not reverted)
    - `source_model_re_enabled`: Whether the source model was re-enabled

    Requirements:
    - Migration must not already be reverted
    - Source model must exist
    """
    try:
        re_enable = request.re_enable_source_model if request else True
        result = await llm_db.revert_migration(
            migration_id,
            re_enable_source_model=re_enable,
        )
        await _refresh_runtime_state()
        logger.info(
            "Reverted migration '%s': %d nodes restored from '%s' to '%s' "
            "(%d already changed, source re-enabled=%s)",
            migration_id,
            result.nodes_reverted,
            result.target_model_slug,
            result.source_model_slug,
            result.nodes_already_changed,
            result.source_model_re_enabled,
        )
        return result
    except ValueError as exc:
        logger.warning("Migration revert validation failed: %s", exc)
        raise fastapi.HTTPException(status_code=400, detail=str(exc)) from exc
    except Exception as exc:
        logger.exception("Failed to revert migration %s: %s", migration_id, exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to revert migration",
        ) from exc


# ============================================================================
# Creator Management Endpoints
# ============================================================================


@router.get(
    "/creators",
    summary="List model creators",
    response_model=llm_model.LlmCreatorsResponse,
)
async def list_llm_creators():
    """
    List all model creators.

    Creators are organizations that create/train models (e.g., OpenAI, Meta, Anthropic).
    This is distinct from providers who host/serve the models (e.g., OpenRouter).
    """
    try:
        creators = await llm_db.list_creators()
        return llm_model.LlmCreatorsResponse(creators=creators)
    except Exception as exc:
        logger.exception("Failed to list creators: %s", exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to list creators",
        ) from exc


@router.get(
    "/creators/{creator_id}",
    summary="Get creator details",
    operation_id="getV2GetLlmCreatorDetails",
    response_model=llm_model.LlmModelCreator,
)
async def get_llm_creator(creator_id: str):
    """Get details of a specific model creator."""
    try:
        creator = await llm_db.get_creator(creator_id)
        if not creator:
            raise fastapi.HTTPException(
                status_code=404, detail=f"Creator '{creator_id}' not found"
            )
        return creator
    except fastapi.HTTPException:
        raise
    except Exception as exc:
        logger.exception("Failed to get creator %s: %s", creator_id, exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to get creator",
        ) from exc


@router.post(
    "/creators",
    summary="Create model creator",
    response_model=llm_model.LlmModelCreator,
)
async def create_llm_creator(request: llm_model.UpsertLlmCreatorRequest):
    """
    Create a new model creator.

    A creator represents an organization that creates/trains AI models,
    such as OpenAI, Anthropic, Meta, or Google.
    """
    try:
        creator = await llm_db.upsert_creator(request=request)
        await _refresh_runtime_state()
        logger.info("Created model creator '%s' (%s)", creator.display_name, creator.id)
        return creator
    except Exception as exc:
        logger.exception("Failed to create creator: %s", exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to create creator",
        ) from exc


@router.patch(
    "/creators/{creator_id}",
    summary="Update model creator",
    response_model=llm_model.LlmModelCreator,
)
async def update_llm_creator(
    creator_id: str,
    request: llm_model.UpsertLlmCreatorRequest,
):
    """Update an existing model creator."""
    try:
        creator = await llm_db.upsert_creator(request=request, creator_id=creator_id)
        await _refresh_runtime_state()
        logger.info("Updated model creator '%s' (%s)", creator.display_name, creator_id)
        return creator
    except Exception as exc:
        logger.exception("Failed to update creator %s: %s", creator_id, exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to update creator",
        ) from exc


@router.delete(
    "/creators/{creator_id}",
    summary="Delete model creator",
    response_model=dict,
)
async def delete_llm_creator(creator_id: str):
    """
    Delete a model creator.

    This will remove the creator association from all models that reference it
    (sets creatorId to NULL), but will not delete the models themselves.
    """
    try:
        await llm_db.delete_creator(creator_id)
        await _refresh_runtime_state()
        logger.info("Deleted model creator '%s'", creator_id)
        return {"success": True, "message": f"Creator '{creator_id}' deleted"}
    except ValueError as exc:
        logger.warning("Creator deletion validation failed: %s", exc)
        raise fastapi.HTTPException(status_code=404, detail=str(exc)) from exc
    except Exception as exc:
        logger.exception("Failed to delete creator %s: %s", creator_id, exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to delete creator",
        ) from exc


# ============================================================================
# Recommended Model Endpoints
# ============================================================================


@router.get(
    "/recommended-model",
    summary="Get recommended model",
    response_model=llm_model.RecommendedModelResponse,
)
async def get_recommended_model():
    """
    Get the currently recommended LLM model.

    The recommended model is shown to users as the default/suggested option
    in model selection dropdowns.
    """
    try:
        model = await llm_db.get_recommended_model()
        return llm_model.RecommendedModelResponse(
            model=model,
            slug=model.slug if model else None,
        )
    except Exception as exc:
        logger.exception("Failed to get recommended model: %s", exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to get recommended model",
        ) from exc


@router.post(
    "/recommended-model",
    summary="Set recommended model",
    response_model=llm_model.SetRecommendedModelResponse,
)
async def set_recommended_model(request: llm_model.SetRecommendedModelRequest):
    """
    Set a model as the recommended model.

    This clears the recommended flag from any other model and sets it on
    the specified model. The model must be enabled to be set as recommended.

    The recommended model is displayed to users as the default/suggested
    option in model selection dropdowns throughout the platform.
    """
    try:
        model, previous_slug = await llm_db.set_recommended_model(request.model_id)
        await _refresh_runtime_state()
        logger.info(
            "Set recommended model to '%s' (previous: %s)",
            model.slug,
            previous_slug or "none",
        )
        return llm_model.SetRecommendedModelResponse(
            model=model,
            previous_recommended_slug=previous_slug,
            message=f"Model '{model.display_name}' is now the recommended model",
        )
    except ValueError as exc:
        logger.warning("Set recommended model validation failed: %s", exc)
        raise fastapi.HTTPException(status_code=400, detail=str(exc)) from exc
    except Exception as exc:
        logger.exception("Failed to set recommended model: %s", exc)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to set recommended model",
        ) from exc
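Note: for reference, disabling a model with migration through the now-removed toggle endpoint would have looked roughly like this. The path and JSON fields come from the route above; the host, model id, and token are hypothetical:

import httpx

resp = httpx.patch(
    "http://localhost:8006/admin/llm/models/model-123/toggle",  # hypothetical host and id
    json={
        "is_enabled": False,
        "migrate_to_slug": "gpt-4o-mini",       # replacement model for affected workflows
        "migration_reason": "Provider outage",  # optional audit note
    },
    headers={"Authorization": "Bearer <admin-jwt>"},  # router requires an admin user
)
resp.raise_for_status()
print(resp.json().get("nodes_migrated"))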
@@ -1,405 +0,0 @@
from unittest.mock import AsyncMock

import fastapi
import fastapi.testclient
import pytest
import pytest_mock
from autogpt_libs.auth.jwt_utils import get_jwt_payload
from pytest_snapshot.plugin import Snapshot

import backend.api.features.admin.llm_routes as llm_routes

app = fastapi.FastAPI()
app.include_router(llm_routes.router)

client = fastapi.testclient.TestClient(app)


@pytest.fixture(autouse=True)
def setup_app_admin_auth(mock_jwt_admin):
    """Setup admin auth overrides for all tests in this module"""
    app.dependency_overrides[get_jwt_payload] = mock_jwt_admin["get_jwt_payload"]
    yield
    app.dependency_overrides.clear()


def test_list_llm_providers_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test successful listing of LLM providers"""
    # Mock the database function
    mock_providers = [
        {
            "id": "provider-1",
            "name": "openai",
            "display_name": "OpenAI",
            "description": "OpenAI LLM provider",
            "supports_tools": True,
            "supports_json_output": True,
            "supports_reasoning": False,
            "supports_parallel_tool": True,
            "metadata": {},
            "models": [],
        },
        {
            "id": "provider-2",
            "name": "anthropic",
            "display_name": "Anthropic",
            "description": "Anthropic LLM provider",
            "supports_tools": True,
            "supports_json_output": True,
            "supports_reasoning": False,
            "supports_parallel_tool": True,
            "metadata": {},
            "models": [],
        },
    ]

    mocker.patch(
        "backend.api.features.admin.llm_routes.llm_db.list_providers",
        new=AsyncMock(return_value=mock_providers),
    )

    response = client.get("/admin/llm/providers")

    assert response.status_code == 200
    response_data = response.json()
    assert len(response_data["providers"]) == 2
    assert response_data["providers"][0]["name"] == "openai"

    # Snapshot test the response
    configured_snapshot.assert_match(response_data, "list_llm_providers_success.json")


def test_list_llm_models_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test successful listing of LLM models"""
    # Mock the database function
    mock_models = [
        {
            "id": "model-1",
            "slug": "gpt-4o",
            "display_name": "GPT-4o",
            "description": "GPT-4 Optimized",
            "provider_id": "provider-1",
            "context_window": 128000,
            "max_output_tokens": 16384,
            "is_enabled": True,
            "capabilities": {},
            "metadata": {},
            "costs": [
                {
                    "id": "cost-1",
                    "credit_cost": 10,
                    "credential_provider": "openai",
                    "metadata": {},
                }
            ],
        }
    ]

    mocker.patch(
        "backend.api.features.admin.llm_routes.llm_db.list_models",
        new=AsyncMock(return_value=mock_models),
    )

    response = client.get("/admin/llm/models")

    assert response.status_code == 200
    response_data = response.json()
    assert len(response_data["models"]) == 1
    assert response_data["models"][0]["slug"] == "gpt-4o"

    # Snapshot test the response
    configured_snapshot.assert_match(response_data, "list_llm_models_success.json")


def test_create_llm_provider_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test successful creation of LLM provider"""
    mock_provider = {
        "id": "new-provider-id",
        "name": "groq",
        "display_name": "Groq",
        "description": "Groq LLM provider",
        "supports_tools": True,
        "supports_json_output": True,
        "supports_reasoning": False,
        "supports_parallel_tool": False,
        "metadata": {},
    }

    mocker.patch(
        "backend.api.features.admin.llm_routes.llm_db.upsert_provider",
        new=AsyncMock(return_value=mock_provider),
    )

    mock_refresh = mocker.patch(
        "backend.api.features.admin.llm_routes._refresh_runtime_state",
        new=AsyncMock(),
    )

    request_data = {
        "name": "groq",
        "display_name": "Groq",
        "description": "Groq LLM provider",
        "supports_tools": True,
        "supports_json_output": True,
        "supports_reasoning": False,
        "supports_parallel_tool": False,
        "metadata": {},
    }

    response = client.post("/admin/llm/providers", json=request_data)

    assert response.status_code == 200
    response_data = response.json()
    assert response_data["name"] == "groq"
    assert response_data["display_name"] == "Groq"

    # Verify refresh was called
    mock_refresh.assert_called_once()

    # Snapshot test the response
    configured_snapshot.assert_match(response_data, "create_llm_provider_success.json")


def test_create_llm_model_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test successful creation of LLM model"""
    mock_model = {
        "id": "new-model-id",
        "slug": "gpt-4.1-mini",
        "display_name": "GPT-4.1 Mini",
        "description": "Latest GPT-4.1 Mini model",
        "provider_id": "provider-1",
        "context_window": 128000,
        "max_output_tokens": 16384,
        "is_enabled": True,
        "capabilities": {},
        "metadata": {},
        "costs": [
            {
                "id": "cost-id",
                "credit_cost": 5,
                "credential_provider": "openai",
                "metadata": {},
            }
        ],
    }

    mocker.patch(
        "backend.api.features.admin.llm_routes.llm_db.create_model",
        new=AsyncMock(return_value=mock_model),
    )

    mock_refresh = mocker.patch(
        "backend.api.features.admin.llm_routes._refresh_runtime_state",
        new=AsyncMock(),
    )

    request_data = {
        "slug": "gpt-4.1-mini",
        "display_name": "GPT-4.1 Mini",
        "description": "Latest GPT-4.1 Mini model",
        "provider_id": "provider-1",
        "context_window": 128000,
        "max_output_tokens": 16384,
        "is_enabled": True,
        "capabilities": {},
        "metadata": {},
        "costs": [
            {
                "credit_cost": 5,
                "credential_provider": "openai",
                "metadata": {},
            }
        ],
    }

    response = client.post("/admin/llm/models", json=request_data)

    assert response.status_code == 200
    response_data = response.json()
    assert response_data["slug"] == "gpt-4.1-mini"
    assert response_data["is_enabled"] is True

    # Verify refresh was called
    mock_refresh.assert_called_once()

    # Snapshot test the response
    configured_snapshot.assert_match(response_data, "create_llm_model_success.json")


def test_update_llm_model_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test successful update of LLM model"""
    mock_model = {
        "id": "model-1",
        "slug": "gpt-4o",
        "display_name": "GPT-4o Updated",
        "description": "Updated description",
        "provider_id": "provider-1",
        "context_window": 256000,
        "max_output_tokens": 32768,
        "is_enabled": True,
        "capabilities": {},
        "metadata": {},
        "costs": [
            {
                "id": "cost-1",
                "credit_cost": 15,
                "credential_provider": "openai",
                "metadata": {},
            }
        ],
    }

    mocker.patch(
        "backend.api.features.admin.llm_routes.llm_db.update_model",
        new=AsyncMock(return_value=mock_model),
    )

    mock_refresh = mocker.patch(
        "backend.api.features.admin.llm_routes._refresh_runtime_state",
        new=AsyncMock(),
    )

    request_data = {
        "display_name": "GPT-4o Updated",
        "description": "Updated description",
        "context_window": 256000,
        "max_output_tokens": 32768,
    }

    response = client.patch("/admin/llm/models/model-1", json=request_data)

    assert response.status_code == 200
    response_data = response.json()
    assert response_data["display_name"] == "GPT-4o Updated"
    assert response_data["context_window"] == 256000

    # Verify refresh was called
    mock_refresh.assert_called_once()

    # Snapshot test the response
    configured_snapshot.assert_match(response_data, "update_llm_model_success.json")


def test_toggle_llm_model_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test successful toggling of LLM model enabled status"""
    mock_model = {
        "id": "model-1",
        "slug": "gpt-4o",
        "display_name": "GPT-4o",
        "description": "GPT-4 Optimized",
        "provider_id": "provider-1",
        "context_window": 128000,
        "max_output_tokens": 16384,
        "is_enabled": False,
        "capabilities": {},
        "metadata": {},
        "costs": [],
    }

    mocker.patch(
        "backend.api.features.admin.llm_routes.llm_db.toggle_model",
        new=AsyncMock(return_value=mock_model),
    )

    mock_refresh = mocker.patch(
        "backend.api.features.admin.llm_routes._refresh_runtime_state",
        new=AsyncMock(),
    )

    request_data = {"is_enabled": False}

    response = client.patch("/admin/llm/models/model-1/toggle", json=request_data)

    assert response.status_code == 200
    response_data = response.json()
    assert response_data["is_enabled"] is False

    # Verify refresh was called
    mock_refresh.assert_called_once()

    # Snapshot test the response
    configured_snapshot.assert_match(response_data, "toggle_llm_model_success.json")


def test_delete_llm_model_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test successful deletion of LLM model with migration"""
    mock_response = {
        "deleted_model_slug": "gpt-3.5-turbo",
        "deleted_model_display_name": "GPT-3.5 Turbo",
        "replacement_model_slug": "gpt-4o-mini",
        "nodes_migrated": 42,
        "message": "Successfully deleted model 'GPT-3.5 Turbo' (gpt-3.5-turbo) "
        "and migrated 42 workflow node(s) to 'gpt-4o-mini'.",
    }

    mocker.patch(
        "backend.api.features.admin.llm_routes.llm_db.delete_model",
        new=AsyncMock(return_value=type("obj", (object,), mock_response)()),
    )

    mock_refresh = mocker.patch(
        "backend.api.features.admin.llm_routes._refresh_runtime_state",
        new=AsyncMock(),
    )

    response = client.delete(
        "/admin/llm/models/model-1?replacement_model_slug=gpt-4o-mini"
    )

    assert response.status_code == 200
    response_data = response.json()
    assert response_data["deleted_model_slug"] == "gpt-3.5-turbo"
    assert response_data["nodes_migrated"] == 42
    assert response_data["replacement_model_slug"] == "gpt-4o-mini"

    # Verify refresh was called
    mock_refresh.assert_called_once()

    # Snapshot test the response
    configured_snapshot.assert_match(response_data, "delete_llm_model_success.json")


def test_delete_llm_model_validation_error(
    mocker: pytest_mock.MockFixture,
) -> None:
    """Test deletion fails with proper error when validation fails"""
    mocker.patch(
        "backend.api.features.admin.llm_routes.llm_db.delete_model",
        new=AsyncMock(side_effect=ValueError("Replacement model 'invalid' not found")),
    )

    response = client.delete("/admin/llm/models/model-1?replacement_model_slug=invalid")

    assert response.status_code == 400
    assert "Replacement model 'invalid' not found" in response.json()["detail"]


def test_delete_llm_model_missing_replacement(
    mocker: pytest_mock.MockFixture,
) -> None:
    """Test deletion fails when replacement_model_slug is not provided"""
    response = client.delete("/admin/llm/models/model-1")

    # FastAPI will return 422 for missing required query params
    assert response.status_code == 422
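Note: the delete test builds its fake return value with type("obj", (object,), mock_response)(), which turns a dict into an ad-hoc object whose keys become attributes. That matters because the route reads result.nodes_migrated rather than subscripting. A self-contained illustration of the trick:

payload = {"nodes_migrated": 42, "deleted_model_slug": "gpt-3.5-turbo"}
result = type("obj", (object,), payload)()  # dict keys become class attributes
assert result.nodes_migrated == 42
assert result.deleted_model_slug == "gpt-3.5-turbo"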
@@ -15,7 +15,6 @@ from backend.blocks import load_all_blocks
 from backend.blocks.llm import LlmModel
 from backend.data.block import AnyBlockSchema, BlockCategory, BlockInfo, BlockSchema
 from backend.data.db import query_raw_with_schema
-from backend.data.llm_registry import get_all_model_slugs_for_validation
 from backend.integrations.providers import ProviderName
 from backend.util.cache import cached
 from backend.util.models import Pagination
@@ -32,14 +31,7 @@ from .model import (
 )
 
 logger = logging.getLogger(__name__)
 
-
-def _get_llm_models() -> list[str]:
-    """Get LLM model names for search matching from the registry."""
-    return [
-        slug.lower().replace("-", " ") for slug in get_all_model_slugs_for_validation()
-    ]
-
+llm_models = [name.name.lower().replace("_", " ") for name in LlmModel]
 
 MAX_LIBRARY_AGENT_RESULTS = 100
 MAX_MARKETPLACE_AGENT_RESULTS = 100
@@ -504,8 +496,8 @@ async def _get_static_counts():
 def _matches_llm_model(schema_cls: type[BlockSchema], query: str) -> bool:
     for field in schema_cls.model_fields.values():
         if field.annotation == LlmModel:
-            # Check if query matches any value in llm_models from registry
-            if any(query in name for name in _get_llm_models()):
+            # Check if query matches any value in llm_models
+            if any(query in name for name in llm_models):
                 return True
     return False
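Note: both versions of the search helper normalize model identifiers into lowercase space-separated names for substring matching; they differ only in the source of the identifiers. A toy comparison, with illustrative slug and member names:

# Registry path (removed): slugs like "gpt-4o-mini" -> "gpt 4o mini"
slugs = ["gpt-4o-mini", "claude-3-5-sonnet"]  # illustrative slugs
registry_names = [s.lower().replace("-", " ") for s in slugs]

# Enum path (restored): member names like "GPT4O_MINI" -> "gpt4o mini"
member_names = ["GPT4O_MINI", "CLAUDE_3_5_SONNET"]  # illustrative members
enum_names = [n.lower().replace("_", " ") for n in member_names]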
@@ -299,9 +299,6 @@ async def stream_chat_completion(
         f"new message_count={len(session.messages)}"
     )
 
-    if len(session.messages) > config.max_context_messages:
-        raise ValueError(f"Max messages exceeded: {config.max_context_messages}")
-
     logger.info(
         f"Upserting session: {session.session_id} with user id {session.user_id}, "
         f"message_count={len(session.messages)}"
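Note: the dropped lines were a hard cap on session length checked before the upsert. A minimal sketch of such a guard, assuming a config object exposing max_context_messages as the removed code did:

def check_context_limit(messages: list, max_context_messages: int) -> None:
    """Raise before persisting if the session has grown past the configured cap."""
    if len(messages) > max_context_messages:
        raise ValueError(f"Max messages exceeded: {max_context_messages}")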
@@ -8,8 +8,12 @@ from .add_understanding import AddUnderstandingTool
 from .agent_output import AgentOutputTool
 from .base import BaseTool
 from .find_agent import FindAgentTool
+from .find_block import FindBlockTool
 from .find_library_agent import FindLibraryAgentTool
+from .get_doc_page import GetDocPageTool
 from .run_agent import RunAgentTool
+from .run_block import RunBlockTool
+from .search_docs import SearchDocsTool
 
 if TYPE_CHECKING:
     from backend.api.features.chat.response_model import StreamToolOutputAvailable
@@ -18,9 +22,13 @@ if TYPE_CHECKING:
 
 TOOL_REGISTRY: dict[str, BaseTool] = {
     "add_understanding": AddUnderstandingTool(),
     "find_agent": FindAgentTool(),
+    "find_block": FindBlockTool(),
     "find_library_agent": FindLibraryAgentTool(),
     "run_agent": RunAgentTool(),
+    "run_block": RunBlockTool(),
     "agent_output": AgentOutputTool(),
+    "search_docs": SearchDocsTool(),
+    "get_doc_page": GetDocPageTool(),
 }
 
 # Export individual tool instances for backwards compatibility
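Note: with the registry dict above, dispatch is a plain name lookup. A hedged sketch, assuming _execute is the entry point as the tool implementations later in this diff suggest:

async def dispatch(tool_name: str, user_id: str | None, session, **tool_args):
    """Look up a tool by name and run it; unknown names fail fast."""
    tool = TOOL_REGISTRY.get(tool_name)
    if tool is None:
        raise ValueError(f"Unknown tool: {tool_name}")
    return await tool._execute(user_id=user_id, session=session, **tool_args)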
@@ -0,0 +1,192 @@
import logging
from typing import Any

from prisma.enums import ContentType

from backend.api.features.chat.model import ChatSession
from backend.api.features.chat.tools.base import BaseTool, ToolResponseBase
from backend.api.features.chat.tools.models import (
    BlockInfoSummary,
    BlockInputFieldInfo,
    BlockListResponse,
    ErrorResponse,
    NoResultsResponse,
)
from backend.api.features.store.hybrid_search import unified_hybrid_search
from backend.data.block import get_block

logger = logging.getLogger(__name__)


class FindBlockTool(BaseTool):
    """Tool for searching available blocks."""

    @property
    def name(self) -> str:
        return "find_block"

    @property
    def description(self) -> str:
        return (
            "Search for available blocks by name or description. "
            "Blocks are reusable components that perform specific tasks like "
            "sending emails, making API calls, processing text, etc. "
            "IMPORTANT: Use this tool FIRST to get the block's 'id' before calling run_block. "
            "The response includes each block's id, required_inputs, and input_schema."
        )

    @property
    def parameters(self) -> dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": (
                        "Search query to find blocks by name or description. "
                        "Use keywords like 'email', 'http', 'text', 'ai', etc."
                    ),
                },
            },
            "required": ["query"],
        }

    @property
    def requires_auth(self) -> bool:
        return True

    async def _execute(
        self,
        user_id: str | None,
        session: ChatSession,
        **kwargs,
    ) -> ToolResponseBase:
        """Search for blocks matching the query.

        Args:
            user_id: User ID (required)
            session: Chat session
            query: Search query

        Returns:
            BlockListResponse: List of matching blocks
            NoResultsResponse: No blocks found
            ErrorResponse: Error message
        """
        query = kwargs.get("query", "").strip()
        session_id = session.session_id

        if not query:
            return ErrorResponse(
                message="Please provide a search query",
                session_id=session_id,
            )

        try:
            # Search for blocks using hybrid search
            results, total = await unified_hybrid_search(
                query=query,
                content_types=[ContentType.BLOCK],
                page=1,
                page_size=10,
            )

            if not results:
                return NoResultsResponse(
                    message=f"No blocks found for '{query}'",
                    suggestions=[
                        "Try broader keywords like 'email', 'http', 'text', 'ai'",
                        "Check spelling of technical terms",
                    ],
                    session_id=session_id,
                )

            # Enrich results with full block information
            blocks: list[BlockInfoSummary] = []
            for result in results:
                block_id = result["content_id"]
                block = get_block(block_id)

                if block:
                    # Get input/output schemas
                    input_schema = {}
                    output_schema = {}
                    try:
                        input_schema = block.input_schema.jsonschema()
                    except Exception:
                        pass
                    try:
                        output_schema = block.output_schema.jsonschema()
                    except Exception:
                        pass

                    # Get categories from block instance
                    categories = []
                    if hasattr(block, "categories") and block.categories:
                        categories = [cat.value for cat in block.categories]

                    # Extract required inputs for easier use
                    required_inputs: list[BlockInputFieldInfo] = []
                    if input_schema:
                        properties = input_schema.get("properties", {})
                        required_fields = set(input_schema.get("required", []))
                        # Get credential field names to exclude from required inputs
                        credentials_fields = set(
                            block.input_schema.get_credentials_fields().keys()
                        )

                        for field_name, field_schema in properties.items():
                            # Skip credential fields - they're handled separately
                            if field_name in credentials_fields:
                                continue

                            required_inputs.append(
                                BlockInputFieldInfo(
                                    name=field_name,
                                    type=field_schema.get("type", "string"),
                                    description=field_schema.get("description", ""),
                                    required=field_name in required_fields,
                                    default=field_schema.get("default"),
                                )
                            )

                    blocks.append(
                        BlockInfoSummary(
                            id=block_id,
                            name=block.name,
                            description=block.description or "",
                            categories=categories,
                            input_schema=input_schema,
                            output_schema=output_schema,
                            required_inputs=required_inputs,
                        )
                    )

            if not blocks:
                return NoResultsResponse(
                    message=f"No blocks found for '{query}'",
                    suggestions=[
                        "Try broader keywords like 'email', 'http', 'text', 'ai'",
                    ],
                    session_id=session_id,
                )

            return BlockListResponse(
                message=(
                    f"Found {len(blocks)} block(s) matching '{query}'. "
                    "To execute a block, use run_block with the block's 'id' field "
                    "and provide 'input_data' matching the block's input_schema."
                ),
                blocks=blocks,
                count=len(blocks),
                query=query,
                session_id=session_id,
            )

        except Exception as e:
            logger.error(f"Error searching blocks: {e}", exc_info=True)
            return ErrorResponse(
                message="Failed to search blocks",
                error=str(e),
                session_id=session_id,
            )
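Note: the tool descriptions prescribe a two-step flow: find_block first, then run_block with the returned id. A hypothetical transcript of the two tool-call argument payloads; the id and input fields are illustrative:

# Step 1: find_block returns candidates with ids and required_inputs.
find_args = {"query": "send email"}
# -> BlockListResponse(blocks=[BlockInfoSummary(id="a1b2-...", ...)], ...)

# Step 2: run_block reuses the id verbatim; input_data follows required_inputs.
run_args = {
    "block_id": "a1b2-...",  # hypothetical id copied from step 1, never guessed
    "input_data": {"to": "user@example.com", "subject": "Hi"},
}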
@@ -0,0 +1,148 @@
"""GetDocPageTool - Fetch full content of a documentation page."""

import logging
from pathlib import Path
from typing import Any

from backend.api.features.chat.model import ChatSession
from backend.api.features.chat.tools.base import BaseTool
from backend.api.features.chat.tools.models import (
    DocPageResponse,
    ErrorResponse,
    ToolResponseBase,
)

logger = logging.getLogger(__name__)

# Base URL for documentation (can be configured)
DOCS_BASE_URL = "https://docs.agpt.co"


class GetDocPageTool(BaseTool):
    """Tool for fetching full content of a documentation page."""

    @property
    def name(self) -> str:
        return "get_doc_page"

    @property
    def description(self) -> str:
        return (
            "Get the full content of a documentation page by its path. "
            "Use this after search_docs to read the complete content of a relevant page."
        )

    @property
    def parameters(self) -> dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "path": {
                    "type": "string",
                    "description": (
                        "The path to the documentation file, as returned by search_docs. "
                        "Example: 'platform/block-sdk-guide.md'"
                    ),
                },
            },
            "required": ["path"],
        }

    @property
    def requires_auth(self) -> bool:
        return False  # Documentation is public

    def _get_docs_root(self) -> Path:
        """Get the documentation root directory."""
        this_file = Path(__file__)
        project_root = this_file.parent.parent.parent.parent.parent.parent.parent.parent
        return project_root / "docs"

    def _extract_title(self, content: str, fallback: str) -> str:
        """Extract title from markdown content."""
        lines = content.split("\n")
        for line in lines:
            if line.startswith("# "):
                return line[2:].strip()
        return fallback

    def _make_doc_url(self, path: str) -> str:
        """Create a URL for a documentation page."""
        url_path = path.rsplit(".", 1)[0] if "." in path else path
        return f"{DOCS_BASE_URL}/{url_path}"

    async def _execute(
        self,
        user_id: str | None,
        session: ChatSession,
        **kwargs,
    ) -> ToolResponseBase:
        """Fetch full content of a documentation page.

        Args:
            user_id: User ID (not required for docs)
            session: Chat session
            path: Path to the documentation file

        Returns:
            DocPageResponse: Full document content
            ErrorResponse: Error message
        """
        path = kwargs.get("path", "").strip()
        session_id = session.session_id if session else None

        if not path:
            return ErrorResponse(
                message="Please provide a documentation path.",
                error="Missing path parameter",
                session_id=session_id,
            )

        # Sanitize path to prevent directory traversal
        if ".." in path or path.startswith("/"):
            return ErrorResponse(
                message="Invalid documentation path.",
                error="invalid_path",
                session_id=session_id,
            )

        docs_root = self._get_docs_root()
        full_path = docs_root / path

        if not full_path.exists():
            return ErrorResponse(
                message=f"Documentation page not found: {path}",
                error="not_found",
                session_id=session_id,
            )

        # Ensure the path is within docs root
        try:
            full_path.resolve().relative_to(docs_root.resolve())
        except ValueError:
            return ErrorResponse(
                message="Invalid documentation path.",
                error="invalid_path",
                session_id=session_id,
            )

        try:
            content = full_path.read_text(encoding="utf-8")
            title = self._extract_title(content, path)

            return DocPageResponse(
                message=f"Retrieved documentation page: {title}",
                title=title,
                path=path,
                content=content,
                doc_url=self._make_doc_url(path),
                session_id=session_id,
            )

        except Exception as e:
            logger.error(f"Failed to read documentation page {path}: {e}")
            return ErrorResponse(
                message=f"Failed to read documentation page: {str(e)}",
                error="read_failed",
                session_id=session_id,
            )
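Note: get_doc_page layers two traversal checks: a cheap string reject of ".." and absolute paths, then a resolve-and-containment check. The second is the load-bearing one, since resolve() collapses symlinks and dot segments. A standalone sketch of that containment check:

from pathlib import Path

def is_within(root: Path, candidate: str) -> bool:
    """True only if root/candidate resolves to a location under root."""
    try:
        (root / candidate).resolve().relative_to(root.resolve())
        return True
    except ValueError:  # relative_to raises when not a subpath
        return False

assert is_within(Path("docs"), "platform/guide.md")
assert not is_within(Path("docs"), "../secrets.txt")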
@@ -21,6 +21,10 @@ class ResponseType(str, Enum):
     NO_RESULTS = "no_results"
     AGENT_OUTPUT = "agent_output"
     UNDERSTANDING_UPDATED = "understanding_updated"
+    BLOCK_LIST = "block_list"
+    BLOCK_OUTPUT = "block_output"
+    DOC_SEARCH_RESULTS = "doc_search_results"
+    DOC_PAGE = "doc_page"
 
 
 # Base response model
@@ -209,3 +213,83 @@ class UnderstandingUpdatedResponse(ToolResponseBase):
     type: ResponseType = ResponseType.UNDERSTANDING_UPDATED
     updated_fields: list[str] = Field(default_factory=list)
     current_understanding: dict[str, Any] = Field(default_factory=dict)
+
+
+# Documentation search models
+class DocSearchResult(BaseModel):
+    """A single documentation search result."""
+
+    title: str
+    path: str
+    section: str
+    snippet: str  # Short excerpt for UI display
+    score: float
+    doc_url: str | None = None
+
+
+class DocSearchResultsResponse(ToolResponseBase):
+    """Response for search_docs tool."""
+
+    type: ResponseType = ResponseType.DOC_SEARCH_RESULTS
+    results: list[DocSearchResult]
+    count: int
+    query: str
+
+
+class DocPageResponse(ToolResponseBase):
+    """Response for get_doc_page tool."""
+
+    type: ResponseType = ResponseType.DOC_PAGE
+    title: str
+    path: str
+    content: str  # Full document content
+    doc_url: str | None = None
+
+
+# Block models
+class BlockInputFieldInfo(BaseModel):
+    """Information about a block input field."""
+
+    name: str
+    type: str
+    description: str = ""
+    required: bool = False
+    default: Any | None = None
+
+
+class BlockInfoSummary(BaseModel):
+    """Summary of a block for search results."""
+
+    id: str
+    name: str
+    description: str
+    categories: list[str]
+    input_schema: dict[str, Any]
+    output_schema: dict[str, Any]
+    required_inputs: list[BlockInputFieldInfo] = Field(
+        default_factory=list,
+        description="List of required input fields for this block",
+    )
+
+
+class BlockListResponse(ToolResponseBase):
+    """Response for find_block tool."""
+
+    type: ResponseType = ResponseType.BLOCK_LIST
+    blocks: list[BlockInfoSummary]
+    count: int
+    query: str
+    usage_hint: str = Field(
+        default="To execute a block, call run_block with block_id set to the block's "
+        "'id' field and input_data containing the required fields from input_schema."
+    )
+
+
+class BlockOutputResponse(ToolResponseBase):
+    """Response for run_block tool."""
+
+    type: ResponseType = ResponseType.BLOCK_OUTPUT
+    block_id: str
+    block_name: str
+    outputs: dict[str, list[Any]]
+    success: bool = True
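Note: these response models are plain Pydantic classes, so constructing one is enough to sanity-check the schema. A minimal sketch, assuming ToolResponseBase supplies the message and session_id fields the tool code passes; the block values are hypothetical:

resp = BlockListResponse(
    message="Found 1 block(s) matching 'http'",
    session_id="sess-123",  # assumed ToolResponseBase field
    blocks=[
        BlockInfoSummary(
            id="block-1",  # hypothetical id
            name="SendWebRequestBlock",  # hypothetical block name
            description="Make an HTTP request",
            categories=["OUTPUT"],
            input_schema={},
            output_schema={},
        )
    ],
    count=1,
    query="http",
)
print(resp.model_dump_json())  # usage_hint is filled from its default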
@@ -0,0 +1,297 @@
|
||||
"""Tool for executing blocks directly."""
|
||||
|
||||
import logging
|
||||
from collections import defaultdict
|
||||
from typing import Any
|
||||
|
||||
from backend.api.features.chat.model import ChatSession
|
||||
from backend.data.block import get_block
|
||||
from backend.data.execution import ExecutionContext
|
||||
from backend.data.model import CredentialsMetaInput
|
||||
from backend.integrations.creds_manager import IntegrationCredentialsManager
|
||||
from backend.util.exceptions import BlockError
|
||||
|
||||
from .base import BaseTool
|
||||
from .models import (
|
||||
BlockOutputResponse,
|
||||
ErrorResponse,
|
||||
SetupInfo,
|
||||
SetupRequirementsResponse,
|
||||
ToolResponseBase,
|
||||
UserReadiness,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class RunBlockTool(BaseTool):
|
||||
"""Tool for executing a block and returning its outputs."""
|
||||
|
||||
@property
|
||||
def name(self) -> str:
|
||||
return "run_block"
|
||||
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"Execute a specific block with the provided input data. "
|
||||
"IMPORTANT: You MUST call find_block first to get the block's 'id' - "
|
||||
"do NOT guess or make up block IDs. "
|
||||
"Use the 'id' from find_block results and provide input_data "
|
||||
"matching the block's required_inputs."
|
||||
)
|
||||
|
||||
@property
|
||||
def parameters(self) -> dict[str, Any]:
|
||||
return {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"block_id": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"The block's 'id' field from find_block results. "
|
||||
"NEVER guess this - always get it from find_block first."
|
||||
),
|
||||
},
|
||||
"input_data": {
|
||||
"type": "object",
|
||||
"description": (
|
||||
"Input values for the block. Use the 'required_inputs' field "
|
||||
"from find_block to see what fields are needed."
|
||||
),
|
||||
},
|
||||
},
|
||||
"required": ["block_id", "input_data"],
|
||||
}
|
||||
|
||||
@property
|
||||
def requires_auth(self) -> bool:
|
||||
return True
|
||||
|
||||
async def _check_block_credentials(
|
||||
self,
|
||||
user_id: str,
|
||||
block: Any,
|
||||
) -> tuple[dict[str, CredentialsMetaInput], list[CredentialsMetaInput]]:
|
||||
"""
|
||||
Check if user has required credentials for a block.
|
||||
|
||||
Returns:
|
||||
tuple[matched_credentials, missing_credentials]
|
||||
"""
|
||||
matched_credentials: dict[str, CredentialsMetaInput] = {}
|
||||
missing_credentials: list[CredentialsMetaInput] = []
|
||||
|
||||
# Get credential field info from block's input schema
|
||||
credentials_fields_info = block.input_schema.get_credentials_fields_info()
|
||||
|
||||
if not credentials_fields_info:
|
||||
return matched_credentials, missing_credentials
|
||||
|
||||
# Get user's available credentials
|
||||
creds_manager = IntegrationCredentialsManager()
|
||||
available_creds = await creds_manager.store.get_all_creds(user_id)
|
||||
|
||||
for field_name, field_info in credentials_fields_info.items():
|
||||
# field_info.provider is a frozenset of acceptable providers
|
||||
# field_info.supported_types is a frozenset of acceptable types
|
||||
matching_cred = next(
|
||||
(
|
||||
cred
|
||||
for cred in available_creds
|
||||
if cred.provider in field_info.provider
|
||||
and cred.type in field_info.supported_types
|
||||
),
|
||||
None,
|
||||
)
|
||||
|
||||
if matching_cred:
|
||||
matched_credentials[field_name] = CredentialsMetaInput(
|
||||
id=matching_cred.id,
|
||||
provider=matching_cred.provider, # type: ignore
|
||||
type=matching_cred.type,
|
||||
title=matching_cred.title,
|
||||
)
|
||||
else:
|
||||
# Create a placeholder for the missing credential
|
||||
provider = next(iter(field_info.provider), "unknown")
|
||||
cred_type = next(iter(field_info.supported_types), "api_key")
|
||||
missing_credentials.append(
|
||||
CredentialsMetaInput(
|
||||
id=field_name,
|
||||
provider=provider, # type: ignore
|
||||
type=cred_type, # type: ignore
|
||||
title=field_name.replace("_", " ").title(),
|
||||
)
|
||||
)
|
||||
|
||||
return matched_credentials, missing_credentials
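As an aside, the provider/type matching above reduces to a set-membership test; a minimal, self-contained sketch with hypothetical stand-in objects (not the platform's real credential classes):

# Hypothetical stand-ins for illustration; the real objects come from the
# block's input schema and the user's credential store.
from types import SimpleNamespace

field_info = SimpleNamespace(
    provider=frozenset({"openai", "anthropic"}),
    supported_types=frozenset({"api_key"}),
)
available_creds = [
    SimpleNamespace(id="c1", provider="github", type="oauth2"),
    SimpleNamespace(id="c2", provider="openai", type="api_key"),
]
match = next(
    (
        c
        for c in available_creds
        if c.provider in field_info.provider
        and c.type in field_info.supported_types
    ),
    None,
)
assert match is not None and match.id == "c2"  # first compatible credential wins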

    async def _execute(
        self,
        user_id: str | None,
        session: ChatSession,
        **kwargs,
    ) -> ToolResponseBase:
        """Execute a block with the given input data.

        Args:
            user_id: User ID (required)
            session: Chat session
            block_id: Block UUID to execute
            input_data: Input values for the block

        Returns:
            BlockOutputResponse: Block execution outputs
            SetupRequirementsResponse: Missing credentials
            ErrorResponse: Error message
        """
        block_id = kwargs.get("block_id", "").strip()
        input_data = kwargs.get("input_data", {})
        session_id = session.session_id

        if not block_id:
            return ErrorResponse(
                message="Please provide a block_id",
                session_id=session_id,
            )

        if not isinstance(input_data, dict):
            return ErrorResponse(
                message="input_data must be an object",
                session_id=session_id,
            )

        if not user_id:
            return ErrorResponse(
                message="Authentication required",
                session_id=session_id,
            )

        # Get the block
        block = get_block(block_id)
        if not block:
            return ErrorResponse(
                message=f"Block '{block_id}' not found",
                session_id=session_id,
            )

        logger.info(f"Executing block {block.name} ({block_id}) for user {user_id}")

        # Check credentials
        creds_manager = IntegrationCredentialsManager()
        matched_credentials, missing_credentials = await self._check_block_credentials(
            user_id, block
        )

        if missing_credentials:
            # Return setup requirements response with missing credentials
            missing_creds_dict = {c.id: c.model_dump() for c in missing_credentials}

            return SetupRequirementsResponse(
                message=(
                    f"Block '{block.name}' requires credentials that are not configured. "
                    "Please set up the required credentials before running this block."
                ),
                session_id=session_id,
                setup_info=SetupInfo(
                    agent_id=block_id,
                    agent_name=block.name,
                    user_readiness=UserReadiness(
                        has_all_credentials=False,
                        missing_credentials=missing_creds_dict,
                        ready_to_run=False,
                    ),
                    requirements={
                        "credentials": [c.model_dump() for c in missing_credentials],
                        "inputs": self._get_inputs_list(block),
                        "execution_modes": ["immediate"],
                    },
                ),
                graph_id=None,
                graph_version=None,
            )

        try:
            # Fetch actual credentials and prepare kwargs for block execution
            # Create execution context with defaults (blocks may require it)
            exec_kwargs: dict[str, Any] = {
                "user_id": user_id,
                "execution_context": ExecutionContext(),
            }

            for field_name, cred_meta in matched_credentials.items():
                # Inject metadata into input_data (for validation)
                if field_name not in input_data:
                    input_data[field_name] = cred_meta.model_dump()

                # Fetch actual credentials and pass as kwargs (for execution)
                actual_credentials = await creds_manager.get(
                    user_id, cred_meta.id, lock=False
                )
                if actual_credentials:
                    exec_kwargs[field_name] = actual_credentials
                else:
                    return ErrorResponse(
                        message=f"Failed to retrieve credentials for {field_name}",
                        session_id=session_id,
                    )

            # Execute the block and collect outputs
            outputs: dict[str, list[Any]] = defaultdict(list)
            async for output_name, output_data in block.execute(
                input_data,
                **exec_kwargs,
            ):
                outputs[output_name].append(output_data)

            return BlockOutputResponse(
                message=f"Block '{block.name}' executed successfully",
                block_id=block_id,
                block_name=block.name,
                outputs=dict(outputs),
                success=True,
                session_id=session_id,
            )

        except BlockError as e:
            logger.warning(f"Block execution failed: {e}")
            return ErrorResponse(
                message=f"Block execution failed: {e}",
                error=str(e),
                session_id=session_id,
            )
        except Exception as e:
            logger.error(f"Unexpected error executing block: {e}", exc_info=True)
            return ErrorResponse(
                message=f"Failed to execute block: {str(e)}",
                error=str(e),
                session_id=session_id,
            )

    def _get_inputs_list(self, block: Any) -> list[dict[str, Any]]:
        """Extract non-credential inputs from block schema."""
        inputs_list = []
        schema = block.input_schema.jsonschema()
        properties = schema.get("properties", {})
        required_fields = set(schema.get("required", []))

        # Get credential field names to exclude
        credentials_fields = set(block.input_schema.get_credentials_fields().keys())

        for field_name, field_schema in properties.items():
            # Skip credential fields
            if field_name in credentials_fields:
                continue

            inputs_list.append(
                {
                    "name": field_name,
                    "title": field_schema.get("title", field_name),
                    "type": field_schema.get("type", "string"),
                    "description": field_schema.get("description", ""),
                    "required": field_name in required_fields,
                }
            )

        return inputs_list
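A rough sketch of the same filtering over a hand-written JSON schema (hypothetical field names), showing the shape `_get_inputs_list` produces:

# Illustrative only; a real block's jsonschema() provides this structure.
schema = {
    "properties": {
        "credentials": {"title": "Credentials"},
        "url": {"title": "URL", "type": "string", "description": "Page to fetch"},
    },
    "required": ["url", "credentials"],
}
credentials_fields = {"credentials"}
inputs = [
    {
        "name": name,
        "title": fs.get("title", name),
        "type": fs.get("type", "string"),
        "description": fs.get("description", ""),
        "required": name in set(schema.get("required", [])),
    }
    for name, fs in schema["properties"].items()
    if name not in credentials_fields
]
assert inputs == [
    {
        "name": "url",
        "title": "URL",
        "type": "string",
        "description": "Page to fetch",
        "required": True,
    }
]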
@@ -0,0 +1,208 @@
"""SearchDocsTool - Search documentation using hybrid search."""

import logging
from typing import Any

from prisma.enums import ContentType

from backend.api.features.chat.model import ChatSession
from backend.api.features.chat.tools.base import BaseTool
from backend.api.features.chat.tools.models import (
    DocSearchResult,
    DocSearchResultsResponse,
    ErrorResponse,
    NoResultsResponse,
    ToolResponseBase,
)
from backend.api.features.store.hybrid_search import unified_hybrid_search

logger = logging.getLogger(__name__)

# Base URL for documentation (can be configured)
DOCS_BASE_URL = "https://docs.agpt.co"

# Maximum number of results to return
MAX_RESULTS = 5

# Snippet length for preview
SNIPPET_LENGTH = 200


class SearchDocsTool(BaseTool):
    """Tool for searching AutoGPT platform documentation."""

    @property
    def name(self) -> str:
        return "search_docs"

    @property
    def description(self) -> str:
        return (
            "Search the AutoGPT platform documentation for information about "
            "how to use the platform, build agents, configure blocks, and more. "
            "Returns relevant documentation sections. Use get_doc_page to read full content."
        )

    @property
    def parameters(self) -> dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": (
                        "Search query to find relevant documentation. "
                        "Use natural language to describe what you're looking for."
                    ),
                },
            },
            "required": ["query"],
        }

    @property
    def requires_auth(self) -> bool:
        return False  # Documentation is public

    def _create_snippet(self, content: str, max_length: int = SNIPPET_LENGTH) -> str:
        """Create a short snippet from content for preview."""
        # Remove markdown formatting for cleaner snippet
        clean_content = content.replace("#", "").replace("*", "").replace("`", "")
        # Remove extra whitespace
        clean_content = " ".join(clean_content.split())

        if len(clean_content) <= max_length:
            return clean_content

        # Truncate at word boundary
        truncated = clean_content[:max_length]
        last_space = truncated.rfind(" ")
        if last_space > max_length // 2:
            truncated = truncated[:last_space]

        return truncated + "..."

    def _make_doc_url(self, path: str) -> str:
        """Create a URL for a documentation page."""
        # Remove file extension for URL
        url_path = path.rsplit(".", 1)[0] if "." in path else path
        return f"{DOCS_BASE_URL}/{url_path}"

    async def _execute(
        self,
        user_id: str | None,
        session: ChatSession,
        **kwargs,
    ) -> ToolResponseBase:
        """Search documentation and return relevant sections.

        Args:
            user_id: User ID (not required for docs)
            session: Chat session
            query: Search query

        Returns:
            DocSearchResultsResponse: List of matching documentation sections
            NoResultsResponse: No results found
            ErrorResponse: Error message
        """
        query = kwargs.get("query", "").strip()
        session_id = session.session_id if session else None

        if not query:
            return ErrorResponse(
                message="Please provide a search query.",
                error="Missing query parameter",
                session_id=session_id,
            )

        try:
            # Search using hybrid search for DOCUMENTATION content type only
            results, total = await unified_hybrid_search(
                query=query,
                content_types=[ContentType.DOCUMENTATION],
                page=1,
                page_size=MAX_RESULTS * 2,  # Fetch extra for deduplication
                min_score=0.1,  # Lower threshold for docs
            )

            if not results:
                return NoResultsResponse(
                    message=f"No documentation found for '{query}'.",
                    suggestions=[
                        "Try different keywords",
                        "Use more general terms",
                        "Check for typos in your query",
                    ],
                    session_id=session_id,
                )

            # Deduplicate by document path (keep highest scoring section per doc)
            seen_docs: dict[str, dict[str, Any]] = {}
            for result in results:
                metadata = result.get("metadata", {})
                doc_path = metadata.get("path", "")

                if not doc_path:
                    continue

                # Keep the highest scoring result for each document
                if doc_path not in seen_docs:
                    seen_docs[doc_path] = result
                elif result.get("combined_score", 0) > seen_docs[doc_path].get(
                    "combined_score", 0
                ):
                    seen_docs[doc_path] = result

            # Sort by score and take top MAX_RESULTS
            deduplicated = sorted(
                seen_docs.values(),
                key=lambda x: x.get("combined_score", 0),
                reverse=True,
            )[:MAX_RESULTS]

            if not deduplicated:
                return NoResultsResponse(
                    message=f"No documentation found for '{query}'.",
                    suggestions=[
                        "Try different keywords",
                        "Use more general terms",
                    ],
                    session_id=session_id,
                )

            # Build response
            doc_results: list[DocSearchResult] = []
            for result in deduplicated:
                metadata = result.get("metadata", {})
                doc_path = metadata.get("path", "")
                doc_title = metadata.get("doc_title", "")
                section_title = metadata.get("section_title", "")
                searchable_text = result.get("searchable_text", "")
                score = result.get("combined_score", 0)

                doc_results.append(
                    DocSearchResult(
                        title=doc_title or section_title or doc_path,
                        path=doc_path,
                        section=section_title,
                        snippet=self._create_snippet(searchable_text),
                        score=round(score, 3),
                        doc_url=self._make_doc_url(doc_path),
                    )
                )

            return DocSearchResultsResponse(
                message=f"Found {len(doc_results)} relevant documentation sections.",
                results=doc_results,
                count=len(doc_results),
                query=query,
                session_id=session_id,
            )

        except Exception as e:
            logger.error(f"Documentation search failed: {e}")
            return ErrorResponse(
                message=f"Failed to search documentation: {str(e)}",
                error="search_failed",
                session_id=session_id,
            )
@@ -275,8 +275,22 @@ class BlockHandler(ContentHandler):
    }


@dataclass
class MarkdownSection:
    """Represents a section of a markdown document."""

    title: str  # Section heading text
    content: str  # Section content (including the heading line)
    level: int  # Heading level (1 for #, 2 for ##, etc.)
    index: int  # Section index within the document


class DocumentationHandler(ContentHandler):
    """Handler for documentation files (.md/.mdx)."""
    """Handler for documentation files (.md/.mdx).

    Chunks documents by markdown headings to create multiple embeddings per file.
    Each section (## heading) becomes a separate embedding for better retrieval.
    """

    @property
    def content_type(self) -> ContentType:
@@ -297,35 +311,162 @@ class DocumentationHandler(ContentHandler):
        docs_root = project_root / "docs"
        return docs_root

    def _extract_title_and_content(self, file_path: Path) -> tuple[str, str]:
        """Extract title and content from markdown file."""
    def _extract_doc_title(self, file_path: Path) -> str:
        """Extract the document title from a markdown file."""
        try:
            content = file_path.read_text(encoding="utf-8")
            lines = content.split("\n")

            # Try to extract title from first # heading
            lines = content.split("\n")
            title = ""
            body_lines = []

            for line in lines:
                if line.startswith("# ") and not title:
                    title = line[2:].strip()
                else:
                    body_lines.append(line)
                if line.startswith("# "):
                    return line[2:].strip()

            # If no title found, use filename
            if not title:
                title = file_path.stem.replace("-", " ").replace("_", " ").title()
            return file_path.stem.replace("-", " ").replace("_", " ").title()
        except Exception as e:
            logger.warning(f"Failed to read title from {file_path}: {e}")
            return file_path.stem.replace("-", " ").replace("_", " ").title()

            body = "\n".join(body_lines)
    def _chunk_markdown_by_headings(
        self, file_path: Path, min_heading_level: int = 2
    ) -> list[MarkdownSection]:
        """
        Split a markdown file into sections based on headings.

            return title, body
        Args:
            file_path: Path to the markdown file
            min_heading_level: Minimum heading level to split on (default: 2 for ##)

        Returns:
            List of MarkdownSection objects, one per section.
            If no headings found, returns a single section with all content.
        """
        try:
            content = file_path.read_text(encoding="utf-8")
        except Exception as e:
            logger.warning(f"Failed to read {file_path}: {e}")
            return file_path.stem, ""
            return []

        lines = content.split("\n")
        sections: list[MarkdownSection] = []
        current_section_lines: list[str] = []
        current_title = ""
        current_level = 0
        section_index = 0
        doc_title = ""

        for line in lines:
            # Check if line is a heading
            if line.startswith("#"):
                # Count heading level
                level = 0
                for char in line:
                    if char == "#":
                        level += 1
                    else:
                        break

                heading_text = line[level:].strip()

                # Track document title (level 1 heading)
                if level == 1 and not doc_title:
                    doc_title = heading_text
                    # Don't create a section for just the title - add it to first section
                    current_section_lines.append(line)
                    continue

                # Check if this heading should start a new section
                if level >= min_heading_level:
                    # Save previous section if it has content
                    if current_section_lines:
                        section_content = "\n".join(current_section_lines).strip()
                        if section_content:
                            # Use doc title for first section if no specific title
                            title = current_title if current_title else doc_title
                            if not title:
                                title = file_path.stem.replace("-", " ").replace(
                                    "_", " "
                                )
                            sections.append(
                                MarkdownSection(
                                    title=title,
                                    content=section_content,
                                    level=current_level if current_level else 1,
                                    index=section_index,
                                )
                            )
                            section_index += 1

                    # Start new section
                    current_section_lines = [line]
                    current_title = heading_text
                    current_level = level
                else:
                    # Lower level heading (e.g., # when splitting on ##)
                    current_section_lines.append(line)
            else:
                current_section_lines.append(line)

        # Don't forget the last section
        if current_section_lines:
            section_content = "\n".join(current_section_lines).strip()
            if section_content:
                title = current_title if current_title else doc_title
                if not title:
                    title = file_path.stem.replace("-", " ").replace("_", " ")
                sections.append(
                    MarkdownSection(
                        title=title,
                        content=section_content,
                        level=current_level if current_level else 1,
                        index=section_index,
                    )
                )

        # If no sections were created (no headings found), create one section with all content
        if not sections and content.strip():
            title = (
                doc_title
                if doc_title
                else file_path.stem.replace("-", " ").replace("_", " ")
            )
            sections.append(
                MarkdownSection(
                    title=title,
                    content=content.strip(),
                    level=1,
                    index=0,
                )
            )

        return sections
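A quick usage sketch (hypothetical file contents, mirroring the chunking test further down this diff): the level-1 title folds into the first section, and each `##` heading starts a new one:

# Assumes DocumentationHandler is importable from this module.
import tempfile
from pathlib import Path

md = (
    "# Guide\n\nIntro.\n\n"
    "## Install\n\nRun the installer.\n\n"
    "## Use\n\nStart the app.\n"
)
with tempfile.TemporaryDirectory() as d:
    f = Path(d) / "guide.md"
    f.write_text(md, encoding="utf-8")
    sections = DocumentationHandler()._chunk_markdown_by_headings(f)

assert [s.title for s in sections] == ["Guide", "Install", "Use"]
assert sections[0].content.startswith("# Guide")  # doc title stays in section 0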

    def _make_section_content_id(self, doc_path: str, section_index: int) -> str:
        """Create a unique content ID for a document section.

        Format: doc_path::section_index
        Example: 'platform/getting-started.md::0'
        """
        return f"{doc_path}::{section_index}"

    def _parse_section_content_id(self, content_id: str) -> tuple[str, int]:
        """Parse a section content ID back into doc_path and section_index.

        Returns: (doc_path, section_index)
        """
        if "::" in content_id:
            parts = content_id.rsplit("::", 1)
            return parts[0], int(parts[1])
        # Legacy format (whole document)
        return content_id, 0
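The ID scheme round-trips, and bare legacy IDs parse as section 0 (the same behaviour the test near the end of this diff asserts):

handler = DocumentationHandler()
cid = handler._make_section_content_id("platform/getting-started.md", 3)
assert cid == "platform/getting-started.md::3"
assert handler._parse_section_content_id(cid) == ("platform/getting-started.md", 3)
assert handler._parse_section_content_id("legacy.md") == ("legacy.md", 0)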

    async def get_missing_items(self, batch_size: int) -> list[ContentItem]:
        """Fetch documentation files without embeddings."""
        """Fetch documentation sections without embeddings.

        Chunks each document by markdown headings and creates embeddings for each section.
        Content IDs use the format: 'path/to/doc.md::section_index'
        """
        docs_root = self._get_docs_root()

        if not docs_root.exists():
@@ -335,14 +476,28 @@ class DocumentationHandler(ContentHandler):
        # Find all .md and .mdx files
        all_docs = list(docs_root.rglob("*.md")) + list(docs_root.rglob("*.mdx"))

        # Get relative paths for content IDs
        doc_paths = [str(doc.relative_to(docs_root)) for doc in all_docs]

        if not doc_paths:
        if not all_docs:
            return []

        # Build list of all sections from all documents
        all_sections: list[tuple[str, Path, MarkdownSection]] = []
        for doc_file in all_docs:
            doc_path = str(doc_file.relative_to(docs_root))
            sections = self._chunk_markdown_by_headings(doc_file)
            for section in sections:
                all_sections.append((doc_path, doc_file, section))

        if not all_sections:
            return []

        # Generate content IDs for all sections
        section_content_ids = [
            self._make_section_content_id(doc_path, section.index)
            for doc_path, _, section in all_sections
        ]

        # Check which ones have embeddings
        placeholders = ",".join([f"${i+1}" for i in range(len(doc_paths))])
        placeholders = ",".join([f"${i+1}" for i in range(len(section_content_ids))])
        existing_result = await query_raw_with_schema(
            f"""
            SELECT "contentId"
@@ -350,76 +505,100 @@ class DocumentationHandler(ContentHandler):
            WHERE "contentType" = 'DOCUMENTATION'::{{schema_prefix}}"ContentType"
            AND "contentId" = ANY(ARRAY[{placeholders}])
            """,
            *doc_paths,
            *section_content_ids,
        )

        existing_ids = {row["contentId"] for row in existing_result}
        missing_docs = [
            (doc_path, doc_file)
            for doc_path, doc_file in zip(doc_paths, all_docs)
            if doc_path not in existing_ids

        # Filter to missing sections
        missing_sections = [
            (doc_path, doc_file, section, content_id)
            for (doc_path, doc_file, section), content_id in zip(
                all_sections, section_content_ids
            )
            if content_id not in existing_ids
        ]

        # Convert to ContentItem
        # Convert to ContentItem (up to batch_size)
        items = []
        for doc_path, doc_file in missing_docs[:batch_size]:
        for doc_path, doc_file, section, content_id in missing_sections[:batch_size]:
            try:
                title, content = self._extract_title_and_content(doc_file)
                # Get document title for context
                doc_title = self._extract_doc_title(doc_file)

                # Build searchable text
                searchable_text = f"{title} {content}"
                # Build searchable text with context
                # Include doc title and section title for better search relevance
                searchable_text = f"{doc_title} - {section.title}\n\n{section.content}"

                items.append(
                    ContentItem(
                        content_id=doc_path,
                        content_id=content_id,
                        content_type=ContentType.DOCUMENTATION,
                        searchable_text=searchable_text,
                        metadata={
                            "title": title,
                            "doc_title": doc_title,
                            "section_title": section.title,
                            "section_index": section.index,
                            "heading_level": section.level,
                            "path": doc_path,
                        },
                        user_id=None,  # Documentation is public
                    )
                )
            except Exception as e:
                logger.warning(f"Failed to process doc {doc_path}: {e}")
                logger.warning(f"Failed to process section {content_id}: {e}")
                continue

        return items

    def _get_all_section_content_ids(self, docs_root: Path) -> set[str]:
        """Get all current section content IDs from the docs directory.

        Used for stats and cleanup to know what sections should exist.
        """
        all_docs = list(docs_root.rglob("*.md")) + list(docs_root.rglob("*.mdx"))
        content_ids = set()

        for doc_file in all_docs:
            doc_path = str(doc_file.relative_to(docs_root))
            sections = self._chunk_markdown_by_headings(doc_file)
            for section in sections:
                content_ids.add(self._make_section_content_id(doc_path, section.index))

        return content_ids

    async def get_stats(self) -> dict[str, int]:
        """Get statistics about documentation embedding coverage."""
        """Get statistics about documentation embedding coverage.

        Counts sections (not documents) since each section gets its own embedding.
        """
        docs_root = self._get_docs_root()

        if not docs_root.exists():
            return {"total": 0, "with_embeddings": 0, "without_embeddings": 0}

        # Count all .md and .mdx files
        all_docs = list(docs_root.rglob("*.md")) + list(docs_root.rglob("*.mdx"))
        total_docs = len(all_docs)
        # Get all section content IDs
        all_section_ids = self._get_all_section_content_ids(docs_root)
        total_sections = len(all_section_ids)

        if total_docs == 0:
        if total_sections == 0:
            return {"total": 0, "with_embeddings": 0, "without_embeddings": 0}

        doc_paths = [str(doc.relative_to(docs_root)) for doc in all_docs]
        placeholders = ",".join([f"${i+1}" for i in range(len(doc_paths))])

        # Count embeddings in database for DOCUMENTATION type
        embedded_result = await query_raw_with_schema(
            f"""
            """
            SELECT COUNT(*) as count
            FROM {{schema_prefix}}"UnifiedContentEmbedding"
            WHERE "contentType" = 'DOCUMENTATION'::{{schema_prefix}}"ContentType"
            AND "contentId" = ANY(ARRAY[{placeholders}])
            """,
            *doc_paths,
            FROM {schema_prefix}"UnifiedContentEmbedding"
            WHERE "contentType" = 'DOCUMENTATION'::{schema_prefix}"ContentType"
            """
        )

        with_embeddings = embedded_result[0]["count"] if embedded_result else 0

        return {
            "total": total_docs,
            "total": total_sections,
            "with_embeddings": with_embeddings,
            "without_embeddings": total_docs - with_embeddings,
            "without_embeddings": total_sections - with_embeddings,
        }


@@ -164,20 +164,20 @@ async def test_documentation_handler_get_missing_items(tmp_path, mocker):

    assert len(items) == 2

    # Check guide.md
    # Check guide.md (content_id format: doc_path::section_index)
    guide_item = next(
        (item for item in items if item.content_id == "guide.md"), None
        (item for item in items if item.content_id == "guide.md::0"), None
    )
    assert guide_item is not None
    assert guide_item.content_type == ContentType.DOCUMENTATION
    assert "Getting Started" in guide_item.searchable_text
    assert "This is a guide" in guide_item.searchable_text
    assert guide_item.metadata["title"] == "Getting Started"
    assert guide_item.metadata["doc_title"] == "Getting Started"
    assert guide_item.user_id is None

    # Check api.mdx
    # Check api.mdx (content_id format: doc_path::section_index)
    api_item = next(
        (item for item in items if item.content_id == "api.mdx"), None
        (item for item in items if item.content_id == "api.mdx::0"), None
    )
    assert api_item is not None
    assert "API Reference" in api_item.searchable_text
@@ -218,17 +218,74 @@ async def test_documentation_handler_title_extraction(tmp_path):
    # Test with heading
    doc_with_heading = tmp_path / "with_heading.md"
    doc_with_heading.write_text("# My Title\n\nContent here")
    title, content = handler._extract_title_and_content(doc_with_heading)
    title = handler._extract_doc_title(doc_with_heading)
    assert title == "My Title"
    assert "# My Title" not in content
    assert "Content here" in content

    # Test without heading
    doc_without_heading = tmp_path / "no-heading.md"
    doc_without_heading.write_text("Just content, no heading")
    title, content = handler._extract_title_and_content(doc_without_heading)
    title = handler._extract_doc_title(doc_without_heading)
    assert title == "No Heading"  # Uses filename
    assert "Just content" in content


@pytest.mark.asyncio(loop_scope="session")
async def test_documentation_handler_markdown_chunking(tmp_path):
    """Test DocumentationHandler chunks markdown by headings."""
    handler = DocumentationHandler()

    # Test document with multiple sections
    doc_with_sections = tmp_path / "sections.md"
    doc_with_sections.write_text(
        "# Document Title\n\n"
        "Intro paragraph.\n\n"
        "## Section One\n\n"
        "Content for section one.\n\n"
        "## Section Two\n\n"
        "Content for section two.\n"
    )
    sections = handler._chunk_markdown_by_headings(doc_with_sections)

    # Should have 3 sections: intro (with doc title), section one, section two
    assert len(sections) == 3
    assert sections[0].title == "Document Title"
    assert sections[0].index == 0
    assert "Intro paragraph" in sections[0].content

    assert sections[1].title == "Section One"
    assert sections[1].index == 1
    assert "Content for section one" in sections[1].content

    assert sections[2].title == "Section Two"
    assert sections[2].index == 2
    assert "Content for section two" in sections[2].content

    # Test document without headings
    doc_no_sections = tmp_path / "no-sections.md"
    doc_no_sections.write_text("Just plain content without any headings.")
    sections = handler._chunk_markdown_by_headings(doc_no_sections)
    assert len(sections) == 1
    assert sections[0].index == 0
    assert "Just plain content" in sections[0].content


@pytest.mark.asyncio(loop_scope="session")
async def test_documentation_handler_section_content_ids():
    """Test DocumentationHandler creates and parses section content IDs."""
    handler = DocumentationHandler()

    # Test making content ID
    content_id = handler._make_section_content_id("docs/guide.md", 2)
    assert content_id == "docs/guide.md::2"

    # Test parsing content ID
    doc_path, section_index = handler._parse_section_content_id("docs/guide.md::2")
    assert doc_path == "docs/guide.md"
    assert section_index == 2

    # Test parsing legacy format (no section index)
    doc_path, section_index = handler._parse_section_content_id("docs/old-format.md")
    assert doc_path == "docs/old-format.md"
    assert section_index == 0


@pytest.mark.asyncio(loop_scope="session")

@@ -683,20 +683,20 @@ async def cleanup_orphaned_embeddings() -> dict[str, Any]:

            current_ids = set(get_blocks().keys())
        elif content_type == ContentType.DOCUMENTATION:
            from pathlib import Path

            # embeddings.py is at: backend/backend/api/features/store/embeddings.py
            # Need to go up to project root then into docs/
            this_file = Path(__file__)
            project_root = (
                this_file.parent.parent.parent.parent.parent.parent.parent
            # Use DocumentationHandler to get section-based content IDs
            from backend.api.features.store.content_handlers import (
                DocumentationHandler,
            )
            docs_root = project_root / "docs"
            if docs_root.exists():
                all_docs = list(docs_root.rglob("*.md")) + list(
                    docs_root.rglob("*.mdx")
                )
                current_ids = {str(doc.relative_to(docs_root)) for doc in all_docs}

            doc_handler = CONTENT_HANDLERS.get(ContentType.DOCUMENTATION)
            if isinstance(doc_handler, DocumentationHandler):
                docs_root = doc_handler._get_docs_root()
                if docs_root.exists():
                    current_ids = doc_handler._get_all_section_content_ids(
                        docs_root
                    )
                else:
                    current_ids = set()
            else:
                current_ids = set()
        else:

@@ -3,13 +3,16 @@ Unified Hybrid Search

Combines semantic (embedding) search with lexical (tsvector) search
for improved relevance across all content types (agents, blocks, docs).
Includes BM25 reranking for improved lexical relevance.
"""

import logging
import re
from dataclasses import dataclass
from typing import Any, Literal

from prisma.enums import ContentType
from rank_bm25 import BM25Okapi

from backend.api.features.store.embeddings import (
    EMBEDDING_DIM,
@@ -21,6 +24,84 @@ from backend.data.db import query_raw_with_schema
logger = logging.getLogger(__name__)


# ============================================================================
# BM25 Reranking
# ============================================================================


def tokenize(text: str) -> list[str]:
    """Simple tokenizer for BM25 - lowercase and split on non-alphanumeric."""
    if not text:
        return []
    # Lowercase and split on non-alphanumeric characters
    tokens = re.findall(r"\b\w+\b", text.lower())
    return tokens
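For instance, the tokenizer lowercases and keeps word characters only:

assert tokenize("Run BM25, fast!") == ["run", "bm25", "fast"]
assert tokenize("") == []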


def bm25_rerank(
    query: str,
    results: list[dict[str, Any]],
    text_field: str = "searchable_text",
    bm25_weight: float = 0.3,
    original_score_field: str = "combined_score",
) -> list[dict[str, Any]]:
    """
    Rerank search results using BM25.

    Combines the original combined_score with BM25 score for improved
    lexical relevance, especially for exact term matches.

    Args:
        query: The search query
        results: List of result dicts with text_field and original_score_field
        text_field: Field name containing the text to score
        bm25_weight: Weight for BM25 score (0-1). Original score gets (1 - bm25_weight)
        original_score_field: Field name containing the original score

    Returns:
        Results list sorted by combined score (BM25 + original)
    """
    if not results or not query:
        return results

    # Extract texts and tokenize
    corpus = [tokenize(r.get(text_field, "") or "") for r in results]

    # Handle edge case where all documents are empty
    if all(len(doc) == 0 for doc in corpus):
        return results

    # Build BM25 index
    bm25 = BM25Okapi(corpus)

    # Score query against corpus
    query_tokens = tokenize(query)
    if not query_tokens:
        return results

    bm25_scores = bm25.get_scores(query_tokens)

    # Normalize BM25 scores to 0-1 range
    max_bm25 = max(bm25_scores) if max(bm25_scores) > 0 else 1.0
    normalized_bm25 = [s / max_bm25 for s in bm25_scores]

    # Combine scores
    original_weight = 1.0 - bm25_weight
    for i, result in enumerate(results):
        original_score = result.get(original_score_field, 0) or 0
        result["bm25_score"] = normalized_bm25[i]
        final_score = (
            original_weight * original_score + bm25_weight * normalized_bm25[i]
        )
        result["final_score"] = final_score
        result["relevance"] = final_score

    # Sort by relevance descending
    results.sort(key=lambda x: x.get("relevance", 0), reverse=True)

    return results


@dataclass
class UnifiedSearchWeights:
    """Weights for unified search (no popularity signal)."""
@@ -273,9 +354,7 @@ async def unified_hybrid_search(
            FROM normalized
        ),
        filtered AS (
            SELECT
                *,
                COUNT(*) OVER () as total_count
            SELECT *, COUNT(*) OVER () as total_count
            FROM scored
            WHERE combined_score >= {min_score_param}
        )
@@ -289,6 +368,15 @@ async def unified_hybrid_search(
    )

    total = results[0]["total_count"] if results else 0
    # Apply BM25 reranking
    if results:
        results = bm25_rerank(
            query=query,
            results=results,
            text_field="searchable_text",
            bm25_weight=0.3,
            original_score_field="combined_score",
        )

    # Clean up results
    for result in results:
@@ -516,6 +604,8 @@ async def hybrid_search(
            sa.featured,
            sa.is_available,
            sa.updated_at,
            -- Searchable text for BM25 reranking
            COALESCE(sa.agent_name, '') || ' ' || COALESCE(sa.sub_heading, '') || ' ' || COALESCE(sa.description, '') as searchable_text,
            -- Semantic score
            COALESCE(1 - (uce.embedding <=> {embedding_param}::vector), 0) as semantic_score,
            -- Lexical score (raw, will normalize)
@@ -573,6 +663,7 @@ async def hybrid_search(
            featured,
            is_available,
            updated_at,
            searchable_text,
            semantic_score,
            lexical_score,
            category_score,
@@ -603,8 +694,19 @@ async def hybrid_search(

    total = results[0]["total_count"] if results else 0

    # Apply BM25 reranking
    if results:
        results = bm25_rerank(
            query=query,
            results=results,
            text_field="searchable_text",
            bm25_weight=0.3,
            original_score_field="combined_score",
        )

    for result in results:
        result.pop("total_count", None)
        result.pop("searchable_text", None)

    logger.info(f"Hybrid search (store agents): {len(results)} results, {total} total")


@@ -311,11 +311,43 @@ async def test_hybrid_search_min_score_filtering():
@pytest.mark.asyncio(loop_scope="session")
@pytest.mark.integration
async def test_hybrid_search_pagination():
    """Test hybrid search pagination."""
    """Test hybrid search pagination.

    Pagination happens in SQL (LIMIT/OFFSET), then BM25 reranking is applied
    to the paginated results.
    """
    # Create mock results that SQL would return for a page
    mock_results = [
        {
            "slug": f"agent-{i}",
            "agent_name": f"Agent {i}",
            "agent_image": "test.png",
            "creator_username": "test",
            "creator_avatar": "avatar.png",
            "sub_heading": "Test",
            "description": "Test description",
            "runs": 100 - i,
            "rating": 4.5,
            "categories": ["test"],
            "featured": False,
            "is_available": True,
            "updated_at": "2024-01-01T00:00:00Z",
            "searchable_text": f"Agent {i} test description",
            "combined_score": 0.9 - (i * 0.01),
            "semantic_score": 0.7,
            "lexical_score": 0.6,
            "category_score": 0.5,
            "recency_score": 0.4,
            "popularity_score": 0.3,
            "total_count": 25,
        }
        for i in range(10)  # SQL returns page_size results
    ]

    with patch(
        "backend.api.features.store.hybrid_search.query_raw_with_schema"
    ) as mock_query:
        mock_query.return_value = []
        mock_query.return_value = mock_results

        with patch(
            "backend.api.features.store.hybrid_search.embed_query"
@@ -329,16 +361,18 @@ async def test_hybrid_search_pagination():
            page_size=10,
        )

        # Verify pagination parameters
        # Verify results returned
        assert len(results) == 10
        assert total == 25  # Total from SQL COUNT(*) OVER()

        # Verify the SQL query uses page_size and offset
        call_args = mock_query.call_args
        params = call_args[0]

        # Last two params should be LIMIT and OFFSET
        limit = params[-2]
        offset = params[-1]

        assert limit == 10  # page_size
        assert offset == 10  # (page - 1) * page_size = (2 - 1) * 10
        # Last two params are page_size and offset
        page_size_param = params[-2]
        offset_param = params[-1]
        assert page_size_param == 10
        assert offset_param == 10  # (page 2 - 1) * 10


@pytest.mark.asyncio(loop_scope="session")
@@ -609,14 +643,36 @@ async def test_unified_hybrid_search_empty_query():
@pytest.mark.asyncio(loop_scope="session")
@pytest.mark.integration
async def test_unified_hybrid_search_pagination():
    """Test unified search pagination."""
    """Test unified search pagination with BM25 reranking.

    Pagination happens in SQL (LIMIT/OFFSET), then BM25 reranking is applied
    to the paginated results.
    """
    # Create mock results that SQL would return for a page
    mock_results = [
        {
            "content_type": "STORE_AGENT",
            "content_id": f"agent-{i}",
            "searchable_text": f"Agent {i} description",
            "metadata": {"name": f"Agent {i}"},
            "updated_at": "2025-01-01T00:00:00Z",
            "semantic_score": 0.7,
            "lexical_score": 0.8 - (i * 0.01),
            "category_score": 0.5,
            "recency_score": 0.3,
            "combined_score": 0.6 - (i * 0.01),
            "total_count": 50,
        }
        for i in range(15)  # SQL returns page_size results
    ]

    with patch(
        "backend.api.features.store.hybrid_search.query_raw_with_schema"
    ) as mock_query:
        with patch(
            "backend.api.features.store.hybrid_search.embed_query"
        ) as mock_embed:
            mock_query.return_value = []
            mock_query.return_value = mock_results
            mock_embed.return_value = [0.1] * embeddings.EMBEDDING_DIM

            results, total = await unified_hybrid_search(
@@ -625,15 +681,18 @@ async def test_unified_hybrid_search_pagination():
                page_size=15,
            )

            # Verify pagination parameters (last two params are LIMIT and OFFSET)
            # Verify results returned
            assert len(results) == 15
            assert total == 50  # Total from SQL COUNT(*) OVER()

            # Verify the SQL query uses page_size and offset
            call_args = mock_query.call_args
            params = call_args[0]

            limit = params[-2]
            offset = params[-1]

            assert limit == 15  # page_size
            assert offset == 30  # (page - 1) * page_size = (3 - 1) * 15
            # Last two params are page_size and offset
            page_size_param = params[-2]
            offset_param = params[-1]
            assert page_size_param == 15
            assert offset_param == 30  # (page 3 - 1) * 15


@pytest.mark.asyncio(loop_scope="session")

@@ -393,7 +393,6 @@ async def get_creators(
@router.get(
    "/creator/{username}",
    summary="Get creator details",
    operation_id="getV2GetCreatorDetails",
    tags=["store", "public"],
    response_model=store_model.CreatorDetails,
)

@@ -18,7 +18,6 @@ from prisma.errors import PrismaError

import backend.api.features.admin.credit_admin_routes
import backend.api.features.admin.execution_analytics_routes
import backend.api.features.admin.llm_routes
import backend.api.features.admin.store_admin_routes
import backend.api.features.builder
import backend.api.features.builder.routes
@@ -38,11 +37,9 @@ import backend.data.db
import backend.data.graph
import backend.data.user
import backend.integrations.webhooks.utils
import backend.server.v2.llm.routes as public_llm_routes
import backend.util.service
import backend.util.settings
from backend.data import llm_registry
from backend.data.block_cost_config import refresh_llm_costs
from backend.blocks.llm import DEFAULT_LLM_MODEL
from backend.data.model import Credentials
from backend.integrations.providers import ProviderName
from backend.monitoring.instrumentation import instrument_fastapi
@@ -112,27 +109,11 @@ async def lifespan_context(app: fastapi.FastAPI):

    AutoRegistry.patch_integrations()

    # Refresh LLM registry before initializing blocks so blocks can use registry data
    await llm_registry.refresh_llm_registry()
    refresh_llm_costs()

    # Clear block schema caches so they're regenerated with updated discriminator_mapping
    from backend.data.block import BlockSchema

    BlockSchema.clear_all_schema_caches()

    await backend.data.block.initialize_blocks()

    await backend.data.user.migrate_and_encrypt_user_integrations()
    await backend.data.graph.fix_llm_provider_credentials()
    # migrate_llm_models uses registry default model
    from backend.blocks.llm import LlmModel

    default_model_slug = llm_registry.get_default_model_slug()
    if default_model_slug:
        await backend.data.graph.migrate_llm_models(LlmModel(default_model_slug))
    else:
        logger.warning("Skipping LLM model migration: no default model available")
    await backend.data.graph.migrate_llm_models(DEFAULT_LLM_MODEL)
    await backend.integrations.webhooks.utils.migrate_legacy_triggered_graphs()

    with launch_darkly_context():
@@ -317,16 +298,6 @@ app.include_router(
    tags=["v2", "executions", "review"],
    prefix="/api/review",
)
app.include_router(
    backend.api.features.admin.llm_routes.router,
    tags=["v2", "admin", "llm"],
    prefix="/api/llm/admin",
)
app.include_router(
    public_llm_routes.router,
    tags=["v2", "llm"],
    prefix="/api",
)
app.include_router(
    backend.api.features.library.routes.router, tags=["v2"], prefix="/api/library"
)

@@ -77,39 +77,7 @@ async def event_broadcaster(manager: ConnectionManager):
                payload=notification.payload,
            )

    async def registry_refresh_worker():
        """Listen for LLM registry refresh notifications and broadcast to all clients."""
        from backend.data.llm_registry import REGISTRY_REFRESH_CHANNEL
        from backend.data.redis_client import connect_async

        redis = await connect_async()
        pubsub = redis.pubsub()
        await pubsub.subscribe(REGISTRY_REFRESH_CHANNEL)
        logger.info(
            "Subscribed to LLM registry refresh notifications for WebSocket broadcast"
        )

        async for message in pubsub.listen():
            if (
                message["type"] == "message"
                and message["channel"] == REGISTRY_REFRESH_CHANNEL
            ):
                logger.info(
                    "Broadcasting LLM registry refresh to all WebSocket clients"
                )
                await manager.broadcast_to_all(
                    method=WSMethod.NOTIFICATION,
                    data={
                        "type": "LLM_REGISTRY_REFRESH",
                        "event": "registry_updated",
                    },
                )

    await asyncio.gather(
        execution_worker(),
        notification_worker(),
        registry_refresh_worker(),
    )
    await asyncio.gather(execution_worker(), notification_worker())


async def authenticate_websocket(websocket: WebSocket) -> str:

@@ -1,6 +1,7 @@
from typing import Any

from backend.blocks.llm import (
    DEFAULT_LLM_MODEL,
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    AIBlockBase,
@@ -9,7 +10,6 @@ from backend.blocks.llm import (
    LlmModel,
    LLMResponse,
    llm_call,
    llm_model_schema_extra,
)
from backend.data.block import (
    BlockCategory,
@@ -50,10 +50,9 @@ class AIConditionBlock(AIBlockBase):
    )
    model: LlmModel = SchemaField(
        title="LLM Model",
        default_factory=LlmModel.default,
        default=DEFAULT_LLM_MODEL,
        description="The language model to use for evaluating the condition.",
        advanced=False,
        json_schema_extra=llm_model_schema_extra(),
    )
    credentials: AICredentials = AICredentialsField()

@@ -83,7 +82,7 @@ class AIConditionBlock(AIBlockBase):
            "condition": "the input is an email address",
            "yes_value": "Valid email",
            "no_value": "Not an email",
            "model": "gpt-4o",  # Using string value - enum accepts any model slug dynamically
            "model": DEFAULT_LLM_MODEL,
            "credentials": TEST_CREDENTIALS_INPUT,
        },
        test_credentials=TEST_CREDENTIALS,

@@ -4,19 +4,17 @@ import logging
import re
import secrets
from abc import ABC
from enum import Enum
from enum import Enum, EnumMeta
from json import JSONDecodeError
from typing import Any, Iterable, List, Literal, Optional
from typing import Any, Iterable, List, Literal, NamedTuple, Optional

import anthropic
import ollama
import openai
from anthropic.types import ToolParam
from groq import AsyncGroq
from pydantic import BaseModel, GetCoreSchemaHandler, SecretStr
from pydantic_core import CoreSchema, core_schema
from pydantic import BaseModel, SecretStr

from backend.data import llm_registry
from backend.data.block import (
    Block,
    BlockCategory,
@@ -24,7 +22,6 @@ from backend.data.block import (
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.llm_registry import ModelMetadata
from backend.data.model import (
    APIKeyCredentials,
    CredentialsField,
@@ -69,117 +66,114 @@ TEST_CREDENTIALS_INPUT = {


def AICredentialsField() -> AICredentials:
    """
    Returns a CredentialsField for LLM providers.
    The discriminator_mapping will be refreshed when the schema is generated
    if it's empty, ensuring the LLM registry is loaded.
    """
    # Get the mapping now - it may be empty initially, but will be refreshed
    # when the schema is generated via CredentialsMetaInput._add_json_schema_extra
    mapping = llm_registry.get_llm_discriminator_mapping()

    return CredentialsField(
        description="API key for the LLM provider.",
        discriminator="model",
        discriminator_mapping=mapping,  # May be empty initially, refreshed later
        discriminator_mapping={
            model.value: model.metadata.provider for model in LlmModel
        },
    )


def llm_model_schema_extra() -> dict[str, Any]:
    return {"options": llm_registry.get_llm_model_schema_options()}
class ModelMetadata(NamedTuple):
    provider: str
    context_window: int
    max_output_tokens: int | None
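For reference, the tuple reads positionally or by name; the values here are the gpt-4o numbers from the MODEL_METADATA table at the end of this diff:

meta = ModelMetadata(provider="openai", context_window=128000, max_output_tokens=16384)
assert meta.context_window == 128000
assert meta.max_output_tokens == 16384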


class LlmModelMeta(type):
    """
    Metaclass for LlmModel that enables attribute-style access to dynamic models.

    This allows code like `LlmModel.GPT4O` to work by converting the attribute
    name to a slug format:
    - GPT4O -> gpt-4o
    - GPT4O_MINI -> gpt-4o-mini
    - CLAUDE_3_5_SONNET -> claude-3-5-sonnet
    """

    def __getattr__(cls, name: str):
        # Don't intercept private/dunder attributes
        if name.startswith("_"):
            raise AttributeError(f"type object 'LlmModel' has no attribute '{name}'")

        # Convert attribute name to slug format:
        # 1. Lowercase: GPT4O -> gpt4o
        # 2. Underscores to hyphens: GPT4O_MINI -> gpt4o-mini
        # 3. Insert hyphen between letter and digit: gpt4o -> gpt-4o
        slug = name.lower().replace("_", "-")
        slug = re.sub(r"([a-z])(\d)", r"\1-\2", slug)

        return cls(slug)
class LlmModelMeta(EnumMeta):
    pass
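The attribute-to-slug conversion in the removed metaclass above is plain string munging; a standalone sketch of the same two steps:

import re

def attr_to_slug(name: str) -> str:
    # GPT4O_MINI -> gpt4o-mini -> gpt-4o-mini
    slug = name.lower().replace("_", "-")
    return re.sub(r"([a-z])(\d)", r"\1-\2", slug)

assert attr_to_slug("GPT4O") == "gpt-4o"
assert attr_to_slug("GPT4O_MINI") == "gpt-4o-mini"
assert attr_to_slug("CLAUDE_3_5_SONNET") == "claude-3-5-sonnet"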
|
||||
|
||||
|
||||
class LlmModel(str, metaclass=LlmModelMeta):
|
||||
"""
|
||||
Dynamic LLM model type that accepts any model slug from the registry.
|
||||
|
||||
This is a string subclass (not an Enum) that allows any model slug value.
|
||||
All models are managed via the LLM Registry in the database.
|
||||
|
||||
Usage:
|
||||
model = LlmModel("gpt-4o") # Direct construction
|
||||
model = LlmModel.GPT4O # Attribute access (converted to "gpt-4o")
|
||||
model.value # Returns the slug string
|
||||
model.provider # Returns the provider from registry
|
||||
"""
|
||||
|
||||
def __new__(cls, value: str):
|
||||
if isinstance(value, LlmModel):
|
||||
return value
|
||||
return str.__new__(cls, value)
|
||||
|
||||
@classmethod
|
||||
def __get_pydantic_core_schema__(
|
||||
cls, source_type: Any, handler: GetCoreSchemaHandler
|
||||
) -> CoreSchema:
|
||||
"""
|
||||
Tell Pydantic how to validate LlmModel.
|
||||
|
||||
Accepts strings and converts them to LlmModel instances.
|
||||
"""
|
||||
return core_schema.no_info_after_validator_function(
|
||||
cls, # The validator function (LlmModel constructor)
|
||||
core_schema.str_schema(), # Accept string input
|
||||
serialization=core_schema.to_string_ser_schema(), # Serialize as string
|
||||
)
|
||||
|
||||
@property
|
||||
def value(self) -> str:
|
||||
"""Return the model slug (for compatibility with enum-style access)."""
|
||||
return str(self)
|
||||
|
||||
@classmethod
|
||||
def default(cls) -> "LlmModel":
|
||||
"""
|
||||
Get the default model from the registry.
|
||||
|
||||
Returns the recommended model if set, otherwise gpt-4o if available
|
||||
and enabled, otherwise the first enabled model from the registry.
|
||||
Falls back to "gpt-4o" if registry is empty (e.g., at module import time).
|
||||
"""
|
||||
from backend.data.llm_registry import get_default_model_slug
|
||||
|
||||
slug = get_default_model_slug()
|
||||
if slug is None:
|
||||
# Registry is empty (e.g., at module import time before DB connection).
|
||||
# Fall back to gpt-4o for backward compatibility.
|
||||
slug = "gpt-4o"
|
||||
return cls(slug)
|
||||
class LlmModel(str, Enum, metaclass=LlmModelMeta):
|
||||
# OpenAI models
|
||||
O3_MINI = "o3-mini"
|
||||
O3 = "o3-2025-04-16"
|
||||
O1 = "o1"
|
||||
O1_MINI = "o1-mini"
|
||||
# GPT-5 models
|
||||
    GPT5_2 = "gpt-5.2-2025-12-11"
    GPT5_1 = "gpt-5.1-2025-11-13"
    GPT5 = "gpt-5-2025-08-07"
    GPT5_MINI = "gpt-5-mini-2025-08-07"
    GPT5_NANO = "gpt-5-nano-2025-08-07"
    GPT5_CHAT = "gpt-5-chat-latest"
    GPT41 = "gpt-4.1-2025-04-14"
    GPT41_MINI = "gpt-4.1-mini-2025-04-14"
    GPT4O_MINI = "gpt-4o-mini"
    GPT4O = "gpt-4o"
    GPT4_TURBO = "gpt-4-turbo"
    GPT3_5_TURBO = "gpt-3.5-turbo"
    # Anthropic models
    CLAUDE_4_1_OPUS = "claude-opus-4-1-20250805"
    CLAUDE_4_OPUS = "claude-opus-4-20250514"
    CLAUDE_4_SONNET = "claude-sonnet-4-20250514"
    CLAUDE_4_5_OPUS = "claude-opus-4-5-20251101"
    CLAUDE_4_5_SONNET = "claude-sonnet-4-5-20250929"
    CLAUDE_4_5_HAIKU = "claude-haiku-4-5-20251001"
    CLAUDE_3_7_SONNET = "claude-3-7-sonnet-20250219"
    CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
    # AI/ML API models
    AIML_API_QWEN2_5_72B = "Qwen/Qwen2.5-72B-Instruct-Turbo"
    AIML_API_LLAMA3_1_70B = "nvidia/llama-3.1-nemotron-70b-instruct"
    AIML_API_LLAMA3_3_70B = "meta-llama/Llama-3.3-70B-Instruct-Turbo"
    AIML_API_META_LLAMA_3_1_70B = "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo"
    AIML_API_LLAMA_3_2_3B = "meta-llama/Llama-3.2-3B-Instruct-Turbo"
    # Groq models
    LLAMA3_3_70B = "llama-3.3-70b-versatile"
    LLAMA3_1_8B = "llama-3.1-8b-instant"
    # Ollama models
    OLLAMA_LLAMA3_3 = "llama3.3"
    OLLAMA_LLAMA3_2 = "llama3.2"
    OLLAMA_LLAMA3_8B = "llama3"
    OLLAMA_LLAMA3_405B = "llama3.1:405b"
    OLLAMA_DOLPHIN = "dolphin-mistral:latest"
    # OpenRouter models
    OPENAI_GPT_OSS_120B = "openai/gpt-oss-120b"
    OPENAI_GPT_OSS_20B = "openai/gpt-oss-20b"
    GEMINI_2_5_PRO = "google/gemini-2.5-pro-preview-03-25"
    GEMINI_3_PRO_PREVIEW = "google/gemini-3-pro-preview"
    GEMINI_2_5_FLASH = "google/gemini-2.5-flash"
    GEMINI_2_0_FLASH = "google/gemini-2.0-flash-001"
    GEMINI_2_5_FLASH_LITE_PREVIEW = "google/gemini-2.5-flash-lite-preview-06-17"
    GEMINI_2_0_FLASH_LITE = "google/gemini-2.0-flash-lite-001"
    MISTRAL_NEMO = "mistralai/mistral-nemo"
    COHERE_COMMAND_R_08_2024 = "cohere/command-r-08-2024"
    COHERE_COMMAND_R_PLUS_08_2024 = "cohere/command-r-plus-08-2024"
    DEEPSEEK_CHAT = "deepseek/deepseek-chat"  # Actually: DeepSeek V3
    DEEPSEEK_R1_0528 = "deepseek/deepseek-r1-0528"
    PERPLEXITY_SONAR = "perplexity/sonar"
    PERPLEXITY_SONAR_PRO = "perplexity/sonar-pro"
    PERPLEXITY_SONAR_DEEP_RESEARCH = "perplexity/sonar-deep-research"
    NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B = "nousresearch/hermes-3-llama-3.1-405b"
    NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B = "nousresearch/hermes-3-llama-3.1-70b"
    AMAZON_NOVA_LITE_V1 = "amazon/nova-lite-v1"
    AMAZON_NOVA_MICRO_V1 = "amazon/nova-micro-v1"
    AMAZON_NOVA_PRO_V1 = "amazon/nova-pro-v1"
    MICROSOFT_WIZARDLM_2_8X22B = "microsoft/wizardlm-2-8x22b"
    GRYPHE_MYTHOMAX_L2_13B = "gryphe/mythomax-l2-13b"
    META_LLAMA_4_SCOUT = "meta-llama/llama-4-scout"
    META_LLAMA_4_MAVERICK = "meta-llama/llama-4-maverick"
    GROK_4 = "x-ai/grok-4"
    GROK_4_FAST = "x-ai/grok-4-fast"
    GROK_4_1_FAST = "x-ai/grok-4.1-fast"
    GROK_CODE_FAST_1 = "x-ai/grok-code-fast-1"
    KIMI_K2 = "moonshotai/kimi-k2"
    QWEN3_235B_A22B_THINKING = "qwen/qwen3-235b-a22b-thinking-2507"
    QWEN3_CODER = "qwen/qwen3-coder"
    # Llama API models
    LLAMA_API_LLAMA_4_SCOUT = "Llama-4-Scout-17B-16E-Instruct-FP8"
    LLAMA_API_LLAMA4_MAVERICK = "Llama-4-Maverick-17B-128E-Instruct-FP8"
    LLAMA_API_LLAMA3_3_8B = "Llama-3.3-8B-Instruct"
    LLAMA_API_LLAMA3_3_70B = "Llama-3.3-70B-Instruct"
    # v0 by Vercel models
    V0_1_5_MD = "v0-1.5-md"
    V0_1_5_LG = "v0-1.5-lg"
    V0_1_0_MD = "v0-1.0-md"

    @property
    def metadata(self) -> ModelMetadata:
        metadata = llm_registry.get_llm_model_metadata(self.value)
        if metadata:
            return metadata
        raise ValueError(
            f"Missing metadata for model: {self.value}. Model not found in LLM registry."
        )
        return MODEL_METADATA[self]

    @property
    def provider(self) -> str:
@@ -194,11 +188,128 @@ class LlmModel(str, metaclass=LlmModelMeta):
        return self.metadata.max_output_tokens

# MODEL_METADATA removed - all models now come from the database via llm_registry
MODEL_METADATA = {
    # https://platform.openai.com/docs/models
    LlmModel.O3: ModelMetadata("openai", 200000, 100000),
    LlmModel.O3_MINI: ModelMetadata("openai", 200000, 100000),  # o3-mini-2025-01-31
    LlmModel.O1: ModelMetadata("openai", 200000, 100000),  # o1-2024-12-17
    LlmModel.O1_MINI: ModelMetadata("openai", 128000, 65536),  # o1-mini-2024-09-12
    # GPT-5 models
    LlmModel.GPT5_2: ModelMetadata("openai", 400000, 128000),
    LlmModel.GPT5_1: ModelMetadata("openai", 400000, 128000),
    LlmModel.GPT5: ModelMetadata("openai", 400000, 128000),
    LlmModel.GPT5_MINI: ModelMetadata("openai", 400000, 128000),
    LlmModel.GPT5_NANO: ModelMetadata("openai", 400000, 128000),
    LlmModel.GPT5_CHAT: ModelMetadata("openai", 400000, 16384),
    LlmModel.GPT41: ModelMetadata("openai", 1047576, 32768),
    LlmModel.GPT41_MINI: ModelMetadata("openai", 1047576, 32768),
    LlmModel.GPT4O_MINI: ModelMetadata(
        "openai", 128000, 16384
    ),  # gpt-4o-mini-2024-07-18
    LlmModel.GPT4O: ModelMetadata("openai", 128000, 16384),  # gpt-4o-2024-08-06
    LlmModel.GPT4_TURBO: ModelMetadata(
        "openai", 128000, 4096
    ),  # gpt-4-turbo-2024-04-09
    LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385, 4096),  # gpt-3.5-turbo-0125
    # https://docs.anthropic.com/en/docs/about-claude/models
    LlmModel.CLAUDE_4_1_OPUS: ModelMetadata(
        "anthropic", 200000, 32000
    ),  # claude-opus-4-1-20250805
    LlmModel.CLAUDE_4_OPUS: ModelMetadata(
        "anthropic", 200000, 32000
    ),  # claude-4-opus-20250514
    LlmModel.CLAUDE_4_SONNET: ModelMetadata(
        "anthropic", 200000, 64000
    ),  # claude-4-sonnet-20250514
    LlmModel.CLAUDE_4_5_OPUS: ModelMetadata(
        "anthropic", 200000, 64000
    ),  # claude-opus-4-5-20251101
    LlmModel.CLAUDE_4_5_SONNET: ModelMetadata(
        "anthropic", 200000, 64000
    ),  # claude-sonnet-4-5-20250929
    LlmModel.CLAUDE_4_5_HAIKU: ModelMetadata(
        "anthropic", 200000, 64000
    ),  # claude-haiku-4-5-20251001
    LlmModel.CLAUDE_3_7_SONNET: ModelMetadata(
        "anthropic", 200000, 64000
    ),  # claude-3-7-sonnet-20250219
    LlmModel.CLAUDE_3_HAIKU: ModelMetadata(
        "anthropic", 200000, 4096
    ),  # claude-3-haiku-20240307
    # https://docs.aimlapi.com/api-overview/model-database/text-models
    LlmModel.AIML_API_QWEN2_5_72B: ModelMetadata("aiml_api", 32000, 8000),
    LlmModel.AIML_API_LLAMA3_1_70B: ModelMetadata("aiml_api", 128000, 40000),
    LlmModel.AIML_API_LLAMA3_3_70B: ModelMetadata("aiml_api", 128000, None),
    LlmModel.AIML_API_META_LLAMA_3_1_70B: ModelMetadata("aiml_api", 131000, 2000),
    LlmModel.AIML_API_LLAMA_3_2_3B: ModelMetadata("aiml_api", 128000, None),
    # https://console.groq.com/docs/models
    LlmModel.LLAMA3_3_70B: ModelMetadata("groq", 128000, 32768),
    LlmModel.LLAMA3_1_8B: ModelMetadata("groq", 128000, 8192),
    # https://ollama.com/library
    LlmModel.OLLAMA_LLAMA3_3: ModelMetadata("ollama", 8192, None),
    LlmModel.OLLAMA_LLAMA3_2: ModelMetadata("ollama", 8192, None),
    LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192, None),
    LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata("ollama", 8192, None),
    LlmModel.OLLAMA_DOLPHIN: ModelMetadata("ollama", 32768, None),
    # https://openrouter.ai/models
    LlmModel.GEMINI_2_5_PRO: ModelMetadata("open_router", 1050000, 8192),
    LlmModel.GEMINI_3_PRO_PREVIEW: ModelMetadata("open_router", 1048576, 65535),
    LlmModel.GEMINI_2_5_FLASH: ModelMetadata("open_router", 1048576, 65535),
    LlmModel.GEMINI_2_0_FLASH: ModelMetadata("open_router", 1048576, 8192),
    LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: ModelMetadata(
        "open_router", 1048576, 65535
    ),
    LlmModel.GEMINI_2_0_FLASH_LITE: ModelMetadata("open_router", 1048576, 8192),
    LlmModel.MISTRAL_NEMO: ModelMetadata("open_router", 128000, 4096),
    LlmModel.COHERE_COMMAND_R_08_2024: ModelMetadata("open_router", 128000, 4096),
    LlmModel.COHERE_COMMAND_R_PLUS_08_2024: ModelMetadata("open_router", 128000, 4096),
    LlmModel.DEEPSEEK_CHAT: ModelMetadata("open_router", 64000, 2048),
    LlmModel.DEEPSEEK_R1_0528: ModelMetadata("open_router", 163840, 163840),
    LlmModel.PERPLEXITY_SONAR: ModelMetadata("open_router", 127000, 8000),
    LlmModel.PERPLEXITY_SONAR_PRO: ModelMetadata("open_router", 200000, 8000),
    LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: ModelMetadata(
        "open_router",
        128000,
        16000,
    ),
    LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B: ModelMetadata(
        "open_router", 131000, 4096
    ),
    LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B: ModelMetadata(
        "open_router", 12288, 12288
    ),
    LlmModel.OPENAI_GPT_OSS_120B: ModelMetadata("open_router", 131072, 131072),
    LlmModel.OPENAI_GPT_OSS_20B: ModelMetadata("open_router", 131072, 32768),
    LlmModel.AMAZON_NOVA_LITE_V1: ModelMetadata("open_router", 300000, 5120),
    LlmModel.AMAZON_NOVA_MICRO_V1: ModelMetadata("open_router", 128000, 5120),
    LlmModel.AMAZON_NOVA_PRO_V1: ModelMetadata("open_router", 300000, 5120),
    LlmModel.MICROSOFT_WIZARDLM_2_8X22B: ModelMetadata("open_router", 65536, 4096),
    LlmModel.GRYPHE_MYTHOMAX_L2_13B: ModelMetadata("open_router", 4096, 4096),
    LlmModel.META_LLAMA_4_SCOUT: ModelMetadata("open_router", 131072, 131072),
    LlmModel.META_LLAMA_4_MAVERICK: ModelMetadata("open_router", 1048576, 1000000),
    LlmModel.GROK_4: ModelMetadata("open_router", 256000, 256000),
    LlmModel.GROK_4_FAST: ModelMetadata("open_router", 2000000, 30000),
    LlmModel.GROK_4_1_FAST: ModelMetadata("open_router", 2000000, 30000),
    LlmModel.GROK_CODE_FAST_1: ModelMetadata("open_router", 256000, 10000),
    LlmModel.KIMI_K2: ModelMetadata("open_router", 131000, 131000),
    LlmModel.QWEN3_235B_A22B_THINKING: ModelMetadata("open_router", 262144, 262144),
    LlmModel.QWEN3_CODER: ModelMetadata("open_router", 262144, 262144),
    # Llama API models
    LlmModel.LLAMA_API_LLAMA_4_SCOUT: ModelMetadata("llama_api", 128000, 4028),
    LlmModel.LLAMA_API_LLAMA4_MAVERICK: ModelMetadata("llama_api", 128000, 4028),
    LlmModel.LLAMA_API_LLAMA3_3_8B: ModelMetadata("llama_api", 128000, 4028),
    LlmModel.LLAMA_API_LLAMA3_3_70B: ModelMetadata("llama_api", 128000, 4028),
    # v0 by Vercel models
    LlmModel.V0_1_5_MD: ModelMetadata("v0", 128000, 64000),
    LlmModel.V0_1_5_LG: ModelMetadata("v0", 512000, 64000),
    LlmModel.V0_1_0_MD: ModelMetadata("v0", 128000, 64000),
}

# Default model constant for backward compatibility
# Uses the dynamic registry to get the default model
DEFAULT_LLM_MODEL = LlmModel.default()
DEFAULT_LLM_MODEL = LlmModel.GPT5_2

for model in LlmModel:
    if model not in MODEL_METADATA:
        raise ValueError(f"Missing MODEL_METADATA metadata for model: {model}")

class ToolCall(BaseModel):
@@ -327,94 +438,19 @@ async def llm_call(
    - prompt_tokens: The number of tokens used in the prompt.
    - completion_tokens: The number of tokens used in the completion.
    """
    # Get model metadata and check if enabled - with fallback support
    # The model we'll actually use (may differ if original is disabled)
    model_to_use = llm_model.value

    # Check if model is in registry and if it's enabled
    from backend.data.llm_registry import (
        get_fallback_model_for_disabled,
        get_model_info,
    )

    model_info = get_model_info(llm_model.value)

    if model_info and not model_info.is_enabled:
        # Model is disabled - try to find a fallback from the same provider
        fallback = get_fallback_model_for_disabled(llm_model.value)
        if fallback:
            logger.warning(
                f"Model '{llm_model.value}' is disabled. Using fallback model '{fallback.slug}' from the same provider ({fallback.metadata.provider})."
            )
            model_to_use = fallback.slug
            # Use fallback model's metadata
            provider = fallback.metadata.provider
            context_window = fallback.metadata.context_window
            model_max_output = fallback.metadata.max_output_tokens or int(2**15)
        else:
            # No fallback available - raise error
            raise ValueError(
                f"LLM model '{llm_model.value}' is disabled and no fallback model "
                f"from the same provider is available. Please enable the model or "
                f"select a different model in the block configuration."
            )
    else:
        # Model is enabled or not in registry (legacy/static model)
        try:
            provider = llm_model.metadata.provider
            context_window = llm_model.context_window
            model_max_output = llm_model.max_output_tokens or int(2**15)
        except ValueError:
            # Model not in cache - try refreshing the registry once if we have DB access
            logger.warning(f"Model {llm_model.value} not found in registry cache")

            # Try refreshing the registry if we have database access
            from backend.data.db import is_connected

            if is_connected():
                try:
                    logger.info(
                        f"Refreshing LLM registry and retrying lookup for {llm_model.value}"
                    )
                    await llm_registry.refresh_llm_registry()
                    # Try again after refresh
                    try:
                        provider = llm_model.metadata.provider
                        context_window = llm_model.context_window
                        model_max_output = llm_model.max_output_tokens or int(2**15)
                        logger.info(
                            f"Successfully loaded model {llm_model.value} metadata after registry refresh"
                        )
                    except ValueError:
                        # Still not found after refresh
                        raise ValueError(
                            f"LLM model '{llm_model.value}' not found in registry after refresh. "
                            "Please ensure the model is added and enabled in the LLM registry via the admin UI."
                        )
                except Exception as refresh_exc:
                    logger.error(f"Failed to refresh LLM registry: {refresh_exc}")
                    raise ValueError(
                        f"LLM model '{llm_model.value}' not found in registry and failed to refresh. "
                        "Please ensure the model is added to the LLM registry via the admin UI."
                    ) from refresh_exc
            else:
                # No DB access (e.g., in executor without direct DB connection)
                # The registry should have been loaded on startup
                raise ValueError(
                    f"LLM model '{llm_model.value}' not found in registry cache. "
                    "The registry may need to be refreshed. Please contact support or try again later."
                )
    provider = llm_model.metadata.provider
    context_window = llm_model.context_window

    if compress_prompt_to_fit:
        prompt = compress_prompt(
            messages=prompt,
            target_tokens=context_window // 2,
            target_tokens=llm_model.context_window // 2,
            lossy_ok=True,
        )

    # Calculate available tokens based on context window and input length
    estimated_input_tokens = estimate_token_count(prompt)
    # model_max_output already set above
    model_max_output = llm_model.max_output_tokens or int(2**15)
    user_max = max_tokens or model_max_output
    available_tokens = max(context_window - estimated_input_tokens, 0)
    max_tokens = max(min(available_tokens, model_max_output, user_max), 1)
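
The clamping above reduces to three bounds: the space left in the context window, the model's output cap, and the caller's requested cap, with a floor of one token. A minimal standalone sketch of the same arithmetic (the numbers are illustrative, not taken from any real model):

def budget_completion_tokens(
    context_window: int,
    estimated_input_tokens: int,
    model_max_output: int,
    user_max: int | None = None,
) -> int:
    # Never exceed the space left in the context window, the model's output
    # cap, or the user's requested cap -- and always request at least 1 token.
    available = max(context_window - estimated_input_tokens, 0)
    return max(min(available, model_max_output, user_max or model_max_output), 1)

# A 128k-context model with a 16,384-token output cap and a 120k-token prompt
# leaves an 8,000-token completion budget:
assert budget_completion_tokens(128000, 120000, 16384) == 8000
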
@@ -432,7 +468,7 @@ async def llm_call(
        response_format = {"type": "json_object"}

        response = await oai_client.chat.completions.create(
            model=model_to_use,
            model=llm_model.value,
            messages=prompt,  # type: ignore
            response_format=response_format,  # type: ignore
            max_completion_tokens=max_tokens,
@@ -479,7 +515,7 @@ async def llm_call(
        )
        try:
            resp = await client.messages.create(
                model=model_to_use,
                model=llm_model.value,
                system=sysprompt,
                messages=messages,
                max_tokens=max_tokens,
@@ -543,7 +579,7 @@ async def llm_call(
        client = AsyncGroq(api_key=credentials.api_key.get_secret_value())
        response_format = {"type": "json_object"} if force_json_output else None
        response = await client.chat.completions.create(
            model=model_to_use,
            model=llm_model.value,
            messages=prompt,  # type: ignore
            response_format=response_format,  # type: ignore
            max_tokens=max_tokens,
@@ -565,7 +601,7 @@ async def llm_call(
        sys_messages = [p["content"] for p in prompt if p["role"] == "system"]
        usr_messages = [p["content"] for p in prompt if p["role"] != "system"]
        response = await client.generate(
            model=model_to_use,
            model=llm_model.value,
            prompt=f"{sys_messages}\n\n{usr_messages}",
            stream=False,
            options={"num_ctx": max_tokens},
@@ -595,7 +631,7 @@ async def llm_call(
                "HTTP-Referer": "https://agpt.co",
                "X-Title": "AutoGPT",
            },
            model=model_to_use,
            model=llm_model.value,
            messages=prompt,  # type: ignore
            max_tokens=max_tokens,
            tools=tools_param,  # type: ignore
@@ -637,7 +673,7 @@ async def llm_call(
                "HTTP-Referer": "https://agpt.co",
                "X-Title": "AutoGPT",
            },
            model=model_to_use,
            model=llm_model.value,
            messages=prompt,  # type: ignore
            max_tokens=max_tokens,
            tools=tools_param,  # type: ignore
@@ -664,7 +700,7 @@ async def llm_call(
            reasoning=reasoning,
        )
    elif provider == "aiml_api":
        client = openai.AsyncOpenAI(
        client = openai.OpenAI(
            base_url="https://api.aimlapi.com/v2",
            api_key=credentials.api_key.get_secret_value(),
            default_headers={
@@ -674,8 +710,8 @@ async def llm_call(
            },
        )

        completion = await client.chat.completions.create(
            model=model_to_use,
        completion = client.chat.completions.create(
            model=llm_model.value,
            messages=prompt,  # type: ignore
            max_tokens=max_tokens,
        )
@@ -707,7 +743,7 @@ async def llm_call(
        )

        response = await client.chat.completions.create(
            model=model_to_use,
            model=llm_model.value,
            messages=prompt,  # type: ignore
            response_format=response_format,  # type: ignore
            max_tokens=max_tokens,
@@ -758,10 +794,9 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
    )
    model: LlmModel = SchemaField(
        title="LLM Model",
        default_factory=LlmModel.default,
        default=DEFAULT_LLM_MODEL,
        description="The language model to use for answering the prompt.",
        advanced=False,
        json_schema_extra=llm_model_schema_extra(),
    )
    force_json_output: bool = SchemaField(
        title="Restrict LLM to pure JSON output",
@@ -824,7 +859,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
            input_schema=AIStructuredResponseGeneratorBlock.Input,
            output_schema=AIStructuredResponseGeneratorBlock.Output,
            test_input={
                "model": "gpt-4o",  # Using string value - enum accepts any model slug dynamically
                "model": DEFAULT_LLM_MODEL,
                "credentials": TEST_CREDENTIALS_INPUT,
                "expected_format": {
                    "key1": "value1",
@@ -1190,10 +1225,9 @@ class AITextGeneratorBlock(AIBlockBase):
    )
    model: LlmModel = SchemaField(
        title="LLM Model",
        default_factory=LlmModel.default,
        default=DEFAULT_LLM_MODEL,
        description="The language model to use for answering the prompt.",
        advanced=False,
        json_schema_extra=llm_model_schema_extra(),
    )
    credentials: AICredentials = AICredentialsField()
    sys_prompt: str = SchemaField(
@@ -1287,9 +1321,8 @@ class AITextSummarizerBlock(AIBlockBase):
    )
    model: LlmModel = SchemaField(
        title="LLM Model",
        default_factory=LlmModel.default,
        default=DEFAULT_LLM_MODEL,
        description="The language model to use for summarizing the text.",
        json_schema_extra=llm_model_schema_extra(),
    )
    focus: str = SchemaField(
        title="Focus",
@@ -1505,9 +1538,8 @@ class AIConversationBlock(AIBlockBase):
    )
    model: LlmModel = SchemaField(
        title="LLM Model",
        default_factory=LlmModel.default,
        default=DEFAULT_LLM_MODEL,
        description="The language model to use for the conversation.",
        json_schema_extra=llm_model_schema_extra(),
    )
    credentials: AICredentials = AICredentialsField()
    max_tokens: int | None = SchemaField(
@@ -1544,7 +1576,7 @@ class AIConversationBlock(AIBlockBase):
                },
                {"role": "user", "content": "Where was it played?"},
            ],
            "model": "gpt-4o",  # Using string value - enum accepts any model slug dynamically
            "model": DEFAULT_LLM_MODEL,
            "credentials": TEST_CREDENTIALS_INPUT,
        },
        test_credentials=TEST_CREDENTIALS,
@@ -1607,10 +1639,9 @@ class AIListGeneratorBlock(AIBlockBase):
    )
    model: LlmModel = SchemaField(
        title="LLM Model",
        default_factory=LlmModel.default,
        default=DEFAULT_LLM_MODEL,
        description="The language model to use for generating the list.",
        advanced=True,
        json_schema_extra=llm_model_schema_extra(),
    )
    credentials: AICredentials = AICredentialsField()
    max_retries: int = SchemaField(
@@ -1665,7 +1696,7 @@ class AIListGeneratorBlock(AIBlockBase):
                "drawing explorers to uncover its mysteries. Each planet showcases the limitless possibilities of "
                "fictional worlds."
            ),
            "model": "gpt-4o",  # Using string value - enum accepts any model slug dynamically
            "model": DEFAULT_LLM_MODEL,
            "credentials": TEST_CREDENTIALS_INPUT,
            "max_retries": 3,
            "force_json_output": False,

@@ -226,10 +226,9 @@ class SmartDecisionMakerBlock(Block):
    )
    model: llm.LlmModel = SchemaField(
        title="LLM Model",
        default_factory=llm.LlmModel.default,
        default=llm.DEFAULT_LLM_MODEL,
        description="The language model to use for answering the prompt.",
        advanced=False,
        json_schema_extra=llm.llm_model_schema_extra(),
    )
    credentials: llm.AICredentials = llm.AICredentialsField()
    multiple_tool_calls: bool = SchemaField(

@@ -10,13 +10,13 @@ import stagehand.main
from stagehand import Stagehand

from backend.blocks.llm import (
    MODEL_METADATA,
    AICredentials,
    AICredentialsField,
    LlmModel,
    ModelMetadata,
)
from backend.blocks.stagehand._config import stagehand as stagehand_provider
from backend.data import llm_registry
from backend.sdk import (
    APIKeyCredentials,
    Block,
@@ -91,7 +91,7 @@ class StagehandRecommendedLlmModel(str, Enum):
        Returns the provider name for the model in the required format for Stagehand:
        provider/model_name
        """
        model_metadata = self.metadata
        model_metadata = MODEL_METADATA[LlmModel(self.value)]
        model_name = self.value

        if len(model_name.split("/")) == 1 and not self.value.startswith(
@@ -107,23 +107,19 @@ class StagehandRecommendedLlmModel(str, Enum):

    @property
    def provider(self) -> str:
        return self.metadata.provider
        return MODEL_METADATA[LlmModel(self.value)].provider

    @property
    def metadata(self) -> ModelMetadata:
        metadata = llm_registry.get_llm_model_metadata(self.value)
        if metadata:
            return metadata
        # Fallback to LlmModel enum if registry lookup fails
        return LlmModel(self.value).metadata
        return MODEL_METADATA[LlmModel(self.value)]

    @property
    def context_window(self) -> int:
        return self.metadata.context_window
        return MODEL_METADATA[LlmModel(self.value)].context_window

    @property
    def max_output_tokens(self) -> int | None:
        return self.metadata.max_output_tokens
        return MODEL_METADATA[LlmModel(self.value)].max_output_tokens
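
The docstring above pins down the "provider/model_name" shape Stagehand expects; since the hunk that applies the prefix is truncated here, the following is only a sketch of the normalization, under the assumption that already-namespaced slugs are passed through unchanged:

def to_stagehand_model_name(provider: str, model_name: str) -> str:
    # Slugs that already carry a namespace (e.g. "openai/gpt-oss-120b") are
    # assumed to be left as-is; bare slugs get the provider prepended.
    if "/" in model_name:
        return model_name
    return f"{provider}/{model_name}"

assert to_stagehand_model_name("openai", "gpt-4o") == "openai/gpt-4o"
assert to_stagehand_model_name("open_router", "x-ai/grok-4") == "x-ai/grok-4"
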


class StagehandObserveBlock(Block):

@@ -25,7 +25,6 @@ from prisma.models import AgentBlock
from prisma.types import AgentBlockCreateInput
from pydantic import BaseModel

from backend.data.llm_registry import update_schema_with_llm_registry
from backend.data.model import NodeExecutionStats
from backend.integrations.providers import ProviderName
from backend.util import json
@@ -144,59 +143,35 @@ class BlockInfo(BaseModel):


class BlockSchema(BaseModel):
    cached_jsonschema: ClassVar[dict[str, Any] | None] = None

    @classmethod
    def clear_schema_cache(cls) -> None:
        """Clear the cached JSON schema for this class."""
        # Use None instead of {} because {} is truthy and would prevent regeneration
        cls.cached_jsonschema = None  # type: ignore

    @staticmethod
    def clear_all_schema_caches() -> None:
        """Clear cached JSON schemas for all BlockSchema subclasses."""

        def clear_recursive(cls: type) -> None:
            """Recursively clear cache for class and all subclasses."""
            if hasattr(cls, "clear_schema_cache"):
                cls.clear_schema_cache()
            for subclass in cls.__subclasses__():
                clear_recursive(subclass)

        clear_recursive(BlockSchema)
    cached_jsonschema: ClassVar[dict[str, Any]]

    @classmethod
    def jsonschema(cls) -> dict[str, Any]:
        # Generate schema if not cached
        if not cls.cached_jsonschema:
            model = jsonref.replace_refs(cls.model_json_schema(), merge_props=True)
        if cls.cached_jsonschema:
            return cls.cached_jsonschema

            def ref_to_dict(obj):
                if isinstance(obj, dict):
                    # OpenAPI <3.1 does not support sibling fields that have a $ref key
                    # So sometimes, the schema has an "allOf"/"anyOf"/"oneOf" with 1 item.
                    keys = {"allOf", "anyOf", "oneOf"}
                    one_key = next(
                        (k for k in keys if k in obj and len(obj[k]) == 1), None
                    )
                    if one_key:
                        obj.update(obj[one_key][0])
        model = jsonref.replace_refs(cls.model_json_schema(), merge_props=True)

                    return {
                        key: ref_to_dict(value)
                        for key, value in obj.items()
                        if not key.startswith("$") and key != one_key
                    }
                elif isinstance(obj, list):
                    return [ref_to_dict(item) for item in obj]
        def ref_to_dict(obj):
            if isinstance(obj, dict):
                # OpenAPI <3.1 does not support sibling fields that have a $ref key
                # So sometimes, the schema has an "allOf"/"anyOf"/"oneOf" with 1 item.
                keys = {"allOf", "anyOf", "oneOf"}
                one_key = next((k for k in keys if k in obj and len(obj[k]) == 1), None)
                if one_key:
                    obj.update(obj[one_key][0])

                return obj
                return {
                    key: ref_to_dict(value)
                    for key, value in obj.items()
                    if not key.startswith("$") and key != one_key
                }
            elif isinstance(obj, list):
                return [ref_to_dict(item) for item in obj]

            cls.cached_jsonschema = cast(dict[str, Any], ref_to_dict(model))
            return obj

        # Always post-process to ensure LLM registry data is up-to-date
        # This refreshes model options and discriminator mappings even if schema was cached
        update_schema_with_llm_registry(cls.cached_jsonschema, cls)
        cls.cached_jsonschema = cast(dict[str, Any], ref_to_dict(model))

        return cls.cached_jsonschema

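
The ref_to_dict pass exists because OpenAPI below 3.1 rejects sibling keys next to $ref, so Pydantic's one-item "allOf" wrappers have to be flattened. A self-contained sketch of just that collapse (non-recursive, and without the $-key filtering of the full version):

def collapse_single_wrapper(obj: dict) -> dict:
    keys = {"allOf", "anyOf", "oneOf"}
    one_key = next((k for k in keys if k in obj and len(obj[k]) == 1), None)
    if one_key:
        obj.update(obj[one_key][0])  # hoist the wrapped schema onto the field
        obj.pop(one_key)
    return obj

before = {"title": "Model", "allOf": [{"type": "string", "enum": ["gpt-4o"]}]}
assert collapse_single_wrapper(before) == {
    "title": "Model",
    "type": "string",
    "enum": ["gpt-4o"],
}
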
@@ -705,12 +680,23 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
        return False, reviewed_data

    async def _execute(self, input_data: BlockInput, **kwargs) -> BlockOutput:
        # Check for review requirement and get potentially modified input data
        should_pause, input_data = await self.is_block_exec_need_review(
            input_data, **kwargs
        # Check for review requirement only if running within a graph execution context
        # Direct block execution (e.g., from chat) skips the review process
        has_graph_context = all(
            key in kwargs
            for key in (
                "node_exec_id",
                "graph_exec_id",
                "graph_id",
                "execution_context",
            )
        )
        if should_pause:
            return
        if has_graph_context:
            should_pause, input_data = await self.is_block_exec_need_review(
                input_data, **kwargs
            )
            if should_pause:
                return

        # Validate the input data (original or reviewer-modified) once
        if error := self.input_schema.validate_data(input_data):
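
The gate above keys off the presence of the full set of graph-execution kwargs; a runnable miniature of the same check:

REQUIRED_GRAPH_KEYS = ("node_exec_id", "graph_exec_id", "graph_id", "execution_context")

def has_graph_context(**kwargs) -> bool:
    # Review gating only applies inside a graph execution; direct invocations
    # (e.g. from chat) do not carry these keys and skip the review path.
    return all(key in kwargs for key in REQUIRED_GRAPH_KEYS)

assert not has_graph_context(user_id="abc")
assert has_graph_context(
    node_exec_id="n1", graph_exec_id="g1", graph_id="G", execution_context={}
)
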
@@ -884,28 +870,6 @@ def is_block_auth_configured(


async def initialize_blocks() -> None:
    # Refresh LLM registry before initializing blocks so blocks can use registry data
    # This ensures the registry cache is populated even in executor context
    try:
        from backend.data import llm_registry
        from backend.data.block_cost_config import refresh_llm_costs

        # Only refresh if we have DB access (check if Prisma is connected)
        from backend.data.db import is_connected

        if is_connected():
            await llm_registry.refresh_llm_registry()
            refresh_llm_costs()
            logger.info("LLM registry refreshed during block initialization")
        else:
            logger.warning(
                "Prisma not connected, skipping LLM registry refresh during block initialization"
            )
    except Exception as exc:
        logger.warning(
            "Failed to refresh LLM registry during block initialization: %s", exc
        )

    # First, sync all provider costs to blocks
    # Imported here to avoid circular import
    from backend.sdk.cost_integration import sync_all_provider_costs

@@ -1,4 +1,3 @@
import logging
from typing import Type

from backend.blocks.ai_image_customizer import AIImageCustomizerBlock, GeminiImageModel
@@ -24,18 +23,19 @@ from backend.blocks.ideogram import IdeogramModelBlock
from backend.blocks.jina.embeddings import JinaEmbeddingBlock
from backend.blocks.jina.search import ExtractWebsiteContentBlock, SearchTheWebBlock
from backend.blocks.llm import (
    MODEL_METADATA,
    AIConversationBlock,
    AIListGeneratorBlock,
    AIStructuredResponseGeneratorBlock,
    AITextGeneratorBlock,
    AITextSummarizerBlock,
    LlmModel,
)
from backend.blocks.replicate.flux_advanced import ReplicateFluxAdvancedModelBlock
from backend.blocks.replicate.replicate_block import ReplicateModelBlock
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
from backend.blocks.talking_head import CreateTalkingAvatarVideoBlock
from backend.blocks.text_to_speech_block import UnrealTextToSpeechBlock
from backend.data import llm_registry
from backend.data.block import Block, BlockCost, BlockCostType
from backend.integrations.credentials_store import (
    aiml_api_credentials,
@@ -55,63 +55,210 @@ from backend.integrations.credentials_store import (
    v0_credentials,
)

logger = logging.getLogger(__name__)
# =============== Configure the cost for each LLM Model call =============== #

PROVIDER_CREDENTIALS = {
    "openai": openai_credentials,
    "anthropic": anthropic_credentials,
    "groq": groq_credentials,
    "open_router": open_router_credentials,
    "llama_api": llama_api_credentials,
    "aiml_api": aiml_api_credentials,
    "v0": v0_credentials,
MODEL_COST: dict[LlmModel, int] = {
    LlmModel.O3: 4,
    LlmModel.O3_MINI: 2,
    LlmModel.O1: 16,
    LlmModel.O1_MINI: 4,
    # GPT-5 models
    LlmModel.GPT5_2: 6,
    LlmModel.GPT5_1: 5,
    LlmModel.GPT5: 2,
    LlmModel.GPT5_MINI: 1,
    LlmModel.GPT5_NANO: 1,
    LlmModel.GPT5_CHAT: 5,
    LlmModel.GPT41: 2,
    LlmModel.GPT41_MINI: 1,
    LlmModel.GPT4O_MINI: 1,
    LlmModel.GPT4O: 3,
    LlmModel.GPT4_TURBO: 10,
    LlmModel.GPT3_5_TURBO: 1,
    LlmModel.CLAUDE_4_1_OPUS: 21,
    LlmModel.CLAUDE_4_OPUS: 21,
    LlmModel.CLAUDE_4_SONNET: 5,
    LlmModel.CLAUDE_4_5_HAIKU: 4,
    LlmModel.CLAUDE_4_5_OPUS: 14,
    LlmModel.CLAUDE_4_5_SONNET: 9,
    LlmModel.CLAUDE_3_7_SONNET: 5,
    LlmModel.CLAUDE_3_HAIKU: 1,
    LlmModel.AIML_API_QWEN2_5_72B: 1,
    LlmModel.AIML_API_LLAMA3_1_70B: 1,
    LlmModel.AIML_API_LLAMA3_3_70B: 1,
    LlmModel.AIML_API_META_LLAMA_3_1_70B: 1,
    LlmModel.AIML_API_LLAMA_3_2_3B: 1,
    LlmModel.LLAMA3_3_70B: 1,
    LlmModel.LLAMA3_1_8B: 1,
    LlmModel.OLLAMA_LLAMA3_3: 1,
    LlmModel.OLLAMA_LLAMA3_2: 1,
    LlmModel.OLLAMA_LLAMA3_8B: 1,
    LlmModel.OLLAMA_LLAMA3_405B: 1,
    LlmModel.OLLAMA_DOLPHIN: 1,
    LlmModel.OPENAI_GPT_OSS_120B: 1,
    LlmModel.OPENAI_GPT_OSS_20B: 1,
    LlmModel.GEMINI_2_5_PRO: 4,
    LlmModel.GEMINI_3_PRO_PREVIEW: 5,
    LlmModel.MISTRAL_NEMO: 1,
    LlmModel.COHERE_COMMAND_R_08_2024: 1,
    LlmModel.COHERE_COMMAND_R_PLUS_08_2024: 3,
    LlmModel.DEEPSEEK_CHAT: 2,
    LlmModel.PERPLEXITY_SONAR: 1,
    LlmModel.PERPLEXITY_SONAR_PRO: 5,
    LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: 10,
    LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B: 1,
    LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B: 1,
    LlmModel.AMAZON_NOVA_LITE_V1: 1,
    LlmModel.AMAZON_NOVA_MICRO_V1: 1,
    LlmModel.AMAZON_NOVA_PRO_V1: 1,
    LlmModel.MICROSOFT_WIZARDLM_2_8X22B: 1,
    LlmModel.GRYPHE_MYTHOMAX_L2_13B: 1,
    LlmModel.META_LLAMA_4_SCOUT: 1,
    LlmModel.META_LLAMA_4_MAVERICK: 1,
    LlmModel.LLAMA_API_LLAMA_4_SCOUT: 1,
    LlmModel.LLAMA_API_LLAMA4_MAVERICK: 1,
    LlmModel.LLAMA_API_LLAMA3_3_8B: 1,
    LlmModel.LLAMA_API_LLAMA3_3_70B: 1,
    LlmModel.GROK_4: 9,
    LlmModel.GROK_4_FAST: 1,
    LlmModel.GROK_4_1_FAST: 1,
    LlmModel.GROK_CODE_FAST_1: 1,
    LlmModel.KIMI_K2: 1,
    LlmModel.QWEN3_235B_A22B_THINKING: 1,
    LlmModel.QWEN3_CODER: 9,
    LlmModel.GEMINI_2_5_FLASH: 1,
    LlmModel.GEMINI_2_0_FLASH: 1,
    LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: 1,
    LlmModel.GEMINI_2_0_FLASH_LITE: 1,
    LlmModel.DEEPSEEK_R1_0528: 1,
    # v0 by Vercel models
    LlmModel.V0_1_5_MD: 1,
    LlmModel.V0_1_5_LG: 2,
    LlmModel.V0_1_0_MD: 1,
}

# =============== Configure the cost for each LLM Model call =============== #
# All LLM costs now come from the database via llm_registry

LLM_COST: list[BlockCost] = []
for model in LlmModel:
    if model not in MODEL_COST:
        raise ValueError(f"Missing MODEL_COST for model: {model}")


def _build_llm_costs_from_registry() -> list[BlockCost]:
    """Build BlockCost list from all models in the LLM registry."""
    costs: list[BlockCost] = []
    for model in llm_registry.iter_dynamic_models():
        for cost in model.costs:
            credentials = PROVIDER_CREDENTIALS.get(cost.credential_provider)
            if not credentials:
                logger.warning(
                    "Skipping cost entry for %s due to unknown credentials provider %s",
                    model.slug,
                    cost.credential_provider,
                )
                continue
            cost_filter = {
                "model": model.slug,
LLM_COST = (
    # Anthropic Models
    [
        BlockCost(
            cost_type=BlockCostType.RUN,
            cost_filter={
                "model": model,
                "credentials": {
                    "id": credentials.id,
                    "provider": credentials.provider,
                    "type": credentials.type,
                    "id": anthropic_credentials.id,
                    "provider": anthropic_credentials.provider,
                    "type": anthropic_credentials.type,
                },
            }
            costs.append(
                BlockCost(
                    cost_type=BlockCostType.RUN,
                    cost_filter=cost_filter,
                    cost_amount=cost.credit_cost,
                )
            )
    return costs


def refresh_llm_costs() -> None:
    """Refresh LLM costs from the registry. All costs now come from the database."""
    LLM_COST.clear()
    LLM_COST.extend(_build_llm_costs_from_registry())


# Initial load will happen after registry is refreshed at startup
# Don't call refresh_llm_costs() here - it will be called after registry refresh
            },
            cost_amount=cost,
        )
        for model, cost in MODEL_COST.items()
        if MODEL_METADATA[model].provider == "anthropic"
    ]
    # OpenAI Models
    + [
        BlockCost(
            cost_type=BlockCostType.RUN,
            cost_filter={
                "model": model,
                "credentials": {
                    "id": openai_credentials.id,
                    "provider": openai_credentials.provider,
                    "type": openai_credentials.type,
                },
            },
            cost_amount=cost,
        )
        for model, cost in MODEL_COST.items()
        if MODEL_METADATA[model].provider == "openai"
    ]
    # Groq Models
    + [
        BlockCost(
            cost_type=BlockCostType.RUN,
            cost_filter={
                "model": model,
                "credentials": {"id": groq_credentials.id},
            },
            cost_amount=cost,
        )
        for model, cost in MODEL_COST.items()
        if MODEL_METADATA[model].provider == "groq"
    ]
    # Open Router Models
    + [
        BlockCost(
            cost_type=BlockCostType.RUN,
            cost_filter={
                "model": model,
                "credentials": {
                    "id": open_router_credentials.id,
                    "provider": open_router_credentials.provider,
                    "type": open_router_credentials.type,
                },
            },
            cost_amount=cost,
        )
        for model, cost in MODEL_COST.items()
        if MODEL_METADATA[model].provider == "open_router"
    ]
    # Llama API Models
    + [
        BlockCost(
            cost_type=BlockCostType.RUN,
            cost_filter={
                "model": model,
                "credentials": {
                    "id": llama_api_credentials.id,
                    "provider": llama_api_credentials.provider,
                    "type": llama_api_credentials.type,
                },
            },
            cost_amount=cost,
        )
        for model, cost in MODEL_COST.items()
        if MODEL_METADATA[model].provider == "llama_api"
    ]
    # v0 by Vercel Models
    + [
        BlockCost(
            cost_type=BlockCostType.RUN,
            cost_filter={
                "model": model,
                "credentials": {
                    "id": v0_credentials.id,
                    "provider": v0_credentials.provider,
                    "type": v0_credentials.type,
                },
            },
            cost_amount=cost,
        )
        for model, cost in MODEL_COST.items()
        if MODEL_METADATA[model].provider == "v0"
    ]
    # AI/ML Api Models
    + [
        BlockCost(
            cost_type=BlockCostType.RUN,
            cost_filter={
                "model": model,
                "credentials": {
                    "id": aiml_api_credentials.id,
                    "provider": aiml_api_credentials.provider,
                    "type": aiml_api_credentials.type,
                },
            },
            cost_amount=cost,
        )
        for model, cost in MODEL_COST.items()
        if MODEL_METADATA[model].provider == "aiml_api"
    ]
)
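
Each BlockCost above pairs a credit amount with a cost_filter keyed on the model slug and the credential identity. The matching semantics are not shown in this diff; a plausible reading is a recursive subset match, sketched here as an assumption rather than the project's actual matcher:

def filter_matches(cost_filter: dict, input_data: dict) -> bool:
    # Hypothetical subset match: every filter key must be present and equal
    # in the input, recursing into nested dicts such as "credentials".
    for key, expected in cost_filter.items():
        actual = input_data.get(key)
        if isinstance(expected, dict):
            if not isinstance(actual, dict) or not filter_matches(expected, actual):
                return False
        elif actual != expected:
            return False
    return True

f = {"model": "gpt-4o", "credentials": {"id": "openai-default"}}
assert filter_matches(f, {"model": "gpt-4o", "credentials": {"id": "openai-default", "type": "api_key"}})
assert not filter_matches(f, {"model": "gpt-4o-mini", "credentials": {"id": "openai-default"}})
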

# =============== This is the exhaustive list of costs for each Block =============== #


@@ -1483,10 +1483,8 @@ async def migrate_llm_models(migrate_to: LlmModel):
        if field.annotation == LlmModel:
            llm_model_fields[block.id] = field_name

    # Get all model slugs from the registry (dynamic, not hardcoded enum)
    from backend.data import llm_registry

    enum_values = list(llm_registry.get_all_model_slugs_for_validation())
    # Convert enum values to a list of strings for the SQL query
    enum_values = [v.value for v in LlmModel]
    escaped_enum_values = repr(tuple(enum_values))  # hack but works

    # Update each block

@@ -1,72 +0,0 @@
"""
LLM Registry module for managing LLM models, providers, and costs dynamically.

This module provides a database-driven registry system for LLM models,
replacing hardcoded model configurations with a flexible admin-managed system.
"""

from backend.data.llm_registry.model_types import ModelMetadata

# Re-export for backwards compatibility
from backend.data.llm_registry.notifications import (
    REGISTRY_REFRESH_CHANNEL,
    publish_registry_refresh_notification,
    subscribe_to_registry_refresh,
)
from backend.data.llm_registry.registry import (
    RegistryModel,
    RegistryModelCost,
    RegistryModelCreator,
    get_all_model_slugs_for_validation,
    get_default_model_slug,
    get_dynamic_model_slugs,
    get_fallback_model_for_disabled,
    get_llm_discriminator_mapping,
    get_llm_model_cost,
    get_llm_model_metadata,
    get_llm_model_schema_options,
    get_model_info,
    is_model_enabled,
    iter_dynamic_models,
    refresh_llm_registry,
    register_static_costs,
    register_static_metadata,
)
from backend.data.llm_registry.schema_utils import (
    is_llm_model_field,
    refresh_llm_discriminator_mapping,
    refresh_llm_model_options,
    update_schema_with_llm_registry,
)

__all__ = [
    # Types
    "ModelMetadata",
    "RegistryModel",
    "RegistryModelCost",
    "RegistryModelCreator",
    # Registry functions
    "get_all_model_slugs_for_validation",
    "get_default_model_slug",
    "get_dynamic_model_slugs",
    "get_fallback_model_for_disabled",
    "get_llm_discriminator_mapping",
    "get_llm_model_cost",
    "get_llm_model_metadata",
    "get_llm_model_schema_options",
    "get_model_info",
    "is_model_enabled",
    "iter_dynamic_models",
    "refresh_llm_registry",
    "register_static_costs",
    "register_static_metadata",
    # Notifications
    "REGISTRY_REFRESH_CHANNEL",
    "publish_registry_refresh_notification",
    "subscribe_to_registry_refresh",
    # Schema utilities
    "is_llm_model_field",
    "refresh_llm_discriminator_mapping",
    "refresh_llm_model_options",
    "update_schema_with_llm_registry",
]
@@ -1,11 +0,0 @@
"""Type definitions for LLM model metadata."""

from typing import NamedTuple


class ModelMetadata(NamedTuple):
    """Metadata for an LLM model."""

    provider: str
    context_window: int
    max_output_tokens: int | None
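
Because ModelMetadata is a NamedTuple, the terse positional form used throughout the MODEL_METADATA table above is interchangeable with keyword construction:

from typing import NamedTuple

class ModelMetadata(NamedTuple):
    provider: str
    context_window: int
    max_output_tokens: int | None

assert ModelMetadata("openai", 128000, 16384) == ModelMetadata(
    provider="openai", context_window=128000, max_output_tokens=16384
)
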
@@ -1,89 +0,0 @@
"""
Redis pub/sub notifications for LLM registry updates.

When models are added/updated/removed via the admin UI, this module
publishes notifications to Redis that all executor services subscribe to,
ensuring they refresh their registry cache in real-time.
"""

import asyncio
import logging
from typing import Any

from backend.data.redis_client import connect_async

logger = logging.getLogger(__name__)

# Redis channel name for LLM registry refresh notifications
REGISTRY_REFRESH_CHANNEL = "llm_registry:refresh"


async def publish_registry_refresh_notification() -> None:
    """
    Publish a notification to Redis that the LLM registry has been updated.
    All executor services subscribed to this channel will refresh their registry.
    """
    try:
        redis = await connect_async()
        await redis.publish(REGISTRY_REFRESH_CHANNEL, "refresh")
        logger.info("Published LLM registry refresh notification to Redis")
    except Exception as exc:
        logger.warning(
            "Failed to publish LLM registry refresh notification: %s",
            exc,
            exc_info=True,
        )


async def subscribe_to_registry_refresh(
    on_refresh: Any,  # Async callable that takes no args
) -> None:
    """
    Subscribe to Redis notifications for LLM registry updates.
    This runs in a loop and processes messages as they arrive.

    Args:
        on_refresh: Async callable to execute when a refresh notification is received
    """
    try:
        redis = await connect_async()
        pubsub = redis.pubsub()
        await pubsub.subscribe(REGISTRY_REFRESH_CHANNEL)
        logger.info(
            "Subscribed to LLM registry refresh notifications on channel: %s",
            REGISTRY_REFRESH_CHANNEL,
        )

        # Process messages in a loop
        while True:
            try:
                message = await pubsub.get_message(
                    ignore_subscribe_messages=True, timeout=1.0
                )
                if (
                    message
                    and message["type"] == "message"
                    and message["channel"] == REGISTRY_REFRESH_CHANNEL
                ):
                    logger.info("Received LLM registry refresh notification")
                    try:
                        await on_refresh()
                    except Exception as exc:
                        logger.error(
                            "Error refreshing LLM registry from notification: %s",
                            exc,
                            exc_info=True,
                        )
            except Exception as exc:
                logger.warning(
                    "Error processing registry refresh message: %s", exc, exc_info=True
                )
                # Continue listening even if one message fails
            await asyncio.sleep(1)
    except Exception as exc:
        logger.error(
            "Failed to subscribe to LLM registry refresh notifications: %s",
            exc,
            exc_info=True,
        )
        raise
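
A sketch of how an executor service was presumably meant to consume this module before its removal in this compare; the wiring (task creation, and the callback pairing registry refresh with cost refresh) is illustrative, not taken from the diff:

from backend.data import llm_registry
from backend.data.block_cost_config import refresh_llm_costs

async def _on_registry_refresh() -> None:
    # Re-read models from the DB, then rebuild the derived cost table.
    await llm_registry.refresh_llm_registry()
    refresh_llm_costs()

async def run_registry_listener() -> None:
    # Runs forever; hold it as a background task, e.g.
    # asyncio.create_task(run_registry_listener()) during service startup.
    await llm_registry.subscribe_to_registry_refresh(_on_registry_refresh)
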
@@ -1,372 +0,0 @@
"""Core LLM registry implementation for managing models dynamically."""

from __future__ import annotations

import asyncio
import logging
from dataclasses import dataclass, field
from typing import Any, Iterable

import prisma.models

from backend.data.llm_registry.model_types import ModelMetadata

logger = logging.getLogger(__name__)


def _json_to_dict(value: Any) -> dict[str, Any]:
    """Convert Prisma Json type to dict, with fallback to empty dict."""
    if value is None:
        return {}
    if isinstance(value, dict):
        return value
    # Prisma Json type should always be a dict at runtime
    return dict(value) if value else {}


@dataclass(frozen=True)
class RegistryModelCost:
    """Cost configuration for an LLM model."""

    credit_cost: int
    credential_provider: str
    credential_id: str | None
    credential_type: str | None
    currency: str | None
    metadata: dict[str, Any]


@dataclass(frozen=True)
class RegistryModelCreator:
    """Creator information for an LLM model."""

    id: str
    name: str
    display_name: str
    description: str | None
    website_url: str | None
    logo_url: str | None


@dataclass(frozen=True)
class RegistryModel:
    """Represents a model in the LLM registry."""

    slug: str
    display_name: str
    description: str | None
    metadata: ModelMetadata
    capabilities: dict[str, Any]
    extra_metadata: dict[str, Any]
    provider_display_name: str
    is_enabled: bool
    is_recommended: bool = False
    costs: tuple[RegistryModelCost, ...] = field(default_factory=tuple)
    creator: RegistryModelCreator | None = None


_static_metadata: dict[str, ModelMetadata] = {}
_static_costs: dict[str, int] = {}
_dynamic_models: dict[str, RegistryModel] = {}
_schema_options: list[dict[str, str]] = []
_discriminator_mapping: dict[str, str] = {}
_lock = asyncio.Lock()


def register_static_metadata(metadata: dict[Any, ModelMetadata]) -> None:
    """Register static metadata for legacy models (deprecated)."""
    _static_metadata.update({str(key): value for key, value in metadata.items()})
    _refresh_cached_schema()


def register_static_costs(costs: dict[Any, int]) -> None:
    """Register static costs for legacy models (deprecated)."""
    _static_costs.update({str(key): value for key, value in costs.items()})


def _build_schema_options() -> list[dict[str, str]]:
    """Build schema options for model selection dropdown. Only includes enabled models."""
    options: list[dict[str, str]] = []
    # Only include enabled models in the dropdown options
    for model in sorted(_dynamic_models.values(), key=lambda m: m.display_name.lower()):
        if model.is_enabled:
            options.append(
                {
                    "label": model.display_name,
                    "value": model.slug,
                    "group": model.metadata.provider,
                    "description": model.description or "",
                }
            )

    for slug, metadata in _static_metadata.items():
        if slug in _dynamic_models:
            continue
        options.append(
            {
                "label": slug,
                "value": slug,
                "group": metadata.provider,
                "description": "",
            }
        )
    return options


async def refresh_llm_registry() -> None:
    """Refresh the LLM registry from the database. Loads all models (enabled and disabled)."""
    async with _lock:
        try:
            records = await prisma.models.LlmModel.prisma().find_many(
                include={
                    "Provider": True,
                    "Costs": True,
                    "Creator": True,
                }
            )
            logger.debug("Found %d LLM model records in database", len(records))
        except Exception as exc:
            logger.error(
                "Failed to refresh LLM registry from DB: %s", exc, exc_info=True
            )
            return

        dynamic: dict[str, RegistryModel] = {}
        for record in records:
            provider_name = (
                record.Provider.name if record.Provider else record.providerId
            )
            metadata = ModelMetadata(
                provider=provider_name,
                context_window=record.contextWindow,
                max_output_tokens=record.maxOutputTokens,
            )
            costs = tuple(
                RegistryModelCost(
                    credit_cost=cost.creditCost,
                    credential_provider=cost.credentialProvider,
                    credential_id=cost.credentialId,
                    credential_type=cost.credentialType,
                    currency=cost.currency,
                    metadata=_json_to_dict(cost.metadata),
                )
                for cost in (record.Costs or [])
            )

            # Map creator if present
            creator = None
            if record.Creator:
                creator = RegistryModelCreator(
                    id=record.Creator.id,
                    name=record.Creator.name,
                    display_name=record.Creator.displayName,
                    description=record.Creator.description,
                    website_url=record.Creator.websiteUrl,
                    logo_url=record.Creator.logoUrl,
                )

            dynamic[record.slug] = RegistryModel(
                slug=record.slug,
                display_name=record.displayName,
                description=record.description,
                metadata=metadata,
                capabilities=_json_to_dict(record.capabilities),
                extra_metadata=_json_to_dict(record.metadata),
                provider_display_name=(
                    record.Provider.displayName
                    if record.Provider
                    else record.providerId
                ),
                is_enabled=record.isEnabled,
                is_recommended=record.isRecommended,
                costs=costs,
                creator=creator,
            )

        # Atomic swap - build new structures then replace references
        # This ensures readers never see partially updated state
        global _dynamic_models
        _dynamic_models = dynamic
        _refresh_cached_schema()
        logger.info(
            "LLM registry refreshed with %s dynamic models (enabled: %s, disabled: %s)",
            len(dynamic),
            sum(1 for m in dynamic.values() if m.is_enabled),
            sum(1 for m in dynamic.values() if not m.is_enabled),
        )
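
The "atomic swap" comments describe a pattern worth isolating: build a complete replacement structure, then rebind the module-level name in one assignment, so concurrent readers either keep the old snapshot or see the new one, never a half-updated dict. Reduced to its essentials:

_registry: dict[str, str] = {}

def refresh(rows: list[tuple[str, str]]) -> None:
    global _registry
    new = {slug: provider for slug, provider in rows}
    _registry = new  # single rebinding; never mutate the live dict in place
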


def _refresh_cached_schema() -> None:
    """Refresh cached schema options and discriminator mapping."""
    global _schema_options, _discriminator_mapping

    # Build new structures
    new_options = _build_schema_options()
    new_mapping = {
        slug: entry.metadata.provider for slug, entry in _dynamic_models.items()
    }
    for slug, metadata in _static_metadata.items():
        new_mapping.setdefault(slug, metadata.provider)

    # Atomic swap - replace references to ensure readers see consistent state
    _schema_options = new_options
    _discriminator_mapping = new_mapping


def get_llm_model_metadata(slug: str) -> ModelMetadata | None:
    """Get model metadata by slug. Checks dynamic models first, then static metadata."""
    if slug in _dynamic_models:
        return _dynamic_models[slug].metadata
    return _static_metadata.get(slug)


def get_llm_model_cost(slug: str) -> tuple[RegistryModelCost, ...]:
    """Get model cost configuration by slug."""
    if slug in _dynamic_models:
        return _dynamic_models[slug].costs
    cost_value = _static_costs.get(slug)
    if cost_value is None:
        return tuple()
    return (
        RegistryModelCost(
            credit_cost=cost_value,
            credential_provider="static",
            credential_id=None,
            credential_type=None,
            currency=None,
            metadata={},
        ),
    )


def get_llm_model_schema_options() -> list[dict[str, str]]:
    """
    Get schema options for LLM model selection dropdown.

    Returns a copy of cached schema options that are refreshed when the registry is
    updated via refresh_llm_registry() (called on startup and via Redis pub/sub).
    """
    # Return a copy to prevent external mutation
    return list(_schema_options)


def get_llm_discriminator_mapping() -> dict[str, str]:
    """
    Get discriminator mapping for LLM models.

    Returns a copy of cached discriminator mapping that is refreshed when the registry
    is updated via refresh_llm_registry() (called on startup and via Redis pub/sub).
    """
    # Return a copy to prevent external mutation
    return dict(_discriminator_mapping)


def get_dynamic_model_slugs() -> set[str]:
    """Get all dynamic model slugs from the registry."""
    return set(_dynamic_models.keys())


def get_all_model_slugs_for_validation() -> set[str]:
    """
    Get ALL model slugs (both enabled and disabled) for validation purposes.

    This is used for JSON schema enum validation - we need to accept any known
    model value (even disabled ones) so that existing graphs don't fail validation.
    The actual fallback/enforcement happens at runtime in llm_call().
    """
    all_slugs = set(_dynamic_models.keys())
    all_slugs.update(_static_metadata.keys())
    return all_slugs


def iter_dynamic_models() -> Iterable[RegistryModel]:
    """Iterate over all dynamic models in the registry."""
    return tuple(_dynamic_models.values())


def get_fallback_model_for_disabled(disabled_model_slug: str) -> RegistryModel | None:
    """
    Find a fallback model when the requested model is disabled.

    Looks for an enabled model from the same provider. Prefers models with
    similar names or capabilities if possible.

    Args:
        disabled_model_slug: The slug of the disabled model

    Returns:
        An enabled RegistryModel from the same provider, or None if no fallback found
    """
    disabled_model = _dynamic_models.get(disabled_model_slug)
    if not disabled_model:
        return None

    provider = disabled_model.metadata.provider

    # Find all enabled models from the same provider
    candidates = [
        model
        for model in _dynamic_models.values()
        if model.is_enabled and model.metadata.provider == provider
    ]

    if not candidates:
        return None

    # Sort by: prefer models with similar context window, then by name
    candidates.sort(
        key=lambda m: (
            abs(m.metadata.context_window - disabled_model.metadata.context_window),
            m.display_name.lower(),
        )
    )

    return candidates[0]
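
A worked example of that ranking with toy data: among same-provider candidates, the smallest context-window gap wins, and display name breaks ties:

disabled_window = 200000
candidates = [
    ("Claude 4.5 Sonnet", 200000),
    ("Claude 3 Haiku", 200000),
    ("Some Small Model", 8192),
]
candidates.sort(key=lambda m: (abs(m[1] - disabled_window), m[0].lower()))
assert candidates[0][0] == "Claude 3 Haiku"  # zero gap, alphabetical tiebreak
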


def is_model_enabled(model_slug: str) -> bool:
    """Check if a model is enabled in the registry."""
    model = _dynamic_models.get(model_slug)
    if not model:
        # Model not in registry - assume it's a static/legacy model and allow it
        return True
    return model.is_enabled


def get_model_info(model_slug: str) -> RegistryModel | None:
    """Get model info from the registry."""
    return _dynamic_models.get(model_slug)


def get_default_model_slug() -> str | None:
    """
    Get the default model slug to use for block defaults.

    Returns the recommended model if set (configured via admin UI),
    otherwise returns the first enabled model alphabetically.
    Returns None if no models are available or enabled.
    """
    # Return the recommended model if one is set and enabled
    for model in _dynamic_models.values():
        if model.is_recommended and model.is_enabled:
            return model.slug

    # No recommended model set - find first enabled model alphabetically
    for model in sorted(_dynamic_models.values(), key=lambda m: m.display_name.lower()):
        if model.is_enabled:
            logger.warning(
                "No recommended model set, using '%s' as default",
                model.slug,
            )
            return model.slug

    # No enabled models available
    if _dynamic_models:
        logger.error(
            "No enabled models found in registry (%d models registered but all disabled)",
            len(_dynamic_models),
        )
    else:
        logger.error("No models registered in LLM registry")

    return None
@@ -1,130 +0,0 @@
"""
Helper utilities for LLM registry integration with block schemas.

This module handles the dynamic injection of discriminator mappings
and model options from the LLM registry into block schemas.
"""

import logging
from typing import Any

from backend.data.llm_registry.registry import (
    get_all_model_slugs_for_validation,
    get_default_model_slug,
    get_llm_discriminator_mapping,
    get_llm_model_schema_options,
)

logger = logging.getLogger(__name__)


def is_llm_model_field(field_name: str, field_info: Any) -> bool:
    """
    Check if a field is an LLM model selection field.

    Returns True if the field has 'options' in json_schema_extra
    (set by llm_model_schema_extra() in blocks/llm.py).
    """
    if not hasattr(field_info, "json_schema_extra"):
        return False

    extra = field_info.json_schema_extra
    if isinstance(extra, dict):
        return "options" in extra

    return False


def refresh_llm_model_options(field_schema: dict[str, Any]) -> None:
    """
    Refresh LLM model options from the registry.

    Updates 'options' (for frontend dropdown) to show only enabled models,
    but keeps the 'enum' (for validation) inclusive of ALL known models.

    This is important because:
    - Options: What users see in the dropdown (enabled models only)
    - Enum: What values pass validation (all known models, including disabled)

    Existing graphs may have disabled models selected - they should pass validation
    and the fallback logic in llm_call() will handle using an alternative model.
    """
    fresh_options = get_llm_model_schema_options()
    if not fresh_options:
        return

    # Update options array (UI dropdown) - only enabled models
    if "options" in field_schema:
        field_schema["options"] = fresh_options

    all_known_slugs = get_all_model_slugs_for_validation()
    if all_known_slugs and "enum" in field_schema:
        existing_enum = set(field_schema.get("enum", []))
        combined_enum = existing_enum | all_known_slugs
        field_schema["enum"] = sorted(combined_enum)

    # Set the default value from the registry (gpt-4o if available, else first enabled)
    # This ensures new blocks have a sensible default pre-selected
    default_slug = get_default_model_slug()
    if default_slug:
        field_schema["default"] = default_slug
|
||||
|
||||
def refresh_llm_discriminator_mapping(field_schema: dict[str, Any]) -> None:
|
||||
"""
|
||||
Refresh discriminator_mapping for fields that use model-based discrimination.
|
||||
|
||||
The discriminator is already set when AICredentialsField() creates the field.
|
||||
We only need to refresh the mapping when models are added/removed.
|
||||
"""
|
||||
if field_schema.get("discriminator") != "model":
|
||||
return
|
||||
|
||||
# Always refresh the mapping to get latest models
|
||||
fresh_mapping = get_llm_discriminator_mapping()
|
||||
if fresh_mapping:
|
||||
field_schema["discriminator_mapping"] = fresh_mapping
|
||||
|
||||
|
||||
def update_schema_with_llm_registry(
|
||||
schema: dict[str, Any], model_class: type | None = None
|
||||
) -> None:
|
||||
"""
|
||||
Update a JSON schema with current LLM registry data.
|
||||
|
||||
Refreshes:
|
||||
1. Model options for LLM model selection fields (dropdown choices)
|
||||
2. Discriminator mappings for credentials fields (model → provider)
|
||||
|
||||
Args:
|
||||
schema: The JSON schema to update (mutated in-place)
|
||||
model_class: The Pydantic model class (optional, for field introspection)
|
||||
"""
|
||||
properties = schema.get("properties", {})
|
||||
|
||||
for field_name, field_schema in properties.items():
|
||||
if not isinstance(field_schema, dict):
|
||||
continue
|
||||
|
||||
# Refresh model options for LLM model fields
|
||||
if model_class and hasattr(model_class, "model_fields"):
|
||||
field_info = model_class.model_fields.get(field_name)
|
||||
if field_info and is_llm_model_field(field_name, field_info):
|
||||
try:
|
||||
refresh_llm_model_options(field_schema)
|
||||
except Exception as exc:
|
||||
logger.warning(
|
||||
"Failed to refresh LLM options for field %s: %s",
|
||||
field_name,
|
||||
exc,
|
||||
)
|
||||
|
||||
# Refresh discriminator mapping for fields that use model discrimination
|
||||
try:
|
||||
refresh_llm_discriminator_mapping(field_schema)
|
||||
except Exception as exc:
|
||||
logger.warning(
|
||||
"Failed to refresh discriminator mapping for field %s: %s",
|
||||
field_name,
|
||||
exc,
|
||||
)
|
||||
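
A small worked example of the options-vs-enum split documented in
refresh_llm_model_options(), run against a toy field schema (the slugs are
made up for illustration):

    field_schema = {
        "options": ["gpt-4o"],                # what the dropdown showed before
        "enum": ["gpt-4o", "legacy-model"],   # what validation accepted before
    }
    refresh_llm_model_options(field_schema)
    # Afterwards "options" lists only currently-enabled models, while "enum"
    # is the union of the old enum and every known slug - so a graph saved
    # with "legacy-model" still validates even if that model is now disabled,
    # and "default" is filled from the registry when one is available.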
@@ -40,7 +40,6 @@ from pydantic_core import (
)
from typing_extensions import TypedDict

from backend.data.llm_registry import update_schema_with_llm_registry
from backend.integrations.providers import ProviderName
from backend.util.json import loads as json_loads
from backend.util.settings import Secrets
@@ -545,9 +544,7 @@ class CredentialsMetaInput(BaseModel, Generic[CP, CT]):
        else:
            schema["credentials_provider"] = allowed_providers
        schema["credentials_types"] = model_class.allowed_cred_types()

        # Ensure LLM discriminators are populated (delegates to shared helper)
        update_schema_with_llm_registry(schema, model_class)
        # Do not return anything, just mutate schema in place

    model_config = ConfigDict(
        json_schema_extra=_add_json_schema_extra,  # type: ignore
@@ -696,20 +693,16 @@ def CredentialsField(
    This is enforced by the `BlockSchema` base class.
    """

    # Build field_schema_extra - always include discriminator and mapping if discriminator is set
    field_schema_extra: dict[str, Any] = {}

    # Always include discriminator if provided
    if discriminator is not None:
        field_schema_extra["discriminator"] = discriminator
        # Always include discriminator_mapping when discriminator is set (even if empty initially)
        field_schema_extra["discriminator_mapping"] = discriminator_mapping or {}

    # Include other optional fields (only if not None)
    if required_scopes:
        field_schema_extra["credentials_scopes"] = list(required_scopes)
    if discriminator_values:
        field_schema_extra["discriminator_values"] = discriminator_values
    field_schema_extra = {
        k: v
        for k, v in {
            "credentials_scopes": list(required_scopes) or None,
            "discriminator": discriminator,
            "discriminator_mapping": discriminator_mapping,
            "discriminator_values": discriminator_values,
        }.items()
        if v is not None
    }

    # Merge any json_schema_extra passed in kwargs
    if "json_schema_extra" in kwargs:
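
The replacement body above builds the extras dict in one pass and drops
None-valued keys; a quick standalone illustration of that pattern (sample
values are hypothetical):

    raw = {
        "credentials_scopes": None,      # dropped: empty scopes become None
        "discriminator": "model",
        "discriminator_mapping": {},     # kept: an empty dict is not None
        "discriminator_values": None,    # dropped
    }
    field_schema_extra = {k: v for k, v in raw.items() if v is not None}
    # -> {"discriminator": "model", "discriminator_mapping": {}}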
@@ -1,66 +0,0 @@
"""
Helper functions for LLM registry initialization in executor context.

These functions handle refreshing the LLM registry when the executor starts
and subscribing to real-time updates via Redis pub/sub.
"""

import logging

from backend.data import db, llm_registry
from backend.data.block import BlockSchema, initialize_blocks
from backend.data.block_cost_config import refresh_llm_costs
from backend.data.llm_registry import subscribe_to_registry_refresh

logger = logging.getLogger(__name__)


async def initialize_registry_for_executor() -> None:
    """
    Initialize blocks and refresh LLM registry in the executor context.

    This must run in the executor's event loop to have access to the database.
    """
    try:
        # Connect to database if not already connected
        if not db.is_connected():
            await db.connect()
            logger.info("[GraphExecutor] Connected to database for registry refresh")

        # Initialize blocks (internally refreshes LLM registry and costs)
        await initialize_blocks()
        logger.info("[GraphExecutor] Blocks initialized")
    except Exception as exc:
        logger.warning(
            "[GraphExecutor] Failed to refresh LLM registry on startup: %s",
            exc,
            exc_info=True,
        )


async def refresh_registry_on_notification() -> None:
    """Refresh LLM registry when notified via Redis pub/sub."""
    try:
        # Ensure DB is connected
        if not db.is_connected():
            await db.connect()

        # Refresh registry and costs
        await llm_registry.refresh_llm_registry()
        refresh_llm_costs()

        # Clear block schema caches so they regenerate with new model options
        BlockSchema.clear_all_schema_caches()

        logger.info("[GraphExecutor] LLM registry refreshed from notification")
    except Exception as exc:
        logger.error(
            "[GraphExecutor] Failed to refresh LLM registry from notification: %s",
            exc,
            exc_info=True,
        )


async def subscribe_to_registry_updates() -> None:
    """Subscribe to Redis pub/sub for LLM registry refresh notifications."""
    await subscribe_to_registry_refresh(refresh_registry_on_notification)
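
For testing, the notification callback can be driven by hand without Redis;
a hedged sketch (in production the trigger is a pub/sub message consumed
inside subscribe_to_registry_refresh):

    import asyncio

    async def simulate_registry_update() -> None:
        # Invoking the callback directly exercises the same refresh path
        # (DB connect, registry + cost refresh, schema cache clear).
        await refresh_registry_on_notification()

    asyncio.run(simulate_registry_update())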
@@ -702,20 +702,6 @@ class ExecutionProcessor:
        )
        self.node_execution_thread.start()
        self.node_evaluation_thread.start()

        # Initialize LLM registry and subscribe to updates
        from backend.executor.llm_registry_init import (
            initialize_registry_for_executor,
            subscribe_to_registry_updates,
        )

        asyncio.run_coroutine_threadsafe(
            initialize_registry_for_executor(), self.node_execution_loop
        )
        asyncio.run_coroutine_threadsafe(
            subscribe_to_registry_updates(), self.node_execution_loop
        )

        logger.info(f"[GraphExecutor] {self.tid} started")

    @error_logged(swallow=False)
@@ -602,6 +602,18 @@ class Scheduler(AppService):
        self.scheduler.add_listener(job_max_instances_listener, EVENT_JOB_MAX_INSTANCES)
        self.scheduler.start()

        # Run embedding backfill immediately on startup
        # This ensures blocks/docs are searchable right away, not after 6 hours
        # Safe to run on multiple pods - uses upserts and checks for existing embeddings
        if self.register_system_tasks:
            logger.info("Running embedding backfill on startup...")
            try:
                result = ensure_embeddings_coverage()
                logger.info(f"Startup embedding backfill complete: {result}")
            except Exception as e:
                logger.error(f"Startup embedding backfill failed: {e}")
                # Don't fail startup - the scheduled job will retry later

        # Keep the service running since BackgroundScheduler doesn't block
        super().run_service()
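
The "safe on multiple pods" claim relies on the backfill being idempotent; a
sketch of that guard pattern under assumed table and field names (the real
ensure_embeddings_coverage() may differ):

    async def backfill_one(item_id: str, text: str) -> None:
        # Skip work if an embedding already exists for this item
        existing = await prisma.models.Embedding.prisma().find_unique(
            where={"itemId": item_id}
        )
        if existing is not None:
            return
        vector = embed(text)  # hypothetical embedding call
        # Upsert so a concurrent pod racing us cannot create a duplicate
        await prisma.models.Embedding.prisma().upsert(
            where={"itemId": item_id},
            data={
                "create": {"itemId": item_id, "vector": vector},
                "update": {"vector": vector},
            },
        )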
@@ -1,854 +0,0 @@
from __future__ import annotations

from typing import Any, Iterable, Sequence, cast

import prisma
import prisma.models

from backend.data.db import transaction
from backend.server.v2.llm import model as llm_model


def _json_dict(value: Any | None) -> dict[str, Any]:
    if not value:
        return {}
    if isinstance(value, dict):
        return value
    return {}


def _map_cost(record: prisma.models.LlmModelCost) -> llm_model.LlmModelCost:
    return llm_model.LlmModelCost(
        id=record.id,
        unit=record.unit,
        credit_cost=record.creditCost,
        credential_provider=record.credentialProvider,
        credential_id=record.credentialId,
        credential_type=record.credentialType,
        currency=record.currency,
        metadata=_json_dict(record.metadata),
    )


def _map_creator(
    record: prisma.models.LlmModelCreator,
) -> llm_model.LlmModelCreator:
    return llm_model.LlmModelCreator(
        id=record.id,
        name=record.name,
        display_name=record.displayName,
        description=record.description,
        website_url=record.websiteUrl,
        logo_url=record.logoUrl,
        metadata=_json_dict(record.metadata),
    )


def _map_model(record: prisma.models.LlmModel) -> llm_model.LlmModel:
    costs = []
    if record.Costs:
        costs = [_map_cost(cost) for cost in record.Costs]

    creator = None
    if hasattr(record, "Creator") and record.Creator:
        creator = _map_creator(record.Creator)

    return llm_model.LlmModel(
        id=record.id,
        slug=record.slug,
        display_name=record.displayName,
        description=record.description,
        provider_id=record.providerId,
        creator_id=record.creatorId,
        creator=creator,
        context_window=record.contextWindow,
        max_output_tokens=record.maxOutputTokens,
        is_enabled=record.isEnabled,
        is_recommended=record.isRecommended,
        capabilities=_json_dict(record.capabilities),
        metadata=_json_dict(record.metadata),
        costs=costs,
    )


def _map_provider(record: prisma.models.LlmProvider) -> llm_model.LlmProvider:
    models: list[llm_model.LlmModel] = []
    if record.Models:
        models = [_map_model(model) for model in record.Models]

    return llm_model.LlmProvider(
        id=record.id,
        name=record.name,
        display_name=record.displayName,
        description=record.description,
        default_credential_provider=record.defaultCredentialProvider,
        default_credential_id=record.defaultCredentialId,
        default_credential_type=record.defaultCredentialType,
        supports_tools=record.supportsTools,
        supports_json_output=record.supportsJsonOutput,
        supports_reasoning=record.supportsReasoning,
        supports_parallel_tool=record.supportsParallelTool,
        metadata=_json_dict(record.metadata),
        models=models,
    )


async def list_providers(
    include_models: bool = True, enabled_only: bool = False
) -> list[llm_model.LlmProvider]:
    """
    List all LLM providers.

    Args:
        include_models: Whether to include models for each provider
        enabled_only: If True, only include enabled models (for public routes)
    """
    include: Any = None
    if include_models:
        model_where = {"isEnabled": True} if enabled_only else None
        include = {
            "Models": {
                "include": {"Costs": True, "Creator": True},
                "where": model_where,
            }
        }
    records = await prisma.models.LlmProvider.prisma().find_many(include=include)
    return [_map_provider(record) for record in records]


async def upsert_provider(
    request: llm_model.UpsertLlmProviderRequest,
    provider_id: str | None = None,
) -> llm_model.LlmProvider:
    data: Any = {
        "name": request.name,
        "displayName": request.display_name,
        "description": request.description,
        "defaultCredentialProvider": request.default_credential_provider,
        "defaultCredentialId": request.default_credential_id,
        "defaultCredentialType": request.default_credential_type,
        "supportsTools": request.supports_tools,
        "supportsJsonOutput": request.supports_json_output,
        "supportsReasoning": request.supports_reasoning,
        "supportsParallelTool": request.supports_parallel_tool,
        "metadata": request.metadata,
    }
    include: Any = {"Models": {"include": {"Costs": True, "Creator": True}}}
    if provider_id:
        record = await prisma.models.LlmProvider.prisma().update(
            where={"id": provider_id},
            data=data,
            include=include,
        )
    else:
        record = await prisma.models.LlmProvider.prisma().create(
            data=data,
            include=include,
        )
    if record is None:
        raise ValueError("Failed to create/update provider")
    return _map_provider(record)


async def list_models(
    provider_id: str | None = None, enabled_only: bool = False
) -> list[llm_model.LlmModel]:
    """
    List LLM models.

    Args:
        provider_id: Optional filter by provider ID
        enabled_only: If True, only return enabled models (for public routes)
    """
    where: Any = {}
    if provider_id:
        where["providerId"] = provider_id
    if enabled_only:
        where["isEnabled"] = True

    records = await prisma.models.LlmModel.prisma().find_many(
        where=where if where else None,
        include={"Costs": True, "Creator": True},
    )
    return [_map_model(record) for record in records]


def _cost_create_payload(
    costs: Sequence[llm_model.LlmModelCostInput],
) -> dict[str, Iterable[dict[str, Any]]]:
    create_items = []
    for cost in costs:
        item: dict[str, Any] = {
            "unit": cost.unit,
            "creditCost": cost.credit_cost,
            "credentialProvider": cost.credential_provider,
        }
        # Only include optional fields if they have values
        if cost.credential_id:
            item["credentialId"] = cost.credential_id
        if cost.credential_type:
            item["credentialType"] = cost.credential_type
        if cost.currency:
            item["currency"] = cost.currency
        # Handle metadata - use Prisma Json type
        if cost.metadata is not None and cost.metadata != {}:
            item["metadata"] = prisma.Json(cost.metadata)
        create_items.append(item)
    return {"create": create_items}


async def create_model(
    request: llm_model.CreateLlmModelRequest,
) -> llm_model.LlmModel:
    data: Any = {
        "slug": request.slug,
        "displayName": request.display_name,
        "description": request.description,
        "providerId": request.provider_id,
        "contextWindow": request.context_window,
        "maxOutputTokens": request.max_output_tokens,
        "isEnabled": request.is_enabled,
        "capabilities": request.capabilities,
        "metadata": request.metadata,
        "Costs": _cost_create_payload(request.costs),
    }
    if request.creator_id:
        data["creatorId"] = request.creator_id

    record = await prisma.models.LlmModel.prisma().create(
        data=data,
        include={"Costs": True, "Creator": True},
    )
    return _map_model(record)


async def update_model(
    model_id: str,
    request: llm_model.UpdateLlmModelRequest,
) -> llm_model.LlmModel:
    # Build scalar field updates (non-relation fields)
    scalar_data: Any = {}
    if request.display_name is not None:
        scalar_data["displayName"] = request.display_name
    if request.description is not None:
        scalar_data["description"] = request.description
    if request.context_window is not None:
        scalar_data["contextWindow"] = request.context_window
    if request.max_output_tokens is not None:
        scalar_data["maxOutputTokens"] = request.max_output_tokens
    if request.is_enabled is not None:
        scalar_data["isEnabled"] = request.is_enabled
    if request.capabilities is not None:
        scalar_data["capabilities"] = request.capabilities
    if request.metadata is not None:
        scalar_data["metadata"] = request.metadata
    # Foreign keys can be updated directly as scalar fields
    if request.provider_id is not None:
        scalar_data["providerId"] = request.provider_id
    if request.creator_id is not None:
        # Empty string means remove the creator
        scalar_data["creatorId"] = request.creator_id if request.creator_id else None

    # If we have costs to update, we need to handle them separately
    # because nested writes have different constraints
    if request.costs is not None:
        # Wrap cost replacement in a transaction for atomicity
        async with transaction() as tx:
            # First update scalar fields
            if scalar_data:
                await tx.llmmodel.update(
                    where={"id": model_id},
                    data=scalar_data,
                )
            # Then handle costs: delete existing and create new
            await tx.llmmodelcost.delete_many(where={"llmModelId": model_id})
            if request.costs:
                cost_payload = _cost_create_payload(request.costs)
                for cost_item in cost_payload["create"]:
                    cost_item["llmModelId"] = model_id
                    await tx.llmmodelcost.create(data=cast(Any, cost_item))
        # Fetch the updated record (outside transaction)
        record = await prisma.models.LlmModel.prisma().find_unique(
            where={"id": model_id},
            include={"Costs": True, "Creator": True},
        )
    else:
        # No costs update - simple update
        record = await prisma.models.LlmModel.prisma().update(
            where={"id": model_id},
            data=scalar_data,
            include={"Costs": True, "Creator": True},
        )

    if not record:
        raise ValueError(f"Model with id '{model_id}' not found")
    return _map_model(record)


async def toggle_model(
    model_id: str,
    is_enabled: bool,
    migrate_to_slug: str | None = None,
    migration_reason: str | None = None,
    custom_credit_cost: int | None = None,
) -> llm_model.ToggleLlmModelResponse:
    """
    Toggle a model's enabled status, optionally migrating workflows when disabling.

    Args:
        model_id: UUID of the model to toggle
        is_enabled: New enabled status
        migrate_to_slug: If disabling and this is provided, migrate all workflows
            using this model to the specified replacement model
        migration_reason: Optional reason for the migration (e.g., "Provider outage")
        custom_credit_cost: Optional custom pricing override for migrated workflows.
            When set, the billing system should use this cost instead
            of the target model's cost for affected nodes.

    Returns:
        ToggleLlmModelResponse with the updated model and optional migration stats
    """
    import json

    # Get the model being toggled
    model = await prisma.models.LlmModel.prisma().find_unique(
        where={"id": model_id}, include={"Costs": True}
    )
    if not model:
        raise ValueError(f"Model with id '{model_id}' not found")

    nodes_migrated = 0
    migration_id: str | None = None

    # If disabling with migration, perform migration first
    if not is_enabled and migrate_to_slug:
        # Validate replacement model exists and is enabled
        replacement = await prisma.models.LlmModel.prisma().find_unique(
            where={"slug": migrate_to_slug}
        )
        if not replacement:
            raise ValueError(f"Replacement model '{migrate_to_slug}' not found")
        if not replacement.isEnabled:
            raise ValueError(
                f"Replacement model '{migrate_to_slug}' is disabled. "
                f"Please enable it before using it as a replacement."
            )

        # Perform all operations atomically within a single transaction
        # This ensures no nodes are missed between query and update
        async with transaction() as tx:
            # Get the IDs of nodes that will be migrated (inside transaction for consistency)
            node_ids_result = await tx.query_raw(
                """
                SELECT id
                FROM "AgentNode"
                WHERE "constantInput"::jsonb->>'model' = $1
                FOR UPDATE
                """,
                model.slug,
            )
            migrated_node_ids = (
                [row["id"] for row in node_ids_result] if node_ids_result else []
            )
            nodes_migrated = len(migrated_node_ids)

            if nodes_migrated > 0:
                # Update by IDs to ensure we only update the exact nodes we queried
                # Use JSON array and jsonb_array_elements_text for safe parameterization
                node_ids_json = json.dumps(migrated_node_ids)
                await tx.execute_raw(
                    """
                    UPDATE "AgentNode"
                    SET "constantInput" = JSONB_SET(
                        "constantInput"::jsonb,
                        '{model}',
                        to_jsonb($1::text)
                    )
                    WHERE id::text IN (
                        SELECT jsonb_array_elements_text($2::jsonb)
                    )
                    """,
                    migrate_to_slug,
                    node_ids_json,
                )

            record = await tx.llmmodel.update(
                where={"id": model_id},
                data={"isEnabled": is_enabled},
                include={"Costs": True},
            )

            # Create migration record for revert capability
            if nodes_migrated > 0:
                migration_data: Any = {
                    "sourceModelSlug": model.slug,
                    "targetModelSlug": migrate_to_slug,
                    "reason": migration_reason,
                    "migratedNodeIds": json.dumps(migrated_node_ids),
                    "nodeCount": nodes_migrated,
                    "customCreditCost": custom_credit_cost,
                }
                migration_record = await tx.llmmodelmigration.create(
                    data=migration_data
                )
                migration_id = migration_record.id
    else:
        # Simple toggle without migration
        record = await prisma.models.LlmModel.prisma().update(
            where={"id": model_id},
            data={"isEnabled": is_enabled},
            include={"Costs": True},
        )

    if record is None:
        raise ValueError(f"Model with id '{model_id}' not found")
    return llm_model.ToggleLlmModelResponse(
        model=_map_model(record),
        nodes_migrated=nodes_migrated,
        migrated_to_slug=migrate_to_slug if nodes_migrated > 0 else None,
        migration_id=migration_id,
    )
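
A hedged usage sketch for the disable-with-migration path above (IDs and
slugs are placeholders):

    response = await toggle_model(
        model_id="<uuid-of-model-to-disable>",
        is_enabled=False,
        migrate_to_slug="gpt-4o-mini",
        migration_reason="Provider outage",
    )
    # response.nodes_migrated - how many AgentNode rows were rewritten
    # response.migration_id   - feed to revert_migration() (defined below)
    #                           to restore exactly those nodes later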
async def get_model_usage(model_id: str) -> llm_model.LlmModelUsageResponse:
    """Get usage count for a model."""
    import prisma as prisma_module

    model = await prisma.models.LlmModel.prisma().find_unique(where={"id": model_id})
    if not model:
        raise ValueError(f"Model with id '{model_id}' not found")

    count_result = await prisma_module.get_client().query_raw(
        """
        SELECT COUNT(*) as count
        FROM "AgentNode"
        WHERE "constantInput"::jsonb->>'model' = $1
        """,
        model.slug,
    )
    node_count = int(count_result[0]["count"]) if count_result else 0

    return llm_model.LlmModelUsageResponse(model_slug=model.slug, node_count=node_count)


async def delete_model(
    model_id: str, replacement_model_slug: str
) -> llm_model.DeleteLlmModelResponse:
    """
    Delete a model and migrate all AgentNodes using it to a replacement model.

    This performs an atomic operation within a database transaction:
    1. Validates the model exists
    2. Validates the replacement model exists and is enabled
    3. Counts affected nodes
    4. Migrates all AgentNode.constantInput->model to replacement (in transaction)
    5. Deletes the LlmModel record (CASCADE deletes costs) (in transaction)

    Args:
        model_id: UUID of the model to delete
        replacement_model_slug: Slug of the model to migrate to

    Returns:
        DeleteLlmModelResponse with migration stats

    Raises:
        ValueError: If model not found, replacement not found, or replacement is disabled
    """
    # 1. Get the model being deleted (validation - outside transaction)
    model = await prisma.models.LlmModel.prisma().find_unique(
        where={"id": model_id}, include={"Costs": True}
    )
    if not model:
        raise ValueError(f"Model with id '{model_id}' not found")

    deleted_slug = model.slug
    deleted_display_name = model.displayName

    # 2. Validate replacement model exists and is enabled (validation - outside transaction)
    replacement = await prisma.models.LlmModel.prisma().find_unique(
        where={"slug": replacement_model_slug}
    )
    if not replacement:
        raise ValueError(f"Replacement model '{replacement_model_slug}' not found")
    if not replacement.isEnabled:
        raise ValueError(
            f"Replacement model '{replacement_model_slug}' is disabled. "
            f"Please enable it before using it as a replacement."
        )

    # 3 & 4. Perform count, migration and deletion atomically within a transaction
    nodes_affected = 0
    async with transaction() as tx:
        # Count affected nodes (inside transaction for consistency)
        count_result = await tx.query_raw(
            """
            SELECT COUNT(*) as count
            FROM "AgentNode"
            WHERE "constantInput"::jsonb->>'model' = $1
            """,
            deleted_slug,
        )
        nodes_affected = int(count_result[0]["count"]) if count_result else 0

        # Migrate all AgentNode.constantInput->model to replacement
        if nodes_affected > 0:
            await tx.execute_raw(
                """
                UPDATE "AgentNode"
                SET "constantInput" = JSONB_SET(
                    "constantInput"::jsonb,
                    '{model}',
                    to_jsonb($1::text)
                )
                WHERE "constantInput"::jsonb->>'model' = $2
                """,
                replacement_model_slug,
                deleted_slug,
            )

        # Delete the model (CASCADE will delete costs automatically)
        await tx.llmmodel.delete(where={"id": model_id})

    return llm_model.DeleteLlmModelResponse(
        deleted_model_slug=deleted_slug,
        deleted_model_display_name=deleted_display_name,
        replacement_model_slug=replacement_model_slug,
        nodes_migrated=nodes_affected,
        message=(
            f"Successfully deleted model '{deleted_display_name}' ({deleted_slug}) "
            f"and migrated {nodes_affected} workflow node(s) to '{replacement_model_slug}'."
        ),
    )


def _map_migration(
    record: prisma.models.LlmModelMigration,
) -> llm_model.LlmModelMigration:
    return llm_model.LlmModelMigration(
        id=record.id,
        source_model_slug=record.sourceModelSlug,
        target_model_slug=record.targetModelSlug,
        reason=record.reason,
        node_count=record.nodeCount,
        custom_credit_cost=record.customCreditCost,
        is_reverted=record.isReverted,
        created_at=record.createdAt.isoformat(),
        reverted_at=record.revertedAt.isoformat() if record.revertedAt else None,
    )


async def list_migrations(
    include_reverted: bool = False,
) -> list[llm_model.LlmModelMigration]:
    """
    List model migrations, optionally including reverted ones.

    Args:
        include_reverted: If True, include reverted migrations. Default is False.

    Returns:
        List of LlmModelMigration records
    """
    where: Any = None if include_reverted else {"isReverted": False}
    records = await prisma.models.LlmModelMigration.prisma().find_many(
        where=where,
        order={"createdAt": "desc"},
    )
    return [_map_migration(record) for record in records]


async def get_migration(migration_id: str) -> llm_model.LlmModelMigration | None:
    """Get a specific migration by ID."""
    record = await prisma.models.LlmModelMigration.prisma().find_unique(
        where={"id": migration_id}
    )
    return _map_migration(record) if record else None


async def revert_migration(
    migration_id: str,
    re_enable_source_model: bool = True,
) -> llm_model.RevertMigrationResponse:
    """
    Revert a model migration, restoring affected nodes to their original model.

    This only reverts the specific nodes that were migrated, not all nodes
    currently using the target model.

    Args:
        migration_id: UUID of the migration to revert
        re_enable_source_model: Whether to re-enable the source model if it's disabled

    Returns:
        RevertMigrationResponse with revert stats

    Raises:
        ValueError: If migration not found, already reverted, or source model not available
    """
    import json
    from datetime import datetime, timezone

    # Get the migration record
    migration = await prisma.models.LlmModelMigration.prisma().find_unique(
        where={"id": migration_id}
    )
    if not migration:
        raise ValueError(f"Migration with id '{migration_id}' not found")

    if migration.isReverted:
        raise ValueError(
            f"Migration '{migration_id}' has already been reverted "
            f"on {migration.revertedAt.isoformat() if migration.revertedAt else 'unknown date'}"
        )

    # Check if source model exists
    source_model = await prisma.models.LlmModel.prisma().find_unique(
        where={"slug": migration.sourceModelSlug}
    )
    if not source_model:
        raise ValueError(
            f"Source model '{migration.sourceModelSlug}' no longer exists. "
            f"Cannot revert migration."
        )

    # Get the migrated node IDs (Prisma auto-parses JSONB to list)
    migrated_node_ids: list[str] = (
        migration.migratedNodeIds
        if isinstance(migration.migratedNodeIds, list)
        else json.loads(migration.migratedNodeIds)  # type: ignore
    )
    if not migrated_node_ids:
        raise ValueError("No nodes to revert in this migration")

    # Track if we need to re-enable the source model
    source_model_was_disabled = not source_model.isEnabled
    should_re_enable = source_model_was_disabled and re_enable_source_model
    source_model_re_enabled = False

    # Perform revert atomically
    async with transaction() as tx:
        # Re-enable the source model if requested and it was disabled
        if should_re_enable:
            await tx.llmmodel.update(
                where={"id": source_model.id},
                data={"isEnabled": True},
            )
            source_model_re_enabled = True

        # Update only the specific nodes that were migrated
        # We need to check that they still have the target model (haven't been changed since)
        # Use a single batch update for efficiency
        # Use JSON array and jsonb_array_elements_text for safe parameterization
        node_ids_json = json.dumps(migrated_node_ids)
        result = await tx.execute_raw(
            """
            UPDATE "AgentNode"
            SET "constantInput" = JSONB_SET(
                "constantInput"::jsonb,
                '{model}',
                to_jsonb($1::text)
            )
            WHERE id::text IN (
                SELECT jsonb_array_elements_text($2::jsonb)
            )
            AND "constantInput"::jsonb->>'model' = $3
            """,
            migration.sourceModelSlug,
            node_ids_json,
            migration.targetModelSlug,
        )
        nodes_reverted = result if result else 0

        # Mark migration as reverted
        await tx.llmmodelmigration.update(
            where={"id": migration_id},
            data={
                "isReverted": True,
                "revertedAt": datetime.now(timezone.utc),
            },
        )

    # Calculate nodes that were already changed since migration
    nodes_already_changed = len(migrated_node_ids) - nodes_reverted

    # Build appropriate message
    message_parts = [
        f"Successfully reverted migration: {nodes_reverted} node(s) restored "
        f"from '{migration.targetModelSlug}' to '{migration.sourceModelSlug}'."
    ]
    if nodes_already_changed > 0:
        message_parts.append(
            f" {nodes_already_changed} node(s) were already changed and not reverted."
        )
    if source_model_re_enabled:
        message_parts.append(
            f" Model '{migration.sourceModelSlug}' has been re-enabled."
        )

    return llm_model.RevertMigrationResponse(
        migration_id=migration_id,
        source_model_slug=migration.sourceModelSlug,
        target_model_slug=migration.targetModelSlug,
        nodes_reverted=nodes_reverted,
        nodes_already_changed=nodes_already_changed,
        source_model_re_enabled=source_model_re_enabled,
        message="".join(message_parts),
    )


# ============================================================================
# Creator CRUD operations
# ============================================================================


async def list_creators() -> list[llm_model.LlmModelCreator]:
    """List all LLM model creators."""
    records = await prisma.models.LlmModelCreator.prisma().find_many(
        order={"displayName": "asc"}
    )
    return [_map_creator(record) for record in records]


async def get_creator(creator_id: str) -> llm_model.LlmModelCreator | None:
    """Get a specific creator by ID."""
    record = await prisma.models.LlmModelCreator.prisma().find_unique(
        where={"id": creator_id}
    )
    return _map_creator(record) if record else None


async def upsert_creator(
    request: llm_model.UpsertLlmCreatorRequest,
    creator_id: str | None = None,
) -> llm_model.LlmModelCreator:
    """Create or update a model creator."""
    data: Any = {
        "name": request.name,
        "displayName": request.display_name,
        "description": request.description,
        "websiteUrl": request.website_url,
        "logoUrl": request.logo_url,
        "metadata": request.metadata,
    }
    if creator_id:
        record = await prisma.models.LlmModelCreator.prisma().update(
            where={"id": creator_id},
            data=data,
        )
    else:
        record = await prisma.models.LlmModelCreator.prisma().create(data=data)
    if record is None:
        raise ValueError("Failed to create/update creator")
    return _map_creator(record)


async def delete_creator(creator_id: str) -> bool:
    """
    Delete a model creator.

    This will set creatorId to NULL on all associated models (due to onDelete: SetNull).

    Args:
        creator_id: UUID of the creator to delete

    Returns:
        True if deleted successfully

    Raises:
        ValueError: If creator not found
    """
    creator = await prisma.models.LlmModelCreator.prisma().find_unique(
        where={"id": creator_id}
    )
    if not creator:
        raise ValueError(f"Creator with id '{creator_id}' not found")

    await prisma.models.LlmModelCreator.prisma().delete(where={"id": creator_id})
    return True


async def get_recommended_model() -> llm_model.LlmModel | None:
    """
    Get the currently recommended LLM model.

    Returns:
        The recommended model, or None if no model is marked as recommended.
    """
    record = await prisma.models.LlmModel.prisma().find_first(
        where={"isRecommended": True, "isEnabled": True},
        include={"Costs": True, "Creator": True},
    )
    return _map_model(record) if record else None


async def set_recommended_model(
    model_id: str,
) -> tuple[llm_model.LlmModel, str | None]:
    """
    Set a model as the recommended model.

    This will clear the isRecommended flag from any other model and set it
    on the specified model. The model must be enabled.

    Args:
        model_id: UUID of the model to set as recommended

    Returns:
        Tuple of (the updated model, previous recommended model slug or None)

    Raises:
        ValueError: If model not found or not enabled
    """
    # First, verify the model exists and is enabled
    target_model = await prisma.models.LlmModel.prisma().find_unique(
        where={"id": model_id}
    )
    if not target_model:
        raise ValueError(f"Model with id '{model_id}' not found")
    if not target_model.isEnabled:
        raise ValueError(
            f"Cannot set disabled model '{target_model.slug}' as recommended"
        )

    # Get the current recommended model (if any)
    current_recommended = await prisma.models.LlmModel.prisma().find_first(
        where={"isRecommended": True}
    )
    previous_slug = current_recommended.slug if current_recommended else None

    # Use a transaction to ensure atomicity
    async with transaction() as tx:
        # Clear isRecommended from all models
        await tx.llmmodel.update_many(
            where={"isRecommended": True},
            data={"isRecommended": False},
        )
        # Set the new recommended model
        await tx.llmmodel.update(
            where={"id": model_id},
            data={"isRecommended": True},
        )

    # Fetch and return the updated model
    updated_record = await prisma.models.LlmModel.prisma().find_unique(
        where={"id": model_id},
        include={"Costs": True, "Creator": True},
    )
    if not updated_record:
        raise ValueError("Failed to fetch updated model")

    return _map_model(updated_record), previous_slug


async def get_recommended_model_slug() -> str | None:
    """
    Get the slug of the currently recommended LLM model.

    Returns:
        The slug of the recommended model, or None if no model is marked as recommended.
    """
    record = await prisma.models.LlmModel.prisma().find_first(
        where={"isRecommended": True, "isEnabled": True},
    )
    return record.slug if record else None
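
A sketch of driving the clear-then-set flow in set_recommended_model() from
an admin handler (illustrative only; the logger here is assumed):

    model, previous = await set_recommended_model(model_id="<uuid>")
    if previous:
        logger.info("Recommended model changed from %s to %s", previous, model.slug)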
@@ -1,231 +0,0 @@
from __future__ import annotations

import re
from typing import Any, Optional

import prisma.enums
import pydantic

# Pattern for valid model slugs: alphanumeric start, then alphanumeric, dots, underscores, slashes, hyphens
SLUG_PATTERN = re.compile(r"^[a-zA-Z0-9][a-zA-Z0-9._/-]*$")
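
A few concrete cases the slug pattern accepts and rejects, for reference:

    assert SLUG_PATTERN.match("gpt-4o")                    # plain slug
    assert SLUG_PATTERN.match("meta-llama/llama-3.1-70b")  # vendor/model form
    assert not SLUG_PATTERN.match("-gpt-4o")               # must start alphanumeric
    assert not SLUG_PATTERN.match("gpt 4o")                # spaces are rejected
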
class LlmModelCost(pydantic.BaseModel):
    id: str
    unit: prisma.enums.LlmCostUnit = prisma.enums.LlmCostUnit.RUN
    credit_cost: int
    credential_provider: str
    credential_id: Optional[str] = None
    credential_type: Optional[str] = None
    currency: Optional[str] = None
    metadata: dict[str, Any] = pydantic.Field(default_factory=dict)


class LlmModelCreator(pydantic.BaseModel):
    """Represents the organization that created/trained the model (e.g., OpenAI, Meta)."""

    id: str
    name: str
    display_name: str
    description: Optional[str] = None
    website_url: Optional[str] = None
    logo_url: Optional[str] = None
    metadata: dict[str, Any] = pydantic.Field(default_factory=dict)


class LlmModel(pydantic.BaseModel):
    id: str
    slug: str
    display_name: str
    description: Optional[str] = None
    provider_id: str
    creator_id: Optional[str] = None
    creator: Optional[LlmModelCreator] = None
    context_window: int
    max_output_tokens: Optional[int] = None
    is_enabled: bool = True
    is_recommended: bool = False
    capabilities: dict[str, Any] = pydantic.Field(default_factory=dict)
    metadata: dict[str, Any] = pydantic.Field(default_factory=dict)
    costs: list[LlmModelCost] = pydantic.Field(default_factory=list)


class LlmProvider(pydantic.BaseModel):
    id: str
    name: str
    display_name: str
    description: Optional[str] = None
    default_credential_provider: Optional[str] = None
    default_credential_id: Optional[str] = None
    default_credential_type: Optional[str] = None
    supports_tools: bool = True
    supports_json_output: bool = True
    supports_reasoning: bool = False
    supports_parallel_tool: bool = False
    metadata: dict[str, Any] = pydantic.Field(default_factory=dict)
    models: list[LlmModel] = pydantic.Field(default_factory=list)


class LlmProvidersResponse(pydantic.BaseModel):
    providers: list[LlmProvider]


class LlmModelsResponse(pydantic.BaseModel):
    models: list[LlmModel]


class LlmCreatorsResponse(pydantic.BaseModel):
    creators: list[LlmModelCreator]


class UpsertLlmProviderRequest(pydantic.BaseModel):
    name: str
    display_name: str
    description: Optional[str] = None
    default_credential_provider: Optional[str] = None
    default_credential_id: Optional[str] = None
    default_credential_type: Optional[str] = "api_key"
    supports_tools: bool = True
    supports_json_output: bool = True
    supports_reasoning: bool = False
    supports_parallel_tool: bool = False
    metadata: dict[str, Any] = pydantic.Field(default_factory=dict)


class UpsertLlmCreatorRequest(pydantic.BaseModel):
    name: str
    display_name: str
    description: Optional[str] = None
    website_url: Optional[str] = None
    logo_url: Optional[str] = None
    metadata: dict[str, Any] = pydantic.Field(default_factory=dict)


class LlmModelCostInput(pydantic.BaseModel):
    unit: prisma.enums.LlmCostUnit = prisma.enums.LlmCostUnit.RUN
    credit_cost: int
    credential_provider: str
    credential_id: Optional[str] = None
    credential_type: Optional[str] = "api_key"
    currency: Optional[str] = None
    metadata: dict[str, Any] = pydantic.Field(default_factory=dict)


class CreateLlmModelRequest(pydantic.BaseModel):
    slug: str
    display_name: str
    description: Optional[str] = None
    provider_id: str
    creator_id: Optional[str] = None
    context_window: int
    max_output_tokens: Optional[int] = None
    is_enabled: bool = True
    capabilities: dict[str, Any] = pydantic.Field(default_factory=dict)
    metadata: dict[str, Any] = pydantic.Field(default_factory=dict)
    costs: list[LlmModelCostInput]

    @pydantic.field_validator("slug")
    @classmethod
    def validate_slug(cls, v: str) -> str:
        if not v or len(v) > 100:
            raise ValueError("Slug must be 1-100 characters")
        if not SLUG_PATTERN.match(v):
            raise ValueError(
                "Slug must start with alphanumeric and contain only "
                "alphanumeric characters, dots, underscores, slashes, or hyphens"
            )
        return v


class UpdateLlmModelRequest(pydantic.BaseModel):
    display_name: Optional[str] = None
    description: Optional[str] = None
    context_window: Optional[int] = None
    max_output_tokens: Optional[int] = None
    is_enabled: Optional[bool] = None
    capabilities: Optional[dict[str, Any]] = None
    metadata: Optional[dict[str, Any]] = None
    provider_id: Optional[str] = None
    creator_id: Optional[str] = None
    costs: Optional[list[LlmModelCostInput]] = None


class ToggleLlmModelRequest(pydantic.BaseModel):
    is_enabled: bool
    migrate_to_slug: Optional[str] = None
    migration_reason: Optional[str] = None  # e.g., "Provider outage"
    # Custom pricing override for migrated workflows. When set, billing should use
    # this cost instead of the target model's cost for affected nodes.
    # See LlmModelMigration in schema.prisma for full documentation.
    custom_credit_cost: Optional[int] = None


class ToggleLlmModelResponse(pydantic.BaseModel):
    model: LlmModel
    nodes_migrated: int = 0
    migrated_to_slug: Optional[str] = None
    migration_id: Optional[str] = None  # ID of the migration record for revert


class DeleteLlmModelResponse(pydantic.BaseModel):
    deleted_model_slug: str
    deleted_model_display_name: str
    replacement_model_slug: str
    nodes_migrated: int
    message: str


class LlmModelUsageResponse(pydantic.BaseModel):
    model_slug: str
    node_count: int


# Migration tracking models
class LlmModelMigration(pydantic.BaseModel):
    id: str
    source_model_slug: str
    target_model_slug: str
    reason: Optional[str] = None
    node_count: int
    # Custom pricing override - billing should use this instead of target model's cost
    custom_credit_cost: Optional[int] = None
    is_reverted: bool = False
    created_at: str  # ISO datetime string
    reverted_at: Optional[str] = None


class LlmMigrationsResponse(pydantic.BaseModel):
    migrations: list[LlmModelMigration]


class RevertMigrationRequest(pydantic.BaseModel):
    re_enable_source_model: bool = (
        True  # Whether to re-enable the source model if disabled
    )


class RevertMigrationResponse(pydantic.BaseModel):
    migration_id: str
    source_model_slug: str
    target_model_slug: str
    nodes_reverted: int
    nodes_already_changed: int = (
        0  # Nodes that were modified since migration (not reverted)
    )
    source_model_re_enabled: bool = False  # Whether the source model was re-enabled
    message: str


class SetRecommendedModelRequest(pydantic.BaseModel):
    model_id: str


class SetRecommendedModelResponse(pydantic.BaseModel):
    model: LlmModel
    previous_recommended_slug: Optional[str] = None
    message: str


class RecommendedModelResponse(pydantic.BaseModel):
    model: Optional[LlmModel] = None
    slug: Optional[str] = None
@@ -1,25 +0,0 @@
import autogpt_libs.auth
import fastapi

from backend.server.v2.llm import db as llm_db
from backend.server.v2.llm import model as llm_model

router = fastapi.APIRouter(
    prefix="/llm",
    tags=["llm"],
    dependencies=[fastapi.Security(autogpt_libs.auth.requires_user)],
)


@router.get("/models", response_model=llm_model.LlmModelsResponse)
async def list_models():
    """List all enabled LLM models available to users."""
    models = await llm_db.list_models(enabled_only=True)
    return llm_model.LlmModelsResponse(models=models)


@router.get("/providers", response_model=llm_model.LlmProvidersResponse)
async def list_providers():
    """List all LLM providers with their enabled models."""
    providers = await llm_db.list_providers(include_models=True, enabled_only=True)
    return llm_model.LlmProvidersResponse(providers=providers)
@@ -16,7 +16,7 @@ import pickle
import threading
import time
from dataclasses import dataclass
from functools import wraps
from functools import cache, wraps
from typing import Any, Callable, ParamSpec, Protocol, TypeVar, cast, runtime_checkable

from redis import ConnectionPool, Redis
@@ -38,29 +38,34 @@ settings = Settings()
# maxmemory 2gb  # Set memory limit (adjust based on your needs)
# save ""  # Disable persistence if using Redis purely for caching

# Create a dedicated Redis connection pool for caching (binary mode for pickle)
_cache_pool: ConnectionPool | None = None


@conn_retry("Redis", "Acquiring cache connection pool")
@cache
def _get_cache_pool() -> ConnectionPool:
    """Get or create a connection pool for cache operations."""
    global _cache_pool
    if _cache_pool is None:
        _cache_pool = ConnectionPool(
            host=settings.config.redis_host,
            port=settings.config.redis_port,
            password=settings.config.redis_password or None,
            decode_responses=False,  # Binary mode for pickle
            max_connections=50,
            socket_keepalive=True,
            socket_connect_timeout=5,
            retry_on_timeout=True,
        )
    return _cache_pool
    """Get or create a connection pool for cache operations (lazy, thread-safe)."""
    return ConnectionPool(
        host=settings.config.redis_host,
        port=settings.config.redis_port,
        password=settings.config.redis_password or None,
        decode_responses=False,  # Binary mode for pickle
        max_connections=50,
        socket_keepalive=True,
        socket_connect_timeout=5,
        retry_on_timeout=True,
    )


redis = Redis(connection_pool=_get_cache_pool())
@cache
@conn_retry("Redis", "Acquiring cache connection")
def _get_redis() -> Redis:
    """
    Get the lazily-initialized Redis client for shared cache operations.
    Uses @cache for thread-safe singleton behavior - connection is only
    established when first accessed, allowing services that only use
    in-memory caching to work without Redis configuration.
    """
    r = Redis(connection_pool=_get_cache_pool())
    r.ping()  # Verify connection
    return r
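
The @cache-based singleton trick generalizes; a small self-contained
illustration of the lazy-initialization behavior (plain Python, independent
of this codebase):

    from functools import cache

    @cache
    def expensive_client() -> dict:
        # Runs on first use; every later call returns the same cached
        # object, so importing the module no longer opens a connection.
        print("connecting...")
        return {"connected": True}

    a = expensive_client()   # prints "connecting..."
    b = expensive_client()   # cache hit, no side effect
    assert a is b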
@dataclass
@@ -179,9 +184,9 @@ def cached(
            try:
                if refresh_ttl_on_get:
                    # Use GETEX to get value and refresh expiry atomically
                    cached_bytes = redis.getex(redis_key, ex=ttl_seconds)
                    cached_bytes = _get_redis().getex(redis_key, ex=ttl_seconds)
                else:
                    cached_bytes = redis.get(redis_key)
                    cached_bytes = _get_redis().get(redis_key)

                if cached_bytes and isinstance(cached_bytes, bytes):
                    return pickle.loads(cached_bytes)
@@ -195,7 +200,7 @@ def cached(
            """Set value in Redis with TTL."""
            try:
                pickled_value = pickle.dumps(value, protocol=pickle.HIGHEST_PROTOCOL)
                redis.setex(redis_key, ttl_seconds, pickled_value)
                _get_redis().setex(redis_key, ttl_seconds, pickled_value)
            except Exception as e:
                logger.error(
                    f"Redis error storing cache for {target_func.__name__}: {e}"
@@ -333,14 +338,18 @@ def cached(
            if pattern:
                # Clear entries matching pattern
                keys = list(
                    redis.scan_iter(f"cache:{target_func.__name__}:{pattern}")
                    _get_redis().scan_iter(
                        f"cache:{target_func.__name__}:{pattern}"
                    )
                )
            else:
                # Clear all cache keys
                keys = list(redis.scan_iter(f"cache:{target_func.__name__}:*"))
                keys = list(
                    _get_redis().scan_iter(f"cache:{target_func.__name__}:*")
                )

            if keys:
                pipeline = redis.pipeline()
                pipeline = _get_redis().pipeline()
                for key in keys:
                    pipeline.delete(key)
                pipeline.execute()
@@ -355,7 +364,9 @@ def cached(

        def cache_info() -> dict[str, int | None]:
            if shared_cache:
                cache_keys = list(redis.scan_iter(f"cache:{target_func.__name__}:*"))
                cache_keys = list(
                    _get_redis().scan_iter(f"cache:{target_func.__name__}:*")
                )
                return {
                    "size": len(cache_keys),
                    "maxsize": None,  # Redis manages its own size
@@ -373,10 +384,8 @@ def cached(
            key = _make_hashable_key(args, kwargs)
            if shared_cache:
                redis_key = _make_redis_key(key, target_func.__name__)
                if redis.exists(redis_key):
                    redis.delete(redis_key)
                    return True
                return False
                deleted_count = cast(int, _get_redis().delete(redis_key))
                return deleted_count > 0
            else:
                if key in cache_storage:
                    del cache_storage[key]
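
The last hunk above replaces an exists-then-delete pair with a single DELETE;
a standalone sketch of the same idea using redis-py semantics:

    from typing import cast
    from redis import Redis

    def delete_cache_key(r: Redis, key: str) -> bool:
        # DELETE returns the number of keys removed, so a single round trip
        # both deletes and reports whether anything was there - unlike the
        # old EXISTS-then-DELETE pair, which another worker could race.
        return cast(int, r.delete(key)) > 0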
@@ -1,78 +0,0 @@
-- CreateEnum
CREATE TYPE "LlmCostUnit" AS ENUM ('RUN', 'TOKENS');

-- CreateTable
CREATE TABLE "LlmProvider" (
    "id" TEXT NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "name" TEXT NOT NULL,
    "displayName" TEXT NOT NULL,
    "description" TEXT,
    "defaultCredentialProvider" TEXT,
    "defaultCredentialId" TEXT,
    "defaultCredentialType" TEXT,
    "supportsTools" BOOLEAN NOT NULL DEFAULT TRUE,
    "supportsJsonOutput" BOOLEAN NOT NULL DEFAULT TRUE,
    "supportsReasoning" BOOLEAN NOT NULL DEFAULT FALSE,
    "supportsParallelTool" BOOLEAN NOT NULL DEFAULT FALSE,
    "metadata" JSONB NOT NULL DEFAULT '{}'::jsonb,

    CONSTRAINT "LlmProvider_pkey" PRIMARY KEY ("id"),
    CONSTRAINT "LlmProvider_name_key" UNIQUE ("name")
);

-- CreateTable
CREATE TABLE "LlmModel" (
    "id" TEXT NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "slug" TEXT NOT NULL,
    "displayName" TEXT NOT NULL,
    "description" TEXT,
    "providerId" TEXT NOT NULL,
    "contextWindow" INTEGER NOT NULL,
    "maxOutputTokens" INTEGER,
    "isEnabled" BOOLEAN NOT NULL DEFAULT TRUE,
    "capabilities" JSONB NOT NULL DEFAULT '{}'::jsonb,
    "metadata" JSONB NOT NULL DEFAULT '{}'::jsonb,

    CONSTRAINT "LlmModel_pkey" PRIMARY KEY ("id"),
    CONSTRAINT "LlmModel_slug_key" UNIQUE ("slug")
);

-- CreateTable
CREATE TABLE "LlmModelCost" (
    "id" TEXT NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "unit" "LlmCostUnit" NOT NULL DEFAULT 'RUN',
    "creditCost" INTEGER NOT NULL,
    "credentialProvider" TEXT NOT NULL,
    "credentialId" TEXT,
    "credentialType" TEXT,
    "currency" TEXT,
    "metadata" JSONB NOT NULL DEFAULT '{}'::jsonb,
    "llmModelId" TEXT NOT NULL,

    CONSTRAINT "LlmModelCost_pkey" PRIMARY KEY ("id")
);

-- CreateIndex
CREATE INDEX "LlmModel_providerId_isEnabled_idx" ON "LlmModel"("providerId", "isEnabled");

-- CreateIndex
CREATE INDEX "LlmModel_slug_idx" ON "LlmModel"("slug");

-- CreateIndex
CREATE INDEX "LlmModelCost_llmModelId_idx" ON "LlmModelCost"("llmModelId");

-- CreateIndex
CREATE INDEX "LlmModelCost_credentialProvider_idx" ON "LlmModelCost"("credentialProvider");

-- AddForeignKey
ALTER TABLE "LlmModel" ADD CONSTRAINT "LlmModel_providerId_fkey" FOREIGN KEY ("providerId") REFERENCES "LlmProvider"("id") ON DELETE RESTRICT ON UPDATE CASCADE;

-- AddForeignKey
ALTER TABLE "LlmModelCost" ADD CONSTRAINT "LlmModelCost_llmModelId_fkey" FOREIGN KEY ("llmModelId") REFERENCES "LlmModel"("id") ON DELETE CASCADE ON UPDATE CASCADE;
@@ -1,225 +0,0 @@
-- Seed LLM Registry from existing hard-coded data
-- This migration populates the LlmProvider, LlmModel, and LlmModelCost tables
-- with data from the existing MODEL_METADATA and MODEL_COST dictionaries

-- Insert Providers
INSERT INTO "LlmProvider" ("id", "name", "displayName", "description", "defaultCredentialProvider", "defaultCredentialType", "supportsTools", "supportsJsonOutput", "supportsReasoning", "supportsParallelTool", "metadata")
VALUES
    (gen_random_uuid(), 'openai', 'OpenAI', 'OpenAI language models', 'openai', 'api_key', true, true, true, true, '{}'::jsonb),
    (gen_random_uuid(), 'anthropic', 'Anthropic', 'Anthropic Claude models', 'anthropic', 'api_key', true, true, true, false, '{}'::jsonb),
    (gen_random_uuid(), 'groq', 'Groq', 'Groq inference API', 'groq', 'api_key', false, true, false, false, '{}'::jsonb),
    (gen_random_uuid(), 'open_router', 'OpenRouter', 'OpenRouter unified API', 'open_router', 'api_key', true, true, false, false, '{}'::jsonb),
    (gen_random_uuid(), 'aiml_api', 'AI/ML API', 'AI/ML API models', 'aiml_api', 'api_key', false, true, false, false, '{}'::jsonb),
    (gen_random_uuid(), 'ollama', 'Ollama', 'Ollama local models', 'ollama', 'api_key', false, true, false, false, '{}'::jsonb),
    (gen_random_uuid(), 'llama_api', 'Llama API', 'Llama API models', 'llama_api', 'api_key', false, true, false, false, '{}'::jsonb),
    (gen_random_uuid(), 'v0', 'v0', 'v0 by Vercel models', 'v0', 'api_key', true, true, false, false, '{}'::jsonb)
ON CONFLICT ("name") DO NOTHING;

-- Insert Models (using CTEs to reference provider IDs)
WITH provider_ids AS (
    SELECT "id", "name" FROM "LlmProvider"
)
INSERT INTO "LlmModel" ("id", "slug", "displayName", "description", "providerId", "contextWindow", "maxOutputTokens", "isEnabled", "capabilities", "metadata")
SELECT
    gen_random_uuid(),
    model_slug,
    model_display_name,
    NULL,
    p."id",
    context_window,
    max_output_tokens,
    true,
    '{}'::jsonb,
    '{}'::jsonb
FROM (VALUES
    -- OpenAI models
    ('o3', 'O3', 'openai', 200000, 100000),
    ('o3-mini', 'O3 Mini', 'openai', 200000, 100000),
    ('o1', 'O1', 'openai', 200000, 100000),
    ('o1-mini', 'O1 Mini', 'openai', 128000, 65536),
    ('gpt-5-2025-08-07', 'GPT 5', 'openai', 400000, 128000),
    ('gpt-5.1-2025-11-13', 'GPT 5.1', 'openai', 400000, 128000),
    ('gpt-5-mini-2025-08-07', 'GPT 5 Mini', 'openai', 400000, 128000),
    ('gpt-5-nano-2025-08-07', 'GPT 5 Nano', 'openai', 400000, 128000),
    ('gpt-5-chat-latest', 'GPT 5 Chat', 'openai', 400000, 16384),
    ('gpt-4.1-2025-04-14', 'GPT 4.1', 'openai', 1047576, 32768),
    ('gpt-4.1-mini-2025-04-14', 'GPT 4.1 Mini', 'openai', 1047576, 32768),
    ('gpt-4o-mini', 'GPT 4o Mini', 'openai', 128000, 16384),
    ('gpt-4o', 'GPT 4o', 'openai', 128000, 16384),
    ('gpt-4-turbo', 'GPT 4 Turbo', 'openai', 128000, 4096),
    ('gpt-3.5-turbo', 'GPT 3.5 Turbo', 'openai', 16385, 4096),
    -- Anthropic models
    ('claude-opus-4-1-20250805', 'Claude 4.1 Opus', 'anthropic', 200000, 32000),
    ('claude-opus-4-20250514', 'Claude 4 Opus', 'anthropic', 200000, 32000),
    ('claude-sonnet-4-20250514', 'Claude 4 Sonnet', 'anthropic', 200000, 64000),
    ('claude-opus-4-5-20251101', 'Claude 4.5 Opus', 'anthropic', 200000, 64000),
    ('claude-sonnet-4-5-20250929', 'Claude 4.5 Sonnet', 'anthropic', 200000, 64000),
    ('claude-haiku-4-5-20251001', 'Claude 4.5 Haiku', 'anthropic', 200000, 64000),
    ('claude-3-7-sonnet-20250219', 'Claude 3.7 Sonnet', 'anthropic', 200000, 64000),
    ('claude-3-haiku-20240307', 'Claude 3 Haiku', 'anthropic', 200000, 4096),
    -- AI/ML API models
    ('Qwen/Qwen2.5-72B-Instruct-Turbo', 'Qwen 2.5 72B', 'aiml_api', 32000, 8000),
    ('nvidia/llama-3.1-nemotron-70b-instruct', 'Llama 3.1 Nemotron 70B', 'aiml_api', 128000, 40000),
    ('meta-llama/Llama-3.3-70B-Instruct-Turbo', 'Llama 3.3 70B', 'aiml_api', 128000, NULL),
    ('meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo', 'Meta Llama 3.1 70B', 'aiml_api', 131000, 2000),
    ('meta-llama/Llama-3.2-3B-Instruct-Turbo', 'Llama 3.2 3B', 'aiml_api', 128000, NULL),
    -- Groq models
    ('llama-3.3-70b-versatile', 'Llama 3.3 70B', 'groq', 128000, 32768),
    ('llama-3.1-8b-instant', 'Llama 3.1 8B', 'groq', 128000, 8192),
    -- Ollama models
    ('llama3.3', 'Llama 3.3', 'ollama', 8192, NULL),
    ('llama3.2', 'Llama 3.2', 'ollama', 8192, NULL),
    ('llama3', 'Llama 3', 'ollama', 8192, NULL),
    ('llama3.1:405b', 'Llama 3.1 405B', 'ollama', 8192, NULL),
    ('dolphin-mistral:latest', 'Dolphin Mistral', 'ollama', 32768, NULL),
    -- OpenRouter models
    ('google/gemini-2.5-pro-preview-03-25', 'Gemini 2.5 Pro', 'open_router', 1050000, 8192),
    ('google/gemini-3-pro-preview', 'Gemini 3 Pro Preview', 'open_router', 1048576, 65535),
    ('google/gemini-2.5-flash', 'Gemini 2.5 Flash', 'open_router', 1048576, 65535),
    ('google/gemini-2.0-flash-001', 'Gemini 2.0 Flash', 'open_router', 1048576, 8192),
    ('google/gemini-2.5-flash-lite-preview-06-17', 'Gemini 2.5 Flash Lite Preview', 'open_router', 1048576, 65535),
    ('google/gemini-2.0-flash-lite-001', 'Gemini 2.0 Flash Lite', 'open_router', 1048576, 8192),
    ('mistralai/mistral-nemo', 'Mistral Nemo', 'open_router', 128000, 4096),
    ('cohere/command-r-08-2024', 'Command R', 'open_router', 128000, 4096),
    ('cohere/command-r-plus-08-2024', 'Command R Plus', 'open_router', 128000, 4096),
    ('deepseek/deepseek-chat', 'DeepSeek Chat', 'open_router', 64000, 2048),
    ('deepseek/deepseek-r1-0528', 'DeepSeek R1', 'open_router', 163840, 163840),
    ('perplexity/sonar', 'Perplexity Sonar', 'open_router', 127000, 8000),
    ('perplexity/sonar-pro', 'Perplexity Sonar Pro', 'open_router', 200000, 8000),
    ('perplexity/sonar-deep-research', 'Perplexity Sonar Deep Research', 'open_router', 128000, 16000),
    ('nousresearch/hermes-3-llama-3.1-405b', 'Hermes 3 Llama 3.1 405B', 'open_router', 131000, 4096),
    ('nousresearch/hermes-3-llama-3.1-70b', 'Hermes 3 Llama 3.1 70B', 'open_router', 12288, 12288),
    ('openai/gpt-oss-120b', 'GPT OSS 120B', 'open_router', 131072, 131072),
    ('openai/gpt-oss-20b', 'GPT OSS 20B', 'open_router', 131072, 32768),
    ('amazon/nova-lite-v1', 'Amazon Nova Lite', 'open_router', 300000, 5120),
    ('amazon/nova-micro-v1', 'Amazon Nova Micro', 'open_router', 128000, 5120),
    ('amazon/nova-pro-v1', 'Amazon Nova Pro', 'open_router', 300000, 5120),
    ('microsoft/wizardlm-2-8x22b', 'WizardLM 2 8x22B', 'open_router', 65536, 4096),
    ('gryphe/mythomax-l2-13b', 'MythoMax L2 13B', 'open_router', 4096, 4096),
    ('meta-llama/llama-4-scout', 'Llama 4 Scout', 'open_router', 131072, 131072),
    ('meta-llama/llama-4-maverick', 'Llama 4 Maverick', 'open_router', 1048576, 1000000),
    ('x-ai/grok-4', 'Grok 4', 'open_router', 256000, 256000),
    ('x-ai/grok-4-fast', 'Grok 4 Fast', 'open_router', 2000000, 30000),
    ('x-ai/grok-4.1-fast', 'Grok 4.1 Fast', 'open_router', 2000000, 30000),
    ('x-ai/grok-code-fast-1', 'Grok Code Fast 1', 'open_router', 256000, 10000),
    ('moonshotai/kimi-k2', 'Kimi K2', 'open_router', 131000, 131000),
    ('qwen/qwen3-235b-a22b-thinking-2507', 'Qwen 3 235B Thinking', 'open_router', 262144, 262144),
    ('qwen/qwen3-coder', 'Qwen 3 Coder', 'open_router', 262144, 262144),
    -- Llama API models
    ('Llama-4-Scout-17B-16E-Instruct-FP8', 'Llama 4 Scout', 'llama_api', 128000, 4028),
    ('Llama-4-Maverick-17B-128E-Instruct-FP8', 'Llama 4 Maverick', 'llama_api', 128000, 4028),
    ('Llama-3.3-8B-Instruct', 'Llama 3.3 8B', 'llama_api', 128000, 4028),
    ('Llama-3.3-70B-Instruct', 'Llama 3.3 70B', 'llama_api', 128000, 4028),
    -- v0 models
    ('v0-1.5-md', 'v0 1.5 MD', 'v0', 128000, 64000),
    ('v0-1.5-lg', 'v0 1.5 LG', 'v0', 512000, 64000),
    ('v0-1.0-md', 'v0 1.0 MD', 'v0', 128000, 64000)
) AS models(model_slug, model_display_name, provider_name, context_window, max_output_tokens)
JOIN provider_ids p ON p."name" = models.provider_name
ON CONFLICT ("slug") DO NOTHING;

-- Insert Costs (using CTEs to reference model IDs)
WITH model_ids AS (
    SELECT "id", "slug", "providerId" FROM "LlmModel"
),
provider_ids AS (
    SELECT "id", "name" FROM "LlmProvider"
)
INSERT INTO "LlmModelCost" ("id", "unit", "creditCost", "credentialProvider", "credentialId", "credentialType", "currency", "metadata", "llmModelId")
SELECT
    gen_random_uuid(),
    'RUN'::"LlmCostUnit",
    cost,
    p."name",
    NULL,
    'api_key',
    NULL,
    '{}'::jsonb,
    m."id"
FROM (VALUES
    -- OpenAI costs
    ('o3', 4),
    ('o3-mini', 2),
    ('o1', 16),
    ('o1-mini', 4),
    ('gpt-5-2025-08-07', 2),
    ('gpt-5.1-2025-11-13', 5),
    ('gpt-5-mini-2025-08-07', 1),
    ('gpt-5-nano-2025-08-07', 1),
    ('gpt-5-chat-latest', 5),
    ('gpt-4.1-2025-04-14', 2),
    ('gpt-4.1-mini-2025-04-14', 1),
    ('gpt-4o-mini', 1),
    ('gpt-4o', 3),
    ('gpt-4-turbo', 10),
    ('gpt-3.5-turbo', 1),
    -- Anthropic costs
    ('claude-opus-4-1-20250805', 21),
    ('claude-opus-4-20250514', 21),
    ('claude-sonnet-4-20250514', 5),
    ('claude-haiku-4-5-20251001', 4),
    ('claude-opus-4-5-20251101', 14),
    ('claude-sonnet-4-5-20250929', 9),
    ('claude-3-7-sonnet-20250219', 5),
    ('claude-3-haiku-20240307', 1),
    -- AI/ML API costs
    ('Qwen/Qwen2.5-72B-Instruct-Turbo', 1),
    ('nvidia/llama-3.1-nemotron-70b-instruct', 1),
    ('meta-llama/Llama-3.3-70B-Instruct-Turbo', 1),
    ('meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo', 1),
    ('meta-llama/Llama-3.2-3B-Instruct-Turbo', 1),
    -- Groq costs
    ('llama-3.3-70b-versatile', 1),
    ('llama-3.1-8b-instant', 1),
    -- Ollama costs
    ('llama3.3', 1),
    ('llama3.2', 1),
    ('llama3', 1),
    ('llama3.1:405b', 1),
    ('dolphin-mistral:latest', 1),
    -- OpenRouter costs
    ('google/gemini-2.5-pro-preview-03-25', 4),
    ('google/gemini-3-pro-preview', 5),
    ('mistralai/mistral-nemo', 1),
    ('cohere/command-r-08-2024', 1),
    ('cohere/command-r-plus-08-2024', 3),
    ('deepseek/deepseek-chat', 2),
    ('perplexity/sonar', 1),
    ('perplexity/sonar-pro', 5),
    ('perplexity/sonar-deep-research', 10),
    ('nousresearch/hermes-3-llama-3.1-405b', 1),
    ('nousresearch/hermes-3-llama-3.1-70b', 1),
    ('amazon/nova-lite-v1', 1),
    ('amazon/nova-micro-v1', 1),
    ('amazon/nova-pro-v1', 1),
    ('microsoft/wizardlm-2-8x22b', 1),
    ('gryphe/mythomax-l2-13b', 1),
    ('meta-llama/llama-4-scout', 1),
    ('meta-llama/llama-4-maverick', 1),
    ('x-ai/grok-4', 9),
    ('x-ai/grok-4-fast', 1),
    ('x-ai/grok-4.1-fast', 1),
    ('x-ai/grok-code-fast-1', 1),
    ('moonshotai/kimi-k2', 1),
    ('qwen/qwen3-235b-a22b-thinking-2507', 1),
    ('qwen/qwen3-coder', 9),
    ('google/gemini-2.5-flash', 1),
    ('google/gemini-2.0-flash-001', 1),
    ('google/gemini-2.5-flash-lite-preview-06-17', 1),
    ('google/gemini-2.0-flash-lite-001', 1),
    ('deepseek/deepseek-r1-0528', 1),
    ('openai/gpt-oss-120b', 1),
    ('openai/gpt-oss-20b', 1),
    -- Llama API costs
    ('Llama-4-Scout-17B-16E-Instruct-FP8', 1),
    ('Llama-4-Maverick-17B-128E-Instruct-FP8', 1),
    ('Llama-3.3-8B-Instruct', 1),
    ('Llama-3.3-70B-Instruct', 1),
    -- v0 costs
    ('v0-1.5-md', 1),
    ('v0-1.5-lg', 2),
    ('v0-1.0-md', 1)
) AS costs(model_slug, cost)
JOIN model_ids m ON m."slug" = costs.model_slug
JOIN provider_ids p ON p."id" = m."providerId";

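(Note: the seed above is idempotent: every INSERT resolves foreign keys through a CTE and ends in ON CONFLICT ... DO NOTHING, so re-running the migration is a no-op. A minimal TypeScript sketch of the same pattern through the Prisma client generated from this schema; the client instance and the chosen seed rows are illustrative assumptions, not code from this diff.)

// Hypothetical idempotent seed using the generated Prisma client.
import { PrismaClient } from "@prisma/client";

const prisma = new PrismaClient();

async function seedProviderAndModel(): Promise<void> {
  // Upsert keyed on the unique "name" column: creates once, no-ops after,
  // mirroring the SQL's ON CONFLICT ("name") DO NOTHING.
  const openai = await prisma.llmProvider.upsert({
    where: { name: "openai" },
    update: {},
    create: { name: "openai", displayName: "OpenAI" },
  });

  // Resolve the provider id in code instead of a CTE, then upsert the model
  // on its unique "slug" (the ON CONFLICT ("slug") DO NOTHING equivalent).
  await prisma.llmModel.upsert({
    where: { slug: "gpt-4o-mini" },
    update: {},
    create: {
      slug: "gpt-4o-mini",
      displayName: "GPT 4o Mini",
      providerId: openai.id,
      contextWindow: 128000,
      maxOutputTokens: 16384,
    },
  });
}

seedProviderAndModel().finally(() => prisma.$disconnect());
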
@@ -1,25 +0,0 @@
-- CreateTable
CREATE TABLE "LlmModelMigration" (
    "id" TEXT NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL,
    "sourceModelSlug" TEXT NOT NULL,
    "targetModelSlug" TEXT NOT NULL,
    "reason" TEXT,
    "migratedNodeIds" JSONB NOT NULL DEFAULT '[]',
    "nodeCount" INTEGER NOT NULL,
    "customCreditCost" INTEGER,
    "isReverted" BOOLEAN NOT NULL DEFAULT false,
    "revertedAt" TIMESTAMP(3),

    CONSTRAINT "LlmModelMigration_pkey" PRIMARY KEY ("id")
);

-- CreateIndex
CREATE INDEX "LlmModelMigration_sourceModelSlug_idx" ON "LlmModelMigration"("sourceModelSlug");

-- CreateIndex
CREATE INDEX "LlmModelMigration_targetModelSlug_idx" ON "LlmModelMigration"("targetModelSlug");

-- CreateIndex
CREATE INDEX "LlmModelMigration_isReverted_idx" ON "LlmModelMigration"("isReverted");

@@ -1,127 +0,0 @@
-- Add LlmModelCreator table
-- Creator represents who made/trained the model (e.g., OpenAI, Meta)
-- This is distinct from Provider who hosts/serves the model (e.g., OpenRouter)

-- Create the LlmModelCreator table
CREATE TABLE "LlmModelCreator" (
    "id" TEXT NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL,
    "name" TEXT NOT NULL,
    "displayName" TEXT NOT NULL,
    "description" TEXT,
    "websiteUrl" TEXT,
    "logoUrl" TEXT,
    "metadata" JSONB NOT NULL DEFAULT '{}',

    CONSTRAINT "LlmModelCreator_pkey" PRIMARY KEY ("id")
);

-- Create unique index on name
CREATE UNIQUE INDEX "LlmModelCreator_name_key" ON "LlmModelCreator"("name");

-- Add creatorId column to LlmModel
ALTER TABLE "LlmModel" ADD COLUMN "creatorId" TEXT;

-- Add foreign key constraint
ALTER TABLE "LlmModel" ADD CONSTRAINT "LlmModel_creatorId_fkey"
    FOREIGN KEY ("creatorId") REFERENCES "LlmModelCreator"("id") ON DELETE SET NULL ON UPDATE CASCADE;

-- Create index on creatorId
CREATE INDEX "LlmModel_creatorId_idx" ON "LlmModel"("creatorId");

-- Seed creators based on known model creators
INSERT INTO "LlmModelCreator" ("id", "updatedAt", "name", "displayName", "description", "websiteUrl", "metadata")
VALUES
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'openai', 'OpenAI', 'Creator of GPT models', 'https://openai.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'anthropic', 'Anthropic', 'Creator of Claude models', 'https://anthropic.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'meta', 'Meta', 'Creator of Llama models', 'https://ai.meta.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'google', 'Google', 'Creator of Gemini models', 'https://deepmind.google', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'mistral', 'Mistral AI', 'Creator of Mistral models', 'https://mistral.ai', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'cohere', 'Cohere', 'Creator of Command models', 'https://cohere.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'deepseek', 'DeepSeek', 'Creator of DeepSeek models', 'https://deepseek.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'perplexity', 'Perplexity AI', 'Creator of Sonar models', 'https://perplexity.ai', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'qwen', 'Qwen (Alibaba)', 'Creator of Qwen models', 'https://qwenlm.github.io', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'xai', 'xAI', 'Creator of Grok models', 'https://x.ai', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'amazon', 'Amazon', 'Creator of Nova models', 'https://aws.amazon.com/bedrock', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'microsoft', 'Microsoft', 'Creator of WizardLM models', 'https://microsoft.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'moonshot', 'Moonshot AI', 'Creator of Kimi models', 'https://moonshot.cn', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'nvidia', 'NVIDIA', 'Creator of Nemotron models', 'https://nvidia.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'nous_research', 'Nous Research', 'Creator of Hermes models', 'https://nousresearch.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'vercel', 'Vercel', 'Creator of v0 models', 'https://vercel.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'cognitive_computations', 'Cognitive Computations', 'Creator of Dolphin models', 'https://erichartford.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'gryphe', 'Gryphe', 'Creator of MythoMax models', 'https://huggingface.co/Gryphe', '{}')
ON CONFLICT ("name") DO NOTHING;

-- Update existing models with their creators
-- OpenAI models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'openai')
WHERE "slug" LIKE 'gpt-%' OR "slug" LIKE 'o1%' OR "slug" LIKE 'o3%' OR "slug" LIKE 'openai/%';

-- Anthropic models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'anthropic')
WHERE "slug" LIKE 'claude-%';

-- Meta/Llama models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'meta')
WHERE "slug" LIKE 'llama%' OR "slug" LIKE 'Llama%' OR "slug" LIKE 'meta-llama/%' OR "slug" LIKE '%/llama-%';

-- Google models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'google')
WHERE "slug" LIKE 'google/%' OR "slug" LIKE 'gemini%';

-- Mistral models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'mistral')
WHERE "slug" LIKE 'mistral%' OR "slug" LIKE 'mistralai/%';

-- Cohere models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'cohere')
WHERE "slug" LIKE 'cohere/%' OR "slug" LIKE 'command-%';

-- DeepSeek models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'deepseek')
WHERE "slug" LIKE 'deepseek/%' OR "slug" LIKE 'deepseek-%';

-- Perplexity models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'perplexity')
WHERE "slug" LIKE 'perplexity/%' OR "slug" LIKE 'sonar%';

-- Qwen models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'qwen')
WHERE "slug" LIKE 'Qwen/%' OR "slug" LIKE 'qwen/%';

-- xAI/Grok models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'xai')
WHERE "slug" LIKE 'x-ai/%' OR "slug" LIKE 'grok%';

-- Amazon models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'amazon')
WHERE "slug" LIKE 'amazon/%' OR "slug" LIKE 'nova-%';

-- Microsoft models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'microsoft')
WHERE "slug" LIKE 'microsoft/%' OR "slug" LIKE 'wizardlm%';

-- Moonshot models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'moonshot')
WHERE "slug" LIKE 'moonshotai/%' OR "slug" LIKE 'kimi%';

-- NVIDIA models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'nvidia')
WHERE "slug" LIKE 'nvidia/%' OR "slug" LIKE '%nemotron%';

-- Nous Research models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'nous_research')
WHERE "slug" LIKE 'nousresearch/%' OR "slug" LIKE 'hermes%';

-- Vercel/v0 models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'vercel')
WHERE "slug" LIKE 'v0-%';

-- Dolphin models (Cognitive Computations / Eric Hartford)
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'cognitive_computations')
WHERE "slug" LIKE 'dolphin-%';

-- Gryphe models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'gryphe')
WHERE "slug" LIKE 'gryphe/%' OR "slug" LIKE 'mythomax%';

@@ -1,4 +0,0 @@
-- CreateIndex
-- Index for efficient LLM model lookups on AgentNode.constantInput->>'model'
-- This improves performance of model migration queries in the LLM registry
CREATE INDEX "AgentNode_constantInput_model_idx" ON "AgentNode" ((("constantInput"->>'model')));

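(Note: the expression index above exists to serve queries that filter AgentNode rows on the JSON field "constantInput"->>'model'. A hedged sketch of the kind of lookup it accelerates, written as a raw query through a Prisma client; the function name and client instance are assumptions for illustration, not code from this diff.)

import { PrismaClient } from "@prisma/client";

const prisma = new PrismaClient();

// Count workflow nodes pinned to a given model slug. Without the expression
// index this is a sequential scan over AgentNode; with it, an index lookup.
async function countNodesUsingModel(slug: string): Promise<number> {
  const rows = await prisma.$queryRaw<{ count: bigint }[]>`
    SELECT COUNT(*) AS count
    FROM "AgentNode"
    WHERE "constantInput"->>'model' = ${slug}
  `;
  return Number(rows[0]?.count ?? 0);
}
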
@@ -1,52 +0,0 @@
-- Add GPT-5.2 model and update O3 slug
-- This migration adds the new GPT-5.2 model added in dev branch

-- Update O3 slug to match dev branch format
UPDATE "LlmModel"
SET "slug" = 'o3-2025-04-16'
WHERE "slug" = 'o3';

-- Update cost reference for O3 if needed
-- (costs are linked by model ID, so no update needed)

-- Add GPT-5.2 model
WITH provider_id AS (
    SELECT "id" FROM "LlmProvider" WHERE "name" = 'openai'
)
INSERT INTO "LlmModel" ("id", "slug", "displayName", "description", "providerId", "contextWindow", "maxOutputTokens", "isEnabled", "capabilities", "metadata")
SELECT
    gen_random_uuid(),
    'gpt-5.2-2025-12-11',
    'GPT 5.2',
    'OpenAI GPT-5.2 model',
    p."id",
    400000,
    128000,
    true,
    '{}'::jsonb,
    '{}'::jsonb
FROM provider_id p
ON CONFLICT ("slug") DO NOTHING;

-- Add cost for GPT-5.2
WITH model_id AS (
    SELECT m."id", p."name" as provider_name
    FROM "LlmModel" m
    JOIN "LlmProvider" p ON p."id" = m."providerId"
    WHERE m."slug" = 'gpt-5.2-2025-12-11'
)
INSERT INTO "LlmModelCost" ("id", "unit", "creditCost", "credentialProvider", "credentialId", "credentialType", "currency", "metadata", "llmModelId")
SELECT
    gen_random_uuid(),
    'RUN'::"LlmCostUnit",
    3, -- Same cost tier as GPT-5.1
    m.provider_name,
    NULL,
    'api_key',
    NULL,
    '{}'::jsonb,
    m."id"
FROM model_id m
WHERE NOT EXISTS (
    SELECT 1 FROM "LlmModelCost" c WHERE c."llmModelId" = m."id"
);

@@ -1,11 +0,0 @@
-- Add isRecommended field to LlmModel table
-- This allows admins to mark a model as the recommended default

ALTER TABLE "LlmModel" ADD COLUMN "isRecommended" BOOLEAN NOT NULL DEFAULT false;

-- Set gpt-4o-mini as the default recommended model (if it exists)
UPDATE "LlmModel" SET "isRecommended" = true WHERE "slug" = 'gpt-4o-mini' AND "isEnabled" = true;

-- Create unique partial index to enforce only one recommended model at the database level
-- This prevents multiple rows from having isRecommended = true
CREATE UNIQUE INDEX "LlmModel_single_recommended_idx" ON "LlmModel" ("isRecommended") WHERE "isRecommended" = true;

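(Note: the partial unique index above allows at most one row with "isRecommended" = true, so changing the recommendation must clear the old flag before, or atomically with, setting the new one. A minimal sketch of that swap in a single Prisma transaction; the function name and client instance are illustrative assumptions.)

import { PrismaClient } from "@prisma/client";

const prisma = new PrismaClient();

// Swap the single recommended model atomically. Doing both writes in one
// transaction avoids violating the partial unique index on isRecommended.
async function setRecommendedModel(modelId: string): Promise<void> {
  await prisma.$transaction([
    prisma.llmModel.updateMany({
      where: { isRecommended: true },
      data: { isRecommended: false },
    }),
    prisma.llmModel.update({
      where: { id: modelId },
      data: { isRecommended: true },
    }),
  ]);
}
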
autogpt_platform/backend/poetry.lock (generated, 20 lines changed)

@@ -5339,6 +5339,24 @@ urllib3 = ">=1.26.14,<3"
fastembed = ["fastembed (>=0.7,<0.8)"]
fastembed-gpu = ["fastembed-gpu (>=0.7,<0.8)"]

[[package]]
name = "rank-bm25"
version = "0.2.2"
description = "Various BM25 algorithms for document ranking"
optional = false
python-versions = "*"
groups = ["main"]
files = [
    {file = "rank_bm25-0.2.2-py3-none-any.whl", hash = "sha256:7bd4a95571adadfc271746fa146a4bcfd89c0cf731e49c3d1ad863290adbe8ae"},
    {file = "rank_bm25-0.2.2.tar.gz", hash = "sha256:096ccef76f8188563419aaf384a02f0ea459503fdf77901378d4fd9d87e5e51d"},
]

[package.dependencies]
numpy = "*"

[package.extras]
dev = ["pytest"]

[[package]]
name = "rapidfuzz"
version = "3.13.0"

@@ -7494,4 +7512,4 @@ cffi = ["cffi (>=1.11)"]

[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.14"
content-hash = "86838b5ae40d606d6e01a14dad8a56c389d890d7a6a0c274a6602cca80f0df84"
content-hash = "18b92e09596298c82432e4d0a85cb6d80a40b4229bee0a0c15f0529fd6cb21a4"

@@ -46,6 +46,7 @@ poetry = "2.1.1" # CHECK DEPENDABOT SUPPORT BEFORE UPGRADING
postmarker = "^1.0"
praw = "~7.8.1"
prisma = "^0.15.0"
rank-bm25 = "^0.2.2"
prometheus-client = "^0.22.1"
prometheus-fastapi-instrumentator = "^7.0.0"
psutil = "^7.0.0"

@@ -1095,151 +1095,6 @@ enum APIKeyStatus {

////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
///////////// LLM REGISTRY AND BILLING DATA /////////////
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////

// LlmCostUnit: Defines how LLM MODEL costs are calculated (per run or per token).
// This is distinct from BlockCostType (in backend/data/block.py) which defines
// how BLOCK EXECUTION costs are calculated (per run, per byte, or per second).
// LlmCostUnit is for pricing individual LLM model API calls in the registry,
// while BlockCostType is for billing platform block executions.
enum LlmCostUnit {
  RUN
  TOKENS
}

model LlmModelCreator {
  id        String   @id @default(uuid())
  createdAt DateTime @default(now())
  updatedAt DateTime @updatedAt

  name        String  @unique // e.g., "openai", "anthropic", "meta"
  displayName String  // e.g., "OpenAI", "Anthropic", "Meta"
  description String?
  websiteUrl  String? // Link to creator's website
  logoUrl     String? // URL to creator's logo

  metadata Json @default("{}")

  Models LlmModel[]
}

model LlmProvider {
  id        String   @id @default(uuid())
  createdAt DateTime @default(now())
  updatedAt DateTime @updatedAt

  name        String  @unique
  displayName String
  description String?

  defaultCredentialProvider String?
  defaultCredentialId       String?
  defaultCredentialType     String?

  supportsTools        Boolean @default(true)
  supportsJsonOutput   Boolean @default(true)
  supportsReasoning    Boolean @default(false)
  supportsParallelTool Boolean @default(false)

  metadata Json @default("{}")

  Models LlmModel[]
}

model LlmModel {
  id        String   @id @default(uuid())
  createdAt DateTime @default(now())
  updatedAt DateTime @updatedAt

  slug        String  @unique
  displayName String
  description String?

  providerId String
  Provider   LlmProvider @relation(fields: [providerId], references: [id], onDelete: Restrict)

  // Creator is the organization that created/trained the model (e.g., OpenAI, Meta)
  // This is distinct from the provider who hosts/serves the model (e.g., OpenRouter)
  creatorId String?
  Creator   LlmModelCreator? @relation(fields: [creatorId], references: [id], onDelete: SetNull)

  contextWindow   Int
  maxOutputTokens Int?
  isEnabled       Boolean @default(true)
  isRecommended   Boolean @default(false)

  capabilities Json @default("{}")
  metadata     Json @default("{}")

  Costs LlmModelCost[]

  @@index([providerId, isEnabled])
  @@index([creatorId])
  @@index([slug])
}

model LlmModelCost {
  id        String      @id @default(uuid())
  createdAt DateTime    @default(now())
  updatedAt DateTime    @updatedAt
  unit      LlmCostUnit @default(RUN)

  creditCost Int

  credentialProvider String
  credentialId       String?
  credentialType     String?
  currency           String?

  metadata Json @default("{}")

  llmModelId String
  Model      LlmModel @relation(fields: [llmModelId], references: [id], onDelete: Cascade)

  @@index([llmModelId])
  @@index([credentialProvider])
}

// Tracks model migrations for revert capability
// When a model is disabled with migration, we record which nodes were affected
// so they can be reverted when the original model is back online
model LlmModelMigration {
  id        String   @id @default(uuid())
  createdAt DateTime @default(now())
  updatedAt DateTime @updatedAt

  sourceModelSlug String  // The original model that was disabled
  targetModelSlug String  // The model workflows were migrated to
  reason          String? // Why the migration happened (e.g., "Provider outage")

  // Track affected nodes as JSON array of node IDs
  // Format: ["node-uuid-1", "node-uuid-2", ...]
  migratedNodeIds Json @default("[]")
  nodeCount       Int // Number of nodes migrated

  // Custom pricing override for migrated workflows during the migration period.
  // Use case: When migrating users from an expensive model (e.g., GPT-4) to a cheaper
  // one (e.g., GPT-3.5), you may want to temporarily maintain the original pricing
  // to avoid billing surprises, or offer a discount during the transition.
  //
  // IMPORTANT: This field is intended for integration with the billing system.
  // When billing calculates costs for nodes affected by this migration, it should
  // check if customCreditCost is set and use it instead of the target model's cost.
  // If null, the target model's normal cost applies.
  //
  // TODO: Integrate with billing system to apply this override during cost calculation.
  customCreditCost Int?

  // Revert tracking
  isReverted Boolean   @default(false)
  revertedAt DateTime?

  @@index([sourceModelSlug])
  @@index([targetModelSlug])
  @@index([isReverted])
}

////////////// OAUTH PROVIDER TABLES //////////////////
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////

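(Note: the customCreditCost comment in the schema above describes an integration that, per its own TODO, is not implemented yet. A hedged sketch of how billing could consume the override when costing a node; every name here is an assumption layered on the schema, not existing backend code, and the JSON array filter syntax is the Prisma PostgreSQL form.)

import { PrismaClient } from "@prisma/client";

const prisma = new PrismaClient();

// Resolve the per-run credit cost for a node: if a live (non-reverted)
// migration covers this node and carries a custom price, use that;
// otherwise fall back to the target model's normal RUN cost.
async function resolveNodeCreditCost(
  nodeId: string,
  targetModelSlug: string,
): Promise<number | null> {
  const migration = await prisma.llmModelMigration.findFirst({
    where: {
      targetModelSlug,
      isReverted: false,
      // migratedNodeIds is a JSON array of node id strings.
      migratedNodeIds: { array_contains: nodeId },
    },
  });
  if (migration?.customCreditCost != null) {
    return migration.customCreditCost;
  }

  const model = await prisma.llmModel.findUnique({
    where: { slug: targetModelSlug },
    include: { Costs: true },
  });
  return model?.Costs.find((c) => c.unit === "RUN")?.creditCost ?? null;
}
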
@@ -0,0 +1,45 @@
"use client";

import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
import { Text } from "@/components/atoms/Text/Text";
import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
import { useRouter } from "next/navigation";
import { useEffect, useRef } from "react";

const LOGOUT_REDIRECT_DELAY_MS = 400;

function wait(ms: number): Promise<void> {
  return new Promise(function resolveAfterDelay(resolve) {
    setTimeout(resolve, ms);
  });
}

export default function LogoutPage() {
  const { logOut } = useSupabase();
  const router = useRouter();
  const hasStartedRef = useRef(false);

  useEffect(function handleLogoutEffect() {
    if (hasStartedRef.current) return;
    hasStartedRef.current = true;

    async function runLogout() {
      await logOut();
      await wait(LOGOUT_REDIRECT_DELAY_MS);
      router.replace("/login");
    }

    void runLogout();
  }, []);

  return (
    <div className="flex min-h-screen items-center justify-center px-4">
      <div className="flex flex-col items-center justify-center gap-4 py-8">
        <LoadingSpinner size="large" />
        <Text variant="body" className="text-center">
          Logging you out...
        </Text>
      </div>
    </div>
  );
}

@@ -1,6 +1,6 @@
import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInput";
import { CredentialsMetaInput } from "@/app/api/__generated__/models/credentialsMetaInput";
import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
import { CredentialsInput } from "@/components/contextual/CredentialsInput/CredentialsInput";
import { useState } from "react";
import { getSchemaDefaultCredentials } from "../../helpers";
import { areAllCredentialsSet, getCredentialFields } from "./helpers";

@@ -1,12 +1,12 @@
"use client";

import { RunAgentInputs } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs";
import {
  Card,
  CardContent,
  CardHeader,
  CardTitle,
} from "@/components/__legacy__/ui/card";
import { RunAgentInputs } from "@/components/contextual/RunAgentInputs/RunAgentInputs";
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { CircleNotchIcon } from "@phosphor-icons/react/dist/ssr";
import { Play } from "lucide-react";

@@ -1,8 +1,5 @@
"use client";

import { Sidebar } from "@/components/__legacy__/Sidebar";
import { Users, DollarSign, UserSearch, FileText } from "lucide-react";
import { Cpu } from "@phosphor-icons/react";

import { IconSliders } from "@/components/__legacy__/ui/icons";

@@ -29,11 +26,6 @@ const sidebarLinkGroups = [
      href: "/admin/execution-analytics",
      icon: <FileText className="h-6 w-6" />,
    },
    {
      text: "LLM Registry",
      href: "/admin/llms",
      icon: <Cpu size={24} />,
    },
    {
      text: "Admin User Management",
      href: "/admin/settings",

@@ -1,361 +0,0 @@
"use server";

import { revalidatePath } from "next/cache";

// Generated API functions
import {
  getV2ListLlmProviders,
  postV2CreateLlmProvider,
  getV2ListLlmModels,
  postV2CreateLlmModel,
  patchV2UpdateLlmModel,
  patchV2ToggleLlmModelAvailability,
  deleteV2DeleteLlmModelAndMigrateWorkflows,
  getV2GetModelUsageCount,
  getV2ListModelMigrations,
  postV2RevertAModelMigration,
  getV2ListModelCreators,
  postV2CreateModelCreator,
  patchV2UpdateModelCreator,
  deleteV2DeleteModelCreator,
  postV2SetRecommendedModel,
} from "@/app/api/__generated__/endpoints/admin/admin";

// Generated types
import type { LlmProvidersResponse } from "@/app/api/__generated__/models/llmProvidersResponse";
import type { LlmModelsResponse } from "@/app/api/__generated__/models/llmModelsResponse";
import type { UpsertLlmProviderRequest } from "@/app/api/__generated__/models/upsertLlmProviderRequest";
import type { CreateLlmModelRequest } from "@/app/api/__generated__/models/createLlmModelRequest";
import type { UpdateLlmModelRequest } from "@/app/api/__generated__/models/updateLlmModelRequest";
import type { ToggleLlmModelRequest } from "@/app/api/__generated__/models/toggleLlmModelRequest";
import type { LlmMigrationsResponse } from "@/app/api/__generated__/models/llmMigrationsResponse";
import type { LlmCreatorsResponse } from "@/app/api/__generated__/models/llmCreatorsResponse";
import type { UpsertLlmCreatorRequest } from "@/app/api/__generated__/models/upsertLlmCreatorRequest";
import type { LlmModelUsageResponse } from "@/app/api/__generated__/models/llmModelUsageResponse";
import { LlmCostUnit } from "@/app/api/__generated__/models/llmCostUnit";

const ADMIN_LLM_PATH = "/admin/llms";

// =============================================================================
// Provider Actions
// =============================================================================

export async function fetchLlmProviders(): Promise<LlmProvidersResponse> {
  const response = await getV2ListLlmProviders({ include_models: true });
  if (response.status !== 200) {
    throw new Error("Failed to fetch LLM providers");
  }
  return response.data;
}

export async function createLlmProviderAction(formData: FormData) {
  const payload: UpsertLlmProviderRequest = {
    name: String(formData.get("name") || "").trim(),
    display_name: String(formData.get("display_name") || "").trim(),
    description: formData.get("description")
      ? String(formData.get("description"))
      : undefined,
    default_credential_provider: formData.get("default_credential_provider")
      ? String(formData.get("default_credential_provider")).trim()
      : undefined,
    default_credential_id: undefined,
    default_credential_type: "api_key",
    supports_tools: formData.get("supports_tools") === "on",
    supports_json_output: formData.get("supports_json_output") === "on",
    supports_reasoning: formData.get("supports_reasoning") === "on",
    supports_parallel_tool: formData.get("supports_parallel_tool") === "on",
    metadata: {},
  };

  const response = await postV2CreateLlmProvider(payload);
  if (response.status !== 200) {
    throw new Error("Failed to create LLM provider");
  }
  revalidatePath(ADMIN_LLM_PATH);
}

// =============================================================================
// Model Actions
// =============================================================================

export async function fetchLlmModels(): Promise<LlmModelsResponse> {
  const response = await getV2ListLlmModels();
  if (response.status !== 200) {
    throw new Error("Failed to fetch LLM models");
  }
  return response.data;
}

export async function createLlmModelAction(formData: FormData) {
  const providerId = String(formData.get("provider_id"));
  const creatorId = formData.get("creator_id");

  // Fetch provider to get default credentials
  const providersResponse = await getV2ListLlmProviders({
    include_models: false,
  });
  if (providersResponse.status !== 200) {
    throw new Error("Failed to fetch providers");
  }
  const provider = providersResponse.data.providers.find(
    (p) => p.id === providerId,
  );

  if (!provider) {
    throw new Error("Provider not found");
  }

  const payload: CreateLlmModelRequest = {
    slug: String(formData.get("slug") || "").trim(),
    display_name: String(formData.get("display_name") || "").trim(),
    description: formData.get("description")
      ? String(formData.get("description"))
      : undefined,
    provider_id: providerId,
    creator_id: creatorId ? String(creatorId) : undefined,
    context_window: Number(formData.get("context_window") || 0),
    max_output_tokens: formData.get("max_output_tokens")
      ? Number(formData.get("max_output_tokens"))
      : undefined,
    is_enabled: formData.get("is_enabled") === "on",
    capabilities: {},
    metadata: {},
    costs: [
      {
        unit: (formData.get("unit") as LlmCostUnit) || LlmCostUnit.RUN,
        credit_cost: Number(formData.get("credit_cost") || 0),
        credential_provider:
          provider.default_credential_provider || provider.name,
        credential_id: provider.default_credential_id || undefined,
        credential_type: provider.default_credential_type || "api_key",
        metadata: {},
      },
    ],
  };

  const response = await postV2CreateLlmModel(payload);
  if (response.status !== 200) {
    throw new Error("Failed to create LLM model");
  }
  revalidatePath(ADMIN_LLM_PATH);
}

export async function updateLlmModelAction(formData: FormData) {
  const modelId = String(formData.get("model_id"));
  const creatorId = formData.get("creator_id");

  const payload: UpdateLlmModelRequest = {
    display_name: formData.get("display_name")
      ? String(formData.get("display_name"))
      : undefined,
    description: formData.get("description")
      ? String(formData.get("description"))
      : undefined,
    provider_id: formData.get("provider_id")
      ? String(formData.get("provider_id"))
      : undefined,
    creator_id: creatorId ? String(creatorId) : undefined,
    context_window: formData.get("context_window")
      ? Number(formData.get("context_window"))
      : undefined,
    max_output_tokens: formData.get("max_output_tokens")
      ? Number(formData.get("max_output_tokens"))
      : undefined,
    is_enabled: formData.get("is_enabled")
      ? formData.get("is_enabled") === "on"
      : undefined,
    costs: formData.get("credit_cost")
      ? [
          {
            unit: (formData.get("unit") as LlmCostUnit) || LlmCostUnit.RUN,
            credit_cost: Number(formData.get("credit_cost")),
            credential_provider: String(
              formData.get("credential_provider") || "",
            ).trim(),
            credential_id: formData.get("credential_id")
              ? String(formData.get("credential_id"))
              : undefined,
            credential_type: formData.get("credential_type")
              ? String(formData.get("credential_type"))
              : undefined,
            metadata: {},
          },
        ]
      : undefined,
  };

  const response = await patchV2UpdateLlmModel(modelId, payload);
  if (response.status !== 200) {
    throw new Error("Failed to update LLM model");
  }
  revalidatePath(ADMIN_LLM_PATH);
}

export async function toggleLlmModelAction(formData: FormData): Promise<void> {
  const modelId = String(formData.get("model_id"));
  const shouldEnable = formData.get("is_enabled") === "true";
  const migrateToSlug = formData.get("migrate_to_slug");
  const migrationReason = formData.get("migration_reason");
  const customCreditCost = formData.get("custom_credit_cost");

  const payload: ToggleLlmModelRequest = {
    is_enabled: shouldEnable,
    migrate_to_slug: migrateToSlug ? String(migrateToSlug) : undefined,
    migration_reason: migrationReason ? String(migrationReason) : undefined,
    custom_credit_cost: customCreditCost ? Number(customCreditCost) : undefined,
  };

  const response = await patchV2ToggleLlmModelAvailability(modelId, payload);
  if (response.status !== 200) {
    throw new Error("Failed to toggle LLM model");
  }
  revalidatePath(ADMIN_LLM_PATH);
}
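
// Illustrative only (not part of the original file): how a caller might build
// the FormData for toggleLlmModelAction to disable a model while migrating
// its workflows to a replacement slug at a temporary override price. The
// ids and slugs below are placeholders.
//
//   const form = new FormData();
//   form.set("model_id", "model-uuid");
//   form.set("is_enabled", "false");
//   form.set("migrate_to_slug", "gpt-4o-mini");
//   form.set("migration_reason", "Provider outage");
//   form.set("custom_credit_cost", "3");
//   await toggleLlmModelAction(form);
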
export async function deleteLlmModelAction(formData: FormData): Promise<void> {
  const modelId = String(formData.get("model_id"));
  const rawReplacement = formData.get("replacement_model_slug");

  if (rawReplacement == null || String(rawReplacement).trim() === "") {
    throw new Error("Replacement model is required");
  }
  const replacementModelSlug = String(rawReplacement).trim();

  const response = await deleteV2DeleteLlmModelAndMigrateWorkflows(modelId, {
    replacement_model_slug: replacementModelSlug,
  });
  if (response.status !== 200) {
    throw new Error("Failed to delete model");
  }
  revalidatePath(ADMIN_LLM_PATH);
}

export async function fetchLlmModelUsage(
  modelId: string,
): Promise<LlmModelUsageResponse> {
  const response = await getV2GetModelUsageCount(modelId);
  if (response.status !== 200) {
    throw new Error("Failed to fetch model usage");
  }
  return response.data;
}

// =============================================================================
// Migration Actions
// =============================================================================

export async function fetchLlmMigrations(
  includeReverted: boolean = false,
): Promise<LlmMigrationsResponse> {
  const response = await getV2ListModelMigrations({
    include_reverted: includeReverted,
  });
  if (response.status !== 200) {
    throw new Error("Failed to fetch migrations");
  }
  return response.data;
}

export async function revertLlmMigrationAction(
  formData: FormData,
): Promise<void> {
  const migrationId = String(formData.get("migration_id"));

  const response = await postV2RevertAModelMigration(migrationId, null);
  if (response.status !== 200) {
    throw new Error("Failed to revert migration");
  }
  revalidatePath(ADMIN_LLM_PATH);
}

// =============================================================================
// Creator Actions
// =============================================================================

export async function fetchLlmCreators(): Promise<LlmCreatorsResponse> {
  const response = await getV2ListModelCreators();
  if (response.status !== 200) {
    throw new Error("Failed to fetch creators");
  }
  return response.data;
}

export async function createLlmCreatorAction(
  formData: FormData,
): Promise<void> {
  const payload: UpsertLlmCreatorRequest = {
    name: String(formData.get("name") || "").trim(),
    display_name: String(formData.get("display_name") || "").trim(),
    description: formData.get("description")
      ? String(formData.get("description"))
      : undefined,
    website_url: formData.get("website_url")
      ? String(formData.get("website_url")).trim()
      : undefined,
    logo_url: formData.get("logo_url")
      ? String(formData.get("logo_url")).trim()
      : undefined,
    metadata: {},
  };

  const response = await postV2CreateModelCreator(payload);
  if (response.status !== 200) {
    throw new Error("Failed to create creator");
  }
  revalidatePath(ADMIN_LLM_PATH);
}

export async function updateLlmCreatorAction(
  formData: FormData,
): Promise<void> {
  const creatorId = String(formData.get("creator_id"));
  const payload: UpsertLlmCreatorRequest = {
    name: String(formData.get("name") || "").trim(),
    display_name: String(formData.get("display_name") || "").trim(),
    description: formData.get("description")
      ? String(formData.get("description"))
      : undefined,
    website_url: formData.get("website_url")
      ? String(formData.get("website_url")).trim()
      : undefined,
    logo_url: formData.get("logo_url")
      ? String(formData.get("logo_url")).trim()
      : undefined,
    metadata: {},
  };

  const response = await patchV2UpdateModelCreator(creatorId, payload);
  if (response.status !== 200) {
    throw new Error("Failed to update creator");
  }
  revalidatePath(ADMIN_LLM_PATH);
}

export async function deleteLlmCreatorAction(
  formData: FormData,
): Promise<void> {
  const creatorId = String(formData.get("creator_id"));

  const response = await deleteV2DeleteModelCreator(creatorId);
  if (response.status !== 200) {
    throw new Error("Failed to delete creator");
  }
  revalidatePath(ADMIN_LLM_PATH);
}

// =============================================================================
// Recommended Model Actions
// =============================================================================

export async function setRecommendedModelAction(
  formData: FormData,
): Promise<void> {
  const modelId = String(formData.get("model_id"));

  const response = await postV2SetRecommendedModel({ model_id: modelId });
  if (response.status !== 200) {
    throw new Error("Failed to set recommended model");
  }

  revalidatePath(ADMIN_LLM_PATH);
}

@@ -1,147 +0,0 @@
"use client";

import { useState } from "react";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import { createLlmCreatorAction } from "../actions";
import { useRouter } from "next/navigation";

export function AddCreatorModal() {
  const [open, setOpen] = useState(false);
  const [isSubmitting, setIsSubmitting] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const router = useRouter();

  async function handleSubmit(formData: FormData) {
    setIsSubmitting(true);
    setError(null);
    try {
      await createLlmCreatorAction(formData);
      setOpen(false);
      router.refresh();
    } catch (err) {
      setError(err instanceof Error ? err.message : "Failed to create creator");
    } finally {
      setIsSubmitting(false);
    }
  }

  return (
    <Dialog
      title="Add Creator"
      controlled={{ isOpen: open, set: setOpen }}
      styling={{ maxWidth: "512px" }}
    >
      <Dialog.Trigger>
        <Button variant="primary" size="small">
          Add Creator
        </Button>
      </Dialog.Trigger>
      <Dialog.Content>
        <div className="mb-4 text-sm text-muted-foreground">
          Add a new model creator (the organization that made/trained the
          model).
        </div>

        <form action={handleSubmit} className="space-y-4">
          <div className="grid gap-4 sm:grid-cols-2">
            <div className="space-y-2">
              <label
                htmlFor="name"
                className="text-sm font-medium text-foreground"
              >
                Name (slug) <span className="text-destructive">*</span>
              </label>
              <input
                id="name"
                required
                name="name"
                className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                placeholder="openai"
              />
              <p className="text-xs text-muted-foreground">
                Lowercase identifier (e.g., openai, meta, anthropic)
              </p>
            </div>
            <div className="space-y-2">
              <label
                htmlFor="display_name"
                className="text-sm font-medium text-foreground"
              >
                Display Name <span className="text-destructive">*</span>
              </label>
              <input
                id="display_name"
                required
                name="display_name"
                className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                placeholder="OpenAI"
              />
            </div>
          </div>

          <div className="space-y-2">
            <label
              htmlFor="description"
              className="text-sm font-medium text-foreground"
            >
              Description
            </label>
            <textarea
              id="description"
              name="description"
              rows={2}
              className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
              placeholder="Creator of GPT models..."
            />
          </div>

          <div className="space-y-2">
            <label
              htmlFor="website_url"
              className="text-sm font-medium text-foreground"
            >
              Website URL
            </label>
            <input
              id="website_url"
              name="website_url"
              type="url"
              className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
              placeholder="https://openai.com"
            />
          </div>

          {error && (
            <div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
              {error}
            </div>
          )}

          <Dialog.Footer>
            <Button
              variant="ghost"
              size="small"
              type="button"
              onClick={() => {
                setOpen(false);
                setError(null);
              }}
              disabled={isSubmitting}
            >
              Cancel
            </Button>
            <Button
              variant="primary"
              size="small"
              type="submit"
              disabled={isSubmitting}
            >
              {isSubmitting ? "Creating..." : "Add Creator"}
            </Button>
          </Dialog.Footer>
        </form>
      </Dialog.Content>
    </Dialog>
  );
}

@@ -1,314 +0,0 @@
"use client";

import { useState } from "react";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";
import type { LlmModelCreator } from "@/app/api/__generated__/models/llmModelCreator";
import { createLlmModelAction } from "../actions";
import { useRouter } from "next/navigation";

interface Props {
  providers: LlmProvider[];
  creators: LlmModelCreator[];
}

export function AddModelModal({ providers, creators }: Props) {
  const [open, setOpen] = useState(false);
  const [selectedCreatorId, setSelectedCreatorId] = useState("");
  const [isSubmitting, setIsSubmitting] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const router = useRouter();

  async function handleSubmit(formData: FormData) {
    setIsSubmitting(true);
    setError(null);
    try {
      await createLlmModelAction(formData);
      setOpen(false);
      router.refresh();
    } catch (err) {
      setError(err instanceof Error ? err.message : "Failed to create model");
    } finally {
      setIsSubmitting(false);
    }
  }

  // When provider changes, auto-select matching creator if one exists
  function handleProviderChange(providerId: string) {
    const provider = providers.find((p) => p.id === providerId);
    if (provider) {
      // Find creator with same name as provider (e.g., "openai" -> "openai")
      const matchingCreator = creators.find((c) => c.name === provider.name);
      if (matchingCreator) {
        setSelectedCreatorId(matchingCreator.id);
      } else {
        // No matching creator (e.g., OpenRouter hosts other creators' models)
        setSelectedCreatorId("");
      }
    }
  }

  return (
    <Dialog
      title="Add Model"
      controlled={{ isOpen: open, set: setOpen }}
      styling={{ maxWidth: "768px", maxHeight: "90vh", overflowY: "auto" }}
    >
      <Dialog.Trigger>
        <Button variant="primary" size="small">
          Add Model
        </Button>
      </Dialog.Trigger>
      <Dialog.Content>
        <div className="mb-4 text-sm text-muted-foreground">
          Register a new model slug, metadata, and pricing.
        </div>

        <form action={handleSubmit} className="space-y-6">
          {/* Basic Information */}
          <div className="space-y-4">
            <div className="space-y-1">
              <h3 className="text-sm font-semibold text-foreground">
                Basic Information
              </h3>
              <p className="text-xs text-muted-foreground">
                Core model details
              </p>
            </div>
            <div className="grid gap-4 sm:grid-cols-2">
              <div className="space-y-2">
                <label
                  htmlFor="slug"
                  className="text-sm font-medium text-foreground"
                >
                  Model Slug <span className="text-destructive">*</span>
                </label>
                <input
                  id="slug"
                  required
                  name="slug"
                  className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                  placeholder="gpt-4.1-mini-2025-04-14"
                />
              </div>
              <div className="space-y-2">
                <label
                  htmlFor="display_name"
                  className="text-sm font-medium text-foreground"
                >
                  Display Name <span className="text-destructive">*</span>
                </label>
                <input
                  id="display_name"
                  required
                  name="display_name"
                  className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                  placeholder="GPT 4.1 Mini"
                />
              </div>
            </div>
            <div className="space-y-2">
              <label
                htmlFor="description"
                className="text-sm font-medium text-foreground"
              >
                Description
              </label>
              <textarea
                id="description"
                name="description"
                rows={3}
                className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                placeholder="Optional description..."
              />
            </div>
          </div>

          {/* Model Configuration */}
          <div className="space-y-4 border-t border-border pt-6">
            <div className="space-y-1">
              <h3 className="text-sm font-semibold text-foreground">
                Model Configuration
              </h3>
              <p className="text-xs text-muted-foreground">
                Model capabilities and limits
              </p>
            </div>
            <div className="grid gap-4 sm:grid-cols-2">
              <div className="space-y-2">
                <label
                  htmlFor="provider_id"
                  className="text-sm font-medium text-foreground"
                >
                  Provider <span className="text-destructive">*</span>
                </label>
                <select
                  id="provider_id"
                  required
                  name="provider_id"
                  className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                  defaultValue=""
                  onChange={(e) => handleProviderChange(e.target.value)}
                >
                  <option value="" disabled>
                    Select provider
                  </option>
                  {providers.map((provider) => (
                    <option key={provider.id} value={provider.id}>
                      {provider.display_name} ({provider.name})
                    </option>
                  ))}
                </select>
                <p className="text-xs text-muted-foreground">
                  Who hosts/serves the model
                </p>
              </div>
              <div className="space-y-2">
                <label
                  htmlFor="creator_id"
                  className="text-sm font-medium text-foreground"
                >
                  Creator
                </label>
                <select
                  id="creator_id"
                  name="creator_id"
                  className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                  value={selectedCreatorId}
                  onChange={(e) => setSelectedCreatorId(e.target.value)}
                >
                  <option value="">No creator selected</option>
                  {creators.map((creator) => (
                    <option key={creator.id} value={creator.id}>
                      {creator.display_name} ({creator.name})
                    </option>
                  ))}
                </select>
                <p className="text-xs text-muted-foreground">
                  Who made/trained the model (e.g., OpenAI, Meta)
                </p>
              </div>
            </div>
            <div className="grid gap-4 sm:grid-cols-2">
              <div className="space-y-2">
                <label
                  htmlFor="context_window"
                  className="text-sm font-medium text-foreground"
                >
                  Context Window <span className="text-destructive">*</span>
                </label>
                <input
                  id="context_window"
                  required
                  type="number"
                  name="context_window"
                  className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                  placeholder="128000"
                  min={1}
                />
              </div>
              <div className="space-y-2">
                <label
                  htmlFor="max_output_tokens"
                  className="text-sm font-medium text-foreground"
                >
                  Max Output Tokens
                </label>
                <input
                  id="max_output_tokens"
                  type="number"
                  name="max_output_tokens"
                  className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                  placeholder="16384"
                  min={1}
                />
              </div>
            </div>
          </div>

          {/* Pricing */}
          <div className="space-y-4 border-t border-border pt-6">
            <div className="space-y-1">
              <h3 className="text-sm font-semibold text-foreground">Pricing</h3>
              <p className="text-xs text-muted-foreground">
                Credit cost per run (credentials are managed via the provider)
              </p>
            </div>
            <div className="grid gap-4 sm:grid-cols-1">
              <div className="space-y-2">
                <label
                  htmlFor="credit_cost"
                  className="text-sm font-medium text-foreground"
                >
                  Credit Cost <span className="text-destructive">*</span>
                </label>
                <input
                  id="credit_cost"
                  required
                  type="number"
                  name="credit_cost"
                  step="1"
                  className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                  placeholder="5"
                  min={0}
                />
              </div>
            </div>
            <p className="text-xs text-muted-foreground">
              Credit cost is always in platform credits. Credentials are
              inherited from the selected provider.
            </p>
          </div>

          {/* Enabled Toggle */}
          <div className="flex items-center gap-3 border-t border-border pt-6">
            <input type="hidden" name="is_enabled" value="off" />
            <input
              id="is_enabled"
              type="checkbox"
              name="is_enabled"
              defaultChecked
              className="h-4 w-4 rounded border-input"
            />
            <label
              htmlFor="is_enabled"
              className="text-sm font-medium text-foreground"
            >
              Enabled by default
            </label>
          </div>

          {error && (
            <div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
              {error}
            </div>
          )}

          <Dialog.Footer>
            <Button
              variant="ghost"
              size="small"
              type="button"
              onClick={() => {
                setOpen(false);
                setError(null);
              }}
              disabled={isSubmitting}
            >
              Cancel
            </Button>
            <Button
              variant="primary"
              size="small"
              type="submit"
              disabled={isSubmitting}
            >
              {isSubmitting ? "Creating..." : "Save Model"}
            </Button>
          </Dialog.Footer>
        </form>
      </Dialog.Content>
    </Dialog>
  );
}
@@ -1,268 +0,0 @@
"use client";

import { useState } from "react";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import { createLlmProviderAction } from "../actions";
import { useRouter } from "next/navigation";

export function AddProviderModal() {
  const [open, setOpen] = useState(false);
  const [isSubmitting, setIsSubmitting] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const router = useRouter();

  async function handleSubmit(formData: FormData) {
    setIsSubmitting(true);
    setError(null);
    try {
      await createLlmProviderAction(formData);
      setOpen(false);
      router.refresh();
    } catch (err) {
      setError(
        err instanceof Error ? err.message : "Failed to create provider",
      );
    } finally {
      setIsSubmitting(false);
    }
  }

  return (
    <Dialog
      title="Add Provider"
      controlled={{ isOpen: open, set: setOpen }}
      styling={{ maxWidth: "768px", maxHeight: "90vh", overflowY: "auto" }}
    >
      <Dialog.Trigger>
        <Button variant="primary" size="small">
          Add Provider
        </Button>
      </Dialog.Trigger>
      <Dialog.Content>
        <div className="mb-4 text-sm text-muted-foreground">
          Define a new upstream provider and default credential information.
        </div>

        {/* Setup Instructions */}
        <div className="mb-6 rounded-lg border border-primary/30 bg-primary/5 p-4">
          <div className="space-y-2">
            <h4 className="text-sm font-semibold text-foreground">
              Before Adding a Provider
            </h4>
            <p className="text-xs text-muted-foreground">
              To use a new provider, you must first configure its credentials in
              the backend:
            </p>
            <ol className="list-inside list-decimal space-y-1 text-xs text-muted-foreground">
              <li>
                Add the credential to{" "}
                <code className="rounded bg-muted px-1 py-0.5 font-mono">
                  backend/integrations/credentials_store.py
                </code>{" "}
                with a UUID, provider name, and settings secret reference
              </li>
              <li>
                Add it to the{" "}
                <code className="rounded bg-muted px-1 py-0.5 font-mono">
                  PROVIDER_CREDENTIALS
                </code>{" "}
                dictionary in{" "}
                <code className="rounded bg-muted px-1 py-0.5 font-mono">
                  backend/data/block_cost_config.py
                </code>
              </li>
              <li>
                Use the <strong>same provider name</strong> in the
                "Credential Provider" field below that matches the key
                in{" "}
                <code className="rounded bg-muted px-1 py-0.5 font-mono">
                  PROVIDER_CREDENTIALS
                </code>
              </li>
            </ol>
          </div>
        </div>

        <form action={handleSubmit} className="space-y-6">
          {/* Basic Information */}
          <div className="space-y-4">
            <div className="space-y-1">
              <h3 className="text-sm font-semibold text-foreground">
                Basic Information
              </h3>
              <p className="text-xs text-muted-foreground">
                Core provider details
              </p>
            </div>
            <div className="grid gap-4 sm:grid-cols-2">
              <div className="space-y-2">
                <label
                  htmlFor="name"
                  className="text-sm font-medium text-foreground"
                >
                  Provider Slug <span className="text-destructive">*</span>
                </label>
                <input
                  id="name"
                  required
                  name="name"
                  className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                  placeholder="e.g. openai"
                />
              </div>
              <div className="space-y-2">
                <label
                  htmlFor="display_name"
                  className="text-sm font-medium text-foreground"
                >
                  Display Name <span className="text-destructive">*</span>
                </label>
                <input
                  id="display_name"
                  required
                  name="display_name"
                  className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                  placeholder="OpenAI"
                />
              </div>
            </div>
            <div className="space-y-2">
              <label
                htmlFor="description"
                className="text-sm font-medium text-foreground"
              >
                Description
              </label>
              <textarea
                id="description"
                name="description"
                rows={3}
                className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                placeholder="Optional description..."
              />
            </div>
          </div>

          {/* Default Credentials */}
          <div className="space-y-4 border-t border-border pt-6">
            <div className="space-y-1">
              <h3 className="text-sm font-semibold text-foreground">
                Default Credentials
              </h3>
              <p className="text-xs text-muted-foreground">
                Credential provider name that matches the key in{" "}
                <code className="rounded bg-muted px-1 py-0.5 font-mono text-xs">
                  PROVIDER_CREDENTIALS
                </code>
              </p>
            </div>
            <div className="space-y-2">
              <label
                htmlFor="default_credential_provider"
                className="text-sm font-medium text-foreground"
              >
                Credential Provider <span className="text-destructive">*</span>
              </label>
              <input
                id="default_credential_provider"
                name="default_credential_provider"
                required
                className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
                placeholder="openai"
              />
              <p className="text-xs text-muted-foreground">
                <strong>Important:</strong> This must exactly match the key in
                the{" "}
                <code className="rounded bg-muted px-1 py-0.5 font-mono text-xs">
                  PROVIDER_CREDENTIALS
                </code>{" "}
                dictionary in{" "}
                <code className="rounded bg-muted px-1 py-0.5 font-mono text-xs">
                  block_cost_config.py
                </code>
                . Common values: "openai", "anthropic",
                "groq", "open_router", etc.
              </p>
            </div>
          </div>

          {/* Capabilities */}
          <div className="space-y-4 border-t border-border pt-6">
            <div className="space-y-1">
              <h3 className="text-sm font-semibold text-foreground">
                Capabilities
              </h3>
              <p className="text-xs text-muted-foreground">
                Provider feature flags
              </p>
            </div>
            <div className="grid gap-3 sm:grid-cols-2">
              {[
                { name: "supports_tools", label: "Supports tools" },
                { name: "supports_json_output", label: "Supports JSON output" },
                { name: "supports_reasoning", label: "Supports reasoning" },
                {
                  name: "supports_parallel_tool",
                  label: "Supports parallel tool calls",
                },
              ].map(({ name, label }) => (
                <div
                  key={name}
                  className="flex items-center gap-3 rounded-md border border-border bg-muted/30 px-4 py-3 transition-colors hover:bg-muted/50"
                >
                  <input type="hidden" name={name} value="off" />
                  <input
                    id={name}
                    type="checkbox"
                    name={name}
                    defaultChecked={
                      name !== "supports_reasoning" &&
                      name !== "supports_parallel_tool"
                    }
                    className="h-4 w-4 rounded border-input"
                  />
                  <label
                    htmlFor={name}
                    className="text-sm font-medium text-foreground"
                  >
                    {label}
                  </label>
                </div>
              ))}
            </div>
          </div>

          {error && (
            <div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
              {error}
            </div>
          )}

          <Dialog.Footer>
            <Button
              variant="ghost"
              size="small"
              type="button"
              onClick={() => {
                setOpen(false);
                setError(null);
              }}
              disabled={isSubmitting}
            >
              Cancel
            </Button>
            <Button
              variant="primary"
              size="small"
              type="submit"
              disabled={isSubmitting}
            >
              {isSubmitting ? "Creating..." : "Save Provider"}
            </Button>
          </Dialog.Footer>
        </form>
      </Dialog.Content>
    </Dialog>
  );
}
@@ -1,195 +0,0 @@
"use client";

import { useState } from "react";
import type { LlmModelCreator } from "@/app/api/__generated__/models/llmModelCreator";
import {
  Table,
  TableBody,
  TableCell,
  TableHead,
  TableHeader,
  TableRow,
} from "@/components/atoms/Table/Table";
import { Button } from "@/components/atoms/Button/Button";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { updateLlmCreatorAction } from "../actions";
import { useRouter } from "next/navigation";
import { DeleteCreatorModal } from "./DeleteCreatorModal";

export function CreatorsTable({ creators }: { creators: LlmModelCreator[] }) {
  if (!creators.length) {
    return (
      <div className="rounded-lg border border-dashed border-border p-6 text-center text-sm text-muted-foreground">
        No creators registered yet.
      </div>
    );
  }

  return (
    <div className="rounded-lg border">
      <Table>
        <TableHeader>
          <TableRow>
            <TableHead>Creator</TableHead>
            <TableHead>Description</TableHead>
            <TableHead>Website</TableHead>
            <TableHead>Actions</TableHead>
          </TableRow>
        </TableHeader>
        <TableBody>
          {creators.map((creator) => (
            <TableRow key={creator.id}>
              <TableCell>
                <div className="font-medium">{creator.display_name}</div>
                <div className="text-xs text-muted-foreground">
                  {creator.name}
                </div>
              </TableCell>
              <TableCell>
                <span className="text-sm text-muted-foreground">
                  {creator.description || "—"}
                </span>
              </TableCell>
              <TableCell>
                {creator.website_url ? (
                  <a
                    href={creator.website_url}
                    target="_blank"
                    rel="noopener noreferrer"
                    className="text-sm text-primary hover:underline"
                  >
                    {(() => {
                      try {
                        return new URL(creator.website_url).hostname;
                      } catch {
                        return creator.website_url;
                      }
                    })()}
                  </a>
                ) : (
                  <span className="text-muted-foreground">—</span>
                )}
              </TableCell>
              <TableCell>
                <div className="flex items-center justify-end gap-2">
                  <EditCreatorModal creator={creator} />
                  <DeleteCreatorModal creator={creator} />
                </div>
              </TableCell>
            </TableRow>
          ))}
        </TableBody>
      </Table>
    </div>
  );
}

function EditCreatorModal({ creator }: { creator: LlmModelCreator }) {
  const [open, setOpen] = useState(false);
  const [isSubmitting, setIsSubmitting] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const router = useRouter();

  async function handleSubmit(formData: FormData) {
    setIsSubmitting(true);
    setError(null);
    try {
      await updateLlmCreatorAction(formData);
      setOpen(false);
      router.refresh();
    } catch (err) {
      setError(err instanceof Error ? err.message : "Failed to update creator");
    } finally {
      setIsSubmitting(false);
    }
  }

  return (
    <Dialog
      title="Edit Creator"
      controlled={{ isOpen: open, set: setOpen }}
      styling={{ maxWidth: "512px" }}
    >
      <Dialog.Trigger>
        <Button variant="outline" size="small" className="min-w-0">
          Edit
        </Button>
      </Dialog.Trigger>
      <Dialog.Content>
        <form action={handleSubmit} className="space-y-4">
          <input type="hidden" name="creator_id" value={creator.id} />

          <div className="grid gap-4 sm:grid-cols-2">
            <div className="space-y-2">
              <label className="text-sm font-medium">Name (slug)</label>
              <input
                required
                name="name"
                defaultValue={creator.name}
                className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm"
              />
            </div>
            <div className="space-y-2">
              <label className="text-sm font-medium">Display Name</label>
              <input
                required
                name="display_name"
                defaultValue={creator.display_name}
                className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm"
              />
            </div>
          </div>

          <div className="space-y-2">
            <label className="text-sm font-medium">Description</label>
            <textarea
              name="description"
              rows={2}
              defaultValue={creator.description ?? ""}
              className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm"
            />
          </div>

          <div className="space-y-2">
            <label className="text-sm font-medium">Website URL</label>
            <input
              name="website_url"
              type="url"
              defaultValue={creator.website_url ?? ""}
              className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm"
            />
          </div>

          {error && (
            <div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
              {error}
            </div>
          )}

          <Dialog.Footer>
            <Button
              variant="ghost"
              size="small"
              type="button"
              onClick={() => {
                setOpen(false);
                setError(null);
              }}
              disabled={isSubmitting}
            >
              Cancel
            </Button>
            <Button
              variant="primary"
              size="small"
              type="submit"
              disabled={isSubmitting}
            >
              {isSubmitting ? "Updating..." : "Update"}
            </Button>
          </Dialog.Footer>
        </form>
      </Dialog.Content>
    </Dialog>
  );
}
@@ -1,107 +0,0 @@
"use client";

import { useState } from "react";
import { useRouter } from "next/navigation";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import type { LlmModelCreator } from "@/app/api/__generated__/models/llmModelCreator";
import { deleteLlmCreatorAction } from "../actions";

export function DeleteCreatorModal({ creator }: { creator: LlmModelCreator }) {
  const [open, setOpen] = useState(false);
  const [isDeleting, setIsDeleting] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const router = useRouter();

  async function handleDelete(formData: FormData) {
    setIsDeleting(true);
    setError(null);
    try {
      await deleteLlmCreatorAction(formData);
      setOpen(false);
      router.refresh();
    } catch (err) {
      setError(err instanceof Error ? err.message : "Failed to delete creator");
    } finally {
      setIsDeleting(false);
    }
  }

  return (
    <Dialog
      title="Delete Creator"
      controlled={{ isOpen: open, set: setOpen }}
      styling={{ maxWidth: "480px" }}
    >
      <Dialog.Trigger>
        <Button
          type="button"
          variant="outline"
          size="small"
          className="min-w-0 text-destructive hover:bg-destructive/10"
        >
          Delete
        </Button>
      </Dialog.Trigger>
      <Dialog.Content>
        <div className="space-y-4">
          <div className="rounded-lg border border-amber-500/30 bg-amber-500/10 p-4 dark:border-amber-400/30 dark:bg-amber-400/10">
            <div className="flex items-start gap-3">
              <div className="flex-shrink-0 text-amber-600 dark:text-amber-400">
                ⚠️
              </div>
              <div className="text-sm text-foreground">
                <p className="font-semibold">You are about to delete:</p>
                <p className="mt-1">
                  <span className="font-medium">{creator.display_name}</span>{" "}
                  <span className="text-muted-foreground">
                    ({creator.name})
                  </span>
                </p>
                <p className="mt-2 text-muted-foreground">
                  Models using this creator will have their creator field
                  cleared. This is safe and won't affect model
                  functionality.
                </p>
              </div>
            </div>
          </div>

          <form action={handleDelete} className="space-y-4">
            <input type="hidden" name="creator_id" value={creator.id} />

            {error && (
              <div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
                {error}
              </div>
            )}

            <Dialog.Footer>
              <Button
                variant="ghost"
                size="small"
                onClick={() => {
                  setOpen(false);
                  setError(null);
                }}
                disabled={isDeleting}
                type="button"
              >
                Cancel
              </Button>
              <Button
                type="submit"
                variant="primary"
                size="small"
                disabled={isDeleting}
                className="bg-destructive text-destructive-foreground hover:bg-destructive/90"
              >
                {isDeleting ? "Deleting..." : "Delete Creator"}
              </Button>
            </Dialog.Footer>
          </form>
        </div>
      </Dialog.Content>
    </Dialog>
  );
}
@@ -1,201 +0,0 @@
"use client";

import { useState } from "react";
import { useRouter } from "next/navigation";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import type { LlmModel } from "@/app/api/__generated__/models/llmModel";
import { deleteLlmModelAction, fetchLlmModelUsage } from "../actions";

export function DeleteModelModal({
  model,
  availableModels,
}: {
  model: LlmModel;
  availableModels: LlmModel[];
}) {
  const router = useRouter();
  const [open, setOpen] = useState(false);
  const [selectedReplacement, setSelectedReplacement] = useState<string>("");
  const [isDeleting, setIsDeleting] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const [usageCount, setUsageCount] = useState<number | null>(null);
  const [usageLoading, setUsageLoading] = useState(false);
  const [usageError, setUsageError] = useState<string | null>(null);

  // Filter out the current model and disabled models from replacement options
  const replacementOptions = availableModels.filter(
    (m) => m.id !== model.id && m.is_enabled,
  );

  async function fetchUsage() {
    setUsageLoading(true);
    setUsageError(null);
    try {
      const usage = await fetchLlmModelUsage(model.id);
      setUsageCount(usage.node_count);
    } catch (err) {
      console.error("Failed to fetch model usage:", err);
      setUsageError("Failed to load usage count");
      setUsageCount(null);
    } finally {
      setUsageLoading(false);
    }
  }

  async function handleDelete(formData: FormData) {
    setIsDeleting(true);
    setError(null);
    try {
      await deleteLlmModelAction(formData);
      setOpen(false);
      router.refresh();
    } catch (err) {
      setError(err instanceof Error ? err.message : "Failed to delete model");
    } finally {
      setIsDeleting(false);
    }
  }

  return (
    <Dialog
      title="Delete Model"
      controlled={{
        isOpen: open,
        set: async (isOpen) => {
          setOpen(isOpen);
          if (isOpen) {
            setUsageCount(null);
            setUsageError(null);
            setError(null);
            setSelectedReplacement("");
            await fetchUsage();
          }
        },
      }}
      styling={{ maxWidth: "600px" }}
    >
      <Dialog.Trigger>
        <Button
          type="button"
          variant="outline"
          size="small"
          className="min-w-0 text-destructive hover:bg-destructive/10"
        >
          Delete
        </Button>
      </Dialog.Trigger>
      <Dialog.Content>
        <div className="mb-4 text-sm text-muted-foreground">
          This action cannot be undone. All workflows using this model will be
          migrated to the replacement model you select.
        </div>

        <div className="space-y-4">
          <div className="rounded-lg border border-amber-500/30 bg-amber-500/10 p-4 dark:border-amber-400/30 dark:bg-amber-400/10">
            <div className="flex items-start gap-3">
              <div className="flex-shrink-0 text-amber-600 dark:text-amber-400">
                ⚠️
              </div>
              <div className="text-sm text-foreground">
                <p className="font-semibold">You are about to delete:</p>
                <p className="mt-1">
                  <span className="font-medium">{model.display_name}</span>{" "}
                  <span className="text-muted-foreground">({model.slug})</span>
                </p>
                {usageLoading && (
                  <p className="mt-2 text-muted-foreground">
                    Loading usage count...
                  </p>
                )}
                {usageError && (
                  <p className="mt-2 text-destructive">{usageError}</p>
                )}
                {!usageLoading && !usageError && usageCount !== null && (
                  <p className="mt-2 font-semibold">
                    Impact: {usageCount} block{usageCount !== 1 ? "s" : ""}{" "}
                    currently use this model
                  </p>
                )}
                <p className="mt-2 text-muted-foreground">
                  All workflows currently using this model will be automatically
                  updated to use the replacement model you choose below.
                </p>
              </div>
            </div>
          </div>

          <form action={handleDelete} className="space-y-4">
            <input type="hidden" name="model_id" value={model.id} />
            <input
              type="hidden"
              name="replacement_model_slug"
              value={selectedReplacement}
            />

            <label className="text-sm font-medium">
              <span className="mb-2 block">
                Select Replacement Model{" "}
                <span className="text-destructive">*</span>
              </span>
              <select
                required
                value={selectedReplacement}
                onChange={(e) => setSelectedReplacement(e.target.value)}
                className="w-full rounded border border-input bg-background p-2 text-sm"
              >
                <option value="">-- Choose a replacement model --</option>
                {replacementOptions.map((m) => (
                  <option key={m.id} value={m.slug}>
                    {m.display_name} ({m.slug})
                  </option>
                ))}
              </select>
              {replacementOptions.length === 0 && (
                <p className="mt-2 text-xs text-destructive">
                  No replacement models available. You must have at least one
                  other enabled model before deleting this one.
                </p>
              )}
            </label>

            {error && (
              <div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
                {error}
              </div>
            )}

            <Dialog.Footer>
              <Button
                variant="ghost"
                size="small"
                type="button"
                onClick={() => {
                  setOpen(false);
                  setSelectedReplacement("");
                  setError(null);
                }}
                disabled={isDeleting}
              >
                Cancel
              </Button>
              <Button
                type="submit"
                variant="primary"
                size="small"
                disabled={
                  !selectedReplacement ||
                  isDeleting ||
                  replacementOptions.length === 0
                }
                className="bg-destructive text-destructive-foreground hover:bg-destructive/90"
              >
                {isDeleting ? "Deleting..." : "Delete and Migrate"}
              </Button>
            </Dialog.Footer>
          </form>
        </div>
      </Dialog.Content>
    </Dialog>
  );
}
@@ -1,287 +0,0 @@
"use client";

import { useState } from "react";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import type { LlmModel } from "@/app/api/__generated__/models/llmModel";
import { toggleLlmModelAction, fetchLlmModelUsage } from "../actions";

export function DisableModelModal({
  model,
  availableModels,
}: {
  model: LlmModel;
  availableModels: LlmModel[];
}) {
  const [open, setOpen] = useState(false);
  const [isDisabling, setIsDisabling] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const [usageCount, setUsageCount] = useState<number | null>(null);
  const [selectedMigration, setSelectedMigration] = useState<string>("");
  const [wantsMigration, setWantsMigration] = useState(false);
  const [migrationReason, setMigrationReason] = useState("");
  const [customCreditCost, setCustomCreditCost] = useState<string>("");

  // Filter out the current model and disabled models from replacement options
  const migrationOptions = availableModels.filter(
    (m) => m.id !== model.id && m.is_enabled,
  );

  async function fetchUsage() {
    try {
      const usage = await fetchLlmModelUsage(model.id);
      setUsageCount(usage.node_count);
    } catch {
      setUsageCount(null);
    }
  }

  async function handleDisable(formData: FormData) {
    setIsDisabling(true);
    setError(null);
    try {
      await toggleLlmModelAction(formData);
      setOpen(false);
    } catch (err) {
      setError(err instanceof Error ? err.message : "Failed to disable model");
    } finally {
      setIsDisabling(false);
    }
  }

  function resetState() {
    setError(null);
    setSelectedMigration("");
    setWantsMigration(false);
    setMigrationReason("");
    setCustomCreditCost("");
  }

  const hasUsage = usageCount !== null && usageCount > 0;

  return (
    <Dialog
      title="Disable Model"
      controlled={{
        isOpen: open,
        set: async (isOpen) => {
          setOpen(isOpen);
          if (isOpen) {
            setUsageCount(null);
            resetState();
            await fetchUsage();
          }
        },
      }}
      styling={{ maxWidth: "600px" }}
    >
      <Dialog.Trigger>
        <Button
          type="button"
          variant="outline"
          size="small"
          className="min-w-0"
        >
          Disable
        </Button>
      </Dialog.Trigger>
      <Dialog.Content>
        <div className="mb-4 text-sm text-muted-foreground">
          Disabling a model will hide it from users when creating new workflows.
        </div>

        <div className="space-y-4">
          <div className="rounded-lg border border-amber-500/30 bg-amber-500/10 p-4 dark:border-amber-400/30 dark:bg-amber-400/10">
            <div className="flex items-start gap-3">
              <div className="flex-shrink-0 text-amber-600 dark:text-amber-400">
                ⚠️
              </div>
              <div className="text-sm text-foreground">
                <p className="font-semibold">You are about to disable:</p>
                <p className="mt-1">
                  <span className="font-medium">{model.display_name}</span>{" "}
                  <span className="text-muted-foreground">({model.slug})</span>
                </p>
                {usageCount === null ? (
                  <p className="mt-2 text-muted-foreground">
                    Loading usage data...
                  </p>
                ) : usageCount > 0 ? (
                  <p className="mt-2 font-semibold">
                    Impact: {usageCount} block{usageCount !== 1 ? "s" : ""}{" "}
                    currently use this model
                  </p>
                ) : (
                  <p className="mt-2 text-muted-foreground">
                    No workflows are currently using this model.
                  </p>
                )}
              </div>
            </div>
          </div>

          {hasUsage && (
            <div className="space-y-4 rounded-lg border border-border bg-muted/50 p-4">
              <label className="flex items-start gap-3">
                <input
                  type="checkbox"
                  checked={wantsMigration}
                  onChange={(e) => {
                    setWantsMigration(e.target.checked);
                    if (!e.target.checked) {
                      setSelectedMigration("");
                    }
                  }}
                  className="mt-1"
                />
                <div className="text-sm">
                  <span className="font-medium">
                    Migrate existing workflows to another model
                  </span>
                  <p className="mt-1 text-muted-foreground">
                    Creates a revertible migration record. If unchecked,
                    existing workflows will use automatic fallback to an enabled
                    model from the same provider.
                  </p>
                </div>
              </label>

              {wantsMigration && (
                <div className="space-y-4 border-t border-border pt-4">
                  <label className="block text-sm font-medium">
                    <span className="mb-2 block">
                      Replacement Model{" "}
                      <span className="text-destructive">*</span>
                    </span>
                    <select
                      required
                      value={selectedMigration}
                      onChange={(e) => setSelectedMigration(e.target.value)}
                      className="w-full rounded border border-input bg-background p-2 text-sm"
                    >
                      <option value="">-- Choose a replacement model --</option>
                      {migrationOptions.map((m) => (
                        <option key={m.id} value={m.slug}>
                          {m.display_name} ({m.slug})
                        </option>
                      ))}
                    </select>
                    {migrationOptions.length === 0 && (
                      <p className="mt-2 text-xs text-destructive">
                        No other enabled models available for migration.
                      </p>
                    )}
                  </label>

                  <label className="block text-sm font-medium">
                    <span className="mb-2 block">
                      Migration Reason{" "}
                      <span className="font-normal text-muted-foreground">
                        (optional)
                      </span>
                    </span>
                    <input
                      type="text"
                      value={migrationReason}
                      onChange={(e) => setMigrationReason(e.target.value)}
                      placeholder="e.g., Provider outage, Cost reduction"
                      className="w-full rounded border border-input bg-background p-2 text-sm"
                    />
                    <p className="mt-1 text-xs text-muted-foreground">
                      Helps track why the migration was made
                    </p>
                  </label>

                  <label className="block text-sm font-medium">
                    <span className="mb-2 block">
                      Custom Credit Cost{" "}
                      <span className="font-normal text-muted-foreground">
                        (optional)
                      </span>
                    </span>
                    <input
                      type="number"
                      min="0"
                      value={customCreditCost}
                      onChange={(e) => setCustomCreditCost(e.target.value)}
                      placeholder="Leave blank to use target model's cost"
                      className="w-full rounded border border-input bg-background p-2 text-sm"
                    />
                    <p className="mt-1 text-xs text-muted-foreground">
                      Override pricing for migrated workflows. When set, billing
                      will use this cost instead of the target model's cost.
                    </p>
                  </label>
                </div>
              )}
            </div>
          )}

          <form action={handleDisable} className="space-y-4">
            <input type="hidden" name="model_id" value={model.id} />
            <input type="hidden" name="is_enabled" value="false" />
            {wantsMigration && selectedMigration && (
              <>
                <input
                  type="hidden"
                  name="migrate_to_slug"
                  value={selectedMigration}
                />
                {migrationReason && (
                  <input
                    type="hidden"
                    name="migration_reason"
                    value={migrationReason}
                  />
                )}
                {customCreditCost && (
                  <input
                    type="hidden"
                    name="custom_credit_cost"
                    value={customCreditCost}
                  />
                )}
              </>
            )}

            {error && (
              <div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
                {error}
              </div>
            )}

            <Dialog.Footer>
              <Button
                variant="ghost"
                size="small"
                type="button"
                onClick={() => {
                  setOpen(false);
                  resetState();
                }}
                disabled={isDisabling}
              >
                Cancel
              </Button>
              <Button
                type="submit"
                variant="primary"
                size="small"
                disabled={
                  isDisabling ||
                  (wantsMigration && !selectedMigration) ||
                  usageCount === null
                }
              >
                {isDisabling
                  ? "Disabling..."
                  : wantsMigration && selectedMigration
                    ? "Disable & Migrate"
                    : "Disable Model"}
              </Button>
            </Dialog.Footer>
          </form>
        </div>
      </Dialog.Content>
    </Dialog>
  );
}
@@ -1,219 +0,0 @@
"use client";

import { useState } from "react";
import { useRouter } from "next/navigation";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import type { LlmModel } from "@/app/api/__generated__/models/llmModel";
import type { LlmModelCreator } from "@/app/api/__generated__/models/llmModelCreator";
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";
import { updateLlmModelAction } from "../actions";

export function EditModelModal({
  model,
  providers,
  creators,
}: {
  model: LlmModel;
  providers: LlmProvider[];
  creators: LlmModelCreator[];
}) {
  const router = useRouter();
  const [open, setOpen] = useState(false);
  const [isSubmitting, setIsSubmitting] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const cost = model.costs?.[0];
  const provider = providers.find((p) => p.id === model.provider_id);

  async function handleSubmit(formData: FormData) {
    setIsSubmitting(true);
    setError(null);
    try {
      await updateLlmModelAction(formData);
      setOpen(false);
      router.refresh();
    } catch (err) {
      setError(err instanceof Error ? err.message : "Failed to update model");
    } finally {
      setIsSubmitting(false);
    }
  }

  return (
    <Dialog
      title="Edit Model"
      controlled={{ isOpen: open, set: setOpen }}
      styling={{ maxWidth: "768px", maxHeight: "90vh", overflowY: "auto" }}
    >
      <Dialog.Trigger>
        <Button variant="outline" size="small" className="min-w-0">
          Edit
        </Button>
      </Dialog.Trigger>
      <Dialog.Content>
        <div className="mb-4 text-sm text-muted-foreground">
          Update model metadata and pricing information.
        </div>
        {error && (
          <div className="mb-4 rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
            {error}
          </div>
        )}
        <form action={handleSubmit} className="space-y-4">
          <input type="hidden" name="model_id" value={model.id} />

          <div className="grid gap-4 md:grid-cols-2">
            <label className="text-sm font-medium">
              Display Name
              <input
                required
                name="display_name"
                defaultValue={model.display_name}
                className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
              />
            </label>
            <label className="text-sm font-medium">
              Provider
              <select
                required
                name="provider_id"
                className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
                defaultValue={model.provider_id}
              >
                {providers.map((p) => (
                  <option key={p.id} value={p.id}>
                    {p.display_name} ({p.name})
                  </option>
                ))}
              </select>
              <span className="text-xs text-muted-foreground">
                Who hosts/serves the model
              </span>
            </label>
          </div>

          <div className="grid gap-4 md:grid-cols-2">
            <label className="text-sm font-medium">
              Creator
              <select
                name="creator_id"
                className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
                defaultValue={model.creator_id ?? ""}
              >
                <option value="">No creator selected</option>
                {creators.map((c) => (
                  <option key={c.id} value={c.id}>
                    {c.display_name} ({c.name})
                  </option>
                ))}
              </select>
              <span className="text-xs text-muted-foreground">
                Who made/trained the model (e.g., OpenAI, Meta)
              </span>
            </label>
          </div>

          <label className="text-sm font-medium">
            Description
            <textarea
              name="description"
              rows={2}
              defaultValue={model.description ?? ""}
              className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
              placeholder="Optional description..."
            />
          </label>

          <div className="grid gap-4 md:grid-cols-2">
            <label className="text-sm font-medium">
              Context Window
              <input
                required
                type="number"
                name="context_window"
                defaultValue={model.context_window}
                className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
                min={1}
              />
            </label>
            <label className="text-sm font-medium">
              Max Output Tokens
              <input
                type="number"
                name="max_output_tokens"
                defaultValue={model.max_output_tokens ?? undefined}
                className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
                min={1}
              />
            </label>
          </div>

          <div className="grid gap-4 md:grid-cols-2">
            <label className="text-sm font-medium">
              Credit Cost
              <input
                required
                type="number"
                name="credit_cost"
                defaultValue={cost?.credit_cost ?? 0}
                className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
                min={0}
              />
              <span className="text-xs text-muted-foreground">
                Credits charged per run
              </span>
            </label>
            <label className="text-sm font-medium">
              Credential Provider
              <select
                required
                name="credential_provider"
                defaultValue={cost?.credential_provider ?? provider?.name ?? ""}
                className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
              >
                <option value="" disabled>
                  Select provider
                </option>
                {providers.map((p) => (
                  <option key={p.id} value={p.name}>
                    {p.display_name} ({p.name})
                  </option>
                ))}
              </select>
              <span className="text-xs text-muted-foreground">
                Must match a key in PROVIDER_CREDENTIALS
              </span>
            </label>
          </div>
          {/* Hidden defaults for credential_type and unit */}
          <input
            type="hidden"
            name="credential_type"
            value={cost?.credential_type ?? provider?.default_credential_type ?? "api_key"}
          />
          <input type="hidden" name="unit" value={cost?.unit ?? "RUN"} />

          <Dialog.Footer>
            <Button
              type="button"
              variant="ghost"
              size="small"
              onClick={() => setOpen(false)}
              disabled={isSubmitting}
            >
              Cancel
            </Button>
            <Button
              variant="primary"
              size="small"
              type="submit"
              disabled={isSubmitting}
            >
              {isSubmitting ? "Updating..." : "Update Model"}
            </Button>
          </Dialog.Footer>
        </form>
      </Dialog.Content>
    </Dialog>
  );
}
@@ -1,131 +0,0 @@
"use client";

import type { LlmModel } from "@/app/api/__generated__/models/llmModel";
import type { LlmModelCreator } from "@/app/api/__generated__/models/llmModelCreator";
import type { LlmModelMigration } from "@/app/api/__generated__/models/llmModelMigration";
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";
import { ErrorBoundary } from "@/components/molecules/ErrorBoundary/ErrorBoundary";
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { AddProviderModal } from "./AddProviderModal";
import { AddModelModal } from "./AddModelModal";
import { AddCreatorModal } from "./AddCreatorModal";
import { ProviderList } from "./ProviderList";
import { ModelsTable } from "./ModelsTable";
import { MigrationsTable } from "./MigrationsTable";
import { CreatorsTable } from "./CreatorsTable";
import { RecommendedModelSelector } from "./RecommendedModelSelector";

interface Props {
  providers: LlmProvider[];
  models: LlmModel[];
  migrations: LlmModelMigration[];
  creators: LlmModelCreator[];
}

function AdminErrorFallback() {
  return (
    <div className="mx-auto max-w-xl p-6">
      <ErrorCard
        responseError={{
          message:
            "An error occurred while loading the LLM Registry. Please refresh the page.",
        }}
        context="llm-registry"
        onRetry={() => window.location.reload()}
      />
    </div>
  );
}

export function LlmRegistryDashboard({
  providers,
  models,
  migrations,
  creators,
}: Props) {
  return (
    <ErrorBoundary fallback={<AdminErrorFallback />} context="llm-registry">
      <div className="mx-auto p-6">
        <div className="flex flex-col gap-6">
          {/* Header */}
          <div>
            <h1 className="text-3xl font-bold">LLM Registry</h1>
            <p className="text-muted-foreground">
              Manage providers, creators, models, and credit pricing
            </p>
          </div>

          {/* Active Migrations Section - Only show if there are migrations */}
          {migrations.length > 0 && (
            <div className="rounded-lg border border-primary/30 bg-primary/5 p-6 shadow-sm">
              <div className="mb-4">
                <h2 className="text-xl font-semibold">Active Migrations</h2>
                <p className="mt-1 text-sm text-muted-foreground">
                  These migrations can be reverted to restore workflows to their
                  original model
                </p>
              </div>
              <MigrationsTable migrations={migrations} />
            </div>
          )}

          {/* Providers & Creators Section - Side by Side */}
          <div className="grid gap-6 lg:grid-cols-2">
            {/* Providers */}
            <div className="rounded-lg border bg-card p-6 shadow-sm">
              <div className="mb-4 flex items-center justify-between">
                <div>
                  <h2 className="text-xl font-semibold">Providers</h2>
                  <p className="mt-1 text-sm text-muted-foreground">
                    Who hosts/serves the models
                  </p>
                </div>
                <AddProviderModal />
              </div>
              <ProviderList providers={providers} />
            </div>

            {/* Creators */}
            <div className="rounded-lg border bg-card p-6 shadow-sm">
              <div className="mb-4 flex items-center justify-between">
                <div>
                  <h2 className="text-xl font-semibold">Creators</h2>
                  <p className="mt-1 text-sm text-muted-foreground">
                    Who made/trained the models
                  </p>
                </div>
                <AddCreatorModal />
              </div>
              <CreatorsTable creators={creators} />
            </div>
          </div>

          {/* Models Section */}
          <div className="rounded-lg border bg-card p-6 shadow-sm">
            <div className="mb-4 flex items-center justify-between">
              <div>
                <h2 className="text-xl font-semibold">Models</h2>
                <p className="mt-1 text-sm text-muted-foreground">
                  Toggle availability, adjust context windows, and update credit
                  pricing
                </p>
              </div>
              <AddModelModal providers={providers} creators={creators} />
            </div>

            {/* Recommended Model Selector */}
            <div className="mb-6">
              <RecommendedModelSelector models={models} />
            </div>

            <ModelsTable
              models={models}
              providers={providers}
              creators={creators}
            />
          </div>
        </div>
      </div>
    </ErrorBoundary>
  );
}
@@ -1,133 +0,0 @@
"use client";

import { useState } from "react";
import type { LlmModelMigration } from "@/app/api/__generated__/models/llmModelMigration";
import { Button } from "@/components/atoms/Button/Button";
import {
  Table,
  TableBody,
  TableCell,
  TableHead,
  TableHeader,
  TableRow,
} from "@/components/atoms/Table/Table";
import { revertLlmMigrationAction } from "../actions";

export function MigrationsTable({
  migrations,
}: {
  migrations: LlmModelMigration[];
}) {
  if (!migrations.length) {
    return (
      <div className="rounded-lg border border-dashed border-border p-6 text-center text-sm text-muted-foreground">
        No active migrations. Migrations are created when you disable a model
        with the "Migrate existing workflows" option.
      </div>
    );
  }

  return (
    <div className="rounded-lg border">
      <Table>
        <TableHeader>
          <TableRow>
            <TableHead>Migration</TableHead>
            <TableHead>Reason</TableHead>
            <TableHead>Nodes Affected</TableHead>
            <TableHead>Custom Cost</TableHead>
            <TableHead>Created</TableHead>
            <TableHead className="text-right">Actions</TableHead>
          </TableRow>
        </TableHeader>
        <TableBody>
          {migrations.map((migration) => (
            <MigrationRow key={migration.id} migration={migration} />
          ))}
        </TableBody>
      </Table>
    </div>
  );
}

function MigrationRow({ migration }: { migration: LlmModelMigration }) {
  const [isReverting, setIsReverting] = useState(false);
  const [error, setError] = useState<string | null>(null);

  async function handleRevert(formData: FormData) {
    setIsReverting(true);
    setError(null);
    try {
      await revertLlmMigrationAction(formData);
    } catch (err) {
      setError(
        err instanceof Error ? err.message : "Failed to revert migration",
      );
    } finally {
      setIsReverting(false);
    }
  }

  const createdDate = new Date(migration.created_at);

  return (
    <>
      <TableRow>
        <TableCell>
          <div className="text-sm">
            <span className="font-medium">{migration.source_model_slug}</span>
            <span className="mx-2 text-muted-foreground">→</span>
            <span className="font-medium">{migration.target_model_slug}</span>
          </div>
        </TableCell>
        <TableCell>
          <div className="text-sm text-muted-foreground">
            {migration.reason || "—"}
          </div>
        </TableCell>
        <TableCell>
          <div className="text-sm">{migration.node_count}</div>
        </TableCell>
        <TableCell>
          <div className="text-sm">
            {migration.custom_credit_cost !== null &&
            migration.custom_credit_cost !== undefined
              ? `${migration.custom_credit_cost} credits`
              : "—"}
          </div>
        </TableCell>
        <TableCell>
          <div className="text-sm text-muted-foreground">
            {createdDate.toLocaleDateString()}{" "}
            {createdDate.toLocaleTimeString([], {
              hour: "2-digit",
              minute: "2-digit",
            })}
          </div>
        </TableCell>
        <TableCell className="text-right">
          <form action={handleRevert} className="inline">
            <input type="hidden" name="migration_id" value={migration.id} />
            <Button
              type="submit"
              variant="outline"
              size="small"
              disabled={isReverting}
            >
              {isReverting ? "Reverting..." : "Revert"}
            </Button>
          </form>
        </TableCell>
      </TableRow>
      {error && (
        <TableRow>
          <TableCell colSpan={6}>
            <div className="rounded border border-destructive/30 bg-destructive/10 p-2 text-sm text-destructive">
              {error}
            </div>
          </TableCell>
        </TableRow>
      )}
    </>
  );
}
@@ -1,172 +0,0 @@
"use client";

import type { LlmModel } from "@/app/api/__generated__/models/llmModel";
import type { LlmModelCreator } from "@/app/api/__generated__/models/llmModelCreator";
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";
import {
  Table,
  TableBody,
  TableCell,
  TableHead,
  TableHeader,
  TableRow,
} from "@/components/atoms/Table/Table";
import { Button } from "@/components/atoms/Button/Button";
import { toggleLlmModelAction } from "../actions";
import { DeleteModelModal } from "./DeleteModelModal";
import { DisableModelModal } from "./DisableModelModal";
import { EditModelModal } from "./EditModelModal";
import { Star } from "@phosphor-icons/react";

export function ModelsTable({
  models,
  providers,
  creators,
}: {
  models: LlmModel[];
  providers: LlmProvider[];
  creators: LlmModelCreator[];
}) {
  if (!models.length) {
    return (
      <div className="rounded-lg border border-dashed border-border p-6 text-center text-sm text-muted-foreground">
        No models registered yet.
      </div>
    );
  }

  const providerLookup = new Map(
    providers.map((provider) => [provider.id, provider]),
  );

  return (
    <div className="rounded-lg border">
      <Table>
        <TableHeader>
          <TableRow>
            <TableHead>Model</TableHead>
            <TableHead>Provider</TableHead>
            <TableHead>Creator</TableHead>
            <TableHead>Context Window</TableHead>
            <TableHead>Max Output</TableHead>
            <TableHead>Cost</TableHead>
            <TableHead>Status</TableHead>
            <TableHead>Actions</TableHead>
          </TableRow>
        </TableHeader>
        <TableBody>
          {models.map((model) => {
            const cost = model.costs?.[0];
            const provider = providerLookup.get(model.provider_id);
            return (
              <TableRow
                key={model.id}
                className={model.is_enabled ? "" : "opacity-60"}
              >
                <TableCell>
                  <div className="font-medium">{model.display_name}</div>
                  <div className="text-xs text-muted-foreground">
                    {model.slug}
                  </div>
                </TableCell>
                <TableCell>
                  {provider ? (
                    <>
                      <div>{provider.display_name}</div>
                      <div className="text-xs text-muted-foreground">
                        {provider.name}
                      </div>
                    </>
                  ) : (
                    model.provider_id
                  )}
                </TableCell>
                <TableCell>
                  {model.creator ? (
                    <>
                      <div>{model.creator.display_name}</div>
                      <div className="text-xs text-muted-foreground">
                        {model.creator.name}
                      </div>
                    </>
                  ) : (
                    <span className="text-muted-foreground">—</span>
                  )}
                </TableCell>
                <TableCell>{model.context_window.toLocaleString()}</TableCell>
                <TableCell>
                  {model.max_output_tokens
                    ? model.max_output_tokens.toLocaleString()
                    : "—"}
                </TableCell>
                <TableCell>
                  {cost ? (
                    <>
                      <div className="font-medium">
                        {cost.credit_cost} credits
                      </div>
                      <div className="text-xs text-muted-foreground">
                        {cost.credential_provider}
                      </div>
                    </>
                  ) : (
                    "—"
                  )}
                </TableCell>
                <TableCell>
                  <div className="flex flex-col gap-1">
                    <span
                      className={`inline-flex rounded-full px-2.5 py-1 text-xs font-semibold ${
                        model.is_enabled
                          ? "bg-primary/10 text-primary"
                          : "bg-muted text-muted-foreground"
                      }`}
                    >
                      {model.is_enabled ? "Enabled" : "Disabled"}
                    </span>
                    {model.is_recommended && (
                      <span className="inline-flex items-center gap-1 rounded-full bg-amber-500/10 px-2.5 py-1 text-xs font-semibold text-amber-600 dark:text-amber-400">
                        <Star size={12} weight="fill" />
                        Recommended
                      </span>
                    )}
                  </div>
                </TableCell>
                <TableCell>
                  <div className="flex items-center justify-end gap-2">
                    {model.is_enabled ? (
                      <DisableModelModal
                        model={model}
                        availableModels={models}
                      />
                    ) : (
                      <EnableModelButton modelId={model.id} />
                    )}
                    <EditModelModal
                      model={model}
                      providers={providers}
                      creators={creators}
                    />
                    <DeleteModelModal model={model} availableModels={models} />
                  </div>
                </TableCell>
              </TableRow>
            );
          })}
        </TableBody>
      </Table>
    </div>
  );
}

function EnableModelButton({ modelId }: { modelId: string }) {
  return (
    <form action={toggleLlmModelAction} className="inline">
      <input type="hidden" name="model_id" value={modelId} />
      <input type="hidden" name="is_enabled" value="true" />
      <Button type="submit" variant="outline" size="small" className="min-w-0">
        Enable
      </Button>
    </form>
  );
}
@@ -1,71 +0,0 @@
import {
  Table,
  TableBody,
  TableCell,
  TableHead,
  TableHeader,
  TableRow,
} from "@/components/atoms/Table/Table";
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";

export function ProviderList({ providers }: { providers: LlmProvider[] }) {
  if (!providers.length) {
    return (
      <div className="rounded-lg border border-dashed border-border p-6 text-center text-sm text-muted-foreground">
        No providers configured yet.
      </div>
    );
  }

  return (
    <div className="rounded-lg border">
      <Table>
        <TableHeader>
          <TableRow>
            <TableHead>Name</TableHead>
            <TableHead>Display Name</TableHead>
            <TableHead>Default Credential</TableHead>
            <TableHead>Capabilities</TableHead>
          </TableRow>
        </TableHeader>
        <TableBody>
          {providers.map((provider) => (
            <TableRow key={provider.id}>
              <TableCell className="font-medium">{provider.name}</TableCell>
              <TableCell>{provider.display_name}</TableCell>
              <TableCell>
                {provider.default_credential_provider
                  ? `${provider.default_credential_provider} (${provider.default_credential_id ?? "id?"})`
                  : "—"}
              </TableCell>
              <TableCell className="text-sm text-muted-foreground">
                <div className="flex flex-wrap gap-2">
                  {provider.supports_tools && (
                    <span className="rounded bg-muted px-2 py-0.5 text-xs">
                      Tools
                    </span>
                  )}
                  {provider.supports_json_output && (
                    <span className="rounded bg-muted px-2 py-0.5 text-xs">
                      JSON
                    </span>
                  )}
                  {provider.supports_reasoning && (
                    <span className="rounded bg-muted px-2 py-0.5 text-xs">
                      Reasoning
                    </span>
                  )}
                  {provider.supports_parallel_tool && (
                    <span className="rounded bg-muted px-2 py-0.5 text-xs">
                      Parallel Tools
                    </span>
                  )}
                </div>
              </TableCell>
            </TableRow>
          ))}
        </TableBody>
      </Table>
    </div>
  );
}
@@ -1,87 +0,0 @@
"use client";

import { useState } from "react";
import { useRouter } from "next/navigation";
import type { LlmModel } from "@/app/api/__generated__/models/llmModel";
import { Button } from "@/components/atoms/Button/Button";
import { setRecommendedModelAction } from "../actions";
import { Star } from "@phosphor-icons/react";

export function RecommendedModelSelector({ models }: { models: LlmModel[] }) {
  const router = useRouter();
  const enabledModels = models.filter((m) => m.is_enabled);
  const currentRecommended = models.find((m) => m.is_recommended);

  const [selectedModelId, setSelectedModelId] = useState<string>(
    currentRecommended?.id || "",
  );
  const [isSaving, setIsSaving] = useState(false);
  const [error, setError] = useState<string | null>(null);

  const hasChanges = selectedModelId !== (currentRecommended?.id || "");

  async function handleSave() {
    if (!selectedModelId) return;

    setIsSaving(true);
    setError(null);
    try {
      const formData = new FormData();
      formData.set("model_id", selectedModelId);
      await setRecommendedModelAction(formData);
      router.refresh();
    } catch (err) {
      setError(err instanceof Error ? err.message : "Failed to save");
    } finally {
      setIsSaving(false);
    }
  }

  return (
    <div className="rounded-lg border border-border bg-card p-4">
      <div className="mb-3 flex items-center gap-2">
        <Star size={20} weight="fill" className="text-amber-500" />
        <h3 className="text-sm font-semibold">Recommended Model</h3>
      </div>
      <p className="mb-3 text-xs text-muted-foreground">
        The recommended model is shown as the default suggestion in model
        selection dropdowns throughout the platform.
      </p>

      <div className="flex items-center gap-3">
        <select
          value={selectedModelId}
          onChange={(e) => setSelectedModelId(e.target.value)}
          className="flex-1 rounded-md border border-input bg-background px-3 py-2 text-sm"
          disabled={isSaving}
        >
          <option value="">-- Select a model --</option>
          {enabledModels.map((model) => (
            <option key={model.id} value={model.id}>
              {model.display_name} ({model.slug})
            </option>
          ))}
        </select>

        <Button
          type="button"
          variant="primary"
          size="small"
          onClick={handleSave}
          disabled={!hasChanges || !selectedModelId || isSaving}
        >
          {isSaving ? "Saving..." : "Save"}
        </Button>
      </div>

      {error && <p className="mt-2 text-xs text-destructive">{error}</p>}

      {currentRecommended && !hasChanges && (
        <p className="mt-2 text-xs text-muted-foreground">
          Currently set to:{" "}
          <span className="font-medium">{currentRecommended.display_name}</span>
        </p>
      )}
    </div>
  );
}
@@ -1,46 +0,0 @@
/**
 * Server-side data fetching for LLM Registry page.
 */

import {
  fetchLlmCreators,
  fetchLlmMigrations,
  fetchLlmModels,
  fetchLlmProviders,
} from "./actions";

export async function getLlmRegistryPageData() {
  // Fetch providers and models (required)
  const [providersResponse, modelsResponse] = await Promise.all([
    fetchLlmProviders(),
    fetchLlmModels(),
  ]);

  // Fetch migrations separately with fallback (table might not exist yet)
  let migrations: Awaited<ReturnType<typeof fetchLlmMigrations>>["migrations"] =
    [];
  try {
    const migrationsResponse = await fetchLlmMigrations(false);
    migrations = migrationsResponse.migrations;
  } catch {
    // Migrations table might not exist yet - that's ok, just show empty list
    console.warn("Could not fetch migrations - table may not exist yet");
  }

  // Fetch creators separately with fallback (table might not exist yet)
  let creators: Awaited<ReturnType<typeof fetchLlmCreators>>["creators"] = [];
  try {
    const creatorsResponse = await fetchLlmCreators();
    creators = creatorsResponse.creators;
  } catch {
    // Creators table might not exist yet - that's ok, just show empty list
    console.warn("Could not fetch creators - table may not exist yet");
  }

  return {
    providers: providersResponse.providers,
    models: modelsResponse.models,
    migrations,
    creators,
  };
}
@@ -1,14 +0,0 @@
import { withRoleAccess } from "@/lib/withRoleAccess";
import { getLlmRegistryPageData } from "./getLlmRegistryPage";
import { LlmRegistryDashboard } from "./components/LlmRegistryDashboard";

async function LlmRegistryPage() {
  const data = await getLlmRegistryPageData();
  return <LlmRegistryDashboard {...data} />;
}

export default async function AdminLlmRegistryPage() {
  const withAdminAccess = await withRoleAccess(["admin"]);
  const ProtectedLlmRegistryPage = await withAdminAccess(LlmRegistryPage);
  return <ProtectedLlmRegistryPage />;
}
@@ -9,7 +9,7 @@ export async function GET(request: Request) {
  const { searchParams, origin } = new URL(request.url);
  const code = searchParams.get("code");

  let next = "/marketplace";
  let next = "/";

  if (code) {
    const supabase = await getServerSupabase();

@@ -1,11 +1,11 @@
"use client";

import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInput";
import { useGetOauthGetOauthAppInfo } from "@/app/api/__generated__/endpoints/oauth/oauth";
import { okData } from "@/app/api/helpers";
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import { AuthCard } from "@/components/auth/AuthCard";
import { CredentialsInput } from "@/components/contextual/CredentialsInput/CredentialsInput";
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import type {
  BlockIOCredentialsSubSchema,

@@ -1,11 +1,6 @@
import { BlockUIType } from "@/app/(platform)/build/components/types";
import { useGraphStore } from "@/app/(platform)/build/stores/graphStore";
import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
import {
  globalRegistry,
  OutputActions,
  OutputItem,
} from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
import { Label } from "@/components/__legacy__/ui/label";
import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
import {
@@ -23,6 +18,11 @@ import {
  TooltipProvider,
  TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import {
  globalRegistry,
  OutputActions,
  OutputItem,
} from "@/components/contextual/OutputRenderers";
import { BookOpenIcon } from "@phosphor-icons/react";
import { useMemo } from "react";
import { useShallow } from "zustand/react/shallow";

@@ -1,7 +1,8 @@
import { useGraphStore } from "@/app/(platform)/build/stores/graphStore";
import { usePostV1ExecuteGraphAgent } from "@/app/api/__generated__/endpoints/graphs/graphs";
import { useToast } from "@/components/molecules/Toast/use-toast";

import {
  ApiError,
  CredentialsMetaInput,
  GraphExecutionMeta,
} from "@/lib/autogpt-server-api";
@@ -9,6 +10,9 @@ import { parseAsInteger, parseAsString, useQueryStates } from "nuqs";
import { useMemo, useState } from "react";
import { uiSchema } from "../../../FlowEditor/nodes/uiSchema";
import { isCredentialFieldSchema } from "@/components/renderers/InputRenderer/custom/CredentialField/helpers";
import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
import { useToast } from "@/components/molecules/Toast/use-toast";
import { useReactFlow } from "@xyflow/react";

export const useRunInputDialog = ({
  setIsOpen,
@@ -31,6 +35,7 @@
    flowVersion: parseAsInteger,
  });
  const { toast } = useToast();
  const { setViewport } = useReactFlow();

  const { mutateAsync: executeGraph, isPending: isExecutingGraph } =
    usePostV1ExecuteGraphAgent({
@@ -42,13 +47,75 @@
          });
        },
        onError: (error) => {
          // Reset running state on error
          if (error instanceof ApiError && error.isGraphValidationError()) {
            const errorData = error.response?.detail || {
              node_errors: {},
              message: undefined,
            };
            const nodeErrors = errorData.node_errors || {};

            if (Object.keys(nodeErrors).length > 0) {
              Object.entries(nodeErrors).forEach(
                ([nodeId, nodeErrorsForNode]) => {
                  useNodeStore
                    .getState()
                    .updateNodeErrors(
                      nodeId,
                      nodeErrorsForNode as { [key: string]: string },
                    );
                },
              );
            } else {
              useNodeStore.getState().nodes.forEach((node) => {
                useNodeStore.getState().updateNodeErrors(node.id, {});
              });
            }

            toast({
              title: errorData?.message || "Graph validation failed",
              description:
                "Please fix the validation errors on the highlighted nodes and try again.",
              variant: "destructive",
            });
            setIsOpen(false);

            const firstBackendId = Object.keys(nodeErrors)[0];

            if (firstBackendId) {
              const firstErrorNode = useNodeStore
                .getState()
                .nodes.find(
                  (n) =>
                    n.data.metadata?.backend_id === firstBackendId ||
                    n.id === firstBackendId,
                );

              if (firstErrorNode) {
                setTimeout(() => {
                  setViewport(
                    {
                      x:
                        -firstErrorNode.position.x * 0.8 +
                        window.innerWidth / 2 -
                        150,
                      y: -firstErrorNode.position.y * 0.8 + 50,
                      zoom: 0.8,
                    },
                    { duration: 500 },
                  );
                }, 50);
              }
            }
          } else {
            toast({
              title: "Error running graph",
              description:
                (error as Error).message || "An unexpected error occurred.",
              variant: "destructive",
            });
            setIsOpen(false);
          }
          setIsGraphRunning(false);
          toast({
            title: (error.detail as string) ?? "An unexpected error occurred.",
            description: "An unexpected error occurred.",
            variant: "destructive",
          });
        },
      },
    });

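A note on the viewport math in the hunk above: when validation fails, the hook pans the canvas to the first node that reported errors, centering it at 0.8 zoom. A minimal sketch of that centering call, assuming only React Flow's setViewport as used in the hunk (the node variable and pixel offsets are taken directly from it):

    // Sketch: pan the React Flow viewport so a node lands near screen center.
    const zoom = 0.8;
    setViewport(
      {
        // Negate the node position, then offset by half the window width
        // (minus ~150px for the node itself) to center it horizontally.
        x: -firstErrorNode.position.x * zoom + window.innerWidth / 2 - 150,
        y: -firstErrorNode.position.y * zoom + 50, // small top margin
        zoom,
      },
      { duration: 500 }, // animate the pan over half a second
    );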
@@ -55,14 +55,16 @@ export const Flow = () => {
  const edgeTypes = useMemo(() => ({ custom: CustomEdge }), []);

  const onNodeDragStop = useCallback(() => {
    const currentNodes = useNodeStore.getState().nodes;
    setNodes(
      resolveCollisions(nodes, {
      resolveCollisions(currentNodes, {
        maxIterations: Infinity,
        overlapThreshold: 0.5,
        margin: 15,
      }),
    );
  }, [setNodes, nodes]);
  }, [setNodes]);

  const { edges, onConnect, onEdgesChange } = useCustomEdge();

  // for loading purpose

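The change above is a stale-closure fix: the callback previously captured `nodes` from render scope (and had to list it as a dependency), so collision resolution could run against an outdated array. Reading the live store via `getState()` sidesteps that. A reduced sketch of the pattern, assuming `useNodeStore` is a zustand store (names taken from the hunk):

    // Sketch: read fresh store state inside a memoized callback instead of
    // closing over a render-time snapshot of `nodes`.
    const onNodeDragStop = useCallback(() => {
      const currentNodes = useNodeStore.getState().nodes; // always current
      setNodes(resolveCollisions(currentNodes, { margin: 15 }));
    }, [setNodes]); // `nodes` no longer needs to be a dependency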
@@ -6,6 +6,7 @@ import {
import { useEdgeStore } from "@/app/(platform)/build/stores/edgeStore";
import { useCallback } from "react";
import { useNodeStore } from "../../../stores/nodeStore";
import { useHistoryStore } from "../../../stores/historyStore";
import { CustomEdge } from "./CustomEdge";

export const useCustomEdge = () => {
@@ -51,7 +52,20 @@

  const onEdgesChange = useCallback(
    (changes: EdgeChange<CustomEdge>[]) => {
      const hasRemoval = changes.some((change) => change.type === "remove");

      const prevState = hasRemoval
        ? {
            nodes: useNodeStore.getState().nodes,
            edges: edges,
          }
        : null;

      setEdges(applyEdgeChanges(changes, edges));

      if (prevState) {
        useHistoryStore.getState().pushState(prevState);
      }
    },
    [edges, setEdges],
  );

@@ -20,11 +20,13 @@ type Props = {

export const NodeHeader = ({ data, nodeId }: Props) => {
  const updateNodeData = useNodeStore((state) => state.updateNodeData);
  const title = (data.metadata?.customized_name as string) || data.title;
  const title =
    (data.metadata?.customized_name as string) ||
    data.hardcodedValues?.agent_name ||
    data.title;

  const [isEditingTitle, setIsEditingTitle] = useState(false);
  const [editedTitle, setEditedTitle] = useState(
    beautifyString(title).replace("Block", "").trim(),
  );
  const [editedTitle, setEditedTitle] = useState(title);

  const handleTitleEdit = () => {
    updateNodeData(nodeId, {

@@ -1,7 +1,7 @@
"use client";

import type { OutputMetadata } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
import { globalRegistry } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
import { globalRegistry } from "@/components/contextual/OutputRenderers";

export const TextRenderer: React.FC<{
  value: any;

@@ -1,7 +1,3 @@
import {
  OutputActions,
  OutputItem,
} from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
@@ -11,6 +7,10 @@ import {
  TooltipProvider,
  TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import {
  OutputActions,
  OutputItem,
} from "@/components/contextual/OutputRenderers";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { beautifyString } from "@/lib/utils";
import {

@@ -1,6 +1,6 @@
import type { OutputMetadata } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
import { globalRegistry } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
import { downloadOutputs } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/utils/download";
import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
import { globalRegistry } from "@/components/contextual/OutputRenderers";
import { downloadOutputs } from "@/components/contextual/OutputRenderers/utils/download";
import { useToast } from "@/components/molecules/Toast/use-toast";
import { beautifyString } from "@/lib/utils";
import React, { useMemo, useState } from "react";

@@ -1,10 +1,10 @@
import { Alert, AlertDescription } from "@/components/molecules/Alert/Alert";
import { Text } from "@/components/atoms/Text/Text";
import Link from "next/link";
import { useGetV2GetLibraryAgentByGraphId } from "@/app/api/__generated__/endpoints/library/library";
import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import { useQueryStates, parseAsString } from "nuqs";
import { isValidUUID } from "@/app/(platform)/chat/helpers";
import { Text } from "@/components/atoms/Text/Text";
import { Alert, AlertDescription } from "@/components/molecules/Alert/Alert";
import { isValidUUID } from "@/lib/utils";
import Link from "next/link";
import { parseAsString, useQueryStates } from "nuqs";

export const WebhookDisclaimer = ({ nodeId }: { nodeId: string }) => {
  const [{ flowID }] = useQueryStates({

@@ -31,8 +31,6 @@ export const OutputHandler = ({
  const [isOutputVisible, setIsOutputVisible] = useState(true);
  const brokenOutputs = useBrokenOutputs(nodeId);

  console.log("brokenOutputs", brokenOutputs);

  const showHandles = uiType !== BlockUIType.OUTPUT;

  const renderOutputHandles = (

@@ -7,9 +7,8 @@ import { BlockCategoryResponse } from "@/app/api/__generated__/models/blockCateg
import { BlockResponse } from "@/app/api/__generated__/models/blockResponse";
import * as Sentry from "@sentry/nextjs";
import { getQueryClient } from "@/lib/react-query/queryClient";
import { useState, useEffect } from "react";
import { useState } from "react";
import { useToast } from "@/components/molecules/Toast/use-toast";
import BackendApi from "@/lib/autogpt-server-api";

export const useAllBlockContent = () => {
  const { toast } = useToast();
@@ -94,32 +93,6 @@ export const useAllBlockContent = () => {
  const isErrorOnLoadingMore = (categoryName: string) =>
    errorLoadingCategories.has(categoryName);

  // Listen for LLM registry refresh notifications
  useEffect(() => {
    const api = new BackendApi();
    const queryClient = getQueryClient();

    const handleNotification = (notification: any) => {
      if (
        notification?.type === "LLM_REGISTRY_REFRESH" ||
        notification?.event === "registry_updated"
      ) {
        // Invalidate all block-related queries to force refresh
        const categoriesQueryKey = getGetV2GetBuilderBlockCategoriesQueryKey();
        queryClient.invalidateQueries({ queryKey: categoriesQueryKey });
      }
    };

    const unsubscribe = api.onWebSocketMessage(
      "notification",
      handleNotification,
    );

    return () => {
      unsubscribe();
    };
  }, []);

  return {
    data,
    isLoading,

@@ -1,9 +1,9 @@
import type { OutputMetadata } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
import {
  globalRegistry,
  OutputActions,
  OutputItem,
} from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
} from "@/components/contextual/OutputRenderers";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { beautifyString } from "@/lib/utils";
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";

@@ -3,7 +3,6 @@ import {
  CustomNodeData,
} from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
import { NodeTableInput } from "@/app/(platform)/build/components/legacy-builder/NodeTableInput";
import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInput";
import { Button } from "@/components/__legacy__/ui/button";
import { Calendar } from "@/components/__legacy__/ui/calendar";
import { LocalValuedInput } from "@/components/__legacy__/ui/input";
@@ -28,6 +27,7 @@ import {
  SelectValue,
} from "@/components/__legacy__/ui/select";
import { Switch } from "@/components/atoms/Switch/Switch";
import { CredentialsInput } from "@/components/contextual/CredentialsInput/CredentialsInput";
import { GoogleDrivePickerInput } from "@/components/contextual/GoogleDrivePicker/GoogleDrivePickerInput";
import {
  BlockIOArraySubSchema,
@@ -610,11 +610,8 @@ const NodeOneOfDiscriminatorField: FC<{

  return oneOfVariants
    .map((variant) => {
      const discProperty = variant.properties?.[discriminatorProperty];
      const variantDiscValue =
        discProperty && "const" in discProperty
          ? (discProperty.const as string)
          : undefined; // NOTE: can discriminators only be strings?
      const variantDiscValue = variant.properties?.[discriminatorProperty]
        ?.const as string; // NOTE: can discriminators only be strings?

      return {
        value: variantDiscValue,
@@ -1127,47 +1124,9 @@ const NodeStringInput: FC<{
  displayName,
}) => {
  value ||= schema.default || "";

  // Check if we have options with labels (e.g., LLM model picker)
  const hasOptions = schema.options && schema.options.length > 0;
  const hasEnum = schema.enum && schema.enum.length > 0;

  // Helper to get display label for a value
  const getDisplayLabel = (val: string) => {
    if (hasOptions) {
      const option = schema.options!.find((opt) => opt.value === val);
      return option?.label || beautifyString(val);
    }
    return beautifyString(val);
  };

  return (
    <div className={className}>
      {hasOptions ? (
        // Render options with proper labels (used by LLM model picker)
        <Select
          defaultValue={value}
          onValueChange={(newValue) => handleInputChange(selfKey, newValue)}
        >
          <SelectTrigger>
            <SelectValue placeholder={schema.placeholder || displayName}>
              {value ? getDisplayLabel(value) : undefined}
            </SelectValue>
          </SelectTrigger>
          <SelectContent className="nodrag">
            {schema.options!.map((option, index) => (
              <SelectItem
                key={index}
                value={option.value}
                title={option.description}
              >
                {option.label}
              </SelectItem>
            ))}
          </SelectContent>
        </Select>
      ) : hasEnum ? (
        // Fallback to enum with beautified strings
      {schema.enum && schema.enum.length > 0 ? (
        <Select
          defaultValue={value}
          onValueChange={(newValue) => handleInputChange(selfKey, newValue)}
@@ -1176,8 +1135,8 @@ const NodeStringInput: FC<{
          <SelectValue placeholder={schema.placeholder || displayName} />
        </SelectTrigger>
        <SelectContent className="nodrag">
          {schema
            .enum!.filter((option) => option)
          {schema.enum
            .filter((option) => option)
            .map((option, index) => (
              <SelectItem key={index} value={option}>
                {beautifyString(option)}

@@ -5,6 +5,8 @@ import { customEdgeToLink, linkToCustomEdge } from "../components/helper";
import { MarkerType } from "@xyflow/react";
import { NodeExecutionResult } from "@/app/api/__generated__/models/nodeExecutionResult";
import { cleanUpHandleId } from "@/components/renderers/InputRenderer/helpers";
import { useHistoryStore } from "./historyStore";
import { useNodeStore } from "./nodeStore";

type EdgeStore = {
  edges: CustomEdge[];
@@ -53,25 +55,36 @@ export const useEdgeStore = create<EdgeStore>((set, get) => ({
      id,
    };

    set((state) => {
      const exists = state.edges.some(
        (e) =>
          e.source === newEdge.source &&
          e.target === newEdge.target &&
          e.sourceHandle === newEdge.sourceHandle &&
          e.targetHandle === newEdge.targetHandle,
      );
      if (exists) return state;
      return { edges: [...state.edges, newEdge] };
    });
    const exists = get().edges.some(
      (e) =>
        e.source === newEdge.source &&
        e.target === newEdge.target &&
        e.sourceHandle === newEdge.sourceHandle &&
        e.targetHandle === newEdge.targetHandle,
    );
    if (exists) return newEdge;
    const prevState = {
      nodes: useNodeStore.getState().nodes,
      edges: get().edges,
    };

    set((state) => ({ edges: [...state.edges, newEdge] }));
    useHistoryStore.getState().pushState(prevState);

    return newEdge;
  },

  removeEdge: (edgeId) =>
  removeEdge: (edgeId) => {
    const prevState = {
      nodes: useNodeStore.getState().nodes,
      edges: get().edges,
    };

    set((state) => ({
      edges: state.edges.filter((e) => e.id !== edgeId),
    })),
    }));
    useHistoryStore.getState().pushState(prevState);
  },

  upsertMany: (edges) =>
    set((state) => {

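Both addEdge and removeEdge now follow the same discipline: capture the graph before mutating, apply the store update, then push the captured snapshot to the history store so undo can restore it. The shape of that pattern, reduced to its essentials (store names are from the hunk; the wrapper itself is illustrative):

    // Sketch: snapshot-before-mutation so the history store always records
    // the state an undo should return to.
    function mutateWithHistory(mutate: () => void) {
      const prevState = {
        nodes: useNodeStore.getState().nodes,
        edges: useEdgeStore.getState().edges,
      };
      mutate(); // apply the zustand set() update
      useHistoryStore.getState().pushState(prevState); // undo target
    }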
@@ -37,6 +37,15 @@ export const useHistoryStore = create<HistoryStore>((set, get) => ({
      return;
    }

    const actualCurrentState = {
      nodes: useNodeStore.getState().nodes,
      edges: useEdgeStore.getState().edges,
    };

    if (isEqual(state, actualCurrentState)) {
      return;
    }

    set((prev) => ({
      past: [...prev.past.slice(-MAX_HISTORY + 1), state],
      future: [],
@@ -55,18 +64,25 @@ export const useHistoryStore = create<HistoryStore>((set, get) => ({

  undo: () => {
    const { past, future } = get();
    if (past.length <= 1) return;
    if (past.length === 0) return;

    const currentState = past[past.length - 1];
    const actualCurrentState = {
      nodes: useNodeStore.getState().nodes,
      edges: useEdgeStore.getState().edges,
    };

    const previousState = past[past.length - 2];
    const previousState = past[past.length - 1];

    if (isEqual(actualCurrentState, previousState)) {
      return;
    }

    useNodeStore.getState().setNodes(previousState.nodes);
    useEdgeStore.getState().setEdges(previousState.edges);

    set({
      past: past.slice(0, -1),
      future: [currentState, ...future],
      past: past.length > 1 ? past.slice(0, -1) : past,
      future: [actualCurrentState, ...future],
    });
  },

@@ -74,18 +90,36 @@ export const useHistoryStore = create<HistoryStore>((set, get) => ({
    const { past, future } = get();
    if (future.length === 0) return;

    const actualCurrentState = {
      nodes: useNodeStore.getState().nodes,
      edges: useEdgeStore.getState().edges,
    };

    const nextState = future[0];

    useNodeStore.getState().setNodes(nextState.nodes);
    useEdgeStore.getState().setEdges(nextState.edges);

    const lastPast = past[past.length - 1];
    const shouldPushToPast =
      !lastPast || !isEqual(actualCurrentState, lastPast);

    set({
      past: [...past, nextState],
      past: shouldPushToPast ? [...past, actualCurrentState] : past,
      future: future.slice(1),
    });
  },

  canUndo: () => get().past.length > 1,
  canUndo: () => {
    const { past } = get();
    if (past.length === 0) return false;

    const actualCurrentState = {
      nodes: useNodeStore.getState().nodes,
      edges: useEdgeStore.getState().edges,
    };
    return !isEqual(actualCurrentState, past[past.length - 1]);
  },
  canRedo: () => get().future.length > 0,

  clear: () => set({ past: [{ nodes: [], edges: [] }], future: [] }),

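The reworked history store treats the top of `past` as the undo target and compares it against the live canvas state before doing anything, which suppresses both duplicate snapshots and no-op undos. Condensed, the undo path looks roughly like this (isEqual is assumed to be a deep-equality helper such as lodash's):

    // Sketch: undo restores past[last] only if it differs from what is
    // currently rendered; the live state becomes the redo target.
    const current = {
      nodes: useNodeStore.getState().nodes,
      edges: useEdgeStore.getState().edges,
    };
    const target = past[past.length - 1];
    if (!isEqual(current, target)) {
      useNodeStore.getState().setNodes(target.nodes);
      useEdgeStore.getState().setEdges(target.edges);
      set({
        past: past.length > 1 ? past.slice(0, -1) : past,
        future: [current, ...future], // redo can bring `current` back
      });
    }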
@@ -1,6 +1,7 @@
import { create } from "zustand";
import { NodeChange, XYPosition, applyNodeChanges } from "@xyflow/react";
import { CustomNode } from "../components/FlowEditor/nodes/CustomNode/CustomNode";
import { CustomEdge } from "../components/FlowEditor/edges/CustomEdge";
import { BlockInfo } from "@/app/api/__generated__/models/blockInfo";
import {
  convertBlockInfoIntoCustomNodeData,
@@ -44,6 +45,8 @@ const MINIMUM_MOVE_BEFORE_LOG = 50;
// Track initial positions when drag starts (outside store to avoid re-renders)
const dragStartPositions: Record<string, XYPosition> = {};

let dragStartState: { nodes: CustomNode[]; edges: CustomEdge[] } | null = null;

type NodeStore = {
  nodes: CustomNode[];
  nodeCounter: number;
@@ -124,14 +127,20 @@ export const useNodeStore = create<NodeStore>((set, get) => ({
      nodeCounter: state.nodeCounter + 1,
    })),
  onNodesChange: (changes) => {
    const prevState = {
      nodes: get().nodes,
      edges: useEdgeStore.getState().edges,
    };

    // Track initial positions when drag starts
    changes.forEach((change) => {
      if (change.type === "position" && change.dragging === true) {
        if (!dragStartState) {
          const currentNodes = get().nodes;
          const currentEdges = useEdgeStore.getState().edges;
          dragStartState = {
            nodes: currentNodes.map((n) => ({
              ...n,
              position: { ...n.position },
              data: { ...n.data },
            })),
            edges: currentEdges.map((e) => ({ ...e })),
          };
        }
        if (!dragStartPositions[change.id]) {
          const node = get().nodes.find((n) => n.id === change.id);
          if (node) {
@@ -141,12 +150,17 @@ export const useNodeStore = create<NodeStore>((set, get) => ({
      }
    });

    // Check if we should track this change in history
    let shouldTrack = changes.some(
      (change) => change.type === "remove" || change.type === "add",
    );
    let shouldTrack = changes.some((change) => change.type === "remove");
    let stateToTrack: { nodes: CustomNode[]; edges: CustomEdge[] } | null =
      null;

    if (shouldTrack) {
      stateToTrack = {
        nodes: get().nodes,
        edges: useEdgeStore.getState().edges,
      };
    }

    // For position changes, only track if movement exceeds threshold
    if (!shouldTrack) {
      changes.forEach((change) => {
        if (change.type === "position" && change.dragging === false) {
@@ -158,20 +172,23 @@ export const useNodeStore = create<NodeStore>((set, get) => ({
          );
          if (distanceMoved > MINIMUM_MOVE_BEFORE_LOG) {
            shouldTrack = true;
            stateToTrack = dragStartState;
          }
        }
        // Clean up tracked position after drag ends
        delete dragStartPositions[change.id];
        }
      });
      if (Object.keys(dragStartPositions).length === 0) {
        dragStartState = null;
      }
    }

    set((state) => ({
      nodes: applyNodeChanges(changes, state.nodes),
    }));

    if (shouldTrack) {
      useHistoryStore.getState().pushState(prevState);
    if (shouldTrack && stateToTrack) {
      useHistoryStore.getState().pushState(stateToTrack);
    }
  },

@@ -185,6 +202,11 @@ export const useNodeStore = create<NodeStore>((set, get) => ({
    hardcodedValues?: Record<string, any>,
    position?: XYPosition,
  ) => {
    const prevState = {
      nodes: get().nodes,
      edges: useEdgeStore.getState().edges,
    };

    const customNodeData = convertBlockInfoIntoCustomNodeData(
      block,
      hardcodedValues,
@@ -218,21 +240,24 @@ export const useNodeStore = create<NodeStore>((set, get) => ({
    set((state) => ({
      nodes: [...state.nodes, customNode],
    }));

    useHistoryStore.getState().pushState(prevState);

    return customNode;
  },
  updateNodeData: (nodeId, data) => {
    const prevState = {
      nodes: get().nodes,
      edges: useEdgeStore.getState().edges,
    };

    set((state) => ({
      nodes: state.nodes.map((n) =>
        n.id === nodeId ? { ...n, data: { ...n.data, ...data } } : n,
      ),
    }));

    const newState = {
      nodes: get().nodes,
      edges: useEdgeStore.getState().edges,
    };

    useHistoryStore.getState().pushState(newState);
    useHistoryStore.getState().pushState(prevState);
  },
  toggleAdvanced: (nodeId: string) =>
    set((state) => ({
@@ -391,6 +416,11 @@ export const useNodeStore = create<NodeStore>((set, get) => ({
  },

  setCredentialsOptional: (nodeId: string, optional: boolean) => {
    const prevState = {
      nodes: get().nodes,
      edges: useEdgeStore.getState().edges,
    };

    set((state) => ({
      nodes: state.nodes.map((n) =>
        n.id === nodeId
@@ -408,12 +438,7 @@ export const useNodeStore = create<NodeStore>((set, get) => ({
      ),
    }));

    const newState = {
      nodes: get().nodes,
      edges: useEdgeStore.getState().edges,
    };

    useHistoryStore.getState().pushState(newState);
    useHistoryStore.getState().pushState(prevState);
  },

  // Sub-agent resolution mode state

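The node store applies the same snapshot idea to drags: dragStartState is captured once when dragging begins, and it is only committed to history if the node actually travelled farther than MINIMUM_MOVE_BEFORE_LOG. The hunk elides the distance computation; a plausible version follows (the Math.hypot line is an assumption, the surrounding names are from the hunk):

    // Sketch: gate history entries on a minimum drag distance.
    const start = dragStartPositions[change.id]; // recorded at drag start
    const distanceMoved = Math.hypot(
      node.position.x - start.x,
      node.position.y - start.y,
    );
    if (distanceMoved > MINIMUM_MOVE_BEFORE_LOG) {
      shouldTrack = true;
      stateToTrack = dragStartState; // pre-drag snapshot is the undo target
    }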
@@ -0,0 +1,111 @@
"use client";

import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import { cn } from "@/lib/utils";
import type { ReactNode } from "react";
import { ChatContainer } from "./components/ChatContainer/ChatContainer";
import { ChatErrorState } from "./components/ChatErrorState/ChatErrorState";
import { ChatLoadingState } from "./components/ChatLoadingState/ChatLoadingState";
import { useChat } from "./useChat";

export interface ChatProps {
  className?: string;
  headerTitle?: ReactNode;
  showHeader?: boolean;
  showSessionInfo?: boolean;
  showNewChatButton?: boolean;
  onNewChat?: () => void;
  headerActions?: ReactNode;
  urlSessionId?: string | null;
  initialPrompt?: string | null;
}

export function Chat({
  className,
  headerTitle = "AutoGPT Copilot",
  showHeader = true,
  showSessionInfo = true,
  showNewChatButton = true,
  onNewChat,
  headerActions,
  urlSessionId,
  initialPrompt,
}: ChatProps) {
  const {
    messages,
    isLoading,
    isCreating,
    error,
    sessionId,
    createSession,
    clearSession,
  } = useChat({ urlSessionId });

  function handleNewChat() {
    clearSession();
    onNewChat?.();
  }

  return (
    <div className={cn("flex h-full flex-col", className)}>
      {/* Header */}
      {showHeader && (
        <header className="shrink-0 border-t border-zinc-200 bg-white p-3">
          <div className="flex items-center justify-between">
            <div className="flex items-center gap-3">
              {typeof headerTitle === "string" ? (
                <Text variant="h2" className="text-lg font-semibold">
                  {headerTitle}
                </Text>
              ) : (
                headerTitle
              )}
            </div>
            <div className="flex items-center gap-3">
              {showSessionInfo && sessionId && (
                <>
                  {showNewChatButton && (
                    <Button
                      variant="outline"
                      size="small"
                      onClick={handleNewChat}
                    >
                      New Chat
                    </Button>
                  )}
                </>
              )}
              {headerActions}
            </div>
          </div>
        </header>
      )}

      {/* Main Content */}
      <main className="flex min-h-0 flex-1 flex-col overflow-hidden">
        {/* Loading State - show when explicitly loading/creating OR when we don't have a session yet and no error */}
        {(isLoading || isCreating || (!sessionId && !error)) && (
          <ChatLoadingState
            message={isCreating ? "Creating session..." : "Loading..."}
          />
        )}

        {/* Error State */}
        {error && !isLoading && (
          <ChatErrorState error={error} onRetry={createSession} />
        )}

        {/* Session Content */}
        {sessionId && !isLoading && !error && (
          <ChatContainer
            sessionId={sessionId}
            initialMessages={messages}
            initialPrompt={initialPrompt}
            className="flex-1"
          />
        )}
      </main>
    </div>
  );
}
@@ -1,15 +1,16 @@
import React from "react";
import { Text } from "@/components/atoms/Text/Text";
import { Button } from "@/components/atoms/Button/Button";
import { Card } from "@/components/atoms/Card/Card";
import { List, Robot, ArrowRight } from "@phosphor-icons/react";
import { Text } from "@/components/atoms/Text/Text";
import { cn } from "@/lib/utils";
import { ArrowRight, List, Robot } from "@phosphor-icons/react";
import Image from "next/image";

export interface Agent {
  id: string;
  name: string;
  description: string;
  version?: number;
  image_url?: string;
}

export interface AgentCarouselMessageProps {
@@ -30,7 +31,7 @@ export function AgentCarouselMessage({
  return (
    <div
      className={cn(
        "mx-4 my-2 flex flex-col gap-4 rounded-lg border border-purple-200 bg-purple-50 p-6 dark:border-purple-900 dark:bg-purple-950",
        "mx-4 my-2 flex flex-col gap-4 rounded-lg border border-purple-200 bg-purple-50 p-6",
        className,
      )}
    >
@@ -40,13 +41,10 @@
          <List size={24} weight="bold" className="text-white" />
        </div>
        <div>
          <Text variant="h3" className="text-purple-900 dark:text-purple-100">
          <Text variant="h3" className="text-purple-900">
            Found {displayCount} {displayCount === 1 ? "Agent" : "Agents"}
          </Text>
          <Text
            variant="small"
            className="text-purple-700 dark:text-purple-300"
          >
          <Text variant="small" className="text-purple-700">
            Select an agent to view details or run it
          </Text>
        </div>
@@ -57,40 +55,49 @@
        {agents.map((agent) => (
          <Card
            key={agent.id}
            className="border border-purple-200 bg-white p-4 dark:border-purple-800 dark:bg-purple-900"
            className="border border-purple-200 bg-white p-4"
          >
            <div className="flex gap-3">
              <div className="flex h-10 w-10 flex-shrink-0 items-center justify-center rounded-lg bg-purple-100 dark:bg-purple-800">
                <Robot size={20} weight="bold" className="text-purple-600" />
              <div className="relative h-10 w-10 flex-shrink-0 overflow-hidden rounded-lg bg-purple-100">
                {agent.image_url ? (
                  <Image
                    src={agent.image_url}
                    alt={`${agent.name} preview image`}
                    fill
                    className="object-cover"
                  />
                ) : (
                  <div className="flex h-full w-full items-center justify-center">
                    <Robot
                      size={20}
                      weight="bold"
                      className="text-purple-600"
                    />
                  </div>
                )}
              </div>
              <div className="flex-1 space-y-2">
                <div>
                  <Text
                    variant="body"
                    className="font-semibold text-purple-900 dark:text-purple-100"
                    className="font-semibold text-purple-900"
                  >
                    {agent.name}
                  </Text>
                  {agent.version && (
                    <Text
                      variant="small"
                      className="text-purple-600 dark:text-purple-400"
                    >
                    <Text variant="small" className="text-purple-600">
                      v{agent.version}
                    </Text>
                  )}
                </div>
                <Text
                  variant="small"
                  className="line-clamp-2 text-purple-700 dark:text-purple-300"
                >
                <Text variant="small" className="line-clamp-2 text-purple-700">
                  {agent.description}
                </Text>
                {onSelectAgent && (
                  <Button
                    onClick={() => onSelectAgent(agent.id)}
                    variant="ghost"
                    className="mt-2 flex items-center gap-1 p-0 text-sm text-purple-600 hover:text-purple-800 dark:text-purple-400 dark:hover:text-purple-200"
                    className="mt-2 flex items-center gap-1 p-0 text-sm text-purple-600 hover:text-purple-800"
                  >
                    View details
                    <ArrowRight size={16} weight="bold" />
@@ -103,10 +110,7 @@
      </div>

      {totalCount && totalCount > agents.length && (
        <Text
          variant="small"
          className="text-center text-purple-600 dark:text-purple-400"
        >
        <Text variant="small" className="text-center text-purple-600">
          Showing {agents.length} of {totalCount} results
        </Text>
      )}
@@ -0,0 +1,246 @@
"use client";

import { Button } from "@/components/atoms/Button/Button";
import { Card } from "@/components/atoms/Card/Card";
import { Text } from "@/components/atoms/Text/Text";
import { CredentialsInput } from "@/components/contextual/CredentialsInput/CredentialsInput";
import { RunAgentInputs } from "@/components/contextual/RunAgentInputs/RunAgentInputs";

import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import {
  BlockIOCredentialsSubSchema,
  BlockIOSubSchema,
} from "@/lib/autogpt-server-api/types";
import { cn, isEmpty } from "@/lib/utils";
import { PlayIcon, WarningIcon } from "@phosphor-icons/react";
import { useMemo } from "react";
import { useAgentInputsSetup } from "./useAgentInputsSetup";

type LibraryAgentInputSchemaProperties = LibraryAgent["input_schema"] extends {
  properties: infer P;
}
  ? P extends Record<string, BlockIOSubSchema>
    ? P
    : Record<string, BlockIOSubSchema>
  : Record<string, BlockIOSubSchema>;

type LibraryAgentCredentialsInputSchemaProperties =
  LibraryAgent["credentials_input_schema"] extends {
    properties: infer P;
  }
    ? P extends Record<string, BlockIOCredentialsSubSchema>
      ? P
      : Record<string, BlockIOCredentialsSubSchema>
    : Record<string, BlockIOCredentialsSubSchema>;

interface Props {
  agentName?: string;
  inputSchema: LibraryAgentInputSchemaProperties | Record<string, any>;
  credentialsSchema?:
    | LibraryAgentCredentialsInputSchemaProperties
    | Record<string, any>;
  message: string;
  requiredFields?: string[];
  onRun: (
    inputs: Record<string, any>,
    credentials: Record<string, any>,
  ) => void;
  onCancel?: () => void;
  className?: string;
}

export function AgentInputsSetup({
  agentName,
  inputSchema,
  credentialsSchema,
  message,
  requiredFields,
  onRun,
  onCancel,
  className,
}: Props) {
  const { inputValues, setInputValue, credentialsValues, setCredentialsValue } =
    useAgentInputsSetup();

  const inputSchemaObj = useMemo(() => {
    if (!inputSchema) return { properties: {}, required: [] };
    if ("properties" in inputSchema && "type" in inputSchema) {
      return inputSchema as {
        properties: Record<string, any>;
        required?: string[];
      };
    }
    return { properties: inputSchema as Record<string, any>, required: [] };
  }, [inputSchema]);

  const credentialsSchemaObj = useMemo(() => {
    if (!credentialsSchema) return { properties: {}, required: [] };
    if ("properties" in credentialsSchema && "type" in credentialsSchema) {
      return credentialsSchema as {
        properties: Record<string, any>;
        required?: string[];
      };
    }
    return {
      properties: credentialsSchema as Record<string, any>,
      required: [],
    };
  }, [credentialsSchema]);

  const agentInputFields = useMemo(() => {
    const properties = inputSchemaObj.properties || {};
    return Object.fromEntries(
      Object.entries(properties).filter(
        ([_, subSchema]: [string, any]) => !subSchema.hidden,
      ),
    );
  }, [inputSchemaObj]);

  const agentCredentialsInputFields = useMemo(() => {
    return credentialsSchemaObj.properties || {};
  }, [credentialsSchemaObj]);

  const inputFields = Object.entries(agentInputFields);
  const credentialFields = Object.entries(agentCredentialsInputFields);

  const defaultsFromSchema = useMemo(() => {
    const defaults: Record<string, any> = {};
    Object.entries(agentInputFields).forEach(([key, schema]) => {
      if ("default" in schema && schema.default !== undefined) {
        defaults[key] = schema.default;
      }
    });
    return defaults;
  }, [agentInputFields]);

  const defaultsFromCredentialsSchema = useMemo(() => {
    const defaults: Record<string, any> = {};
    Object.entries(agentCredentialsInputFields).forEach(([key, schema]) => {
      if ("default" in schema && schema.default !== undefined) {
        defaults[key] = schema.default;
      }
    });
    return defaults;
  }, [agentCredentialsInputFields]);

  const mergedInputValues = useMemo(() => {
    return { ...defaultsFromSchema, ...inputValues };
  }, [defaultsFromSchema, inputValues]);

  const mergedCredentialsValues = useMemo(() => {
    return { ...defaultsFromCredentialsSchema, ...credentialsValues };
  }, [defaultsFromCredentialsSchema, credentialsValues]);

  const allRequiredInputsAreSet = useMemo(() => {
    const requiredInputs = new Set(
      requiredFields || (inputSchemaObj.required as string[]) || [],
    );
    const nonEmptyInputs = new Set(
      Object.keys(mergedInputValues).filter(
        (k) => !isEmpty(mergedInputValues[k]),
      ),
    );
    const missing = [...requiredInputs].filter(
      (input) => !nonEmptyInputs.has(input),
    );
    return missing.length === 0;
  }, [inputSchemaObj.required, mergedInputValues, requiredFields]);

  const allCredentialsAreSet = useMemo(() => {
    const requiredCredentials = new Set(
      (credentialsSchemaObj.required as string[]) || [],
    );
    if (requiredCredentials.size === 0) {
      return true;
    }
    const missing = [...requiredCredentials].filter((key) => {
      const cred = mergedCredentialsValues[key];
      return !cred || !cred.id;
    });
    return missing.length === 0;
  }, [credentialsSchemaObj.required, mergedCredentialsValues]);

  const canRun = allRequiredInputsAreSet && allCredentialsAreSet;

  function handleRun() {
    if (canRun) {
      onRun(mergedInputValues, mergedCredentialsValues);
    }
  }

  return (
    <Card
      className={cn(
        "mx-4 my-2 overflow-hidden border-blue-200 bg-blue-50",
        className,
      )}
    >
      <div className="flex items-start gap-4 p-6">
        <div className="flex h-12 w-12 flex-shrink-0 items-center justify-center rounded-full bg-blue-500">
          <WarningIcon size={24} weight="bold" className="text-white" />
        </div>
        <div className="flex-1">
          <Text variant="h3" className="mb-2 text-blue-900">
            {agentName ? `Configure ${agentName}` : "Agent Configuration"}
          </Text>
          <Text variant="body" className="mb-4 text-blue-700">
            {message}
          </Text>

          {inputFields.length > 0 && (
            <div className="mb-4 space-y-4">
              {inputFields.map(([key, inputSubSchema]) => (
                <RunAgentInputs
                  key={key}
                  schema={inputSubSchema}
                  value={inputValues[key] ?? inputSubSchema.default}
                  placeholder={inputSubSchema.description}
                  onChange={(value) => setInputValue(key, value)}
                />
              ))}
            </div>
          )}

          {credentialFields.length > 0 && (
            <div className="mb-4 space-y-4">
              {credentialFields.map(([key, schema]) => {
                const requiredCredentials = new Set(
                  (credentialsSchemaObj.required as string[]) || [],
                );
                return (
                  <CredentialsInput
                    key={key}
                    schema={schema}
                    selectedCredentials={credentialsValues[key]}
                    onSelectCredentials={(value) =>
                      setCredentialsValue(key, value)
                    }
                    siblingInputs={mergedInputValues}
                    isOptional={!requiredCredentials.has(key)}
                  />
                );
              })}
            </div>
          )}

          <div className="flex gap-2">
            <Button
              variant="primary"
              size="small"
              onClick={handleRun}
              disabled={!canRun}
            >
              <PlayIcon className="mr-2 h-4 w-4" weight="bold" />
              Run Agent
            </Button>
            {onCancel && (
              <Button variant="outline" size="small" onClick={onCancel}>
                Cancel
              </Button>
            )}
          </div>
        </div>
      </div>
    </Card>
  );
}
@@ -0,0 +1,38 @@
import type { CredentialsMetaInput } from "@/lib/autogpt-server-api/types";
import { useState } from "react";

export function useAgentInputsSetup() {
  const [inputValues, setInputValues] = useState<Record<string, any>>({});
  const [credentialsValues, setCredentialsValues] = useState<
    Record<string, CredentialsMetaInput>
  >({});

  function setInputValue(key: string, value: any) {
    setInputValues((prev) => ({
      ...prev,
      [key]: value,
    }));
  }

  function setCredentialsValue(key: string, value?: CredentialsMetaInput) {
    if (value) {
      setCredentialsValues((prev) => ({
        ...prev,
        [key]: value,
      }));
    } else {
      setCredentialsValues((prev) => {
        const next = { ...prev };
        delete next[key];
        return next;
      });
    }
  }

  return {
    inputValues,
    setInputValue,
    credentialsValues,
    setCredentialsValue,
  };
}
@@ -1,10 +1,9 @@
"use client";

import React from "react";
import { useRouter } from "next/navigation";
import { Button } from "@/components/atoms/Button/Button";
import { SignInIcon, UserPlusIcon, ShieldIcon } from "@phosphor-icons/react";
import { cn } from "@/lib/utils";
import { ShieldIcon, SignInIcon, UserPlusIcon } from "@phosphor-icons/react";
import { useRouter } from "next/navigation";

export interface AuthPromptWidgetProps {
  message: string;
@@ -22,7 +21,7 @@ export function AuthPromptWidget({
  message,
  sessionId,
  agentInfo,
  returnUrl = "/chat",
  returnUrl = "/copilot/chat",
  className,
}: AuthPromptWidgetProps) {
  const router = useRouter();
@@ -54,8 +53,8 @@
  return (
    <div
      className={cn(
        "my-4 overflow-hidden rounded-lg border border-violet-200 dark:border-violet-800",
        "bg-gradient-to-br from-violet-50 to-purple-50 dark:from-violet-950/30 dark:to-purple-950/30",
        "my-4 overflow-hidden rounded-lg border border-violet-200",
        "bg-gradient-to-br from-violet-50 to-purple-50",
        "duration-500 animate-in fade-in-50 slide-in-from-bottom-2",
        className,
      )}
@@ -66,21 +65,19 @@
          <ShieldIcon size={20} weight="fill" className="text-white" />
        </div>
        <div>
          <h3 className="text-lg font-semibold text-neutral-900 dark:text-neutral-100">
          <h3 className="text-lg font-semibold text-neutral-900">
            Authentication Required
          </h3>
          <p className="text-sm text-neutral-600 dark:text-neutral-400">
          <p className="text-sm text-neutral-600">
            Sign in to set up and manage agents
          </p>
        </div>
      </div>

      <div className="mb-5 rounded-md bg-white/50 p-4 dark:bg-neutral-900/50">
        <p className="text-sm text-neutral-700 dark:text-neutral-300">
          {message}
        </p>
      <div className="mb-5 rounded-md bg-white/50 p-4">
        <p className="text-sm text-neutral-700">{message}</p>
        {agentInfo && (
          <div className="mt-3 text-xs text-neutral-600 dark:text-neutral-400">
          <div className="mt-3 text-xs text-neutral-600">
            <p>
              Ready to set up:{" "}
              <span className="font-medium">{agentInfo.name}</span>
@@ -114,7 +111,7 @@ export function AuthPromptWidget({
        </Button>
      </div>

      <div className="mt-4 text-center text-xs text-neutral-500 dark:text-neutral-500">
      <div className="mt-4 text-center text-xs text-neutral-500">
        Your chat session will be preserved after signing in
      </div>
    </div>
@@ -0,0 +1,103 @@
|
||||
import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { useCallback, useEffect, useRef } from "react";
|
||||
import { usePageContext } from "../../usePageContext";
|
||||
import { ChatInput } from "../ChatInput/ChatInput";
|
||||
import { MessageList } from "../MessageList/MessageList";
|
||||
import { QuickActionsWelcome } from "../QuickActionsWelcome/QuickActionsWelcome";
|
||||
import { useChatContainer } from "./useChatContainer";
|
||||
|
||||
export interface ChatContainerProps {
|
||||
sessionId: string | null;
|
||||
initialMessages: SessionDetailResponse["messages"];
|
||||
className?: string;
|
||||
initialPrompt?: string | null;
|
||||
}
|
||||
|
||||
export function ChatContainer({
|
||||
sessionId,
|
||||
initialMessages,
|
||||
className,
|
||||
initialPrompt,
|
||||
}: ChatContainerProps) {
|
||||
const { messages, streamingChunks, isStreaming, sendMessage } =
|
||||
useChatContainer({
|
||||
sessionId,
|
||||
initialMessages,
|
||||
});
|
||||
const { capturePageContext } = usePageContext();
|
||||
const hasSentInitialRef = useRef(false);
|
||||
|
||||
// Wrap sendMessage to automatically capture page context
|
||||
const sendMessageWithContext = useCallback(
|
||||
async (content: string, isUserMessage: boolean = true) => {
|
||||
const context = capturePageContext();
|
||||
await sendMessage(content, isUserMessage, context);
|
||||
},
|
||||
[sendMessage, capturePageContext],
|
||||
);
|
||||
|
||||
useEffect(
|
||||
function handleInitialPrompt() {
|
||||
if (!initialPrompt) return;
|
||||
if (hasSentInitialRef.current) return;
|
||||
if (!sessionId) return;
|
||||
if (messages.length > 0) return;
|
||||
hasSentInitialRef.current = true;
|
||||
void sendMessageWithContext(initialPrompt);
|
||||
},
|
||||
[initialPrompt, messages.length, sendMessageWithContext, sessionId],
|
||||
);
|
||||
|
||||
const quickActions = [
|
||||
"Find agents for social media management",
|
||||
"Show me agents for content creation",
|
||||
"Help me automate my business",
|
||||
"What can you help me with?",
|
||||
];
|
||||
|
||||
return (
|
||||
<div
|
||||
className={cn("flex h-full min-h-0 flex-col", className)}
|
||||
style={{
|
||||
backgroundColor: "#ffffff",
|
||||
backgroundImage:
|
||||
"radial-gradient(#e5e5e5 0.5px, transparent 0.5px), radial-gradient(#e5e5e5 0.5px, #ffffff 0.5px)",
|
||||
backgroundSize: "20px 20px",
|
||||
backgroundPosition: "0 0, 10px 10px",
|
||||
}}
|
||||
>
|
||||
{/* Messages or Welcome Screen */}
|
||||
<div className="flex min-h-0 flex-1 flex-col overflow-hidden pb-24">
|
||||
{messages.length === 0 ? (
|
||||
<QuickActionsWelcome
|
||||
title="Welcome to AutoGPT Copilot"
|
||||
description="Start a conversation to discover and run AI agents."
|
||||
actions={quickActions}
|
||||
onActionClick={sendMessageWithContext}
|
||||
disabled={isStreaming || !sessionId}
|
||||
/>
|
||||
) : (
|
||||
<MessageList
|
||||
messages={messages}
|
||||
streamingChunks={streamingChunks}
|
||||
isStreaming={isStreaming}
|
||||
onSendMessage={sendMessageWithContext}
|
||||
className="flex-1"
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Input - Always visible */}
|
||||
<div className="sticky bottom-0 z-50 border-t border-zinc-200 bg-white p-4">
|
||||
<ChatInput
|
||||
onSend={sendMessageWithContext}
|
||||
disabled={isStreaming || !sessionId}
|
||||
placeholder={
|
||||
sessionId ? "Type your message..." : "Creating session..."
|
||||
}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,14 +1,14 @@
|
||||
import { toast } from "sonner";
|
||||
import type { StreamChunk } from "@/app/(platform)/chat/useChatStream";
|
||||
import { StreamChunk } from "../../useChatStream";
|
||||
import type { HandlerDependencies } from "./useChatContainer.handlers";
|
||||
import {
|
||||
handleError,
|
||||
handleLoginNeeded,
|
||||
handleStreamEnd,
|
||||
handleTextChunk,
|
||||
handleTextEnded,
|
||||
handleToolCallStart,
|
||||
handleToolResponse,
|
||||
handleLoginNeeded,
|
||||
handleStreamEnd,
|
||||
handleError,
|
||||
} from "./useChatContainer.handlers";
|
||||
|
||||
export function createStreamEventDispatcher(
|
||||
@@ -1,5 +1,24 @@
|
||||
import type { ChatMessageData } from "@/app/(platform)/chat/components/ChatMessage/useChatMessage";
|
||||
import type { ToolResult } from "@/types/chat";
|
||||
import type { ChatMessageData } from "../ChatMessage/useChatMessage";
|
||||
|
||||
export function removePageContext(content: string): string {
|
||||
// Remove "Page URL: ..." pattern at start of line (case insensitive, handles various formats)
|
||||
let cleaned = content.replace(/^\s*Page URL:\s*[^\n\r]*/gim, "");
|
||||
|
||||
// Find "User Message:" marker at start of line to preserve the actual user message
|
||||
const userMessageMatch = cleaned.match(/^\s*User Message:\s*([\s\S]*)$/im);
|
||||
if (userMessageMatch) {
|
||||
// If we found "User Message:", extract everything after it
|
||||
cleaned = userMessageMatch[1];
|
||||
} else {
|
||||
// If no "User Message:" marker, remove "Page Content:" and everything after it at start of line
|
||||
cleaned = cleaned.replace(/^\s*Page Content:[\s\S]*$/gim, "");
|
||||
}
|
||||
|
||||
// Clean up extra whitespace and newlines
|
||||
cleaned = cleaned.replace(/\n\s*\n\s*\n+/g, "\n\n").trim();
|
||||
return cleaned;
|
||||
}
|
||||
|
||||
export function createUserMessage(content: string): ChatMessageData {
|
||||
return {
|
||||
@@ -63,6 +82,7 @@ export function isAgentArray(value: unknown): value is Array<{
|
||||
name: string;
|
||||
description: string;
|
||||
version?: number;
|
||||
image_url?: string;
|
||||
}> {
|
||||
if (!Array.isArray(value)) {
|
||||
return false;
|
||||
@@ -77,7 +97,8 @@ export function isAgentArray(value: unknown): value is Array<{
|
||||
typeof item.name === "string" &&
|
||||
"description" in item &&
|
||||
typeof item.description === "string" &&
|
||||
(!("version" in item) || typeof item.version === "number"),
|
||||
(!("version" in item) || typeof item.version === "number") &&
|
||||
(!("image_url" in item) || typeof item.image_url === "string"),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -232,6 +253,7 @@ export function isSetupInfo(value: unknown): value is {
|
||||
|
||||
export function extractCredentialsNeeded(
|
||||
parsedResult: Record<string, unknown>,
|
||||
toolName: string = "run_agent",
|
||||
): ChatMessageData | null {
|
||||
try {
|
||||
const setupInfo = parsedResult?.setup_info as
|
||||
@@ -244,7 +266,7 @@ export function extractCredentialsNeeded(
|
||||
| Record<string, Record<string, unknown>>
|
||||
| undefined;
|
||||
if (missingCreds && Object.keys(missingCreds).length > 0) {
|
||||
const agentName = (setupInfo?.agent_name as string) || "this agent";
|
||||
const agentName = (setupInfo?.agent_name as string) || "this block";
|
||||
const credentials = Object.values(missingCreds).map((credInfo) => ({
|
||||
provider: (credInfo.provider as string) || "unknown",
|
||||
providerName:
|
||||
@@ -264,7 +286,7 @@ export function extractCredentialsNeeded(
|
||||
}));
|
||||
return {
|
||||
type: "credentials_needed",
|
||||
toolName: "run_agent",
|
||||
toolName,
|
||||
credentials,
|
||||
message: `To run ${agentName}, you need to add ${credentials.length === 1 ? "credentials" : `${credentials.length} credentials`}.`,
|
||||
agentName,
|
||||
@@ -277,3 +299,92 @@ export function extractCredentialsNeeded(
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
export function extractInputsNeeded(
|
||||
parsedResult: Record<string, unknown>,
|
||||
toolName: string = "run_agent",
|
||||
): ChatMessageData | null {
|
||||
try {
|
||||
const setupInfo = parsedResult?.setup_info as
|
||||
| Record<string, unknown>
|
||||
| undefined;
|
||||
const requirements = setupInfo?.requirements as
|
||||
| Record<string, unknown>
|
||||
| undefined;
|
||||
const inputs = requirements?.inputs as
|
||||
| Array<Record<string, unknown>>
|
||||
| undefined;
|
||||
const credentials = requirements?.credentials as
|
||||
| Array<Record<string, unknown>>
|
||||
| undefined;
|
||||
|
||||
if (!inputs || inputs.length === 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const agentName = (setupInfo?.agent_name as string) || "this agent";
|
||||
const agentId = parsedResult?.graph_id as string | undefined;
|
||||
const graphVersion = parsedResult?.graph_version as number | undefined;
|
||||
|
||||
const properties: Record<string, any> = {};
|
||||
const requiredProps: string[] = [];
|
||||
inputs.forEach((input) => {
|
||||
const name = input.name as string;
|
||||
if (name) {
|
||||
properties[name] = {
|
||||
title: input.name as string,
|
||||
description: (input.description as string) || "",
|
||||
type: (input.type as string) || "string",
|
||||
default: input.default,
|
||||
enum: input.options,
|
||||
format: input.format,
|
||||
};
|
||||
if ((input.required as boolean) === true) {
|
||||
requiredProps.push(name);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
const inputSchema: Record<string, any> = {
|
||||
type: "object",
|
||||
properties,
|
||||
};
|
||||
if (requiredProps.length > 0) {
|
||||
inputSchema.required = requiredProps;
|
||||
}
|
||||
|
||||
const credentialsSchema: Record<string, any> = {};
|
||||
if (credentials && credentials.length > 0) {
|
||||
credentials.forEach((cred) => {
|
||||
const id = cred.id as string;
|
||||
if (id) {
|
||||
credentialsSchema[id] = {
|
||||
type: "object",
|
||||
properties: {},
|
||||
credentials_provider: [cred.provider as string],
|
||||
credentials_types: [(cred.type as string) || "api_key"],
|
||||
credentials_scopes: cred.scopes as string[] | undefined,
|
||||
};
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
return {
|
||||
type: "inputs_needed",
|
||||
toolName,
|
||||
agentName,
|
||||
agentId,
|
||||
graphVersion,
|
||||
inputSchema,
|
||||
credentialsSchema:
|
||||
Object.keys(credentialsSchema).length > 0
|
||||
? credentialsSchema
|
||||
: undefined,
|
||||
message: `Please provide the required inputs to run ${agentName}.`,
|
||||
timestamp: new Date(),
|
||||
};
|
||||
} catch (err) {
|
||||
console.error("Failed to extract inputs from setup info:", err);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
@@ -1,13 +1,18 @@
|
||||
import type { Dispatch, SetStateAction, MutableRefObject } from "react";
|
||||
import type { StreamChunk } from "@/app/(platform)/chat/useChatStream";
|
||||
import type { ChatMessageData } from "@/app/(platform)/chat/components/ChatMessage/useChatMessage";
|
||||
import { parseToolResponse, extractCredentialsNeeded } from "./helpers";
|
||||
import type { Dispatch, MutableRefObject, SetStateAction } from "react";
|
||||
import { StreamChunk } from "../../useChatStream";
|
||||
import type { ChatMessageData } from "../ChatMessage/useChatMessage";
|
||||
import {
|
||||
extractCredentialsNeeded,
|
||||
extractInputsNeeded,
|
||||
parseToolResponse,
|
||||
} from "./helpers";
|
||||
|
||||
export interface HandlerDependencies {
|
||||
setHasTextChunks: Dispatch<SetStateAction<boolean>>;
|
||||
setStreamingChunks: Dispatch<SetStateAction<string[]>>;
|
||||
streamingChunksRef: MutableRefObject<string[]>;
|
||||
setMessages: Dispatch<SetStateAction<ChatMessageData[]>>;
|
||||
setIsStreamingInitiated: Dispatch<SetStateAction<boolean>>;
|
||||
sessionId: string;
|
||||
}
|
||||
|
||||
@@ -100,11 +105,18 @@ export function handleToolResponse(
|
||||
parsedResult = null;
|
||||
}
|
||||
if (
|
||||
chunk.tool_name === "run_agent" &&
|
||||
(chunk.tool_name === "run_agent" || chunk.tool_name === "run_block") &&
|
||||
chunk.success &&
|
||||
parsedResult?.type === "setup_requirements"
|
||||
) {
|
||||
const credentialsMessage = extractCredentialsNeeded(parsedResult);
|
||||
const inputsMessage = extractInputsNeeded(parsedResult, chunk.tool_name);
|
||||
if (inputsMessage) {
|
||||
deps.setMessages((prev) => [...prev, inputsMessage]);
|
||||
}
|
||||
const credentialsMessage = extractCredentialsNeeded(
|
||||
parsedResult,
|
||||
chunk.tool_name,
|
||||
);
|
||||
if (credentialsMessage) {
|
||||
deps.setMessages((prev) => [...prev, credentialsMessage]);
|
||||
}
|
||||
@@ -197,10 +209,15 @@ export function handleStreamEnd(
|
||||
deps.setStreamingChunks([]);
|
||||
deps.streamingChunksRef.current = [];
|
||||
deps.setHasTextChunks(false);
|
||||
deps.setIsStreamingInitiated(false);
|
||||
console.log("[Stream End] Stream complete, messages in local state");
|
||||
}
|
||||
|
||||
export function handleError(chunk: StreamChunk, _deps: HandlerDependencies) {
|
||||
export function handleError(chunk: StreamChunk, deps: HandlerDependencies) {
|
||||
const errorMessage = chunk.message || chunk.content || "An error occurred";
|
||||
console.error("Stream error:", errorMessage);
|
||||
deps.setIsStreamingInitiated(false);
|
||||
deps.setHasTextChunks(false);
|
||||
deps.setStreamingChunks([]);
|
||||
deps.streamingChunksRef.current = [];
|
||||
}
|
||||
@@ -0,0 +1,206 @@
|
||||
import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse";
|
||||
import { useCallback, useMemo, useRef, useState } from "react";
|
||||
import { toast } from "sonner";
|
||||
import { useChatStream } from "../../useChatStream";
|
||||
import type { ChatMessageData } from "../ChatMessage/useChatMessage";
|
||||
import { createStreamEventDispatcher } from "./createStreamEventDispatcher";
|
||||
import {
|
||||
createUserMessage,
|
||||
filterAuthMessages,
|
||||
isToolCallArray,
|
||||
isValidMessage,
|
||||
parseToolResponse,
|
||||
removePageContext,
|
||||
} from "./helpers";
|
||||
|
||||
interface Args {
|
||||
sessionId: string | null;
|
||||
initialMessages: SessionDetailResponse["messages"];
|
||||
}
|
||||
|
||||
export function useChatContainer({ sessionId, initialMessages }: Args) {
|
||||
const [messages, setMessages] = useState<ChatMessageData[]>([]);
|
||||
const [streamingChunks, setStreamingChunks] = useState<string[]>([]);
|
||||
const [hasTextChunks, setHasTextChunks] = useState(false);
|
||||
const [isStreamingInitiated, setIsStreamingInitiated] = useState(false);
|
||||
const streamingChunksRef = useRef<string[]>([]);
|
||||
const { error, sendMessage: sendStreamMessage } = useChatStream();
|
||||
const isStreaming = isStreamingInitiated || hasTextChunks;
|
||||
|
||||
const allMessages = useMemo(() => {
|
||||
const processedInitialMessages: ChatMessageData[] = [];
|
||||
// Map to track tool calls by their ID so we can look up tool names for tool responses
|
||||
const toolCallMap = new Map<string, string>();
|
||||
|
||||
for (const msg of initialMessages) {
|
||||
if (!isValidMessage(msg)) {
|
||||
console.warn("Invalid message structure from backend:", msg);
|
||||
continue;
|
||||
}
|
||||
|
||||
let content = String(msg.content || "");
|
||||
const role = String(msg.role || "assistant").toLowerCase();
|
||||
const toolCalls = msg.tool_calls;
|
||||
const timestamp = msg.timestamp
|
||||
? new Date(msg.timestamp as string)
|
||||
: undefined;
|
||||
|
||||
// Remove page context from user messages when loading existing sessions
|
||||
if (role === "user") {
|
||||
content = removePageContext(content);
|
||||
// Skip user messages that become empty after removing page context
|
||||
if (!content.trim()) {
|
||||
continue;
|
||||
}
|
||||
processedInitialMessages.push({
|
||||
type: "message",
|
||||
role: "user",
|
||||
content,
|
||||
timestamp,
|
||||
});
|
||||
continue;
|
||||
}
|
||||
|
||||
// Handle assistant messages first (before tool messages) to build tool call map
|
||||
if (role === "assistant") {
|
||||
// Strip <thinking> tags from content
|
||||
content = content
|
||||
.replace(/<thinking>[\s\S]*?<\/thinking>/gi, "")
|
||||
.trim();
|
||||
|
||||
// If assistant has tool calls, create tool_call messages for each
|
||||
if (toolCalls && isToolCallArray(toolCalls) && toolCalls.length > 0) {
|
||||
for (const toolCall of toolCalls) {
|
||||
const toolName = toolCall.function.name;
|
||||
const toolId = toolCall.id;
|
||||
// Store tool name for later lookup
|
||||
toolCallMap.set(toolId, toolName);
|
||||
|
||||
try {
|
||||
const args = JSON.parse(toolCall.function.arguments || "{}");
|
||||
processedInitialMessages.push({
|
||||
type: "tool_call",
|
||||
toolId,
|
||||
toolName,
|
||||
arguments: args,
|
||||
timestamp,
|
||||
});
|
||||
} catch (err) {
|
||||
console.warn("Failed to parse tool call arguments:", err);
|
||||
processedInitialMessages.push({
|
||||
type: "tool_call",
|
||||
toolId,
|
||||
toolName,
|
||||
arguments: {},
|
||||
timestamp,
|
||||
});
|
||||
}
|
||||
}
|
||||
// Only add assistant message if there's content after stripping thinking tags
|
||||
if (content.trim()) {
|
||||
processedInitialMessages.push({
|
||||
type: "message",
|
||||
role: "assistant",
|
||||
content,
|
||||
timestamp,
|
||||
});
|
||||
}
|
||||
} else if (content.trim()) {
|
||||
// Assistant message without tool calls, but with content
|
||||
processedInitialMessages.push({
|
||||
type: "message",
|
||||
role: "assistant",
|
||||
content,
|
||||
timestamp,
|
||||
});
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
// Handle tool messages - look up tool name from tool call map
|
||||
if (role === "tool") {
|
||||
const toolCallId = (msg.tool_call_id as string) || "";
|
||||
const toolName = toolCallMap.get(toolCallId) || "unknown";
|
||||
const toolResponse = parseToolResponse(
|
||||
content,
|
||||
toolCallId,
|
||||
toolName,
|
||||
timestamp,
|
||||
);
|
||||
if (toolResponse) {
|
||||
processedInitialMessages.push(toolResponse);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
// Handle other message types (system, etc.)
|
||||
if (content.trim()) {
|
||||
processedInitialMessages.push({
|
||||
type: "message",
|
||||
role: role as "user" | "assistant" | "system",
|
||||
content,
|
||||
timestamp,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return [...processedInitialMessages, ...messages];
|
||||
}, [initialMessages, messages]);
|
||||
|
||||
const sendMessage = useCallback(
|
||||
async function sendMessage(
|
||||
content: string,
|
||||
isUserMessage: boolean = true,
|
||||
context?: { url: string; content: string },
|
||||
) {
|
||||
if (!sessionId) {
|
||||
console.error("Cannot send message: no session ID");
|
||||
return;
|
||||
}
|
||||
if (isUserMessage) {
|
||||
const userMessage = createUserMessage(content);
|
||||
setMessages((prev) => [...filterAuthMessages(prev), userMessage]);
|
||||
} else {
|
||||
setMessages((prev) => filterAuthMessages(prev));
|
||||
}
|
||||
setStreamingChunks([]);
|
||||
streamingChunksRef.current = [];
|
||||
setHasTextChunks(false);
|
||||
setIsStreamingInitiated(true);
|
||||
const dispatcher = createStreamEventDispatcher({
|
||||
setHasTextChunks,
|
||||
setStreamingChunks,
|
||||
streamingChunksRef,
|
||||
setMessages,
|
||||
sessionId,
|
||||
setIsStreamingInitiated,
|
||||
});
|
||||
try {
|
||||
await sendStreamMessage(
|
||||
sessionId,
|
||||
content,
|
||||
dispatcher,
|
||||
isUserMessage,
|
||||
context,
|
||||
);
|
||||
} catch (err) {
|
||||
console.error("Failed to send message:", err);
|
||||
setIsStreamingInitiated(false);
|
||||
const errorMessage =
|
||||
err instanceof Error ? err.message : "Failed to send message";
|
||||
toast.error("Failed to send message", {
|
||||
description: errorMessage,
|
||||
});
|
||||
}
|
||||
},
|
||||
[sessionId, sendStreamMessage],
|
||||
);
|
||||
|
||||
return {
|
||||
messages: allMessages,
|
||||
streamingChunks,
|
||||
isStreaming,
|
||||
error,
|
||||
sendMessage,
|
||||
};
|
||||
}
|
||||
@@ -0,0 +1,149 @@
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { CredentialsInput } from "@/components/contextual/CredentialsInput/CredentialsInput";
|
||||
import type { BlockIOCredentialsSubSchema } from "@/lib/autogpt-server-api";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { CheckIcon, RobotIcon, WarningIcon } from "@phosphor-icons/react";
|
||||
import { useEffect, useRef } from "react";
|
||||
import { useChatCredentialsSetup } from "./useChatCredentialsSetup";
|
||||
|
||||
export interface CredentialInfo {
|
||||
provider: string;
|
||||
providerName: string;
|
||||
credentialType: "api_key" | "oauth2" | "user_password" | "host_scoped";
|
||||
title: string;
|
||||
scopes?: string[];
|
||||
}
|
||||
|
||||
interface Props {
|
||||
credentials: CredentialInfo[];
|
||||
agentName?: string;
|
||||
message: string;
|
||||
onAllCredentialsComplete: () => void;
|
||||
onCancel: () => void;
|
||||
className?: string;
|
||||
}
|
||||
|
||||
function createSchemaFromCredentialInfo(
|
||||
credential: CredentialInfo,
|
||||
): BlockIOCredentialsSubSchema {
|
||||
return {
|
||||
type: "object",
|
||||
properties: {},
|
||||
credentials_provider: [credential.provider],
|
||||
credentials_types: [credential.credentialType],
|
||||
credentials_scopes: credential.scopes,
|
||||
discriminator: undefined,
|
||||
discriminator_mapping: undefined,
|
||||
discriminator_values: undefined,
|
||||
};
|
||||
}
|
||||
|
||||
export function ChatCredentialsSetup({
|
||||
credentials,
|
||||
agentName: _agentName,
|
||||
message,
|
||||
onAllCredentialsComplete,
|
||||
onCancel: _onCancel,
|
||||
}: Props) {
|
||||
const { selectedCredentials, isAllComplete, handleCredentialSelect } =
|
||||
useChatCredentialsSetup(credentials);
|
||||
|
||||
// Track if we've already called completion to prevent double calls
|
||||
const hasCalledCompleteRef = useRef(false);
|
||||
|
||||
// Reset the completion flag when credentials change (new credential setup flow)
|
||||
useEffect(
|
||||
function resetCompletionFlag() {
|
||||
hasCalledCompleteRef.current = false;
|
||||
},
|
||||
[credentials],
|
||||
);
|
||||
|
||||
// Auto-call completion when all credentials are configured
|
||||
useEffect(
|
||||
function autoCompleteWhenReady() {
|
||||
if (isAllComplete && !hasCalledCompleteRef.current) {
|
||||
hasCalledCompleteRef.current = true;
|
||||
onAllCredentialsComplete();
|
||||
}
|
||||
},
|
||||
[isAllComplete, onAllCredentialsComplete],
|
||||
);
|
||||
|
||||
return (
|
||||
<div className="group relative flex w-full justify-start gap-3 px-4 py-3">
|
||||
<div className="flex w-full max-w-3xl gap-3">
|
||||
<div className="flex-shrink-0">
|
||||
<div className="flex h-7 w-7 items-center justify-center rounded-lg bg-indigo-500">
|
||||
<RobotIcon className="h-4 w-4 text-indigo-50" />
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="flex min-w-0 flex-1 flex-col">
|
||||
<div className="group relative min-w-20 overflow-hidden rounded-xl border border-slate-100 bg-slate-50/20 px-6 py-2.5 text-sm leading-relaxed backdrop-blur-xl">
|
||||
<div className="absolute inset-0 bg-gradient-to-br from-slate-200/20 via-slate-300/10 to-transparent" />
|
||||
<div className="relative z-10 space-y-3 text-slate-900">
|
||||
<div>
|
||||
<Text variant="h4" className="mb-1 text-slate-900">
|
||||
Credentials Required
|
||||
</Text>
|
||||
<Text variant="small" className="text-slate-600">
|
||||
{message}
|
||||
</Text>
|
||||
</div>
|
||||
|
||||
<div className="space-y-3">
|
||||
{credentials.map((cred, index) => {
|
||||
const schema = createSchemaFromCredentialInfo(cred);
|
||||
const isSelected = !!selectedCredentials[cred.provider];
|
||||
|
||||
return (
|
||||
<div
|
||||
key={`${cred.provider}-${index}`}
|
||||
className={cn(
|
||||
"relative rounded-lg border p-3",
|
||||
isSelected
|
||||
? "border-green-500 bg-green-50/50"
|
||||
: "border-slate-200 bg-white/50",
|
||||
)}
|
||||
>
|
||||
<div className="mb-2 flex items-center gap-2">
|
||||
{isSelected ? (
|
||||
<CheckIcon
|
||||
size={16}
|
||||
className="text-green-500"
|
||||
weight="bold"
|
||||
/>
|
||||
) : (
|
||||
<WarningIcon
|
||||
size={16}
|
||||
className="text-slate-500"
|
||||
weight="bold"
|
||||
/>
|
||||
)}
|
||||
<Text
|
||||
variant="small"
|
||||
className="font-semibold text-slate-900"
|
||||
>
|
||||
{cred.providerName}
|
||||
</Text>
|
||||
</div>
|
||||
|
||||
<CredentialsInput
|
||||
schema={schema}
|
||||
selectedCredentials={selectedCredentials[cred.provider]}
|
||||
onSelectCredentials={(credMeta) =>
|
||||
handleCredentialSelect(cred.provider, credMeta)
|
||||
}
|
||||
/>
|
||||
</div>
|
||||
);
|
||||
})}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user