Add LLM model deletion and migration feature

Introduces backend and frontend support for deleting LLM models with automatic workflow migration to a replacement model. Adds API endpoints, database logic, response models, frontend modal, and actions for safe deletion, including usage count display and error handling. Updates table components to use new modal and refactors table imports.
This commit is contained in:
Bentlybro
2025-12-02 14:41:13 +00:00
parent dfc42003a1
commit 7fe6b576ae
12 changed files with 1758 additions and 37 deletions

View File

@@ -143,3 +143,71 @@ async def toggle_llm_model(
detail="Failed to toggle model availability",
) from exc
@router.get(
    "/models/{model_id}/usage",
    summary="Get model usage count",
    response_model=llm_model.LlmModelUsageResponse,
)
async def get_llm_model_usage(model_id: str):
    """Get the number of workflow nodes using this model."""
    try:
        return await llm_db.get_model_usage(model_id=model_id)
    except ValueError as not_found:
        # The DB layer raises ValueError when the model id does not exist.
        raise fastapi.HTTPException(
            status_code=404, detail=str(not_found)
        ) from not_found
    except Exception as unexpected:
        logger.exception("Failed to get model usage %s: %s", model_id, unexpected)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to get model usage",
        ) from unexpected
@router.delete(
    "/models/{model_id}",
    summary="Delete LLM model and migrate workflows",
    response_model=llm_model.DeleteLlmModelResponse,
)
async def delete_llm_model(
    model_id: str,
    replacement_model_slug: str = fastapi.Query(
        ...,
        description="Slug of the model to migrate existing workflows to"
    ),
):
    """
    Delete a model and automatically migrate all workflows using it to a replacement model.
    This endpoint:
    1. Validates the replacement model exists and is enabled
    2. Counts how many workflow nodes use the model being deleted
    3. Updates all AgentNode.constantInput->model fields to the replacement
    4. Deletes the model record
    5. Refreshes all caches and notifies executors
    Example: DELETE /admin/llm/models/{id}?replacement_model_slug=gpt-4o
    """
    # NOTE: _refresh_runtime_state() runs inside the try block on purpose:
    # a refresh failure after deletion is reported as the same 500 as a
    # failed delete, matching the original contract.
    try:
        outcome = await llm_db.delete_model(
            model_id=model_id,
            replacement_model_slug=replacement_model_slug,
        )
        await _refresh_runtime_state()
        logger.info(
            "Deleted model '%s' and migrated %d nodes to '%s'",
            outcome.deleted_model_slug,
            outcome.nodes_migrated,
            outcome.replacement_model_slug,
        )
        return outcome
    except ValueError as invalid:
        # Validation errors (model not found, replacement invalid, etc.)
        logger.warning("Model deletion validation failed: %s", invalid)
        raise fastapi.HTTPException(status_code=400, detail=str(invalid)) from invalid
    except Exception as unexpected:
        logger.exception("Failed to delete LLM model %s: %s", model_id, unexpected)
        raise fastapi.HTTPException(
            status_code=500,
            detail="Failed to delete model and migrate workflows",
        ) from unexpected

View File

@@ -0,0 +1,409 @@
from unittest.mock import AsyncMock, MagicMock
import fastapi
import fastapi.testclient
import pytest
import pytest_mock
from autogpt_libs.auth.jwt_utils import get_jwt_payload
from pytest_snapshot.plugin import Snapshot
import backend.server.v2.admin.llm_routes as llm_routes
# Module-level test app: mount only the admin LLM router so each request in
# this module exercises the real route wiring through TestClient.
app = fastapi.FastAPI()
app.include_router(llm_routes.router)
client = fastapi.testclient.TestClient(app)


@pytest.fixture(autouse=True)
def setup_app_admin_auth(mock_jwt_admin):
    """Setup admin auth overrides for all tests in this module"""
    # Swap the JWT dependency for the admin mock before each test, then clear
    # all overrides afterwards so the shared `app` stays clean between tests.
    # NOTE(review): `mock_jwt_admin` is presumably a conftest fixture exposing
    # a "get_jwt_payload" callable — confirm against conftest.py.
    app.dependency_overrides[get_jwt_payload] = mock_jwt_admin["get_jwt_payload"]
    yield
    app.dependency_overrides.clear()
def test_list_llm_providers_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test successful listing of LLM providers"""
    # Mock the database function
    # NOTE: this payload is also pinned byte-for-byte by the snapshot file
    # below — keep the two in sync when editing.
    mock_providers = [
        {
            "id": "provider-1",
            "name": "openai",
            "display_name": "OpenAI",
            "description": "OpenAI LLM provider",
            "supports_tools": True,
            "supports_json_output": True,
            "supports_reasoning": False,
            "supports_parallel_tool": True,
            "metadata": {},
            "models": [],
        },
        {
            "id": "provider-2",
            "name": "anthropic",
            "display_name": "Anthropic",
            "description": "Anthropic LLM provider",
            "supports_tools": True,
            "supports_json_output": True,
            "supports_reasoning": False,
            "supports_parallel_tool": True,
            "metadata": {},
            "models": [],
        },
    ]
    mocker.patch(
        "backend.server.v2.admin.llm_routes.get_all_providers",
        new=AsyncMock(return_value=mock_providers),
    )
    response = client.get("/admin/llm/providers")
    assert response.status_code == 200
    response_data = response.json()
    assert len(response_data["providers"]) == 2
    assert response_data["providers"][0]["name"] == "openai"
    # Snapshot test the response
    configured_snapshot.assert_match(response_data, "list_llm_providers_success.json")
def test_list_llm_models_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test successful listing of LLM models"""
    # Mock the database function
    # Payload shape mirrors the LlmModel response model, including nested costs.
    mock_models = [
        {
            "id": "model-1",
            "slug": "gpt-4o",
            "display_name": "GPT-4o",
            "description": "GPT-4 Optimized",
            "provider_id": "provider-1",
            "context_window": 128000,
            "max_output_tokens": 16384,
            "is_enabled": True,
            "capabilities": {},
            "metadata": {},
            "costs": [
                {
                    "id": "cost-1",
                    "credit_cost": 10,
                    "credential_provider": "openai",
                    "metadata": {},
                }
            ],
        }
    ]
    mocker.patch(
        "backend.server.v2.admin.llm_routes.get_all_models",
        new=AsyncMock(return_value=mock_models),
    )
    response = client.get("/admin/llm/models")
    assert response.status_code == 200
    response_data = response.json()
    assert len(response_data["models"]) == 1
    assert response_data["models"][0]["slug"] == "gpt-4o"
    # Snapshot test the response
    configured_snapshot.assert_match(response_data, "list_llm_models_success.json")
def test_create_llm_provider_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test successful creation of LLM provider"""
    # What the mocked DB upsert returns (echoed back by the route).
    mock_provider = {
        "id": "new-provider-id",
        "name": "groq",
        "display_name": "Groq",
        "description": "Groq LLM provider",
        "supports_tools": True,
        "supports_json_output": True,
        "supports_reasoning": False,
        "supports_parallel_tool": False,
        "metadata": {},
    }
    mocker.patch(
        "backend.server.v2.admin.llm_routes.upsert_provider",
        new=AsyncMock(return_value=mock_provider),
    )
    # The route should push a registry refresh after a successful write.
    mock_notify = mocker.patch(
        "backend.server.v2.admin.llm_routes.notify_llm_registry_refresh",
        new=AsyncMock(),
    )
    request_data = {
        "name": "groq",
        "display_name": "Groq",
        "description": "Groq LLM provider",
        "supports_tools": True,
        "supports_json_output": True,
        "supports_reasoning": False,
        "supports_parallel_tool": False,
        "metadata": {},
    }
    response = client.post("/admin/llm/providers", json=request_data)
    assert response.status_code == 200
    response_data = response.json()
    assert response_data["name"] == "groq"
    assert response_data["display_name"] == "Groq"
    # Verify notification was sent
    mock_notify.assert_called_once()
    # Snapshot test the response
    configured_snapshot.assert_match(
        response_data, "create_llm_provider_success.json"
    )
def test_create_llm_model_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test successful creation of LLM model"""
    # Mocked DB result; includes server-generated ids absent from the request.
    mock_model = {
        "id": "new-model-id",
        "slug": "gpt-4.1-mini",
        "display_name": "GPT-4.1 Mini",
        "description": "Latest GPT-4.1 Mini model",
        "provider_id": "provider-1",
        "context_window": 128000,
        "max_output_tokens": 16384,
        "is_enabled": True,
        "capabilities": {},
        "metadata": {},
        "costs": [
            {
                "id": "cost-id",
                "credit_cost": 5,
                "credential_provider": "openai",
                "metadata": {},
            }
        ],
    }
    mocker.patch(
        "backend.server.v2.admin.llm_routes.create_model",
        new=AsyncMock(return_value=mock_model),
    )
    mock_notify = mocker.patch(
        "backend.server.v2.admin.llm_routes.notify_llm_registry_refresh",
        new=AsyncMock(),
    )
    # Request body matches CreateLlmModelRequest (costs entries have no "id").
    request_data = {
        "slug": "gpt-4.1-mini",
        "display_name": "GPT-4.1 Mini",
        "description": "Latest GPT-4.1 Mini model",
        "provider_id": "provider-1",
        "context_window": 128000,
        "max_output_tokens": 16384,
        "is_enabled": True,
        "capabilities": {},
        "metadata": {},
        "costs": [
            {
                "credit_cost": 5,
                "credential_provider": "openai",
                "metadata": {},
            }
        ],
    }
    response = client.post("/admin/llm/models", json=request_data)
    assert response.status_code == 200
    response_data = response.json()
    assert response_data["slug"] == "gpt-4.1-mini"
    assert response_data["is_enabled"] is True
    # Verify notification was sent
    mock_notify.assert_called_once()
    # Snapshot test the response
    configured_snapshot.assert_match(response_data, "create_llm_model_success.json")
def test_update_llm_model_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test successful update of LLM model"""
    # Mocked post-update record returned by the DB layer.
    mock_model = {
        "id": "model-1",
        "slug": "gpt-4o",
        "display_name": "GPT-4o Updated",
        "description": "Updated description",
        "provider_id": "provider-1",
        "context_window": 256000,
        "max_output_tokens": 32768,
        "is_enabled": True,
        "capabilities": {},
        "metadata": {},
        "costs": [
            {
                "id": "cost-1",
                "credit_cost": 15,
                "credential_provider": "openai",
                "metadata": {},
            }
        ],
    }
    mocker.patch(
        "backend.server.v2.admin.llm_routes.update_model",
        new=AsyncMock(return_value=mock_model),
    )
    mock_notify = mocker.patch(
        "backend.server.v2.admin.llm_routes.notify_llm_registry_refresh",
        new=AsyncMock(),
    )
    # Partial update: only the fields being changed are sent.
    request_data = {
        "display_name": "GPT-4o Updated",
        "description": "Updated description",
        "context_window": 256000,
        "max_output_tokens": 32768,
    }
    response = client.patch("/admin/llm/models/model-1", json=request_data)
    assert response.status_code == 200
    response_data = response.json()
    assert response_data["display_name"] == "GPT-4o Updated"
    assert response_data["context_window"] == 256000
    # Verify notification was sent
    mock_notify.assert_called_once()
    # Snapshot test the response
    configured_snapshot.assert_match(response_data, "update_llm_model_success.json")
def test_toggle_llm_model_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test successful toggling of LLM model enabled status"""
    # Mocked record after the toggle: is_enabled flipped to False.
    mock_model = {
        "id": "model-1",
        "slug": "gpt-4o",
        "display_name": "GPT-4o",
        "description": "GPT-4 Optimized",
        "provider_id": "provider-1",
        "context_window": 128000,
        "max_output_tokens": 16384,
        "is_enabled": False,
        "capabilities": {},
        "metadata": {},
        "costs": [],
    }
    mocker.patch(
        "backend.server.v2.admin.llm_routes.toggle_model",
        new=AsyncMock(return_value=mock_model),
    )
    mock_notify = mocker.patch(
        "backend.server.v2.admin.llm_routes.notify_llm_registry_refresh",
        new=AsyncMock(),
    )
    request_data = {"is_enabled": False}
    response = client.patch("/admin/llm/models/model-1/toggle", json=request_data)
    assert response.status_code == 200
    response_data = response.json()
    assert response_data["is_enabled"] is False
    # Verify notification was sent
    mock_notify.assert_called_once()
    # Snapshot test the response
    configured_snapshot.assert_match(response_data, "toggle_llm_model_success.json")
def test_delete_llm_model_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test successful deletion of LLM model with migration"""
    from types import SimpleNamespace

    mock_response = {
        "deleted_model_slug": "gpt-3.5-turbo",
        "deleted_model_display_name": "GPT-3.5 Turbo",
        "replacement_model_slug": "gpt-4o-mini",
        "nodes_migrated": 42,
        "message": "Successfully deleted model 'GPT-3.5 Turbo' (gpt-3.5-turbo) "
        "and migrated 42 workflow node(s) to 'gpt-4o-mini'.",
    }
    # SimpleNamespace gives attribute-style access like the real
    # DeleteLlmModelResponse. The previous `type('obj', (object,), dict)()`
    # hack built a throwaway class whose dict entries became *class*
    # attributes — fragile (keys could shadow object's own attributes) and
    # unidiomatic.
    mocker.patch(
        "backend.server.v2.admin.llm_routes.llm_db.delete_model",
        new=AsyncMock(return_value=SimpleNamespace(**mock_response)),
    )
    mock_refresh = mocker.patch(
        "backend.server.v2.admin.llm_routes._refresh_runtime_state",
        new=AsyncMock(),
    )
    response = client.delete(
        "/admin/llm/models/model-1?replacement_model_slug=gpt-4o-mini"
    )
    assert response.status_code == 200
    response_data = response.json()
    assert response_data["deleted_model_slug"] == "gpt-3.5-turbo"
    assert response_data["nodes_migrated"] == 42
    assert response_data["replacement_model_slug"] == "gpt-4o-mini"
    # Verify refresh was called
    mock_refresh.assert_called_once()
    # Snapshot test the response
    configured_snapshot.assert_match(response_data, "delete_llm_model_success.json")
def test_delete_llm_model_validation_error(
    mocker: pytest_mock.MockFixture,
) -> None:
    """DB-layer validation failures must surface as HTTP 400 with the message."""
    failure = ValueError("Replacement model 'invalid' not found")
    mocker.patch(
        "backend.server.v2.admin.llm_routes.llm_db.delete_model",
        new=AsyncMock(side_effect=failure),
    )
    response = client.delete(
        "/admin/llm/models/model-1?replacement_model_slug=invalid"
    )
    assert response.status_code == 400
    assert "Replacement model 'invalid' not found" in response.json()["detail"]
def test_delete_llm_model_missing_replacement() -> None:
    """Test deletion fails when replacement_model_slug is not provided.

    No mocking is needed: FastAPI rejects the request for the missing
    required query parameter before any route or DB code runs (the unused
    `mocker` fixture parameter has been removed).
    """
    response = client.delete("/admin/llm/models/model-1")
    # FastAPI will return 422 for missing required query params
    assert response.status_code == 422

View File

@@ -203,3 +203,120 @@ async def toggle_model(model_id: str, is_enabled: bool) -> llm_model.LlmModel:
)
return _map_model(record)
async def get_model_usage(model_id: str) -> llm_model.LlmModelUsageResponse:
    """Get usage count for a model."""
    # Function-scope import mirrors delete_model below; presumably avoids an
    # import cycle at module load time — TODO confirm.
    import prisma as prisma_module
    model = await prisma.models.LlmModel.prisma().find_unique(
        where={"id": model_id}
    )
    if not model:
        # Routes translate this into a 404.
        raise ValueError(f"Model with id '{model_id}' not found")
    # Nodes reference a model by slug inside the JSON "constantInput" column,
    # hence the raw SQL with a jsonb extraction instead of a Prisma filter.
    count_result = await prisma_module.get_client().query_raw(
        """
        SELECT COUNT(*) as count
        FROM "AgentNode"
        WHERE "constantInput"::jsonb->>'model' = $1
        """,
        model.slug
    )
    # query_raw returns a list of row dicts; COUNT(*) always yields one row,
    # but guard against an empty result anyway.
    node_count = int(count_result[0]["count"]) if count_result else 0
    return llm_model.LlmModelUsageResponse(
        model_slug=model.slug,
        node_count=node_count
    )
async def delete_model(
    model_id: str,
    replacement_model_slug: str
) -> llm_model.DeleteLlmModelResponse:
    """
    Delete a model and migrate all AgentNodes using it to a replacement model.

    Steps (NOT atomic — each statement is its own round-trip):
    1. Validates the model exists
    2. Validates the replacement model exists, is enabled, and is not the
       model being deleted
    3. Migrates all AgentNode.constantInput->model values to the replacement,
       recording how many rows were actually updated
    4. Deletes the LlmModel record (CASCADE deletes costs)

    NOTE(review): if the process dies between steps 3 and 4, nodes are
    migrated but the model still exists; re-running the delete is harmless
    and completes the operation. Wrapping steps 3-4 in a Prisma interactive
    transaction would close that window entirely.

    Args:
        model_id: UUID of the model to delete
        replacement_model_slug: Slug of the model to migrate to
    Returns:
        DeleteLlmModelResponse with migration stats
    Raises:
        ValueError: If model not found, replacement not found, replacement is
            disabled, or replacement is the model being deleted
    """
    import prisma as prisma_module
    # 1. Get the model being deleted. Costs are removed by ON DELETE CASCADE,
    # so there is no need to fetch them here.
    model = await prisma.models.LlmModel.prisma().find_unique(
        where={"id": model_id}
    )
    if not model:
        raise ValueError(f"Model with id '{model_id}' not found")
    deleted_slug = model.slug
    deleted_display_name = model.displayName
    # 2. Validate replacement model exists, is enabled, and is a different model
    replacement = await prisma.models.LlmModel.prisma().find_unique(
        where={"slug": replacement_model_slug}
    )
    if not replacement:
        raise ValueError(f"Replacement model '{replacement_model_slug}' not found")
    if replacement.id == model_id:
        # Migrating workflows to the model being deleted would leave them
        # pointing at a slug that no longer exists.
        raise ValueError(
            f"Replacement model '{replacement_model_slug}' is the model being "
            f"deleted. Please choose a different replacement."
        )
    if not replacement.isEnabled:
        raise ValueError(
            f"Replacement model '{replacement_model_slug}' is disabled. "
            f"Please enable it before using it as a replacement."
        )
    # 3. Migrate every node referencing the old slug. execute_raw returns the
    # number of affected rows, which is the authoritative migration count —
    # unlike the previous SELECT COUNT(*) followed by UPDATE, it cannot go
    # stale if nodes are created or edited between the two statements.
    nodes_affected = await prisma_module.get_client().execute_raw(
        """
        UPDATE "AgentNode"
        SET "constantInput" = JSONB_SET(
            "constantInput"::jsonb,
            '{model}',
            to_jsonb($1::text)
        )
        WHERE "constantInput"::jsonb->>'model' = $2
        """,
        replacement_model_slug,
        deleted_slug
    )
    # 4. Delete the model (CASCADE will delete costs automatically)
    await prisma.models.LlmModel.prisma().delete(where={"id": model_id})
    return llm_model.DeleteLlmModelResponse(
        deleted_model_slug=deleted_slug,
        deleted_model_display_name=deleted_display_name,
        replacement_model_slug=replacement_model_slug,
        nodes_migrated=nodes_affected,
        message=(
            f"Successfully deleted model '{deleted_display_name}' ({deleted_slug}) "
            f"and migrated {nodes_affected} workflow node(s) to '{replacement_model_slug}'."
        )
    )

View File

@@ -107,3 +107,16 @@ class UpdateLlmModelRequest(pydantic.BaseModel):
class ToggleLlmModelRequest(pydantic.BaseModel):
    """Request body for enabling or disabling a model."""
    # Desired availability state (True = enabled).
    is_enabled: bool
class DeleteLlmModelResponse(pydantic.BaseModel):
    """Result of deleting a model and migrating its workflows."""
    # Slug of the model that was removed.
    deleted_model_slug: str
    # Human-readable name of the removed model.
    deleted_model_display_name: str
    # Slug the affected workflow nodes now point to.
    replacement_model_slug: str
    # Number of AgentNode rows rewritten to the replacement.
    nodes_migrated: int
    # Human-readable summary for display in the admin UI.
    message: str
class LlmModelUsageResponse(pydantic.BaseModel):
    """Usage statistics for a single LLM model."""

    # Pydantic v2 reserves the "model_" prefix and warns that `model_slug`
    # conflicts with its protected namespace; clearing protected_namespaces
    # keeps the public field name without the warning.
    model_config = pydantic.ConfigDict(protected_namespaces=())

    # Slug of the model the count refers to.
    model_slug: str
    # Number of AgentNode rows currently referencing this model.
    node_count: int

View File

@@ -144,3 +144,22 @@ export async function toggleLlmModelAction(formData: FormData) {
revalidatePath(ADMIN_LLM_PATH);
}
/**
 * Server action: delete an LLM model and migrate its workflows.
 * Throws an Error (with a user-presentable message) on any failure.
 */
export async function deleteLlmModelAction(formData: FormData) {
  try {
    // FormData.get() returns null for missing keys; the previous
    // String(...)-first approach turned a missing field into the truthy
    // string "null", so the emptiness check could never fire for absent
    // fields. Validate the raw values before stringifying.
    const modelId = formData.get("model_id");
    const replacementModelSlug = formData.get("replacement_model_slug");
    if (!modelId) {
      throw new Error("Model id is required");
    }
    if (!replacementModelSlug) {
      throw new Error("Replacement model is required");
    }
    const api = new BackendApi();
    const result = await api.deleteAdminLlmModel(
      String(modelId),
      String(replacementModelSlug),
    );
    // Refresh the admin LLM page so the table reflects the deletion.
    revalidatePath(ADMIN_LLM_PATH);
    return result;
  } catch (error) {
    console.error("Delete model error:", error);
    throw error instanceof Error ? error : new Error("Failed to delete model");
  }
}

View File

@@ -0,0 +1,172 @@
"use client";
import { useState } from "react";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import type { LlmModel } from "@/lib/autogpt-server-api/types";
import { deleteLlmModelAction } from "../actions";
/**
 * Confirmation modal for deleting an LLM model.
 * Shows how many workflow nodes use the model and requires the admin to
 * pick an enabled replacement model before the delete-and-migrate action
 * can run.
 */
export function DeleteModelModal({
  model,
  availableModels,
}: {
  model: LlmModel;
  availableModels: LlmModel[];
}) {
  const [open, setOpen] = useState(false);
  const [selectedReplacement, setSelectedReplacement] = useState<string>("");
  const [isDeleting, setIsDeleting] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const [usageCount, setUsageCount] = useState<number | null>(null);

  // Filter out the current model and disabled models from replacement options
  const replacementOptions = availableModels.filter(
    (m) => m.id !== model.id && m.is_enabled,
  );

  // Fix: previously the selection/error only reset via the Cancel button, so
  // closing with the overlay/X (or after a successful delete) left stale
  // state behind for the next open. Reset is now centralized here.
  function resetTransientState() {
    setSelectedReplacement("");
    setError(null);
  }

  async function fetchUsage() {
    try {
      const BackendApi = (await import("@/lib/autogpt-server-api")).default;
      const api = new BackendApi();
      const usage = await api.getAdminLlmModelUsage(model.id);
      setUsageCount(usage.node_count);
    } catch {
      // Usage info is advisory only; on failure just omit the count.
      setUsageCount(null);
    }
  }

  async function handleDelete(formData: FormData) {
    setIsDeleting(true);
    setError(null);
    try {
      await deleteLlmModelAction(formData);
      resetTransientState();
      setOpen(false);
    } catch (err) {
      setError(err instanceof Error ? err.message : "Failed to delete model");
    } finally {
      setIsDeleting(false);
    }
  }

  return (
    <Dialog
      title="Delete Model"
      controlled={{
        isOpen: open,
        set: async (isOpen) => {
          setOpen(isOpen);
          if (isOpen) {
            setUsageCount(null);
            await fetchUsage();
          } else {
            // Covers overlay/X dismissal, not just the Cancel button.
            resetTransientState();
          }
        },
      }}
      styling={{ maxWidth: "600px" }}
    >
      <Dialog.Trigger>
        <button
          type="button"
          className="inline-flex items-center rounded border border-red-300 px-3 py-1 text-xs font-semibold text-red-600 hover:bg-red-50"
        >
          Delete
        </button>
      </Dialog.Trigger>
      <Dialog.Content>
        <div className="mb-4 text-sm text-muted-foreground">
          This action cannot be undone. All workflows using this model will be
          migrated to the replacement model you select.
        </div>
        <div className="space-y-4">
          <div className="rounded-lg border border-yellow-200 bg-yellow-50 p-4">
            <div className="flex items-start gap-3">
              {/* NOTE(review): empty icon slot — an icon was presumably
                  intended here; add one or drop the wrapper. */}
              <div className="flex-shrink-0 text-yellow-600"></div>
              <div className="text-sm text-yellow-800">
                <p className="font-semibold">You are about to delete:</p>
                <p className="mt-1">
                  <span className="font-medium">{model.display_name}</span>{" "}
                  <span className="text-yellow-600">({model.slug})</span>
                </p>
                {usageCount !== null && (
                  <p className="mt-2 font-semibold">
                    📊 Impact: {usageCount} block{usageCount !== 1 ? "s" : ""} currently use this model
                  </p>
                )}
                <p className="mt-2">
                  All workflows currently using this model will be automatically
                  updated to use the replacement model you choose below.
                </p>
              </div>
            </div>
          </div>
          <form action={handleDelete} className="space-y-4">
            <input type="hidden" name="model_id" value={model.id} />
            <input
              type="hidden"
              name="replacement_model_slug"
              value={selectedReplacement}
            />
            <label className="text-sm font-medium">
              <span className="block mb-2">
                Select Replacement Model <span className="text-red-500">*</span>
              </span>
              <select
                required
                value={selectedReplacement}
                onChange={(e) => setSelectedReplacement(e.target.value)}
                className="w-full rounded border border-input bg-background p-2 text-sm"
              >
                <option value="">-- Choose a replacement model --</option>
                {replacementOptions.map((m) => (
                  <option key={m.id} value={m.slug}>
                    {m.display_name} ({m.slug})
                  </option>
                ))}
              </select>
              {replacementOptions.length === 0 && (
                <p className="mt-2 text-xs text-red-600">
                  No replacement models available. You must have at least one
                  other enabled model before deleting this one.
                </p>
              )}
            </label>
            {error && (
              <div className="rounded-lg border border-red-200 bg-red-50 p-3 text-sm text-red-800">
                {error}
              </div>
            )}
            <Dialog.Footer>
              <Button
                variant="ghost"
                size="small"
                onClick={() => {
                  resetTransientState();
                  setOpen(false);
                }}
                disabled={isDeleting}
              >
                Cancel
              </Button>
              <button
                type="submit"
                disabled={
                  !selectedReplacement ||
                  isDeleting ||
                  replacementOptions.length === 0
                }
                className="inline-flex items-center rounded bg-red-600 px-4 py-2 text-sm font-semibold text-white hover:bg-red-700 disabled:opacity-50 disabled:cursor-not-allowed"
              >
                {isDeleting ? "Deleting..." : "Delete and Migrate"}
              </button>
            </Dialog.Footer>
          </form>
        </div>
      </Dialog.Content>
    </Dialog>
  );
}

View File

@@ -1,14 +1,8 @@
"use client";
import { useState } from "react";
import {
Dialog,
DialogContent,
DialogDescription,
DialogHeader,
DialogTitle,
DialogTrigger,
} from "@/components/__legacy__/ui/dialog";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import type { LlmModel, LlmProvider } from "@/lib/autogpt-server-api/types";
import { updateLlmModelAction } from "../actions";
@@ -24,22 +18,23 @@ export function EditModelModal({
const provider = providers.find((p) => p.id === model.provider_id);
return (
<Dialog open={open} onOpenChange={setOpen}>
<DialogTrigger asChild>
<Dialog
title="Edit Model"
controlled={{ isOpen: open, set: setOpen }}
styling={{ maxWidth: "768px", maxHeight: "90vh", overflowY: "auto" }}
>
<Dialog.Trigger>
<button
type="button"
className="inline-flex items-center rounded border border-input px-3 py-1 text-xs font-semibold hover:bg-muted"
>
Edit
</button>
</DialogTrigger>
<DialogContent className="max-w-2xl max-h-[90vh] overflow-y-auto">
<DialogHeader>
<DialogTitle>Edit Model</DialogTitle>
<DialogDescription>
Update model metadata and pricing information.
</DialogDescription>
</DialogHeader>
</Dialog.Trigger>
<Dialog.Content>
<div className="mb-4 text-sm text-muted-foreground">
Update model metadata and pricing information.
</div>
<form
action={async (formData) => {
await updateLlmModelAction(formData);
@@ -161,23 +156,20 @@ export function EditModelModal({
Enabled
</label>
<div className="flex justify-end gap-2 pt-4">
<button
type="button"
<Dialog.Footer>
<Button
variant="ghost"
size="small"
onClick={() => setOpen(false)}
className="inline-flex items-center rounded border border-input px-4 py-2 text-sm font-semibold hover:bg-muted"
>
Cancel
</button>
<button
type="submit"
className="inline-flex items-center rounded bg-primary px-4 py-2 text-sm font-semibold text-primary-foreground hover:bg-primary/90"
>
</Button>
<Button variant="primary" size="small" type="submit">
Update Model
</button>
</div>
</Button>
</Dialog.Footer>
</form>
</DialogContent>
</Dialog.Content>
</Dialog>
);
}

View File

@@ -1,3 +1,5 @@
import type { LlmModel, LlmProvider } from "@/lib/autogpt-server-api/types";
import {
Table,
TableBody,
@@ -5,9 +7,10 @@ import {
TableHead,
TableHeader,
TableRow,
} from "@/components/__legacy__/ui/table";
import type { LlmModel, LlmProvider } from "@/lib/autogpt-server-api/types";
} from "@/components/atoms/Table/Table";
import { toggleLlmModelAction } from "../actions";
import { DeleteModelModal } from "./DeleteModelModal";
import { EditModelModal } from "./EditModelModal";
export function ModelsTable({
@@ -25,7 +28,9 @@ export function ModelsTable({
);
}
const providerLookup = new Map(providers.map((provider) => [provider.id, provider]));
const providerLookup = new Map(
providers.map((provider) => [provider.id, provider]),
);
return (
<div className="rounded-lg border">
@@ -52,7 +57,9 @@ export function ModelsTable({
>
<TableCell>
<div className="font-medium">{model.display_name}</div>
<div className="text-xs text-muted-foreground">{model.slug}</div>
<div className="text-xs text-muted-foreground">
{model.slug}
</div>
</TableCell>
<TableCell>
{provider ? (
@@ -75,7 +82,9 @@ export function ModelsTable({
<TableCell>
{cost ? (
<>
<div className="font-medium">{cost.credit_cost} credits</div>
<div className="font-medium">
{cost.credit_cost} credits
</div>
<div className="text-xs text-muted-foreground">
{cost.credential_provider}
</div>
@@ -97,8 +106,15 @@ export function ModelsTable({
</TableCell>
<TableCell className="text-right text-sm">
<div className="flex items-center justify-end gap-2">
<ToggleModelButton modelId={model.id} isEnabled={model.is_enabled} />
<ToggleModelButton
modelId={model.id}
isEnabled={model.is_enabled}
/>
<EditModelModal model={model} providers={providers} />
<DeleteModelModal
model={model}
availableModels={models}
/>
</div>
</TableCell>
</TableRow>

View File

@@ -5,7 +5,7 @@ import {
TableHead,
TableHeader,
TableRow,
} from "@/components/__legacy__/ui/table";
} from "@/components/atoms/Table/Table";
import type { LlmProvider } from "@/lib/autogpt-server-api/types";
export function ProviderList({ providers }: { providers: LlmProvider[] }) {

View File

@@ -4130,6 +4130,391 @@
"security": [{ "HTTPBearerJWT": [] }]
}
},
"/api/llm/admin/llm/providers": {
"get": {
"tags": ["v2", "admin", "llm", "admin"],
"summary": "List LLM providers",
"operationId": "getV2List llm providers",
"security": [{ "HTTPBearerJWT": [] }],
"parameters": [
{
"name": "include_models",
"in": "query",
"required": false,
"schema": {
"type": "boolean",
"default": true,
"title": "Include Models"
}
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/LlmProvidersResponse"
}
}
}
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
},
"401": {
"$ref": "#/components/responses/HTTP401NotAuthenticatedError"
}
}
},
"post": {
"tags": ["v2", "admin", "llm", "admin"],
"summary": "Create LLM provider",
"operationId": "postV2Create llm provider",
"security": [{ "HTTPBearerJWT": [] }],
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/UpsertLlmProviderRequest"
}
}
}
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/LlmProvider" }
}
}
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
},
"401": {
"$ref": "#/components/responses/HTTP401NotAuthenticatedError"
}
}
}
},
"/api/llm/admin/llm/providers/{provider_id}": {
"patch": {
"tags": ["v2", "admin", "llm", "admin"],
"summary": "Update LLM provider",
"operationId": "patchV2Update llm provider",
"security": [{ "HTTPBearerJWT": [] }],
"parameters": [
{
"name": "provider_id",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Provider Id" }
}
],
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/UpsertLlmProviderRequest"
}
}
}
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/LlmProvider" }
}
}
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
},
"401": {
"$ref": "#/components/responses/HTTP401NotAuthenticatedError"
}
}
}
},
"/api/llm/admin/llm/models": {
"get": {
"tags": ["v2", "admin", "llm", "admin"],
"summary": "List LLM models",
"operationId": "getV2List llm models",
"security": [{ "HTTPBearerJWT": [] }],
"parameters": [
{
"name": "provider_id",
"in": "query",
"required": false,
"schema": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Provider Id"
}
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/LlmModelsResponse" }
}
}
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
},
"401": {
"$ref": "#/components/responses/HTTP401NotAuthenticatedError"
}
}
},
"post": {
"tags": ["v2", "admin", "llm", "admin"],
"summary": "Create LLM model",
"operationId": "postV2Create llm model",
"security": [{ "HTTPBearerJWT": [] }],
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/CreateLlmModelRequest" }
}
}
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/LlmModel" }
}
}
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
},
"401": {
"$ref": "#/components/responses/HTTP401NotAuthenticatedError"
}
}
}
},
"/api/llm/admin/llm/models/{model_id}": {
"patch": {
"tags": ["v2", "admin", "llm", "admin"],
"summary": "Update LLM model",
"operationId": "patchV2Update llm model",
"security": [{ "HTTPBearerJWT": [] }],
"parameters": [
{
"name": "model_id",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Model Id" }
}
],
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/UpdateLlmModelRequest" }
}
}
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/LlmModel" }
}
}
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
},
"401": {
"$ref": "#/components/responses/HTTP401NotAuthenticatedError"
}
}
},
"delete": {
"tags": ["v2", "admin", "llm", "admin"],
"summary": "Delete LLM model and migrate workflows",
"description": "Delete a model and automatically migrate all workflows using it to a replacement model.\n\nThis endpoint:\n1. Validates the replacement model exists and is enabled\n2. Counts how many workflow nodes use the model being deleted\n3. Updates all AgentNode.constantInput->model fields to the replacement\n4. Deletes the model record\n5. Refreshes all caches and notifies executors\n\nExample: DELETE /admin/llm/models/{id}?replacement_model_slug=gpt-4o",
"operationId": "deleteV2Delete llm model and migrate workflows",
"security": [{ "HTTPBearerJWT": [] }],
"parameters": [
{
"name": "model_id",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Model Id" }
},
{
"name": "replacement_model_slug",
"in": "query",
"required": true,
"schema": {
"type": "string",
"description": "Slug of the model to migrate existing workflows to",
"title": "Replacement Model Slug"
},
"description": "Slug of the model to migrate existing workflows to"
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/DeleteLlmModelResponse"
}
}
}
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
},
"401": {
"$ref": "#/components/responses/HTTP401NotAuthenticatedError"
}
}
}
},
"/api/llm/admin/llm/models/{model_id}/toggle": {
"patch": {
"tags": ["v2", "admin", "llm", "admin"],
"summary": "Toggle LLM model availability",
"operationId": "patchV2Toggle llm model availability",
"security": [{ "HTTPBearerJWT": [] }],
"parameters": [
{
"name": "model_id",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Model Id" }
}
],
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/ToggleLlmModelRequest" }
}
}
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/LlmModel" }
}
}
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
},
"401": {
"$ref": "#/components/responses/HTTP401NotAuthenticatedError"
}
}
}
},
"/api/llm/models": {
"get": {
"tags": ["v2", "llm", "llm"],
"summary": "List Models",
"operationId": "getV2ListModels",
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/LlmModelsResponse" }
}
}
},
"401": {
"$ref": "#/components/responses/HTTP401NotAuthenticatedError"
}
},
"security": [{ "HTTPBearerJWT": [] }]
}
},
"/api/llm/providers": {
"get": {
"tags": ["v2", "llm", "llm"],
"summary": "List Providers",
"operationId": "getV2ListProviders",
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/LlmProvidersResponse"
}
}
}
},
"401": {
"$ref": "#/components/responses/HTTP401NotAuthenticatedError"
}
},
"security": [{ "HTTPBearerJWT": [] }]
}
},
"/api/library/presets": {
"get": {
"tags": ["v2", "presets"],
@@ -5769,6 +6154,51 @@
"required": ["graph"],
"title": "CreateGraph"
},
"CreateLlmModelRequest": {
"properties": {
"slug": { "type": "string", "title": "Slug" },
"display_name": { "type": "string", "title": "Display Name" },
"description": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Description"
},
"provider_id": { "type": "string", "title": "Provider Id" },
"context_window": { "type": "integer", "title": "Context Window" },
"max_output_tokens": {
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Max Output Tokens"
},
"is_enabled": {
"type": "boolean",
"title": "Is Enabled",
"default": true
},
"capabilities": {
"additionalProperties": true,
"type": "object",
"title": "Capabilities"
},
"metadata": {
"additionalProperties": true,
"type": "object",
"title": "Metadata"
},
"costs": {
"items": { "$ref": "#/components/schemas/LlmModelCostInput" },
"type": "array",
"title": "Costs"
}
},
"type": "object",
"required": [
"slug",
"display_name",
"provider_id",
"context_window",
"costs"
],
"title": "CreateLlmModelRequest"
},
"CreateSessionResponse": {
"properties": {
"id": { "type": "string", "title": "Id" },
@@ -5961,6 +6391,33 @@
"required": ["version_counts"],
"title": "DeleteGraphResponse"
},
"DeleteLlmModelResponse": {
"properties": {
"deleted_model_slug": {
"type": "string",
"title": "Deleted Model Slug"
},
"deleted_model_display_name": {
"type": "string",
"title": "Deleted Model Display Name"
},
"replacement_model_slug": {
"type": "string",
"title": "Replacement Model Slug"
},
"nodes_migrated": { "type": "integer", "title": "Nodes Migrated" },
"message": { "type": "string", "title": "Message" }
},
"type": "object",
"required": [
"deleted_model_slug",
"deleted_model_display_name",
"replacement_model_slug",
"nodes_migrated",
"message"
],
"title": "DeleteLlmModelResponse"
},
"Document": {
"properties": {
"url": { "type": "string", "title": "Url" },
@@ -7132,6 +7589,205 @@
"required": ["source_id", "sink_id", "source_name", "sink_name"],
"title": "Link"
},
"LlmCostUnit": {
"type": "string",
"enum": ["RUN", "TOKENS"],
"title": "LlmCostUnit"
},
"LlmModel": {
"properties": {
"id": { "type": "string", "title": "Id" },
"slug": { "type": "string", "title": "Slug" },
"display_name": { "type": "string", "title": "Display Name" },
"description": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Description"
},
"provider_id": { "type": "string", "title": "Provider Id" },
"context_window": { "type": "integer", "title": "Context Window" },
"max_output_tokens": {
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Max Output Tokens"
},
"is_enabled": {
"type": "boolean",
"title": "Is Enabled",
"default": true
},
"capabilities": {
"additionalProperties": true,
"type": "object",
"title": "Capabilities"
},
"metadata": {
"additionalProperties": true,
"type": "object",
"title": "Metadata"
},
"costs": {
"items": { "$ref": "#/components/schemas/LlmModelCost" },
"type": "array",
"title": "Costs"
}
},
"type": "object",
"required": [
"id",
"slug",
"display_name",
"provider_id",
"context_window"
],
"title": "LlmModel"
},
"LlmModelCost": {
"properties": {
"id": { "type": "string", "title": "Id" },
"unit": {
"$ref": "#/components/schemas/LlmCostUnit",
"default": "RUN"
},
"credit_cost": { "type": "integer", "title": "Credit Cost" },
"credential_provider": {
"type": "string",
"title": "Credential Provider"
},
"credential_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Credential Id"
},
"credential_type": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Credential Type"
},
"currency": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Currency"
},
"metadata": {
"additionalProperties": true,
"type": "object",
"title": "Metadata"
}
},
"type": "object",
"required": ["id", "credit_cost", "credential_provider"],
"title": "LlmModelCost"
},
"LlmModelCostInput": {
"properties": {
"unit": {
"$ref": "#/components/schemas/LlmCostUnit",
"default": "RUN"
},
"credit_cost": { "type": "integer", "title": "Credit Cost" },
"credential_provider": {
"type": "string",
"title": "Credential Provider"
},
"credential_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Credential Id"
},
"credential_type": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Credential Type",
"default": "api_key"
},
"currency": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Currency"
},
"metadata": {
"additionalProperties": true,
"type": "object",
"title": "Metadata"
}
},
"type": "object",
"required": ["credit_cost", "credential_provider"],
"title": "LlmModelCostInput"
},
"LlmModelsResponse": {
"properties": {
"models": {
"items": { "$ref": "#/components/schemas/LlmModel" },
"type": "array",
"title": "Models"
}
},
"type": "object",
"required": ["models"],
"title": "LlmModelsResponse"
},
"LlmProvider": {
"properties": {
"id": { "type": "string", "title": "Id" },
"name": { "type": "string", "title": "Name" },
"display_name": { "type": "string", "title": "Display Name" },
"description": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Description"
},
"default_credential_provider": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Default Credential Provider"
},
"default_credential_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Default Credential Id"
},
"default_credential_type": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Default Credential Type"
},
"supports_tools": {
"type": "boolean",
"title": "Supports Tools",
"default": true
},
"supports_json_output": {
"type": "boolean",
"title": "Supports Json Output",
"default": true
},
"supports_reasoning": {
"type": "boolean",
"title": "Supports Reasoning",
"default": false
},
"supports_parallel_tool": {
"type": "boolean",
"title": "Supports Parallel Tool",
"default": false
},
"metadata": {
"additionalProperties": true,
"type": "object",
"title": "Metadata"
},
"models": {
"items": { "$ref": "#/components/schemas/LlmModel" },
"type": "array",
"title": "Models"
}
},
"type": "object",
"required": ["id", "name", "display_name"],
"title": "LlmProvider"
},
"LlmProvidersResponse": {
"properties": {
"providers": {
"items": { "$ref": "#/components/schemas/LlmProvider" },
"type": "array",
"title": "Providers"
}
},
"type": "object",
"required": ["providers"],
"title": "LlmProvidersResponse"
},
"LogRawMetricRequest": {
"properties": {
"metric_name": {
@@ -9503,6 +10159,14 @@
"required": ["timezone"],
"title": "TimezoneResponse"
},
"ToggleLlmModelRequest": {
"properties": {
"is_enabled": { "type": "boolean", "title": "Is Enabled" }
},
"type": "object",
"required": ["is_enabled"],
"title": "ToggleLlmModelRequest"
},
"TransactionHistory": {
"properties": {
"transactions": {
@@ -9549,6 +10213,60 @@
"required": ["name", "graph_id", "graph_version", "trigger_config"],
"title": "TriggeredPresetSetupRequest"
},
"UpdateLlmModelRequest": {
"properties": {
"display_name": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Display Name"
},
"description": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Description"
},
"context_window": {
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Context Window"
},
"max_output_tokens": {
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Max Output Tokens"
},
"is_enabled": {
"anyOf": [{ "type": "boolean" }, { "type": "null" }],
"title": "Is Enabled"
},
"capabilities": {
"anyOf": [
{ "additionalProperties": true, "type": "object" },
{ "type": "null" }
],
"title": "Capabilities"
},
"metadata": {
"anyOf": [
{ "additionalProperties": true, "type": "object" },
{ "type": "null" }
],
"title": "Metadata"
},
"provider_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Provider Id"
},
"costs": {
"anyOf": [
{
"items": { "$ref": "#/components/schemas/LlmModelCostInput" },
"type": "array"
},
{ "type": "null" }
],
"title": "Costs"
}
},
"type": "object",
"title": "UpdateLlmModelRequest"
},
"UpdatePermissionsRequest": {
"properties": {
"permissions": {
@@ -10190,6 +10908,57 @@
],
"title": "UploadFileResponse"
},
"UpsertLlmProviderRequest": {
"properties": {
"name": { "type": "string", "title": "Name" },
"display_name": { "type": "string", "title": "Display Name" },
"description": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Description"
},
"default_credential_provider": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Default Credential Provider"
},
"default_credential_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Default Credential Id"
},
"default_credential_type": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Default Credential Type",
"default": "api_key"
},
"supports_tools": {
"type": "boolean",
"title": "Supports Tools",
"default": true
},
"supports_json_output": {
"type": "boolean",
"title": "Supports Json Output",
"default": true
},
"supports_reasoning": {
"type": "boolean",
"title": "Supports Reasoning",
"default": false
},
"supports_parallel_tool": {
"type": "boolean",
"title": "Supports Parallel Tool",
"default": false
},
"metadata": {
"additionalProperties": true,
"type": "object",
"title": "Metadata"
}
},
"type": "object",
"required": ["name", "display_name"],
"title": "UpsertLlmProviderRequest"
},
"UserHistoryResponse": {
"properties": {
"history": {

View File

@@ -0,0 +1,123 @@
import * as React from "react";
import { cn } from "@/lib/utils";
const Table = React.forwardRef<
HTMLTableElement,
React.HTMLAttributes<HTMLTableElement>
>(({ className, ...props }, ref) => (
<div className="relative w-full overflow-auto">
<table
ref={ref}
className={cn("w-full caption-bottom text-sm", className)}
{...props}
/>
</div>
));
Table.displayName = "Table";
const TableHeader = React.forwardRef<
HTMLTableSectionElement,
React.HTMLAttributes<HTMLTableSectionElement>
>(({ className, ...props }, ref) => (
<thead ref={ref} className={cn("[&_tr]:border-b", className)} {...props} />
));
TableHeader.displayName = "TableHeader";
const TableBody = React.forwardRef<
HTMLTableSectionElement,
React.HTMLAttributes<HTMLTableSectionElement>
>(({ className, ...props }, ref) => (
<tbody
ref={ref}
className={cn("[&_tr:last-child]:border-0", className)}
{...props}
/>
));
TableBody.displayName = "TableBody";
const TableFooter = React.forwardRef<
HTMLTableSectionElement,
React.HTMLAttributes<HTMLTableSectionElement>
>(({ className, ...props }, ref) => (
<tfoot
ref={ref}
className={cn(
"border-t bg-neutral-100/50 font-medium dark:bg-neutral-800/50 [&>tr]:last:border-b-0",
className,
)}
{...props}
/>
));
TableFooter.displayName = "TableFooter";
const TableRow = React.forwardRef<
HTMLTableRowElement,
React.HTMLAttributes<HTMLTableRowElement>
>(({ className, ...props }, ref) => (
<tr
ref={ref}
className={cn(
"border-b transition-colors data-[state=selected]:bg-neutral-100 hover:bg-neutral-100/50 dark:data-[state=selected]:bg-neutral-800 dark:hover:bg-neutral-800/50",
className,
)}
{...props}
/>
));
TableRow.displayName = "TableRow";
const TableHead = React.forwardRef<
HTMLTableCellElement,
React.ThHTMLAttributes<HTMLTableCellElement>
>(({ className, ...props }, ref) => (
<th
ref={ref}
className={cn(
"h-10 px-2 text-left align-middle font-medium text-neutral-500 dark:text-neutral-400 [&:has([role=checkbox])]:pr-0 [&>[role=checkbox]]:translate-y-[2px]",
className,
)}
{...props}
/>
));
TableHead.displayName = "TableHead";
const TableCell = React.forwardRef<
HTMLTableCellElement,
React.TdHTMLAttributes<HTMLTableCellElement>
>(({ className, ...props }, ref) => (
<td
ref={ref}
className={cn(
"p-2 align-middle [&:has([role=checkbox])]:pr-0 [&>[role=checkbox]]:translate-y-[2px]",
className,
)}
{...props}
/>
));
TableCell.displayName = "TableCell";
const TableCaption = React.forwardRef<
HTMLTableCaptionElement,
React.HTMLAttributes<HTMLTableCaptionElement>
>(({ className, ...props }, ref) => (
<caption
ref={ref}
className={cn(
"mt-4 text-sm text-neutral-500 dark:text-neutral-400",
className,
)}
{...props}
/>
));
TableCaption.displayName = "TableCaption";
export {
Table,
TableHeader,
TableBody,
TableFooter,
TableHead,
TableRow,
TableCell,
TableCaption,
};

View File

@@ -471,6 +471,29 @@ export default class BackendAPI {
);
}
/**
 * Fetch how many workflow nodes currently reference the given LLM model.
 *
 * Calls GET /llm/admin/llm/models/{model_id}/usage.
 *
 * @param modelId - ID of the model to inspect.
 * @returns The model's slug and the count of nodes using it.
 */
getAdminLlmModelUsage(
  modelId: string,
): Promise<{ model_slug: string; node_count: number }> {
  // Encode the path segment so unusual IDs cannot break or redirect the URL.
  return this._get(
    `/llm/admin/llm/models/${encodeURIComponent(modelId)}/usage`,
  );
}
/**
 * Delete an LLM model and migrate every workflow that uses it to a
 * replacement model.
 *
 * Calls DELETE /llm/admin/llm/models/{model_id}?replacement_model_slug=...
 *
 * @param modelId - ID of the model to delete.
 * @param replacementModelSlug - Slug of the model existing workflows are
 *   migrated to.
 * @returns Migration summary (slugs, nodes migrated, human-readable message).
 */
deleteAdminLlmModel(
  modelId: string,
  replacementModelSlug: string,
): Promise<{
  deleted_model_slug: string;
  deleted_model_display_name: string;
  replacement_model_slug: string;
  nodes_migrated: number;
  message: string;
}> {
  // The backend declares replacement_model_slug as fastapi.Query(...), so it
  // must travel in the URL query string — sending it as the request payload
  // (JSON body) would make FastAPI reject the call with a 422.
  const query = new URLSearchParams({
    replacement_model_slug: replacementModelSlug,
  });
  return this._request(
    "DELETE",
    `/llm/admin/llm/models/${encodeURIComponent(modelId)}?${query.toString()}`,
  );
}
// API Key related requests
async createAPIKey(
name: string,