diff --git a/autogpt_platform/backend/backend/server/v2/admin/llm_routes.py b/autogpt_platform/backend/backend/server/v2/admin/llm_routes.py index 2834ac2d8c..e0822effd5 100644 --- a/autogpt_platform/backend/backend/server/v2/admin/llm_routes.py +++ b/autogpt_platform/backend/backend/server/v2/admin/llm_routes.py @@ -132,18 +132,37 @@ async def update_llm_model( @router.patch( "/models/{model_id}/toggle", summary="Toggle LLM model availability", - response_model=llm_model.LlmModel, + response_model=llm_model.ToggleLlmModelResponse, ) async def toggle_llm_model( model_id: str, request: llm_model.ToggleLlmModelRequest, ): + """ + Toggle a model's enabled status, optionally migrating workflows when disabling. + + If disabling a model and `migrate_to_slug` is provided, all workflows using + this model will be migrated to the specified replacement model before disabling. + """ try: - model = await llm_db.toggle_model( - model_id=model_id, is_enabled=request.is_enabled + result = await llm_db.toggle_model( + model_id=model_id, + is_enabled=request.is_enabled, + migrate_to_slug=request.migrate_to_slug, ) await _refresh_runtime_state() - return model + if result.nodes_migrated > 0: + logger.info( + "Toggled model '%s' to %s and migrated %d nodes to '%s'", + result.model.slug, + "enabled" if request.is_enabled else "disabled", + result.nodes_migrated, + result.migrated_to_slug, + ) + return result + except ValueError as exc: + logger.warning("Model toggle validation failed: %s", exc) + raise fastapi.HTTPException(status_code=400, detail=str(exc)) from exc except Exception as exc: logger.exception("Failed to toggle LLM model %s: %s", model_id, exc) raise fastapi.HTTPException( diff --git a/autogpt_platform/backend/backend/server/v2/llm/db.py b/autogpt_platform/backend/backend/server/v2/llm/db.py index 623e81a5b3..08c09133ee 100644 --- a/autogpt_platform/backend/backend/server/v2/llm/db.py +++ b/autogpt_platform/backend/backend/server/v2/llm/db.py @@ -219,13 +219,91 @@ async 
def update_model( return _map_model(record) -async def toggle_model(model_id: str, is_enabled: bool) -> llm_model.LlmModel: - record = await prisma.models.LlmModel.prisma().update( - where={"id": model_id}, - data={"isEnabled": is_enabled}, - include={"Costs": True}, +async def toggle_model( + model_id: str, is_enabled: bool, migrate_to_slug: str | None = None +) -> llm_model.ToggleLlmModelResponse: + """ + Toggle a model's enabled status, optionally migrating workflows when disabling. + + Args: + model_id: UUID of the model to toggle + is_enabled: New enabled status + migrate_to_slug: If disabling and this is provided, migrate all workflows + using this model to the specified replacement model + + Returns: + ToggleLlmModelResponse with the updated model and optional migration stats + """ + import prisma as prisma_module + + # Get the model being toggled + model = await prisma.models.LlmModel.prisma().find_unique( + where={"id": model_id}, include={"Costs": True} + ) + if not model: + raise ValueError(f"Model with id '{model_id}' not found") + + nodes_migrated = 0 + + # If disabling with migration, perform migration first + if not is_enabled and migrate_to_slug: + # Validate replacement model exists and is enabled + replacement = await prisma.models.LlmModel.prisma().find_unique( + where={"slug": migrate_to_slug} + ) + if not replacement: + raise ValueError(f"Replacement model '{migrate_to_slug}' not found") + if not replacement.isEnabled: + raise ValueError( + f"Replacement model '{migrate_to_slug}' is disabled. " + f"Please enable it before using it as a replacement." 
+ ) + + # Count affected nodes + count_result = await prisma_module.get_client().query_raw( + """ + SELECT COUNT(*) as count + FROM "AgentNode" + WHERE "constantInput"::jsonb->>'model' = $1 + """, + model.slug, + ) + nodes_migrated = int(count_result[0]["count"]) if count_result else 0 + + # Perform migration and toggle atomically + async with transaction() as tx: + if nodes_migrated > 0: + await tx.execute_raw( + """ + UPDATE "AgentNode" + SET "constantInput" = JSONB_SET( + "constantInput"::jsonb, + '{model}', + to_jsonb($1::text) + ) + WHERE "constantInput"::jsonb->>'model' = $2 + """, + migrate_to_slug, + model.slug, + ) + record = await tx.llmmodel.update( + where={"id": model_id}, + data={"isEnabled": is_enabled}, + include={"Costs": True}, + ) + else: + # Simple toggle without migration + record = await prisma.models.LlmModel.prisma().update( + where={"id": model_id}, + data={"isEnabled": is_enabled}, + include={"Costs": True}, + ) + + return llm_model.ToggleLlmModelResponse( + model=_map_model(record), + nodes_migrated=nodes_migrated, + migrated_to_slug=migrate_to_slug if nodes_migrated > 0 else None, ) - return _map_model(record) async def get_model_usage(model_id: str) -> llm_model.LlmModelUsageResponse: diff --git a/autogpt_platform/backend/backend/server/v2/llm/model.py b/autogpt_platform/backend/backend/server/v2/llm/model.py index bbddaf5371..33090d9c2b 100644 --- a/autogpt_platform/backend/backend/server/v2/llm/model.py +++ b/autogpt_platform/backend/backend/server/v2/llm/model.py @@ -106,6 +106,13 @@ class UpdateLlmModelRequest(pydantic.BaseModel): class ToggleLlmModelRequest(pydantic.BaseModel): is_enabled: bool + migrate_to_slug: Optional[str] = None + + +class ToggleLlmModelResponse(pydantic.BaseModel): + model: LlmModel + nodes_migrated: int = 0 + migrated_to_slug: Optional[str] = None class DeleteLlmModelResponse(pydantic.BaseModel): diff --git a/autogpt_platform/frontend/src/app/(platform)/admin/llms/actions.ts 
b/autogpt_platform/frontend/src/app/(platform)/admin/llms/actions.ts index b8136207ee..7868b355c8 100644 --- a/autogpt_platform/frontend/src/app/(platform)/admin/llms/actions.ts +++ b/autogpt_platform/frontend/src/app/(platform)/admin/llms/actions.ts @@ -134,11 +134,14 @@ export async function updateLlmModelAction(formData: FormData) { revalidatePath(ADMIN_LLM_PATH); } -export async function toggleLlmModelAction(formData: FormData) { +export async function toggleLlmModelAction(formData: FormData): Promise<void> { const modelId = String(formData.get("model_id")); const shouldEnable = formData.get("is_enabled") === "true"; + const migrateToSlug = formData.get("migrate_to_slug"); + const payload: ToggleLlmModelRequest = { is_enabled: shouldEnable, + migrate_to_slug: migrateToSlug ? String(migrateToSlug) : undefined, }; const api = new BackendApi(); await api.toggleAdminLlmModel(modelId, payload); diff --git a/autogpt_platform/frontend/src/app/(platform)/admin/llms/components/DisableModelModal.tsx b/autogpt_platform/frontend/src/app/(platform)/admin/llms/components/DisableModelModal.tsx new file mode 100644 index 0000000000..ea403b17ac --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/admin/llms/components/DisableModelModal.tsx @@ -0,0 +1,223 @@ +"use client"; + +import { useState } from "react"; +import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { Button } from "@/components/atoms/Button/Button"; +import type { LlmModel } from "@/lib/autogpt-server-api/types"; +import { toggleLlmModelAction } from "../actions"; + +export function DisableModelModal({ + model, + availableModels, +}: { + model: LlmModel; + availableModels: LlmModel[]; +}) { + const [open, setOpen] = useState(false); + const [isDisabling, setIsDisabling] = useState(false); + const [error, setError] = useState<string | null>(null); + const [usageCount, setUsageCount] = useState<number | null>(null); + const [selectedMigration, setSelectedMigration] = useState(""); + const [wantsMigration,
setWantsMigration] = useState(false); + + // Filter out the current model and disabled models from replacement options + const migrationOptions = availableModels.filter( + (m) => m.id !== model.id && m.is_enabled + ); + + async function fetchUsage() { + try { + const BackendApi = (await import("@/lib/autogpt-server-api")).default; + const api = new BackendApi(); + const usage = await api.getAdminLlmModelUsage(model.id); + setUsageCount(usage.node_count); + } catch { + setUsageCount(null); + } + } + + async function handleDisable(formData: FormData) { + setIsDisabling(true); + setError(null); + try { + await toggleLlmModelAction(formData); + setOpen(false); + } catch (err) { + setError(err instanceof Error ? err.message : "Failed to disable model"); + } finally { + setIsDisabling(false); + } + } + + function resetState() { + setError(null); + setSelectedMigration(""); + setWantsMigration(false); + } + + const hasUsage = usageCount !== null && usageCount > 0; + + return ( + { + setOpen(isOpen); + if (isOpen) { + setUsageCount(null); + resetState(); + await fetchUsage(); + } + }, + }} + styling={{ maxWidth: "550px" }} + > + + + + +
+ Disabling a model will hide it from users when creating new workflows. +
+ +
+
+
+
⚠️
+
+

You are about to disable:

+

+ {model.display_name}{" "} + + ({model.slug}) + +

+ {usageCount === null ? ( +

+ Loading usage data... +

+ ) : usageCount > 0 ? ( +

+ 📊 Impact: {usageCount} block{usageCount !== 1 ? "s" : ""}{" "} + currently use this model +

+ ) : ( +

+ ✓ No workflows are currently using this model. +

+ )} +
+
+
+ + {hasUsage && ( +
+ + + {wantsMigration && ( +
+ +
+ )} +
+ )} + +
+ + + {wantsMigration && selectedMigration && ( + + )} + + {error && ( +
+ {error} +
+ )} + + + + + +
+
+
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/admin/llms/components/ModelsTable.tsx b/autogpt_platform/frontend/src/app/(platform)/admin/llms/components/ModelsTable.tsx index f231a244d4..c311ff826c 100644 --- a/autogpt_platform/frontend/src/app/(platform)/admin/llms/components/ModelsTable.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/admin/llms/components/ModelsTable.tsx @@ -10,6 +10,7 @@ import { import { Button } from "@/components/atoms/Button/Button"; import { toggleLlmModelAction } from "../actions"; import { DeleteModelModal } from "./DeleteModelModal"; +import { DisableModelModal } from "./DisableModelModal"; import { EditModelModal } from "./EditModelModal"; export function ModelsTable({ @@ -105,10 +106,14 @@ export function ModelsTable({
- + {model.is_enabled ? ( + ) : ( + )} - - ); diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts index cc0b14fc15..915fda53ae 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts @@ -56,6 +56,7 @@ import type { CreateLlmModelRequest, UpdateLlmModelRequest, ToggleLlmModelRequest, + ToggleLlmModelResponse, UpsertLlmProviderRequest, LlmModelsResponse, LlmProvider, @@ -463,7 +464,7 @@ toggleAdminLlmModel( modelId: string, payload: ToggleLlmModelRequest, - ): Promise<LlmModel> { + ): Promise<ToggleLlmModelResponse> { return this._request( "PATCH", `/llm/admin/llm/models/${modelId}/toggle`, diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts index 8500a42141..4c8b198859 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts @@ -349,6 +349,13 @@ export type UpdateLlmModelRequest = { export type ToggleLlmModelRequest = { is_enabled: boolean; + migrate_to_slug?: string; }; + +export type ToggleLlmModelResponse = { + model: LlmModel; + nodes_migrated: number; + migrated_to_slug?: string | null; }; export type BlockIOOneOfSubSchema = {