diff --git a/autogpt_platform/backend/backend/api/features/admin/llm_routes.py b/autogpt_platform/backend/backend/api/features/admin/llm_routes.py
index 743306916f..4eb461ffba 100644
--- a/autogpt_platform/backend/backend/api/features/admin/llm_routes.py
+++ b/autogpt_platform/backend/backend/api/features/admin/llm_routes.py
@@ -216,21 +216,25 @@ async def get_llm_model_usage(model_id: str):
 )
 async def delete_llm_model(
     model_id: str,
-    replacement_model_slug: str = fastapi.Query(
-        ..., description="Slug of the model to migrate existing workflows to"
+    replacement_model_slug: str | None = fastapi.Query(
+        default=None,
+        description="Slug of the model to migrate existing workflows to (required only if workflows use this model)",
     ),
 ):
     """
-    Delete a model and automatically migrate all workflows using it to a replacement model.
+    Delete a model and optionally migrate workflows using it to a replacement model.
+
+    If no workflows are using this model, it can be deleted without providing a
+    replacement. If workflows exist, replacement_model_slug is required.

     This endpoint:
-    1. Validates the replacement model exists and is enabled
-    2. Counts how many workflow nodes use the model being deleted
-    3. Updates all AgentNode.constantInput->model fields to the replacement
-    4. Deletes the model record
-    5. Refreshes all caches and notifies executors
+    1. Counts how many workflow nodes use the model being deleted
+    2. If nodes exist, validates the replacement model and migrates them
+    3. Deletes the model record
+    4. Refreshes all caches and notifies executors

     Example: DELETE /admin/llm/models/{id}?replacement_model_slug=gpt-4o
+    Example (no usage): DELETE /admin/llm/models/{id}
     """
     try:
         result = await llm_db.delete_model(
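For reviewers, a minimal usage sketch (not part of this diff) of the two request shapes the updated endpoint accepts; the base URL, admin JWT, and model UUID below are hypothetical placeholders:

import httpx

BASE_URL = "http://localhost:8006"  # hypothetical backend address
ADMIN_TOKEN = "..."  # hypothetical admin JWT
MODEL_ID = "00000000-0000-0000-0000-000000000000"  # hypothetical model UUID
headers = {"Authorization": f"Bearer {ADMIN_TOKEN}"}

# Model still referenced by workflow nodes: a replacement slug must be supplied.
httpx.delete(
    f"{BASE_URL}/admin/llm/models/{MODEL_ID}",
    params={"replacement_model_slug": "gpt-4o"},
    headers=headers,
)

# Unused model: the query parameter can now be omitted entirely.
httpx.delete(f"{BASE_URL}/admin/llm/models/{MODEL_ID}", headers=headers)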
diff --git a/autogpt_platform/backend/backend/server/v2/llm/db.py b/autogpt_platform/backend/backend/server/v2/llm/db.py
index 4dd3440700..96d80573f1 100644
--- a/autogpt_platform/backend/backend/server/v2/llm/db.py
+++ b/autogpt_platform/backend/backend/server/v2/llm/db.py
@@ -460,27 +460,27 @@ async def get_model_usage(model_id: str) -> llm_model.LlmModelUsageResponse:


 async def delete_model(
-    model_id: str, replacement_model_slug: str
+    model_id: str, replacement_model_slug: str | None = None
 ) -> llm_model.DeleteLlmModelResponse:
     """
-    Delete a model and migrate all AgentNodes using it to a replacement model.
+    Delete a model and optionally migrate all AgentNodes using it to a replacement model.

     This performs an atomic operation within a database transaction:
     1. Validates the model exists
-    2. Validates the replacement model exists and is enabled
-    3. Counts affected nodes
-    4. Migrates all AgentNode.constantInput->model to replacement (in transaction)
-    5. Deletes the LlmModel record (CASCADE deletes costs) (in transaction)
+    2. Counts affected nodes
+    3. If nodes exist, validates replacement model and migrates them
+    4. Deletes the LlmModel record (CASCADE deletes costs)

     Args:
         model_id: UUID of the model to delete
-        replacement_model_slug: Slug of the model to migrate to
+        replacement_model_slug: Slug of the model to migrate to (required only if nodes use this model)

     Returns:
         DeleteLlmModelResponse with migration stats

     Raises:
-        ValueError: If model not found, replacement not found, or replacement is disabled
+        ValueError: If model not found, nodes exist but no replacement provided,
+            replacement not found, or replacement is disabled
     """
     # 1. Get the model being deleted (validation - outside transaction)
     model = await prisma.models.LlmModel.prisma().find_unique(
@@ -492,34 +492,39 @@ async def delete_model(
     deleted_slug = model.slug
     deleted_display_name = model.displayName

-    # 2. Validate replacement model exists and is enabled (validation - outside transaction)
-    replacement = await prisma.models.LlmModel.prisma().find_unique(
-        where={"slug": replacement_model_slug}
+    # 2. Count affected nodes first to determine if replacement is needed
+    count_result = await prisma.models.prisma().query_raw(
+        """
+        SELECT COUNT(*) as count
+        FROM "AgentNode"
+        WHERE "constantInput"::jsonb->>'model' = $1
+        """,
+        deleted_slug,
     )
-    if not replacement:
-        raise ValueError(f"Replacement model '{replacement_model_slug}' not found")
-    if not replacement.isEnabled:
-        raise ValueError(
-            f"Replacement model '{replacement_model_slug}' is disabled. "
-            f"Please enable it before using it as a replacement."
-        )
+    nodes_to_migrate = int(count_result[0]["count"]) if count_result else 0

-    # 3 & 4. Perform count, migration and deletion atomically within a transaction
-    nodes_affected = 0
+    # 3. Validate replacement model only if there are nodes to migrate
+    if nodes_to_migrate > 0:
+        if not replacement_model_slug:
+            raise ValueError(
+                f"Cannot delete model '{deleted_slug}': {nodes_to_migrate} workflow node(s) "
+                f"are using it. Please provide a replacement_model_slug to migrate them."
+            )
+        replacement = await prisma.models.LlmModel.prisma().find_unique(
+            where={"slug": replacement_model_slug}
+        )
+        if not replacement:
+            raise ValueError(f"Replacement model '{replacement_model_slug}' not found")
+        if not replacement.isEnabled:
+            raise ValueError(
+                f"Replacement model '{replacement_model_slug}' is disabled. "
+                f"Please enable it before using it as a replacement."
+            )
+
+    # 4. Perform migration (if needed) and deletion atomically within a transaction
     async with transaction() as tx:
-        # Count affected nodes (inside transaction for consistency)
-        count_result = await tx.query_raw(
-            """
-            SELECT COUNT(*) as count
-            FROM "AgentNode"
-            WHERE "constantInput"::jsonb->>'model' = $1
-            """,
-            deleted_slug,
-        )
-        nodes_affected = int(count_result[0]["count"]) if count_result else 0
-
         # Migrate all AgentNode.constantInput->model to replacement
-        if nodes_affected > 0:
+        if nodes_to_migrate > 0 and replacement_model_slug:
             await tx.execute_raw(
                 """
                 UPDATE "AgentNode"
@@ -537,15 +542,24 @@
         # Delete the model (CASCADE will delete costs automatically)
         await tx.llmmodel.delete(where={"id": model_id})

+    # Build appropriate message based on whether migration happened
+    if nodes_to_migrate > 0:
+        message = (
+            f"Successfully deleted model '{deleted_display_name}' ({deleted_slug}) "
+            f"and migrated {nodes_to_migrate} workflow node(s) to '{replacement_model_slug}'."
+        )
+    else:
+        message = (
+            f"Successfully deleted model '{deleted_display_name}' ({deleted_slug}). "
+            f"No workflows were using this model."
+        )
+
     return llm_model.DeleteLlmModelResponse(
         deleted_model_slug=deleted_slug,
         deleted_model_display_name=deleted_display_name,
         replacement_model_slug=replacement_model_slug,
-        nodes_migrated=nodes_affected,
-        message=(
-            f"Successfully deleted model '{deleted_display_name}' ({deleted_slug}) "
-            f"and migrated {nodes_affected} workflow node(s) to '{replacement_model_slug}'."
-        ),
+        nodes_migrated=nodes_to_migrate,
+        message=message,
     )
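A short behavior sketch (not part of this diff) of the updated delete_model contract, using hypothetical IDs; it relies only on the function signature and response fields changed above:

from backend.server.v2.llm.db import delete_model


async def remove_model(model_id: str) -> None:
    try:
        # Succeeds directly when no AgentNode references the model's slug.
        result = await delete_model(model_id)
        print(result.message)
    except ValueError:
        # Nodes still reference the model (or the replacement was invalid):
        # retry with an enabled replacement slug so the nodes are migrated in
        # the same transaction that deletes the model.
        result = await delete_model(model_id, replacement_model_slug="gpt-4o")
        print(f"Migrated {result.nodes_migrated} node(s); {result.message}")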
diff --git a/autogpt_platform/frontend/src/app/(platform)/admin/llms/actions.ts b/autogpt_platform/frontend/src/app/(platform)/admin/llms/actions.ts
index dbab5d1c24..4d87149ff8 100644
--- a/autogpt_platform/frontend/src/app/(platform)/admin/llms/actions.ts
+++ b/autogpt_platform/frontend/src/app/(platform)/admin/llms/actions.ts
@@ -215,11 +215,10 @@ export async function toggleLlmModelAction(formData: FormData): Promise {
 export async function deleteLlmModelAction(formData: FormData): Promise {
   const modelId = String(formData.get("model_id"));
   const rawReplacement = formData.get("replacement_model_slug");
-
-  if (rawReplacement == null || String(rawReplacement).trim() === "") {
-    throw new Error("Replacement model is required");
-  }
-  const replacementModelSlug = String(rawReplacement).trim();
+  const replacementModelSlug =
+    rawReplacement && String(rawReplacement).trim()
+      ? String(rawReplacement).trim()
+      : undefined;

   const response = await deleteV2DeleteLlmModelAndMigrateWorkflows(modelId, {
     replacement_model_slug: replacementModelSlug,
diff --git a/autogpt_platform/frontend/src/app/(platform)/admin/llms/components/DeleteModelModal.tsx b/autogpt_platform/frontend/src/app/(platform)/admin/llms/components/DeleteModelModal.tsx
index 183ecb1327..82a22eb1fd 100644
--- a/autogpt_platform/frontend/src/app/(platform)/admin/llms/components/DeleteModelModal.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/admin/llms/components/DeleteModelModal.tsx
@@ -28,6 +28,9 @@ export function DeleteModelModal({
     (m) => m.id !== model.id && m.is_enabled,
   );

+  // Check if migration is required (has blocks using this model)
+  const requiresMigration = usageCount !== null && usageCount > 0;
+
   async function fetchUsage() {
     setUsageLoading(true);
     setUsageError(null);
@@ -57,6 +60,15 @@ export function DeleteModelModal({
     }
   }

+  // Determine if delete button should be enabled
+  const canDelete =
+    !isDeleting &&
+    !usageLoading &&
+    usageCount !== null &&
+    (requiresMigration
+      ? selectedReplacement && replacementOptions.length > 0
+      : true);
+
   return (
-            This action cannot be undone. All workflows using this model will be
-            migrated to the replacement model you select.
+            {requiresMigration
+              ? "This action cannot be undone. All workflows using this model will be migrated to the replacement model you select."
+              : "This action cannot be undone."}
@@ -117,10 +130,18 @@ export function DeleteModelModal({
               currently use this model
             )}
-
-            All workflows currently using this model will be automatically
-            updated to use the replacement model you choose below.
-
+            {requiresMigration && (
+              All workflows currently using this model will be
+              automatically updated to use the replacement model you
+              choose below.
+            )}
+            {!usageLoading && usageCount === 0 && (
+              No workflows are using this model. It can be safely deleted.
+            )}
@@ -183,14 +206,14 @@ export function DeleteModelModal({ type="submit" variant="primary" size="small" - disabled={ - !selectedReplacement || - isDeleting || - replacementOptions.length === 0 - } + disabled={!canDelete} className="bg-destructive text-destructive-foreground hover:bg-destructive/90" > - {isDeleting ? "Deleting..." : "Delete and Migrate"} + {isDeleting + ? "Deleting..." + : requiresMigration + ? "Delete and Migrate" + : "Delete"} diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json index df8ee7fac2..733ba67625 100644 --- a/autogpt_platform/frontend/src/app/api/openapi.json +++ b/autogpt_platform/frontend/src/app/api/openapi.json @@ -4190,7 +4190,7 @@ "tags": ["v2", "admin", "llm", "llm", "admin"], "summary": "Get creator details", "description": "Get details of a specific model creator.", - "operationId": "getV2GetLlmCreatorDetails", + "operationId": "getV2Get creator details", "security": [{ "HTTPBearerJWT": [] }], "parameters": [ { @@ -4421,6 +4421,33 @@ "anyOf": [{ "type": "string" }, { "type": "null" }], "title": "Provider Id" } + }, + { + "name": "page", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "minimum": 1, + "description": "Page number (1-indexed)", + "default": 1, + "title": "Page" + }, + "description": "Page number (1-indexed)" + }, + { + "name": "page_size", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "maximum": 100, + "minimum": 1, + "description": "Number of models per page", + "default": 50, + "title": "Page Size" + }, + "description": "Number of models per page" } ], "responses": { @@ -4485,7 +4512,7 @@ "delete": { "tags": ["v2", "admin", "llm", "llm", "admin"], "summary": "Delete LLM model and migrate workflows", - "description": "Delete a model and automatically migrate all workflows using it to a replacement model.\n\nThis endpoint:\n1. Validates the replacement model exists and is enabled\n2. Counts how many workflow nodes use the model being deleted\n3. Updates all AgentNode.constantInput->model fields to the replacement\n4. Deletes the model record\n5. Refreshes all caches and notifies executors\n\nExample: DELETE /admin/llm/models/{id}?replacement_model_slug=gpt-4o", + "description": "Delete a model and optionally migrate workflows using it to a replacement model.\n\nIf no workflows are using this model, it can be deleted without providing a\nreplacement. If workflows exist, replacement_model_slug is required.\n\nThis endpoint:\n1. Counts how many workflow nodes use the model being deleted\n2. If nodes exist, validates the replacement model and migrates them\n3. Deletes the model record\n4. 
diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json
index df8ee7fac2..733ba67625 100644
--- a/autogpt_platform/frontend/src/app/api/openapi.json
+++ b/autogpt_platform/frontend/src/app/api/openapi.json
@@ -4190,7 +4190,7 @@
         "tags": ["v2", "admin", "llm", "llm", "admin"],
         "summary": "Get creator details",
         "description": "Get details of a specific model creator.",
-        "operationId": "getV2GetLlmCreatorDetails",
+        "operationId": "getV2Get creator details",
         "security": [{ "HTTPBearerJWT": [] }],
         "parameters": [
           {
@@ -4421,6 +4421,33 @@
              "anyOf": [{ "type": "string" }, { "type": "null" }],
              "title": "Provider Id"
            }
+          },
+          {
+            "name": "page",
+            "in": "query",
+            "required": false,
+            "schema": {
+              "type": "integer",
+              "minimum": 1,
+              "description": "Page number (1-indexed)",
+              "default": 1,
+              "title": "Page"
+            },
+            "description": "Page number (1-indexed)"
+          },
+          {
+            "name": "page_size",
+            "in": "query",
+            "required": false,
+            "schema": {
+              "type": "integer",
+              "maximum": 100,
+              "minimum": 1,
+              "description": "Number of models per page",
+              "default": 50,
+              "title": "Page Size"
+            },
+            "description": "Number of models per page"
          }
        ],
        "responses": {
@@ -4485,7 +4512,7 @@
      "delete": {
        "tags": ["v2", "admin", "llm", "llm", "admin"],
        "summary": "Delete LLM model and migrate workflows",
-        "description": "Delete a model and automatically migrate all workflows using it to a replacement model.\n\nThis endpoint:\n1. Validates the replacement model exists and is enabled\n2. Counts how many workflow nodes use the model being deleted\n3. Updates all AgentNode.constantInput->model fields to the replacement\n4. Deletes the model record\n5. Refreshes all caches and notifies executors\n\nExample: DELETE /admin/llm/models/{id}?replacement_model_slug=gpt-4o",
+        "description": "Delete a model and optionally migrate workflows using it to a replacement model.\n\nIf no workflows are using this model, it can be deleted without providing a\nreplacement. If workflows exist, replacement_model_slug is required.\n\nThis endpoint:\n1. Counts how many workflow nodes use the model being deleted\n2. If nodes exist, validates the replacement model and migrates them\n3. Deletes the model record\n4. Refreshes all caches and notifies executors\n\nExample: DELETE /admin/llm/models/{id}?replacement_model_slug=gpt-4o\nExample (no usage): DELETE /admin/llm/models/{id}",
        "operationId": "deleteV2Delete llm model and migrate workflows",
        "security": [{ "HTTPBearerJWT": [] }],
        "parameters": [
@@ -4498,13 +4525,13 @@
          {
            "name": "replacement_model_slug",
            "in": "query",
-            "required": true,
+            "required": false,
            "schema": {
-              "type": "string",
-              "description": "Slug of the model to migrate existing workflows to",
+              "anyOf": [{ "type": "string" }, { "type": "null" }],
+              "description": "Slug of the model to migrate existing workflows to (required only if workflows use this model)",
              "title": "Replacement Model Slug"
            },
-            "description": "Slug of the model to migrate existing workflows to"
+            "description": "Slug of the model to migrate existing workflows to (required only if workflows use this model)"
          }
        ],
        "responses": {
@@ -4579,7 +4606,7 @@
      "patch": {
        "tags": ["v2", "admin", "llm", "llm", "admin"],
        "summary": "Toggle LLM model availability",
-        "description": "Toggle a model's enabled status, optionally migrating workflows when disabling.\n\nIf disabling a model and `migrate_to_slug` is provided, all workflows using\nthis model will be migrated to the specified replacement model before disabling.\nA migration record is created which can be reverted later using the revert endpoint.\n\nOptional fields:\n- `migration_reason`: Reason for the migration (e.g., \"Provider outage\")\n- `custom_credit_cost`: Custom pricing during the migration period",
+        "description": "Toggle a model's enabled status, optionally migrating workflows when disabling.\n\nIf disabling a model and `migrate_to_slug` is provided, all workflows using\nthis model will be migrated to the specified replacement model before disabling.\nA migration record is created which can be reverted later using the revert endpoint.\n\nOptional fields:\n- `migration_reason`: Reason for the migration (e.g., \"Provider outage\")\n- `custom_credit_cost`: Custom pricing override for billing during migration",
        "operationId": "patchV2Toggle llm model availability",
        "security": [{ "HTTPBearerJWT": [] }],
        "parameters": [
@@ -4860,6 +4887,36 @@
        "summary": "List Models",
        "description": "List all enabled LLM models available to users.",
        "operationId": "getV2ListModels",
+        "security": [{ "HTTPBearerJWT": [] }],
+        "parameters": [
+          {
+            "name": "page",
+            "in": "query",
+            "required": false,
+            "schema": {
+              "type": "integer",
+              "minimum": 1,
+              "description": "Page number (1-indexed)",
+              "default": 1,
+              "title": "Page"
+            },
+            "description": "Page number (1-indexed)"
+          },
+          {
+            "name": "page_size",
+            "in": "query",
+            "required": false,
+            "schema": {
+              "type": "integer",
+              "maximum": 100,
+              "minimum": 1,
+              "description": "Number of models per page",
+              "default": 50,
+              "title": "Page Size"
+            },
+            "description": "Number of models per page"
+          }
+        ],
        "responses": {
          "200": {
            "description": "Successful Response",
@@ -4871,9 +4928,16 @@
          },
          "401": {
            "$ref": "#/components/responses/HTTP401NotAuthenticatedError"
+          },
+          "422": {
+            "description": "Validation Error",
+            "content": {
+              "application/json": {
+                "schema": { "$ref": "#/components/schemas/HTTPValidationError" }
+              }
+            }
          }
-        },
-        "security": [{ "HTTPBearerJWT": [] }]
+        }
      }
    },
    "/api/llm/providers": {
@@ -9090,6 +9154,20 @@
        "required": ["source_id", "sink_id", "source_name", "sink_name"],
        "title": "Link"
      },
+      "ListSessionsResponse": {
+        "properties": {
+          "sessions": {
+            "items": { "$ref": "#/components/schemas/SessionSummaryResponse" },
+            "type": "array",
+            "title": "Sessions"
+          },
+          "total": { "type": "integer", "title": "Total" }
+        },
+        "type": "object",
+        "required": ["sessions", "total"],
+        "title": "ListSessionsResponse",
+        "description": "Response model for listing chat sessions."
+      },
      "LlmCostUnit": {
        "type": "string",
        "enum": ["RUN", "TOKENS"],
@@ -9332,6 +9410,12 @@
          "items": { "$ref": "#/components/schemas/LlmModel" },
          "type": "array",
          "title": "Models"
+        },
+        "pagination": {
+          "anyOf": [
+            { "$ref": "#/components/schemas/Pagination" },
+            { "type": "null" }
+          ]
        }
      },
      "type": "object",
@@ -9406,20 +9490,6 @@
        "required": ["providers"],
        "title": "LlmProvidersResponse"
      },
-      "ListSessionsResponse": {
-        "properties": {
-          "sessions": {
-            "items": { "$ref": "#/components/schemas/SessionSummaryResponse" },
-            "type": "array",
-            "title": "Sessions"
-          },
-          "total": { "type": "integer", "title": "Total" }
-        },
-        "type": "object",
-        "required": ["sessions", "total"],
-        "title": "ListSessionsResponse",
-        "description": "Response model for listing chat sessions."
-      },
      "LogRawMetricRequest": {
        "properties": {
          "metric_name": {