diff --git a/autogpt_platform/backend/backend/api/features/admin/llm_routes.py b/autogpt_platform/backend/backend/api/features/admin/llm_routes.py
index 9d7ac586e4..94c217b04d 100644
--- a/autogpt_platform/backend/backend/api/features/admin/llm_routes.py
+++ b/autogpt_platform/backend/backend/api/features/admin/llm_routes.py
@@ -19,41 +19,45 @@ router = fastapi.APIRouter(
 async def _refresh_runtime_state() -> None:
     """Refresh the LLM registry and clear all related caches to ensure real-time updates."""
     logger.info("Refreshing LLM registry runtime state...")
-
-    # Refresh registry from database
-    await llm_registry.refresh_llm_registry()
-    refresh_llm_costs()
-
-    # Clear block schema caches so they're regenerated with updated model options
-    from backend.data.block import BlockSchema
-
-    BlockSchema.clear_all_schema_caches()
-    logger.info("Cleared all block schema caches")
-
-    # Clear the /blocks endpoint cache so frontend gets updated schemas
-    try:
-        from backend.api.features.v1 import _get_cached_blocks
-
-        _get_cached_blocks.cache_clear()
-        logger.info("Cleared /blocks endpoint cache")
-    except Exception as e:
-        logger.warning("Failed to clear /blocks cache: %s", e)
-
-    # Clear the v2 builder providers cache (if it exists)
-    try:
-        from backend.api.features.builder import db as builder_db
-
-        if hasattr(builder_db, "_get_all_providers"):
-            builder_db._get_all_providers.cache_clear()
-            logger.info("Cleared v2 builder providers cache")
-    except Exception as e:
-        logger.debug("Could not clear v2 builder cache: %s", e)
-
-    # Notify all executor services to refresh their registry cache
-    from backend.data.llm_registry import publish_registry_refresh_notification
-
-    await publish_registry_refresh_notification()
-    logger.info("Published registry refresh notification")
+    try:
+        # Refresh registry from database
+        await llm_registry.refresh_llm_registry()
+        refresh_llm_costs()
+
+        # Clear block schema caches so they're regenerated with updated model options
+        from backend.data.block import BlockSchema
+
+        BlockSchema.clear_all_schema_caches()
+        logger.info("Cleared all block schema caches")
+
+        # Clear the /blocks endpoint cache so frontend gets updated schemas
+        try:
+            from backend.api.features.v1 import _get_cached_blocks
+
+            _get_cached_blocks.cache_clear()
+            logger.info("Cleared /blocks endpoint cache")
+        except Exception as e:
+            logger.warning("Failed to clear /blocks cache: %s", e)
+
+        # Clear the v2 builder providers cache (if it exists)
+        try:
+            from backend.api.features.builder import db as builder_db
+
+            if hasattr(builder_db, "_get_all_providers"):
+                builder_db._get_all_providers.cache_clear()
+                logger.info("Cleared v2 builder providers cache")
+        except Exception as e:
+            logger.debug("Could not clear v2 builder cache: %s", e)
+
+        # Notify all executor services to refresh their registry cache
+        from backend.data.llm_registry import publish_registry_refresh_notification
+
+        await publish_registry_refresh_notification()
+        logger.info("Published registry refresh notification")
+    except Exception as exc:
+        logger.exception(
+            "LLM runtime state refresh failed; caches may be stale: %s", exc
+        )


 @router.get(
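The refactor above changes failure semantics: every refresh step now sits inside one outer try/except, so a failure in any step is logged via `logger.exception` instead of propagating to the admin route. A minimal sketch of the same pattern, using a hypothetical `lru_cache`-backed blocks cache rather than the actual AutoGPT helpers:

```python
import functools
import logging

logger = logging.getLogger(__name__)


@functools.lru_cache(maxsize=1)
def get_cached_blocks() -> tuple[str, ...]:
    # Hypothetical stand-in for the real /blocks endpoint cache.
    return ("block-a", "block-b")


async def refresh_runtime_state() -> None:
    """Refresh registry state; log, rather than raise, on any failure."""
    try:
        # Best-effort inner step: a failure here is downgraded to a
        # warning so the remaining refresh steps still run.
        try:
            get_cached_blocks.cache_clear()
            logger.info("Cleared blocks cache")
        except Exception as e:
            logger.warning("Failed to clear blocks cache: %s", e)
    except Exception as exc:
        logger.exception("Refresh failed; caches may be stale: %s", exc)
```

One tradeoff worth noting: because the outer handler swallows the exception, the admin endpoint will report success even when the refresh failed, and the "caches may be stale" log line becomes the only signal.
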
diff --git a/autogpt_platform/backend/backend/data/llm_registry/registry.py b/autogpt_platform/backend/backend/data/llm_registry/registry.py
index 039614d1a7..917c500f83 100644
--- a/autogpt_platform/backend/backend/data/llm_registry/registry.py
+++ b/autogpt_platform/backend/backend/data/llm_registry/registry.py
@@ -202,7 +202,9 @@ def _refresh_cached_schema() -> None:

     # Build new structures
     new_options = _build_schema_options()
-    new_mapping = {slug: entry.metadata.provider for slug, entry in _dynamic_models.items()}
+    new_mapping = {
+        slug: entry.metadata.provider for slug, entry in _dynamic_models.items()
+    }
     for slug, metadata in _static_metadata.items():
         new_mapping.setdefault(slug, metadata.provider)

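The change above is pure line-length reformatting, but the surrounding context lines show the merge rule worth keeping in mind: dynamic (database-backed) models are written into the mapping first, and `setdefault` lets static metadata fill only the slugs they did not claim, so dynamic entries always win. A toy illustration with invented values:

```python
# Dynamic (DB-backed) entries are inserted first; static metadata
# only fills slugs the dynamic models did not already claim.
dynamic = {"model-x": "provider-dyn"}
static = {"model-x": "provider-static", "model-y": "provider-y"}

new_mapping = {slug: provider for slug, provider in dynamic.items()}
for slug, provider in static.items():
    new_mapping.setdefault(slug, provider)  # no-op if slug already mapped

assert new_mapping == {"model-x": "provider-dyn", "model-y": "provider-y"}
```
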
diff --git a/autogpt_platform/frontend/src/app/(platform)/admin/llms/actions.ts b/autogpt_platform/frontend/src/app/(platform)/admin/llms/actions.ts
index e1d0e4a432..d6b73e162c 100644
--- a/autogpt_platform/frontend/src/app/(platform)/admin/llms/actions.ts
+++ b/autogpt_platform/frontend/src/app/(platform)/admin/llms/actions.ts
@@ -32,6 +32,7 @@ import type { LlmMigrationsResponse } from "@/app/api/__generated__/models/llmMigrationsResponse";
 import type { LlmCreatorsResponse } from "@/app/api/__generated__/models/llmCreatorsResponse";
 import type { UpsertLlmCreatorRequest } from "@/app/api/__generated__/models/upsertLlmCreatorRequest";
 import type { LlmModelUsageResponse } from "@/app/api/__generated__/models/llmModelUsageResponse";
+import { LlmCostUnit } from "@/app/api/__generated__/models/llmCostUnit";

 const ADMIN_LLM_PATH = "/admin/llms";

@@ -121,6 +122,7 @@ export async function createLlmModelAction(formData: FormData) {
       metadata: {},
       costs: [
         {
+          unit: (formData.get("cost_unit") as LlmCostUnit) || LlmCostUnit.RUN,
           credit_cost: Number(formData.get("credit_cost") || 0),
           credential_provider:
             provider.default_credential_provider || provider.name,
@@ -165,6 +167,7 @@ export async function updateLlmModelAction(formData: FormData) {
       costs: formData.get("credit_cost")
         ? [
             {
+              unit: (formData.get("cost_unit") as LlmCostUnit) || LlmCostUnit.RUN,
               credit_cost: Number(formData.get("credit_cost")),
               credential_provider: String(
                 formData.get("credential_provider") || "",
diff --git a/autogpt_platform/frontend/src/app/(platform)/admin/llms/components/EditModelModal.tsx b/autogpt_platform/frontend/src/app/(platform)/admin/llms/components/EditModelModal.tsx
index 9091afdda6..9362b6d6bf 100644
--- a/autogpt_platform/frontend/src/app/(platform)/admin/llms/components/EditModelModal.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/admin/llms/components/EditModelModal.tsx
@@ -185,8 +185,9 @@ export function EditModelModal({
- {/* Hidden defaults for credential_type */}
+ {/* Hidden defaults for credential_type and cost_unit */}
+