Improve LLM model default selection and admin actions

Backend selection of the default LLM model now prioritizes the recommended model, with clearer fallbacks and error handling when no models are enabled. The database migration now enforces at most one recommended model via a unique partial index. Frontend admin actions for LLM models and providers now read checkbox fields correctly (an unchecked checkbox is simply absent from FormData, so the previous !== "off" checks always evaluated to true), and deleteLlmModelAction now returns Promise<void>.
Bentlybro
2026-01-09 15:18:54 +00:00
parent 632ef24408
commit 6cf28e58d3
5 changed files with 39 additions and 17 deletions
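
The selection order described above, and implemented in the registry diff below, boils down to the following minimal sketch. It is illustrative only: the is_recommended field name is assumed from the new "isRecommended" column, and the real registry code may structure the checks differently.

# Minimal sketch of the new default-model selection order (not the actual
# registry code; is_recommended is an assumed field name).
def pick_default_slug(models_by_slug, fallback_slug="gpt-4o"):
    # 1. A model explicitly marked as recommended wins.
    for model in models_by_slug.values():
        if model.is_recommended and model.is_enabled:
            return model.slug
    # 2. Otherwise prefer the legacy default, if it is registered and enabled.
    fallback = models_by_slug.get(fallback_slug)
    if fallback and fallback.is_enabled:
        return fallback_slug
    # 3. Otherwise the first enabled model, ordered case-insensitively by display name.
    for model in sorted(models_by_slug.values(), key=lambda m: m.display_name.lower()):
        if model.is_enabled:
            return model.slug
    # 4. Nothing enabled: callers (such as the startup migration) must handle None.
    return None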

View File

@@ -128,9 +128,11 @@ async def lifespan_context(app: fastapi.FastAPI):
     # migrate_llm_models uses registry default model
     from backend.blocks.llm import LlmModel
-    await backend.data.graph.migrate_llm_models(
-        LlmModel(llm_registry.get_default_model_slug())
-    )
+    default_model_slug = llm_registry.get_default_model_slug()
+    if default_model_slug:
+        await backend.data.graph.migrate_llm_models(LlmModel(default_model_slug))
+    else:
+        logger.warning("Skipping LLM model migration: no default model available")
     await backend.integrations.webhooks.utils.migrate_legacy_triggered_graphs()

     with launch_darkly_context():

View File

@@ -159,12 +159,18 @@ class LlmModel(str, metaclass=LlmModelMeta):
         """
         Get the default model from the registry.
-        Returns the preferred default model (gpt-4o if available and enabled,
-        otherwise the first enabled model from the registry).
+        Returns the recommended model if set, otherwise gpt-4o if available
+        and enabled, otherwise the first enabled model from the registry.
+        Falls back to "gpt-4o" if registry is empty (e.g., at module import time).
         """
         from backend.data.llm_registry import get_default_model_slug
-        return cls(get_default_model_slug())
+        slug = get_default_model_slug()
+        if slug is None:
+            # Registry is empty (e.g., at module import time before DB connection).
+            # Fall back to gpt-4o for backward compatibility.
+            slug = "gpt-4o"
+        return cls(slug)

     @property
     def metadata(self) -> ModelMetadata:

View File

@@ -330,12 +330,13 @@ def get_model_info(model_slug: str) -> RegistryModel | None:
     return _dynamic_models.get(model_slug)

-def get_default_model_slug() -> str:
+def get_default_model_slug() -> str | None:
     """
     Get the default model slug to use for block defaults.
     Returns the recommended model if set, otherwise falls back to "gpt-4o"
-    if enabled, then first enabled model, or "gpt-4o" as final fallback.
+    if available and enabled, then first enabled model alphabetically.
+    Returns None if no models are available or enabled.
     """
     # First, check for recommended model
     for model in _dynamic_models.values():
@@ -348,10 +349,23 @@ def get_default_model_slug() -> str:
     if fallback_model and fallback_model.is_enabled:
         return fallback_slug
-    # Find first enabled model
+    # Find first enabled model alphabetically
     for model in sorted(_dynamic_models.values(), key=lambda m: m.display_name.lower()):
         if model.is_enabled:
+            logger.warning(
+                "Preferred model '%s' not available, using '%s' as default",
+                fallback_slug,
+                model.slug,
+            )
             return model.slug
-    # Final fallback for backwards compatibility
-    return fallback_slug
+    # No enabled models available
+    if _dynamic_models:
+        logger.error(
+            "No enabled models found in registry (%d models registered but all disabled)",
+            len(_dynamic_models),
+        )
+    else:
+        logger.error("No models registered in LLM registry")
+    return None
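
The alphabetical fallback above sorts case-insensitively on display_name. A tiny standalone illustration of that ordering, with SimpleNamespace standing in for RegistryModel and placeholder model entries:

from types import SimpleNamespace

models = [
    SimpleNamespace(slug="llama-3.1-8b", display_name="Llama 3.1 8B", is_enabled=True),
    SimpleNamespace(slug="claude-3-5-haiku", display_name="Claude 3.5 Haiku", is_enabled=False),
    SimpleNamespace(slug="gemini-1.5-flash", display_name="Gemini 1.5 Flash", is_enabled=True),
]

# Case-insensitive sort on display_name, then take the first enabled model,
# mirroring the fallback loop in get_default_model_slug.
first_enabled = next(
    (m for m in sorted(models, key=lambda m: m.display_name.lower()) if m.is_enabled),
    None,
)
print(first_enabled.slug if first_enabled else None)  # -> gemini-1.5-flash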

View File

@@ -6,5 +6,6 @@ ALTER TABLE "LlmModel" ADD COLUMN "isRecommended" BOOLEAN NOT NULL DEFAULT false
 -- Set gpt-4o-mini as the default recommended model (if it exists)
 UPDATE "LlmModel" SET "isRecommended" = true WHERE "slug" = 'gpt-4o-mini' AND "isEnabled" = true;
--- Create index for quick lookup of recommended model
-CREATE INDEX "LlmModel_isRecommended_idx" ON "LlmModel" ("isRecommended") WHERE "isRecommended" = true;
+-- Create unique partial index to enforce only one recommended model at the database level
+-- This prevents multiple rows from having isRecommended = true
+CREATE UNIQUE INDEX "LlmModel_single_recommended_idx" ON "LlmModel" ("isRecommended") WHERE "isRecommended" = true;
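
With the unique partial index in place, at most one row can have isRecommended = true, so marking a second model as recommended while another is still flagged fails with a unique-violation error. A minimal sketch of the resulting update pattern, using asyncpg directly for illustration (DSN and slug are placeholders; the application itself goes through its own data layer):

import asyncio

import asyncpg


async def set_recommended(dsn: str, new_slug: str) -> None:
    conn = await asyncpg.connect(dsn)
    try:
        async with conn.transaction():
            # Clear the current recommendation first; flipping a second row to true
            # while another is still true would raise UniqueViolationError because
            # of LlmModel_single_recommended_idx.
            await conn.execute(
                'UPDATE "LlmModel" SET "isRecommended" = false WHERE "isRecommended" = true'
            )
            await conn.execute(
                'UPDATE "LlmModel" SET "isRecommended" = true WHERE "slug" = $1',
                new_slug,
            )
    finally:
        await conn.close()


# asyncio.run(set_recommended("postgresql://user:pass@localhost/db", "gpt-4o-mini"))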

View File

@@ -60,7 +60,7 @@ export async function createLlmProviderAction(formData: FormData) {
     default_credential_id: undefined,
     default_credential_type: "api_key",
     supports_tools: formData.get("supports_tools") === "on",
-    supports_json_output: formData.get("supports_json_output") !== "off",
+    supports_json_output: formData.get("supports_json_output") === "on",
     supports_reasoning: formData.get("supports_reasoning") === "on",
     supports_parallel_tool: formData.get("supports_parallel_tool") === "on",
     metadata: {},
@@ -116,7 +116,7 @@ export async function createLlmModelAction(formData: FormData) {
     max_output_tokens: formData.get("max_output_tokens")
       ? Number(formData.get("max_output_tokens"))
       : undefined,
-    is_enabled: formData.get("is_enabled") !== "off",
+    is_enabled: formData.get("is_enabled") === "on",
     capabilities: {},
     metadata: {},
     costs: [
@@ -209,7 +209,7 @@ export async function toggleLlmModelAction(formData: FormData): Promise<void> {
   revalidatePath(ADMIN_LLM_PATH);
 }

-export async function deleteLlmModelAction(formData: FormData) {
+export async function deleteLlmModelAction(formData: FormData): Promise<void> {
   const modelId = String(formData.get("model_id"));
   const replacementModelSlug = String(formData.get("replacement_model_slug"));
@@ -224,7 +224,6 @@ export async function deleteLlmModelAction(formData: FormData) {
     throw new Error("Failed to delete model");
   }
   revalidatePath(ADMIN_LLM_PATH);
-  return response.data;
 }

 export async function fetchLlmModelUsage(