Use LlmModel and simplify cache clearing

Refactor LLM handling and cache logic: instantiate an LlmModel from the registry slug and pass it to generate_model_label (renaming model_enum to model) so labels are always built from the enum. Remove the hasattr guards and clear the v2 builder caches directly during runtime state refresh, so clearing the cached providers and search results is always attempted (failures are still caught and logged). Update the AIConditionBlock test fixture to use LlmModel.default() instead of a hardcoded "gpt-4o" string. Together these changes simplify the code and standardize LlmModel usage.
Bentlybro
2026-02-10 15:32:36 +00:00
parent f2f779e54f
commit fea46a6d28
3 changed files with 8 additions and 10 deletions

@@ -194,8 +194,8 @@ async def get_execution_analytics_config(
         if first_enabled_slug is None:
             first_enabled_slug = registry_model.slug
-        model_enum = LlmModel(registry_model.slug)  # Create enum instance from slug
-        label = generate_model_label(model_enum)
+        model = LlmModel(registry_model.slug)
+        label = generate_model_label(model)
         # Add "(Recommended)" suffix to the recommended model
         if registry_model.slug == recommended_model_slug:
             label += " (Recommended)"

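The rename in this hunk is cosmetic, but constructing the enum matters: generate_model_label consumes an LlmModel member rather than a raw slug string. A minimal sketch of that contract, with an illustrative enum and label table (the real LlmModel is registry-driven, and the helper's internals are an assumption here, not the project's actual code):

    from enum import Enum

    class LlmModel(str, Enum):
        # Hypothetical members; the real enum is built from the model registry.
        GPT_4O = "gpt-4o"
        CLAUDE_3_5_SONNET = "claude-3-5-sonnet"

    # Hypothetical display-name table keyed by enum member.
    _LABELS: dict[LlmModel, str] = {
        LlmModel.GPT_4O: "GPT-4o",
        LlmModel.CLAUDE_3_5_SONNET: "Claude 3.5 Sonnet",
    }

    def generate_model_label(model: LlmModel) -> str:
        # Fall back to the raw slug when no friendly name is registered.
        return _LABELS.get(model, model.value)

    print(generate_model_label(LlmModel("gpt-4o")))  # -> GPT-4o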

@@ -39,16 +39,14 @@ async def _refresh_runtime_state() -> None:
     except Exception as e:
         logger.warning("Failed to clear /blocks cache: %s", e)
-    # Clear the v2 builder caches (if they exist)
+    # Clear the v2 builder caches
     try:
         from backend.api.features.builder import db as builder_db

-        if hasattr(builder_db, "_get_all_providers"):
-            builder_db._get_all_providers.cache_clear()
-            logger.info("Cleared v2 builder providers cache")
-        if hasattr(builder_db, "_build_cached_search_results"):
-            builder_db._build_cached_search_results.cache_clear()
-            logger.info("Cleared v2 builder search results cache")
+        builder_db._get_all_providers.cache_clear()
+        logger.info("Cleared v2 builder providers cache")
+        builder_db._build_cached_search_results.cache_clear()
+        logger.info("Cleared v2 builder search results cache")
     except Exception as e:
         logger.debug("Could not clear v2 builder cache: %s", e)

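Dropping the hasattr guards is sound assuming both builder functions are memoized with functools.lru_cache, which always attaches a cache_clear() method to the decorated function; and if either attribute were ever missing, the AttributeError would still be swallowed by the surrounding except block. A small sketch of that behavior (the function body is a stand-in for the real builder query):

    from functools import lru_cache

    @lru_cache(maxsize=1)
    def _get_all_providers() -> list[str]:
        # Stand-in for an expensive registry/database lookup.
        return ["openai", "anthropic"]

    _get_all_providers()  # first call populates the cache
    try:
        _get_all_providers.cache_clear()  # attached by lru_cache itself
    except Exception as e:
        # Mirrors the diff: any failure is logged, not raised.
        print(f"Could not clear v2 builder cache: {e}")
    _get_all_providers()  # recomputes after the clear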

@@ -83,7 +83,7 @@ class AIConditionBlock(AIBlockBase):
                 "condition": "the input is an email address",
                 "yes_value": "Valid email",
                 "no_value": "Not an email",
-                "model": "gpt-4o",  # Using string value - enum accepts any model slug dynamically
+                "model": LlmModel.default(),
                 "credentials": TEST_CREDENTIALS_INPUT,
             },
             test_credentials=TEST_CREDENTIALS,
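
The fixture now leans on the enum's default() helper instead of a hard-coded "gpt-4o" string, so the test tracks whatever the platform considers its default model. A minimal sketch of such a classmethod (illustrative; the project's actual default() may choose its member differently):

    from enum import Enum

    class LlmModel(str, Enum):
        # Hypothetical member; the real enum is registry-driven.
        GPT_4O = "gpt-4o"

        @classmethod
        def default(cls) -> "LlmModel":
            # Illustrative: return one canonical member so callers
            # never hard-code a slug string.
            return cls.GPT_4O

    assert LlmModel.default() is LlmModel("gpt-4o")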