From fea46a6d283b83b7874e9fdbeb0ea50b3787d7e5 Mon Sep 17 00:00:00 2001
From: Bentlybro
Date: Tue, 10 Feb 2026 15:32:36 +0000
Subject: [PATCH] Use LlmModel and simplify cache clearing

Refactor LLM handling and cache logic:

- Instantiate a LlmModel from the registry slug and pass it to
  generate_model_label (renaming model_enum -> model) so labels are
  always built from a consistent enum instance.
- Remove the hasattr guards and clear the v2 builder caches directly
  during runtime-state refresh, so clearing the cached providers and
  search results is always attempted; any failure is caught and
  logged at debug level.
- Update the AIConditionBlock test fixture to use LlmModel.default()
  instead of a hardcoded "gpt-4o" string.

These changes simplify the code and standardize LlmModel usage.
---
 .../api/features/admin/execution_analytics_routes.py |  4 ++--
 .../backend/backend/api/features/admin/llm_routes.py | 12 +++++-------
 .../backend/backend/blocks/ai_condition.py           |  2 +-
 3 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/autogpt_platform/backend/backend/api/features/admin/execution_analytics_routes.py b/autogpt_platform/backend/backend/api/features/admin/execution_analytics_routes.py
index 82d4bd7adb..c37b742e46 100644
--- a/autogpt_platform/backend/backend/api/features/admin/execution_analytics_routes.py
+++ b/autogpt_platform/backend/backend/api/features/admin/execution_analytics_routes.py
@@ -194,8 +194,8 @@ async def get_execution_analytics_config(
         if first_enabled_slug is None:
             first_enabled_slug = registry_model.slug
 
-        model_enum = LlmModel(registry_model.slug)  # Create enum instance from slug
-        label = generate_model_label(model_enum)
+        model = LlmModel(registry_model.slug)
+        label = generate_model_label(model)
         # Add "(Recommended)" suffix to the recommended model
         if registry_model.slug == recommended_model_slug:
             label += " (Recommended)"
diff --git a/autogpt_platform/backend/backend/api/features/admin/llm_routes.py b/autogpt_platform/backend/backend/api/features/admin/llm_routes.py
index eec442e568..73f0d7a0a3 100644
--- a/autogpt_platform/backend/backend/api/features/admin/llm_routes.py
+++ b/autogpt_platform/backend/backend/api/features/admin/llm_routes.py
@@ -39,16 +39,14 @@ async def _refresh_runtime_state() -> None:
     except Exception as e:
         logger.warning("Failed to clear /blocks cache: %s", e)
 
-    # Clear the v2 builder caches (if they exist)
+    # Clear the v2 builder caches
     try:
         from backend.api.features.builder import db as builder_db
 
-        if hasattr(builder_db, "_get_all_providers"):
-            builder_db._get_all_providers.cache_clear()
-            logger.info("Cleared v2 builder providers cache")
-        if hasattr(builder_db, "_build_cached_search_results"):
-            builder_db._build_cached_search_results.cache_clear()
-            logger.info("Cleared v2 builder search results cache")
+        builder_db._get_all_providers.cache_clear()
+        logger.info("Cleared v2 builder providers cache")
+        builder_db._build_cached_search_results.cache_clear()
+        logger.info("Cleared v2 builder search results cache")
     except Exception as e:
         logger.debug("Could not clear v2 builder cache: %s", e)
 
diff --git a/autogpt_platform/backend/backend/blocks/ai_condition.py b/autogpt_platform/backend/backend/blocks/ai_condition.py
index 19852a26ef..dadc030882 100644
--- a/autogpt_platform/backend/backend/blocks/ai_condition.py
+++ b/autogpt_platform/backend/backend/blocks/ai_condition.py
@@ -83,7 +83,7 @@ class AIConditionBlock(AIBlockBase):
                 "condition": "the input is an email address",
                 "yes_value": "Valid email",
                 "no_value": "Not an email",
-                "model": "gpt-4o",  # Using string value - enum accepts any model slug dynamically
+                "model": LlmModel.default(),
                 "credentials": TEST_CREDENTIALS_INPUT,
             },
             test_credentials=TEST_CREDENTIALS,
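
Reviewer note (not part of the patch): a minimal sketch of the
label-building pattern the first and third hunks standardize. It assumes
LlmModel is a string-valued enum exposing a default() classmethod; the
members, slugs, and generate_model_label body below are illustrative
stand-ins, not the real backend code.

    from enum import Enum

    class LlmModel(str, Enum):
        # Illustrative members; the real enum is populated from the model registry.
        GPT_4O = "gpt-4o"
        CLAUDE_3_5_SONNET = "claude-3-5-sonnet"

        @classmethod
        def default(cls) -> "LlmModel":
            # Hypothetical default; stands in for whatever the platform recommends.
            return cls.GPT_4O

    def generate_model_label(model: LlmModel) -> str:
        # Hypothetical labeling rule: turn the slug into a readable title.
        return model.value.replace("-", " ").title()

    # LlmModel(slug) looks the member up by value, so a registry slug
    # round-trips into a proper enum instance before label generation.
    print(generate_model_label(LlmModel("gpt-4o")))   # Gpt 4O
    print(generate_model_label(LlmModel.default()))   # Gpt 4O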
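
Reviewer note (not part of the patch): dropping the hasattr guards is
safe when the two helpers are functools.lru_cache-decorated functions,
since lru_cache always attaches cache_clear(); the surrounding
try/except still downgrades any surprise to a debug log. A minimal
sketch under that assumption (names and bodies are illustrative, not
the real backend.api.features.builder.db module):

    import functools
    import logging

    logger = logging.getLogger(__name__)

    @functools.lru_cache(maxsize=1)
    def _get_all_providers() -> tuple[str, ...]:
        # Stands in for an expensive lookup, cached until explicitly cleared.
        return ("openai", "anthropic")

    @functools.lru_cache(maxsize=1)
    def _build_cached_search_results() -> dict[str, str]:
        return {"gpt-4o": "OpenAI GPT-4o"}

    def refresh_runtime_state() -> None:
        try:
            # cache_clear() is guaranteed on lru_cache-wrapped functions,
            # so no hasattr check is needed before calling it.
            _get_all_providers.cache_clear()
            logger.info("Cleared providers cache")
            _build_cached_search_results.cache_clear()
            logger.info("Cleared search results cache")
        except Exception as e:
            # Import or attribute problems end up here instead of crashing.
            logger.debug("Could not clear caches: %s", e)

    refresh_runtime_state()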