From 9fc5d465dab5d4656dbf74d95008ce147f48d843 Mon Sep 17 00:00:00 2001 From: Bentlybro Date: Fri, 13 Feb 2026 11:56:47 +0000 Subject: [PATCH] Add BlockSchema cache clearing & fix imports Make BlockSchema.cached_jsonschema default to None and add clear_schema_cache and clear_all_schema_caches (recursive) so JSON schemas can be invalidated and regenerated. Update modules (rest_api, admin llm_routes, executor llm_registry_init) to import BlockSchema from backend.blocks._base so the new cache-clearing API is used when refreshing LLM costs/discriminator mappings. Also switch the cache sentinel from {} to None so an unset cache is unambiguous: {} is falsy in Python and could be mistaken for a legitimately cached empty schema. --- .../backend/api/features/admin/llm_routes.py | 2 +- .../backend/backend/api/rest_api.py | 2 +- .../backend/backend/blocks/_base.py | 21 ++++++++++++++++++- .../backend/executor/llm_registry_init.py | 3 ++- 4 files changed, 24 insertions(+), 4 deletions(-) diff --git a/autogpt_platform/backend/backend/api/features/admin/llm_routes.py b/autogpt_platform/backend/backend/api/features/admin/llm_routes.py index c0837ce355..f2476cd1dc 100644 --- a/autogpt_platform/backend/backend/api/features/admin/llm_routes.py +++ b/autogpt_platform/backend/backend/api/features/admin/llm_routes.py @@ -25,7 +25,7 @@ async def _refresh_runtime_state() -> None: await refresh_llm_costs() # Clear block schema caches so they're regenerated with updated model options - from backend.data.block import BlockSchema + from backend.blocks._base import BlockSchema BlockSchema.clear_all_schema_caches() logger.info("Cleared all block schema caches") diff --git a/autogpt_platform/backend/backend/api/rest_api.py b/autogpt_platform/backend/backend/api/rest_api.py index da125ad048..da87d53391 100644 --- a/autogpt_platform/backend/backend/api/rest_api.py +++ b/autogpt_platform/backend/backend/api/rest_api.py @@ -123,7 +123,7 @@ async def lifespan_context(app: fastapi.FastAPI): await refresh_llm_costs() # Clear block schema caches so they're regenerated with 
updated discriminator_mapping - from backend.data.block import BlockSchema + from backend.blocks._base import BlockSchema BlockSchema.clear_all_schema_caches() diff --git a/autogpt_platform/backend/backend/blocks/_base.py b/autogpt_platform/backend/backend/blocks/_base.py index 0ba4daec40..2fff25f369 100644 --- a/autogpt_platform/backend/backend/blocks/_base.py +++ b/autogpt_platform/backend/backend/blocks/_base.py @@ -133,7 +133,26 @@ class BlockInfo(BaseModel): class BlockSchema(BaseModel): - cached_jsonschema: ClassVar[dict[str, Any]] + cached_jsonschema: ClassVar[dict[str, Any] | None] = None + + @classmethod + def clear_schema_cache(cls) -> None: + """Clear the cached JSON schema for this class.""" + # Use None instead of {} so a cleared cache is unambiguous ({} is falsy, but could be mistaken for a cached empty schema) + cls.cached_jsonschema = None # type: ignore + + @staticmethod + def clear_all_schema_caches() -> None: + """Clear cached JSON schemas for all BlockSchema subclasses.""" + + def clear_recursive(cls: type) -> None: + """Recursively clear cache for class and all subclasses.""" + if hasattr(cls, "clear_schema_cache"): + cls.clear_schema_cache() + for subclass in cls.__subclasses__(): + clear_recursive(subclass) + + clear_recursive(BlockSchema) @classmethod def jsonschema(cls) -> dict[str, Any]: diff --git a/autogpt_platform/backend/backend/executor/llm_registry_init.py b/autogpt_platform/backend/backend/executor/llm_registry_init.py index e55795d8af..d3250f1912 100644 --- a/autogpt_platform/backend/backend/executor/llm_registry_init.py +++ b/autogpt_platform/backend/backend/executor/llm_registry_init.py @@ -7,8 +7,9 @@ and subscribing to real-time updates via Redis pub/sub. 
import logging +from backend.blocks._base import BlockSchema from backend.data import db, llm_registry -from backend.data.block import BlockSchema, initialize_blocks +from backend.data.block import initialize_blocks from backend.data.block_cost_config import refresh_llm_costs from backend.data.llm_registry import subscribe_to_registry_refresh