diff --git a/autogpt_platform/backend/backend/api/features/chat/config.py b/autogpt_platform/backend/backend/api/features/chat/config.py
index dfa4608ba4..6847192cf6 100644
--- a/autogpt_platform/backend/backend/api/features/chat/config.py
+++ b/autogpt_platform/backend/backend/api/features/chat/config.py
@@ -27,15 +27,6 @@ class ChatConfig(BaseSettings):
     session_ttl: int = Field(default=43200, description="Session TTL in seconds")
 
     # Streaming Configuration
-    # Only used by the non-SDK fallback path (Anthropic direct).
-    # The SDK path passes full history and relies on SDK's built-in compaction.
-    max_context_messages: int = Field(
-        default=100,
-        ge=1,
-        le=500,
-        description="Max context messages for non-SDK fallback path only",
-    )
-
     stream_timeout: int = Field(default=300, description="Stream timeout in seconds")
     max_retries: int = Field(
         default=3,
diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py
index da18421b98..e6144a75b4 100644
--- a/autogpt_platform/backend/backend/api/features/chat/service.py
+++ b/autogpt_platform/backend/backend/api/features/chat/service.py
@@ -368,7 +368,6 @@ async def stream_chat_completion(
 
     Raises:
         NotFoundError: If session_id is invalid
-        ValueError: If max_context_messages is exceeded
     """
     completion_start = time.monotonic()
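
Net effect, as a minimal sketch reconstructed only from the context lines visible in the hunks above (the real config.py defines more fields and its own imports; assuming pydantic v2 with the separate pydantic-settings package):

    # Sketch: ChatConfig after max_context_messages is dropped.
    # Only fields visible as diff context are shown; the actual class is larger.
    from pydantic import Field
    from pydantic_settings import BaseSettings

    class ChatConfig(BaseSettings):
        session_ttl: int = Field(default=43200, description="Session TTL in seconds")

        # Streaming Configuration
        stream_timeout: int = Field(
            default=300, description="Stream timeout in seconds"
        )
        max_retries: int = Field(default=3)  # remaining Field arguments truncated in the hunk above

Per the removed comment, only the non-SDK fallback path (Anthropic direct) ever read max_context_messages, while the SDK path passes full history and relies on the SDK's built-in compaction; accordingly, stream_chat_completion no longer documents a ValueError for exceeding it.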