diff --git a/autogpt_platform/backend/backend/api/features/chat/config.py b/autogpt_platform/backend/backend/api/features/chat/config.py
index 0b37e42df8..808692f97f 100644
--- a/autogpt_platform/backend/backend/api/features/chat/config.py
+++ b/autogpt_platform/backend/backend/api/features/chat/config.py
@@ -93,6 +93,12 @@ class ChatConfig(BaseSettings):
         description="Name of the prompt in Langfuse to fetch",
     )
 
+    # Extended thinking configuration for Claude models
+    thinking_enabled: bool = Field(
+        default=True,
+        description="Enable adaptive thinking for Claude models via OpenRouter",
+    )
+
     @field_validator("api_key", mode="before")
     @classmethod
     def get_api_key(cls, v):
diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py
index 49e70265fa..072ea88fd5 100644
--- a/autogpt_platform/backend/backend/api/features/chat/service.py
+++ b/autogpt_platform/backend/backend/api/features/chat/service.py
@@ -1066,6 +1066,10 @@ async def _stream_chat_chunks(
             :128
         ]  # OpenRouter limit
 
+    # Enable adaptive thinking for Anthropic models via OpenRouter
+    if config.thinking_enabled and "anthropic" in model.lower():
+        extra_body["reasoning"] = {"enabled": True}
+
     api_call_start = time_module.perf_counter()
     stream = await client.chat.completions.create(
         model=model,
@@ -1829,6 +1833,10 @@ async def _generate_llm_continuation(
     if session_id:
         extra_body["session_id"] = session_id[:128]
 
+    # Enable adaptive thinking for Anthropic models via OpenRouter
+    if config.thinking_enabled and "anthropic" in config.model.lower():
+        extra_body["reasoning"] = {"enabled": True}
+
     retry_count = 0
     last_error: Exception | None = None
     response = None
@@ -1959,6 +1967,10 @@ async def _generate_llm_continuation_with_streaming(
     if session_id:
         extra_body["session_id"] = session_id[:128]
 
+    # Enable adaptive thinking for Anthropic models via OpenRouter
+    if config.thinking_enabled and "anthropic" in config.model.lower():
+        extra_body["reasoning"] = {"enabled": True}
+
     # Make streaming LLM call (no tools - just text response)
     from typing import cast
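
For context on how the new `extra_body["reasoning"]` field reaches the provider: this is a minimal standalone sketch, not the diff's actual call sites (`_stream_chat_chunks` and the continuation helpers). Assumptions not present in the diff are the `openai` v1 SDK, an `OPENROUTER_API_KEY` environment variable, and an illustrative model slug.

```python
# Minimal sketch of how extra_body["reasoning"] is transmitted to OpenRouter.
# Assumptions (not part of this diff): openai>=1.0 SDK installed, an
# OPENROUTER_API_KEY environment variable, and a hypothetical model slug.
import asyncio
import os

from openai import AsyncOpenAI

client = AsyncOpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key=os.environ["OPENROUTER_API_KEY"],
)


async def demo() -> None:
    model = "anthropic/claude-sonnet-4"  # hypothetical slug, for illustration only
    extra_body: dict = {}
    # Same guard as the diff: only Anthropic models get the reasoning flag.
    if "anthropic" in model.lower():
        extra_body["reasoning"] = {"enabled": True}
    stream = await client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": "Say hello."}],
        stream=True,
        # The OpenAI SDK merges extra_body into the request JSON, which is how
        # OpenRouter-specific fields such as "reasoning" are carried along.
        extra_body=extra_body,
    )
    async for chunk in stream:
        delta = chunk.choices[0].delta
        if delta.content:
            print(delta.content, end="", flush=True)


if __name__ == "__main__":
    asyncio.run(demo())
```

Design-wise, gating on `config.thinking_enabled` gives a single config kill switch, while the substring check on the model name keeps the flag from being sent to providers that would not understand it.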