diff --git a/.github/workflows/platform-autogpt-deploy-dev.yaml b/.github/workflows/platform-autogpt-deploy-dev.yaml
index 6e1e23d3eb..df1e8b1172 100644
--- a/.github/workflows/platform-autogpt-deploy-dev.yaml
+++ b/.github/workflows/platform-autogpt-deploy-dev.yaml
@@ -52,7 +52,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Trigger deploy workflow
- uses: peter-evans/repository-dispatch@v3
+ uses: peter-evans/repository-dispatch@v4
with:
token: ${{ secrets.DEPLOY_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
diff --git a/.github/workflows/platform-autogpt-deploy-prod.yml b/.github/workflows/platform-autogpt-deploy-prod.yml
index 4d7c16d710..42107d4f5a 100644
--- a/.github/workflows/platform-autogpt-deploy-prod.yml
+++ b/.github/workflows/platform-autogpt-deploy-prod.yml
@@ -45,7 +45,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Trigger deploy workflow
- uses: peter-evans/repository-dispatch@v3
+ uses: peter-evans/repository-dispatch@v4
with:
token: ${{ secrets.DEPLOY_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
diff --git a/.github/workflows/platform-dev-deploy-event-dispatcher.yml b/.github/workflows/platform-dev-deploy-event-dispatcher.yml
index b5324b7c2c..1a581c55c2 100644
--- a/.github/workflows/platform-dev-deploy-event-dispatcher.yml
+++ b/.github/workflows/platform-dev-deploy-event-dispatcher.yml
@@ -82,7 +82,7 @@ jobs:
- name: Dispatch Deploy Event
if: steps.check_status.outputs.should_deploy == 'true'
- uses: peter-evans/repository-dispatch@v3
+ uses: peter-evans/repository-dispatch@v4
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
@@ -110,7 +110,7 @@ jobs:
- name: Dispatch Undeploy Event (from comment)
if: steps.check_status.outputs.should_undeploy == 'true'
- uses: peter-evans/repository-dispatch@v3
+ uses: peter-evans/repository-dispatch@v4
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
@@ -168,7 +168,7 @@ jobs:
github.event_name == 'pull_request' &&
github.event.action == 'closed' &&
steps.check_pr_close.outputs.should_undeploy == 'true'
- uses: peter-evans/repository-dispatch@v3
+ uses: peter-evans/repository-dispatch@v4
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
diff --git a/autogpt_platform/backend/backend/api/features/chat/config.py b/autogpt_platform/backend/backend/api/features/chat/config.py
index 6847192cf6..dce774c5fb 100644
--- a/autogpt_platform/backend/backend/api/features/chat/config.py
+++ b/autogpt_platform/backend/backend/api/features/chat/config.py
@@ -98,6 +98,12 @@ class ChatConfig(BaseSettings):
description="Use Claude Agent SDK for chat completions",
)
+ # Extended thinking configuration for Claude models
+ thinking_enabled: bool = Field(
+ default=True,
+ description="Enable adaptive thinking for Claude models via OpenRouter",
+ )
+
@field_validator("api_key", mode="before")
@classmethod
def get_api_key(cls, v):
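Note on the new flag: `ChatConfig` extends pydantic's `BaseSettings`, so `thinking_enabled` should be overridable from the environment without a code change. A minimal sketch of that behavior, assuming pydantic-settings' default field-name-to-env-var mapping (any `env_prefix` set in `ChatConfig.model_config` is not visible in this diff and would change the variable name):

```python
# Sketch only: a standalone model mirroring the new field, not the real ChatConfig.
import os

from pydantic import Field
from pydantic_settings import BaseSettings


class ChatConfig(BaseSettings):
    thinking_enabled: bool = Field(
        default=True,
        description="Enable adaptive thinking for Claude models via OpenRouter",
    )


# pydantic-settings matches env vars to field names case-insensitively,
# so THINKING_ENABLED=false would disable the feature at deploy time.
os.environ["THINKING_ENABLED"] = "false"
assert ChatConfig().thinking_enabled is False
```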
diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py
index 6616537d18..986730cd5d 100644
--- a/autogpt_platform/backend/backend/api/features/chat/service.py
+++ b/autogpt_platform/backend/backend/api/features/chat/service.py
@@ -1072,6 +1072,10 @@ async def _stream_chat_chunks(
:128
] # OpenRouter limit
+ # Enable adaptive thinking for Anthropic models via OpenRouter
+ if config.thinking_enabled and "anthropic" in model.lower():
+ extra_body["reasoning"] = {"enabled": True}
+
api_call_start = time_module.perf_counter()
stream = await client.chat.completions.create(
model=model,
@@ -1835,6 +1839,10 @@ async def _generate_llm_continuation(
if session_id:
extra_body["session_id"] = session_id[:128]
+ # Enable adaptive thinking for Anthropic models via OpenRouter
+ if config.thinking_enabled and "anthropic" in config.model.lower():
+ extra_body["reasoning"] = {"enabled": True}
+
retry_count = 0
last_error: Exception | None = None
response = None
@@ -1965,6 +1973,10 @@ async def _generate_llm_continuation_with_streaming(
if session_id:
extra_body["session_id"] = session_id[:128]
+ # Enable adaptive thinking for Anthropic models via OpenRouter
+ if config.thinking_enabled and "anthropic" in config.model.lower():
+ extra_body["reasoning"] = {"enabled": True}
+
# Make streaming LLM call (no tools - just text response)
from typing import cast
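For reviewers unfamiliar with the `reasoning` key: OpenRouter accepts a `reasoning` object in the request body to enable extended thinking on models that support it, and the OpenAI Python client forwards such non-standard fields via `extra_body`. A minimal sketch of the request shape these three hunks produce, assuming an OpenRouter base URL and an Anthropic model id (both illustrative, not copied from the service):

```python
from openai import AsyncOpenAI

# Illustrative client setup; the real service constructs its client elsewhere.
client = AsyncOpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key="<OPENROUTER_API_KEY>",
)


async def complete(model: str, prompt: str, thinking_enabled: bool = True) -> str:
    extra_body: dict = {}
    # Same guard as the diff: only Anthropic models get the reasoning flag.
    if thinking_enabled and "anthropic" in model.lower():
        extra_body["reasoning"] = {"enabled": True}
    response = await client.chat.completions.create(
        model=model,  # e.g. "anthropic/claude-sonnet-4.5" on OpenRouter
        messages=[{"role": "user", "content": prompt}],
        extra_body=extra_body,
    )
    return response.choices[0].message.content or ""
```

In the service itself the same `extra_body` dict already carries `session_id`, so the reasoning key composes with the existing OpenRouter metadata rather than replacing it.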
diff --git a/autogpt_platform/backend/poetry.lock b/autogpt_platform/backend/poetry.lock
index 19ee530505..53b5030da6 100644
--- a/autogpt_platform/backend/poetry.lock
+++ b/autogpt_platform/backend/poetry.lock
@@ -46,14 +46,14 @@ pycares = ">=4.9.0,<5"
[[package]]
name = "aiofiles"
-version = "24.1.0"
+version = "25.1.0"
description = "File support for asyncio."
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "aiofiles-24.1.0-py3-none-any.whl", hash = "sha256:b4ec55f4195e3eb5d7abd1bf7e061763e864dd4954231fb8539a0ef8bb8260e5"},
- {file = "aiofiles-24.1.0.tar.gz", hash = "sha256:22a075c9e5a3810f0c2e48f3008c94d68c65d763b9b03857924c99e57355166c"},
+ {file = "aiofiles-25.1.0-py3-none-any.whl", hash = "sha256:abe311e527c862958650f9438e859c1fa7568a141b22abcd015e120e86a85695"},
+ {file = "aiofiles-25.1.0.tar.gz", hash = "sha256:a8d728f0a29de45dc521f18f07297428d56992a742f0cd2701ba86e44d23d5b2"},
]
[[package]]
@@ -897,29 +897,6 @@ files = [
{file = "charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a"},
]
-[[package]]
-name = "claude-agent-sdk"
-version = "0.1.33"
-description = "Python SDK for Claude Code"
-optional = false
-python-versions = ">=3.10"
-groups = ["main"]
-files = [
- {file = "claude_agent_sdk-0.1.33-py3-none-macosx_11_0_arm64.whl", hash = "sha256:57886a2dd124e5b3c9e12ec3e4841742ab3444d1e428b45ceaec8841c96698fa"},
- {file = "claude_agent_sdk-0.1.33-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:ea0f1e4fadeec766000122723c406a6f47c6210ea11bb5cc0c88af11ef7c940c"},
- {file = "claude_agent_sdk-0.1.33-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:0ecd822c577b4ea2a52e51146a24dcea73eb69ff366bdb875785dadb116d593b"},
- {file = "claude_agent_sdk-0.1.33-py3-none-win_amd64.whl", hash = "sha256:a9fbd09d8f947005e087340ecd0706ed35639c946b4bd49429d3132db4cb3751"},
- {file = "claude_agent_sdk-0.1.33.tar.gz", hash = "sha256:134bf403bb7553d829dadec42c30ecef340f5d4ad1595c1bdef933a9ca3129cf"},
-]
-
-[package.dependencies]
-anyio = ">=4.0.0"
-mcp = ">=0.1.0"
-typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""}
-
-[package.extras]
-dev = ["anyio[trio] (>=4.0.0)", "mypy (>=1.0.0)", "pytest (>=7.0.0)", "pytest-asyncio (>=0.20.0)", "pytest-cov (>=4.0.0)", "ruff (>=0.1.0)"]
-
[[package]]
name = "cleo"
version = "2.1.0"
@@ -2616,18 +2593,6 @@ http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
zstd = ["zstandard (>=0.18.0)"]
-[[package]]
-name = "httpx-sse"
-version = "0.4.3"
-description = "Consume Server-Sent Event (SSE) messages with HTTPX."
-optional = false
-python-versions = ">=3.9"
-groups = ["main"]
-files = [
- {file = "httpx_sse-0.4.3-py3-none-any.whl", hash = "sha256:0ac1c9fe3c0afad2e0ebb25a934a59f4c7823b60792691f779fad2c5568830fc"},
- {file = "httpx_sse-0.4.3.tar.gz", hash = "sha256:9b1ed0127459a66014aec3c56bebd93da3c1bc8bb6618c8082039a44889a755d"},
-]
-
[[package]]
name = "huggingface-hub"
version = "1.4.1"
@@ -3345,39 +3310,6 @@ files = [
{file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
]
-[[package]]
-name = "mcp"
-version = "1.26.0"
-description = "Model Context Protocol SDK"
-optional = false
-python-versions = ">=3.10"
-groups = ["main"]
-files = [
- {file = "mcp-1.26.0-py3-none-any.whl", hash = "sha256:904a21c33c25aa98ddbeb47273033c435e595bbacfdb177f4bd87f6dceebe1ca"},
- {file = "mcp-1.26.0.tar.gz", hash = "sha256:db6e2ef491eecc1a0d93711a76f28dec2e05999f93afd48795da1c1137142c66"},
-]
-
-[package.dependencies]
-anyio = ">=4.5"
-httpx = ">=0.27.1"
-httpx-sse = ">=0.4"
-jsonschema = ">=4.20.0"
-pydantic = ">=2.11.0,<3.0.0"
-pydantic-settings = ">=2.5.2"
-pyjwt = {version = ">=2.10.1", extras = ["crypto"]}
-python-multipart = ">=0.0.9"
-pywin32 = {version = ">=310", markers = "sys_platform == \"win32\""}
-sse-starlette = ">=1.6.1"
-starlette = ">=0.27"
-typing-extensions = ">=4.9.0"
-typing-inspection = ">=0.4.1"
-uvicorn = {version = ">=0.31.1", markers = "sys_platform != \"emscripten\""}
-
-[package.extras]
-cli = ["python-dotenv (>=1.0.0)", "typer (>=0.16.0)"]
-rich = ["rich (>=13.9.4)"]
-ws = ["websockets (>=15.0.1)"]
-
[[package]]
name = "mdurl"
version = "0.1.2"
@@ -6062,7 +5994,7 @@ description = "Python for Window Extensions"
optional = false
python-versions = "*"
groups = ["main"]
-markers = "sys_platform == \"win32\" or platform_system == \"Windows\""
+markers = "platform_system == \"Windows\""
files = [
{file = "pywin32-311-cp310-cp310-win32.whl", hash = "sha256:d03ff496d2a0cd4a5893504789d4a15399133fe82517455e78bad62efbb7f0a3"},
{file = "pywin32-311-cp310-cp310-win_amd64.whl", hash = "sha256:797c2772017851984b97180b0bebe4b620bb86328e8a884bb626156295a63b3b"},
@@ -7042,28 +6974,6 @@ postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"]
pymysql = ["pymysql"]
sqlcipher = ["sqlcipher3_binary"]
-[[package]]
-name = "sse-starlette"
-version = "3.2.0"
-description = "SSE plugin for Starlette"
-optional = false
-python-versions = ">=3.9"
-groups = ["main"]
-files = [
- {file = "sse_starlette-3.2.0-py3-none-any.whl", hash = "sha256:5876954bd51920fc2cd51baee47a080eb88a37b5b784e615abb0b283f801cdbf"},
- {file = "sse_starlette-3.2.0.tar.gz", hash = "sha256:8127594edfb51abe44eac9c49e59b0b01f1039d0c7461c6fd91d4e03b70da422"},
-]
-
-[package.dependencies]
-anyio = ">=4.7.0"
-starlette = ">=0.49.1"
-
-[package.extras]
-daphne = ["daphne (>=4.2.0)"]
-examples = ["aiosqlite (>=0.21.0)", "fastapi (>=0.115.12)", "sqlalchemy[asyncio] (>=2.0.41)", "uvicorn (>=0.34.0)"]
-granian = ["granian (>=2.3.1)"]
-uvicorn = ["uvicorn (>=0.34.0)"]
-
[[package]]
name = "stagehand"
version = "0.5.9"
@@ -8530,4 +8440,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.14"
-content-hash = "c15100f0726e7e42e0a69b50474ef23c8b90125b1da6cc0c6afca47f6c453a86"
+content-hash = "c06e96ad49388ba7a46786e9ea55ea2c1a57408e15613237b4bee40a592a12af"
diff --git a/autogpt_platform/backend/pyproject.toml b/autogpt_platform/backend/pyproject.toml
index 501c432855..c3cc56f635 100644
--- a/autogpt_platform/backend/pyproject.toml
+++ b/autogpt_platform/backend/pyproject.toml
@@ -77,7 +77,7 @@ yt-dlp = "2025.12.08"
zerobouncesdk = "^1.1.2"
# NOTE: please insert new dependencies in their alphabetical location
pytest-snapshot = "^0.9.0"
-aiofiles = "^24.1.0"
+aiofiles = "^25.1.0"
tiktoken = "^0.12.0"
aioclamd = "^1.0.0"
setuptools = "^80.9.0"
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/CopilotPage.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/CopilotPage.tsx
index cd1033f535..0d403b1a79 100644
--- a/autogpt_platform/frontend/src/app/(platform)/copilot/CopilotPage.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/CopilotPage.tsx
@@ -1,11 +1,11 @@
"use client";
-import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
import { SidebarProvider } from "@/components/ui/sidebar";
import { ChatContainer } from "./components/ChatContainer/ChatContainer";
import { ChatSidebar } from "./components/ChatSidebar/ChatSidebar";
import { MobileDrawer } from "./components/MobileDrawer/MobileDrawer";
import { MobileHeader } from "./components/MobileHeader/MobileHeader";
+import { ScaleLoader } from "./components/ScaleLoader/ScaleLoader";
import { useCopilotPage } from "./useCopilotPage";
export function CopilotPage() {
@@ -34,7 +34,11 @@ export function CopilotPage() {
} = useCopilotPage();
if (isUserLoading || !isLoggedIn) {
-    return <LoadingSpinner />;
+    return (
+      <div className="flex h-full items-center justify-center">
+        <ScaleLoader />
+      </div>
+    );
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/ScaleLoader/ScaleLoader.module.css b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ScaleLoader/ScaleLoader.module.css
new file mode 100644
index 0000000000..3e7e71d66b
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ScaleLoader/ScaleLoader.module.css
@@ -0,0 +1,35 @@
+.loader {
+  width: 48px;
+  height: 48px;
+  display: inline-block;
+  position: relative;
+}
+
+.loader::after,
+.loader::before {
+  content: "";
+  box-sizing: border-box;
+  width: 100%;
+  height: 100%;
+  border-radius: 50%;
+  background: currentColor;
+  position: absolute;
+  left: 0;
+  top: 0;
+  animation: animloader 2s linear infinite;
+}
+
+.loader::after {
+  animation-delay: 1s;
+}
+
+@keyframes animloader {
+  0% {
+    transform: scale(0);
+    opacity: 1;
+  }
+  100% {
+    transform: scale(1);
+    opacity: 0;
+  }
+}
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/ScaleLoader/ScaleLoader.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ScaleLoader/ScaleLoader.tsx
new file mode 100644
index 0000000000..a395b21319
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ScaleLoader/ScaleLoader.tsx
@@ -0,0 +1,16 @@
+import { cn } from "@/lib/utils";
+import styles from "./ScaleLoader.module.css";
+
+interface Props {
+  size?: number;
+  className?: string;
+}
+
+export function ScaleLoader({ size = 48, className }: Props) {
+  return (
+    <div
+      className={cn(styles.loader, className)}
+      style={{ width: size, height: size }}
+    />
+  );
+}
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx
index 5dc2f40dfe..0d023d0529 100644
--- a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx
@@ -49,12 +49,7 @@ interface Props {
   part: CreateAgentToolPart;
 }
 
-function getAccordionMeta(output: CreateAgentToolOutput): {
-  icon: React.ReactNode;
-  title: React.ReactNode;
-  titleClassName?: string;
-  description?: string;
-} {
+function getAccordionMeta(output: CreateAgentToolOutput) {
   const icon =