diff --git a/.github/workflows/claude-ci-failure-auto-fix.yml b/.github/workflows/claude-ci-failure-auto-fix.yml
index 070a4acd14..ab07c8ae10 100644
--- a/.github/workflows/claude-ci-failure-auto-fix.yml
+++ b/.github/workflows/claude-ci-failure-auto-fix.yml
@@ -22,7 +22,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
          ref: ${{ github.event.workflow_run.head_branch }}
          fetch-depth: 0
diff --git a/.github/workflows/claude-dependabot.yml b/.github/workflows/claude-dependabot.yml
index 6dbe068c3d..da37df6de7 100644
--- a/.github/workflows/claude-dependabot.yml
+++ b/.github/workflows/claude-dependabot.yml
@@ -30,7 +30,7 @@ jobs:
       actions: read # Required for CI access
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
          fetch-depth: 1
diff --git a/.github/workflows/claude.yml b/.github/workflows/claude.yml
index 8e165b823e..ee901fe5d4 100644
--- a/.github/workflows/claude.yml
+++ b/.github/workflows/claude.yml
@@ -40,7 +40,7 @@ jobs:
       actions: read # Required for CI access
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
          fetch-depth: 1
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index a6c36ed86c..966243323c 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -58,7 +58,7 @@ jobs:
     # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
diff --git a/.github/workflows/copilot-setup-steps.yml b/.github/workflows/copilot-setup-steps.yml
index eae6eea5d2..dad99cb8d9 100644
--- a/.github/workflows/copilot-setup-steps.yml
+++ b/.github/workflows/copilot-setup-steps.yml
@@ -27,7 +27,7 @@ jobs:
      # If you do not check out your code, Copilot will do this for you.
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
          fetch-depth: 0
          submodules: true
diff --git a/.github/workflows/docs-block-sync.yml b/.github/workflows/docs-block-sync.yml
index 4977877b19..32f205019f 100644
--- a/.github/workflows/docs-block-sync.yml
+++ b/.github/workflows/docs-block-sync.yml
@@ -23,7 +23,7 @@ jobs:
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
          fetch-depth: 1
diff --git a/.github/workflows/docs-claude-review.yml b/.github/workflows/docs-claude-review.yml
index 1643fe1c49..ca2788b387 100644
--- a/.github/workflows/docs-claude-review.yml
+++ b/.github/workflows/docs-claude-review.yml
@@ -23,7 +23,7 @@ jobs:
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
          fetch-depth: 0
diff --git a/.github/workflows/docs-enhance.yml b/.github/workflows/docs-enhance.yml
index 4baa882cd1..52607fa5df 100644
--- a/.github/workflows/docs-enhance.yml
+++ b/.github/workflows/docs-enhance.yml
@@ -28,7 +28,7 @@ jobs:
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
          fetch-depth: 1
diff --git a/.github/workflows/platform-autogpt-deploy-dev.yaml b/.github/workflows/platform-autogpt-deploy-dev.yaml
index 6e1e23d3eb..b415fb1b7b 100644
--- a/.github/workflows/platform-autogpt-deploy-dev.yaml
+++ b/.github/workflows/platform-autogpt-deploy-dev.yaml
@@ -25,7 +25,7 @@ jobs:
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
          ref: ${{ github.event.inputs.git_ref || github.ref_name }}
@@ -52,7 +52,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Trigger deploy workflow
-        uses: peter-evans/repository-dispatch@v3
+        uses: peter-evans/repository-dispatch@v4
        with:
          token: ${{ secrets.DEPLOY_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
diff --git a/.github/workflows/platform-autogpt-deploy-prod.yml b/.github/workflows/platform-autogpt-deploy-prod.yml
index 4d7c16d710..e0c524d8d2 100644
--- a/.github/workflows/platform-autogpt-deploy-prod.yml
+++ b/.github/workflows/platform-autogpt-deploy-prod.yml
@@ -17,7 +17,7 @@ jobs:
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
          ref: ${{ github.ref_name || 'master' }}
@@ -45,7 +45,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Trigger deploy workflow
-        uses: peter-evans/repository-dispatch@v3
+        uses: peter-evans/repository-dispatch@v4
        with:
          token: ${{ secrets.DEPLOY_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
diff --git a/.github/workflows/platform-backend-ci.yml b/.github/workflows/platform-backend-ci.yml
index a301477ecf..1f0c6da3dd 100644
--- a/.github/workflows/platform-backend-ci.yml
+++ b/.github/workflows/platform-backend-ci.yml
@@ -68,7 +68,7 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
          fetch-depth: 0
          submodules: true
diff --git a/.github/workflows/platform-dev-deploy-event-dispatcher.yml b/.github/workflows/platform-dev-deploy-event-dispatcher.yml
index b5324b7c2c..1a581c55c2 100644
--- a/.github/workflows/platform-dev-deploy-event-dispatcher.yml
+++ b/.github/workflows/platform-dev-deploy-event-dispatcher.yml
@@ -82,7 +82,7 @@ jobs:
       - name: Dispatch Deploy Event
        if: steps.check_status.outputs.should_deploy == 'true'
-        uses: peter-evans/repository-dispatch@v3
+        uses: peter-evans/repository-dispatch@v4
        with:
          token: ${{ secrets.DISPATCH_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
@@ -110,7 +110,7 @@ jobs:
       - name: Dispatch Undeploy Event (from comment)
        if: steps.check_status.outputs.should_undeploy == 'true'
-        uses: peter-evans/repository-dispatch@v3
+        uses: peter-evans/repository-dispatch@v4
        with:
          token: ${{ secrets.DISPATCH_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
@@ -168,7 +168,7 @@ jobs:
          github.event_name == 'pull_request' &&
          github.event.action == 'closed' &&
          steps.check_pr_close.outputs.should_undeploy == 'true'
-        uses: peter-evans/repository-dispatch@v3
+        uses: peter-evans/repository-dispatch@v4
        with:
          token: ${{ secrets.DISPATCH_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
diff --git a/.github/workflows/platform-frontend-ci.yml b/.github/workflows/platform-frontend-ci.yml
index 669a775934..6410daae9f 100644
--- a/.github/workflows/platform-frontend-ci.yml
+++ b/.github/workflows/platform-frontend-ci.yml
@@ -31,7 +31,7 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6

      - name: Check for component changes
        uses: dorny/paths-filter@v3
@@ -71,7 +71,7 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6

      - name: Set up Node.js
        uses: actions/setup-node@v6
@@ -107,7 +107,7 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
          fetch-depth: 0
@@ -148,7 +148,7 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
          submodules: recursive
@@ -277,7 +277,7 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
          submodules: recursive
diff --git a/.github/workflows/platform-fullstack-ci.yml b/.github/workflows/platform-fullstack-ci.yml
index ab483b98af..b4724245dc 100644
--- a/.github/workflows/platform-fullstack-ci.yml
+++ b/.github/workflows/platform-fullstack-ci.yml
@@ -29,7 +29,7 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6

      - name: Set up Node.js
        uses: actions/setup-node@v6
@@ -63,7 +63,7 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
          submodules: recursive
diff --git a/.github/workflows/repo-workflow-checker.yml b/.github/workflows/repo-workflow-checker.yml
index 35536ba922..aa94622d31 100644
--- a/.github/workflows/repo-workflow-checker.yml
+++ b/.github/workflows/repo-workflow-checker.yml
@@ -11,7 +11,7 @@ jobs:
     steps:
      # - name: Wait some time for all actions to start
      #   run: sleep 30
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v6
      # with:
      #   fetch-depth: 0
      - name: Set up Python
diff --git a/autogpt_platform/backend/backend/api/features/chat/config.py b/autogpt_platform/backend/backend/api/features/chat/config.py
index 0b37e42df8..808692f97f 100644
--- a/autogpt_platform/backend/backend/api/features/chat/config.py
+++ b/autogpt_platform/backend/backend/api/features/chat/config.py
@@ -93,6 +93,12 @@ class ChatConfig(BaseSettings):
         description="Name of the prompt in Langfuse to fetch",
     )

+    # Extended thinking configuration for Claude models
+    thinking_enabled: bool = Field(
+        default=True,
+        description="Enable adaptive thinking for Claude models via OpenRouter",
+    )
+
     @field_validator("api_key", mode="before")
     @classmethod
     def get_api_key(cls, v):
diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py
index 49e70265fa..072ea88fd5 100644
--- a/autogpt_platform/backend/backend/api/features/chat/service.py
+++ b/autogpt_platform/backend/backend/api/features/chat/service.py
@@ -1066,6 +1066,10 @@ async def _stream_chat_chunks(
                 :128
             ]  # OpenRouter limit

+        # Enable adaptive thinking for Anthropic models via OpenRouter
+        if config.thinking_enabled and "anthropic" in model.lower():
+            extra_body["reasoning"] = {"enabled": True}
+
         api_call_start = time_module.perf_counter()
         stream = await client.chat.completions.create(
             model=model,
@@ -1829,6 +1833,10 @@ async def _generate_llm_continuation(
     if session_id:
         extra_body["session_id"] = session_id[:128]

+    # Enable adaptive thinking for Anthropic models via OpenRouter
+    if config.thinking_enabled and "anthropic" in config.model.lower():
+        extra_body["reasoning"] = {"enabled": True}
+
     retry_count = 0
     last_error: Exception | None = None
     response = None
@@ -1959,6 +1967,10 @@ async def _generate_llm_continuation_with_streaming(
     if session_id:
         extra_body["session_id"] = session_id[:128]

+    # Enable adaptive thinking for Anthropic models via OpenRouter
+    if config.thinking_enabled and "anthropic" in config.model.lower():
+        extra_body["reasoning"] = {"enabled": True}
+
     # Make streaming LLM call (no tools - just text response)
     from typing import cast
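Note on the three `service.py` hunks above: each one funnels OpenRouter's `reasoning` parameter through the OpenAI SDK's `extra_body` passthrough, gated on the new `ChatConfig.thinking_enabled` flag and an Anthropic model id. A minimal sketch of that request pattern, assuming an `AsyncOpenAI` client pointed at OpenRouter (the base URL, environment variable, and function name below are illustrative assumptions, not taken from this PR, which reuses the service's existing client):

```python
import os

from openai import AsyncOpenAI

# Illustrative client setup; the PR itself reuses the chat service's client.
client = AsyncOpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key=os.environ["OPENROUTER_API_KEY"],
)


async def stream_chat(model: str, messages: list[dict], thinking_enabled: bool):
    extra_body: dict = {}
    # Same gating as the diff: only Anthropic model ids get the OpenRouter
    # `reasoning` field, and only when the config flag is on.
    if thinking_enabled and "anthropic" in model.lower():
        extra_body["reasoning"] = {"enabled": True}

    # extra_body is merged into the request JSON, so OpenRouter receives
    # {"model": ..., "messages": ..., "reasoning": {"enabled": true}}.
    return await client.chat.completions.create(
        model=model,
        messages=messages,
        stream=True,
        extra_body=extra_body,
    )
```

Because `thinking_enabled` defaults to `True`, non-Anthropic models are unaffected and Anthropic models pick up adaptive thinking without any configuration change.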
"sha256:dacea8111ca4442e97dbfec4f8d676cf9709b35357a26e468f8887b95de0012f"}, + {file = "langfuse-3.14.1-py3-none-any.whl", hash = "sha256:17bed605dbfc9947cbd1738a715f6d27c1b80b6da9f2946586171958fa5820d0"}, + {file = "langfuse-3.14.1.tar.gz", hash = "sha256:404a6104cd29353d7829aa417ec46565b04917e5599afdda96c5b0865f4bc991"}, ] [package.dependencies] @@ -8440,4 +8440,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt [metadata] lock-version = "2.1" python-versions = ">=3.10,<3.14" -content-hash = "14686ee0e2dc446a75d0db145b08dc410dc31c357e25085bb0f9b0174711c4b1" +content-hash = "c06e96ad49388ba7a46786e9ea55ea2c1a57408e15613237b4bee40a592a12af" diff --git a/autogpt_platform/backend/pyproject.toml b/autogpt_platform/backend/pyproject.toml index 677b73b468..317663ee98 100644 --- a/autogpt_platform/backend/pyproject.toml +++ b/autogpt_platform/backend/pyproject.toml @@ -21,7 +21,7 @@ cryptography = "^46.0" discord-py = "^2.5.2" e2b-code-interpreter = "^1.5.2" elevenlabs = "^1.50.0" -fastapi = "^0.128.5" +fastapi = "^0.128.6" feedparser = "^6.0.11" flake8 = "^7.3.0" google-api-python-client = "^2.177.0" @@ -34,7 +34,7 @@ html2text = "^2024.2.26" jinja2 = "^3.1.6" jsonref = "^1.1.0" jsonschema = "^4.25.0" -langfuse = "^3.11.0" +langfuse = "^3.14.1" launchdarkly-server-sdk = "^9.14.1" mem0ai = "^0.1.115" moviepy = "^2.1.2" @@ -76,7 +76,7 @@ yt-dlp = "2025.12.08" zerobouncesdk = "^1.1.2" # NOTE: please insert new dependencies in their alphabetical location pytest-snapshot = "^0.9.0" -aiofiles = "^24.1.0" +aiofiles = "^25.1.0" tiktoken = "^0.12.0" aioclamd = "^1.0.0" setuptools = "^80.9.0" diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/CopilotPage.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/CopilotPage.tsx index cd1033f535..0d403b1a79 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/CopilotPage.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/CopilotPage.tsx @@ -1,11 +1,11 @@ "use client"; -import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; import { SidebarProvider } from "@/components/ui/sidebar"; import { ChatContainer } from "./components/ChatContainer/ChatContainer"; import { ChatSidebar } from "./components/ChatSidebar/ChatSidebar"; import { MobileDrawer } from "./components/MobileDrawer/MobileDrawer"; import { MobileHeader } from "./components/MobileHeader/MobileHeader"; +import { ScaleLoader } from "./components/ScaleLoader/ScaleLoader"; import { useCopilotPage } from "./useCopilotPage"; export function CopilotPage() { @@ -34,7 +34,11 @@ export function CopilotPage() { } = useCopilotPage(); if (isUserLoading || !isLoggedIn) { - return ; + return ( +
+ +
+ ); } return ( diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/ChatMessagesContainer/ChatMessagesContainer.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ChatMessagesContainer/ChatMessagesContainer.tsx index 0867ede5a4..4578b268e3 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/ChatMessagesContainer/ChatMessagesContainer.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ChatMessagesContainer/ChatMessagesContainer.tsx @@ -143,10 +143,10 @@ export const ChatMessagesContainer = ({ return ( - + {isLoading && messages.length === 0 && ( -
- +
+
)} {messages.map((message, messageIndex) => { diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/ChatSidebar/ChatSidebar.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ChatSidebar/ChatSidebar.tsx index 8a48cb66c2..6b7398b4ba 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/ChatSidebar/ChatSidebar.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ChatSidebar/ChatSidebar.tsx @@ -121,8 +121,8 @@ export function ChatSidebar() { className="mt-4 flex flex-col gap-1" > {isLoadingSessions ? ( -
- +
+
) : sessions.length === 0 ? (

diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/ScaleLoader/ScaleLoader.module.css b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ScaleLoader/ScaleLoader.module.css
new file mode 100644
index 0000000000..3e7e71d66b
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ScaleLoader/ScaleLoader.module.css
@@ -0,0 +1,35 @@
+.loader {
+  width: 48px;
+  height: 48px;
+  display: inline-block;
+  position: relative;
+}
+
+.loader::after,
+.loader::before {
+  content: "";
+  box-sizing: border-box;
+  width: 100%;
+  height: 100%;
+  border-radius: 50%;
+  background: currentColor;
+  position: absolute;
+  left: 0;
+  top: 0;
+  animation: animloader 2s linear infinite;
+}
+
+.loader::after {
+  animation-delay: 1s;
+}
+
+@keyframes animloader {
+  0% {
+    transform: scale(0);
+    opacity: 1;
+  }
+  100% {
+    transform: scale(1);
+    opacity: 0;
+  }
+}
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/ScaleLoader/ScaleLoader.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ScaleLoader/ScaleLoader.tsx
new file mode 100644
index 0000000000..a395b21319
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/ScaleLoader/ScaleLoader.tsx
@@ -0,0 +1,16 @@
+import { cn } from "@/lib/utils";
+import styles from "./ScaleLoader.module.css";
+
+interface Props {
+  size?: number;
+  className?: string;
+}
+
+export function ScaleLoader({ size = 48, className }: Props) {
+  return (
+
+
+
+
+  );
+}
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx
index 5dc2f40dfe..0d023d0529 100644
--- a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx
@@ -49,12 +49,7 @@ interface Props {
   part: CreateAgentToolPart;
 }

-function getAccordionMeta(output: CreateAgentToolOutput): {
-  icon: React.ReactNode;
-  title: React.ReactNode;
-  titleClassName?: string;
-  description?: string;
-} {
+function getAccordionMeta(output: CreateAgentToolOutput) {
   const icon = ;

   if (isAgentSavedOutput(output)) {
@@ -73,6 +68,7 @@ function getAccordionMeta(output: CreateAgentToolOutput): {
       icon,
       title: "Needs clarification",
       description: `${questions.length} question${questions.length === 1 ? "" : "s"}`,
+      expanded: true,
     };
   }
   if (
@@ -97,18 +93,23 @@ function getAccordionMeta(output: CreateAgentToolOutput): {
 export function CreateAgentTool({ part }: Props) {
   const text = getAnimationText(part);
   const { onSend } = useCopilotChatActions();
+
   const isStreaming =
     part.state === "input-streaming" || part.state === "input-available";
   const output = getCreateAgentToolOutput(part);
+
   const isError =
     part.state === "output-error" || (!!output && isErrorOutput(output));
+
   const isOperating =
     !!output &&
     (isOperationStartedOutput(output) ||
       isOperationPendingOutput(output) ||
       isOperationInProgressOutput(output));
+
   const progress = useAsymptoticProgress(isOperating);
+
   const hasExpandableContent =
     part.state === "output-available" &&
     !!output &&
@@ -149,10 +150,7 @@ export function CreateAgentTool({ part }: Props) {
       {hasExpandableContent && output && (
-
+
           {isOperating && (
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/EditAgent/EditAgent.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/EditAgent/EditAgent.tsx
index 3beb9e7e1e..6766a5cb49 100644
--- a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/EditAgent/EditAgent.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/EditAgent/EditAgent.tsx
@@ -146,10 +146,7 @@ export function EditAgentTool({ part }: Props) {
       {hasExpandableContent && output && (
-
+
           {isOperating && (
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/RunAgent.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/RunAgent.tsx
index 51044848b9..f16b9d2b2f 100644
--- a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/RunAgent.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/RunAgent.tsx
@@ -61,14 +61,7 @@ export function RunAgentTool({ part }: Props) {
       {hasExpandableContent && output && (
-
+
           {isRunAgentExecutionStartedOutput(output) && (
           )}
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/helpers.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/helpers.tsx
index 0a117a71f2..816c661230 100644
--- a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/helpers.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/helpers.tsx
@@ -10,7 +10,7 @@ import {
   WarningDiamondIcon,
 } from "@phosphor-icons/react";
 import type { ToolUIPart } from "ai";
-import { SpinnerLoader } from "../../components/SpinnerLoader/SpinnerLoader";
+import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";

 export interface RunAgentInput {
   username_agent_slug?: string;
@@ -171,7 +171,7 @@ export function ToolIcon({
     );
   }
   if (isStreaming) {
-    return ;
+    return ;
   }
   return ;
 }
@@ -203,7 +203,7 @@ export function getAccordionMeta(output: RunAgentToolOutput): {
       ? output.status.trim()
       : "started";
     return {
-      icon: ,
+      icon: ,
       title: output.graph_name,
       description: `Status: ${statusText}`,
     };
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/RunBlock.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/RunBlock.tsx
index ded344efa2..e1cb030449 100644
--- a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/RunBlock.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/RunBlock.tsx
@@ -55,13 +55,7 @@ export function RunBlockTool({ part }: Props) {
       {hasExpandableContent && output && (
-
+
           {isRunBlockBlockOutput(output) && }
           {isRunBlockSetupRequirementsOutput(output) && (
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/helpers.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/helpers.tsx
index 61ba65e74e..c9b903876a 100644
--- a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/helpers.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/helpers.tsx
@@ -8,7 +8,7 @@ import {
   WarningDiamondIcon,
 } from "@phosphor-icons/react";
 import type { ToolUIPart } from "ai";
-import { SpinnerLoader } from "../../components/SpinnerLoader/SpinnerLoader";
+import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";

 export interface RunBlockInput {
   block_id?: string;
@@ -120,7 +120,7 @@ export function ToolIcon({
     );
   }
   if (isStreaming) {
-    return ;
+    return ;
   }
   return ;
 }
@@ -149,7 +149,7 @@ export function getAccordionMeta(output: RunBlockToolOutput): {
   if (isRunBlockBlockOutput(output)) {
     const keys = Object.keys(output.outputs ?? {});
     return {
-      icon: ,
+      icon: ,
       title: output.block_name,
       description:
         keys.length > 0
diff --git a/autogpt_platform/frontend/src/lib/supabase/helpers.ts b/autogpt_platform/frontend/src/lib/supabase/helpers.ts
index 26f7711bde..c77e43e7b4 100644
--- a/autogpt_platform/frontend/src/lib/supabase/helpers.ts
+++ b/autogpt_platform/frontend/src/lib/supabase/helpers.ts
@@ -6,6 +6,7 @@ import { SupabaseClient } from "@supabase/supabase-js";

 export const PROTECTED_PAGES = [
   "/auth/authorize",
   "/auth/integrations",
+  "/copilot",
   "/monitor",
   "/build",
   "/onboarding",
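A closing note on the backend change: because `thinking_enabled` defaults to `True`, the only runtime switch for extended thinking is the model-id check in `service.py`. A hypothetical helper that mirrors that condition (the helper name and model ids are illustrative, not from this PR) makes the rule easy to pin down in a unit test:

```python
def reasoning_extra_body(model: str, thinking_enabled: bool) -> dict:
    """Mirror of the gating added in chat/service.py (illustrative helper)."""
    extra: dict = {}
    if thinking_enabled and "anthropic" in model.lower():
        extra["reasoning"] = {"enabled": True}
    return extra


# Only Anthropic model ids routed through OpenRouter pick up the reasoning field.
assert reasoning_extra_body("anthropic/claude-sonnet-4", True) == {
    "reasoning": {"enabled": True}
}
assert reasoning_extra_body("openai/gpt-4o", True) == {}
assert reasoning_extra_body("anthropic/claude-sonnet-4", False) == {}
```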