Compare commits


4 Commits

Author SHA1 Message Date
Otto-AGPT
cdeefb8621 fix(copilot): Use correct OpenRouter reasoning API format
Addresses review comments from CodeRabbit and Sentry:

- Change reasoning format from {"enabled": True} (invalid) to
  {"max_tokens": config.thinking_budget_tokens} per OpenRouter docs
- Add missing thinking_budget_tokens config field (default: 10000)
- Extract duplicate code into _apply_thinking_config() helper function
- Update description from 'adaptive' to 'extended' thinking for clarity

References:
- OpenRouter reasoning docs: https://openrouter.ai/docs/reasoning-tokens
2026-02-11 13:54:57 +00:00
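For reference, the fix amounts to changing the shape of the reasoning object placed in extra_body; a minimal sketch, with 10000 standing in for the configured default budget:

    # Format the original commit sent (called out as invalid in review):
    extra_body["reasoning"] = {"enabled": True}

    # Format this commit switches to, per the linked OpenRouter reasoning docs:
    extra_body["reasoning"] = {"max_tokens": 10000}  # explicit thinking budget
    # alternative mentioned in the helper's docstring below:
    extra_body["reasoning"] = {"effort": "high"}     # let OpenRouter choose the budget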
Swifty
ba6d585170 update settings 2026-02-10 16:08:21 +01:00
Swifty
90eac56525 Merge branch 'dev' into fix/enable-extended-thinking 2026-02-10 15:26:40 +01:00
Otto
75f8772f8a feat(copilot): Enable extended thinking for Claude models
Adds configuration to enable Anthropic's extended thinking feature via
OpenRouter. This keeps the model's chain-of-thought reasoning internal
rather than outputting it to users.

Configuration:
- thinking_enabled: bool (default: True)
- thinking_budget_tokens: int (default: 10000)

The thinking config is only applied to Anthropic models (detected via
model name containing 'anthropic').

Fixes the issue where the CoPilot prompt expects thinking mode but it
wasn't enabled on the API side, causing internal reasoning to leak
into user-facing responses.
2026-02-10 13:58:57 +00:00
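A minimal sketch of the mechanism this commit describes, using the field names from the ChatConfig diff further down (the reasoning payload shown uses the corrected format from the follow-up commit above, not the {"enabled": True} form this commit originally shipped):

    # Applied only to Anthropic models, detected by a substring check on the model name
    if config.thinking_enabled and "anthropic" in model.lower():
        extra_body["reasoning"] = {"max_tokens": config.thinking_budget_tokens}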
34 changed files with 104 additions and 128 deletions

View File

@@ -22,7 +22,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
         with:
           ref: ${{ github.event.workflow_run.head_branch }}
           fetch-depth: 0

View File

@@ -30,7 +30,7 @@ jobs:
       actions: read # Required for CI access
     steps:
       - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          fetch-depth: 1

View File

@@ -40,7 +40,7 @@ jobs:
       actions: read # Required for CI access
     steps:
       - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          fetch-depth: 1

View File

@@ -58,7 +58,7 @@ jobs:
      # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL

View File

@@ -27,7 +27,7 @@ jobs:
      # If you do not check out your code, Copilot will do this for you.
     steps:
       - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

View File

@@ -23,7 +23,7 @@ jobs:
     steps:
       - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          fetch-depth: 1

View File

@@ -23,7 +23,7 @@ jobs:
     steps:
       - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          fetch-depth: 0

View File

@@ -28,7 +28,7 @@ jobs:
     steps:
       - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          fetch-depth: 1

View File

@@ -25,7 +25,7 @@ jobs:
     steps:
       - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.inputs.git_ref || github.ref_name }}
@@ -52,7 +52,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
      - name: Trigger deploy workflow
-        uses: peter-evans/repository-dispatch@v4
+        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ secrets.DEPLOY_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure

View File

@@ -17,7 +17,7 @@ jobs:
     steps:
       - name: Checkout code
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          ref: ${{ github.ref_name || 'master' }}
@@ -45,7 +45,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
      - name: Trigger deploy workflow
-        uses: peter-evans/repository-dispatch@v4
+        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ secrets.DEPLOY_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure

View File

@@ -68,7 +68,7 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

View File

@@ -82,7 +82,7 @@ jobs:
      - name: Dispatch Deploy Event
        if: steps.check_status.outputs.should_deploy == 'true'
-        uses: peter-evans/repository-dispatch@v4
+        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ secrets.DISPATCH_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
@@ -110,7 +110,7 @@ jobs:
      - name: Dispatch Undeploy Event (from comment)
        if: steps.check_status.outputs.should_undeploy == 'true'
-        uses: peter-evans/repository-dispatch@v4
+        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ secrets.DISPATCH_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
@@ -168,7 +168,7 @@ jobs:
          github.event_name == 'pull_request' &&
          github.event.action == 'closed' &&
          steps.check_pr_close.outputs.should_undeploy == 'true'
-        uses: peter-evans/repository-dispatch@v4
+        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ secrets.DISPATCH_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure

View File

@@ -31,7 +31,7 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4

      - name: Check for component changes
        uses: dorny/paths-filter@v3
@@ -71,7 +71,7 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4

      - name: Set up Node.js
        uses: actions/setup-node@v6
@@ -107,7 +107,7 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          fetch-depth: 0
@@ -148,7 +148,7 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          submodules: recursive
@@ -277,7 +277,7 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          submodules: recursive

View File

@@ -29,7 +29,7 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4

      - name: Set up Node.js
        uses: actions/setup-node@v6
@@ -63,7 +63,7 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
        with:
          submodules: recursive

View File

@@ -11,7 +11,7 @@ jobs:
     steps:
       # - name: Wait some time for all actions to start
       #   run: sleep 30
-      - uses: actions/checkout@v6
+      - uses: actions/checkout@v4
      # with:
      #   fetch-depth: 0
      - name: Set up Python

View File

@@ -96,7 +96,13 @@ class ChatConfig(BaseSettings):
     # Extended thinking configuration for Claude models
     thinking_enabled: bool = Field(
         default=True,
-        description="Enable adaptive thinking for Claude models via OpenRouter",
+        description="Enable extended thinking for Claude models via OpenRouter",
+    )
+    thinking_budget_tokens: int = Field(
+        default=10000,
+        ge=1000,
+        le=100000,
+        description="Maximum tokens for extended thinking (budget_tokens for Claude)",
     )

     @field_validator("api_key", mode="before")

View File

@@ -80,6 +80,19 @@ settings = Settings()
 client = openai.AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)


+def _apply_thinking_config(extra_body: dict[str, Any], model: str) -> None:
+    """Apply extended thinking configuration for Anthropic models via OpenRouter.
+
+    OpenRouter's reasoning API expects either:
+    - {"max_tokens": N} for explicit token budget
+    - {"effort": "high"} for automatic budget
+
+    See: https://openrouter.ai/docs/reasoning-tokens
+    """
+    if config.thinking_enabled and "anthropic" in model.lower():
+        extra_body["reasoning"] = {"max_tokens": config.thinking_budget_tokens}
+
+
 langfuse = get_client()

 # Redis key prefix for tracking running long-running operations
@@ -1066,9 +1079,8 @@ async def _stream_chat_chunks(
             :128
         ]  # OpenRouter limit

-        # Enable adaptive thinking for Anthropic models via OpenRouter
-        if config.thinking_enabled and "anthropic" in model.lower():
-            extra_body["reasoning"] = {"enabled": True}
+        # Enable extended thinking for Anthropic models via OpenRouter
+        _apply_thinking_config(extra_body, model)

         api_call_start = time_module.perf_counter()
         stream = await client.chat.completions.create(
@@ -1833,9 +1845,8 @@ async def _generate_llm_continuation(
        if session_id:
            extra_body["session_id"] = session_id[:128]

-        # Enable adaptive thinking for Anthropic models via OpenRouter
-        if config.thinking_enabled and "anthropic" in config.model.lower():
-            extra_body["reasoning"] = {"enabled": True}
+        # Enable extended thinking for Anthropic models via OpenRouter
+        _apply_thinking_config(extra_body, config.model)

        retry_count = 0
        last_error: Exception | None = None
@@ -1967,9 +1978,8 @@ async def _generate_llm_continuation_with_streaming(
        if session_id:
            extra_body["session_id"] = session_id[:128]

-        # Enable adaptive thinking for Anthropic models via OpenRouter
-        if config.thinking_enabled and "anthropic" in config.model.lower():
-            extra_body["reasoning"] = {"enabled": True}
+        # Enable extended thinking for Anthropic models via OpenRouter
+        _apply_thinking_config(extra_body, config.model)

        # Make streaming LLM call (no tools - just text response)
        from typing import cast
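
For context, a minimal usage sketch of the helper above, assuming the module-level config, client, and _apply_thinking_config shown in this diff (the function name and message content are illustrative; extra_body is the OpenAI SDK's standard passthrough for provider-specific fields):

    async def _example_stream(model: str, session_id: str) -> None:
        extra_body: dict[str, Any] = {"session_id": session_id[:128]}  # OpenRouter limit
        # Adds {"reasoning": {"max_tokens": config.thinking_budget_tokens}} for Anthropic models
        _apply_thinking_config(extra_body, model)
        stream = await client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": "hello"}],
            stream=True,
            extra_body=extra_body,
        )
        async for chunk in stream:
            ...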

View File

@@ -743,11 +743,6 @@ class GraphModel(Graph, GraphMeta):
             # For invalid blocks, we still raise immediately as this is a structural issue
             raise ValueError(f"Invalid block {node.block_id} for node #{node.id}")

-        if block.disabled:
-            raise ValueError(
-                f"Block {node.block_id} is disabled and cannot be used in graphs"
-            )
-
         node_input_mask = (
             nodes_input_masks.get(node.id, {}) if nodes_input_masks else {}
         )

View File

@@ -213,9 +213,6 @@ async def execute_node(
         block_name=node_block.name,
     )

-    if node_block.disabled:
-        raise ValueError(f"Block {node_block.id} is disabled and cannot be executed")
-
     # Sanity check: validate the execution input.
     input_data, error = validate_exec(node, data.inputs, resolve_input=False)
     if input_data is None:

View File

@@ -46,14 +46,14 @@ pycares = ">=4.9.0,<5"

 [[package]]
 name = "aiofiles"
-version = "25.1.0"
+version = "24.1.0"
 description = "File support for asyncio."
 optional = false
-python-versions = ">=3.9"
+python-versions = ">=3.8"
 groups = ["main"]
 files = [
-    {file = "aiofiles-25.1.0-py3-none-any.whl", hash = "sha256:abe311e527c862958650f9438e859c1fa7568a141b22abcd015e120e86a85695"},
-    {file = "aiofiles-25.1.0.tar.gz", hash = "sha256:a8d728f0a29de45dc521f18f07297428d56992a742f0cd2701ba86e44d23d5b2"},
+    {file = "aiofiles-24.1.0-py3-none-any.whl", hash = "sha256:b4ec55f4195e3eb5d7abd1bf7e061763e864dd4954231fb8539a0ef8bb8260e5"},
+    {file = "aiofiles-24.1.0.tar.gz", hash = "sha256:22a075c9e5a3810f0c2e48f3008c94d68c65d763b9b03857924c99e57355166c"},
 ]

 [[package]]
@@ -8440,4 +8440,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
 [metadata]
 lock-version = "2.1"
 python-versions = ">=3.10,<3.14"
-content-hash = "c06e96ad49388ba7a46786e9ea55ea2c1a57408e15613237b4bee40a592a12af"
+content-hash = "fc135114e01de39c8adf70f6132045e7d44a19473c1279aee0978de65aad1655"

View File

@@ -76,7 +76,7 @@ yt-dlp = "2025.12.08"
 zerobouncesdk = "^1.1.2"
 # NOTE: please insert new dependencies in their alphabetical location
 pytest-snapshot = "^0.9.0"
-aiofiles = "^25.1.0"
+aiofiles = "^24.1.0"
 tiktoken = "^0.12.0"
 aioclamd = "^1.0.0"
 setuptools = "^80.9.0"

View File

@@ -1,11 +1,11 @@
 "use client";

+import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
 import { SidebarProvider } from "@/components/ui/sidebar";
 import { ChatContainer } from "./components/ChatContainer/ChatContainer";
 import { ChatSidebar } from "./components/ChatSidebar/ChatSidebar";
 import { MobileDrawer } from "./components/MobileDrawer/MobileDrawer";
 import { MobileHeader } from "./components/MobileHeader/MobileHeader";
-import { ScaleLoader } from "./components/ScaleLoader/ScaleLoader";
 import { useCopilotPage } from "./useCopilotPage";

 export function CopilotPage() {
@@ -34,11 +34,7 @@ export function CopilotPage() {
   } = useCopilotPage();

   if (isUserLoading || !isLoggedIn) {
-    return (
-      <div className="fixed inset-0 z-50 flex items-center justify-center bg-[#f8f8f9]">
-        <ScaleLoader className="text-neutral-400" />
-      </div>
-    );
+    return <LoadingSpinner size="large" cover />;
   }

   return (

View File

@@ -143,10 +143,10 @@ export const ChatMessagesContainer = ({
   return (
     <Conversation className="min-h-0 flex-1">
-      <ConversationContent className="flex min-h-screen flex-1 flex-col gap-6 px-3 py-6">
+      <ConversationContent className="gap-6 px-3 py-6">
         {isLoading && messages.length === 0 && (
-          <div className="flex min-h-full flex-1 items-center justify-center">
-            <LoadingSpinner className="text-neutral-600" />
+          <div className="flex flex-1 items-center justify-center">
+            <LoadingSpinner size="large" className="text-neutral-400" />
           </div>
         )}
         {messages.map((message, messageIndex) => {

View File

@@ -121,8 +121,8 @@ export function ChatSidebar() {
         className="mt-4 flex flex-col gap-1"
       >
         {isLoadingSessions ? (
-          <div className="flex min-h-[30rem] items-center justify-center py-4">
-            <LoadingSpinner size="small" className="text-neutral-600" />
+          <div className="flex items-center justify-center py-4">
+            <LoadingSpinner size="small" className="text-neutral-400" />
           </div>
         ) : sessions.length === 0 ? (
           <p className="py-4 text-center text-sm text-neutral-500">

View File

@@ -1,35 +0,0 @@
.loader {
width: 48px;
height: 48px;
display: inline-block;
position: relative;
}
.loader::after,
.loader::before {
content: "";
box-sizing: border-box;
width: 100%;
height: 100%;
border-radius: 50%;
background: currentColor;
position: absolute;
left: 0;
top: 0;
animation: animloader 2s linear infinite;
}
.loader::after {
animation-delay: 1s;
}
@keyframes animloader {
0% {
transform: scale(0);
opacity: 1;
}
100% {
transform: scale(1);
opacity: 0;
}
}

View File

@@ -1,16 +0,0 @@
import { cn } from "@/lib/utils";
import styles from "./ScaleLoader.module.css";
interface Props {
size?: number;
className?: string;
}
export function ScaleLoader({ size = 48, className }: Props) {
return (
<div
className={cn(styles.loader, className)}
style={{ width: size, height: size }}
/>
);
}

View File

@@ -49,7 +49,12 @@ interface Props {
   part: CreateAgentToolPart;
 }

-function getAccordionMeta(output: CreateAgentToolOutput) {
+function getAccordionMeta(output: CreateAgentToolOutput): {
+  icon: React.ReactNode;
+  title: React.ReactNode;
+  titleClassName?: string;
+  description?: string;
+} {
   const icon = <AccordionIcon />;

   if (isAgentSavedOutput(output)) {
@@ -68,7 +73,6 @@ function getAccordionMeta(output: CreateAgentToolOutput) {
       icon,
       title: "Needs clarification",
       description: `${questions.length} question${questions.length === 1 ? "" : "s"}`,
-      expanded: true,
     };
   }
   if (
@@ -93,23 +97,18 @@ function getAccordionMeta(output: CreateAgentToolOutput) {
 export function CreateAgentTool({ part }: Props) {
   const text = getAnimationText(part);
   const { onSend } = useCopilotChatActions();
   const isStreaming =
     part.state === "input-streaming" || part.state === "input-available";
   const output = getCreateAgentToolOutput(part);
   const isError =
     part.state === "output-error" || (!!output && isErrorOutput(output));
   const isOperating =
     !!output &&
     (isOperationStartedOutput(output) ||
       isOperationPendingOutput(output) ||
       isOperationInProgressOutput(output));
   const progress = useAsymptoticProgress(isOperating);
   const hasExpandableContent =
     part.state === "output-available" &&
     !!output &&
@@ -150,7 +149,10 @@ export function CreateAgentTool({ part }: Props) {
       </div>

       {hasExpandableContent && output && (
-        <ToolAccordion {...getAccordionMeta(output)}>
+        <ToolAccordion
+          {...getAccordionMeta(output)}
+          defaultExpanded={isOperating || isClarificationNeededOutput(output)}
+        >
           {isOperating && (
             <ContentGrid>
               <ProgressBar value={progress} className="max-w-[280px]" />

View File

@@ -146,7 +146,10 @@ export function EditAgentTool({ part }: Props) {
       </div>

       {hasExpandableContent && output && (
-        <ToolAccordion {...getAccordionMeta(output)}>
+        <ToolAccordion
+          {...getAccordionMeta(output)}
+          defaultExpanded={isOperating || isClarificationNeededOutput(output)}
+        >
           {isOperating && (
             <ContentGrid>
               <ProgressBar value={progress} className="max-w-[280px]" />

View File

@@ -61,7 +61,14 @@ export function RunAgentTool({ part }: Props) {
       </div>

       {hasExpandableContent && output && (
-        <ToolAccordion {...getAccordionMeta(output)}>
+        <ToolAccordion
+          {...getAccordionMeta(output)}
+          defaultExpanded={
+            isRunAgentExecutionStartedOutput(output) ||
+            isRunAgentSetupRequirementsOutput(output) ||
+            isRunAgentAgentDetailsOutput(output)
+          }
+        >
           {isRunAgentExecutionStartedOutput(output) && (
             <ExecutionStartedCard output={output} />
           )}

View File

@@ -10,7 +10,7 @@ import {
   WarningDiamondIcon,
 } from "@phosphor-icons/react";
 import type { ToolUIPart } from "ai";
-import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";
+import { SpinnerLoader } from "../../components/SpinnerLoader/SpinnerLoader";

 export interface RunAgentInput {
   username_agent_slug?: string;
@@ -171,7 +171,7 @@ export function ToolIcon({
     );
   }
   if (isStreaming) {
-    return <OrbitLoader size={24} />;
+    return <SpinnerLoader size={40} className="text-neutral-700" />;
   }
   return <PlayIcon size={14} weight="regular" className="text-neutral-400" />;
 }
@@ -203,7 +203,7 @@ export function getAccordionMeta(output: RunAgentToolOutput): {
       ? output.status.trim()
       : "started";
     return {
-      icon: <OrbitLoader size={28} className="text-neutral-700" />,
+      icon: <SpinnerLoader size={28} className="text-neutral-700" />,
       title: output.graph_name,
       description: `Status: ${statusText}`,
     };

View File

@@ -55,7 +55,13 @@ export function RunBlockTool({ part }: Props) {
       </div>

       {hasExpandableContent && output && (
-        <ToolAccordion {...getAccordionMeta(output)}>
+        <ToolAccordion
+          {...getAccordionMeta(output)}
+          defaultExpanded={
+            isRunBlockBlockOutput(output) ||
+            isRunBlockSetupRequirementsOutput(output)
+          }
+        >
           {isRunBlockBlockOutput(output) && <BlockOutputCard output={output} />}

           {isRunBlockSetupRequirementsOutput(output) && (

View File

@@ -8,7 +8,7 @@ import {
   WarningDiamondIcon,
 } from "@phosphor-icons/react";
 import type { ToolUIPart } from "ai";
-import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";
+import { SpinnerLoader } from "../../components/SpinnerLoader/SpinnerLoader";

 export interface RunBlockInput {
   block_id?: string;
@@ -120,7 +120,7 @@ export function ToolIcon({
     );
   }
   if (isStreaming) {
-    return <OrbitLoader size={24} />;
+    return <SpinnerLoader size={40} className="text-neutral-700" />;
   }
   return <PlayIcon size={14} weight="regular" className="text-neutral-400" />;
 }
@@ -149,7 +149,7 @@ export function getAccordionMeta(output: RunBlockToolOutput): {
   if (isRunBlockBlockOutput(output)) {
     const keys = Object.keys(output.outputs ?? {});
     return {
-      icon: <OrbitLoader size={24} className="text-neutral-700" />,
+      icon: <SpinnerLoader size={32} className="text-neutral-700" />,
       title: output.block_name,
       description:
         keys.length > 0

View File

@@ -3,6 +3,7 @@ import { useBreakpoint } from "@/lib/hooks/useBreakpoint";
 import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
 import { useChat } from "@ai-sdk/react";
 import { DefaultChatTransport } from "ai";
+import { useRouter } from "next/navigation";
 import { useEffect, useMemo, useState } from "react";

 import { useChatSession } from "./useChatSession";
@@ -10,6 +11,7 @@ export function useCopilotPage() {
   const { isUserLoading, isLoggedIn } = useSupabase();
   const [isDrawerOpen, setIsDrawerOpen] = useState(false);
   const [pendingMessage, setPendingMessage] = useState<string | null>(null);
+  const router = useRouter();

   const {
     sessionId,
@@ -52,6 +54,10 @@ export function useCopilotPage() {
     transport: transport ?? undefined,
   });

+  useEffect(() => {
+    if (!isUserLoading && !isLoggedIn) router.replace("/login");
+  }, [isUserLoading, isLoggedIn]);
+
   useEffect(() => {
     if (!hydratedMessages || hydratedMessages.length === 0) return;

     setMessages((prev) => {

View File

@@ -6,7 +6,6 @@ import { SupabaseClient } from "@supabase/supabase-js";
 export const PROTECTED_PAGES = [
   "/auth/authorize",
   "/auth/integrations",
-  "/copilot",
   "/monitor",
   "/build",
   "/onboarding",