Compare commits

..

7 Commits

Author SHA1 Message Date
Zamil Majdy
95c6907ccd fix(frontend): remove test screenshots from repo
Remove binary test screenshots that bloat the repo. Test evidence
should be in the PR description or CI artifacts, not committed.
2026-04-01 18:03:00 +02:00
Zamil Majdy
f4bc3c2012 test: add test screenshots for PR #12598 stream timeout verification 2026-04-01 17:59:17 +02:00
Zamil Majdy
f265ef8ac3 fix(frontend): use type-safe any cast for createSessionMutation call
The generated mutation type differs between local (void) and CI
(requires CreateSessionRequest) due to export-api-schema regeneration.
Use an explicit any cast to handle both generated type variants.
2026-04-01 17:59:17 +02:00
Zamil Majdy
c79e6ff30a fix(frontend): clear stream timeout on stop and fix pre-existing TS errors
Clear the stream timeout timer immediately when the user clicks stop,
preventing a brief window where the timeout could fire after the user
already cancelled the stream. Also fix pre-existing TypeScript errors
in admin rate-limit components (missing user_email on generated type)
and useChatSession (createSessionMutation arg mismatch).
2026-04-01 17:59:17 +02:00
Zamil Majdy
7db8bf161a style(frontend): remove eslint-disable by referencing rawMessages in effect body
Reference rawMessages.length in the stream timeout effect so the
exhaustive-deps rule is satisfied without an eslint suppressor comment.
2026-04-01 17:59:17 +02:00
Zamil Majdy
84650d0f4d fix(frontend): improve stream timeout toast description
Deduplicate "Connection lost" between title and description — the
description now tells the user what to do next.
2026-04-01 17:59:17 +02:00
Zamil Majdy
0467cb2e49 fix(frontend): add stream timeout to copilot chat
When an SSE stream dies silently (no disconnect event), the UI stays
stuck in "Reasoning..." indefinitely. Add a 60-second inactivity
timeout that auto-cancels the stream and shows an error toast,
prompting the user to retry.
2026-04-01 17:59:17 +02:00
30 changed files with 309 additions and 1023 deletions

View File

@@ -2,8 +2,6 @@ import copy
from datetime import date, time
from typing import Any, Optional
from pydantic import AliasChoices, Field
from backend.blocks._base import (
Block,
BlockCategory,
@@ -469,8 +467,7 @@ class AgentFileInputBlock(AgentInputBlock):
class AgentDropdownInputBlock(AgentInputBlock):
"""
A specialized text input block that presents a dropdown selector
restricted to a fixed set of values.
A specialized text input block that relies on placeholder_values to present a dropdown.
"""
class Input(AgentInputBlock.Input):
@@ -480,23 +477,16 @@ class AgentDropdownInputBlock(AgentInputBlock):
advanced=False,
title="Default Value",
)
# Use Field() directly (not SchemaField) to pass validation_alias,
# which handles backward compat for legacy "placeholder_values" across
# all construction paths (model_construct, __init__, model_validate).
options: list = Field(
placeholder_values: list = SchemaField(
description="Possible values for the dropdown.",
default_factory=list,
advanced=False,
title="Dropdown Options",
description=(
"If provided, renders the input as a dropdown selector "
"restricted to these values. Leave empty for free-text input."
),
validation_alias=AliasChoices("options", "placeholder_values"),
json_schema_extra={"advanced": False, "secret": False},
)
def generate_schema(self):
schema = super().generate_schema()
if possible_values := self.options:
if possible_values := self.placeholder_values:
schema["enum"] = possible_values
return schema
@@ -514,13 +504,13 @@ class AgentDropdownInputBlock(AgentInputBlock):
{
"value": "Option A",
"name": "dropdown_1",
"options": ["Option A", "Option B", "Option C"],
"placeholder_values": ["Option A", "Option B", "Option C"],
"description": "Dropdown example 1",
},
{
"value": "Option C",
"name": "dropdown_2",
"options": ["Option A", "Option B", "Option C"],
"placeholder_values": ["Option A", "Option B", "Option C"],
"description": "Dropdown example 2",
},
],

View File

@@ -300,27 +300,13 @@ def test_agent_input_block_ignores_legacy_placeholder_values():
def test_dropdown_input_block_produces_enum():
"""Verify AgentDropdownInputBlock.Input.generate_schema() produces enum
using the canonical 'options' field name."""
opts = ["Option A", "Option B"]
"""Verify AgentDropdownInputBlock.Input.generate_schema() produces enum."""
options = ["Option A", "Option B"]
instance = AgentDropdownInputBlock.Input.model_construct(
name="choice", value=None, options=opts
name="choice", value=None, placeholder_values=options
)
schema = instance.generate_schema()
assert schema.get("enum") == opts
def test_dropdown_input_block_legacy_placeholder_values_produces_enum():
"""Verify backward compat: passing legacy 'placeholder_values' to
AgentDropdownInputBlock still produces enum via model_construct remap."""
opts = ["Option A", "Option B"]
instance = AgentDropdownInputBlock.Input.model_construct(
name="choice", value=None, placeholder_values=opts
)
schema = instance.generate_schema()
assert (
schema.get("enum") == opts
), "Legacy placeholder_values should be remapped to options"
assert schema.get("enum") == options
def test_generate_schema_integration_legacy_placeholder_values():
@@ -343,11 +329,11 @@ def test_generate_schema_integration_legacy_placeholder_values():
def test_generate_schema_integration_dropdown_produces_enum():
"""Test the full Graph._generate_schema path with AgentDropdownInputBlock
— verifies enum IS produced for dropdown blocks using canonical field name."""
— verifies enum IS produced for dropdown blocks."""
dropdown_input_default = {
"name": "color",
"value": None,
"options": ["Red", "Green", "Blue"],
"placeholder_values": ["Red", "Green", "Blue"],
}
result = BaseGraph._generate_schema(
(AgentDropdownInputBlock.Input, dropdown_input_default),
@@ -358,36 +344,3 @@ def test_generate_schema_integration_dropdown_produces_enum():
"Green",
"Blue",
], "Graph schema should contain enum from AgentDropdownInputBlock"
def test_generate_schema_integration_dropdown_legacy_placeholder_values():
"""Test the full Graph._generate_schema path with AgentDropdownInputBlock
using legacy 'placeholder_values' — verifies backward compat produces enum."""
legacy_dropdown_input_default = {
"name": "color",
"value": None,
"placeholder_values": ["Red", "Green", "Blue"],
}
result = BaseGraph._generate_schema(
(AgentDropdownInputBlock.Input, legacy_dropdown_input_default),
)
color_props = result["properties"]["color"]
assert color_props.get("enum") == [
"Red",
"Green",
"Blue",
], "Legacy placeholder_values should still produce enum via model_construct remap"
def test_dropdown_input_block_init_legacy_placeholder_values():
"""Verify backward compat: constructing AgentDropdownInputBlock.Input via
model_validate with legacy 'placeholder_values' correctly maps to 'options'."""
opts = ["Option A", "Option B"]
instance = AgentDropdownInputBlock.Input.model_validate(
{"name": "choice", "value": None, "placeholder_values": opts}
)
assert (
instance.options == opts
), "Legacy placeholder_values should be remapped to options via model_validate"
schema = instance.generate_schema()
assert schema.get("enum") == opts

View File

@@ -123,7 +123,6 @@ async def get_provider_token(user_id: str, provider: str) -> str | None:
[c for c in creds_list if c.type == "oauth2"],
key=lambda c: 0 if "repo" in (cast(OAuth2Credentials, c).scopes or []) else 1,
)
refresh_failed = False
for creds in oauth2_creds:
if creds.type == "oauth2":
try:
@@ -142,7 +141,6 @@ async def get_provider_token(user_id: str, provider: str) -> str | None:
# Do NOT fall back to the stale token — it is likely expired
# or revoked. Returning None forces the caller to re-auth,
# preventing the LLM from receiving a non-functional token.
refresh_failed = True
continue
_token_cache[cache_key] = token
return token
@@ -154,12 +152,8 @@ async def get_provider_token(user_id: str, provider: str) -> str | None:
_token_cache[cache_key] = token
return token
# Only cache "not connected" when the user truly has no credentials for this
# provider. If we had OAuth credentials but refresh failed (e.g. transient
# network error, event-loop mismatch), do NOT cache the negative result —
# the next call should retry the refresh instead of being blocked for 60 s.
if not refresh_failed:
_null_cache[cache_key] = True
# No credentials found — cache to avoid repeated DB hits.
_null_cache[cache_key] = True
return None

View File

@@ -129,15 +129,8 @@ class TestGetProviderToken:
assert result == "oauth-tok"
@pytest.mark.asyncio(loop_scope="session")
async def test_oauth2_refresh_failure_returns_none_without_null_cache(self):
"""On refresh failure, return None but do NOT cache in null_cache.
The user has credentials — they just couldn't be refreshed right now
(e.g. transient network error or event-loop mismatch in the copilot
executor). Caching a negative result would block all credential
lookups for 60 s even though the creds exist and may refresh fine
on the next attempt.
"""
async def test_oauth2_refresh_failure_returns_none(self):
"""On refresh failure, return None instead of caching a stale token."""
oauth_creds = _make_oauth2_creds("stale-oauth-tok")
mock_manager = MagicMock()
mock_manager.store.get_creds_by_provider = AsyncMock(return_value=[oauth_creds])
@@ -148,8 +141,6 @@ class TestGetProviderToken:
# Stale tokens must NOT be returned — forces re-auth.
assert result is None
# Must NOT cache negative result when refresh failed — next call retries.
assert (_USER, _PROVIDER) not in _null_cache
@pytest.mark.asyncio(loop_scope="session")
async def test_no_credentials_caches_null_entry(self):
@@ -185,96 +176,6 @@ class TestGetProviderToken:
assert _NULL_CACHE_TTL < _TOKEN_CACHE_TTL
class TestThreadSafetyLocks:
"""Bug reproduction: shared AsyncRedisKeyedMutex across threads caused
'Future attached to a different loop' when copilot workers accessed
credentials from different event loops."""
@pytest.mark.asyncio(loop_scope="session")
async def test_store_locks_returns_per_thread_instance(self):
"""IntegrationCredentialsStore.locks() must return different instances
for different threads (via @thread_cached)."""
import asyncio
import concurrent.futures
from backend.integrations.credentials_store import IntegrationCredentialsStore
store = IntegrationCredentialsStore()
async def get_locks_id():
mock_redis = AsyncMock()
with patch(
"backend.integrations.credentials_store.get_redis_async",
return_value=mock_redis,
):
locks = await store.locks()
return id(locks)
# Get locks from main thread
main_id = await get_locks_id()
# Get locks from a worker thread
def run_in_thread():
loop = asyncio.new_event_loop()
try:
return loop.run_until_complete(get_locks_id())
finally:
loop.close()
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
worker_id = await asyncio.get_event_loop().run_in_executor(
pool, run_in_thread
)
assert main_id != worker_id, (
"Store.locks() returned the same instance across threads. "
"This would cause 'Future attached to a different loop' errors."
)
@pytest.mark.asyncio(loop_scope="session")
async def test_manager_delegates_to_store_locks(self):
"""IntegrationCredentialsManager.locks() should delegate to store."""
from backend.integrations.creds_manager import IntegrationCredentialsManager
manager = IntegrationCredentialsManager()
mock_redis = AsyncMock()
with patch(
"backend.integrations.credentials_store.get_redis_async",
return_value=mock_redis,
):
locks = await manager.locks()
# Should have gotten it from the store
assert locks is not None
class TestRefreshUnlockedPath:
"""Bug reproduction: copilot worker threads need lock-free refresh because
Redis-backed asyncio.Lock created on one event loop can't be used on another."""
@pytest.mark.asyncio(loop_scope="session")
async def test_refresh_if_needed_lock_false_skips_redis(self):
"""refresh_if_needed(lock=False) must not touch Redis locks at all."""
from backend.integrations.creds_manager import IntegrationCredentialsManager
manager = IntegrationCredentialsManager()
creds = _make_oauth2_creds()
mock_handler = MagicMock()
mock_handler.needs_refresh = MagicMock(return_value=False)
with patch(
"backend.integrations.creds_manager._get_provider_oauth_handler",
new_callable=AsyncMock,
return_value=mock_handler,
):
result = await manager.refresh_if_needed(_USER, creds, lock=False)
# Should return credentials without touching locks
assert result.id == creds.id
class TestGetIntegrationEnvVars:
@pytest.mark.asyncio(loop_scope="session")
async def test_injects_all_env_vars_for_provider(self):

View File

@@ -66,7 +66,6 @@ from pydantic import BaseModel, PrivateAttr
ToolName = Literal[
# Platform tools (must match keys in TOOL_REGISTRY)
"add_understanding",
"ask_question",
"bash_exec",
"browser_act",
"browser_navigate",

View File

@@ -6,23 +6,16 @@ from pathlib import Path
class TestAgentGenerationGuideContainsClarifySection:
"""The agent generation guide must include the clarification section."""
def test_guide_includes_clarify_section(self):
def test_guide_includes_clarify_before_building(self):
guide_path = Path(__file__).parent / "sdk" / "agent_generation_guide.md"
content = guide_path.read_text(encoding="utf-8")
assert "Before or During Building" in content
assert "Clarifying Before Building" in content
def test_guide_mentions_find_block_for_clarification(self):
guide_path = Path(__file__).parent / "sdk" / "agent_generation_guide.md"
content = guide_path.read_text(encoding="utf-8")
clarify_section = content.split("Before or During Building")[1].split(
# find_block must appear in the clarification section (before the workflow)
clarify_section = content.split("Clarifying Before Building")[1].split(
"### Workflow"
)[0]
assert "find_block" in clarify_section
def test_guide_mentions_ask_question_tool(self):
guide_path = Path(__file__).parent / "sdk" / "agent_generation_guide.md"
content = guide_path.read_text(encoding="utf-8")
clarify_section = content.split("Before or During Building")[1].split(
"### Workflow"
)[0]
assert "ask_question" in clarify_section

View File

@@ -3,25 +3,17 @@
You can create, edit, and customize agents directly. You ARE the brain —
generate the agent JSON yourself using block schemas, then validate and save.
### Clarifying Before or During Building
### Clarifying Before Building
Use `ask_question` whenever the user's intent is ambiguous — whether
that's before starting or midway through the workflow. Common moments:
- **Before building**: output format, delivery channel, data source, or
trigger is unspecified.
- **During block discovery**: multiple blocks could fit and the user
should choose.
- **During JSON generation**: a wiring decision depends on user
preference.
Steps:
1. Call `find_block` (or another discovery tool) to learn what the
platform actually supports for the ambiguous dimension.
2. Call `ask_question` with a concrete question listing the discovered
Before starting the workflow below, check whether the user's goal is
**ambiguous** — missing the output format, delivery channel, data source,
or trigger. If so:
1. Call `find_block` with a query targeting the ambiguous dimension to
discover what the platform actually supports.
2. Ask the user **one concrete question** grounded in the discovered
options (e.g. "The platform supports Gmail, Slack, and Google Docs —
which should the agent use for delivery?").
3. **Wait for the user's answer** before continuing.
3. **Wait for the user's answer** before proceeding.
**Skip this** when the goal already specifies all dimensions (e.g.
"scrape prices from Amazon and email me daily").
@@ -97,8 +89,8 @@ These define the agent's interface — what it accepts and what it produces.
**AgentDropdownInputBlock** (ID: `655d6fdf-a334-421c-b733-520549c07cd1`):
- Specialized input block that presents a dropdown/select to the user
- Required `input_default` fields: `name` (str)
- Optional: `options` (list of dropdown values; when omitted/empty, input behaves as free-text), `title`, `description`, `value` (default selection)
- Required `input_default` fields: `name` (str), `placeholder_values` (list of options, must have at least one)
- Optional: `title`, `description`, `value` (default selection)
- Output: `result` — the user-selected value at runtime
- Use this instead of AgentInputBlock when the user should pick from a fixed set of options

View File

@@ -29,7 +29,6 @@ from backend.copilot.response_model import (
StreamToolOutputAvailable,
)
from .compaction import compaction_events
from .response_adapter import SDKResponseAdapter
from .tool_adapter import MCP_TOOL_PREFIX
from .tool_adapter import _pending_tool_outputs as _pto
@@ -690,102 +689,3 @@ def test_already_resolved_tool_skipped_in_user_message():
assert (
len(output_events) == 0
), "Already-resolved tool should not emit duplicate output"
# -- _end_text_if_open before compaction -------------------------------------
def test_end_text_if_open_emits_text_end_before_finish_step():
"""StreamTextEnd must be emitted before StreamFinishStep during compaction.
When ``emit_end_if_ready`` fires compaction events while a text block is
still open, ``_end_text_if_open`` must close it first. If StreamFinishStep
arrives before StreamTextEnd, the Vercel AI SDK clears ``activeTextParts``
and raises "Received text-end for missing text part".
"""
adapter = _adapter()
# Open a text block by processing an AssistantMessage with text
msg = AssistantMessage(content=[TextBlock(text="partial response")], model="test")
adapter.convert_message(msg)
assert adapter.has_started_text
assert not adapter.has_ended_text
# Simulate what service.py does before yielding compaction events
pre_close: list[StreamBaseResponse] = []
adapter._end_text_if_open(pre_close)
combined = pre_close + list(compaction_events("Compacted transcript"))
text_end_idx = next(
(i for i, e in enumerate(combined) if isinstance(e, StreamTextEnd)), None
)
finish_step_idx = next(
(i for i, e in enumerate(combined) if isinstance(e, StreamFinishStep)), None
)
assert text_end_idx is not None, "StreamTextEnd must be present"
assert finish_step_idx is not None, "StreamFinishStep must be present"
assert text_end_idx < finish_step_idx, (
f"StreamTextEnd (idx={text_end_idx}) must precede "
f"StreamFinishStep (idx={finish_step_idx}) — otherwise the Vercel AI SDK "
"clears activeTextParts before text-end arrives"
)
def test_step_open_must_reset_after_compaction_finish_step():
"""Adapter step_open must be reset when compaction emits StreamFinishStep.
Compaction events bypass the adapter, so service.py must explicitly clear
step_open after yielding a StreamFinishStep from compaction. Without this,
the next AssistantMessage skips StreamStartStep because the adapter still
thinks a step is open.
"""
adapter = _adapter()
# Open a step + text block via an AssistantMessage
msg = AssistantMessage(content=[TextBlock(text="thinking...")], model="test")
adapter.convert_message(msg)
assert adapter.step_open is True
# Simulate what service.py does: close text, then check compaction events
pre_close: list[StreamBaseResponse] = []
adapter._end_text_if_open(pre_close)
events = list(compaction_events("Compacted transcript"))
if any(isinstance(ev, StreamFinishStep) for ev in events):
adapter.step_open = False
assert (
adapter.step_open is False
), "step_open must be False after compaction emits StreamFinishStep"
# Next AssistantMessage must open a new step
msg2 = AssistantMessage(content=[TextBlock(text="continued")], model="test")
results = adapter.convert_message(msg2)
assert any(
isinstance(r, StreamStartStep) for r in results
), "A new StreamStartStep must be emitted after compaction closed the step"
def test_end_text_if_open_no_op_when_no_text_open():
"""_end_text_if_open emits nothing when no text block is open."""
adapter = _adapter()
results: list[StreamBaseResponse] = []
adapter._end_text_if_open(results)
assert results == []
def test_end_text_if_open_no_op_after_text_already_ended():
"""_end_text_if_open emits nothing when the text block is already closed."""
adapter = _adapter()
msg = AssistantMessage(content=[TextBlock(text="hello")], model="test")
adapter.convert_message(msg)
# Close it once
first: list[StreamBaseResponse] = []
adapter._end_text_if_open(first)
assert len(first) == 1
assert isinstance(first[0], StreamTextEnd)
# Second call must be a no-op
second: list[StreamBaseResponse] = []
adapter._end_text_if_open(second)
assert second == []

View File

@@ -1487,188 +1487,3 @@ class TestStreamChatCompletionRetryIntegration:
errors = [e for e in events if isinstance(e, StreamError)]
assert not errors, f"Unexpected StreamError: {errors}"
assert any(isinstance(e, StreamStart) for e in events)
@pytest.mark.asyncio
async def test_result_message_success_subtype_prompt_too_long_triggers_compaction(
self,
):
"""CLI returns ResultMessage(subtype="success") with result="Prompt is too long".
The SDK internally compacts but the transcript is still too long. It
returns subtype="success" (process completed) with result="Prompt is
too long" (the actual rejection message). The retry loop must detect
this as a context-length error and trigger compaction — the subtype
"success" must not fool it into treating this as a real response.
"""
import contextlib
from claude_agent_sdk import ResultMessage
from backend.copilot.response_model import StreamError, StreamStart
from backend.copilot.sdk.service import stream_chat_completion_sdk
session = self._make_session()
success_result = self._make_result_message()
attempt_count = [0]
error_result = ResultMessage(
subtype="success",
result="Prompt is too long",
duration_ms=100,
duration_api_ms=0,
is_error=False,
num_turns=1,
session_id="test-session-id",
)
def _client_factory(*args, **kwargs):
attempt_count[0] += 1
async def _receive_error():
yield error_result
async def _receive_success():
yield success_result
client = MagicMock()
client._transport = MagicMock()
client._transport.write = AsyncMock()
client.query = AsyncMock()
if attempt_count[0] == 1:
client.receive_response = _receive_error
else:
client.receive_response = _receive_success
cm = AsyncMock()
cm.__aenter__.return_value = client
cm.__aexit__.return_value = None
return cm
original_transcript = _build_transcript(
[("user", "prior question"), ("assistant", "prior answer")]
)
compacted_transcript = _build_transcript(
[("user", "[summary]"), ("assistant", "summary reply")]
)
patches = _make_sdk_patches(
session,
original_transcript=original_transcript,
compacted_transcript=compacted_transcript,
client_side_effect=_client_factory,
)
events = []
with contextlib.ExitStack() as stack:
for target, kwargs in patches:
stack.enter_context(patch(target, **kwargs))
async for event in stream_chat_completion_sdk(
session_id="test-session-id",
message="hello",
is_user_message=True,
user_id="test-user",
session=session,
):
events.append(event)
assert attempt_count[0] == 2, (
f"Expected 2 SDK attempts (subtype='success' with 'Prompt is too long' "
f"result should trigger compaction retry), got {attempt_count[0]}"
)
errors = [e for e in events if isinstance(e, StreamError)]
assert not errors, f"Unexpected StreamError: {errors}"
assert any(isinstance(e, StreamStart) for e in events)
@pytest.mark.asyncio
async def test_assistant_message_error_content_prompt_too_long_triggers_compaction(
self,
):
"""AssistantMessage.error="invalid_request" with content "Prompt is too long".
The SDK returns error type "invalid_request" but puts the actual
rejection message ("Prompt is too long") in the content blocks.
The retry loop must detect this via content inspection (sdk_error
being set confirms it's an error message, not user content).
"""
import contextlib
from claude_agent_sdk import AssistantMessage, ResultMessage, TextBlock
from backend.copilot.response_model import StreamError, StreamStart
from backend.copilot.sdk.service import stream_chat_completion_sdk
session = self._make_session()
success_result = self._make_result_message()
attempt_count = [0]
def _client_factory(*args, **kwargs):
attempt_count[0] += 1
async def _receive_error():
# SDK returns invalid_request with "Prompt is too long" in content.
# ResultMessage.result is a non-PTL value ("done") to isolate
# the AssistantMessage content detection path exclusively.
yield AssistantMessage(
content=[TextBlock(text="Prompt is too long")],
model="<synthetic>",
error="invalid_request",
)
yield ResultMessage(
subtype="success",
result="done",
duration_ms=100,
duration_api_ms=0,
is_error=False,
num_turns=1,
session_id="test-session-id",
)
async def _receive_success():
yield success_result
client = MagicMock()
client._transport = MagicMock()
client._transport.write = AsyncMock()
client.query = AsyncMock()
if attempt_count[0] == 1:
client.receive_response = _receive_error
else:
client.receive_response = _receive_success
cm = AsyncMock()
cm.__aenter__.return_value = client
cm.__aexit__.return_value = None
return cm
original_transcript = _build_transcript(
[("user", "prior question"), ("assistant", "prior answer")]
)
compacted_transcript = _build_transcript(
[("user", "[summary]"), ("assistant", "summary reply")]
)
patches = _make_sdk_patches(
session,
original_transcript=original_transcript,
compacted_transcript=compacted_transcript,
client_side_effect=_client_factory,
)
events = []
with contextlib.ExitStack() as stack:
for target, kwargs in patches:
stack.enter_context(patch(target, **kwargs))
async for event in stream_chat_completion_sdk(
session_id="test-session-id",
message="hello",
is_user_message=True,
user_id="test-user",
session=session,
):
events.append(event)
assert attempt_count[0] == 2, (
f"Expected 2 SDK attempts (AssistantMessage error content 'Prompt is "
f"too long' should trigger compaction retry), got {attempt_count[0]}"
)
errors = [e for e in events if isinstance(e, StreamError)]
assert not errors, f"Unexpected StreamError: {errors}"
assert any(isinstance(e, StreamStart) for e in events)

View File

@@ -1310,16 +1310,10 @@ async def _run_stream_attempt(
# AssistantMessage.error (not as a Python exception).
# Re-raise so the outer retry loop can compact the
# transcript and retry with reduced context.
# Check both error_text and error_preview: sdk_error
# being set confirms this is an error message (not user
# content), so checking content is safe. The actual
# error description (e.g. "Prompt is too long") may be
# in the content, not the error type field
# (e.g. error="invalid_request", content="Prompt is
# too long").
if _is_prompt_too_long(Exception(error_text)) or _is_prompt_too_long(
Exception(error_preview)
):
# Only check error_text (the error field), not the
# content preview — content may contain arbitrary text
# that false-positives the pattern match.
if _is_prompt_too_long(Exception(error_text)):
logger.warning(
"%s Prompt-too-long detected via AssistantMessage "
"error — raising for retry",
@@ -1420,16 +1414,13 @@ async def _run_stream_attempt(
ctx.log_prefix,
sdk_msg.result or "(no error message provided)",
)
# Check for prompt-too-long regardless of subtype — the
# SDK may return subtype="success" with result="Prompt is
# too long" when the CLI rejects the prompt before calling
# the API (cost_usd=0, no tokens consumed). If we only
# check the "error" subtype path, the stream appears to
# complete normally, the synthetic error text is stored
# in the transcript, and the session grows without bound.
if _is_prompt_too_long(RuntimeError(sdk_msg.result or "")):
raise RuntimeError("Prompt is too long")
# If the CLI itself rejected the prompt as too long
# (pre-API check, duration_api_ms=0), re-raise as an
# exception so the retry loop can trigger compaction.
# Without this, the ResultMessage is silently consumed
# and the retry/compaction mechanism is never invoked.
if _is_prompt_too_long(RuntimeError(sdk_msg.result or "")):
raise RuntimeError("Prompt is too long")
# Capture token usage from ResultMessage.
# Anthropic reports cached tokens separately:
@@ -1462,23 +1453,6 @@ async def _run_stream_attempt(
# Emit compaction end if SDK finished compacting.
# Sync TranscriptBuilder with the CLI's active context.
compact_result = await ctx.compaction.emit_end_if_ready(ctx.session)
if compact_result.events:
# Compaction events end with StreamFinishStep, which maps to
# Vercel AI SDK's "finish-step" — that clears activeTextParts.
# Close any open text block BEFORE the compaction events so
# the text-end arrives before finish-step, preventing
# "text-end for missing text part" errors on the frontend.
pre_close: list[StreamBaseResponse] = []
state.adapter._end_text_if_open(pre_close)
# Compaction events bypass the adapter, so sync step state
# when a StreamFinishStep is present — otherwise the adapter
# will skip StreamStartStep on the next AssistantMessage.
if any(
isinstance(ev, StreamFinishStep) for ev in compact_result.events
):
state.adapter.step_open = False
for r in pre_close:
yield r
for ev in compact_result.events:
yield ev
entries_replaced = False

View File

@@ -10,7 +10,6 @@ from backend.copilot.tracking import track_tool_called
from .add_understanding import AddUnderstandingTool
from .agent_browser import BrowserActTool, BrowserNavigateTool, BrowserScreenshotTool
from .agent_output import AgentOutputTool
from .ask_question import AskQuestionTool
from .base import BaseTool
from .bash_exec import BashExecTool
from .connect_integration import ConnectIntegrationTool
@@ -56,7 +55,6 @@ logger = logging.getLogger(__name__)
# Single source of truth for all tools
TOOL_REGISTRY: dict[str, BaseTool] = {
"add_understanding": AddUnderstandingTool(),
"ask_question": AskQuestionTool(),
"create_agent": CreateAgentTool(),
"customize_agent": CustomizeAgentTool(),
"edit_agent": EditAgentTool(),

View File

@@ -1,93 +0,0 @@
"""AskQuestionTool - Ask the user a clarifying question before proceeding."""
from typing import Any
from backend.copilot.model import ChatSession
from .base import BaseTool
from .models import ClarificationNeededResponse, ClarifyingQuestion, ToolResponseBase
class AskQuestionTool(BaseTool):
"""Ask the user a clarifying question and wait for their answer.
Use this tool when the user's request is ambiguous and you need more
information before proceeding. Call find_block or other discovery tools
first to ground your question in real platform options, then call this
tool with a concrete question listing those options.
"""
@property
def name(self) -> str:
return "ask_question"
@property
def description(self) -> str:
return (
"Ask the user a clarifying question. Use when the request is "
"ambiguous and you need to confirm intent, choose between options, "
"or gather missing details before proceeding."
)
@property
def parameters(self) -> dict[str, Any]:
return {
"type": "object",
"properties": {
"question": {
"type": "string",
"description": (
"The concrete question to ask the user. Should list "
"real options when applicable."
),
},
"options": {
"type": "array",
"items": {"type": "string"},
"description": (
"Options for the user to choose from "
"(e.g. ['Email', 'Slack', 'Google Docs'])."
),
},
"keyword": {
"type": "string",
"description": "Short label identifying what the question is about.",
},
},
"required": ["question"],
}
@property
def requires_auth(self) -> bool:
return False
async def _execute(
self,
user_id: str | None,
session: ChatSession,
**kwargs: Any,
) -> ToolResponseBase:
del user_id # unused; required by BaseTool contract
question_raw = kwargs.get("question")
if not isinstance(question_raw, str) or not question_raw.strip():
raise ValueError("ask_question requires a non-empty 'question' string")
question = question_raw.strip()
raw_options = kwargs.get("options", [])
if not isinstance(raw_options, list):
raw_options = []
options: list[str] = [str(o) for o in raw_options if o]
raw_keyword = kwargs.get("keyword", "")
keyword: str = str(raw_keyword) if raw_keyword else ""
session_id = session.session_id if session else None
example = ", ".join(options) if options else None
clarifying_question = ClarifyingQuestion(
question=question,
keyword=keyword,
example=example,
)
return ClarificationNeededResponse(
message=question,
session_id=session_id,
questions=[clarifying_question],
)

View File

@@ -1,99 +0,0 @@
"""Tests for AskQuestionTool."""
import pytest
from backend.copilot.model import ChatSession
from backend.copilot.tools.ask_question import AskQuestionTool
from backend.copilot.tools.models import ClarificationNeededResponse
@pytest.fixture()
def tool() -> AskQuestionTool:
    """Provide a fresh AskQuestionTool instance for each test."""
    instance = AskQuestionTool()
    return instance
@pytest.fixture()
def session() -> ChatSession:
    """Provide a fresh non-dry-run chat session for each test."""
    chat = ChatSession.new(user_id="test-user", dry_run=False)
    return chat
@pytest.mark.asyncio
async def test_execute_with_options(tool: AskQuestionTool, session: ChatSession):
    """Options should be joined into the single question's example string."""
    response = await tool._execute(
        user_id=None,
        session=session,
        question="Which channel?",
        options=["Email", "Slack", "Google Docs"],
        keyword="channel",
    )
    assert isinstance(response, ClarificationNeededResponse)
    assert response.message == "Which channel?"
    assert response.session_id == session.session_id
    assert len(response.questions) == 1
    first = response.questions[0]
    assert first.question == "Which channel?"
    assert first.keyword == "channel"
    assert first.example == "Email, Slack, Google Docs"
@pytest.mark.asyncio
async def test_execute_without_options(tool: AskQuestionTool, session: ChatSession):
    """Without options, keyword defaults to '' and example to None."""
    response = await tool._execute(
        user_id=None,
        session=session,
        question="What format do you want?",
    )
    assert isinstance(response, ClarificationNeededResponse)
    assert response.message == "What format do you want?"
    assert len(response.questions) == 1
    first = response.questions[0]
    assert first.question == "What format do you want?"
    assert first.keyword == ""
    assert first.example is None
@pytest.mark.asyncio
async def test_execute_with_keyword_only(tool: AskQuestionTool, session: ChatSession):
    """A keyword without options yields no example text."""
    response = await tool._execute(
        user_id=None,
        session=session,
        question="How often should it run?",
        keyword="trigger",
    )
    assert isinstance(response, ClarificationNeededResponse)
    first = response.questions[0]
    assert first.keyword == "trigger"
    assert first.example is None
@pytest.mark.asyncio
async def test_execute_rejects_empty_question(
    tool: AskQuestionTool, session: ChatSession
):
    """Empty or whitespace-only questions must raise ValueError."""
    for bad_question in ("", " "):
        with pytest.raises(ValueError, match="non-empty"):
            await tool._execute(
                user_id=None, session=session, question=bad_question
            )
@pytest.mark.asyncio
async def test_execute_coerces_invalid_options(
    tool: AskQuestionTool, session: ChatSession
):
    """LLM may send options as a string instead of a list; should not crash."""
    response = await tool._execute(
        user_id=None,
        session=session,
        question="Pick one",
        options="not-a-list",  # type: ignore[arg-type]
    )
    assert isinstance(response, ClarificationNeededResponse)
    assert response.questions[0].example is None

View File

@@ -19,7 +19,6 @@ from backend.data.model import (
UserPasswordCredentials,
)
from backend.data.redis_client import get_redis_async
from backend.util.cache import thread_cached
from backend.util.settings import Settings
settings = Settings()
@@ -305,12 +304,15 @@ def is_system_provider(provider: str) -> bool:
class IntegrationCredentialsStore:
@thread_cached
def __init__(self):
self._locks = None
async def locks(self) -> AsyncRedisKeyedMutex:
# Per-thread: copilot executor runs worker threads with separate event
# loops; AsyncRedisKeyedMutex's internal asyncio.Lock is bound to the
# loop it was created on.
return AsyncRedisKeyedMutex(await get_redis_async())
if self._locks:
return self._locks
self._locks = AsyncRedisKeyedMutex(await get_redis_async())
return self._locks
@property
def db_manager(self):

View File

@@ -8,6 +8,7 @@ from autogpt_libs.utils.synchronize import AsyncRedisKeyedMutex
from redis.asyncio.lock import Lock as AsyncRedisLock
from backend.data.model import Credentials, OAuth2Credentials
from backend.data.redis_client import get_redis_async
from backend.integrations.credentials_store import (
IntegrationCredentialsStore,
provider_matches,
@@ -105,13 +106,14 @@ class IntegrationCredentialsManager:
def __init__(self):
self.store = IntegrationCredentialsStore()
self._locks = None
async def locks(self) -> AsyncRedisKeyedMutex:
# Delegate to store's @thread_cached locks. Manager uses these for
# fine-grained per-credential locking (refresh, acquire); the store
# uses its own for coarse per-user integrations locking. Same mutex
# type, different key spaces — no collision.
return await self.store.locks()
if self._locks:
return self._locks
self._locks = AsyncRedisKeyedMutex(await get_redis_async())
return self._locks
async def create(self, user_id: str, credentials: Credentials) -> None:
result = await self.store.add_creds(user_id, credentials)
@@ -186,74 +188,35 @@ class IntegrationCredentialsManager:
async def refresh_if_needed(
self, user_id: str, credentials: OAuth2Credentials, lock: bool = True
) -> OAuth2Credentials:
# When lock=False, skip ALL Redis locking (both the outer "refresh" scope
# lock and the inner credential lock). This is used by the copilot's
# integration_creds module which runs across multiple threads with separate
# event loops; acquiring a Redis lock whose asyncio.Lock() was created on
# a different loop raises "Future attached to a different loop".
if lock:
return await self._refresh_locked(user_id, credentials)
return await self._refresh_unlocked(user_id, credentials)
async def _get_oauth_handler(
self, credentials: OAuth2Credentials
) -> "BaseOAuthHandler":
"""Resolve the appropriate OAuth handler for the given credentials."""
if provider_matches(credentials.provider, ProviderName.MCP.value):
return create_mcp_oauth_handler(credentials)
return await _get_provider_oauth_handler(credentials.provider)
async def _refresh_locked(
self, user_id: str, credentials: OAuth2Credentials
) -> OAuth2Credentials:
async with self._locked(user_id, credentials.id, "refresh"):
oauth_handler = await self._get_oauth_handler(credentials)
if provider_matches(credentials.provider, ProviderName.MCP.value):
oauth_handler = create_mcp_oauth_handler(credentials)
else:
oauth_handler = await _get_provider_oauth_handler(credentials.provider)
if oauth_handler.needs_refresh(credentials):
logger.debug(
"Refreshing '%s' credentials #%s",
credentials.provider,
credentials.id,
f"Refreshing '{credentials.provider}' credentials #{credentials.id}"
)
# Wait until the credentials are no longer in use anywhere
_lock = await self._acquire_lock(user_id, credentials.id)
try:
fresh_credentials = await oauth_handler.refresh_tokens(credentials)
await self.store.update_creds(user_id, fresh_credentials)
_invoke_creds_changed_hook(user_id, fresh_credentials.provider)
credentials = fresh_credentials
finally:
if (await _lock.locked()) and (await _lock.owned()):
try:
await _lock.release()
except Exception:
logger.warning(
"Failed to release OAuth refresh lock",
exc_info=True,
)
return credentials
_lock = None
if lock:
# Wait until the credentials are no longer in use anywhere
_lock = await self._acquire_lock(user_id, credentials.id)
async def _refresh_unlocked(
self, user_id: str, credentials: OAuth2Credentials
) -> OAuth2Credentials:
"""Best-effort token refresh without any Redis locking.
fresh_credentials = await oauth_handler.refresh_tokens(credentials)
await self.store.update_creds(user_id, fresh_credentials)
# Notify listeners so the refreshed token is picked up immediately.
_invoke_creds_changed_hook(user_id, fresh_credentials.provider)
if _lock and (await _lock.locked()) and (await _lock.owned()):
try:
await _lock.release()
except Exception:
logger.warning(
"Failed to release OAuth refresh lock",
exc_info=True,
)
Safe for use from multi-threaded contexts (e.g. copilot workers) where
each thread has its own event loop and sharing Redis-backed asyncio locks
is not possible. Concurrent refreshes are tolerated: the last writer
wins, and stale tokens are overwritten.
"""
oauth_handler = await self._get_oauth_handler(credentials)
if oauth_handler.needs_refresh(credentials):
logger.debug(
"Refreshing '%s' credentials #%s (lock-free)",
credentials.provider,
credentials.id,
)
fresh_credentials = await oauth_handler.refresh_tokens(credentials)
await self.store.update_creds(user_id, fresh_credentials)
_invoke_creds_changed_hook(user_id, fresh_credentials.provider)
credentials = fresh_credentials
credentials = fresh_credentials
return credentials
async def update(self, user_id: str, updated: Credentials) -> None:
@@ -301,6 +264,7 @@ class IntegrationCredentialsManager:
async def release_all_locks(self):
"""Call this on process termination to ensure all locks are released"""
await (await self.locks()).release_all_locks()
await (await self.store.locks()).release_all_locks()

View File

@@ -22,6 +22,7 @@ function generateTestGraph(name = null) {
input_default: {
name: "Load Test Input",
description: "Test input for load testing",
placeholder_values: {},
},
input_nodes: [],
output_nodes: ["output_node"],
@@ -58,7 +59,11 @@ function generateExecutionInputs() {
"Load Test Input": {
name: "Load Test Input",
description: "Test input for load testing",
value: `Test execution at ${new Date().toISOString()}`,
placeholder_values: {
test_data: `Test execution at ${new Date().toISOString()}`,
test_parameter: Math.random().toString(36).substr(2, 9),
numeric_value: Math.floor(Math.random() * 1000),
},
},
};
}

View File

@@ -5,8 +5,12 @@ import { Button } from "@/components/atoms/Button/Button";
import type { UserRateLimitResponse } from "@/app/api/__generated__/models/userRateLimitResponse";
import { UsageBar } from "../../components/UsageBar";
/** Extend generated type with optional fields returned by the backend
* but not yet present in the generated OpenAPI schema on this branch. */
type RateLimitData = UserRateLimitResponse & { user_email?: string | null };
interface Props {
data: UserRateLimitResponse;
data: RateLimitData;
onReset: (resetWeekly: boolean) => Promise<void>;
/** Override the outer container classes (default: bordered card). */
className?: string;

View File

@@ -49,17 +49,23 @@ export function useRateLimitManager() {
setRateLimitData(null);
try {
// The backend accepts either user_id or email, but the generated type
// only knows about user_id — cast to satisfy the compiler until the
// OpenAPI spec on this branch is updated.
const params = looksLikeEmail(trimmed)
? { email: trimmed }
? ({ email: trimmed } as unknown as { user_id: string })
: { user_id: trimmed };
const response = await getV2GetUserRateLimit(params);
if (response.status !== 200) {
throw new Error("Failed to fetch rate limit");
}
setRateLimitData(response.data);
const data = response.data as typeof response.data & {
user_email?: string | null;
};
setSelectedUser({
user_id: response.data.user_id,
user_email: response.data.user_email ?? response.data.user_id,
user_id: data.user_id,
user_email: data.user_email ?? data.user_id,
});
} catch (error) {
console.error("Error fetching rate limit:", error);

View File

@@ -3,7 +3,6 @@ import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { ExclamationMarkIcon } from "@phosphor-icons/react";
import { ToolUIPart, UIDataTypes, UIMessage, UITools } from "ai";
import { useState } from "react";
import { AskQuestionTool } from "../../../tools/AskQuestion/AskQuestion";
import { ConnectIntegrationTool } from "../../../tools/ConnectIntegrationTool/ConnectIntegrationTool";
import { CreateAgentTool } from "../../../tools/CreateAgent/CreateAgent";
import { EditAgentTool } from "../../../tools/EditAgent/EditAgent";
@@ -130,8 +129,6 @@ export function MessagePartRenderer({
</MessageResponse>
);
}
case "tool-ask_question":
return <AskQuestionTool key={key} part={part as ToolUIPart} />;
case "tool-find_block":
return <FindBlocksTool key={key} part={part as ToolUIPart} />;
case "tool-find_agent":

View File

@@ -13,7 +13,6 @@ export type RenderSegment =
| { kind: "collapsed-group"; parts: ToolUIPart[] };
const CUSTOM_TOOL_TYPES = new Set([
"tool-ask_question",
"tool-find_block",
"tool-find_agent",
"tool-find_library_agent",

View File

@@ -1,68 +0,0 @@
"use client";
import { ChatTeardropDotsIcon, WarningCircleIcon } from "@phosphor-icons/react";
import type { ToolUIPart } from "ai";
import { ClarificationQuestionsCard } from "../../components/ClarificationQuestionsCard/ClarificationQuestionsCard";
import { useCopilotChatActions } from "../../components/CopilotChatActionsProvider/useCopilotChatActions";
import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
import { normalizeClarifyingQuestions } from "../clarifying-questions";
import {
getAnimationText,
getAskQuestionOutput,
isClarificationOutput,
isErrorOutput,
} from "./helpers";
interface Props {
  part: ToolUIPart;
}

/**
 * Renders an ask_question tool call: shows the clarification-questions card
 * when the tool output contains questions, otherwise a one-line status row
 * with an icon and animated text.
 */
export function AskQuestionTool({ part }: Props) {
  const text = getAnimationText(part);
  const { onSend } = useCopilotChatActions();
  // Tool input is still arriving from the model.
  const isStreaming =
    part.state === "input-streaming" || part.state === "input-available";
  const isError = part.state === "output-error";
  const output = getAskQuestionOutput(part);

  // Formats the user's answers as quoted question/answer pairs and sends
  // them back into the chat as a single message.
  function handleAnswers(answers: Record<string, string>) {
    if (!output || !isClarificationOutput(output)) return;
    const questions = normalizeClarifyingQuestions(output.questions ?? []);
    const message = questions
      .map((q) => {
        const answer = answers[q.keyword] || "";
        return `> ${q.question}\n\n${answer}`;
      })
      .join("\n\n");
    onSend(`**Here are my answers:**\n\n${message}\n\nPlease proceed.`);
  }

  // Clarification output replaces the status row entirely.
  if (output && isClarificationOutput(output)) {
    return (
      <ClarificationQuestionsCard
        questions={normalizeClarifyingQuestions(output.questions ?? [])}
        message={output.message}
        sessionId={output.session_id}
        onSubmitAnswers={handleAnswers}
      />
    );
  }

  // Fallback status row: error icon, pulsing icon while streaming, or idle.
  return (
    <div className="flex items-center gap-2 py-2 text-sm text-muted-foreground">
      {isError || (output && isErrorOutput(output)) ? (
        <WarningCircleIcon size={16} className="text-red-500" />
      ) : isStreaming ? (
        <ChatTeardropDotsIcon size={16} className="animate-pulse" />
      ) : (
        <ChatTeardropDotsIcon size={16} />
      )}
      <MorphingTextAnimation
        text={text}
        className={isError ? "text-red-500" : undefined}
      />
    </div>
  );
}

View File

@@ -1,86 +0,0 @@
import { ResponseType } from "@/app/api/__generated__/models/responseType";
import type { ToolUIPart } from "ai";
/** One clarifying question as emitted by the backend ask_question tool. */
interface ClarifyingQuestionPayload {
  question: string;
  keyword: string;
  example?: string;
}

/** Successful ask_question output: a clarification request for the user. */
export interface AskQuestionOutput {
  type: string;
  message: string;
  questions: ClarifyingQuestionPayload[];
  session_id?: string;
}

/** Error-shaped tool output. */
interface ErrorOutput {
  type: "error";
  message: string;
  error?: string;
}

/** Union of all parseable ask_question tool outputs. */
export type AskQuestionToolOutput = AskQuestionOutput | ErrorOutput;
/**
 * Best-effort parse of a tool output into an AskQuestionToolOutput.
 * Accepts an object or a JSON-encoded string; returns null for anything
 * unrecognized.
 */
function parseOutput(output: unknown): AskQuestionToolOutput | null {
  if (!output) return null;

  // Strings may be JSON-encoded payloads; decode and re-dispatch.
  if (typeof output === "string") {
    let decoded: unknown;
    try {
      decoded = JSON.parse(output) as unknown;
    } catch {
      return null;
    }
    return parseOutput(decoded);
  }

  if (typeof output !== "object" || output === null) return null;

  const record = output as Record<string, unknown>;
  const isClarification =
    record.type === ResponseType.agent_builder_clarification_needed ||
    "questions" in record;
  if (isClarification) {
    return record as unknown as AskQuestionOutput;
  }
  const isErrorShaped = record.type === "error" || "error" in record;
  if (isErrorShaped) {
    return record as unknown as ErrorOutput;
  }
  return null;
}
/** Parse the output of an ask_question ToolUIPart, or null if unparseable. */
export function getAskQuestionOutput(
  part: ToolUIPart,
): AskQuestionToolOutput | null {
  return parseOutput(part.output);
}
/** Type guard: output is a clarification request (has questions or the type). */
export function isClarificationOutput(
  output: AskQuestionToolOutput,
): output is AskQuestionOutput {
  return (
    output.type === ResponseType.agent_builder_clarification_needed ||
    "questions" in output
  );
}
/** Type guard: output is error-shaped (type "error" or has an error field). */
export function isErrorOutput(
  output: AskQuestionToolOutput,
): output is ErrorOutput {
  return output.type === "error" || "error" in output;
}
/** Status text for the morphing animation, derived from the part's state. */
export function getAnimationText(part: ToolUIPart): string {
  const FALLBACK = "Asking question...";
  const FAILED = "Failed to ask question";

  if (part.state === "output-error") return FAILED;

  if (part.state === "output-available") {
    const output = parseOutput(part.output);
    if (output && isClarificationOutput(output)) return "Needs your input";
    if (output && isErrorOutput(output)) return FAILED;
    return FALLBACK;
  }

  // "input-streaming", "input-available", and any unknown state.
  return FALLBACK;
}

View File

@@ -13,8 +13,13 @@ import {
ContentMessage,
} from "../../components/ToolAccordion/AccordionContent";
import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion";
import { ClarificationQuestionsCard } from "./components/ClarificationQuestionsCard";
import { MiniGame } from "../../components/MiniGame/MiniGame";
import { SuggestedGoalCard } from "./components/SuggestedGoalCard";
import {
buildClarificationAnswersMessage,
normalizeClarifyingQuestions,
} from "../clarifying-questions";
import {
AccordionIcon,
formatMaybeJson,
@@ -22,6 +27,7 @@ import {
getCreateAgentToolOutput,
isAgentPreviewOutput,
isAgentSavedOutput,
isClarificationNeededOutput,
isErrorOutput,
isSuggestedGoalOutput,
ToolIcon,
@@ -60,6 +66,15 @@ function getAccordionMeta(output: CreateAgentToolOutput | null) {
description: `${output.node_count} block${output.node_count === 1 ? "" : "s"}`,
};
}
if (isClarificationNeededOutput(output)) {
const questions = output.questions ?? [];
return {
icon,
title: "Needs clarification",
description: `${questions.length} question${questions.length === 1 ? "" : "s"}`,
expanded: true,
};
}
if (isSuggestedGoalOutput(output)) {
return {
icon,
@@ -92,6 +107,15 @@ export function CreateAgentTool({ part }: Props) {
onSend(`Please create an agent with this goal: ${goal}`);
}
function handleClarificationAnswers(answers: Record<string, string>) {
const questions =
output && isClarificationNeededOutput(output)
? (output.questions ?? [])
: [];
onSend(buildClarificationAnswersMessage(answers, questions, "create"));
}
return (
<div className="py-2">
{isOperating && (
@@ -124,42 +148,44 @@ export function CreateAgentTool({ part }: Props) {
/>
)}
{hasExpandableContent && !(output && isAgentSavedOutput(output)) && (
<ToolAccordion {...getAccordionMeta(output)}>
{isOperating && (
<ContentGrid>
<MiniGame />
<ContentHint>
This could take a few minutes play while you wait!
</ContentHint>
</ContentGrid>
)}
{hasExpandableContent &&
!(output && isClarificationNeededOutput(output)) &&
!(output && isAgentSavedOutput(output)) && (
<ToolAccordion {...getAccordionMeta(output)}>
{isOperating && (
<ContentGrid>
<MiniGame />
<ContentHint>
This could take a few minutes play while you wait!
</ContentHint>
</ContentGrid>
)}
{output && isAgentPreviewOutput(output) && (
<ContentGrid>
<ContentMessage>{output.message}</ContentMessage>
{output.description?.trim() && (
<ContentCardDescription>
{output.description}
</ContentCardDescription>
)}
<ContentCodeBlock>
{truncateText(formatMaybeJson(output.agent_json), 1600)}
</ContentCodeBlock>
</ContentGrid>
)}
{output && isAgentPreviewOutput(output) && (
<ContentGrid>
<ContentMessage>{output.message}</ContentMessage>
{output.description?.trim() && (
<ContentCardDescription>
{output.description}
</ContentCardDescription>
)}
<ContentCodeBlock>
{truncateText(formatMaybeJson(output.agent_json), 1600)}
</ContentCodeBlock>
</ContentGrid>
)}
{output && isSuggestedGoalOutput(output) && (
<SuggestedGoalCard
message={output.message}
suggestedGoal={output.suggested_goal}
reason={output.reason}
goalType={output.goal_type ?? "vague"}
onUseSuggestedGoal={handleUseSuggestedGoal}
/>
)}
</ToolAccordion>
)}
{output && isSuggestedGoalOutput(output) && (
<SuggestedGoalCard
message={output.message}
suggestedGoal={output.suggested_goal}
reason={output.reason}
goalType={output.goal_type ?? "vague"}
onUseSuggestedGoal={handleUseSuggestedGoal}
/>
)}
</ToolAccordion>
)}
{output && isAgentSavedOutput(output) && (
<AgentSavedCard
@@ -169,6 +195,14 @@ export function CreateAgentTool({ part }: Props) {
agentPageLink={output.agent_page_link}
/>
)}
{output && isClarificationNeededOutput(output) && (
<ClarificationQuestionsCard
questions={normalizeClarifyingQuestions(output.questions ?? [])}
message={output.message}
onSubmitAnswers={handleClarificationAnswers}
/>
)}
</div>
);
}

View File

@@ -7,7 +7,7 @@ import { Text } from "@/components/atoms/Text/Text";
import { cn } from "@/lib/utils";
import { ChatTeardropDotsIcon, CheckCircleIcon } from "@phosphor-icons/react";
import { useEffect, useRef, useState } from "react";
import type { ClarifyingQuestion } from "../../tools/clarifying-questions";
import type { ClarifyingQuestion } from "../../clarifying-questions";
interface Props {
questions: ClarifyingQuestion[];

View File

@@ -1,5 +1,6 @@
import type { AgentPreviewResponse } from "@/app/api/__generated__/models/agentPreviewResponse";
import type { AgentSavedResponse } from "@/app/api/__generated__/models/agentSavedResponse";
import type { ClarificationNeededResponse } from "@/app/api/__generated__/models/clarificationNeededResponse";
import type { ErrorResponse } from "@/app/api/__generated__/models/errorResponse";
import { ResponseType } from "@/app/api/__generated__/models/responseType";
import type { SuggestedGoalResponse } from "@/app/api/__generated__/models/suggestedGoalResponse";
@@ -14,6 +15,7 @@ import { ScaleLoader } from "../../components/ScaleLoader/ScaleLoader";
export type CreateAgentToolOutput =
| AgentPreviewResponse
| AgentSavedResponse
| ClarificationNeededResponse
| SuggestedGoalResponse
| ErrorResponse;
@@ -33,6 +35,7 @@ function parseOutput(output: unknown): CreateAgentToolOutput | null {
if (
type === ResponseType.agent_builder_preview ||
type === ResponseType.agent_builder_saved ||
type === ResponseType.agent_builder_clarification_needed ||
type === ResponseType.suggested_goal ||
type === ResponseType.error
) {
@@ -42,6 +45,7 @@ function parseOutput(output: unknown): CreateAgentToolOutput | null {
return output as AgentPreviewResponse;
if ("agent_id" in output && "library_agent_id" in output)
return output as AgentSavedResponse;
if ("questions" in output) return output as ClarificationNeededResponse;
if ("suggested_goal" in output) return output as SuggestedGoalResponse;
if ("error" in output || "details" in output)
return output as ErrorResponse;
@@ -73,6 +77,15 @@ export function isAgentSavedOutput(
);
}
export function isClarificationNeededOutput(
output: CreateAgentToolOutput,
): output is ClarificationNeededResponse {
return (
output.type === ResponseType.agent_builder_clarification_needed ||
"questions" in output
);
}
export function isSuggestedGoalOutput(
output: CreateAgentToolOutput,
): output is SuggestedGoalResponse {
@@ -101,6 +114,7 @@ export function getAnimationText(part: {
if (!output) return "Creating a new agent";
if (isAgentSavedOutput(output)) return `Saved ${output.agent_name}`;
if (isAgentPreviewOutput(output)) return `Preview "${output.agent_name}"`;
if (isClarificationNeededOutput(output)) return "Needs clarification";
if (isSuggestedGoalOutput(output)) return "Goal needs refinement";
return "Error creating agent";
}

View File

@@ -14,6 +14,11 @@ import {
ContentMessage,
} from "../../components/ToolAccordion/AccordionContent";
import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion";
import {
buildClarificationAnswersMessage,
normalizeClarifyingQuestions,
} from "../clarifying-questions";
import { ClarificationQuestionsCard } from "../CreateAgent/components/ClarificationQuestionsCard";
import {
AccordionIcon,
formatMaybeJson,
@@ -21,6 +26,7 @@ import {
getEditAgentToolOutput,
isAgentPreviewOutput,
isAgentSavedOutput,
isClarificationNeededOutput,
isErrorOutput,
ToolIcon,
truncateText,
@@ -63,6 +69,14 @@ function getAccordionMeta(output: EditAgentToolOutput | null): {
description: `${output.node_count} block${output.node_count === 1 ? "" : "s"}`,
};
}
if (isClarificationNeededOutput(output)) {
const questions = output.questions ?? [];
return {
icon,
title: "Needs clarification",
description: `${questions.length} question${questions.length === 1 ? "" : "s"}`,
};
}
return { icon, title: "" };
}
@@ -82,6 +96,15 @@ export function EditAgentTool({ part }: Props) {
// (errors are shown inline so they get replaced when retrying)
const hasExpandableContent = !isError;
function handleClarificationAnswers(answers: Record<string, string>) {
const questions =
output && isClarificationNeededOutput(output)
? (output.questions ?? [])
: [];
onSend(buildClarificationAnswersMessage(answers, questions, "edit"));
}
return (
<div className="py-2">
{isOperating && (
@@ -109,32 +132,34 @@ export function EditAgentTool({ part }: Props) {
/>
)}
{hasExpandableContent && !(output && isAgentSavedOutput(output)) && (
<ToolAccordion {...getAccordionMeta(output)}>
{isOperating && (
<ContentGrid>
<MiniGame />
<ContentHint>
This could take a few minutes play while you wait!
</ContentHint>
</ContentGrid>
)}
{hasExpandableContent &&
!(output && isClarificationNeededOutput(output)) &&
!(output && isAgentSavedOutput(output)) && (
<ToolAccordion {...getAccordionMeta(output)}>
{isOperating && (
<ContentGrid>
<MiniGame />
<ContentHint>
This could take a few minutes play while you wait!
</ContentHint>
</ContentGrid>
)}
{output && isAgentPreviewOutput(output) && (
<ContentGrid>
<ContentMessage>{output.message}</ContentMessage>
{output.description?.trim() && (
<ContentCardDescription>
{output.description}
</ContentCardDescription>
)}
<ContentCodeBlock>
{truncateText(formatMaybeJson(output.agent_json), 1600)}
</ContentCodeBlock>
</ContentGrid>
)}
</ToolAccordion>
)}
{output && isAgentPreviewOutput(output) && (
<ContentGrid>
<ContentMessage>{output.message}</ContentMessage>
{output.description?.trim() && (
<ContentCardDescription>
{output.description}
</ContentCardDescription>
)}
<ContentCodeBlock>
{truncateText(formatMaybeJson(output.agent_json), 1600)}
</ContentCodeBlock>
</ContentGrid>
)}
</ToolAccordion>
)}
{output && isAgentSavedOutput(output) && (
<AgentSavedCard
@@ -144,6 +169,14 @@ export function EditAgentTool({ part }: Props) {
agentPageLink={output.agent_page_link}
/>
)}
{output && isClarificationNeededOutput(output) && (
<ClarificationQuestionsCard
questions={normalizeClarifyingQuestions(output.questions ?? [])}
message={output.message}
onSubmitAnswers={handleClarificationAnswers}
/>
)}
</div>
);
}

View File

@@ -1,5 +1,6 @@
import type { AgentPreviewResponse } from "@/app/api/__generated__/models/agentPreviewResponse";
import type { AgentSavedResponse } from "@/app/api/__generated__/models/agentSavedResponse";
import type { ClarificationNeededResponse } from "@/app/api/__generated__/models/clarificationNeededResponse";
import type { ErrorResponse } from "@/app/api/__generated__/models/errorResponse";
import { ResponseType } from "@/app/api/__generated__/models/responseType";
import {
@@ -13,6 +14,7 @@ import { ScaleLoader } from "../../components/ScaleLoader/ScaleLoader";
export type EditAgentToolOutput =
| AgentPreviewResponse
| AgentSavedResponse
| ClarificationNeededResponse
| ErrorResponse;
function parseOutput(output: unknown): EditAgentToolOutput | null {
@@ -31,6 +33,7 @@ function parseOutput(output: unknown): EditAgentToolOutput | null {
if (
type === ResponseType.agent_builder_preview ||
type === ResponseType.agent_builder_saved ||
type === ResponseType.agent_builder_clarification_needed ||
type === ResponseType.error
) {
return output as EditAgentToolOutput;
@@ -39,6 +42,7 @@ function parseOutput(output: unknown): EditAgentToolOutput | null {
return output as AgentPreviewResponse;
if ("agent_id" in output && "library_agent_id" in output)
return output as AgentSavedResponse;
if ("questions" in output) return output as ClarificationNeededResponse;
if ("error" in output || "details" in output)
return output as ErrorResponse;
}
@@ -69,6 +73,15 @@ export function isAgentSavedOutput(
);
}
export function isClarificationNeededOutput(
output: EditAgentToolOutput,
): output is ClarificationNeededResponse {
return (
output.type === ResponseType.agent_builder_clarification_needed ||
"questions" in output
);
}
export function isErrorOutput(
output: EditAgentToolOutput,
): output is ErrorResponse {
@@ -89,6 +102,7 @@ export function getAnimationText(part: {
if (!output) return "Editing the agent";
if (isAgentSavedOutput(output)) return `Saved "${output.agent_name}"`;
if (isAgentPreviewOutput(output)) return `Preview "${output.agent_name}"`;
if (isClarificationNeededOutput(output)) return "Needs clarification";
return "Error editing agent";
}
case "output-error":

View File

@@ -95,7 +95,8 @@ export function useChatSession() {
async function createSession() {
if (sessionId) return sessionId;
try {
const response = await createSessionMutation({ data: null });
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const response = await (createSessionMutation as any)({ data: null });
if (response.status !== 200 || !response.data?.id) {
const error = new Error("Failed to create session");
Sentry.captureException(error, {

View File

@@ -19,6 +19,7 @@ import {
const RECONNECT_BASE_DELAY_MS = 1_000;
const RECONNECT_MAX_ATTEMPTS = 3;
const STREAM_TIMEOUT_MS = 60_000;
/** Minimum time the page must have been hidden to trigger a wake re-sync. */
const WAKE_RESYNC_THRESHOLD_MS = 30_000;
@@ -102,6 +103,11 @@ export function useCopilotStream({
// Set when the user explicitly clicks stop — prevents onError from
// triggering a reconnect cycle for the resulting AbortError.
const isUserStoppingRef = useRef(false);
// Timer that fires when no SSE events arrive for STREAM_TIMEOUT_MS during
// an active stream — auto-cancels the stream to avoid "Reasoning..." forever.
const streamTimeoutRef = useRef<ReturnType<typeof setTimeout>>();
// Ref to the latest stop() so the timeout callback never uses a stale closure.
const stopRef = useRef<() => void>(() => {});
// Set when all reconnect attempts are exhausted — prevents hasActiveStream
// from keeping the UI blocked forever when the backend is slow to clear it.
// Must be state (not ref) so that setting it triggers a re-render and
@@ -245,8 +251,12 @@ export function useCopilotStream({
// Wrap AI SDK's stop() to also cancel the backend executor task.
// sdkStop() aborts the SSE fetch instantly (UI feedback), then we fire
// the cancel API to actually stop the executor and wait for confirmation.
// Also kept in stopRef so the stream-timeout callback always calls the
// latest version without needing it in the effect dependency array.
async function stop() {
isUserStoppingRef.current = true;
clearTimeout(streamTimeoutRef.current);
streamTimeoutRef.current = undefined;
sdkStop();
// Resolve pending tool calls and inject a cancellation marker so the UI
// shows "You manually stopped this chat" immediately (the backend writes
@@ -295,6 +305,7 @@ export function useCopilotStream({
});
}
}
stopRef.current = stop;
// Keep a ref to sessionId so the async wake handler can detect staleness.
const sessionIdRef = useRef(sessionId);
@@ -375,6 +386,8 @@ export function useCopilotStream({
useEffect(() => {
clearTimeout(reconnectTimerRef.current);
reconnectTimerRef.current = undefined;
clearTimeout(streamTimeoutRef.current);
streamTimeoutRef.current = undefined;
reconnectAttemptsRef.current = 0;
isReconnectScheduledRef.current = false;
setIsReconnectScheduled(false);
@@ -387,6 +400,8 @@ export function useCopilotStream({
return () => {
clearTimeout(reconnectTimerRef.current);
reconnectTimerRef.current = undefined;
clearTimeout(streamTimeoutRef.current);
streamTimeoutRef.current = undefined;
};
}, [sessionId]);
@@ -468,6 +483,41 @@ export function useCopilotStream({
}
}, [hasActiveStream]);
// Stream timeout guard: if no SSE events arrive for STREAM_TIMEOUT_MS while
// the stream is active, auto-cancel to avoid the UI stuck in "Reasoning..."
// indefinitely (e.g. when the SSE connection dies silently without a
// disconnect event).
useEffect(() => {
// rawMessages is intentionally in the dependency array: each SSE event
// updates rawMessages, which re-runs this effect and resets the timer.
// Referencing its length here satisfies the exhaustive-deps rule.
void rawMessages.length;
const isActive = status === "streaming" || status === "submitted";
if (!isActive) {
clearTimeout(streamTimeoutRef.current);
streamTimeoutRef.current = undefined;
return;
}
clearTimeout(streamTimeoutRef.current);
streamTimeoutRef.current = setTimeout(() => {
streamTimeoutRef.current = undefined;
toast({
title: "Connection lost",
description:
"No response received — please try sending your message again.",
variant: "destructive",
});
stopRef.current();
}, STREAM_TIMEOUT_MS);
return () => {
clearTimeout(streamTimeoutRef.current);
streamTimeoutRef.current = undefined;
};
}, [status, rawMessages]);
// True while reconnecting or backend has active stream but we haven't connected yet.
// Suppressed when the user explicitly stopped or when all reconnect attempts
// are exhausted — the backend may be slow to clear active_stream but the UI

View File

@@ -169,7 +169,7 @@ Block for dropdown text selection.
### How it works
<!-- MANUAL: how_it_works -->
This block provides a dropdown selection input for users interacting with your agent. You define the available options using the `options` field, and users select one option from the list at runtime.
This block provides a dropdown selection input for users interacting with your agent. You define the available options using the `placeholder_values` field, and users select one option from the list at runtime.
This is ideal when you want to constrain user input to a predefined set of choices, ensuring valid input and simplifying the user experience. The selected value is passed to downstream blocks in your workflow.
<!-- END MANUAL -->
@@ -184,7 +184,7 @@ This is ideal when you want to constrain user input to a predefined set of choic
| description | The description of the input. | str | No |
| advanced | Whether to show the input in the advanced section, if the field is not required. | bool | No |
| secret | Whether the input should be treated as a secret. | bool | No |
| options | If provided, renders the input as a dropdown selector restricted to these values. Leave empty for free-text input. | List[Any] | No |
| placeholder_values | Possible values for the dropdown. | List[Any] | No |
### Outputs