Compare commits

...

4 Commits

Author SHA1 Message Date
Otto
2b34528004 fix(copilot): update homepage copy to focus on problem discovery
Update the CoPilot homepage to shift from 'what do you want?' to 'tell me about your problems'. This lowers the barrier to engagement by letting users describe their work frustrations instead of requiring them to identify automations themselves.

Changes:
- Headline: 'What do you want to automate?' → 'Tell me about your work — I'll find what to automate.'
- Placeholder: Updated to prompt users to describe their role and frustrations
- Quick actions: Changed from feature-oriented to problem-oriented prompts
- Container width: max-w-2xl → max-w-3xl (to fit the longer headline on one line)

Resolves: SECRT-1876
2026-02-03 19:36:44 +00:00
Otto
f7350c797a fix(copilot): use messages_dict in fallback context compaction (#11922)
## Summary

Fixes a bug where the fallback path in context compaction passes
`recent_messages` (already sliced) instead of `messages_dict` (full
conversation) to `_ensure_tool_pairs_intact`.

This caused the function to fail to find assistant messages that exist
in the original conversation but were outside the sliced window,
resulting in orphan tool_results being sent to Anthropic and rejected
with:

```
messages.66.content.0: unexpected tool_use_id found in tool_result blocks: toolu_vrtx_019bi1PDvEn7o5ByAxcS3VdA
```
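
For illustration, a minimal sketch of how the orphan arises (message shapes follow Anthropic's Messages API; the IDs and content here are made up):

```
# Full conversation: the assistant's tool_use lives before the slice window.
messages_dict = [
    {"role": "assistant", "content": [
        {"type": "tool_use", "id": "toolu_abc", "name": "search", "input": {}},
    ]},
    {"role": "user", "content": [
        {"type": "tool_result", "tool_use_id": "toolu_abc", "content": "ok"},
    ]},
]

# The fallback keeps only the tail; the tool_result survives,
# but its matching tool_use was cut off:
recent_messages = messages_dict[1:]

# Searching recent_messages for "toolu_abc" finds no assistant tool_use,
# so the orphan tool_result is sent to the API and rejected.
```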

## Changes

- Pass `messages_dict` and `slice_start` (relative to full conversation)
instead of `recent_messages` and `reduced_slice_start` (relative to
already-sliced list)
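
As a worked example of the index translation (the sizes are hypothetical; the formula is the one in the diff):

```
slice_start = 40    # recent_messages = messages_dict[40:], i.e. 60 entries
len_recent = 60
keep_count = 20

# Old (buggy): offset relative to the already-sliced list
reduced_slice_start = max(0, len_recent - keep_count)                   # 40

# New: the same offset translated into messages_dict coordinates
reduced_start_in_dict = slice_start + max(0, len_recent - keep_count)   # 80
```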

## Testing

This is a targeted fix for the fallback path. The bug only manifests
when:
1. Token count > 120k (triggers compaction)
2. Initial compaction + summary still exceeds limit (triggers fallback)
3. A tool_result's corresponding assistant message is in `messages_dict` but
   not in `recent_messages`

## Related

- Fixes SECRT-1861
- Related: SECRT-1839 (original fix that missed this code path)
2026-02-02 13:01:05 +00:00
Otto
2abbb7fbc8 hotfix(backend): use discriminator for credential matching in run_block (#11908)
Co-authored-by: claude[bot] <41898282+claude[bot]@users.noreply.github.com>
Co-authored-by: Nicholas Tindle <ntindle@users.noreply.github.com>
Co-authored-by: Nicholas Tindle <nicholas.tindle@agpt.co>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-30 21:50:21 -06:00
Nicholas Tindle
05b60db554 fix(backend/chat): Include input schema in discovery and validate unknown fields (#11916)
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-30 21:00:43 -06:00
8 changed files with 176 additions and 18 deletions

View File

@@ -1184,11 +1184,14 @@ async def _stream_chat_chunks(
             else recent_messages
         )
         # Ensure tool pairs stay intact in the reduced slice
-        reduced_slice_start = max(
+        # Note: Search in messages_dict (full conversation) not recent_messages
+        # (already sliced), so we can find assistants outside the current slice.
+        # Calculate where reduced_recent starts in messages_dict
+        reduced_start_in_dict = slice_start + max(
             0, len(recent_messages) - keep_count
         )
         reduced_recent = _ensure_tool_pairs_intact(
-            reduced_recent, recent_messages, reduced_slice_start
+            reduced_recent, messages_dict, reduced_start_in_dict
         )
         if has_system_prompt:
             messages = [
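
The helper's body is not part of this diff. As a rough sketch of the invariant it must enforce: every tool_result in the slice needs a matching assistant tool_use, which may live before the slice in the full conversation. The message shapes and the drop-orphans strategy below are assumptions; the real implementation may repair differently (e.g., by extending the slice).

```
def _ensure_tool_pairs_intact_sketch(slice_msgs, full_conversation, slice_start):
    # Collect tool_use ids from all assistant messages up to the end of the
    # slice, searching the FULL conversation (the point of this fix).
    valid_ids = {
        block["id"]
        for msg in full_conversation[: slice_start + len(slice_msgs)]
        if msg["role"] == "assistant"
        for block in msg.get("content", [])
        if isinstance(block, dict) and block.get("type") == "tool_use"
    }
    # Drop tool_result blocks whose tool_use is missing (one possible repair).
    repaired = []
    for msg in slice_msgs:
        content = msg.get("content")
        if isinstance(content, list):
            content = [
                b for b in content
                if not (isinstance(b, dict)
                        and b.get("type") == "tool_result"
                        and b.get("tool_use_id") not in valid_ids)
            ]
        repaired.append({**msg, "content": content})
    return repaired
```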

View File

@@ -1,10 +1,13 @@
"""Shared agent search functionality for find_agent and find_library_agent tools."""
import asyncio
import logging
from typing import Literal
from backend.api.features.library import db as library_db
from backend.api.features.store import db as store_db
from backend.data import graph as graph_db
from backend.data.graph import GraphModel
from backend.util.exceptions import DatabaseError, NotFoundError
from .models import (
@@ -14,6 +17,7 @@ from .models import (
NoResultsResponse,
ToolResponseBase,
)
from .utils import fetch_graph_from_store_slug
logger = logging.getLogger(__name__)
@@ -54,7 +58,28 @@ async def search_agents(
     if source == "marketplace":
         logger.info(f"Searching marketplace for: {query}")
         results = await store_db.get_store_agents(search_query=query, page_size=5)
-        for agent in results.agents:
+
+        # Fetch all graphs in parallel for better performance
+        async def fetch_marketplace_graph(
+            creator: str, slug: str
+        ) -> GraphModel | None:
+            try:
+                graph, _ = await fetch_graph_from_store_slug(creator, slug)
+                return graph
+            except Exception as e:
+                logger.warning(
+                    f"Failed to fetch input schema for {creator}/{slug}: {e}"
+                )
+                return None
+
+        graphs = await asyncio.gather(
+            *(
+                fetch_marketplace_graph(agent.creator, agent.slug)
+                for agent in results.agents
+            )
+        )
+
+        for agent, graph in zip(results.agents, graphs):
             agents.append(
                 AgentInfo(
                     id=f"{agent.creator}/{agent.slug}",
@@ -67,6 +92,7 @@ async def search_agents(
                     rating=agent.rating,
                     runs=agent.runs,
                     is_featured=False,
+                    inputs=graph.input_schema if graph else None,
                 )
             )
     else:  # library
@@ -76,7 +102,32 @@ async def search_agents(
             search_term=query,
             page_size=10,
         )
-        for agent in results.agents:
+
+        # Fetch all graphs in parallel for better performance
+        # (list_library_agents doesn't include nodes for performance)
+        async def fetch_library_graph(
+            graph_id: str, graph_version: int
+        ) -> GraphModel | None:
+            try:
+                return await graph_db.get_graph(
+                    graph_id=graph_id,
+                    version=graph_version,
+                    user_id=user_id,
+                )
+            except Exception as e:
+                logger.warning(
+                    f"Failed to fetch input schema for graph {graph_id}: {e}"
+                )
+                return None
+
+        graphs = await asyncio.gather(
+            *(
+                fetch_library_graph(agent.graph_id, agent.graph_version)
+                for agent in results.agents
+            )
+        )
+
+        for agent, graph in zip(results.agents, graphs):
             agents.append(
                 AgentInfo(
                     id=agent.id,
@@ -90,6 +141,7 @@ async def search_agents(
                     has_external_trigger=agent.has_external_trigger,
                     new_output=agent.new_output,
                     graph_id=agent.graph_id,
+                    inputs=graph.input_schema if graph else None,
                 )
             )

     logger.info(f"Found {len(agents)} agents in {source}")

View File

@@ -32,6 +32,8 @@ class ResponseType(str, Enum):
     OPERATION_STARTED = "operation_started"
     OPERATION_PENDING = "operation_pending"
     OPERATION_IN_PROGRESS = "operation_in_progress"
+    # Input validation
+    INPUT_VALIDATION_ERROR = "input_validation_error"


 # Base response model
@@ -62,6 +64,10 @@ class AgentInfo(BaseModel):
     has_external_trigger: bool | None = None
     new_output: bool | None = None
     graph_id: str | None = None
+    inputs: dict[str, Any] | None = Field(
+        default=None,
+        description="Input schema for the agent, including field names, types, and defaults",
+    )


 class AgentsFoundResponse(ToolResponseBase):
@@ -188,6 +194,20 @@ class ErrorResponse(ToolResponseBase):
     details: dict[str, Any] | None = None


+class InputValidationErrorResponse(ToolResponseBase):
+    """Response when run_agent receives unknown input fields."""
+
+    type: ResponseType = ResponseType.INPUT_VALIDATION_ERROR
+    unrecognized_fields: list[str] = Field(
+        description="List of input field names that were not recognized"
+    )
+    inputs: dict[str, Any] = Field(
+        description="The agent's valid input schema for reference"
+    )
+    graph_id: str | None = None
+    graph_version: int | None = None
+
+
 # Agent output models
 class ExecutionOutputInfo(BaseModel):
     """Summary of a single execution's outputs."""

View File

@@ -30,6 +30,7 @@ from .models import (
     ErrorResponse,
     ExecutionOptions,
     ExecutionStartedResponse,
+    InputValidationErrorResponse,
     SetupInfo,
     SetupRequirementsResponse,
     ToolResponseBase,
@@ -273,6 +274,22 @@ class RunAgentTool(BaseTool):
         input_properties = graph.input_schema.get("properties", {})
         required_fields = set(graph.input_schema.get("required", []))
         provided_inputs = set(params.inputs.keys())
+        valid_fields = set(input_properties.keys())
+
+        # Check for unknown input fields
+        unrecognized_fields = provided_inputs - valid_fields
+        if unrecognized_fields:
+            return InputValidationErrorResponse(
+                message=(
+                    f"Unknown input field(s) provided: {', '.join(sorted(unrecognized_fields))}. "
+                    f"Agent was not executed. Please use the correct field names from the schema."
+                ),
+                session_id=session_id,
+                unrecognized_fields=sorted(unrecognized_fields),
+                inputs=graph.input_schema,
+                graph_id=graph.id,
+                graph_version=graph.version,
+            )

         # If agent has inputs but none were provided AND use_defaults is not set,
         # always show what's available first so user can decide

View File

@@ -402,3 +402,42 @@ async def test_run_agent_schedule_without_name(setup_test_data):
     # Should return error about missing schedule_name
     assert result_data.get("type") == "error"
     assert "schedule_name" in result_data["message"].lower()
+
+
+@pytest.mark.asyncio(loop_scope="session")
+async def test_run_agent_rejects_unknown_input_fields(setup_test_data):
+    """Test that run_agent returns input_validation_error for unknown input fields."""
+    user = setup_test_data["user"]
+    store_submission = setup_test_data["store_submission"]
+
+    tool = RunAgentTool()
+    agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
+    session = make_session(user_id=user.id)
+
+    # Execute with unknown input field names
+    response = await tool.execute(
+        user_id=user.id,
+        session_id=str(uuid.uuid4()),
+        tool_call_id=str(uuid.uuid4()),
+        username_agent_slug=agent_marketplace_id,
+        inputs={
+            "unknown_field": "some value",
+            "another_unknown": "another value",
+        },
+        session=session,
+    )
+
+    assert response is not None
+    assert hasattr(response, "output")
+    assert isinstance(response.output, str)
+    result_data = orjson.loads(response.output)
+
+    # Should return input_validation_error type with unrecognized fields
+    assert result_data.get("type") == "input_validation_error"
+    assert "unrecognized_fields" in result_data
+    assert set(result_data["unrecognized_fields"]) == {
+        "another_unknown",
+        "unknown_field",
+    }
+    assert "inputs" in result_data  # Contains the valid schema
+    assert "Agent was not executed" in result_data["message"]

View File

@@ -4,6 +4,8 @@ import logging
 from collections import defaultdict
 from typing import Any

+from pydantic_core import PydanticUndefined
+
 from backend.api.features.chat.model import ChatSession
 from backend.data.block import get_block
 from backend.data.execution import ExecutionContext
@@ -73,15 +75,22 @@ class RunBlockTool(BaseTool):
         self,
         user_id: str,
         block: Any,
+        input_data: dict[str, Any] | None = None,
     ) -> tuple[dict[str, CredentialsMetaInput], list[CredentialsMetaInput]]:
         """
         Check if user has required credentials for a block.

         Args:
             user_id: User ID
             block: Block to check credentials for
+            input_data: Input data for the block (used to determine provider via discriminator)

         Returns:
             tuple[matched_credentials, missing_credentials]
         """
         matched_credentials: dict[str, CredentialsMetaInput] = {}
         missing_credentials: list[CredentialsMetaInput] = []
+        input_data = input_data or {}

         # Get credential field info from block's input schema
         credentials_fields_info = block.input_schema.get_credentials_fields_info()
@@ -94,14 +103,33 @@ class RunBlockTool(BaseTool):
         available_creds = await creds_manager.store.get_all_creds(user_id)

         for field_name, field_info in credentials_fields_info.items():
             # field_info.provider is a frozenset of acceptable providers
             # field_info.supported_types is a frozenset of acceptable types
+            effective_field_info = field_info
+            if field_info.discriminator and field_info.discriminator_mapping:
+                # Get discriminator from input, falling back to schema default
+                discriminator_value = input_data.get(field_info.discriminator)
+                if discriminator_value is None:
+                    field = block.input_schema.model_fields.get(
+                        field_info.discriminator
+                    )
+                    if field and field.default is not PydanticUndefined:
+                        discriminator_value = field.default
+                if (
+                    discriminator_value
+                    and discriminator_value in field_info.discriminator_mapping
+                ):
+                    effective_field_info = field_info.discriminate(discriminator_value)
+                    logger.debug(
+                        f"Discriminated provider for {field_name}: "
+                        f"{discriminator_value} -> {effective_field_info.provider}"
+                    )
             matching_cred = next(
                 (
                     cred
                     for cred in available_creds
-                    if cred.provider in field_info.provider
-                    and cred.type in field_info.supported_types
+                    if cred.provider in effective_field_info.provider
+                    and cred.type in effective_field_info.supported_types
                 ),
                 None,
             )
@@ -115,8 +143,8 @@ class RunBlockTool(BaseTool):
                 )
             else:
                 # Create a placeholder for the missing credential
-                provider = next(iter(field_info.provider), "unknown")
-                cred_type = next(iter(field_info.supported_types), "api_key")
+                provider = next(iter(effective_field_info.provider), "unknown")
+                cred_type = next(iter(effective_field_info.supported_types), "api_key")
                 missing_credentials.append(
                     CredentialsMetaInput(
                         id=field_name,
@@ -184,10 +212,9 @@ class RunBlockTool(BaseTool):
logger.info(f"Executing block {block.name} ({block_id}) for user {user_id}")
# Check credentials
creds_manager = IntegrationCredentialsManager()
matched_credentials, missing_credentials = await self._check_block_credentials(
user_id, block
user_id, block, input_data
)
if missing_credentials:
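
To make the new discriminator path concrete, a tiny standalone sketch (the provider names and mapping are invented for illustration; real mappings come from the block's input schema):

```
# Hypothetical credentials field: accepts several providers, and the "model"
# input decides which one is actually needed.
discriminator = "model"
discriminator_mapping = {"gpt-4o": "openai", "claude-sonnet-4": "anthropic"}
all_providers = {"openai", "anthropic"}

input_data = {"model": "claude-sonnet-4"}

# Mirrors the new logic: narrow the acceptable providers by the input value.
value = input_data.get(discriminator)
effective_providers = (
    {discriminator_mapping[value]} if value in discriminator_mapping else all_providers
)
print(effective_providers)  # {'anthropic'}: only Anthropic credentials match now
```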

View File

@@ -26,8 +26,8 @@ export function buildCopilotChatUrl(prompt: string): string {
 export function getQuickActions(): string[] {
   return [
-    "Show me what I can automate",
-    "Design a custom workflow",
-    "Help me with content creation",
+    "I don't know where to start, just ask me stuff",
+    "I do the same thing every week and it's killing me",
+    "Help me find where I'm wasting my time",
   ];
 }

View File

@@ -90,7 +90,7 @@ export default function CopilotPage() {
             </div>
           ) : (
             <>
-              <div className="mx-auto max-w-2xl">
+              <div className="mx-auto max-w-3xl">
                 <Text
                   variant="h3"
                   className="mb-3 !text-[1.375rem] text-zinc-700"
@@ -98,13 +98,13 @@ export default function CopilotPage() {
                   Hey, <span className="text-violet-600">{greetingName}</span>
                 </Text>
                 <Text variant="h3" className="mb-8 !font-normal">
-                  What do you want to automate?
+                  Tell me about your work — I&apos;ll find what to automate.
                 </Text>
                 <div className="mb-6">
                   <ChatInput
                     onSend={startChatWithPrompt}
-                    placeholder='You can search or just ask - e.g. "create a blog post outline"'
+                    placeholder="What's your role and what eats up most of your day? e.g. 'I'm a real estate agent and I hate...'"
                   />
                 </div>
               </div>