Compare commits

..

1 Commit

Author: Otto
SHA1: 7f7a7067ec
Message: refactor(copilot): use Pydantic models and match/case in customize_agent
Addresses review feedback from ntindle:

1. Use typed parameters instead of kwargs.get():
   - Added CustomizeAgentInput Pydantic model with field_validator for stripping strings
   - Tool now uses params = CustomizeAgentInput(**kwargs) pattern

2. Use match/case for cleaner pattern matching:
   - Extracted response handling to _handle_customization_result method
   - Uses match result_type: case 'error' | 'clarifying_questions' | _

3. Improved code organization:
   - Split monolithic _execute into smaller focused methods
   - _handle_customization_result for response type handling
   - _save_or_preview_agent for final save/preview logic
Date: 2026-02-04 08:53:02 +00:00
21 changed files with 218 additions and 532 deletions
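
For readers skimming the diff below, here is a minimal, self-contained sketch of the pattern the commit message describes: a Pydantic input model with a `field_validator` that strips strings, `**kwargs` parsed into that model, and a `match`/`case` dispatch on the result type, with the branches split into small helpers. The class and field names are simplified stand-ins, not the exact definitions in the changed files, and the string return values only keep the sketch short (the real tool returns typed response objects such as `ErrorResponse` and `ClarificationNeededResponse`).

```python
from typing import Any

from pydantic import BaseModel, field_validator


class CustomizeInput(BaseModel):
    """Typed replacement for ad-hoc kwargs.get() lookups (simplified)."""

    agent_id: str = ""
    modifications: str = ""
    save: bool = True

    @field_validator("agent_id", "modifications", mode="before")
    @classmethod
    def strip_strings(cls, v: Any) -> str:
        # Strip whitespace; coerce None to "" so downstream emptiness checks stay simple.
        if isinstance(v, str):
            return v.strip()
        return v if v is not None else ""


def handle_result(result: dict[str, Any], params: CustomizeInput) -> str:
    """Dispatch on the result type with match/case instead of chained isinstance checks."""
    match result.get("type"):
        case "error":
            return f"customization_failed:{result.get('error_type', 'unknown')}"
        case "clarifying_questions":
            questions = result.get("questions") or []
            return f"need {len(questions)} answers"
        case _:
            # Default: the result is the customized agent JSON; save or preview it.
            return "saved" if params.save else "preview"


if __name__ == "__main__":
    # **kwargs from the tool call are validated and normalized in one step.
    params = CustomizeInput(**{"agent_id": "  creator/agent ", "modifications": "rename", "save": False})
    print(params.agent_id)  # "creator/agent"
    print(handle_result({"type": "clarifying_questions", "questions": [{}]}, params))
    print(handle_result({"type": "agent", "name": "Custom"}, params))
```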

View File

@@ -3,6 +3,8 @@
import logging
from typing import Any
from pydantic import BaseModel, field_validator
from backend.api.features.chat.model import ChatSession
from backend.api.features.store import db as store_db
from backend.api.features.store.exceptions import AgentNotFoundError
@@ -27,6 +29,23 @@ from .models import (
logger = logging.getLogger(__name__)
class CustomizeAgentInput(BaseModel):
"""Input parameters for the customize_agent tool."""
agent_id: str = ""
modifications: str = ""
context: str = ""
save: bool = True
@field_validator("agent_id", "modifications", "context", mode="before")
@classmethod
def strip_strings(cls, v: Any) -> str:
"""Strip whitespace from string fields."""
if isinstance(v, str):
return v.strip()
return v if v is not None else ""
class CustomizeAgentTool(BaseTool):
"""Tool for customizing marketplace/template agents using natural language."""
@@ -92,7 +111,7 @@ class CustomizeAgentTool(BaseTool):
self,
user_id: str | None,
session: ChatSession,
**kwargs,
**kwargs: Any,
) -> ToolResponseBase:
"""Execute the customize_agent tool.
@@ -102,20 +121,17 @@ class CustomizeAgentTool(BaseTool):
3. Call customize_template with the modification request
4. Preview or save based on the save parameter
"""
agent_id = kwargs.get("agent_id", "").strip()
modifications = kwargs.get("modifications", "").strip()
context = kwargs.get("context", "")
save = kwargs.get("save", True)
params = CustomizeAgentInput(**kwargs)
session_id = session.session_id if session else None
if not agent_id:
if not params.agent_id:
return ErrorResponse(
message="Please provide the marketplace agent ID (e.g., 'creator/agent-name').",
error="missing_agent_id",
session_id=session_id,
)
if not modifications:
if not params.modifications:
return ErrorResponse(
message="Please describe how you want to customize this agent.",
error="missing_modifications",
@@ -123,11 +139,11 @@ class CustomizeAgentTool(BaseTool):
)
# Parse agent_id in format "creator/slug"
parts = [p.strip() for p in agent_id.split("/")]
parts = params.agent_id.split("/")
if len(parts) != 2 or not parts[0] or not parts[1]:
return ErrorResponse(
message=(
f"Invalid agent ID format: '{agent_id}'. "
f"Invalid agent ID format: '{params.agent_id}'. "
"Expected format is 'creator/agent-name' "
"(e.g., 'autogpt/newsletter-writer')."
),
@@ -145,14 +161,14 @@ class CustomizeAgentTool(BaseTool):
except AgentNotFoundError:
return ErrorResponse(
message=(
f"Could not find marketplace agent '{agent_id}'. "
f"Could not find marketplace agent '{params.agent_id}'. "
"Please check the agent ID and try again."
),
error="agent_not_found",
session_id=session_id,
)
except Exception as e:
logger.error(f"Error fetching marketplace agent {agent_id}: {e}")
logger.error(f"Error fetching marketplace agent {params.agent_id}: {e}")
return ErrorResponse(
message="Failed to fetch the marketplace agent. Please try again.",
error="fetch_error",
@@ -162,7 +178,7 @@ class CustomizeAgentTool(BaseTool):
if not agent_details.store_listing_version_id:
return ErrorResponse(
message=(
f"The agent '{agent_id}' does not have an available version. "
f"The agent '{params.agent_id}' does not have an available version. "
"Please try a different agent."
),
error="no_version_available",
@@ -174,7 +190,7 @@ class CustomizeAgentTool(BaseTool):
graph = await store_db.get_agent(agent_details.store_listing_version_id)
template_agent = graph_to_json(graph)
except Exception as e:
logger.error(f"Error fetching agent graph for {agent_id}: {e}")
logger.error(f"Error fetching agent graph for {params.agent_id}: {e}")
return ErrorResponse(
message="Failed to fetch the agent configuration. Please try again.",
error="graph_fetch_error",
@@ -185,8 +201,8 @@ class CustomizeAgentTool(BaseTool):
try:
result = await customize_template(
template_agent=template_agent,
modification_request=modifications,
context=context,
modification_request=params.modifications,
context=params.context,
)
except AgentGeneratorNotConfiguredError:
return ErrorResponse(
@@ -198,7 +214,7 @@ class CustomizeAgentTool(BaseTool):
session_id=session_id,
)
except Exception as e:
logger.error(f"Error calling customize_template for {agent_id}: {e}")
logger.error(f"Error calling customize_template for {params.agent_id}: {e}")
return ErrorResponse(
message=(
"Failed to customize the agent due to a service error. "
@@ -219,55 +235,25 @@ class CustomizeAgentTool(BaseTool):
session_id=session_id,
)
# Handle error response
if isinstance(result, dict) and result.get("type") == "error":
error_msg = result.get("error", "Unknown error")
error_type = result.get("error_type", "unknown")
user_message = get_user_message_for_error(
error_type,
operation="customize the agent",
llm_parse_message=(
"The AI had trouble customizing the agent. "
"Please try again or simplify your request."
),
validation_message=(
"The customized agent failed validation. "
"Please try rephrasing your request."
),
error_details=error_msg,
)
return ErrorResponse(
message=user_message,
error=f"customization_failed:{error_type}",
session_id=session_id,
)
# Handle response using match/case for cleaner pattern matching
return await self._handle_customization_result(
result=result,
params=params,
agent_details=agent_details,
user_id=user_id,
session_id=session_id,
)
# Handle clarifying questions
if isinstance(result, dict) and result.get("type") == "clarifying_questions":
questions = result.get("questions") or []
if not isinstance(questions, list):
logger.error(
f"Unexpected clarifying questions format: {type(questions)}"
)
questions = []
return ClarificationNeededResponse(
message=(
"I need some more information to customize this agent. "
"Please answer the following questions:"
),
questions=[
ClarifyingQuestion(
question=q.get("question", ""),
keyword=q.get("keyword", ""),
example=q.get("example"),
)
for q in questions
if isinstance(q, dict)
],
session_id=session_id,
)
# Result should be the customized agent JSON
async def _handle_customization_result(
self,
result: dict[str, Any],
params: CustomizeAgentInput,
agent_details: Any,
user_id: str | None,
session_id: str | None,
) -> ToolResponseBase:
"""Handle the result from customize_template using pattern matching."""
# Ensure result is a dict
if not isinstance(result, dict):
logger.error(f"Unexpected customize_template response type: {type(result)}")
return ErrorResponse(
@@ -276,8 +262,77 @@ class CustomizeAgentTool(BaseTool):
session_id=session_id,
)
customized_agent = result
result_type = result.get("type")
match result_type:
case "error":
error_msg = result.get("error", "Unknown error")
error_type = result.get("error_type", "unknown")
user_message = get_user_message_for_error(
error_type,
operation="customize the agent",
llm_parse_message=(
"The AI had trouble customizing the agent. "
"Please try again or simplify your request."
),
validation_message=(
"The customized agent failed validation. "
"Please try rephrasing your request."
),
error_details=error_msg,
)
return ErrorResponse(
message=user_message,
error=f"customization_failed:{error_type}",
session_id=session_id,
)
case "clarifying_questions":
questions_data = result.get("questions") or []
if not isinstance(questions_data, list):
logger.error(
f"Unexpected clarifying questions format: {type(questions_data)}"
)
questions_data = []
questions = [
ClarifyingQuestion(
question=q.get("question", "") if isinstance(q, dict) else "",
keyword=q.get("keyword", "") if isinstance(q, dict) else "",
example=q.get("example") if isinstance(q, dict) else None,
)
for q in questions_data
if isinstance(q, dict)
]
return ClarificationNeededResponse(
message=(
"I need some more information to customize this agent. "
"Please answer the following questions:"
),
questions=questions,
session_id=session_id,
)
case _:
# Default case: result is the customized agent JSON
return await self._save_or_preview_agent(
customized_agent=result,
params=params,
agent_details=agent_details,
user_id=user_id,
session_id=session_id,
)
async def _save_or_preview_agent(
self,
customized_agent: dict[str, Any],
params: CustomizeAgentInput,
agent_details: Any,
user_id: str | None,
session_id: str | None,
) -> ToolResponseBase:
"""Save or preview the customized agent based on params.save."""
agent_name = customized_agent.get(
"name", f"Customized {agent_details.agent_name}"
)
@@ -287,7 +342,7 @@ class CustomizeAgentTool(BaseTool):
node_count = len(nodes) if isinstance(nodes, list) else 0
link_count = len(links) if isinstance(links, list) else 0
if not save:
if not params.save:
return AgentPreviewResponse(
message=(
f"I've customized the agent '{agent_details.agent_name}'. "

View File

@@ -8,12 +8,7 @@ from backend.api.features.library import model as library_model
from backend.api.features.store import db as store_db
from backend.data import graph as graph_db
from backend.data.graph import GraphModel
from backend.data.model import (
CredentialsFieldInfo,
CredentialsMetaInput,
HostScopedCredentials,
OAuth2Credentials,
)
from backend.data.model import Credentials, CredentialsFieldInfo, CredentialsMetaInput
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.util.exceptions import NotFoundError
@@ -278,14 +273,7 @@ async def match_user_credentials_to_graph(
for cred in available_creds
if cred.provider in credential_requirements.provider
and cred.type in credential_requirements.supported_types
and (
cred.type != "oauth2"
or _credential_has_required_scopes(cred, credential_requirements)
)
and (
cred.type != "host_scoped"
or _credential_is_for_host(cred, credential_requirements)
)
and _credential_has_required_scopes(cred, credential_requirements)
),
None,
)
@@ -330,10 +318,19 @@ async def match_user_credentials_to_graph(
def _credential_has_required_scopes(
credential: OAuth2Credentials,
credential: Credentials,
requirements: CredentialsFieldInfo,
) -> bool:
"""Check if an OAuth2 credential has all the scopes required by the input."""
"""
Check if a credential has all the scopes required by the block.
For OAuth2 credentials, verifies that the credential's scopes are a superset
of the required scopes. For other credential types, returns True (no scope check).
"""
# Only OAuth2 credentials have scopes to check
if credential.type != "oauth2":
return True
# If no scopes are required, any credential matches
if not requirements.required_scopes:
return True
@@ -342,22 +339,6 @@ def _credential_has_required_scopes(
return set(credential.scopes).issuperset(requirements.required_scopes)
def _credential_is_for_host(
credential: HostScopedCredentials,
requirements: CredentialsFieldInfo,
) -> bool:
"""Check if a host-scoped credential matches the host required by the input."""
# We need to know the host to match host-scoped credentials to.
# Graph.aggregate_credentials_inputs() adds the node's set URL value (if any)
# to discriminator_values. No discriminator_values -> no host to match against.
if not requirements.discriminator_values:
return True
# Check that credential host matches required host.
# Host-scoped credential inputs are grouped by host, so any item from the set works.
return credential.matches_url(list(requirements.discriminator_values)[0])
async def check_user_has_required_credentials(
user_id: str,
required_credentials: list[CredentialsMetaInput],

View File

@@ -162,16 +162,8 @@ class LinearClient:
"searchTerm": team_name,
}
result = await self.query(query, variables)
nodes = result["teams"]["nodes"]
if not nodes:
raise LinearAPIException(
f"Team '{team_name}' not found. Check the team name or key and try again.",
status_code=404,
)
return nodes[0]["id"]
team_id = await self.query(query, variables)
return team_id["teams"]["nodes"][0]["id"]
except LinearAPIException as e:
raise e
@@ -248,44 +240,17 @@ class LinearClient:
except LinearAPIException as e:
raise e
async def try_search_issues(
self,
term: str,
max_results: int = 10,
team_id: str | None = None,
) -> list[Issue]:
async def try_search_issues(self, term: str) -> list[Issue]:
try:
query = """
query SearchIssues(
$term: String!,
$first: Int,
$teamId: String
) {
searchIssues(
term: $term,
first: $first,
teamId: $teamId
) {
query SearchIssues($term: String!, $includeComments: Boolean!) {
searchIssues(term: $term, includeComments: $includeComments) {
nodes {
id
identifier
title
description
priority
createdAt
state {
id
name
type
}
project {
id
name
}
assignee {
id
name
}
}
}
}
@@ -293,8 +258,7 @@ class LinearClient:
variables: dict[str, Any] = {
"term": term,
"first": max_results,
"teamId": team_id,
"includeComments": True,
}
issues = await self.query(query, variables)

View File

@@ -17,7 +17,7 @@ from ._config import (
LinearScope,
linear,
)
from .models import CreateIssueResponse, Issue, State
from .models import CreateIssueResponse, Issue
class LinearCreateIssueBlock(Block):
@@ -135,20 +135,9 @@ class LinearSearchIssuesBlock(Block):
description="Linear credentials with read permissions",
required_scopes={LinearScope.READ},
)
max_results: int = SchemaField(
description="Maximum number of results to return",
default=10,
ge=1,
le=100,
)
team_name: str | None = SchemaField(
description="Optional team name to filter results (e.g., 'Internal', 'Open Source')",
default=None,
)
class Output(BlockSchemaOutput):
issues: list[Issue] = SchemaField(description="List of issues")
error: str = SchemaField(description="Error message if the search failed")
def __init__(self):
super().__init__(
@@ -156,11 +145,8 @@ class LinearSearchIssuesBlock(Block):
description="Searches for issues on Linear",
input_schema=self.Input,
output_schema=self.Output,
categories={BlockCategory.PRODUCTIVITY, BlockCategory.ISSUE_TRACKING},
test_input={
"term": "Test issue",
"max_results": 10,
"team_name": None,
"credentials": TEST_CREDENTIALS_INPUT_OAUTH,
},
test_credentials=TEST_CREDENTIALS_OAUTH,
@@ -170,14 +156,10 @@ class LinearSearchIssuesBlock(Block):
[
Issue(
id="abc123",
identifier="TST-123",
identifier="abc123",
title="Test issue",
description="Test description",
priority=1,
state=State(
id="state1", name="In Progress", type="started"
),
createdAt="2026-01-15T10:00:00.000Z",
)
],
)
@@ -186,12 +168,10 @@ class LinearSearchIssuesBlock(Block):
"search_issues": lambda *args, **kwargs: [
Issue(
id="abc123",
identifier="TST-123",
identifier="abc123",
title="Test issue",
description="Test description",
priority=1,
state=State(id="state1", name="In Progress", type="started"),
createdAt="2026-01-15T10:00:00.000Z",
)
]
},
@@ -201,22 +181,10 @@ class LinearSearchIssuesBlock(Block):
async def search_issues(
credentials: OAuth2Credentials | APIKeyCredentials,
term: str,
max_results: int = 10,
team_name: str | None = None,
) -> list[Issue]:
client = LinearClient(credentials=credentials)
# Resolve team name to ID if provided
# Raises LinearAPIException with descriptive message if team not found
team_id: str | None = None
if team_name:
team_id = await client.try_get_team_by_name(team_name=team_name)
return await client.try_search_issues(
term=term,
max_results=max_results,
team_id=team_id,
)
response: list[Issue] = await client.try_search_issues(term=term)
return response
async def run(
self,
@@ -228,10 +196,7 @@ class LinearSearchIssuesBlock(Block):
"""Execute the issue search"""
try:
issues = await self.search_issues(
credentials=credentials,
term=input_data.term,
max_results=input_data.max_results,
team_name=input_data.team_name,
credentials=credentials, term=input_data.term
)
yield "issues", issues
except LinearAPIException as e:

View File

@@ -36,21 +36,12 @@ class Project(BaseModel):
content: str | None = None
class State(BaseModel):
id: str
name: str
type: str | None = (
None # Workflow state type (e.g., "triage", "backlog", "started", "completed", "canceled")
)
class Issue(BaseModel):
id: str
identifier: str
title: str
description: str | None
priority: int
state: State | None = None
project: Project | None = None
createdAt: str | None = None
comments: list[Comment] | None = None

View File

@@ -19,6 +19,7 @@ from typing import (
cast,
get_args,
)
from urllib.parse import urlparse
from uuid import uuid4
from prisma.enums import CreditTransactionType, OnboardingStep
@@ -41,7 +42,6 @@ from typing_extensions import TypedDict
from backend.integrations.providers import ProviderName
from backend.util.json import loads as json_loads
from backend.util.request import parse_url
from backend.util.settings import Secrets
# Type alias for any provider name (including custom ones)
@@ -397,25 +397,19 @@ class HostScopedCredentials(_BaseCredentials):
def matches_url(self, url: str) -> bool:
"""Check if this credential should be applied to the given URL."""
request_host, request_port = _extract_host_from_url(url)
cred_scope_host, cred_scope_port = _extract_host_from_url(self.host)
parsed_url = urlparse(url)
# Extract hostname without port
request_host = parsed_url.hostname
if not request_host:
return False
# If a port is specified in credential host, the request host port must match
if cred_scope_port is not None and request_port != cred_scope_port:
return False
# Non-standard ports are only allowed if explicitly specified in credential host
elif cred_scope_port is None and request_port not in (80, 443, None):
return False
# Simple host matching
if cred_scope_host == request_host:
# Simple host matching - exact match or wildcard subdomain match
if self.host == request_host:
return True
# Support wildcard matching (e.g., "*.example.com" matches "api.example.com")
if cred_scope_host.startswith("*."):
domain = cred_scope_host[2:] # Remove "*."
if self.host.startswith("*."):
domain = self.host[2:] # Remove "*."
return request_host.endswith(f".{domain}") or request_host == domain
return False
@@ -557,13 +551,13 @@ class CredentialsMetaInput(BaseModel, Generic[CP, CT]):
)
def _extract_host_from_url(url: str) -> tuple[str, int | None]:
"""Extract host and port from URL for grouping host-scoped credentials."""
def _extract_host_from_url(url: str) -> str:
"""Extract host from URL for grouping host-scoped credentials."""
try:
parsed = parse_url(url)
return parsed.hostname or url, parsed.port
parsed = urlparse(url)
return parsed.hostname or url
except Exception:
return "", None
return ""
class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
@@ -612,7 +606,7 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
providers = frozenset(
[cast(CP, "http")]
+ [
cast(CP, parse_url(str(value)).netloc)
cast(CP, _extract_host_from_url(str(value)))
for value in field.discriminator_values
]
)

View File

@@ -79,23 +79,10 @@ class TestHostScopedCredentials:
headers={"Authorization": SecretStr("Bearer token")},
)
# Non-standard ports require explicit port in credential host
assert not creds.matches_url("http://localhost:8080/api/v1")
assert creds.matches_url("http://localhost:8080/api/v1")
assert creds.matches_url("https://localhost:443/secure/endpoint")
assert creds.matches_url("http://localhost/simple")
def test_matches_url_with_explicit_port(self):
"""Test URL matching with explicit port in credential host."""
creds = HostScopedCredentials(
provider="custom",
host="localhost:8080",
headers={"Authorization": SecretStr("Bearer token")},
)
assert creds.matches_url("http://localhost:8080/api/v1")
assert not creds.matches_url("http://localhost:3000/api/v1")
assert not creds.matches_url("http://localhost/simple")
def test_empty_headers_dict(self):
"""Test HostScopedCredentials with empty headers."""
creds = HostScopedCredentials(
@@ -141,20 +128,8 @@ class TestHostScopedCredentials:
("*.example.com", "https://sub.api.example.com/test", True),
("*.example.com", "https://example.com/test", True),
("*.example.com", "https://example.org/test", False),
# Non-standard ports require explicit port in credential host
("localhost", "http://localhost:3000/test", False),
("localhost:3000", "http://localhost:3000/test", True),
("localhost", "http://localhost:3000/test", True),
("localhost", "http://127.0.0.1:3000/test", False),
# IPv6 addresses (frontend stores with brackets via URL.hostname)
("[::1]", "http://[::1]/test", True),
("[::1]", "http://[::1]:80/test", True),
("[::1]", "https://[::1]:443/test", True),
("[::1]", "http://[::1]:8080/test", False), # Non-standard port
("[::1]:8080", "http://[::1]:8080/test", True),
("[::1]:8080", "http://[::1]:9090/test", False),
("[2001:db8::1]", "http://[2001:db8::1]/path", True),
("[2001:db8::1]", "https://[2001:db8::1]:443/path", True),
("[2001:db8::1]", "http://[2001:db8::ff]/path", False),
],
)
def test_url_matching_parametrized(self, host: str, test_url: str, expected: bool):

View File

@@ -157,7 +157,12 @@ async def validate_url(
is_trusted: Boolean indicating if the hostname is in trusted_origins
ip_addresses: List of IP addresses for the host; empty if the host is trusted
"""
parsed = parse_url(url)
# Canonicalize URL
url = url.strip("/ ").replace("\\", "/")
parsed = urlparse(url)
if not parsed.scheme:
url = f"http://{url}"
parsed = urlparse(url)
# Check scheme
if parsed.scheme not in ALLOWED_SCHEMES:
@@ -215,17 +220,6 @@ async def validate_url(
)
def parse_url(url: str) -> URL:
"""Canonicalizes and parses a URL string."""
url = url.strip("/ ").replace("\\", "/")
# Ensure scheme is present for proper parsing
if not re.match(r"[a-z0-9+.\-]+://", url):
url = f"http://{url}"
return urlparse(url)
def pin_url(url: URL, ip_addresses: Optional[list[str]] = None) -> URL:
"""
Pins a URL to a specific IP address to prevent DNS rebinding attacks.

View File

@@ -1,17 +1,6 @@
import { OAuthPopupResultMessage } from "./types";
import { NextResponse } from "next/server";
/**
* Safely encode a value as JSON for embedding in a script tag.
* Escapes characters that could break out of the script context to prevent XSS.
*/
function safeJsonStringify(value: unknown): string {
return JSON.stringify(value)
.replace(/</g, "\\u003c")
.replace(/>/g, "\\u003e")
.replace(/&/g, "\\u0026");
}
// This route is intended to be used as the callback for integration OAuth flows,
// controlled by the CredentialsInput component. The CredentialsInput opens the login
// page in a pop-up window, which then redirects to this route to close the loop.
@@ -34,13 +23,12 @@ export async function GET(request: Request) {
console.debug("Sending message to opener:", message);
// Return a response with the message as JSON and a script to close the window
// Use safeJsonStringify to prevent XSS by escaping <, >, and & characters
return new NextResponse(
`
<html>
<body>
<script>
window.opener.postMessage(${safeJsonStringify(message)});
window.opener.postMessage(${JSON.stringify(message)});
window.close();
</script>
</body>

View File

@@ -26,20 +26,8 @@ export function buildCopilotChatUrl(prompt: string): string {
export function getQuickActions(): string[] {
return [
"I don't know where to start, just ask me stuff",
"I do the same thing every week and it's killing me",
"Help me find where I'm wasting my time",
"Show me what I can automate",
"Design a custom workflow",
"Help me with content creation",
];
}
export function getInputPlaceholder(width?: number) {
if (!width) return "What's your role and what eats up most of your day?";
if (width < 500) {
return "I'm a chef and I hate...";
}
if (width <= 1080) {
return "What's your role and what eats up most of your day?";
}
return "What's your role and what eats up most of your day? e.g. 'I'm a recruiter and I hate...'";
}

View File

@@ -6,9 +6,7 @@ import { Text } from "@/components/atoms/Text/Text";
import { Chat } from "@/components/contextual/Chat/Chat";
import { ChatInput } from "@/components/contextual/Chat/components/ChatInput/ChatInput";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { useEffect, useState } from "react";
import { useCopilotStore } from "./copilot-page-store";
import { getInputPlaceholder } from "./helpers";
import { useCopilotPage } from "./useCopilotPage";
export default function CopilotPage() {
@@ -16,25 +14,8 @@ export default function CopilotPage() {
const isInterruptModalOpen = useCopilotStore((s) => s.isInterruptModalOpen);
const confirmInterrupt = useCopilotStore((s) => s.confirmInterrupt);
const cancelInterrupt = useCopilotStore((s) => s.cancelInterrupt);
const [inputPlaceholder, setInputPlaceholder] = useState(
getInputPlaceholder(),
);
useEffect(() => {
const handleResize = () => {
setInputPlaceholder(getInputPlaceholder(window.innerWidth));
};
handleResize();
window.addEventListener("resize", handleResize);
return () => window.removeEventListener("resize", handleResize);
}, []);
const { greetingName, quickActions, isLoading, hasSession, initialPrompt } =
state;
const {
handleQuickAction,
startChatWithPrompt,
@@ -92,7 +73,7 @@ export default function CopilotPage() {
}
return (
<div className="flex h-full flex-1 items-center justify-center overflow-y-auto bg-[#f8f8f9] px-3 py-5 md:px-6 md:py-10">
<div className="flex h-full flex-1 items-center justify-center overflow-y-auto bg-[#f8f8f9] px-6 py-10">
<div className="w-full text-center">
{isLoading ? (
<div className="mx-auto max-w-2xl">
@@ -109,25 +90,25 @@ export default function CopilotPage() {
</div>
) : (
<>
<div className="mx-auto max-w-3xl">
<div className="mx-auto max-w-2xl">
<Text
variant="h3"
className="mb-1 !text-[1.375rem] text-zinc-700"
className="mb-3 !text-[1.375rem] text-zinc-700"
>
Hey, <span className="text-violet-600">{greetingName}</span>
</Text>
<Text variant="h3" className="mb-8 !font-normal">
Tell me about your work I&apos;ll find what to automate.
What do you want to automate?
</Text>
<div className="mb-6">
<ChatInput
onSend={startChatWithPrompt}
placeholder={inputPlaceholder}
placeholder='You can search or just ask - e.g. "create a blog post outline"'
/>
</div>
</div>
<div className="flex flex-wrap items-center justify-center gap-3 overflow-x-auto [-ms-overflow-style:none] [scrollbar-width:none] [&::-webkit-scrollbar]:hidden">
<div className="flex flex-nowrap items-center justify-center gap-3 overflow-x-auto [-ms-overflow-style:none] [scrollbar-width:none] [&::-webkit-scrollbar]:hidden">
{quickActions.map((action) => (
<Button
key={action}
@@ -135,7 +116,7 @@ export default function CopilotPage() {
variant="outline"
size="small"
onClick={() => handleQuickAction(action)}
className="h-auto shrink-0 border-zinc-300 px-3 py-2 text-[.9rem] text-zinc-600"
className="h-auto shrink-0 border-zinc-600 !px-4 !py-2 text-[1rem] text-zinc-600"
>
{action}
</Button>

View File

@@ -2,6 +2,7 @@ import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessi
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { useBreakpoint } from "@/lib/hooks/useBreakpoint";
import { cn } from "@/lib/utils";
import { GlobeHemisphereEastIcon } from "@phosphor-icons/react";
import { useEffect } from "react";
@@ -55,6 +56,10 @@ export function ChatContainer({
onStreamingChange?.(isStreaming);
}, [isStreaming, onStreamingChange]);
const breakpoint = useBreakpoint();
const isMobile =
breakpoint === "base" || breakpoint === "sm" || breakpoint === "md";
return (
<div
className={cn(
@@ -122,7 +127,11 @@ export function ChatContainer({
disabled={isStreaming || !sessionId}
isStreaming={isStreaming}
onStop={stopStreaming}
placeholder="What else can I help with?"
placeholder={
isMobile
? "You can search or just ask"
: 'You can search or just ask — e.g. "create a blog post outline"'
}
/>
</div>
</div>

View File

@@ -74,20 +74,19 @@ export function ChatInput({
hasMultipleLines ? "rounded-xlarge" : "rounded-full",
)}
>
{!value && !isRecording && (
<div
className="pointer-events-none absolute inset-0 top-0.5 flex items-center justify-start pl-14 text-[1rem] text-zinc-400"
aria-hidden="true"
>
{isTranscribing ? "Transcribing..." : placeholder}
</div>
)}
<textarea
id={inputId}
aria-label="Chat message input"
value={value}
onChange={handleChange}
onKeyDown={handleKeyDown}
placeholder={
isTranscribing
? "Transcribing..."
: isRecording
? ""
: placeholder
}
disabled={isInputDisabled}
rows={1}
className={cn(
@@ -123,14 +122,13 @@ export function ChatInput({
size="icon"
aria-label={isRecording ? "Stop recording" : "Start recording"}
onClick={toggleRecording}
disabled={disabled || isTranscribing || isStreaming}
disabled={disabled || isTranscribing}
className={cn(
isRecording
? "animate-pulse border-red-500 bg-red-500 text-white hover:border-red-600 hover:bg-red-600"
: isTranscribing
? "border-zinc-300 bg-zinc-100 text-zinc-400"
: "border-zinc-300 bg-white text-zinc-500 hover:border-zinc-400 hover:bg-zinc-50 hover:text-zinc-700",
isStreaming && "opacity-40",
)}
>
{isTranscribing ? (

View File

@@ -38,8 +38,8 @@ export function AudioWaveform({
// Create audio context and analyser
const audioContext = new AudioContext();
const analyser = audioContext.createAnalyser();
analyser.fftSize = 256;
analyser.smoothingTimeConstant = 0.3;
analyser.fftSize = 512;
analyser.smoothingTimeConstant = 0.8;
// Connect the stream to the analyser
const source = audioContext.createMediaStreamSource(stream);
@@ -73,11 +73,10 @@ export function AudioWaveform({
maxAmplitude = Math.max(maxAmplitude, amplitude);
}
// Normalize amplitude (0-128 range) to 0-1
const normalized = maxAmplitude / 128;
// Apply sensitivity boost (multiply by 4) and use sqrt curve to amplify quiet sounds
const boosted = Math.min(1, Math.sqrt(normalized) * 4);
const height = minBarHeight + boosted * (maxBarHeight - minBarHeight);
// Map amplitude (0-128) to bar height
const normalized = (maxAmplitude / 128) * 255;
const height =
minBarHeight + (normalized / 255) * (maxBarHeight - minBarHeight);
newBars.push(height);
}

View File

@@ -224,7 +224,7 @@ export function useVoiceRecording({
[value, isTranscribing, toggleRecording, baseHandleKeyDown],
);
const showMicButton = isSupported;
const showMicButton = isSupported && !isStreaming;
const isInputDisabled = disabled || isStreaming || isTranscribing;
// Cleanup on unmount

View File

@@ -1,7 +1,6 @@
"use client";
import { cn } from "@/lib/utils";
import { useEffect, useState } from "react";
import type { ChatMessageData } from "../ChatMessage/useChatMessage";
import { StreamingMessage } from "../StreamingMessage/StreamingMessage";
import { ThinkingMessage } from "../ThinkingMessage/ThinkingMessage";
@@ -32,29 +31,6 @@ export function MessageList({
isStreaming,
});
const [showThinkingMessage, setShowThinkingMessage] = useState(false);
const [thinkingComplete, setThinkingComplete] = useState(false);
// Manage thinking message visibility and completion state
useEffect(() => {
if (isStreaming && streamingChunks.length === 0) {
// Start showing thinking message
setShowThinkingMessage(true);
setThinkingComplete(false);
} else if (streamingChunks.length > 0 && showThinkingMessage) {
// Chunks arrived - trigger completion animation
setThinkingComplete(true);
} else if (!isStreaming) {
// Streaming ended completely - reset state
setShowThinkingMessage(false);
setThinkingComplete(false);
}
}, [isStreaming, streamingChunks.length, showThinkingMessage]);
function handleThinkingAnimationComplete() {
setShowThinkingMessage(false);
}
return (
<div className="relative flex min-h-0 flex-1 flex-col">
{/* Top fade shadow */}
@@ -116,15 +92,10 @@ export function MessageList({
})()}
{/* Render thinking message when streaming but no chunks yet */}
{showThinkingMessage && (
<ThinkingMessage
isComplete={thinkingComplete}
onAnimationComplete={handleThinkingAnimationComplete}
/>
)}
{isStreaming && streamingChunks.length === 0 && <ThinkingMessage />}
{/* Render streaming message if active (wait for thinking animation to complete) */}
{isStreaming && streamingChunks.length > 0 && !showThinkingMessage && (
{/* Render streaming message if active */}
{isStreaming && streamingChunks.length > 0 && (
<StreamingMessage
chunks={streamingChunks}
onComplete={onStreamComplete}

View File

@@ -1,41 +1,28 @@
import { Progress } from "@/components/atoms/Progress/Progress";
import { cn } from "@/lib/utils";
import { useEffect, useRef, useState } from "react";
import { AIChatBubble } from "../AIChatBubble/AIChatBubble";
import { useAsymptoticProgress } from "../ToolCallMessage/useAsymptoticProgress";
export interface ThinkingMessageProps {
className?: string;
isComplete?: boolean;
onAnimationComplete?: () => void;
}
export function ThinkingMessage({
className,
isComplete = false,
onAnimationComplete,
}: ThinkingMessageProps) {
export function ThinkingMessage({ className }: ThinkingMessageProps) {
const [showSlowLoader, setShowSlowLoader] = useState(false);
const [showCoffeeMessage, setShowCoffeeMessage] = useState(false);
const timerRef = useRef<NodeJS.Timeout | null>(null);
const coffeeTimerRef = useRef<NodeJS.Timeout | null>(null);
const delayTimerRef = useRef<NodeJS.Timeout | null>(null);
const { progress, isAnimationDone } = useAsymptoticProgress(
showCoffeeMessage,
isComplete,
);
useEffect(() => {
if (timerRef.current === null) {
timerRef.current = setTimeout(() => {
setShowSlowLoader(true);
}, 3000);
}, 8000);
}
if (coffeeTimerRef.current === null) {
coffeeTimerRef.current = setTimeout(() => {
setShowCoffeeMessage(true);
}, 8000);
}, 10000);
}
return () => {
@@ -50,22 +37,6 @@ export function ThinkingMessage({
};
}, []);
// Handle completion animation delay before unmounting
useEffect(() => {
if (isAnimationDone && onAnimationComplete) {
delayTimerRef.current = setTimeout(() => {
onAnimationComplete();
}, 200); // 200ms delay after animation completes
}
return () => {
if (delayTimerRef.current) {
clearTimeout(delayTimerRef.current);
delayTimerRef.current = null;
}
};
}, [isAnimationDone, onAnimationComplete]);
return (
<div
className={cn(
@@ -78,18 +49,9 @@ export function ThinkingMessage({
<AIChatBubble>
<div className="transition-all duration-500 ease-in-out">
{showCoffeeMessage ? (
<div className="flex flex-col items-center gap-3">
<div className="flex w-full max-w-[280px] flex-col gap-1.5">
<div className="flex items-center justify-between text-xs text-neutral-500">
<span>Working on it...</span>
<span>{Math.round(progress)}%</span>
</div>
<Progress value={progress} className="h-2 w-full" />
</div>
<span className="inline-block animate-shimmer bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent">
This could take a few minutes, grab a coffee
</span>
</div>
<span className="inline-block animate-shimmer bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent">
This could take a few minutes, grab a coffee
</span>
) : showSlowLoader ? (
<span className="inline-block animate-shimmer bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent">
Taking a bit more time...

View File

@@ -1,117 +0,0 @@
import { useEffect, useRef, useState } from "react";
/**
* Cubic Ease Out easing function: 1 - (1 - t)^3
* Starts fast and decelerates smoothly to a stop.
*/
function cubicEaseOut(t: number): number {
return 1 - Math.pow(1 - t, 3);
}
export interface AsymptoticProgressResult {
progress: number;
isAnimationDone: boolean;
}
/**
* Hook that returns a progress value that starts fast and slows down,
* asymptotically approaching but never reaching the max value.
*
* Uses a half-life formula: progress = max * (1 - 0.5^(time/halfLife))
* This creates the "game loading bar" effect where:
* - 50% is reached at halfLifeSeconds
* - 75% is reached at 2 * halfLifeSeconds
* - 87.5% is reached at 3 * halfLifeSeconds
* - and so on...
*
* When isComplete is set to true, animates from current progress to 100%
* using Cubic Ease Out over 300ms.
*
* @param isActive - Whether the progress should be animating
* @param isComplete - Whether to animate to 100% (completion animation)
* @param halfLifeSeconds - Time in seconds to reach 50% progress (default: 30)
* @param maxProgress - Maximum progress value to approach (default: 100)
* @param intervalMs - Update interval in milliseconds (default: 100)
* @returns Object with current progress value and whether completion animation is done
*/
export function useAsymptoticProgress(
isActive: boolean,
isComplete = false,
halfLifeSeconds = 30,
maxProgress = 100,
intervalMs = 100,
): AsymptoticProgressResult {
const [progress, setProgress] = useState(0);
const [isAnimationDone, setIsAnimationDone] = useState(false);
const elapsedTimeRef = useRef(0);
const completionStartProgressRef = useRef<number | null>(null);
const animationFrameRef = useRef<number | null>(null);
// Handle asymptotic progress when active but not complete
useEffect(() => {
if (!isActive || isComplete) {
if (!isComplete) {
setProgress(0);
elapsedTimeRef.current = 0;
setIsAnimationDone(false);
completionStartProgressRef.current = null;
}
return;
}
const interval = setInterval(() => {
elapsedTimeRef.current += intervalMs / 1000;
// Half-life approach: progress = max * (1 - 0.5^(time/halfLife))
// At t=halfLife: 50%, at t=2*halfLife: 75%, at t=3*halfLife: 87.5%, etc.
const newProgress =
maxProgress *
(1 - Math.pow(0.5, elapsedTimeRef.current / halfLifeSeconds));
setProgress(newProgress);
}, intervalMs);
return () => clearInterval(interval);
}, [isActive, isComplete, halfLifeSeconds, maxProgress, intervalMs]);
// Handle completion animation
useEffect(() => {
if (!isComplete) {
return;
}
// Capture the starting progress when completion begins
if (completionStartProgressRef.current === null) {
completionStartProgressRef.current = progress;
}
const startProgress = completionStartProgressRef.current;
const animationDuration = 300; // 300ms
const startTime = performance.now();
function animate(currentTime: number) {
const elapsed = currentTime - startTime;
const t = Math.min(elapsed / animationDuration, 1);
// Cubic Ease Out from current progress to maxProgress
const easedProgress =
startProgress + (maxProgress - startProgress) * cubicEaseOut(t);
setProgress(easedProgress);
if (t < 1) {
animationFrameRef.current = requestAnimationFrame(animate);
} else {
setProgress(maxProgress);
setIsAnimationDone(true);
}
}
animationFrameRef.current = requestAnimationFrame(animate);
return () => {
if (animationFrameRef.current !== null) {
cancelAnimationFrame(animationFrameRef.current);
}
};
}, [isComplete, maxProgress]);
return { progress, isAnimationDone };
}

View File

@@ -41,17 +41,7 @@ export function HostScopedCredentialsModal({
const currentHost = currentUrl ? getHostFromUrl(currentUrl) : "";
const formSchema = z.object({
host: z
.string()
.min(1, "Host is required")
.refine((val) => !/^[a-zA-Z][a-zA-Z\d+\-.]*:\/\//.test(val), {
message: "Enter only the host (e.g. api.example.com), not a full URL",
})
.refine((val) => !val.includes("/"), {
message:
"Enter only the host (e.g. api.example.com), without a trailing path. " +
"You may specify a port (e.g. api.example.com:8080) if needed.",
}),
host: z.string().min(1, "Host is required"),
title: z.string().optional(),
headers: z.record(z.string()).optional(),
});

View File

@@ -62,6 +62,7 @@ Below is a comprehensive list of all available blocks, categorized by their prim
| [Get Store Agent Details](block-integrations/system/store_operations.md#get-store-agent-details) | Get detailed information about an agent from the store |
| [Get Weather Information](block-integrations/basic.md#get-weather-information) | Retrieves weather information for a specified location using OpenWeatherMap API |
| [Human In The Loop](block-integrations/basic.md#human-in-the-loop) | Pause execution and wait for human approval or modification of data |
| [Linear Search Issues](block-integrations/linear/issues.md#linear-search-issues) | Searches for issues on Linear |
| [List Is Empty](block-integrations/basic.md#list-is-empty) | Checks if a list is empty |
| [List Library Agents](block-integrations/system/library_operations.md#list-library-agents) | List all agents in your personal library |
| [Note](block-integrations/basic.md#note) | A visual annotation block that displays a sticky note in the workflow editor for documentation and organization purposes |
@@ -570,7 +571,6 @@ Below is a comprehensive list of all available blocks, categorized by their prim
| [Linear Create Comment](block-integrations/linear/comment.md#linear-create-comment) | Creates a new comment on a Linear issue |
| [Linear Create Issue](block-integrations/linear/issues.md#linear-create-issue) | Creates a new issue on Linear |
| [Linear Get Project Issues](block-integrations/linear/issues.md#linear-get-project-issues) | Gets issues from a Linear project filtered by status and assignee |
| [Linear Search Issues](block-integrations/linear/issues.md#linear-search-issues) | Searches for issues on Linear |
| [Linear Search Projects](block-integrations/linear/projects.md#linear-search-projects) | Searches for projects on Linear |
## Hardware

View File

@@ -90,9 +90,9 @@ Searches for issues on Linear
### How it works
<!-- MANUAL: how_it_works -->
This block searches for issues in Linear using a text query. It searches across issue titles, descriptions, and other fields to find matching issues. You can limit the number of results returned using the `max_results` parameter (default: 10, max: 100) to control token consumption and response size.
This block searches for issues in Linear using a text query. It searches across issue titles, descriptions, and other fields to find matching issues.
Optionally filter results by team name to narrow searches to specific workspaces. If a team name is provided, the block resolves it to a team ID before searching. Returns matching issues with their state, creation date, project, and assignee information. If the search or team resolution fails, an error message is returned.
Returns a list of issues matching the search term.
<!-- END MANUAL -->
### Inputs
@@ -100,14 +100,12 @@ Optionally filter results by team name to narrow searches to specific workspaces
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| term | Term to search for issues | str | Yes |
| max_results | Maximum number of results to return | int | No |
| team_name | Optional team name to filter results (e.g., 'Internal', 'Open Source') | str | No |
### Outputs
| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the search failed | str |
| error | Error message if the operation failed | str |
| issues | List of issues | List[Issue] |
### Possible use case