Compare commits

..

5 Commits

Author SHA1 Message Date
Zamil Majdy
ec7c7ebea2 refactor(backend): extract _extract_agent_json helper, fail fast on unknown poll status 2026-02-25 16:57:49 +07:00
Zamil Majdy
8ef8bec14f fix(backend): validate completed job result type in _submit_and_poll 2026-02-25 16:22:19 +07:00
Zamil Majdy
9b3e25d98e fix(backend): retry transient HTTP errors during polling, validate agent_json responses 2026-02-25 15:44:11 +07:00
Zamil Majdy
0bc098acb1 fix(backend): address PR review - wire timeout setting, use monotonic clock, cap poll errors 2026-02-25 14:53:10 +07:00
Zamil Majdy
d78e0ee122 feat(backend/copilot): use async polling for agent-generator + frontend SSE reconnect
Platform service now submits jobs to agent-generator and polls for results
(10s interval) instead of blocking on a single HTTP call for up to 30 min.
asyncio.sleep in the poll loop yields to the event loop, keeping SSE
heartbeats alive through GCP's L7 load balancer.

Frontend auto-reconnects up to 3 times when SSE drops mid-stream.
2026-02-24 21:01:51 +07:00
129 changed files with 2862 additions and 10772 deletions

4
.gitignore vendored
View File

@@ -180,6 +180,4 @@ autogpt_platform/backend/settings.py
.claude/settings.local.json
CLAUDE.local.md
/autogpt_platform/backend/logs
.next
# Implementation plans (generated by AI agents)
plans/
.next

View File

@@ -1,10 +1,3 @@
default_install_hook_types:
- pre-commit
- pre-push
- post-checkout
default_stages: [pre-commit]
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
@@ -24,7 +17,6 @@ repos:
name: Detect secrets
description: Detects high entropy strings that are likely to be passwords.
files: ^autogpt_platform/
exclude: pnpm-lock\.yaml$
stages: [pre-push]
- repo: local
@@ -34,106 +26,49 @@ repos:
- id: poetry-install
name: Check & Install dependencies - AutoGPT Platform - Backend
alias: poetry-install-platform-backend
entry: poetry -C autogpt_platform/backend install
# include autogpt_libs source (since it's a path dependency)
entry: >
bash -c '
if [ -n "$PRE_COMMIT_FROM_REF" ]; then
git diff --name-only "$PRE_COMMIT_FROM_REF" "$PRE_COMMIT_TO_REF"
else
git diff --cached --name-only
fi | grep -qE "^autogpt_platform/(backend|autogpt_libs)/poetry\.lock$" || exit 0;
poetry -C autogpt_platform/backend install
'
always_run: true
files: ^autogpt_platform/(backend|autogpt_libs)/poetry\.lock$
types: [file]
language: system
pass_filenames: false
stages: [pre-commit, post-checkout]
- id: poetry-install
name: Check & Install dependencies - AutoGPT Platform - Libs
alias: poetry-install-platform-libs
entry: >
bash -c '
if [ -n "$PRE_COMMIT_FROM_REF" ]; then
git diff --name-only "$PRE_COMMIT_FROM_REF" "$PRE_COMMIT_TO_REF"
else
git diff --cached --name-only
fi | grep -qE "^autogpt_platform/autogpt_libs/poetry\.lock$" || exit 0;
poetry -C autogpt_platform/autogpt_libs install
'
always_run: true
entry: poetry -C autogpt_platform/autogpt_libs install
files: ^autogpt_platform/autogpt_libs/poetry\.lock$
types: [file]
language: system
pass_filenames: false
stages: [pre-commit, post-checkout]
- id: pnpm-install
name: Check & Install dependencies - AutoGPT Platform - Frontend
alias: pnpm-install-platform-frontend
entry: >
bash -c '
if [ -n "$PRE_COMMIT_FROM_REF" ]; then
git diff --name-only "$PRE_COMMIT_FROM_REF" "$PRE_COMMIT_TO_REF"
else
git diff --cached --name-only
fi | grep -qE "^autogpt_platform/frontend/pnpm-lock\.yaml$" || exit 0;
pnpm --prefix autogpt_platform/frontend install
'
always_run: true
language: system
pass_filenames: false
stages: [pre-commit, post-checkout]
- id: poetry-install
name: Check & Install dependencies - Classic - AutoGPT
alias: poetry-install-classic-autogpt
entry: >
bash -c '
if [ -n "$PRE_COMMIT_FROM_REF" ]; then
git diff --name-only "$PRE_COMMIT_FROM_REF" "$PRE_COMMIT_TO_REF"
else
git diff --cached --name-only
fi | grep -qE "^classic/(original_autogpt|forge)/poetry\.lock$" || exit 0;
poetry -C classic/original_autogpt install
'
entry: poetry -C classic/original_autogpt install
# include forge source (since it's a path dependency)
always_run: true
files: ^classic/(original_autogpt|forge)/poetry\.lock$
types: [file]
language: system
pass_filenames: false
stages: [pre-commit, post-checkout]
- id: poetry-install
name: Check & Install dependencies - Classic - Forge
alias: poetry-install-classic-forge
entry: >
bash -c '
if [ -n "$PRE_COMMIT_FROM_REF" ]; then
git diff --name-only "$PRE_COMMIT_FROM_REF" "$PRE_COMMIT_TO_REF"
else
git diff --cached --name-only
fi | grep -qE "^classic/forge/poetry\.lock$" || exit 0;
poetry -C classic/forge install
'
always_run: true
entry: poetry -C classic/forge install
files: ^classic/forge/poetry\.lock$
types: [file]
language: system
pass_filenames: false
stages: [pre-commit, post-checkout]
- id: poetry-install
name: Check & Install dependencies - Classic - Benchmark
alias: poetry-install-classic-benchmark
entry: >
bash -c '
if [ -n "$PRE_COMMIT_FROM_REF" ]; then
git diff --name-only "$PRE_COMMIT_FROM_REF" "$PRE_COMMIT_TO_REF"
else
git diff --cached --name-only
fi | grep -qE "^classic/benchmark/poetry\.lock$" || exit 0;
poetry -C classic/benchmark install
'
always_run: true
entry: poetry -C classic/benchmark install
files: ^classic/benchmark/poetry\.lock$
types: [file]
language: system
pass_filenames: false
stages: [pre-commit, post-checkout]
- repo: local
# For proper type checking, Prisma client must be up-to-date.
@@ -141,54 +76,12 @@ repos:
- id: prisma-generate
name: Prisma Generate - AutoGPT Platform - Backend
alias: prisma-generate-platform-backend
entry: >
bash -c '
if [ -n "$PRE_COMMIT_FROM_REF" ]; then
git diff --name-only "$PRE_COMMIT_FROM_REF" "$PRE_COMMIT_TO_REF"
else
git diff --cached --name-only
fi | grep -qE "^autogpt_platform/((backend|autogpt_libs)/poetry\.lock|backend/schema\.prisma)$" || exit 0;
cd autogpt_platform/backend
&& poetry run prisma generate
&& poetry run gen-prisma-stub
'
entry: bash -c 'cd autogpt_platform/backend && poetry run prisma generate'
# include everything that triggers poetry install + the prisma schema
always_run: true
files: ^autogpt_platform/((backend|autogpt_libs)/poetry\.lock|backend/schema.prisma)$
types: [file]
language: system
pass_filenames: false
stages: [pre-commit, post-checkout]
- id: export-api-schema
name: Export API schema - AutoGPT Platform - Backend -> Frontend
alias: export-api-schema-platform
entry: >
bash -c '
cd autogpt_platform/backend
&& poetry run export-api-schema --output ../frontend/src/app/api/openapi.json
&& cd ../frontend
&& pnpm prettier --write ./src/app/api/openapi.json
'
files: ^autogpt_platform/backend/
language: system
pass_filenames: false
- id: generate-api-client
name: Generate API client - AutoGPT Platform - Frontend
alias: generate-api-client-platform-frontend
entry: >
bash -c '
SCHEMA=autogpt_platform/frontend/src/app/api/openapi.json;
if [ -n "$PRE_COMMIT_FROM_REF" ]; then
git diff --quiet "$PRE_COMMIT_FROM_REF" "$PRE_COMMIT_TO_REF" -- "$SCHEMA" && exit 0
else
git diff --quiet HEAD -- "$SCHEMA" && exit 0
fi;
cd autogpt_platform/frontend && pnpm generate:api
'
always_run: true
language: system
pass_filenames: false
stages: [pre-commit, post-checkout]
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.7.2

View File

@@ -88,23 +88,20 @@ async def require_auth(
)
def require_permission(*permissions: APIKeyPermission):
def require_permission(permission: APIKeyPermission):
"""
Dependency function for checking required permissions.
All listed permissions must be present.
Dependency function for checking specific permissions
(works with API keys and OAuth tokens)
"""
async def check_permissions(
async def check_permission(
auth: APIAuthorizationInfo = Security(require_auth),
) -> APIAuthorizationInfo:
missing = [p for p in permissions if p not in auth.scopes]
if missing:
if permission not in auth.scopes:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail=f"Missing required permission(s): "
f"{', '.join(p.value for p in missing)}",
detail=f"Missing required permission: {permission.value}",
)
return auth
return check_permissions
return check_permission

View File

@@ -18,7 +18,6 @@ from backend.data import user as user_db
from backend.data.auth.base import APIAuthorizationInfo
from backend.data.block import BlockInput, CompletedBlockOutput
from backend.executor.utils import add_graph_execution
from backend.integrations.webhooks.graph_lifecycle_hooks import on_graph_activate
from backend.util.settings import Settings
from .integrations import integrations_router
@@ -96,43 +95,6 @@ async def execute_graph_block(
return output
@v1_router.post(
path="/graphs",
tags=["graphs"],
status_code=201,
dependencies=[
Security(
require_permission(
APIKeyPermission.WRITE_GRAPH, APIKeyPermission.WRITE_LIBRARY
)
)
],
)
async def create_graph(
graph: graph_db.Graph,
auth: APIAuthorizationInfo = Security(
require_permission(APIKeyPermission.WRITE_GRAPH, APIKeyPermission.WRITE_LIBRARY)
),
) -> graph_db.GraphModel:
"""
Create a new agent graph.
The graph will be validated and assigned a new ID.
It is automatically added to the user's library.
"""
from backend.api.features.library import db as library_db
graph_model = graph_db.make_graph_model(graph, auth.user_id)
graph_model.reassign_ids(user_id=auth.user_id, reassign_graph_id=True)
graph_model.validate_graph(for_run=False)
await graph_db.create_graph(graph_model, user_id=auth.user_id)
await library_db.create_library_agent(graph_model, auth.user_id)
activated_graph = await on_graph_activate(graph_model, user_id=auth.user_id)
return activated_graph
@v1_router.post(
path="/graphs/{graph_id}/execute/{graph_version}",
tags=["graphs"],

View File

@@ -1,17 +1,15 @@
import logging
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from difflib import SequenceMatcher
from typing import Any, Sequence, get_args, get_origin
from typing import Sequence
import prisma
from prisma.enums import ContentType
from prisma.models import mv_suggested_blocks
import backend.api.features.library.db as library_db
import backend.api.features.library.model as library_model
import backend.api.features.store.db as store_db
import backend.api.features.store.model as store_model
from backend.api.features.store.hybrid_search import unified_hybrid_search
from backend.blocks import load_all_blocks
from backend.blocks._base import (
AnyBlockSchema,
@@ -21,6 +19,7 @@ from backend.blocks._base import (
BlockType,
)
from backend.blocks.llm import LlmModel
from backend.data.db import query_raw_with_schema
from backend.integrations.providers import ProviderName
from backend.util.cache import cached
from backend.util.models import Pagination
@@ -43,16 +42,6 @@ MAX_LIBRARY_AGENT_RESULTS = 100
MAX_MARKETPLACE_AGENT_RESULTS = 100
MIN_SCORE_FOR_FILTERED_RESULTS = 10.0
# Boost blocks over marketplace agents in search results
BLOCK_SCORE_BOOST = 50.0
# Block IDs to exclude from search results
EXCLUDED_BLOCK_IDS = frozenset(
{
"e189baac-8c20-45a1-94a7-55177ea42565", # AgentExecutorBlock
}
)
SearchResultItem = BlockInfo | library_model.LibraryAgent | store_model.StoreAgent
@@ -75,8 +64,8 @@ def get_block_categories(category_blocks: int = 3) -> list[BlockCategoryResponse
for block_type in load_all_blocks().values():
block: AnyBlockSchema = block_type()
# Skip disabled and excluded blocks
if block.disabled or block.id in EXCLUDED_BLOCK_IDS:
# Skip disabled blocks
if block.disabled:
continue
# Skip blocks that don't have categories (all should have at least one)
if not block.categories:
@@ -127,9 +116,6 @@ def get_blocks(
# Skip disabled blocks
if block.disabled:
continue
# Skip excluded blocks
if block.id in EXCLUDED_BLOCK_IDS:
continue
# Skip blocks that don't match the category
if category and category not in {c.name.lower() for c in block.categories}:
continue
@@ -269,25 +255,14 @@ async def _build_cached_search_results(
"my_agents": 0,
}
# Use hybrid search when query is present, otherwise list all blocks
if (include_blocks or include_integrations) and normalized_query:
block_results, block_total, integration_total = await _hybrid_search_blocks(
query=search_query,
include_blocks=include_blocks,
include_integrations=include_integrations,
)
scored_items.extend(block_results)
total_items["blocks"] = block_total
total_items["integrations"] = integration_total
elif include_blocks or include_integrations:
# No query - list all blocks using in-memory approach
block_results, block_total, integration_total = _collect_block_results(
include_blocks=include_blocks,
include_integrations=include_integrations,
)
scored_items.extend(block_results)
total_items["blocks"] = block_total
total_items["integrations"] = integration_total
block_results, block_total, integration_total = _collect_block_results(
normalized_query=normalized_query,
include_blocks=include_blocks,
include_integrations=include_integrations,
)
scored_items.extend(block_results)
total_items["blocks"] = block_total
total_items["integrations"] = integration_total
if include_library_agents:
library_response = await library_db.list_library_agents(
@@ -332,14 +307,10 @@ async def _build_cached_search_results(
def _collect_block_results(
*,
normalized_query: str,
include_blocks: bool,
include_integrations: bool,
) -> tuple[list[_ScoredItem], int, int]:
"""
Collect all blocks for listing (no search query).
All blocks get BLOCK_SCORE_BOOST to prioritize them over marketplace agents.
"""
results: list[_ScoredItem] = []
block_count = 0
integration_count = 0
@@ -352,10 +323,6 @@ def _collect_block_results(
if block.disabled:
continue
# Skip excluded blocks
if block.id in EXCLUDED_BLOCK_IDS:
continue
block_info = block.get_info()
credentials = list(block.input_schema.get_credentials_fields().values())
is_integration = len(credentials) > 0
@@ -365,6 +332,10 @@ def _collect_block_results(
if not is_integration and not include_blocks:
continue
score = _score_block(block, block_info, normalized_query)
if not _should_include_item(score, normalized_query):
continue
filter_type: FilterType = "integrations" if is_integration else "blocks"
if is_integration:
integration_count += 1
@@ -375,122 +346,8 @@ def _collect_block_results(
_ScoredItem(
item=block_info,
filter_type=filter_type,
score=BLOCK_SCORE_BOOST,
sort_key=block_info.name.lower(),
)
)
return results, block_count, integration_count
async def _hybrid_search_blocks(
*,
query: str,
include_blocks: bool,
include_integrations: bool,
) -> tuple[list[_ScoredItem], int, int]:
"""
Search blocks using hybrid search with builder-specific filtering.
Uses unified_hybrid_search for semantic + lexical search, then applies
post-filtering for block/integration types and scoring adjustments.
Scoring:
- Base: hybrid relevance score (0-1) scaled to 0-100, plus BLOCK_SCORE_BOOST
to prioritize blocks over marketplace agents in combined results
- +30 for exact name match, +15 for prefix name match
- +20 if the block has an LlmModel field and the query matches an LLM model name
Args:
query: The search query string
include_blocks: Whether to include regular blocks
include_integrations: Whether to include integration blocks
Returns:
Tuple of (scored_items, block_count, integration_count)
"""
results: list[_ScoredItem] = []
block_count = 0
integration_count = 0
if not include_blocks and not include_integrations:
return results, block_count, integration_count
normalized_query = query.strip().lower()
# Fetch more results to account for post-filtering
search_results, _ = await unified_hybrid_search(
query=query,
content_types=[ContentType.BLOCK],
page=1,
page_size=150,
min_score=0.10,
)
# Load all blocks for getting BlockInfo
all_blocks = load_all_blocks()
for result in search_results:
block_id = result["content_id"]
# Skip excluded blocks
if block_id in EXCLUDED_BLOCK_IDS:
continue
metadata = result.get("metadata", {})
hybrid_score = result.get("relevance", 0.0)
# Get the actual block class
if block_id not in all_blocks:
continue
block_cls = all_blocks[block_id]
block: AnyBlockSchema = block_cls()
if block.disabled:
continue
# Check block/integration filter using metadata
is_integration = metadata.get("is_integration", False)
if is_integration and not include_integrations:
continue
if not is_integration and not include_blocks:
continue
# Get block info
block_info = block.get_info()
# Calculate final score: scale hybrid score and add builder-specific bonuses
# Hybrid scores are 0-1, builder scores were 0-200+
# Add BLOCK_SCORE_BOOST to prioritize blocks over marketplace agents
final_score = hybrid_score * 100 + BLOCK_SCORE_BOOST
# Add LLM model match bonus
has_llm_field = metadata.get("has_llm_model_field", False)
if has_llm_field and _matches_llm_model(block.input_schema, normalized_query):
final_score += 20
# Add exact/prefix match bonus for deterministic tie-breaking
name = block_info.name.lower()
if name == normalized_query:
final_score += 30
elif name.startswith(normalized_query):
final_score += 15
# Track counts
filter_type: FilterType = "integrations" if is_integration else "blocks"
if is_integration:
integration_count += 1
else:
block_count += 1
results.append(
_ScoredItem(
item=block_info,
filter_type=filter_type,
score=final_score,
sort_key=name,
score=score,
sort_key=_get_item_name(block_info),
)
)
@@ -615,8 +472,6 @@ async def _get_static_counts():
block: AnyBlockSchema = block_type()
if block.disabled:
continue
if block.id in EXCLUDED_BLOCK_IDS:
continue
all_blocks += 1
@@ -643,25 +498,47 @@ async def _get_static_counts():
}
def _contains_type(annotation: Any, target: type) -> bool:
"""Check if an annotation is or contains the target type (handles Optional/Union/Annotated)."""
if annotation is target:
return True
origin = get_origin(annotation)
if origin is None:
return False
return any(_contains_type(arg, target) for arg in get_args(annotation))
def _matches_llm_model(schema_cls: type[BlockSchema], query: str) -> bool:
for field in schema_cls.model_fields.values():
if _contains_type(field.annotation, LlmModel):
if field.annotation == LlmModel:
# Check if query matches any value in llm_models
if any(query in name for name in llm_models):
return True
return False
def _score_block(
block: AnyBlockSchema,
block_info: BlockInfo,
normalized_query: str,
) -> float:
if not normalized_query:
return 0.0
name = block_info.name.lower()
description = block_info.description.lower()
score = _score_primary_fields(name, description, normalized_query)
category_text = " ".join(
category.get("category", "").lower() for category in block_info.categories
)
score += _score_additional_field(category_text, normalized_query, 12, 6)
credentials_info = block.input_schema.get_credentials_fields_info().values()
provider_names = [
provider.value.lower()
for info in credentials_info
for provider in info.provider
]
provider_text = " ".join(provider_names)
score += _score_additional_field(provider_text, normalized_query, 15, 6)
if _matches_llm_model(block.input_schema, normalized_query):
score += 20
return score
def _score_library_agent(
agent: library_model.LibraryAgent,
normalized_query: str,
@@ -768,20 +645,31 @@ def _get_all_providers() -> dict[ProviderName, Provider]:
return providers
@cached(ttl_seconds=3600, shared_cache=True)
@cached(ttl_seconds=3600)
async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]:
"""Return the most-executed blocks from the last 14 days.
suggested_blocks = []
# Sum the number of executions for each block type
# Prisma cannot group by nested relations, so we do a raw query
# Calculate the cutoff timestamp
timestamp_threshold = datetime.now(timezone.utc) - timedelta(days=30)
Queries the mv_suggested_blocks materialized view (refreshed hourly via pg_cron)
and returns the top `count` blocks sorted by execution count, excluding
Input/Output/Agent block types and blocks in EXCLUDED_BLOCK_IDS.
"""
results = await mv_suggested_blocks.prisma().find_many()
results = await query_raw_with_schema(
"""
SELECT
agent_node."agentBlockId" AS block_id,
COUNT(execution.id) AS execution_count
FROM {schema_prefix}"AgentNodeExecution" execution
JOIN {schema_prefix}"AgentNode" agent_node ON execution."agentNodeId" = agent_node.id
WHERE execution."endedTime" >= $1::timestamp
GROUP BY agent_node."agentBlockId"
ORDER BY execution_count DESC;
""",
timestamp_threshold,
)
# Get the top blocks based on execution count
# But ignore Input, Output, Agent, and excluded blocks
# But ignore Input and Output blocks
blocks: list[tuple[BlockInfo, int]] = []
execution_counts = {row.block_id: row.execution_count for row in results}
for block_type in load_all_blocks().values():
block: AnyBlockSchema = block_type()
@@ -791,9 +679,11 @@ async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]:
BlockType.AGENT,
):
continue
if block.id in EXCLUDED_BLOCK_IDS:
continue
execution_count = execution_counts.get(block.id, 0)
# Find the execution count for this block
execution_count = next(
(row["execution_count"] for row in results if row["block_id"] == block.id),
0,
)
blocks.append((block.get_info(), execution_count))
# Sort blocks by execution count
blocks.sort(key=lambda x: x[1], reverse=True)

View File

@@ -27,6 +27,7 @@ class SearchEntry(BaseModel):
# Suggestions
class SuggestionsResponse(BaseModel):
otto_suggestions: list[str]
recent_searches: list[SearchEntry]
providers: list[ProviderName]
top_blocks: list[BlockInfo]

View File

@@ -1,5 +1,5 @@
import logging
from typing import Annotated, Sequence, cast, get_args
from typing import Annotated, Sequence
import fastapi
from autogpt_libs.auth.dependencies import get_user_id, requires_user
@@ -10,8 +10,6 @@ from backend.util.models import Pagination
from . import db as builder_db
from . import model as builder_model
VALID_FILTER_VALUES = get_args(builder_model.FilterType)
logger = logging.getLogger(__name__)
router = fastapi.APIRouter(
@@ -51,6 +49,11 @@ async def get_suggestions(
Get all suggestions for the Blocks Menu.
"""
return builder_model.SuggestionsResponse(
otto_suggestions=[
"What blocks do I need to get started?",
"Help me create a list",
"Help me feed my data to Google Maps",
],
recent_searches=await builder_db.get_recent_searches(user_id),
providers=[
ProviderName.TWITTER,
@@ -148,7 +151,7 @@ async def get_providers(
async def search(
user_id: Annotated[str, fastapi.Security(get_user_id)],
search_query: Annotated[str | None, fastapi.Query()] = None,
filter: Annotated[str | None, fastapi.Query()] = None,
filter: Annotated[list[builder_model.FilterType] | None, fastapi.Query()] = None,
search_id: Annotated[str | None, fastapi.Query()] = None,
by_creator: Annotated[list[str] | None, fastapi.Query()] = None,
page: Annotated[int, fastapi.Query()] = 1,
@@ -157,20 +160,9 @@ async def search(
"""
Search for blocks (including integrations), marketplace agents, and user library agents.
"""
# Parse and validate filter parameter
filters: list[builder_model.FilterType]
if filter:
filter_values = [f.strip() for f in filter.split(",")]
invalid_filters = [f for f in filter_values if f not in VALID_FILTER_VALUES]
if invalid_filters:
raise fastapi.HTTPException(
status_code=400,
detail=f"Invalid filter value(s): {', '.join(invalid_filters)}. "
f"Valid values are: {', '.join(VALID_FILTER_VALUES)}",
)
filters = cast(list[builder_model.FilterType], filter_values)
else:
filters = [
# If no filters are provided, then we will return all types
if not filter:
filter = [
"blocks",
"integrations",
"marketplace_agents",
@@ -182,7 +174,7 @@ async def search(
cached_results = await builder_db.get_sorted_search_results(
user_id=user_id,
search_query=search_query,
filters=filters,
filters=filter,
by_creator=by_creator,
)
@@ -204,7 +196,7 @@ async def search(
user_id,
builder_model.SearchEntry(
search_query=search_query,
filter=filters,
filter=filter,
by_creator=by_creator,
search_id=search_id,
),

View File

@@ -621,10 +621,11 @@ async def resume_session_stream(
if not active_session:
return Response(status_code=204)
# Always replay from the beginning ("0-0") on resume.
# We can't use last_message_id because it's the latest ID in the backend
# stream, not the latest the frontend received — the gap causes lost
# messages. The frontend deduplicates replayed content.
# Subscribe from the beginning ("0-0") to replay all chunks for this turn.
# This is necessary because hydrated messages filter out incomplete tool calls
# to avoid "No tool invocation found" errors. The resume stream delivers
# those tool calls fresh with proper SDK state.
# The AI SDK's deduplication will handle any duplicate chunks.
subscriber_queue = await stream_registry.subscribe_to_session(
session_id=session_id,
user_id=user_id,

File diff suppressed because it is too large Load Diff

View File

@@ -144,7 +144,6 @@ async def test_add_agent_to_library(mocker):
)
mock_library_agent = mocker.patch("prisma.models.LibraryAgent.prisma")
mock_library_agent.return_value.find_first = mocker.AsyncMock(return_value=None)
mock_library_agent.return_value.find_unique = mocker.AsyncMock(return_value=None)
mock_library_agent.return_value.create = mocker.AsyncMock(
return_value=mock_library_agent_data
@@ -179,6 +178,7 @@ async def test_add_agent_to_library(mocker):
"agentGraphVersion": 1,
}
},
include={"AgentGraph": True},
)
# Check that create was called with the expected data including settings
create_call_args = mock_library_agent.return_value.create.call_args

View File

@@ -1,10 +0,0 @@
class FolderValidationError(Exception):
"""Raised when folder operations fail validation."""
pass
class FolderAlreadyExistsError(FolderValidationError):
"""Raised when a folder with the same name already exists in the location."""
pass

View File

@@ -26,95 +26,6 @@ class LibraryAgentStatus(str, Enum):
ERROR = "ERROR"
# === Folder Models ===
class LibraryFolder(pydantic.BaseModel):
"""Represents a folder for organizing library agents."""
id: str
user_id: str
name: str
icon: str | None = None
color: str | None = None
parent_id: str | None = None
created_at: datetime.datetime
updated_at: datetime.datetime
agent_count: int = 0 # Direct agents in folder
subfolder_count: int = 0 # Direct child folders
@staticmethod
def from_db(
folder: prisma.models.LibraryFolder,
agent_count: int = 0,
subfolder_count: int = 0,
) -> "LibraryFolder":
"""Factory method that constructs a LibraryFolder from a Prisma model."""
return LibraryFolder(
id=folder.id,
user_id=folder.userId,
name=folder.name,
icon=folder.icon,
color=folder.color,
parent_id=folder.parentId,
created_at=folder.createdAt,
updated_at=folder.updatedAt,
agent_count=agent_count,
subfolder_count=subfolder_count,
)
class LibraryFolderTree(LibraryFolder):
"""Folder with nested children for tree view."""
children: list["LibraryFolderTree"] = []
class FolderCreateRequest(pydantic.BaseModel):
"""Request model for creating a folder."""
name: str = pydantic.Field(..., min_length=1, max_length=100)
icon: str | None = None
color: str | None = pydantic.Field(
None, pattern=r"^#[0-9A-Fa-f]{6}$", description="Hex color code (#RRGGBB)"
)
parent_id: str | None = None
class FolderUpdateRequest(pydantic.BaseModel):
"""Request model for updating a folder."""
name: str | None = pydantic.Field(None, min_length=1, max_length=100)
icon: str | None = None
color: str | None = None
class FolderMoveRequest(pydantic.BaseModel):
"""Request model for moving a folder to a new parent."""
target_parent_id: str | None = None # None = move to root
class BulkMoveAgentsRequest(pydantic.BaseModel):
"""Request model for moving multiple agents to a folder."""
agent_ids: list[str]
folder_id: str | None = None # None = move to root
class FolderListResponse(pydantic.BaseModel):
"""Response schema for a list of folders."""
folders: list[LibraryFolder]
pagination: Pagination
class FolderTreeResponse(pydantic.BaseModel):
"""Response schema for folder tree structure."""
tree: list[LibraryFolderTree]
class MarketplaceListingCreator(pydantic.BaseModel):
"""Creator information for a marketplace listing."""
@@ -209,9 +120,6 @@ class LibraryAgent(pydantic.BaseModel):
can_access_graph: bool
is_latest_version: bool
is_favorite: bool
folder_id: str | None = None
folder_name: str | None = None # Denormalized for display
recommended_schedule_cron: str | None = None
settings: GraphSettings = pydantic.Field(default_factory=GraphSettings)
marketplace_listing: Optional["MarketplaceListing"] = None
@@ -351,8 +259,6 @@ class LibraryAgent(pydantic.BaseModel):
can_access_graph=can_access_graph,
is_latest_version=is_latest_version,
is_favorite=agent.isFavorite,
folder_id=agent.folderId,
folder_name=agent.Folder.name if agent.Folder else None,
recommended_schedule_cron=agent.AgentGraph.recommendedScheduleCron,
settings=_parse_settings(agent.settings),
marketplace_listing=marketplace_listing_data,
@@ -564,7 +470,3 @@ class LibraryAgentUpdateRequest(pydantic.BaseModel):
settings: Optional[GraphSettings] = pydantic.Field(
default=None, description="User-specific settings for this library agent"
)
folder_id: Optional[str] = pydantic.Field(
default=None,
description="Folder ID to move agent to (None to move to root)",
)

View File

@@ -1,11 +1,9 @@
import fastapi
from .agents import router as agents_router
from .folders import router as folders_router
from .presets import router as presets_router
router = fastapi.APIRouter()
router.include_router(presets_router)
router.include_router(folders_router)
router.include_router(agents_router)

View File

@@ -41,14 +41,6 @@ async def list_library_agents(
ge=1,
description="Number of agents per page (must be >= 1)",
),
folder_id: Optional[str] = Query(
None,
description="Filter by folder ID",
),
include_root_only: bool = Query(
False,
description="Only return agents without a folder (root-level agents)",
),
) -> library_model.LibraryAgentResponse:
"""
Get all agents in the user's library (both created and saved).
@@ -59,8 +51,6 @@ async def list_library_agents(
sort_by=sort_by,
page=page,
page_size=page_size,
folder_id=folder_id,
include_root_only=include_root_only,
)
@@ -178,7 +168,6 @@ async def update_library_agent(
is_favorite=payload.is_favorite,
is_archived=payload.is_archived,
settings=payload.settings,
folder_id=payload.folder_id,
)

View File

@@ -1,287 +0,0 @@
from typing import Optional
import autogpt_libs.auth as autogpt_auth_lib
from fastapi import APIRouter, Query, Security, status
from fastapi.responses import Response
from .. import db as library_db
from .. import model as library_model
router = APIRouter(
prefix="/folders",
tags=["library", "folders", "private"],
dependencies=[Security(autogpt_auth_lib.requires_user)],
)
@router.get(
"",
summary="List Library Folders",
response_model=library_model.FolderListResponse,
responses={
200: {"description": "List of folders"},
500: {"description": "Server error"},
},
)
async def list_folders(
user_id: str = Security(autogpt_auth_lib.get_user_id),
parent_id: Optional[str] = Query(
None,
description="Filter by parent folder ID. If not provided, returns root-level folders.",
),
include_relations: bool = Query(
True,
description="Include agent and subfolder relations (for counts)",
),
) -> library_model.FolderListResponse:
"""
List folders for the authenticated user.
Args:
user_id: ID of the authenticated user.
parent_id: Optional parent folder ID to filter by.
include_relations: Whether to include agent and subfolder relations for counts.
Returns:
A FolderListResponse containing folders.
"""
folders = await library_db.list_folders(
user_id=user_id,
parent_id=parent_id,
include_relations=include_relations,
)
return library_model.FolderListResponse(
folders=folders,
pagination=library_model.Pagination(
total_items=len(folders),
total_pages=1,
current_page=1,
page_size=len(folders),
),
)
@router.get(
"/tree",
summary="Get Folder Tree",
response_model=library_model.FolderTreeResponse,
responses={
200: {"description": "Folder tree structure"},
500: {"description": "Server error"},
},
)
async def get_folder_tree(
user_id: str = Security(autogpt_auth_lib.get_user_id),
) -> library_model.FolderTreeResponse:
"""
Get the full folder tree for the authenticated user.
Args:
user_id: ID of the authenticated user.
Returns:
A FolderTreeResponse containing the nested folder structure.
"""
tree = await library_db.get_folder_tree(user_id=user_id)
return library_model.FolderTreeResponse(tree=tree)
@router.get(
    "/{folder_id}",
    summary="Get Folder",
    response_model=library_model.LibraryFolder,
    responses={
        200: {"description": "Folder details"},
        404: {"description": "Folder not found"},
        500: {"description": "Server error"},
    },
)
async def get_folder(
    folder_id: str,
    user_id: str = Security(autogpt_auth_lib.get_user_id),
) -> library_model.LibraryFolder:
    """
    Fetch a single folder owned by the authenticated user.

    Args:
        folder_id: ID of the folder to retrieve.
        user_id: ID of the authenticated user.

    Returns:
        The requested LibraryFolder.
    """
    folder = await library_db.get_folder(folder_id=folder_id, user_id=user_id)
    return folder
@router.post(
    "",
    summary="Create Folder",
    status_code=status.HTTP_201_CREATED,
    response_model=library_model.LibraryFolder,
    responses={
        201: {"description": "Folder created successfully"},
        400: {"description": "Validation error"},
        404: {"description": "Parent folder not found"},
        409: {"description": "Folder name conflict"},
        500: {"description": "Server error"},
    },
)
async def create_folder(
    payload: library_model.FolderCreateRequest,
    user_id: str = Security(autogpt_auth_lib.get_user_id),
) -> library_model.LibraryFolder:
    """
    Create a new folder for the authenticated user.

    Args:
        payload: Folder creation request (name, optional parent/icon/color).
        user_id: ID of the authenticated user.

    Returns:
        The newly created LibraryFolder.
    """
    new_folder = await library_db.create_folder(
        user_id=user_id,
        name=payload.name,
        parent_id=payload.parent_id,
        icon=payload.icon,
        color=payload.color,
    )
    return new_folder
@router.patch(
    "/{folder_id}",
    summary="Update Folder",
    response_model=library_model.LibraryFolder,
    responses={
        200: {"description": "Folder updated successfully"},
        400: {"description": "Validation error"},
        404: {"description": "Folder not found"},
        409: {"description": "Folder name conflict"},
        500: {"description": "Server error"},
    },
)
async def update_folder(
    folder_id: str,
    payload: library_model.FolderUpdateRequest,
    user_id: str = Security(autogpt_auth_lib.get_user_id),
) -> library_model.LibraryFolder:
    """
    Update a folder's name, icon, and/or color.

    Args:
        folder_id: ID of the folder to update.
        payload: Fields to change; unset fields are left as-is by the DB layer.
        user_id: ID of the authenticated user.

    Returns:
        The updated LibraryFolder.
    """
    updated = await library_db.update_folder(
        folder_id=folder_id,
        user_id=user_id,
        name=payload.name,
        icon=payload.icon,
        color=payload.color,
    )
    return updated
@router.post(
    "/{folder_id}/move",
    summary="Move Folder",
    response_model=library_model.LibraryFolder,
    responses={
        200: {"description": "Folder moved successfully"},
        400: {"description": "Validation error (circular reference)"},
        404: {"description": "Folder or target parent not found"},
        409: {"description": "Folder name conflict in target location"},
        500: {"description": "Server error"},
    },
)
async def move_folder(
    folder_id: str,
    payload: library_model.FolderMoveRequest,
    user_id: str = Security(autogpt_auth_lib.get_user_id),
) -> library_model.LibraryFolder:
    """
    Re-parent a folder under a new target parent.

    Args:
        folder_id: ID of the folder to move.
        payload: Move request containing the target parent folder ID.
        user_id: ID of the authenticated user.

    Returns:
        The moved LibraryFolder.
    """
    moved = await library_db.move_folder(
        folder_id=folder_id,
        user_id=user_id,
        target_parent_id=payload.target_parent_id,
    )
    return moved
@router.delete(
    "/{folder_id}",
    summary="Delete Folder",
    status_code=status.HTTP_204_NO_CONTENT,
    responses={
        204: {"description": "Folder deleted successfully"},
        404: {"description": "Folder not found"},
        500: {"description": "Server error"},
    },
)
async def delete_folder(
    folder_id: str,
    user_id: str = Security(autogpt_auth_lib.get_user_id),
) -> Response:
    """
    Soft-delete a folder together with its contents.

    Args:
        folder_id: ID of the folder to delete.
        user_id: ID of the authenticated user.

    Returns:
        An empty 204 No Content response on success.
    """
    # soft_delete=True hides the folder rather than removing the rows outright.
    await library_db.delete_folder(
        folder_id=folder_id,
        user_id=user_id,
        soft_delete=True,
    )
    return Response(status_code=status.HTTP_204_NO_CONTENT)
# === Bulk Agent Operations ===
@router.post(
    "/agents/bulk-move",
    summary="Bulk Move Agents",
    response_model=list[library_model.LibraryAgent],
    responses={
        200: {"description": "Agents moved successfully"},
        404: {"description": "Folder not found"},
        500: {"description": "Server error"},
    },
)
async def bulk_move_agents(
    payload: library_model.BulkMoveAgentsRequest,
    user_id: str = Security(autogpt_auth_lib.get_user_id),
) -> list[library_model.LibraryAgent]:
    """
    Move several library agents into a folder in one request.

    Args:
        payload: Bulk move request (agent IDs plus the destination folder).
        user_id: ID of the authenticated user.

    Returns:
        The updated LibraryAgent records.
    """
    moved_agents = await library_db.bulk_move_agents_to_folder(
        agent_ids=payload.agent_ids,
        folder_id=payload.folder_id,
        user_id=user_id,
    )
    return moved_agents

View File

@@ -115,8 +115,6 @@ async def test_get_library_agents_success(
sort_by=library_model.LibraryAgentSort.UPDATED_AT,
page=1,
page_size=15,
folder_id=None,
include_root_only=False,
)

View File

@@ -9,26 +9,15 @@ import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass
from pathlib import Path
from typing import Any, get_args, get_origin
from typing import Any
from prisma.enums import ContentType
from backend.blocks.llm import LlmModel
from backend.data.db import query_raw_with_schema
logger = logging.getLogger(__name__)
def _contains_type(annotation: Any, target: type) -> bool:
"""Check if an annotation is or contains the target type (handles Optional/Union/Annotated)."""
if annotation is target:
return True
origin = get_origin(annotation)
if origin is None:
return False
return any(_contains_type(arg, target) for arg in get_args(annotation))
@dataclass
class ContentItem:
"""Represents a piece of content to be embedded."""
@@ -199,51 +188,45 @@ class BlockHandler(ContentHandler):
try:
block_instance = block_cls()
# Skip disabled blocks - they shouldn't be indexed
if block_instance.disabled:
continue
# Build searchable text from block metadata
parts = []
if block_instance.name:
if hasattr(block_instance, "name") and block_instance.name:
parts.append(block_instance.name)
if block_instance.description:
if (
hasattr(block_instance, "description")
and block_instance.description
):
parts.append(block_instance.description)
if block_instance.categories:
if hasattr(block_instance, "categories") and block_instance.categories:
# Convert BlockCategory enum to strings
parts.append(
" ".join(str(cat.value) for cat in block_instance.categories)
)
# Add input schema field descriptions
block_input_fields = block_instance.input_schema.model_fields
parts += [
f"{field_name}: {field_info.description}"
for field_name, field_info in block_input_fields.items()
if field_info.description
]
# Add input/output schema info
if hasattr(block_instance, "input_schema"):
schema = block_instance.input_schema
if hasattr(schema, "model_json_schema"):
schema_dict = schema.model_json_schema()
if "properties" in schema_dict:
for prop_name, prop_info in schema_dict[
"properties"
].items():
if "description" in prop_info:
parts.append(
f"{prop_name}: {prop_info['description']}"
)
searchable_text = " ".join(parts)
# Convert categories set of enums to list of strings for JSON serialization
categories = getattr(block_instance, "categories", set())
categories_list = (
[cat.value for cat in block_instance.categories]
if block_instance.categories
else []
)
# Extract provider names from credentials fields
credentials_info = (
block_instance.input_schema.get_credentials_fields_info()
)
is_integration = len(credentials_info) > 0
provider_names = [
provider.value.lower()
for info in credentials_info.values()
for provider in info.provider
]
# Check if block has LlmModel field in input schema
has_llm_model_field = any(
_contains_type(field.annotation, LlmModel)
for field in block_instance.input_schema.model_fields.values()
[cat.value for cat in categories] if categories else []
)
items.append(
@@ -252,11 +235,8 @@ class BlockHandler(ContentHandler):
content_type=ContentType.BLOCK,
searchable_text=searchable_text,
metadata={
"name": block_instance.name,
"name": getattr(block_instance, "name", ""),
"categories": categories_list,
"providers": provider_names,
"has_llm_model_field": has_llm_model_field,
"is_integration": is_integration,
},
user_id=None, # Blocks are public
)

View File

@@ -82,10 +82,9 @@ async def test_block_handler_get_missing_items(mocker):
mock_block_instance.description = "Performs calculations"
mock_block_instance.categories = [MagicMock(value="MATH")]
mock_block_instance.disabled = False
mock_field = MagicMock()
mock_field.description = "Math expression to evaluate"
mock_block_instance.input_schema.model_fields = {"expression": mock_field}
mock_block_instance.input_schema.get_credentials_fields_info.return_value = {}
mock_block_instance.input_schema.model_json_schema.return_value = {
"properties": {"expression": {"description": "Math expression to evaluate"}}
}
mock_block_class.return_value = mock_block_instance
mock_blocks = {"block-uuid-1": mock_block_class}
@@ -310,19 +309,19 @@ async def test_content_handlers_registry():
@pytest.mark.asyncio(loop_scope="session")
async def test_block_handler_handles_empty_attributes():
"""Test BlockHandler handles blocks with empty/falsy attribute values."""
async def test_block_handler_handles_missing_attributes():
"""Test BlockHandler gracefully handles blocks with missing attributes."""
handler = BlockHandler()
# Mock block with empty values (all attributes exist but are falsy)
# Mock block with minimal attributes
mock_block_class = MagicMock()
mock_block_instance = MagicMock()
mock_block_instance.name = "Minimal Block"
mock_block_instance.disabled = False
mock_block_instance.description = ""
mock_block_instance.categories = set()
mock_block_instance.input_schema.model_fields = {}
mock_block_instance.input_schema.get_credentials_fields_info.return_value = {}
# No description, categories, or schema
del mock_block_instance.description
del mock_block_instance.categories
del mock_block_instance.input_schema
mock_block_class.return_value = mock_block_instance
mock_blocks = {"block-minimal": mock_block_class}
@@ -353,8 +352,6 @@ async def test_block_handler_skips_failed_blocks():
good_instance.description = "Works fine"
good_instance.categories = []
good_instance.disabled = False
good_instance.input_schema.model_fields = {}
good_instance.input_schema.get_credentials_fields_info.return_value = {}
good_block.return_value = good_instance
bad_block = MagicMock()

View File

@@ -41,10 +41,6 @@ import backend.data.user
import backend.integrations.webhooks.utils
import backend.util.service
import backend.util.settings
from backend.api.features.library.exceptions import (
FolderAlreadyExistsError,
FolderValidationError,
)
from backend.blocks.llm import DEFAULT_LLM_MODEL
from backend.data.model import Credentials
from backend.integrations.providers import ProviderName
@@ -265,10 +261,6 @@ async def validation_error_handler(
app.add_exception_handler(PrismaError, handle_internal_http_error(500))
app.add_exception_handler(
FolderAlreadyExistsError, handle_internal_http_error(409, False)
)
app.add_exception_handler(FolderValidationError, handle_internal_http_error(400, False))
app.add_exception_handler(NotFoundError, handle_internal_http_error(404, False))
app.add_exception_handler(NotAuthorizedError, handle_internal_http_error(403, False))
app.add_exception_handler(RequestValidationError, validation_error_handler)

View File

@@ -1,182 +0,0 @@
"""
Telegram Bot API helper functions.
Provides utilities for making authenticated requests to the Telegram Bot API.
"""
import logging
from io import BytesIO
from typing import Any, Optional
from pydantic import BaseModel
from backend.data.model import APIKeyCredentials
from backend.util.request import Requests
logger = logging.getLogger(__name__)
TELEGRAM_API_BASE = "https://api.telegram.org"
class TelegramMessageResult(BaseModel, extra="allow"):
    """Result from Telegram send/edit message API calls."""

    # Zero-value defaults let partial API payloads validate; extra="allow"
    # keeps any additional fields Telegram returns in the result object.
    message_id: int = 0
    chat: dict[str, Any] = {}
    date: int = 0
    text: str = ""
class TelegramFileResult(BaseModel, extra="allow"):
    """Result from Telegram getFile API call."""

    # Mirrors Telegram's File object; file_path is needed to build a
    # download URL and may be empty if the API did not return one.
    file_id: str = ""
    file_unique_id: str = ""
    file_size: int = 0
    file_path: str = ""
class TelegramAPIException(ValueError):
    """Raised when the Telegram Bot API reports an error response.

    Attributes:
        error_code: Numeric Telegram error code (0 when not provided).
    """

    def __init__(self, message: str, error_code: int = 0):
        """Record the error code and delegate the message to ValueError."""
        self.error_code = error_code
        super().__init__(message)
def get_bot_api_url(bot_token: str, method: str) -> str:
    """Build the full Bot API endpoint URL for the given method name."""
    return TELEGRAM_API_BASE + f"/bot{bot_token}/{method}"
def get_file_url(bot_token: str, file_path: str) -> str:
    """Build the direct download URL for a file path returned by getFile."""
    return TELEGRAM_API_BASE + f"/file/bot{bot_token}/{file_path}"
async def call_telegram_api(
    credentials: APIKeyCredentials,
    method: str,
    data: Optional[dict[str, Any]] = None,
) -> TelegramMessageResult:
    """
    Make a JSON request to the Telegram Bot API.

    Args:
        credentials: Bot token credentials.
        method: API method name (e.g., "sendMessage", "getFile").
        data: Request parameters; defaults to an empty body.

    Returns:
        The parsed API result.

    Raises:
        TelegramAPIException: If the API response has ok != true.
    """
    bot_token = credentials.api_key.get_secret_value()
    endpoint = get_bot_api_url(bot_token, method)
    response = await Requests().post(endpoint, json=data or {})
    body = response.json()
    # Telegram wraps every response in {"ok": bool, ...}; surface failures
    # as a typed exception carrying the numeric error code.
    if body.get("ok"):
        return TelegramMessageResult(**body.get("result", {}))
    raise TelegramAPIException(
        body.get("description", "Unknown error"),
        body.get("error_code", 0),
    )
async def call_telegram_api_with_file(
    credentials: APIKeyCredentials,
    method: str,
    file_field: str,
    file_data: bytes,
    filename: str,
    content_type: str,
    data: Optional[dict[str, Any]] = None,
) -> TelegramMessageResult:
    """
    Upload a file to the Telegram Bot API via multipart/form-data.

    Args:
        credentials: Bot token credentials.
        method: API method name (e.g., "sendPhoto", "sendVoice").
        file_field: Form field name for the file (e.g., "photo", "voice").
        file_data: Raw file bytes.
        filename: Filename to report for the upload.
        content_type: MIME type of the file.
        data: Additional form parameters.

    Returns:
        The parsed API result.

    Raises:
        TelegramAPIException: If the API response has ok != true.
    """
    bot_token = credentials.api_key.get_secret_value()
    endpoint = get_bot_api_url(bot_token, method)
    # Wrap the raw bytes in BytesIO so the HTTP client can stream the part.
    upload = [(file_field, (filename, BytesIO(file_data), content_type))]
    response = await Requests().post(endpoint, files=upload, data=data or {})
    body = response.json()
    if body.get("ok"):
        return TelegramMessageResult(**body.get("result", {}))
    raise TelegramAPIException(
        body.get("description", "Unknown error"),
        body.get("error_code", 0),
    )
async def get_file_info(
    credentials: APIKeyCredentials, file_id: str
) -> TelegramFileResult:
    """
    Fetch file metadata via Telegram's getFile method.

    Args:
        credentials: Bot token credentials.
        file_id: Telegram file_id from a message.

    Returns:
        A TelegramFileResult with file_id, file_unique_id, file_size, file_path.
    """
    # getFile returns through the generic message-result path; re-validate the
    # payload as a TelegramFileResult to expose file-specific fields.
    raw_result = await call_telegram_api(credentials, "getFile", {"file_id": file_id})
    return TelegramFileResult(**raw_result.model_dump())
async def get_file_download_url(credentials: APIKeyCredentials, file_id: str) -> str:
    """
    Resolve a Telegram file_id into a full download URL.

    Args:
        credentials: Bot token credentials.
        file_id: Telegram file_id from a message.

    Returns:
        Full download URL for the file.

    Raises:
        TelegramAPIException: If getFile returned no file_path.
    """
    info = await get_file_info(credentials, file_id)
    if not info.file_path:
        raise TelegramAPIException("No file_path returned from getFile")
    return get_file_url(credentials.api_key.get_secret_value(), info.file_path)
async def download_telegram_file(credentials: APIKeyCredentials, file_id: str) -> bytes:
    """
    Download a file's content from Telegram servers.

    Args:
        credentials: Bot token credentials.
        file_id: Telegram file_id.

    Returns:
        The file content as bytes.
    """
    download_url = await get_file_download_url(credentials, file_id)
    fetched = await Requests().get(download_url)
    return fetched.content

View File

@@ -1,43 +0,0 @@
"""
Telegram Bot credentials handling.
Telegram bots use an API key (bot token) obtained from @BotFather.
"""
from typing import Literal
from pydantic import SecretStr
from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
from backend.integrations.providers import ProviderName
# Bot token credentials (API key style)
TelegramCredentials = APIKeyCredentials
TelegramCredentialsInput = CredentialsMetaInput[
Literal[ProviderName.TELEGRAM], Literal["api_key"]
]
def TelegramCredentialsField() -> TelegramCredentialsInput:
    """Create the credentials input field for a Telegram bot token."""
    field_description = (
        "Telegram Bot API token from @BotFather. "
        "Create a bot at https://t.me/BotFather to get your token."
    )
    return CredentialsField(description=field_description)
# Test credentials for unit tests
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="telegram",
api_key=SecretStr("test_telegram_bot_token"),
title="Mock Telegram Bot Token",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.title,
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,377 +0,0 @@
"""
Telegram trigger blocks for receiving messages via webhooks.
"""
import logging
from pydantic import BaseModel
from backend.blocks._base import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
BlockWebhookConfig,
)
from backend.data.model import SchemaField
from backend.integrations.providers import ProviderName
from backend.integrations.webhooks.telegram import TelegramWebhookType
from ._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
TelegramCredentialsField,
TelegramCredentialsInput,
)
logger = logging.getLogger(__name__)
# Example payload for testing
EXAMPLE_MESSAGE_PAYLOAD = {
"update_id": 123456789,
"message": {
"message_id": 1,
"from": {
"id": 12345678,
"is_bot": False,
"first_name": "John",
"last_name": "Doe",
"username": "johndoe",
"language_code": "en",
},
"chat": {
"id": 12345678,
"first_name": "John",
"last_name": "Doe",
"username": "johndoe",
"type": "private",
},
"date": 1234567890,
"text": "Hello, bot!",
},
}
class TelegramTriggerBase:
    """Base class for Telegram trigger blocks."""

    class Input(BlockSchemaInput):
        # Shared inputs for all Telegram triggers: bot credentials plus the
        # raw webhook update, which is hidden from the builder UI.
        credentials: TelegramCredentialsInput = TelegramCredentialsField()
        payload: dict = SchemaField(hidden=True, default_factory=dict)
class TelegramMessageTriggerBlock(TelegramTriggerBase, Block):
    """
    Triggers when a message is received or edited in your Telegram bot.
    Supports text, photos, voice messages, audio files, documents, and videos.
    Connect the outputs to other blocks to process messages and send responses.
    """

    class Input(TelegramTriggerBase.Input):
        class EventsFilter(BaseModel):
            """Filter for message types to receive."""

            # One flag per Telegram message subtype; only text is on by default.
            text: bool = True
            photo: bool = False
            voice: bool = False
            audio: bool = False
            document: bool = False
            video: bool = False
            edited_message: bool = False

        events: EventsFilter = SchemaField(
            title="Message Types", description="Types of messages to receive"
        )

    class Output(BlockSchemaOutput):
        payload: dict = SchemaField(
            description="The complete webhook payload from Telegram"
        )
        chat_id: int = SchemaField(
            description="The chat ID where the message was received. "
            "Use this to send replies."
        )
        message_id: int = SchemaField(description="The unique message ID")
        user_id: int = SchemaField(description="The user ID who sent the message")
        username: str = SchemaField(description="Username of the sender (may be empty)")
        first_name: str = SchemaField(description="First name of the sender")
        event: str = SchemaField(
            description="The message type (text, photo, voice, audio, etc.)"
        )
        text: str = SchemaField(
            description="Text content of the message (for text messages)"
        )
        photo_file_id: str = SchemaField(
            description="File ID of the photo (for photo messages). "
            "Use GetTelegramFileBlock to download."
        )
        voice_file_id: str = SchemaField(
            description="File ID of the voice message (for voice messages). "
            "Use GetTelegramFileBlock to download."
        )
        audio_file_id: str = SchemaField(
            description="File ID of the audio file (for audio messages). "
            "Use GetTelegramFileBlock to download."
        )
        file_id: str = SchemaField(
            description="File ID for document/video messages. "
            "Use GetTelegramFileBlock to download."
        )
        file_name: str = SchemaField(
            description="Original filename (for document/audio messages)"
        )
        caption: str = SchemaField(description="Caption for media messages")
        is_edited: bool = SchemaField(
            description="Whether this is an edit of a previously sent message"
        )

    def __init__(self):
        super().__init__(
            id="4435e4e0-df6e-4301-8f35-ad70b12fc9ec",
            description="Triggers when a message is received or edited in your Telegram bot. "
            "Supports text, photos, voice messages, audio files, documents, and videos.",
            categories={BlockCategory.SOCIAL},
            input_schema=TelegramMessageTriggerBlock.Input,
            output_schema=TelegramMessageTriggerBlock.Output,
            webhook_config=BlockWebhookConfig(
                provider=ProviderName.TELEGRAM,
                webhook_type=TelegramWebhookType.BOT,
                resource_format="bot",
                event_filter_input="events",
                event_format="message.{event}",
            ),
            test_input={
                "events": {"text": True, "photo": True},
                "credentials": TEST_CREDENTIALS_INPUT,
                "payload": EXAMPLE_MESSAGE_PAYLOAD,
            },
            test_credentials=TEST_CREDENTIALS,
            # NOTE: test_output order must match the yield order in run().
            test_output=[
                ("payload", EXAMPLE_MESSAGE_PAYLOAD),
                ("chat_id", 12345678),
                ("message_id", 1),
                ("user_id", 12345678),
                ("username", "johndoe"),
                ("first_name", "John"),
                ("is_edited", False),
                ("event", "text"),
                ("text", "Hello, bot!"),
                ("photo_file_id", ""),
                ("voice_file_id", ""),
                ("audio_file_id", ""),
                ("file_id", ""),
                ("file_name", ""),
                ("caption", ""),
            ],
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        """Dispatch the webhook payload to typed outputs by message kind.

        Common identity fields are yielded first; then exactly one content
        branch fires, yielding the same set of content outputs so downstream
        blocks always receive every pin (unused ones are empty strings).
        """
        payload = input_data.payload
        # Telegram delivers edits under "edited_message" instead of "message".
        is_edited = "edited_message" in payload
        message = payload.get("message") or payload.get("edited_message", {})
        # Extract common fields
        chat = message.get("chat", {})
        sender = message.get("from", {})
        yield "payload", payload
        yield "chat_id", chat.get("id", 0)
        yield "message_id", message.get("message_id", 0)
        yield "user_id", sender.get("id", 0)
        yield "username", sender.get("username", "")
        yield "first_name", sender.get("first_name", "")
        yield "is_edited", is_edited
        # For edited messages, yield event as "edited_message" and extract
        # all content fields from the edited message body
        if is_edited:
            yield "event", "edited_message"
            yield "text", message.get("text", "")
            photos = message.get("photo", [])
            yield "photo_file_id", photos[-1].get("file_id", "") if photos else ""
            voice = message.get("voice", {})
            yield "voice_file_id", voice.get("file_id", "")
            audio = message.get("audio", {})
            yield "audio_file_id", audio.get("file_id", "")
            document = message.get("document", {})
            video = message.get("video", {})
            yield "file_id", (document.get("file_id", "") or video.get("file_id", ""))
            yield "file_name", (
                document.get("file_name", "") or audio.get("file_name", "")
            )
            yield "caption", message.get("caption", "")
        # Determine message type and extract content
        elif "text" in message:
            yield "event", "text"
            yield "text", message.get("text", "")
            yield "photo_file_id", ""
            yield "voice_file_id", ""
            yield "audio_file_id", ""
            yield "file_id", ""
            yield "file_name", ""
            yield "caption", ""
        elif "photo" in message:
            # Get the largest photo (last in array)
            photos = message.get("photo", [])
            photo_fid = photos[-1].get("file_id", "") if photos else ""
            yield "event", "photo"
            yield "text", ""
            yield "photo_file_id", photo_fid
            yield "voice_file_id", ""
            yield "audio_file_id", ""
            yield "file_id", ""
            yield "file_name", ""
            yield "caption", message.get("caption", "")
        elif "voice" in message:
            yield "event", "voice"
            yield "text", ""
            yield "photo_file_id", ""
            voice = message.get("voice", {})
            yield "voice_file_id", voice.get("file_id", "")
            yield "audio_file_id", ""
            yield "file_id", ""
            yield "file_name", ""
            yield "caption", message.get("caption", "")
        elif "audio" in message:
            audio = message.get("audio", {})
            yield "event", "audio"
            yield "text", ""
            yield "photo_file_id", ""
            yield "voice_file_id", ""
            yield "audio_file_id", audio.get("file_id", "")
            yield "file_id", ""
            yield "file_name", audio.get("file_name", "")
            yield "caption", message.get("caption", "")
        elif "document" in message:
            document = message.get("document", {})
            yield "event", "document"
            yield "text", ""
            yield "photo_file_id", ""
            yield "voice_file_id", ""
            yield "audio_file_id", ""
            yield "file_id", document.get("file_id", "")
            yield "file_name", document.get("file_name", "")
            yield "caption", message.get("caption", "")
        elif "video" in message:
            video = message.get("video", {})
            yield "event", "video"
            yield "text", ""
            yield "photo_file_id", ""
            yield "voice_file_id", ""
            yield "audio_file_id", ""
            yield "file_id", video.get("file_id", "")
            yield "file_name", video.get("file_name", "")
            yield "caption", message.get("caption", "")
        else:
            # Unrecognized message type: still yield every content pin, empty.
            yield "event", "other"
            yield "text", ""
            yield "photo_file_id", ""
            yield "voice_file_id", ""
            yield "audio_file_id", ""
            yield "file_id", ""
            yield "file_name", ""
            yield "caption", ""
# Example payload for reaction trigger testing
EXAMPLE_REACTION_PAYLOAD = {
"update_id": 123456790,
"message_reaction": {
"chat": {
"id": 12345678,
"first_name": "John",
"last_name": "Doe",
"username": "johndoe",
"type": "private",
},
"message_id": 42,
"user": {
"id": 12345678,
"is_bot": False,
"first_name": "John",
"username": "johndoe",
},
"date": 1234567890,
"new_reaction": [{"type": "emoji", "emoji": "👍"}],
"old_reaction": [],
},
}
class TelegramMessageReactionTriggerBlock(TelegramTriggerBase, Block):
    """
    Triggers when a reaction to a message is changed.
    Works automatically in private chats. In group chats, the bot must be
    an administrator to receive reaction updates.
    """

    class Input(TelegramTriggerBase.Input):
        # No extra inputs beyond the shared credentials + payload.
        pass

    class Output(BlockSchemaOutput):
        payload: dict = SchemaField(
            description="The complete webhook payload from Telegram"
        )
        chat_id: int = SchemaField(
            description="The chat ID where the reaction occurred"
        )
        message_id: int = SchemaField(description="The message ID that was reacted to")
        user_id: int = SchemaField(description="The user ID who changed the reaction")
        username: str = SchemaField(description="Username of the user (may be empty)")
        new_reactions: list = SchemaField(
            description="List of new reactions on the message"
        )
        old_reactions: list = SchemaField(
            description="List of previous reactions on the message"
        )

    def __init__(self):
        super().__init__(
            id="82525328-9368-4966-8f0c-cd78e80181fd",
            description="Triggers when a reaction to a message is changed. "
            "Works in private chats automatically. "
            "In groups, the bot must be an administrator.",
            categories={BlockCategory.SOCIAL},
            input_schema=TelegramMessageReactionTriggerBlock.Input,
            output_schema=TelegramMessageReactionTriggerBlock.Output,
            webhook_config=BlockWebhookConfig(
                provider=ProviderName.TELEGRAM,
                webhook_type=TelegramWebhookType.BOT,
                resource_format="bot",
                # Empty filter: this block receives all message_reaction updates.
                event_filter_input="",
                event_format="message_reaction",
            ),
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "payload": EXAMPLE_REACTION_PAYLOAD,
            },
            test_credentials=TEST_CREDENTIALS,
            # NOTE: test_output order must match the yield order in run().
            test_output=[
                ("payload", EXAMPLE_REACTION_PAYLOAD),
                ("chat_id", 12345678),
                ("message_id", 42),
                ("user_id", 12345678),
                ("username", "johndoe"),
                ("new_reactions", [{"type": "emoji", "emoji": "👍"}]),
                ("old_reactions", []),
            ],
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        """Unpack a message_reaction update into its identity and reaction pins."""
        payload = input_data.payload
        reaction = payload.get("message_reaction", {})
        chat = reaction.get("chat", {})
        user = reaction.get("user", {})
        yield "payload", payload
        yield "chat_id", chat.get("id", 0)
        yield "message_id", reaction.get("message_id", 0)
        yield "user_id", user.get("id", 0)
        yield "username", user.get("username", "")
        yield "new_reactions", reaction.get("new_reaction", [])
        yield "old_reactions", reaction.get("old_reaction", [])

View File

@@ -34,12 +34,10 @@ def main(output: Path, pretty: bool):
"""Generate and output the OpenAPI JSON specification."""
openapi_schema = get_openapi_schema()
json_output = json.dumps(
openapi_schema, indent=2 if pretty else None, ensure_ascii=False
)
json_output = json.dumps(openapi_schema, indent=2 if pretty else None)
if output:
output.write_text(json_output, encoding="utf-8")
output.write_text(json_output)
click.echo(f"✅ OpenAPI specification written to {output}\n\nPreview:")
click.echo(f"\n{json_output[:500]} ...")
else:

View File

@@ -85,7 +85,7 @@ class ChatConfig(BaseSettings):
)
claude_agent_max_subtasks: int = Field(
default=10,
description="Max number of concurrent sub-agent Tasks the SDK can run per session.",
description="Max number of sub-agent Tasks the SDK can spawn per session.",
)
claude_agent_use_resume: bool = Field(
default=True,

View File

@@ -4,7 +4,6 @@ This module contains the CoPilotExecutor class that consumes chat tasks from
RabbitMQ and processes them using a thread pool, following the graph executor pattern.
"""
import asyncio
import logging
import os
import threading
@@ -410,19 +409,14 @@ class CoPilotExecutor(AppProcess):
def on_run_done(f: Future):
logger.info(f"Run completed for {session_id}")
error_msg = None
try:
if exec_error := f.exception():
error_msg = str(exec_error) or type(exec_error).__name__
logger.error(f"Execution for {session_id} failed: {error_msg}")
logger.error(f"Execution for {session_id} failed: {exec_error}")
ack_message(reject=True, requeue=False)
else:
ack_message(reject=False, requeue=False)
except asyncio.CancelledError:
logger.info(f"Run completion callback cancelled for {session_id}")
except BaseException as e:
error_msg = str(e) or type(e).__name__
logger.exception(f"Error in run completion callback: {error_msg}")
logger.exception(f"Error in run completion callback: {e}")
finally:
# Release the cluster lock
if session_id in self._task_locks:

View File

@@ -125,10 +125,7 @@ class CoPilotProcessor:
)
future.result(timeout=5)
except Exception as e:
error_msg = str(e) or type(e).__name__
logger.warning(
f"[CoPilotExecutor] Worker {self.tid} cleanup error: {error_msg}"
)
logger.warning(f"[CoPilotExecutor] Worker {self.tid} cleanup error: {e}")
# Stop the event loop
self.execution_loop.call_soon_threadsafe(self.execution_loop.stop)
@@ -160,30 +157,47 @@ class CoPilotProcessor:
start_time = time.monotonic()
# Run the async execution in our event loop
future = asyncio.run_coroutine_threadsafe(
self._execute_async(entry, cancel, cluster_lock, log),
self.execution_loop,
)
try:
# Run the async execution in our event loop
future = asyncio.run_coroutine_threadsafe(
self._execute_async(entry, cancel, cluster_lock, log),
self.execution_loop,
)
# Wait for completion, checking cancel periodically
while not future.done():
# Wait for completion, checking cancel periodically
while not future.done():
try:
future.result(timeout=1.0)
except asyncio.TimeoutError:
if cancel.is_set():
log.info("Cancellation requested")
future.cancel()
break
# Refresh cluster lock to maintain ownership
cluster_lock.refresh()
if not future.cancelled():
# Get result to propagate any exceptions
future.result()
elapsed = time.monotonic() - start_time
log.info(f"Execution completed in {elapsed:.2f}s")
except BaseException as e:
elapsed = time.monotonic() - start_time
log.error(f"Execution failed after {elapsed:.2f}s: {e}")
# Safety net: if _execute_async's error handler failed to mark
# the session (e.g. RuntimeError from SDK cleanup), do it here.
try:
future.result(timeout=1.0)
except asyncio.TimeoutError:
if cancel.is_set():
log.info("Cancellation requested")
future.cancel()
break
# Refresh cluster lock to maintain ownership
cluster_lock.refresh()
if not future.cancelled():
# Get result to propagate any exceptions
future.result()
elapsed = time.monotonic() - start_time
log.info(f"Execution completed in {elapsed:.2f}s")
asyncio.run_coroutine_threadsafe(
stream_registry.mark_session_completed(
entry.session_id, error_message=str(e) or "Unknown error"
),
self.execution_loop,
).result(timeout=5.0)
except Exception as cleanup_err:
log.error(f"Safety net mark_session_completed failed: {cleanup_err}")
raise
async def _execute_async(
self,
@@ -205,7 +219,6 @@ class CoPilotProcessor:
"""
last_refresh = time.monotonic()
refresh_interval = 30.0 # Refresh lock every 30 seconds
error_msg = None
try:
# Choose service based on LaunchDarkly flag
@@ -251,26 +264,17 @@ class CoPilotProcessor:
exc_info=True,
)
# Stream loop completed
if cancel.is_set():
log.info("Stream cancelled by user")
error_message = "Operation cancelled" if cancel.is_set() else None
await stream_registry.mark_session_completed(
entry.session_id, error_message=error_message
)
except BaseException as e:
# Handle all exceptions (including CancelledError) with appropriate logging
if isinstance(e, asyncio.CancelledError):
log.info("Turn cancelled")
error_msg = "Operation cancelled"
else:
error_msg = str(e) or type(e).__name__
log.error(f"Turn failed: {error_msg}")
raise
finally:
# If no exception but user cancelled, still mark as cancelled
if not error_msg and cancel.is_set():
error_msg = "Operation cancelled"
log.error(f"Turn failed: {e}")
try:
await stream_registry.mark_session_completed(
entry.session_id, error_message=error_msg
entry.session_id, error_message=str(e) or "Unknown error"
)
except Exception as mark_err:
log.error(f"Failed to mark session completed: {mark_err}")
log.error(f"mark_session_completed also failed: {mark_err}")
raise

View File

@@ -160,7 +160,7 @@ def create_security_hooks(
Args:
user_id: Current user ID for isolation validation
sdk_cwd: SDK working directory for workspace-scoped tool validation
max_subtasks: Maximum concurrent Task (sub-agent) spawns allowed per session
max_subtasks: Maximum Task (sub-agent) spawns allowed per session
on_stop: Callback ``(transcript_path, sdk_session_id)`` invoked when
the SDK finishes processing — used to read the JSONL transcript
before the CLI process exits.
@@ -172,9 +172,8 @@ def create_security_hooks(
from claude_agent_sdk import HookMatcher
from claude_agent_sdk.types import HookContext, HookInput, SyncHookJSONOutput
# Per-session tracking for Task sub-agent concurrency.
# Set of tool_use_ids that consumed a slot — len() is the active count.
task_tool_use_ids: set[str] = set()
# Per-session counter for Task sub-agent spawns
task_spawn_count = 0
async def pre_tool_use_hook(
input_data: HookInput,
@@ -182,6 +181,7 @@ def create_security_hooks(
context: HookContext,
) -> SyncHookJSONOutput:
"""Combined pre-tool-use validation hook."""
nonlocal task_spawn_count
_ = context # unused but required by signature
tool_name = cast(str, input_data.get("tool_name", ""))
tool_input = cast(dict[str, Any], input_data.get("tool_input", {}))
@@ -200,18 +200,18 @@ def create_security_hooks(
"(remove the run_in_background parameter)."
),
)
if len(task_tool_use_ids) >= max_subtasks:
if task_spawn_count >= max_subtasks:
logger.warning(
f"[SDK] Task limit reached ({max_subtasks}), user={user_id}"
)
return cast(
SyncHookJSONOutput,
_deny(
f"Maximum {max_subtasks} concurrent sub-tasks. "
"Wait for running sub-tasks to finish, "
"or continue in the main conversation."
f"Maximum {max_subtasks} sub-tasks per session. "
"Please continue in the main conversation."
),
)
task_spawn_count += 1
# Strip MCP prefix for consistent validation
is_copilot_tool = tool_name.startswith(MCP_TOOL_PREFIX)
@@ -229,24 +229,9 @@ def create_security_hooks(
if result:
return cast(SyncHookJSONOutput, result)
# Reserve the Task slot only after all validations pass
if tool_name == "Task" and tool_use_id is not None:
task_tool_use_ids.add(tool_use_id)
logger.debug(f"[SDK] Tool start: {tool_name}, user={user_id}")
return cast(SyncHookJSONOutput, {})
def _release_task_slot(tool_name: str, tool_use_id: str | None) -> None:
"""Release a Task concurrency slot if one was reserved."""
if tool_name == "Task" and tool_use_id in task_tool_use_ids:
task_tool_use_ids.discard(tool_use_id)
logger.info(
"[SDK] Task slot released, active=%d/%d, user=%s",
len(task_tool_use_ids),
max_subtasks,
user_id,
)
async def post_tool_use_hook(
input_data: HookInput,
tool_use_id: str | None,
@@ -261,8 +246,6 @@ def create_security_hooks(
"""
_ = context
tool_name = cast(str, input_data.get("tool_name", ""))
_release_task_slot(tool_name, tool_use_id)
is_builtin = not tool_name.startswith(MCP_TOOL_PREFIX)
logger.info(
"[SDK] PostToolUse: %s (builtin=%s, tool_use_id=%s)",
@@ -306,9 +289,6 @@ def create_security_hooks(
f"[SDK] Tool failed: {tool_name}, error={error}, "
f"user={user_id}, tool_use_id={tool_use_id}"
)
_release_task_slot(tool_name, tool_use_id)
return cast(SyncHookJSONOutput, {})
async def pre_compact_hook(

View File

@@ -208,22 +208,19 @@ def test_bash_builtin_blocked_message_clarity():
@pytest.fixture()
def _hooks():
"""Create security hooks and return (pre, post, post_failure) handlers."""
"""Create security hooks and return the PreToolUse handler."""
from .security_hooks import create_security_hooks
hooks = create_security_hooks(user_id="u1", sdk_cwd=SDK_CWD, max_subtasks=2)
pre = hooks["PreToolUse"][0].hooks[0]
post = hooks["PostToolUse"][0].hooks[0]
post_failure = hooks["PostToolUseFailure"][0].hooks[0]
return pre, post, post_failure
return pre
@pytest.mark.skipif(not _sdk_available(), reason="claude_agent_sdk not installed")
@pytest.mark.asyncio
async def test_task_background_blocked(_hooks):
"""Task with run_in_background=true must be denied."""
pre, _, _ = _hooks
result = await pre(
result = await _hooks(
{"tool_name": "Task", "tool_input": {"run_in_background": True, "prompt": "x"}},
tool_use_id=None,
context={},
@@ -236,10 +233,9 @@ async def test_task_background_blocked(_hooks):
@pytest.mark.asyncio
async def test_task_foreground_allowed(_hooks):
"""Task without run_in_background should be allowed."""
pre, _, _ = _hooks
result = await pre(
result = await _hooks(
{"tool_name": "Task", "tool_input": {"prompt": "do stuff"}},
tool_use_id="tu-1",
tool_use_id=None,
context={},
)
assert not _is_denied(result)
@@ -249,102 +245,25 @@ async def test_task_foreground_allowed(_hooks):
@pytest.mark.asyncio
async def test_task_limit_enforced(_hooks):
"""Task spawns beyond max_subtasks should be denied."""
pre, _, _ = _hooks
# First two should pass
for i in range(2):
result = await pre(
for _ in range(2):
result = await _hooks(
{"tool_name": "Task", "tool_input": {"prompt": "ok"}},
tool_use_id=f"tu-limit-{i}",
tool_use_id=None,
context={},
)
assert not _is_denied(result)
# Third should be denied (limit=2)
result = await pre(
result = await _hooks(
{"tool_name": "Task", "tool_input": {"prompt": "over limit"}},
tool_use_id="tu-limit-2",
tool_use_id=None,
context={},
)
assert _is_denied(result)
assert "Maximum" in _reason(result)
@pytest.mark.skipif(not _sdk_available(), reason="claude_agent_sdk not installed")
@pytest.mark.asyncio
async def test_task_slot_released_on_completion(_hooks):
"""Completing a Task should free a slot so new Tasks can be spawned."""
pre, post, _ = _hooks
# Fill both slots
for i in range(2):
result = await pre(
{"tool_name": "Task", "tool_input": {"prompt": "ok"}},
tool_use_id=f"tu-comp-{i}",
context={},
)
assert not _is_denied(result)
# Third should be denied — at capacity
result = await pre(
{"tool_name": "Task", "tool_input": {"prompt": "over"}},
tool_use_id="tu-comp-2",
context={},
)
assert _is_denied(result)
# Complete first task — frees a slot
await post(
{"tool_name": "Task", "tool_input": {}},
tool_use_id="tu-comp-0",
context={},
)
# Now a new Task should be allowed
result = await pre(
{"tool_name": "Task", "tool_input": {"prompt": "after release"}},
tool_use_id="tu-comp-3",
context={},
)
assert not _is_denied(result)
@pytest.mark.skipif(not _sdk_available(), reason="claude_agent_sdk not installed")
@pytest.mark.asyncio
async def test_task_slot_released_on_failure(_hooks):
"""A failed Task should also free its concurrency slot."""
pre, _, post_failure = _hooks
# Fill both slots
for i in range(2):
result = await pre(
{"tool_name": "Task", "tool_input": {"prompt": "ok"}},
tool_use_id=f"tu-fail-{i}",
context={},
)
assert not _is_denied(result)
# At capacity
result = await pre(
{"tool_name": "Task", "tool_input": {"prompt": "over"}},
tool_use_id="tu-fail-2",
context={},
)
assert _is_denied(result)
# Fail first task — should free a slot
await post_failure(
{"tool_name": "Task", "tool_input": {}, "error": "something broke"},
tool_use_id="tu-fail-0",
context={},
)
# New Task should be allowed
result = await pre(
{"tool_name": "Task", "tool_input": {"prompt": "after failure"}},
tool_use_id="tu-fail-3",
context={},
)
assert not _is_denied(result)
# -- _is_tool_error_or_denial ------------------------------------------------
@@ -379,9 +298,7 @@ class TestIsToolErrorOrDenial:
def test_subtask_limit_denial(self):
assert (
_is_tool_error_or_denial(
"Maximum 2 concurrent sub-tasks. "
"Wait for running sub-tasks to finish, "
"or continue in the main conversation."
"Maximum 2 sub-tasks per session. Please continue in the main conversation."
)
is True
)

View File

@@ -75,21 +75,14 @@ class CapturedTranscript:
_SDK_CWD_PREFIX = WORKSPACE_PREFIX
# Special message prefixes for text-based markers (parsed by frontend)
COPILOT_ERROR_PREFIX = "[COPILOT_ERROR]" # Renders as ErrorCard
COPILOT_SYSTEM_PREFIX = "[COPILOT_SYSTEM]" # Renders as system info message
# Heartbeat interval — keep SSE alive through proxies/LBs during tool execution.
# IMPORTANT: Must be less than frontend timeout (12s in useCopilotPage.ts)
_HEARTBEAT_INTERVAL = 10.0 # seconds
# Appended to the system prompt to inform the agent about available tools.
# The SDK built-in Bash is NOT available — use mcp__copilot__bash_exec instead,
# which has kernel-level network isolation (unshare --net).
def _build_sdk_tool_supplement(cwd: str) -> str:
"""Build the SDK tool supplement with the actual working directory injected."""
return f"""
_SDK_TOOL_SUPPLEMENT = """
## Tool notes
@@ -97,16 +90,9 @@ def _build_sdk_tool_supplement(cwd: str) -> str:
- The SDK built-in Bash tool is NOT available. Use the `bash_exec` MCP tool
for shell commands — it runs in a network-isolated sandbox.
### Working directory
- Your working directory is: `{cwd}`
- All SDK Read/Write/Edit/Glob/Grep tools AND `bash_exec` operate inside this
directory. This is the ONLY writable path — do not attempt to read or write
anywhere else on the filesystem.
- Use relative paths or absolute paths under `{cwd}` for all file operations.
### Two storage systems — CRITICAL to understand
1. **Ephemeral working directory** (`{cwd}`):
1. **Ephemeral working directory** (`/tmp/copilot-<session>/`):
- Shared by SDK Read/Write/Edit/Glob/Grep tools AND `bash_exec`
- Files here are **lost between turns** — do NOT rely on them persisting
- Use for temporary work: running scripts, processing data, etc.
@@ -132,21 +118,6 @@ When you create or modify important files (code, configs, outputs), you MUST:
2. At the start of a new turn, call `list_workspace_files` to see what files
are available from previous turns
### Sharing files with the user
After saving a file to the persistent workspace with `write_workspace_file`,
share it with the user by embedding the `download_url` from the response in
your message as a Markdown link or image:
- **Any file** — shows as a clickable download link:
`[report.csv](workspace://file_id#text/csv)`
- **Image** — renders inline in chat:
`![chart](workspace://file_id#image/png)`
- **Video** — renders inline in chat with player controls:
`![recording](workspace://file_id#video/mp4)`
The `download_url` field in the `write_workspace_file` response is already
in the correct format — paste it directly after the `(` in the Markdown.
### Long-running tools
Long-running tools (create_agent, edit_agent, etc.) are handled
asynchronously. You will receive an immediate response; the actual result
@@ -157,7 +128,6 @@ is delivered to the user via a background stream.
All tasks must run in the foreground.
"""
STREAM_LOCK_PREFIX = "copilot:stream:lock:"
@@ -443,20 +413,6 @@ async def stream_chat_completion_sdk(
# Type narrowing: session is guaranteed ChatSession after the check above
session = cast(ChatSession, session)
# Clean up stale error markers from previous turn before starting new turn
# If the last message contains an error marker, remove it (user is retrying)
if (
len(session.messages) > 0
and session.messages[-1].role == "assistant"
and session.messages[-1].content
and COPILOT_ERROR_PREFIX in session.messages[-1].content
):
logger.info(
"[SDK] [%s] Removing stale error marker from previous turn",
session_id[:12],
)
session.messages.pop()
# Append the new message to the session if it's not already there
new_message_role = "user" if is_user_message else "assistant"
if message and (
@@ -486,13 +442,14 @@ async def stream_chat_completion_sdk(
_background_tasks.add(task)
task.add_done_callback(_background_tasks.discard)
# Build system prompt (reuses non-SDK path with Langfuse support)
has_history = len(session.messages) > 1
system_prompt, _ = await _build_system_prompt(
user_id, has_conversation_history=has_history
)
system_prompt += _SDK_TOOL_SUPPLEMENT
message_id = str(uuid.uuid4())
stream_id = str(uuid.uuid4())
stream_completed = False
use_resume = False
resume_file: str | None = None
captured_transcript = CapturedTranscript()
sdk_cwd = ""
# Acquire stream lock to prevent concurrent streams to the same session
lock = AsyncClusterLock(
@@ -515,30 +472,21 @@ async def stream_chat_completion_sdk(
)
return
# Make sure there is no more code between the lock acquitition and try-block.
try:
# Build system prompt (reuses non-SDK path with Langfuse support).
# Pre-compute the cwd here so the exact working directory path can be
# injected into the supplement instead of the generic placeholder.
# Catch ValueError early so the failure yields a clean StreamError rather
# than propagating outside the stream error-handling path.
has_history = len(session.messages) > 1
try:
sdk_cwd = _make_sdk_cwd(session_id)
os.makedirs(sdk_cwd, exist_ok=True)
except (ValueError, OSError) as e:
logger.error("[SDK] [%s] Invalid SDK cwd: %s", session_id[:12], e)
yield StreamError(
errorText="Unable to initialize working directory.",
code="sdk_cwd_error",
)
return
system_prompt, _ = await _build_system_prompt(
user_id, has_conversation_history=has_history
)
system_prompt += _build_sdk_tool_supplement(sdk_cwd)
yield StreamStart(messageId=message_id, sessionId=session_id)
yield StreamStart(messageId=message_id, sessionId=session_id)
stream_completed = False
# Initialise variables before the try so the finally block can
# always attempt transcript upload regardless of errors.
sdk_cwd = ""
use_resume = False
resume_file: str | None = None
captured_transcript = CapturedTranscript()
try:
# Use a session-specific temp dir to avoid cleanup race conditions
# between concurrent sessions.
sdk_cwd = _make_sdk_cwd(session_id)
os.makedirs(sdk_cwd, exist_ok=True)
set_execution_context(user_id, session)
try:
@@ -777,25 +725,6 @@ async def stream_chat_completion_sdk(
- len(adapter.resolved_tool_calls),
)
# Log ResultMessage details for debugging
if isinstance(sdk_msg, ResultMessage):
logger.info(
"[SDK] [%s] Received: ResultMessage %s "
"(unresolved=%d, current=%d, resolved=%d)",
session_id[:12],
sdk_msg.subtype,
len(adapter.current_tool_calls)
- len(adapter.resolved_tool_calls),
len(adapter.current_tool_calls),
len(adapter.resolved_tool_calls),
)
if sdk_msg.subtype in ("error", "error_during_execution"):
logger.error(
"[SDK] [%s] SDK execution failed with error: %s",
session_id[:12],
sdk_msg.result or "(no error message provided)",
)
for response in adapter.convert_message(sdk_msg):
if isinstance(response, StreamStart):
continue
@@ -820,15 +749,6 @@ async def stream_chat_completion_sdk(
extra,
)
# Log errors being sent to frontend
if isinstance(response, StreamError):
logger.error(
"[SDK] [%s] Sending error to frontend: %s (code=%s)",
session_id[:12],
response.errorText,
response.code,
)
yield response
if isinstance(response, StreamTextDelta):
@@ -935,13 +855,13 @@ async def stream_chat_completion_sdk(
yield response
# If the stream ended without a ResultMessage, the SDK
# CLI exited unexpectedly or the user stopped execution.
# Close any open text/step so chunks are well-formed, and
# append a cancellation message so users see feedback.
# StreamFinish is published by mark_session_completed in the processor.
# CLI exited unexpectedly. Close any open text/step so
# the chunks are well-formed. StreamFinish is published
# by mark_session_completed in the processor.
if not stream_completed:
logger.info(
"[SDK] [%s] Stream ended without ResultMessage (stopped by user)",
logger.warning(
"[SDK] [%s] Stream ended without ResultMessage "
"(StopAsyncIteration)",
session_id[:12],
)
closing_responses: list[StreamBaseResponse] = []
@@ -949,15 +869,6 @@ async def stream_chat_completion_sdk(
for r in closing_responses:
yield r
# Add "Stopped by user" message so it persists after refresh
# Use COPILOT_SYSTEM_PREFIX so frontend renders it as system message, not assistant
session.messages.append(
ChatMessage(
role="assistant",
content=f"{COPILOT_SYSTEM_PREFIX} Execution stopped by user",
)
)
if (
assistant_response.content or assistant_response.tool_calls
) and not has_appended_assistant:
@@ -1011,76 +922,43 @@ async def stream_chat_completion_sdk(
"to use the OpenAI-compatible fallback."
)
session = cast(ChatSession, await asyncio.shield(upsert_chat_session(session)))
logger.info(
"[SDK] [%s] Stream completed successfully with %d messages",
"[SDK] [%s] Session saved with %d messages",
session_id[:12],
len(session.messages),
)
except BaseException as e:
# Catch BaseException to handle both Exception and CancelledError
# (CancelledError inherits from BaseException in Python 3.8+)
if isinstance(e, asyncio.CancelledError):
logger.warning("[SDK] [%s] Session cancelled", session_id[:12])
error_msg = "Operation cancelled"
else:
error_msg = str(e) or type(e).__name__
# SDK cleanup RuntimeError is expected during cancellation, log as warning
if isinstance(e, RuntimeError) and "cancel scope" in str(e):
logger.warning(
"[SDK] [%s] SDK cleanup error: %s", session_id[:12], error_msg
)
else:
logger.error(
f"[SDK] [%s] Error: {error_msg}", session_id[:12], exc_info=True
)
# Append error marker to session (non-invasive text parsing approach)
# The finally block will persist the session with this error marker
except asyncio.CancelledError:
# Client disconnect / server shutdown — save session before re-raising
# so accumulated messages aren't lost.
logger.warning("[SDK] [%s] Session cancelled (CancelledError)", session_id[:12])
if session:
session.messages.append(
ChatMessage(
role="assistant", content=f"{COPILOT_ERROR_PREFIX} {error_msg}"
)
)
logger.debug(
"[SDK] [%s] Appended error marker, will be persisted in finally",
session_id[:12],
)
# Yield StreamError for immediate feedback (only for non-cancellation errors)
# Skip for CancelledError and RuntimeError cleanup issues (both are cancellations)
is_cancellation = isinstance(e, asyncio.CancelledError) or (
isinstance(e, RuntimeError) and "cancel scope" in str(e)
)
if not is_cancellation:
yield StreamError(
errorText=error_msg,
code="sdk_error",
)
raise
finally:
# --- Persist session messages ---
# This MUST run in finally to persist messages even when the generator
# is stopped early (e.g., user clicks stop, processor breaks stream loop).
# Without this, messages disappear after refresh because they were never
# saved to the database.
if session is not None:
try:
await asyncio.shield(upsert_chat_session(session))
logger.info(
"[SDK] [%s] Session persisted in finally with %d messages",
"[SDK] [%s] Session saved on cancel (%d messages)",
session_id[:12],
len(session.messages),
)
except Exception as persist_err:
except Exception as save_err:
logger.error(
"[SDK] [%s] Failed to persist session in finally: %s",
"[SDK] [%s] Failed to save session on cancel: %s",
session_id[:12],
persist_err,
exc_info=True,
save_err,
)
raise
except Exception as e:
logger.error(f"[SDK] Error: {e}", exc_info=True)
if session:
try:
await asyncio.shield(upsert_chat_session(session))
except Exception as save_err:
logger.error(f"[SDK] Failed to save session on error: {save_err}")
yield StreamError(
errorText="An error occurred. Please try again.",
code="sdk_error",
)
finally:
# --- Upload transcript for next-turn --resume ---
# This MUST run in finally so the transcript is uploaded even when
# the streaming loop raises an exception. The CLI uses

View File

@@ -707,6 +707,7 @@ async def mark_session_completed(
True if session was newly marked completed, False if already completed/failed
"""
status: Literal["completed", "failed"] = "failed" if error_message else "completed"
redis = await get_redis_async()
meta_key = _get_session_meta_key(session_id)

View File

@@ -10,7 +10,6 @@ from .add_understanding import AddUnderstandingTool
from .agent_output import AgentOutputTool
from .base import BaseTool
from .bash_exec import BashExecTool
from .browse_web import BrowseWebTool
from .create_agent import CreateAgentTool
from .customize_agent import CustomizeAgentTool
from .edit_agent import EditAgentTool
@@ -51,8 +50,6 @@ TOOL_REGISTRY: dict[str, BaseTool] = {
"get_doc_page": GetDocPageTool(),
# Web fetch for safe URL retrieval
"web_fetch": WebFetchTool(),
# Browser-based browsing for JS-rendered pages (Stagehand + Browserbase)
"browse_web": BrowseWebTool(),
# Sandboxed code execution (bubblewrap)
"bash_exec": BashExecTool(),
# Persistent workspace tools (cloud storage, survives across sessions)

View File

@@ -1,11 +1,13 @@
"""External Agent Generator service client.
This module provides a client for communicating with the external Agent Generator
microservice. When AGENTGENERATOR_HOST is configured, the agent generation functions
will delegate to the external service instead of using the built-in LLM-based implementation.
microservice. All generation endpoints use async polling: submit a job (202),
then poll GET /api/jobs/{job_id} every few seconds until the result is ready.
"""
import asyncio
import logging
import time
from typing import Any
import httpx
@@ -25,22 +27,21 @@ logger = logging.getLogger(__name__)
_dummy_mode_warned = False
# ---------------------------------------------------------------------------
# Shared helpers
# ---------------------------------------------------------------------------
POLL_INTERVAL_SECONDS = 10.0
MAX_POLL_TIME_SECONDS = 1800.0 # 30 minutes
MAX_CONSECUTIVE_POLL_ERRORS = 5
def _create_error_response(
error_message: str,
error_type: str = "unknown",
details: dict[str, Any] | None = None,
) -> dict[str, Any]:
"""Create a standardized error response dict.
Args:
error_message: Human-readable error message
error_type: Machine-readable error type
details: Optional additional error details
Returns:
Error dict with type="error" and error details
"""
"""Create a standardized error response dict."""
response: dict[str, Any] = {
"type": "error",
"error": error_message,
@@ -52,14 +53,7 @@ def _create_error_response(
def _classify_http_error(e: httpx.HTTPStatusError) -> tuple[str, str]:
"""Classify an HTTP error into error_type and message.
Args:
e: The HTTP status error
Returns:
Tuple of (error_type, error_message)
"""
"""Classify an HTTP error into error_type and message."""
status = e.response.status_code
if status == 429:
return "rate_limit", f"Agent Generator rate limited: {e}"
@@ -72,14 +66,7 @@ def _classify_http_error(e: httpx.HTTPStatusError) -> tuple[str, str]:
def _classify_request_error(e: httpx.RequestError) -> tuple[str, str]:
"""Classify a request error into error_type and message.
Args:
e: The request error
Returns:
Tuple of (error_type, error_message)
"""
"""Classify a request error into error_type and message."""
error_str = str(e).lower()
if "timeout" in error_str or "timed out" in error_str:
return "timeout", f"Agent Generator request timed out: {e}"
@@ -89,6 +76,10 @@ def _classify_request_error(e: httpx.RequestError) -> tuple[str, str]:
return "request_error", f"Request error calling Agent Generator: {e}"
# ---------------------------------------------------------------------------
# Client / settings singletons
# ---------------------------------------------------------------------------
_client: httpx.AsyncClient | None = None
_settings: Settings | None = None
@@ -136,13 +127,149 @@ def _get_client() -> httpx.AsyncClient:
global _client
if _client is None:
settings = _get_settings()
timeout = httpx.Timeout(float(settings.config.agentgenerator_timeout))
_client = httpx.AsyncClient(
base_url=_get_base_url(),
timeout=httpx.Timeout(settings.config.agentgenerator_timeout),
timeout=timeout,
)
return _client
# ---------------------------------------------------------------------------
# Core polling helper
# ---------------------------------------------------------------------------
async def _submit_and_poll(
endpoint: str,
payload: dict[str, Any],
) -> dict[str, Any]:
"""Submit a job to the agent-generator and poll until the result is ready.
The endpoint is expected to return 202 with ``{"job_id": "..."}`` on success.
We then poll ``GET /api/jobs/{job_id}`` every ``POLL_INTERVAL_SECONDS``
until the job completes or fails.
Returns:
The *result* dict from a completed job, or an error dict.
"""
client = _get_client()
# 1. Submit ----------------------------------------------------------------
try:
response = await client.post(endpoint, json=payload)
response.raise_for_status()
except httpx.HTTPStatusError as e:
error_type, error_msg = _classify_http_error(e)
logger.error(error_msg)
return _create_error_response(error_msg, error_type)
except httpx.RequestError as e:
error_type, error_msg = _classify_request_error(e)
logger.error(error_msg)
return _create_error_response(error_msg, error_type)
data = response.json()
job_id = data.get("job_id")
if not job_id:
return _create_error_response(
"Agent Generator did not return a job_id", "invalid_response"
)
logger.info(f"Agent Generator job submitted: {job_id} via {endpoint}")
# 2. Poll ------------------------------------------------------------------
start = time.monotonic()
consecutive_errors = 0
while (time.monotonic() - start) < MAX_POLL_TIME_SECONDS:
await asyncio.sleep(POLL_INTERVAL_SECONDS)
try:
poll_resp = await client.get(f"/api/jobs/{job_id}")
poll_resp.raise_for_status()
except httpx.HTTPStatusError as e:
if e.response.status_code == 404:
return _create_error_response(
"Agent Generator job not found or expired", "job_not_found"
)
status_code = e.response.status_code
if status_code in {429, 503, 504, 408}:
consecutive_errors += 1
logger.warning(
f"Transient HTTP {status_code} polling job {job_id} "
f"({consecutive_errors}/{MAX_CONSECUTIVE_POLL_ERRORS}): {e}"
)
if consecutive_errors >= MAX_CONSECUTIVE_POLL_ERRORS:
error_type, error_msg = _classify_http_error(e)
logger.error(
f"Giving up on job {job_id} after "
f"{MAX_CONSECUTIVE_POLL_ERRORS} consecutive poll errors: {error_msg}"
)
return _create_error_response(error_msg, error_type)
continue
error_type, error_msg = _classify_http_error(e)
logger.error(f"Poll error for job {job_id}: {error_msg}")
return _create_error_response(error_msg, error_type)
except httpx.RequestError as e:
consecutive_errors += 1
logger.warning(
f"Transient poll error for job {job_id} "
f"({consecutive_errors}/{MAX_CONSECUTIVE_POLL_ERRORS}): {e}"
)
if consecutive_errors >= MAX_CONSECUTIVE_POLL_ERRORS:
error_msg = (
f"Giving up on job {job_id} after "
f"{MAX_CONSECUTIVE_POLL_ERRORS} consecutive poll errors: {e}"
)
logger.error(error_msg)
return _create_error_response(error_msg, "poll_error")
continue
consecutive_errors = 0
poll_data = poll_resp.json()
status = poll_data.get("status")
if status == "completed":
logger.info(f"Agent Generator job {job_id} completed")
result = poll_data.get("result", {})
if not isinstance(result, dict):
return _create_error_response(
"Agent Generator returned invalid result payload",
"invalid_response",
)
return result
elif status == "failed":
error_msg = poll_data.get("error", "Job failed")
logger.error(f"Agent Generator job {job_id} failed: {error_msg}")
return _create_error_response(error_msg, "job_failed")
elif status in {"running", "pending", "queued"}:
continue
else:
return _create_error_response(
f"Agent Generator returned unexpected job status: {status}",
"invalid_response",
)
return _create_error_response("Agent generation timed out after polling", "timeout")
def _extract_agent_json(result: dict[str, Any]) -> dict[str, Any]:
"""Extract and validate agent_json from a job result.
Returns the agent_json dict, or an error response if missing/invalid.
"""
agent_json = result.get("agent_json")
if not isinstance(agent_json, dict):
return _create_error_response(
"Agent Generator returned no agent_json in result", "invalid_response"
)
return agent_json
# ---------------------------------------------------------------------------
# Public functions — same signatures as before, now using polling
# ---------------------------------------------------------------------------
async def decompose_goal_external(
description: str,
context: str = "",
@@ -150,25 +277,17 @@ async def decompose_goal_external(
) -> dict[str, Any] | None:
"""Call the external service to decompose a goal.
Args:
description: Natural language goal description
context: Additional context (e.g., answers to previous questions)
library_agents: User's library agents available for sub-agent composition
Returns one of the following dicts (keyed by ``"type"``):
Returns:
Dict with either:
- {"type": "clarifying_questions", "questions": [...]}
- {"type": "instructions", "steps": [...]}
- {"type": "unachievable_goal", ...}
- {"type": "vague_goal", ...}
- {"type": "error", "error": "...", "error_type": "..."} on error
Or None on unexpected error
* ``{"type": "instructions", "steps": [...]}``
* ``{"type": "clarifying_questions", "questions": [...]}``
* ``{"type": "unachievable_goal", "reason": ..., "suggested_goal": ...}``
* ``{"type": "vague_goal", "suggested_goal": ...}``
* ``{"type": "error", "error": ..., "error_type": ...}``
"""
if _is_dummy_mode():
return await decompose_goal_dummy(description, context, library_agents)
client = _get_client()
if context:
description = f"{description}\n\nAdditional context from user:\n{context}"
@@ -177,67 +296,43 @@ async def decompose_goal_external(
payload["library_agents"] = library_agents
try:
response = await client.post("/api/decompose-description", json=payload)
response.raise_for_status()
data = response.json()
if not data.get("success"):
error_msg = data.get("error", "Unknown error from Agent Generator")
error_type = data.get("error_type", "unknown")
logger.error(
f"Agent Generator decomposition failed: {error_msg} "
f"(type: {error_type})"
)
return _create_error_response(error_msg, error_type)
# Map the response to the expected format
response_type = data.get("type")
if response_type == "instructions":
return {"type": "instructions", "steps": data.get("steps", [])}
elif response_type == "clarifying_questions":
return {
"type": "clarifying_questions",
"questions": data.get("questions", []),
}
elif response_type == "unachievable_goal":
return {
"type": "unachievable_goal",
"reason": data.get("reason"),
"suggested_goal": data.get("suggested_goal"),
}
elif response_type == "vague_goal":
return {
"type": "vague_goal",
"suggested_goal": data.get("suggested_goal"),
}
elif response_type == "error":
# Pass through error from the service
return _create_error_response(
data.get("error", "Unknown error"),
data.get("error_type", "unknown"),
)
else:
logger.error(
f"Unknown response type from external service: {response_type}"
)
return _create_error_response(
f"Unknown response type from Agent Generator: {response_type}",
"invalid_response",
)
except httpx.HTTPStatusError as e:
error_type, error_msg = _classify_http_error(e)
logger.error(error_msg)
return _create_error_response(error_msg, error_type)
except httpx.RequestError as e:
error_type, error_msg = _classify_request_error(e)
logger.error(error_msg)
return _create_error_response(error_msg, error_type)
result = await _submit_and_poll("/api/decompose-description", payload)
except Exception as e:
error_msg = f"Unexpected error calling Agent Generator: {e}"
logger.error(error_msg)
return _create_error_response(error_msg, "unexpected_error")
# The result dict from the job is already in the expected format
# (type, steps, questions, etc.) — just return it as-is.
if result.get("type") == "error":
return result
response_type = result.get("type")
if response_type == "instructions":
return {"type": "instructions", "steps": result.get("steps", [])}
elif response_type == "clarifying_questions":
return {
"type": "clarifying_questions",
"questions": result.get("questions", []),
}
elif response_type == "unachievable_goal":
return {
"type": "unachievable_goal",
"reason": result.get("reason"),
"suggested_goal": result.get("suggested_goal"),
}
elif response_type == "vague_goal":
return {
"type": "vague_goal",
"suggested_goal": result.get("suggested_goal"),
}
else:
logger.error(f"Unknown response type from Agent Generator job: {response_type}")
return _create_error_response(
f"Unknown response type: {response_type}",
"invalid_response",
)
async def generate_agent_external(
instructions: dict[str, Any],
@@ -245,51 +340,28 @@ async def generate_agent_external(
) -> dict[str, Any] | None:
"""Call the external service to generate an agent from instructions.
Args:
instructions: Structured instructions from decompose_goal
library_agents: User's library agents available for sub-agent composition
Returns:
Agent JSON dict or error dict {"type": "error", ...} on error
Agent JSON dict or error dict {"type": "error", ...} on error.
"""
if _is_dummy_mode():
return await generate_agent_dummy(instructions, library_agents)
client = _get_client()
# Build request payload
payload: dict[str, Any] = {"instructions": instructions}
if library_agents:
payload["library_agents"] = library_agents
try:
response = await client.post("/api/generate-agent", json=payload)
response.raise_for_status()
data = response.json()
if not data.get("success"):
error_msg = data.get("error", "Unknown error from Agent Generator")
error_type = data.get("error_type", "unknown")
logger.error(
f"Agent Generator generation failed: {error_msg} (type: {error_type})"
)
return _create_error_response(error_msg, error_type)
return data.get("agent_json")
except httpx.HTTPStatusError as e:
error_type, error_msg = _classify_http_error(e)
logger.error(error_msg)
return _create_error_response(error_msg, error_type)
except httpx.RequestError as e:
error_type, error_msg = _classify_request_error(e)
logger.error(error_msg)
return _create_error_response(error_msg, error_type)
result = await _submit_and_poll("/api/generate-agent", payload)
except Exception as e:
error_msg = f"Unexpected error calling Agent Generator: {e}"
logger.error(error_msg)
return _create_error_response(error_msg, "unexpected_error")
if result.get("type") == "error":
return result
return _extract_agent_json(result)
async def generate_agent_patch_external(
update_request: str,
@@ -298,24 +370,14 @@ async def generate_agent_patch_external(
) -> dict[str, Any] | None:
"""Call the external service to generate a patch for an existing agent.
Args:
update_request: Natural language description of changes
current_agent: Current agent JSON
library_agents: User's library agents available for sub-agent composition
operation_id: Operation ID for async processing (enables Redis Streams callback)
session_id: Session ID for async processing (enables Redis Streams callback)
Returns:
Updated agent JSON, clarifying questions dict, {"status": "accepted"} for async, or error dict on error
Updated agent JSON, clarifying questions dict, or error dict.
"""
if _is_dummy_mode():
return await generate_agent_patch_dummy(
update_request, current_agent, library_agents
)
client = _get_client()
# Build request payload
payload: dict[str, Any] = {
"update_request": update_request,
"current_agent_json": current_agent,
@@ -324,49 +386,23 @@ async def generate_agent_patch_external(
payload["library_agents"] = library_agents
try:
response = await client.post("/api/update-agent", json=payload)
response.raise_for_status()
data = response.json()
if not data.get("success"):
error_msg = data.get("error", "Unknown error from Agent Generator")
error_type = data.get("error_type", "unknown")
logger.error(
f"Agent Generator patch generation failed: {error_msg} "
f"(type: {error_type})"
)
return _create_error_response(error_msg, error_type)
# Check if it's clarifying questions
if data.get("type") == "clarifying_questions":
return {
"type": "clarifying_questions",
"questions": data.get("questions", []),
}
# Check if it's an error passed through
if data.get("type") == "error":
return _create_error_response(
data.get("error", "Unknown error"),
data.get("error_type", "unknown"),
)
# Otherwise return the updated agent JSON
return data.get("agent_json")
except httpx.HTTPStatusError as e:
error_type, error_msg = _classify_http_error(e)
logger.error(error_msg)
return _create_error_response(error_msg, error_type)
except httpx.RequestError as e:
error_type, error_msg = _classify_request_error(e)
logger.error(error_msg)
return _create_error_response(error_msg, error_type)
result = await _submit_and_poll("/api/update-agent", payload)
except Exception as e:
error_msg = f"Unexpected error calling Agent Generator: {e}"
logger.error(error_msg)
return _create_error_response(error_msg, "unexpected_error")
if result.get("type") == "error":
return result
if result.get("type") == "clarifying_questions":
return {
"type": "clarifying_questions",
"questions": result.get("questions", []),
}
return _extract_agent_json(result)
async def customize_template_external(
template_agent: dict[str, Any],
@@ -375,83 +411,51 @@ async def customize_template_external(
) -> dict[str, Any] | None:
"""Call the external service to customize a template/marketplace agent.
Args:
template_agent: The template agent JSON to customize
modification_request: Natural language description of customizations
context: Additional context (e.g., answers to previous questions)
operation_id: Operation ID for async processing (enables Redis Streams callback)
session_id: Session ID for async processing (enables Redis Streams callback)
Returns:
Customized agent JSON, clarifying questions dict, or error dict on error
Customized agent JSON, clarifying questions dict, or error dict.
"""
if _is_dummy_mode():
return await customize_template_dummy(
template_agent, modification_request, context
)
client = _get_client()
request = modification_request
request_text = modification_request
if context:
request = f"{modification_request}\n\nAdditional context from user:\n{context}"
request_text = (
f"{modification_request}\n\nAdditional context from user:\n{context}"
)
payload: dict[str, Any] = {
"template_agent_json": template_agent,
"modification_request": request,
"modification_request": request_text,
}
try:
response = await client.post("/api/template-modification", json=payload)
response.raise_for_status()
data = response.json()
if not data.get("success"):
error_msg = data.get("error", "Unknown error from Agent Generator")
error_type = data.get("error_type", "unknown")
logger.error(
f"Agent Generator template customization failed: {error_msg} "
f"(type: {error_type})"
)
return _create_error_response(error_msg, error_type)
# Check if it's clarifying questions
if data.get("type") == "clarifying_questions":
return {
"type": "clarifying_questions",
"questions": data.get("questions", []),
}
# Check if it's an error passed through
if data.get("type") == "error":
return _create_error_response(
data.get("error", "Unknown error"),
data.get("error_type", "unknown"),
)
# Otherwise return the customized agent JSON
return data.get("agent_json")
except httpx.HTTPStatusError as e:
error_type, error_msg = _classify_http_error(e)
logger.error(error_msg)
return _create_error_response(error_msg, error_type)
except httpx.RequestError as e:
error_type, error_msg = _classify_request_error(e)
logger.error(error_msg)
return _create_error_response(error_msg, error_type)
result = await _submit_and_poll("/api/template-modification", payload)
except Exception as e:
error_msg = f"Unexpected error calling Agent Generator: {e}"
logger.error(error_msg)
return _create_error_response(error_msg, "unexpected_error")
if result.get("type") == "error":
return result
if result.get("type") == "clarifying_questions":
return {
"type": "clarifying_questions",
"questions": result.get("questions", []),
}
return _extract_agent_json(result)
# ---------------------------------------------------------------------------
# Non-generation endpoints (still synchronous — quick responses)
# ---------------------------------------------------------------------------
async def get_blocks_external() -> list[dict[str, Any]] | None:
"""Get available blocks from the external service.
Returns:
List of block info dicts or None on error
"""
"""Get available blocks from the external service."""
if _is_dummy_mode():
return await get_blocks_dummy()
@@ -480,11 +484,7 @@ async def get_blocks_external() -> list[dict[str, Any]] | None:
async def health_check() -> bool:
"""Check if the external service is healthy.
Returns:
True if healthy, False otherwise
"""
"""Check if the external service is healthy."""
if not is_external_service_configured():
return False

View File

@@ -5,7 +5,7 @@ import re
from datetime import datetime, timedelta, timezone
from typing import Any
from pydantic import BaseModel, Field, field_validator
from pydantic import BaseModel, field_validator
from backend.api.features.library.model import LibraryAgent
from backend.copilot.model import ChatSession
@@ -13,7 +13,6 @@ from backend.data.db_accessors import execution_db, library_db
from backend.data.execution import ExecutionStatus, GraphExecution, GraphExecutionMeta
from .base import BaseTool
from .execution_utils import TERMINAL_STATUSES, wait_for_execution
from .models import (
AgentOutputResponse,
ErrorResponse,
@@ -34,7 +33,6 @@ class AgentOutputInput(BaseModel):
store_slug: str = ""
execution_id: str = ""
run_time: str = "latest"
wait_if_running: int = Field(default=0, ge=0, le=300)
@field_validator(
"agent_name",
@@ -118,11 +116,6 @@ class AgentOutputTool(BaseTool):
Select which run to retrieve using:
- execution_id: Specific execution ID
- run_time: 'latest' (default), 'yesterday', 'last week', or ISO date 'YYYY-MM-DD'
Wait for completion (optional):
- wait_if_running: Max seconds to wait if execution is still running (0-300).
If the execution is running/queued, waits up to this many seconds for completion.
Returns current status on timeout. If already finished, returns immediately.
"""
@property
@@ -152,13 +145,6 @@ class AgentOutputTool(BaseTool):
"Time filter: 'latest', 'yesterday', 'last week', or 'YYYY-MM-DD'"
),
},
"wait_if_running": {
"type": "integer",
"description": (
"Max seconds to wait if execution is still running (0-300). "
"If running, waits for completion. Returns current state on timeout."
),
},
},
"required": [],
}
@@ -238,14 +224,10 @@ class AgentOutputTool(BaseTool):
execution_id: str | None,
time_start: datetime | None,
time_end: datetime | None,
include_running: bool = False,
) -> tuple[GraphExecution | None, list[GraphExecutionMeta], str | None]:
"""
Fetch execution(s) based on filters.
Returns (single_execution, available_executions_meta, error_message).
Args:
include_running: If True, also look for running/queued executions (for waiting)
"""
exec_db = execution_db()
@@ -260,25 +242,11 @@ class AgentOutputTool(BaseTool):
return None, [], f"Execution '{execution_id}' not found"
return execution, [], None
# Determine which statuses to query
statuses = [ExecutionStatus.COMPLETED]
if include_running:
statuses.extend(
[
ExecutionStatus.RUNNING,
ExecutionStatus.QUEUED,
ExecutionStatus.INCOMPLETE,
ExecutionStatus.REVIEW,
ExecutionStatus.FAILED,
ExecutionStatus.TERMINATED,
]
)
# Get executions with time filters
# Get completed executions with time filters
executions = await exec_db.get_graph_executions(
graph_id=graph_id,
user_id=user_id,
statuses=statuses,
statuses=[ExecutionStatus.COMPLETED],
created_time_gte=time_start,
created_time_lte=time_end,
limit=10,
@@ -345,33 +313,10 @@ class AgentOutputTool(BaseTool):
for e in available_executions[:5]
]
# Build appropriate message based on execution status
if execution.status == ExecutionStatus.COMPLETED:
message = f"Found execution outputs for agent '{agent.name}'"
elif execution.status == ExecutionStatus.FAILED:
message = f"Execution for agent '{agent.name}' failed"
elif execution.status == ExecutionStatus.TERMINATED:
message = f"Execution for agent '{agent.name}' was terminated"
elif execution.status == ExecutionStatus.REVIEW:
message = (
f"Execution for agent '{agent.name}' is awaiting human review. "
"The user needs to approve it before it can continue."
)
elif execution.status in (
ExecutionStatus.RUNNING,
ExecutionStatus.QUEUED,
ExecutionStatus.INCOMPLETE,
):
message = (
f"Execution for agent '{agent.name}' is still {execution.status.value}. "
"Results may be incomplete. Use wait_if_running to wait for completion."
)
else:
message = f"Found execution for agent '{agent.name}' (status: {execution.status.value})"
message = f"Found execution outputs for agent '{agent.name}'"
if len(available_executions) > 1:
message += (
f" Showing latest of {len(available_executions)} matching executions."
f". Showing latest of {len(available_executions)} matching executions."
)
return AgentOutputResponse(
@@ -486,17 +431,13 @@ class AgentOutputTool(BaseTool):
# Parse time expression
time_start, time_end = parse_time_expression(input_data.run_time)
# Check if we should wait for running executions
wait_timeout = input_data.wait_if_running
# Fetch execution(s) - include running if we're going to wait
# Fetch execution(s)
execution, available_executions, exec_error = await self._get_execution(
user_id=user_id,
graph_id=agent.graph_id,
execution_id=input_data.execution_id or None,
time_start=time_start,
time_end=time_end,
include_running=wait_timeout > 0,
)
if exec_error:
@@ -505,17 +446,4 @@ class AgentOutputTool(BaseTool):
session_id=session_id,
)
# If we have an execution that's still running and we should wait
if execution and wait_timeout > 0 and execution.status not in TERMINAL_STATUSES:
logger.info(
f"Execution {execution.id} is {execution.status}, "
f"waiting up to {wait_timeout}s for completion"
)
execution = await wait_for_execution(
user_id=user_id,
graph_id=agent.graph_id,
execution_id=execution.id,
timeout_seconds=wait_timeout,
)
return self._build_response(agent, execution, available_executions, session_id)

View File

@@ -1,13 +1,8 @@
"""Shared agent search functionality for find_agent and find_library_agent tools."""
from __future__ import annotations
import logging
import re
from typing import TYPE_CHECKING, Literal
if TYPE_CHECKING:
from backend.api.features.library.model import LibraryAgent
from typing import Literal
from backend.data.db_accessors import library_db, store_db
from backend.util.exceptions import DatabaseError, NotFoundError
@@ -29,24 +24,94 @@ _UUID_PATTERN = re.compile(
re.IGNORECASE,
)
# Keywords that should be treated as "list all" rather than a literal search
_LIST_ALL_KEYWORDS = frozenset({"all", "*", "everything", "any", ""})
def _is_uuid(text: str) -> bool:
    """Return True when ``text`` (ignoring surrounding whitespace) is a UUID v4."""
    candidate = text.strip()
    return _UUID_PATTERN.match(candidate) is not None
async def _get_library_agent_by_id(user_id: str, agent_id: str) -> AgentInfo | None:
    """Fetch a library agent by ID (library agent ID or graph_id).

    Tries multiple lookup strategies:
    1. First by graph_id (AgentGraph primary key)
    2. Then by library agent ID (LibraryAgent primary key)

    Args:
        user_id: The user ID
        agent_id: The ID to look up (can be graph_id or library agent ID)

    Returns:
        AgentInfo if found, None otherwise

    Raises:
        DatabaseError: Propagated unchanged from either lookup.
    """
    lib_db = library_db()

    def _to_info(agent) -> AgentInfo:
        # Single mapping from the library agent model to the tool-facing
        # AgentInfo, shared by both lookup strategies below (previously
        # duplicated verbatim in each branch).
        return AgentInfo(
            id=agent.id,
            name=agent.name,
            description=agent.description or "",
            source="library",
            in_library=True,
            creator=agent.creator_name,
            status=agent.status.value,
            can_access_graph=agent.can_access_graph,
            has_external_trigger=agent.has_external_trigger,
            new_output=agent.new_output,
            graph_id=agent.graph_id,
        )

    # Strategy 1: treat agent_id as a graph_id.
    try:
        agent = await lib_db.get_library_agent_by_graph_id(user_id, agent_id)
        if agent:
            logger.debug(f"Found library agent by graph_id: {agent.name}")
            return _to_info(agent)
    except NotFoundError:
        # A miss here is expected — log at debug level, mirroring the
        # library_id lookup below, instead of falling through to the
        # generic warning-with-traceback handler.
        logger.debug(f"Library agent not found by graph_id: {agent_id}")
    except DatabaseError:
        # Infrastructure failures must propagate to the caller.
        raise
    except Exception as e:
        logger.warning(
            f"Could not fetch library agent by graph_id {agent_id}: {e}",
            exc_info=True,
        )

    # Strategy 2: treat agent_id as the LibraryAgent primary key.
    try:
        agent = await lib_db.get_library_agent(agent_id, user_id)
        if agent:
            logger.debug(f"Found library agent by library_id: {agent.name}")
            return _to_info(agent)
    except NotFoundError:
        logger.debug(f"Library agent not found by library_id: {agent_id}")
    except DatabaseError:
        raise
    except Exception as e:
        logger.warning(
            f"Could not fetch library agent by library_id {agent_id}: {e}",
            exc_info=True,
        )
    return None
async def search_agents(
query: str,
source: SearchSource,
session_id: str | None = None,
session_id: str | None,
user_id: str | None = None,
) -> ToolResponseBase:
"""
Search for agents in marketplace or user library.
For library searches, keywords like "all", "*", "everything", or an empty
query will list all agents without filtering.
Args:
query: Search query string. Special keywords list all library agents.
query: Search query string
source: "marketplace" or "library"
session_id: Chat session ID
user_id: User ID (required for library search)
@@ -54,11 +119,7 @@ async def search_agents(
Returns:
AgentsFoundResponse, NoResultsResponse, or ErrorResponse
"""
# Normalize list-all keywords to empty string for library searches
if source == "library" and query.lower().strip() in _LIST_ALL_KEYWORDS:
query = ""
if source == "marketplace" and not query:
if not query:
return ErrorResponse(
message="Please provide a search query", session_id=session_id
)
@@ -98,18 +159,28 @@ async def search_agents(
logger.info(f"Found agent by direct ID lookup: {agent.name}")
if not agents:
search_term = query or None
logger.info(
f"{'Listing all agents in' if not query else 'Searching'} "
f"user library{'' if not query else f' for: {query}'}"
)
logger.info(f"Searching user library for: {query}")
results = await library_db().list_library_agents(
user_id=user_id, # type: ignore[arg-type]
search_term=search_term,
page_size=50 if not query else 10,
search_term=query,
page_size=10,
)
for agent in results.agents:
agents.append(_library_agent_to_info(agent))
agents.append(
AgentInfo(
id=agent.id,
name=agent.name,
description=agent.description or "",
source="library",
in_library=True,
creator=agent.creator_name,
status=agent.status.value,
can_access_graph=agent.can_access_graph,
has_external_trigger=agent.has_external_trigger,
new_output=agent.new_output,
graph_id=agent.graph_id,
)
)
logger.info(f"Found {len(agents)} agents in {source}")
except NotFoundError:
pass
@@ -122,62 +193,42 @@ async def search_agents(
)
if not agents:
if source == "marketplace":
suggestions = [
suggestions = (
[
"Try more general terms",
"Browse categories in the marketplace",
"Check spelling",
]
no_results_msg = (
f"No agents found matching '{query}'. Let the user know they can "
"try different keywords or browse the marketplace. Also let them "
"know you can create a custom agent for them based on their needs."
)
elif not query:
# User asked to list all but library is empty
suggestions = [
"Browse the marketplace to find and add agents",
"Use find_agent to search the marketplace",
]
no_results_msg = (
"Your library is empty. Let the user know they can browse the "
"marketplace to find agents, or you can create a custom agent "
"for them based on their needs."
)
else:
suggestions = [
if source == "marketplace"
else [
"Try different keywords",
"Use find_agent to search the marketplace",
"Check your library at /library",
]
no_results_msg = (
f"No agents matching '{query}' found in your library. Let the "
"user know you can create a custom agent for them based on "
"their needs."
)
)
no_results_msg = (
f"No agents found matching '{query}'. Let the user know they can try different keywords or browse the marketplace. Also let them know you can create a custom agent for them based on their needs."
if source == "marketplace"
else f"No agents matching '{query}' found in your library. Let the user know you can create a custom agent for them based on their needs."
)
return NoResultsResponse(
message=no_results_msg, session_id=session_id, suggestions=suggestions
)
if source == "marketplace":
title = (
f"Found {len(agents)} agent{'s' if len(agents) != 1 else ''} for '{query}'"
)
elif not query:
title = f"Found {len(agents)} agent{'s' if len(agents) != 1 else ''} in your library"
else:
title = f"Found {len(agents)} agent{'s' if len(agents) != 1 else ''} in your library for '{query}'"
title = f"Found {len(agents)} agent{'s' if len(agents) != 1 else ''} "
title += (
f"for '{query}'"
if source == "marketplace"
else f"in your library for '{query}'"
)
message = (
"Now you have found some options for the user to choose from. "
"You can add a link to a recommended agent at: /marketplace/agent/agent_id "
"Please ask the user if they would like to use any of these agents. "
"Let the user know we can create a custom agent for them based on their needs."
"Please ask the user if they would like to use any of these agents. Let the user know we can create a custom agent for them based on their needs."
if source == "marketplace"
else "Found agents in the user's library. You can provide a link to view "
"an agent at: /library/agents/{agent_id}. Use agent_output to get "
"execution results, or run_agent to execute. Let the user know we can "
"create a custom agent for them based on their needs."
else "Found agents in the user's library. You can provide a link to view an agent at: "
"/library/agents/{agent_id}. Use agent_output to get execution results, or run_agent to execute. Let the user know we can create a custom agent for them based on their needs."
)
return AgentsFoundResponse(
@@ -187,67 +238,3 @@ async def search_agents(
count=len(agents),
session_id=session_id,
)
def _is_uuid(text: str) -> bool:
    """Check if text is a valid UUID v4."""
    match = _UUID_PATTERN.match(text.strip())
    return match is not None
def _library_agent_to_info(agent: LibraryAgent) -> AgentInfo:
    """Map a library agent model onto the tool-facing AgentInfo shape."""
    info_fields = {
        "id": agent.id,
        "name": agent.name,
        "description": agent.description or "",
        "source": "library",
        "in_library": True,
        "creator": agent.creator_name,
        "status": agent.status.value,
        "can_access_graph": agent.can_access_graph,
        "has_external_trigger": agent.has_external_trigger,
        "new_output": agent.new_output,
        "graph_id": agent.graph_id,
    }
    return AgentInfo(**info_fields)
async def _get_library_agent_by_id(user_id: str, agent_id: str) -> AgentInfo | None:
    """Fetch a library agent by ID (library agent ID or graph_id).

    Tries multiple lookup strategies:
    1. First by graph_id (AgentGraph primary key)
    2. Then by library agent ID (LibraryAgent primary key)

    Args:
        user_id: ID of the user whose library is searched.
        agent_id: The ID to look up (may be a graph_id or a library agent ID).

    Returns:
        AgentInfo for the first successful lookup, or None if neither matches.

    Raises:
        DatabaseError: Re-raised unchanged from either lookup.
    """
    lib_db = library_db()
    # Strategy 1: treat agent_id as a graph_id (AgentGraph primary key).
    try:
        agent = await lib_db.get_library_agent_by_graph_id(user_id, agent_id)
        if agent:
            logger.debug(f"Found library agent by graph_id: {agent.name}")
            return _library_agent_to_info(agent)
    except NotFoundError:
        # Expected miss — fall through to the library-ID lookup below.
        logger.debug(f"Library agent not found by graph_id: {agent_id}")
    except DatabaseError:
        # Infrastructure failures must propagate to the caller.
        raise
    except Exception as e:
        # Unexpected errors are logged (with traceback) but do not abort
        # the second lookup strategy.
        logger.warning(
            f"Could not fetch library agent by graph_id {agent_id}: {e}",
            exc_info=True,
        )
    # Strategy 2: treat agent_id as the LibraryAgent primary key.
    try:
        agent = await lib_db.get_library_agent(agent_id, user_id)
        if agent:
            logger.debug(f"Found library agent by library_id: {agent.name}")
            return _library_agent_to_info(agent)
    except NotFoundError:
        logger.debug(f"Library agent not found by library_id: {agent_id}")
    except DatabaseError:
        raise
    except Exception as e:
        logger.warning(
            f"Could not fetch library agent by library_id {agent_id}: {e}",
            exc_info=True,
        )
    return None

View File

@@ -1,227 +0,0 @@
"""Web browsing tool — navigate real browser sessions to extract page content.
Uses Stagehand + Browserbase for cloud-based browser execution. Handles
JS-rendered pages, SPAs, and dynamic content that web_fetch cannot reach.
Requires environment variables:
STAGEHAND_API_KEY — Browserbase API key
STAGEHAND_PROJECT_ID — Browserbase project ID
ANTHROPIC_API_KEY — LLM key used by Stagehand for extraction
"""
import logging
import os
import threading
from typing import Any
from backend.copilot.model import ChatSession
from .base import BaseTool
from .models import BrowseWebResponse, ErrorResponse, ToolResponseBase
logger = logging.getLogger(__name__)
# Stagehand uses the LLM internally for natural-language extraction/actions.
_STAGEHAND_MODEL = "anthropic/claude-sonnet-4-5-20250929"
# Hard cap on extracted content (in characters) returned to the LLM context.
_MAX_CONTENT_CHARS = 50_000
# Explicit timeouts for Stagehand browser operations (milliseconds).
_GOTO_TIMEOUT_MS = 30_000  # page navigation
_EXTRACT_TIMEOUT_MS = 60_000  # LLM extraction
# ---------------------------------------------------------------------------
# Thread-safety patch for Stagehand signal handlers (applied lazily, once).
#
# Stagehand calls signal.signal() during __init__, which raises ValueError
# when called from a non-main thread (e.g. the CoPilot executor thread pool).
# We patch _register_signal_handlers to be a no-op outside the main thread.
# The patch is applied exactly once per process via double-checked locking.
# ---------------------------------------------------------------------------
# Both globals are process-wide; _patch_lock guards writes to
# _stagehand_patched inside _patch_stagehand_once().
_stagehand_patched = False
_patch_lock = threading.Lock()
def _patch_stagehand_once() -> None:
    """Monkey-patch Stagehand signal handler registration to be thread-safe.

    Must be called after ``import stagehand.main`` has succeeded.
    Safe to call from multiple threads — applies the patch at most once
    (classic double-checked locking on the module flag).
    """
    global _stagehand_patched
    if _stagehand_patched:
        return
    with _patch_lock:
        if not _stagehand_patched:
            import stagehand.main  # noqa: PLC0415

            original_register = stagehand.main.Stagehand._register_signal_handlers

            def _register_only_on_main_thread(self: Any) -> None:
                # signal.signal() raises ValueError off the main thread,
                # so only delegate to the real handler registration there.
                if threading.current_thread() is threading.main_thread():
                    original_register(self)

            stagehand.main.Stagehand._register_signal_handlers = (
                _register_only_on_main_thread
            )
            _stagehand_patched = True
class BrowseWebTool(BaseTool):
    """Navigate a URL with a real browser and extract its content.

    Use this instead of ``web_fetch`` when the page requires JavaScript
    to render (SPAs, dashboards, paywalled content with JS checks, etc.).
    """

    @property
    def name(self) -> str:
        """Tool identifier used for registry lookup and LLM tool calls."""
        return "browse_web"

    @property
    def description(self) -> str:
        """LLM-facing description of when to use this tool."""
        return (
            "Navigate to a URL using a real browser and extract content. "
            "Handles JavaScript-rendered pages and dynamic content that "
            "web_fetch cannot reach. "
            "Specify exactly what to extract via the `instruction` parameter."
        )

    @property
    def parameters(self) -> dict[str, Any]:
        """JSON-schema description of the tool's inputs."""
        return {
            "type": "object",
            "properties": {
                "url": {
                    "type": "string",
                    "description": "The HTTP/HTTPS URL to navigate to.",
                },
                "instruction": {
                    "type": "string",
                    "description": (
                        "What to extract from the page. Be specific — e.g. "
                        "'Extract all pricing plans with features and prices', "
                        "'Get the main article text and author', "
                        "'List all navigation links'. "
                        "Defaults to extracting the main page content."
                    ),
                    "default": "Extract the main content of this page.",
                },
            },
            "required": ["url"],
        }

    @property
    def requires_auth(self) -> bool:
        """Browsing is only offered to authenticated users."""
        return True

    @staticmethod
    def _validate_url(url: str, session_id: str | None) -> ErrorResponse | None:
        """Return an ErrorResponse for a missing/unsupported URL, else None."""
        if not url:
            return ErrorResponse(
                message="Please provide a URL to browse.",
                error="missing_url",
                session_id=session_id,
            )
        if not url.startswith(("http://", "https://")):
            # Rejects ftp:, file:, javascript:, data:, and other schemes.
            return ErrorResponse(
                message="Only HTTP/HTTPS URLs are supported.",
                error="invalid_url",
                session_id=session_id,
            )
        return None

    async def _execute(
        self,
        user_id: str | None,  # noqa: ARG002
        session: ChatSession,
        **kwargs: Any,
    ) -> ToolResponseBase:
        """Navigate to a URL with a real browser and return extracted content."""
        url: str = (kwargs.get("url") or "").strip()
        instruction: str = (
            kwargs.get("instruction") or "Extract the main content of this page."
        )
        session_id = session.session_id if session else None

        validation_error = self._validate_url(url, session_id)
        if validation_error is not None:
            return validation_error

        api_key = os.environ.get("STAGEHAND_API_KEY")
        project_id = os.environ.get("STAGEHAND_PROJECT_ID")
        model_api_key = os.environ.get("ANTHROPIC_API_KEY")

        if not api_key or not project_id:
            return ErrorResponse(
                message=(
                    "Web browsing is not configured on this platform. "
                    "STAGEHAND_API_KEY and STAGEHAND_PROJECT_ID are required."
                ),
                error="not_configured",
                session_id=session_id,
            )
        if not model_api_key:
            return ErrorResponse(
                message=(
                    "Web browsing is not configured: ANTHROPIC_API_KEY is required "
                    "for Stagehand's extraction model."
                ),
                error="not_configured",
                session_id=session_id,
            )

        # Lazy import — Stagehand is an optional heavy dependency.
        # Importing here scopes any ImportError to this tool only, so other
        # tools continue to register and work normally if Stagehand is absent.
        try:
            from stagehand import Stagehand  # noqa: PLC0415
        except ImportError:
            return ErrorResponse(
                message="Web browsing is not available: Stagehand is not installed.",
                error="not_configured",
                session_id=session_id,
            )

        # Apply the signal handler patch now that we know stagehand is present.
        _patch_stagehand_once()

        client: Any | None = None
        try:
            client = Stagehand(
                api_key=api_key,
                project_id=project_id,
                model_name=_STAGEHAND_MODEL,
                model_api_key=model_api_key,
            )
            await client.init()

            page = client.page
            if page is None:
                # Explicit check instead of `assert`: asserts are stripped
                # under `python -O`, and a missing page should fail through
                # the browse_failed path below rather than as an opaque
                # AttributeError inside Stagehand calls.
                raise RuntimeError("Stagehand page is not initialized")

            await page.goto(url, timeoutMs=_GOTO_TIMEOUT_MS)
            result = await page.extract(instruction, timeoutMs=_EXTRACT_TIMEOUT_MS)

            # Extract the text content from the Pydantic result model.
            raw = result.model_dump().get("extraction", "")
            content = str(raw) if raw else ""

            truncated = len(content) > _MAX_CONTENT_CHARS
            if truncated:
                suffix = "\n\n[Content truncated]"
                keep = max(0, _MAX_CONTENT_CHARS - len(suffix))
                content = content[:keep] + suffix

            return BrowseWebResponse(
                message=f"Browsed {url}",
                url=url,
                content=content,
                truncated=truncated,
                session_id=session_id,
            )
        except Exception:
            logger.exception("[browse_web] Failed for %s", url)
            return ErrorResponse(
                message="Failed to browse URL.",
                error="browse_failed",
                session_id=session_id,
            )
        finally:
            # Best-effort cleanup — never let a close() failure mask the result.
            if client is not None:
                try:
                    await client.close()
                except Exception:
                    pass

View File

@@ -1,486 +0,0 @@
"""Unit tests for BrowseWebTool.
All tests run without a running server / database. External dependencies
(Stagehand, Browserbase) are mocked via sys.modules injection so the suite
stays fast and deterministic.
"""
import sys
import threading
import uuid
from datetime import UTC, datetime
from unittest.mock import AsyncMock, MagicMock
import pytest
import backend.copilot.tools.browse_web as _browse_web_mod
from backend.copilot.model import ChatSession
from backend.copilot.tools.browse_web import (
_MAX_CONTENT_CHARS,
BrowseWebTool,
_patch_stagehand_once,
)
from backend.copilot.tools.models import BrowseWebResponse, ErrorResponse, ResponseType
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def make_session(user_id: str = "test-user") -> ChatSession:
    """Construct a throwaway ChatSession with a fresh ID for unit tests."""
    session_fields = {
        "session_id": str(uuid.uuid4()),
        "user_id": user_id,
        "messages": [],
        "usage": [],
        "started_at": datetime.now(UTC),
        "updated_at": datetime.now(UTC),
        "successful_agent_runs": {},
        "successful_agent_schedules": {},
    }
    return ChatSession(**session_fields)
# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------
@pytest.fixture(autouse=True)
def reset_stagehand_patch():
    """Force _stagehand_patched back to False before and after each test."""
    module = _browse_web_mod
    module._stagehand_patched = False
    yield
    module._stagehand_patched = False
@pytest.fixture()
def env_vars(monkeypatch):
    """Provide the three environment variables BrowseWebTool requires."""
    required = {
        "STAGEHAND_API_KEY": "test-api-key",
        "STAGEHAND_PROJECT_ID": "test-project-id",
        "ANTHROPIC_API_KEY": "test-anthropic-key",
    }
    for key, value in required.items():
        monkeypatch.setenv(key, value)
@pytest.fixture()
def stagehand_mocks(monkeypatch):
    """Inject mock stagehand + stagehand.main into sys.modules.

    Returns a dict with the mock objects so individual tests can
    assert on calls or inject side-effects.
    """
    # Fake extraction result returned by page.extract().
    extraction_result = MagicMock()
    extraction_result.model_dump.return_value = {"extraction": "Page content here"}

    # Fake browser page with async goto/extract.
    fake_page = AsyncMock()
    fake_page.goto = AsyncMock(return_value=None)
    fake_page.extract = AsyncMock(return_value=extraction_result)

    # Fake Stagehand client exposing the page and lifecycle hooks.
    fake_client = AsyncMock()
    fake_client.page = fake_page
    fake_client.init = AsyncMock(return_value=None)
    fake_client.close = AsyncMock(return_value=None)

    stagehand_cls = MagicMock(return_value=fake_client)

    # Top-level `stagehand` module exposing the class.
    fake_stagehand_module = MagicMock()
    fake_stagehand_module.Stagehand = stagehand_cls

    # `stagehand.main` is needed by _patch_stagehand_once.
    fake_main_module = MagicMock()
    fake_main_module.Stagehand = MagicMock()
    fake_main_module.Stagehand._register_signal_handlers = MagicMock()

    monkeypatch.setitem(sys.modules, "stagehand", fake_stagehand_module)
    monkeypatch.setitem(sys.modules, "stagehand.main", fake_main_module)

    return {
        "client": fake_client,
        "page": fake_page,
        "result": extraction_result,
        "MockStagehand": stagehand_cls,
        "mock_main": fake_main_module,
    }
# ---------------------------------------------------------------------------
# 1. Tool metadata
# ---------------------------------------------------------------------------
class TestBrowseWebToolMetadata:
    """Static metadata exposed by BrowseWebTool."""

    def test_name(self):
        tool = BrowseWebTool()
        assert tool.name == "browse_web"

    def test_requires_auth(self):
        tool = BrowseWebTool()
        assert tool.requires_auth is True

    def test_url_is_required_parameter(self):
        schema = BrowseWebTool().parameters
        assert "url" in schema["properties"]
        assert "url" in schema["required"]

    def test_instruction_is_optional(self):
        schema = BrowseWebTool().parameters
        assert "instruction" in schema["properties"]
        assert "instruction" not in schema.get("required", [])

    def test_registered_in_tool_registry(self):
        from backend.copilot.tools import TOOL_REGISTRY

        entry = TOOL_REGISTRY.get("browse_web")
        assert entry is not None
        assert isinstance(entry, BrowseWebTool)

    def test_response_type_enum_value(self):
        assert ResponseType.BROWSE_WEB == "browse_web"
# ---------------------------------------------------------------------------
# 2. Input validation (no external deps)
# ---------------------------------------------------------------------------
class TestInputValidation:
    """URL validation paths that never touch external dependencies."""

    async def _browse(self, **kwargs):
        # Shared invocation helper; tests only vary the url kwarg.
        return await BrowseWebTool()._execute(
            user_id="u1", session=make_session(), **kwargs
        )

    async def test_missing_url_returns_error(self):
        outcome = await self._browse()
        assert isinstance(outcome, ErrorResponse)
        assert "url" in outcome.message.lower()

    async def test_empty_url_returns_error(self):
        outcome = await self._browse(url="")
        assert isinstance(outcome, ErrorResponse)

    async def test_ftp_url_rejected(self):
        outcome = await self._browse(url="ftp://example.com/file")
        assert isinstance(outcome, ErrorResponse)
        assert "http" in outcome.message.lower()

    async def test_file_url_rejected(self):
        outcome = await self._browse(url="file:///etc/passwd")
        assert isinstance(outcome, ErrorResponse)

    async def test_javascript_url_rejected(self):
        outcome = await self._browse(url="javascript:alert(1)")
        assert isinstance(outcome, ErrorResponse)
# ---------------------------------------------------------------------------
# 3. Environment variable checks
# ---------------------------------------------------------------------------
class TestEnvVarChecks:
    """Each required env var must independently gate execution."""

    async def test_missing_api_key(self, monkeypatch):
        monkeypatch.setenv("STAGEHAND_PROJECT_ID", "proj")
        monkeypatch.setenv("ANTHROPIC_API_KEY", "key")
        monkeypatch.delenv("STAGEHAND_API_KEY", raising=False)
        outcome = await BrowseWebTool()._execute(
            user_id="u1", session=make_session(), url="https://example.com"
        )
        assert isinstance(outcome, ErrorResponse)
        assert outcome.error == "not_configured"

    async def test_missing_project_id(self, monkeypatch):
        monkeypatch.setenv("STAGEHAND_API_KEY", "key")
        monkeypatch.setenv("ANTHROPIC_API_KEY", "key")
        monkeypatch.delenv("STAGEHAND_PROJECT_ID", raising=False)
        outcome = await BrowseWebTool()._execute(
            user_id="u1", session=make_session(), url="https://example.com"
        )
        assert isinstance(outcome, ErrorResponse)
        assert outcome.error == "not_configured"

    async def test_missing_anthropic_key(self, monkeypatch):
        monkeypatch.setenv("STAGEHAND_API_KEY", "key")
        monkeypatch.setenv("STAGEHAND_PROJECT_ID", "proj")
        monkeypatch.delenv("ANTHROPIC_API_KEY", raising=False)
        outcome = await BrowseWebTool()._execute(
            user_id="u1", session=make_session(), url="https://example.com"
        )
        assert isinstance(outcome, ErrorResponse)
        assert outcome.error == "not_configured"
# ---------------------------------------------------------------------------
# 4. Stagehand absent (ImportError path)
# ---------------------------------------------------------------------------
class TestStagehandAbsent:
    """Graceful degradation when the stagehand package cannot be imported."""

    async def test_returns_not_configured_error(self, env_vars, monkeypatch):
        """Blocking the stagehand import must return a graceful ErrorResponse."""
        # A None entry in sys.modules makes `import stagehand` raise ImportError.
        monkeypatch.setitem(sys.modules, "stagehand", None)
        monkeypatch.setitem(sys.modules, "stagehand.main", None)
        outcome = await BrowseWebTool()._execute(
            user_id="u1", session=make_session(), url="https://example.com"
        )
        assert isinstance(outcome, ErrorResponse)
        assert outcome.error == "not_configured"
        assert "not available" in outcome.message or "not installed" in outcome.message

    async def test_other_tools_unaffected_when_stagehand_absent(
        self, env_vars, monkeypatch
    ):
        """Registry import must not raise even when stagehand is blocked."""
        monkeypatch.setitem(sys.modules, "stagehand", None)
        # The registry was imported at module load; verify it is still intact.
        from backend.copilot.tools import TOOL_REGISTRY

        assert "browse_web" in TOOL_REGISTRY
        assert "web_fetch" in TOOL_REGISTRY  # unrelated tool still present
# ---------------------------------------------------------------------------
# 5. Successful browse
# ---------------------------------------------------------------------------
class TestSuccessfulBrowse:
    """Happy-path browsing against the mocked Stagehand client."""

    async def test_returns_browse_web_response(self, env_vars, stagehand_mocks):
        response = await BrowseWebTool()._execute(
            user_id="u1", session=make_session(), url="https://example.com"
        )
        assert isinstance(response, BrowseWebResponse)
        assert response.url == "https://example.com"
        assert response.content == "Page content here"
        assert response.truncated is False

    async def test_http_url_accepted(self, env_vars, stagehand_mocks):
        response = await BrowseWebTool()._execute(
            user_id="u1", session=make_session(), url="http://example.com"
        )
        assert isinstance(response, BrowseWebResponse)

    async def test_session_id_propagated(self, env_vars, stagehand_mocks):
        chat_session = make_session()
        response = await BrowseWebTool()._execute(
            user_id="u1", session=chat_session, url="https://example.com"
        )
        assert isinstance(response, BrowseWebResponse)
        assert response.session_id == chat_session.session_id

    async def test_custom_instruction_forwarded_to_extract(
        self, env_vars, stagehand_mocks
    ):
        await BrowseWebTool()._execute(
            user_id="u1",
            session=make_session(),
            url="https://example.com",
            instruction="Extract all pricing plans",
        )
        extract_mock = stagehand_mocks["page"].extract
        extract_mock.assert_awaited_once()
        assert extract_mock.call_args[0][0] == "Extract all pricing plans"

    async def test_default_instruction_used_when_omitted(
        self, env_vars, stagehand_mocks
    ):
        await BrowseWebTool()._execute(
            user_id="u1", session=make_session(), url="https://example.com"
        )
        instruction_used = stagehand_mocks["page"].extract.call_args[0][0]
        assert "main content" in instruction_used.lower()

    async def test_explicit_timeouts_passed_to_stagehand(
        self, env_vars, stagehand_mocks
    ):
        from backend.copilot.tools.browse_web import (
            _EXTRACT_TIMEOUT_MS,
            _GOTO_TIMEOUT_MS,
        )

        await BrowseWebTool()._execute(
            user_id="u1", session=make_session(), url="https://example.com"
        )
        page = stagehand_mocks["page"]
        assert page.goto.call_args[1].get("timeoutMs") == _GOTO_TIMEOUT_MS
        assert page.extract.call_args[1].get("timeoutMs") == _EXTRACT_TIMEOUT_MS

    async def test_client_closed_after_success(self, env_vars, stagehand_mocks):
        await BrowseWebTool()._execute(
            user_id="u1", session=make_session(), url="https://example.com"
        )
        stagehand_mocks["client"].close.assert_awaited_once()
# ---------------------------------------------------------------------------
# 6. Truncation
# ---------------------------------------------------------------------------
class TestTruncation:
    """Content-size capping behaviour of the browse result."""

    async def _browse(self, stagehand_mocks, extraction):
        # Inject the desired extraction payload, then run a standard browse.
        stagehand_mocks["result"].model_dump.return_value = {"extraction": extraction}
        return await BrowseWebTool()._execute(
            user_id="u1", session=make_session(), url="https://example.com"
        )

    async def test_short_content_not_truncated(self, env_vars, stagehand_mocks):
        response = await self._browse(stagehand_mocks, "short")
        assert isinstance(response, BrowseWebResponse)
        assert response.truncated is False
        assert response.content == "short"

    async def test_oversized_content_is_truncated(self, env_vars, stagehand_mocks):
        oversized = "a" * (_MAX_CONTENT_CHARS + 1000)
        response = await self._browse(stagehand_mocks, oversized)
        assert isinstance(response, BrowseWebResponse)
        assert response.truncated is True
        assert response.content.endswith("[Content truncated]")

    async def test_truncated_content_never_exceeds_cap(self, env_vars, stagehand_mocks):
        """The final string must be <= _MAX_CONTENT_CHARS regardless of input size."""
        huge = "b" * (_MAX_CONTENT_CHARS * 3)
        response = await self._browse(stagehand_mocks, huge)
        assert isinstance(response, BrowseWebResponse)
        assert len(response.content) == _MAX_CONTENT_CHARS

    async def test_content_exactly_at_limit_not_truncated(
        self, env_vars, stagehand_mocks
    ):
        boundary = "c" * _MAX_CONTENT_CHARS
        response = await self._browse(stagehand_mocks, boundary)
        assert isinstance(response, BrowseWebResponse)
        assert response.truncated is False
        assert len(response.content) == _MAX_CONTENT_CHARS

    async def test_empty_extraction_returns_empty_content(
        self, env_vars, stagehand_mocks
    ):
        response = await self._browse(stagehand_mocks, "")
        assert isinstance(response, BrowseWebResponse)
        assert response.content == ""
        assert response.truncated is False

    async def test_none_extraction_returns_empty_content(
        self, env_vars, stagehand_mocks
    ):
        response = await self._browse(stagehand_mocks, None)
        assert isinstance(response, BrowseWebResponse)
        assert response.content == ""
# ---------------------------------------------------------------------------
# 7. Error handling
# ---------------------------------------------------------------------------
class TestErrorHandling:
    """Failures inside Stagehand must surface as sanitized ErrorResponses."""

    async def _browse(self):
        # Standard invocation; tests configure failures via side_effect first.
        return await BrowseWebTool()._execute(
            user_id="u1", session=make_session(), url="https://example.com"
        )

    async def test_stagehand_init_exception_returns_generic_error(
        self, env_vars, stagehand_mocks
    ):
        stagehand_mocks["client"].init.side_effect = RuntimeError("Connection refused")
        outcome = await self._browse()
        assert isinstance(outcome, ErrorResponse)
        assert outcome.error == "browse_failed"

    async def test_raw_exception_text_not_leaked_to_user(
        self, env_vars, stagehand_mocks
    ):
        """Internal error details must not appear in the user-facing message."""
        stagehand_mocks["client"].init.side_effect = RuntimeError("SECRET_TOKEN_abc123")
        outcome = await self._browse()
        assert isinstance(outcome, ErrorResponse)
        assert "SECRET_TOKEN_abc123" not in outcome.message
        assert outcome.message == "Failed to browse URL."

    async def test_goto_timeout_returns_error(self, env_vars, stagehand_mocks):
        stagehand_mocks["page"].goto.side_effect = TimeoutError("Navigation timed out")
        outcome = await self._browse()
        assert isinstance(outcome, ErrorResponse)
        assert outcome.error == "browse_failed"

    async def test_client_closed_after_exception(self, env_vars, stagehand_mocks):
        stagehand_mocks["page"].goto.side_effect = RuntimeError("boom")
        await self._browse()
        stagehand_mocks["client"].close.assert_awaited_once()

    async def test_close_failure_does_not_propagate(self, env_vars, stagehand_mocks):
        """If close() itself raises, the tool must still return ErrorResponse."""
        stagehand_mocks["client"].init.side_effect = RuntimeError("init failed")
        stagehand_mocks["client"].close.side_effect = RuntimeError("close also failed")
        outcome = await self._browse()
        assert isinstance(outcome, ErrorResponse)
# ---------------------------------------------------------------------------
# 8. Thread-safety of _patch_stagehand_once
# ---------------------------------------------------------------------------
class TestPatchStagehandOnce:
    """Idempotence and thread-safety of ``_patch_stagehand_once``."""

    def test_idempotent_double_call(self, stagehand_mocks):
        """_stagehand_patched transitions False→True exactly once."""
        assert _browse_web_mod._stagehand_patched is False
        _patch_stagehand_once()
        assert _browse_web_mod._stagehand_patched is True
        _patch_stagehand_once()  # second call — still True, not re-patched
        assert _browse_web_mod._stagehand_patched is True

    def test_safe_register_is_noop_in_worker_thread(self, stagehand_mocks):
        """The patched handler must silently do nothing when called from a worker."""
        _patch_stagehand_once()
        mock_main = sys.modules["stagehand.main"]
        safe_register = mock_main.Stagehand._register_signal_handlers
        errors: list[Exception] = []

        def run():
            try:
                safe_register(MagicMock())
            except Exception as exc:
                errors.append(exc)

        t = threading.Thread(target=run)
        t.start()
        t.join()
        assert errors == [], f"Worker thread raised: {errors}"

    def test_patched_flag_set_after_execution(self, env_vars, stagehand_mocks):
        """After a successful browse, _stagehand_patched must be True."""
        import asyncio

        async def _run():
            return await BrowseWebTool()._execute(
                user_id="u1", session=make_session(), url="https://example.com"
            )

        # Fix: asyncio.get_event_loop().run_until_complete() is deprecated and
        # fails on Python 3.12+ when no event loop is set for the thread.
        # asyncio.run() creates and reliably closes a fresh loop instead.
        asyncio.run(_run())
        assert _browse_web_mod._stagehand_patched is True

View File

@@ -1,186 +0,0 @@
"""Shared utilities for execution waiting and status handling."""
import asyncio
import logging
from typing import Any
from backend.data.db_accessors import execution_db
from backend.data.execution import (
AsyncRedisExecutionEventBus,
ExecutionStatus,
GraphExecution,
GraphExecutionEvent,
)
# Module-level logger shared by the wait/subscribe helpers below.
logger = logging.getLogger(__name__)

# Terminal statuses that indicate execution is complete
TERMINAL_STATUSES = frozenset(
    {
        ExecutionStatus.COMPLETED,
        ExecutionStatus.FAILED,
        ExecutionStatus.TERMINATED,
    }
)

# Statuses where execution is paused but not finished (e.g. human-in-the-loop)
PAUSED_STATUSES = frozenset(
    {
        ExecutionStatus.REVIEW,
    }
)

# Statuses that mean "stop waiting" (terminal or paused)
STOP_WAITING_STATUSES = TERMINAL_STATUSES | PAUSED_STATUSES

# Delay between subscribing to the Redis channel and re-checking the DB;
# closes the race where the execution finishes before the subscription is live.
_POST_SUBSCRIBE_RECHECK_DELAY = 0.1  # seconds to wait for subscription to establish
async def wait_for_execution(
    user_id: str,
    graph_id: str,
    execution_id: str,
    timeout_seconds: int,
) -> GraphExecution | None:
    """
    Wait for an execution to reach a terminal or paused status using Redis pubsub.

    Handles the race condition between checking status and subscribing by
    re-checking the DB after the subscription is established.

    Args:
        user_id: User ID
        graph_id: Graph ID
        execution_id: Execution ID to wait for
        timeout_seconds: Max seconds to wait

    Returns:
        The execution with current status, or None if not found
    """
    exec_db = execution_db()

    # Quick check — maybe it's already done
    execution = await exec_db.get_graph_execution(
        user_id=user_id,
        execution_id=execution_id,
        include_node_executions=False,
    )
    if not execution:
        return None
    if execution.status in STOP_WAITING_STATUSES:
        logger.debug(
            f"Execution {execution_id} already in stop-waiting state: "
            f"{execution.status}"
        )
        return execution

    logger.info(
        f"Waiting up to {timeout_seconds}s for execution {execution_id} "
        f"(current status: {execution.status})"
    )

    event_bus = AsyncRedisExecutionEventBus()
    channel_key = f"{user_id}/{graph_id}/{execution_id}"

    # Mutable container so _subscribe_and_wait can surface the task even if
    # asyncio.wait_for cancels the coroutine before it returns.
    task_holder: list[asyncio.Task] = []

    try:
        result = await asyncio.wait_for(
            _subscribe_and_wait(
                event_bus, channel_key, user_id, execution_id, exec_db, task_holder
            ),
            timeout=timeout_seconds,
        )
        # Success path: the finally block below still runs (cancels the
        # consumer task, closes the bus) before this value is returned.
        return result
    except asyncio.TimeoutError:
        logger.info(f"Timeout waiting for execution {execution_id}")
    except Exception as e:
        logger.error(f"Error waiting for execution: {e}", exc_info=True)
    finally:
        # Clean up on every exit path: cancel the pubsub consumer task
        # (awaiting it so cancellation completes) and close the event bus.
        for task in task_holder:
            if not task.done():
                task.cancel()
                try:
                    await task
                except asyncio.CancelledError:
                    pass
        await event_bus.close()

    # Return current state on timeout/error
    return await exec_db.get_graph_execution(
        user_id=user_id,
        execution_id=execution_id,
        include_node_executions=False,
    )
async def _subscribe_and_wait(
    event_bus: AsyncRedisExecutionEventBus,
    channel_key: str,
    user_id: str,
    execution_id: str,
    exec_db: Any,
    task_holder: list[asyncio.Task],
) -> GraphExecution | None:
    """
    Subscribe to execution events and wait for a terminal/paused status.

    Appends the consumer task to ``task_holder`` so the caller can clean it up
    even if this coroutine is cancelled by ``asyncio.wait_for``.

    To avoid the race condition where the execution completes between the
    initial DB check and the Redis subscription, we:
    1. Start listening (which subscribes internally)
    2. Re-check the DB after subscription is active
    3. If still running, wait for pubsub events
    """
    listen_iter = event_bus.listen_events(channel_key).__aiter__()
    done = asyncio.Event()
    result_execution: GraphExecution | None = None

    async def _consume() -> None:
        # Background consumer: reads pubsub events until a stop-waiting
        # status appears, then re-fetches the execution from the DB so the
        # caller gets the authoritative record, not just the event payload.
        nonlocal result_execution
        try:
            async for event in listen_iter:
                if isinstance(event, GraphExecutionEvent):
                    logger.debug(f"Received execution update: {event.status}")
                    if event.status in STOP_WAITING_STATUSES:
                        result_execution = await exec_db.get_graph_execution(
                            user_id=user_id,
                            execution_id=execution_id,
                            include_node_executions=False,
                        )
                        done.set()
                        return
        except Exception as e:
            # On consumer failure, unblock the waiter; result_execution stays
            # None, so the caller falls back to its own DB read.
            logger.error(f"Error in execution consumer: {e}", exc_info=True)
            done.set()

    consume_task = asyncio.create_task(_consume())
    task_holder.append(consume_task)

    # Give the subscription a moment to establish, then re-check DB
    await asyncio.sleep(_POST_SUBSCRIBE_RECHECK_DELAY)
    execution = await exec_db.get_graph_execution(
        user_id=user_id,
        execution_id=execution_id,
        include_node_executions=False,
    )
    if execution and execution.status in STOP_WAITING_STATUSES:
        # Early exit: the consumer task is still running here; the caller
        # cancels it via task_holder.
        return execution

    # Wait for the pubsub consumer to find a terminal event.
    # NOTE(review): if the event stream ends without a terminal event and
    # without raising, `done` is never set — the caller's asyncio.wait_for
    # timeout appears to be the intended backstop; confirm.
    await done.wait()
    return result_execution
def get_execution_outputs(execution: GraphExecution | None) -> dict[str, Any] | None:
    """Return the outputs mapping of *execution*, or None when absent."""
    return None if execution is None else execution.outputs

View File

@@ -19,10 +19,9 @@ class FindLibraryAgentTool(BaseTool):
@property
def description(self) -> str:
return (
"Search for or list agents in the user's library. Use this to find "
"agents the user has already added to their library, including agents "
"they created or added from the marketplace. "
"Omit the query to list all agents."
"Search for agents in the user's library. Use this to find agents "
"the user has already added to their library, including agents they "
"created or added from the marketplace."
)
@property
@@ -32,13 +31,10 @@ class FindLibraryAgentTool(BaseTool):
"properties": {
"query": {
"type": "string",
"description": (
"Search query to find agents by name or description. "
"Omit to list all agents in the library."
),
"description": "Search query to find agents by name or description.",
},
},
"required": [],
"required": ["query"],
}
@property
@@ -49,7 +45,7 @@ class FindLibraryAgentTool(BaseTool):
self, user_id: str | None, session: ChatSession, **kwargs
) -> ToolResponseBase:
return await search_agents(
query=(kwargs.get("query") or "").strip(),
query=kwargs.get("query", "").strip(),
source="library",
session_id=session.session_id,
user_id=user_id,

View File

@@ -41,8 +41,6 @@ class ResponseType(str, Enum):
INPUT_VALIDATION_ERROR = "input_validation_error"
# Web fetch
WEB_FETCH = "web_fetch"
# Browser-based web browsing (JS-rendered pages)
BROWSE_WEB = "browse_web"
# Code execution
BASH_EXEC = "bash_exec"
# Feature request types
@@ -440,15 +438,6 @@ class WebFetchResponse(ToolResponseBase):
truncated: bool = False
class BrowseWebResponse(ToolResponseBase):
"""Response for browse_web tool."""
type: ResponseType = ResponseType.BROWSE_WEB
url: str
content: str
truncated: bool = False
class BashExecResponse(ToolResponseBase):
"""Response for bash_exec tool."""

View File

@@ -9,7 +9,6 @@ from backend.copilot.config import ChatConfig
from backend.copilot.model import ChatSession
from backend.copilot.tracking import track_agent_run_success, track_agent_scheduled
from backend.data.db_accessors import graph_db, library_db, user_db
from backend.data.execution import ExecutionStatus
from backend.data.graph import GraphModel
from backend.data.model import CredentialsMetaInput
from backend.executor import utils as execution_utils
@@ -21,15 +20,12 @@ from backend.util.timezone_utils import (
)
from .base import BaseTool
from .execution_utils import get_execution_outputs, wait_for_execution
from .helpers import get_inputs_from_schema
from .models import (
AgentDetails,
AgentDetailsResponse,
AgentOutputResponse,
ErrorResponse,
ExecutionOptions,
ExecutionOutputInfo,
ExecutionStartedResponse,
InputValidationErrorResponse,
SetupInfo,
@@ -70,7 +66,6 @@ class RunAgentInput(BaseModel):
schedule_name: str = ""
cron: str = ""
timezone: str = "UTC"
wait_for_result: int = Field(default=0, ge=0, le=300)
@field_validator(
"username_agent_slug",
@@ -152,14 +147,6 @@ class RunAgentTool(BaseTool):
"type": "string",
"description": "IANA timezone for schedule (default: UTC)",
},
"wait_for_result": {
"type": "integer",
"description": (
"Max seconds to wait for execution to complete (0-300). "
"If >0, blocks until the execution finishes or times out. "
"Returns execution outputs when complete."
),
},
},
"required": [],
}
@@ -354,7 +341,6 @@ class RunAgentTool(BaseTool):
graph=graph,
graph_credentials=graph_credentials,
inputs=params.inputs,
wait_for_result=params.wait_for_result,
)
except NotFoundError as e:
@@ -438,9 +424,8 @@ class RunAgentTool(BaseTool):
graph: GraphModel,
graph_credentials: dict[str, CredentialsMetaInput],
inputs: dict[str, Any],
wait_for_result: int = 0,
) -> ToolResponseBase:
"""Execute an agent immediately, optionally waiting for completion."""
"""Execute an agent immediately."""
session_id = session.session_id
# Check rate limits
@@ -477,91 +462,6 @@ class RunAgentTool(BaseTool):
)
library_agent_link = f"/library/agents/{library_agent.id}"
# If wait_for_result is requested, wait for execution to complete
if wait_for_result > 0:
logger.info(
f"Waiting up to {wait_for_result}s for execution {execution.id}"
)
completed = await wait_for_execution(
user_id=user_id,
graph_id=library_agent.graph_id,
execution_id=execution.id,
timeout_seconds=wait_for_result,
)
if completed and completed.status == ExecutionStatus.COMPLETED:
outputs = get_execution_outputs(completed)
return AgentOutputResponse(
message=(
f"Agent '{library_agent.name}' completed successfully. "
f"View at {library_agent_link}."
),
session_id=session_id,
agent_name=library_agent.name,
agent_id=library_agent.graph_id,
library_agent_id=library_agent.id,
library_agent_link=library_agent_link,
execution=ExecutionOutputInfo(
execution_id=execution.id,
status=completed.status.value,
started_at=completed.started_at,
ended_at=completed.ended_at,
outputs=outputs or {},
),
)
elif completed and completed.status == ExecutionStatus.FAILED:
error_detail = completed.stats.error if completed.stats else None
return ErrorResponse(
message=(
f"Agent '{library_agent.name}' execution failed. "
f"View details at {library_agent_link}."
),
session_id=session_id,
error=error_detail,
)
elif completed and completed.status == ExecutionStatus.TERMINATED:
error_detail = completed.stats.error if completed.stats else None
return ErrorResponse(
message=(
f"Agent '{library_agent.name}' execution was terminated. "
f"View details at {library_agent_link}."
),
session_id=session_id,
error=error_detail,
)
elif completed and completed.status == ExecutionStatus.REVIEW:
return ExecutionStartedResponse(
message=(
f"Agent '{library_agent.name}' is awaiting human review. "
f"Check at {library_agent_link}."
),
session_id=session_id,
execution_id=execution.id,
graph_id=library_agent.graph_id,
graph_name=library_agent.name,
library_agent_id=library_agent.id,
library_agent_link=library_agent_link,
status=ExecutionStatus.REVIEW.value,
)
else:
status = completed.status.value if completed else "unknown"
return ExecutionStartedResponse(
message=(
f"Agent '{library_agent.name}' is still {status} after "
f"{wait_for_result}s. Check results later at "
f"{library_agent_link}. "
f"Use view_agent_output with wait_if_running to check again."
),
session_id=session_id,
execution_id=execution.id,
graph_id=library_agent.graph_id,
graph_name=library_agent.name,
library_agent_id=library_agent.id,
library_agent_link=library_agent_link,
status=status,
)
return ExecutionStartedResponse(
message=(
f"Agent '{library_agent.name}' execution started successfully. "

View File

@@ -214,11 +214,7 @@ class WorkspaceWriteResponse(ToolResponseBase):
file_id: str
name: str
path: str
mime_type: str
size_bytes: int
# workspace:// URL the agent can embed directly in chat to give the user a link.
# Format: workspace://<file_id>#<mime_type> (frontend resolves to download URL)
download_url: str
source: str | None = None # "content", "base64", or "copied from <path>"
content_preview: str | None = None # First 200 chars for text files
@@ -684,21 +680,11 @@ class WriteWorkspaceFileTool(BaseTool):
except Exception:
pass
# Strip MIME parameters (e.g. "text/html; charset=utf-8" → "text/html")
# and normalise to lowercase so the fragment is URL-safe.
normalized_mime = (rec.mime_type or "").split(";", 1)[0].strip().lower()
download_url = (
f"workspace://{rec.id}#{normalized_mime}"
if normalized_mime
else f"workspace://{rec.id}"
)
return WorkspaceWriteResponse(
file_id=rec.id,
name=rec.name,
path=rec.path,
mime_type=normalized_mime,
size_bytes=rec.size_bytes,
download_url=download_url,
source=source,
content_preview=preview,
message=msg,

View File

@@ -79,12 +79,6 @@ INTEGRATION_WEBHOOK_INCLUDE: prisma.types.IntegrationWebhookInclude = {
}
LIBRARY_FOLDER_INCLUDE: prisma.types.LibraryFolderInclude = {
"LibraryAgents": {"where": {"isDeleted": False}},
"Children": {"where": {"isDeleted": False}},
}
def library_agent_include(
user_id: str,
include_nodes: bool = True,
@@ -111,7 +105,6 @@ def library_agent_include(
"""
result: prisma.types.LibraryAgentInclude = {
"Creator": True, # Always needed for creator info
"Folder": True, # Always needed for folder info
}
# Build AgentGraph include based on requested options

View File

@@ -184,7 +184,7 @@ async def find_webhook_by_credentials_and_props(
credentials_id: str,
webhook_type: str,
resource: str,
events: Optional[list[str]],
events: list[str],
) -> Webhook | None:
webhook = await IntegrationWebhook.prisma().find_first(
where={
@@ -192,7 +192,7 @@ async def find_webhook_by_credentials_and_props(
"credentialsId": credentials_id,
"webhookType": webhook_type,
"resource": resource,
**({"events": {"has_every": events}} if events else {}),
"events": {"has_every": events},
},
)
return Webhook.from_db(webhook) if webhook else None

View File

@@ -47,7 +47,6 @@ class ProviderName(str, Enum):
SLANT3D = "slant3d"
SMARTLEAD = "smartlead"
SMTP = "smtp"
TELEGRAM = "telegram"
TWITTER = "twitter"
TODOIST = "todoist"
UNREAL_SPEECH = "unreal_speech"

View File

@@ -15,7 +15,6 @@ def load_webhook_managers() -> dict["ProviderName", type["BaseWebhooksManager"]]
from .compass import CompassWebhookManager
from .github import GithubWebhooksManager
from .slant3d import Slant3DWebhooksManager
from .telegram import TelegramWebhooksManager
webhook_managers.update(
{
@@ -24,7 +23,6 @@ def load_webhook_managers() -> dict["ProviderName", type["BaseWebhooksManager"]]
CompassWebhookManager,
GithubWebhooksManager,
Slant3DWebhooksManager,
TelegramWebhooksManager,
]
}
)

View File

@@ -1,242 +0,0 @@
"""
Telegram Bot API Webhooks Manager.
Handles webhook registration and validation for Telegram bots.
"""
import hmac
import logging
from fastapi import HTTPException, Request
from strenum import StrEnum
from backend.data import integrations
from backend.data.model import APIKeyCredentials, Credentials
from backend.integrations.providers import ProviderName
from backend.util.exceptions import MissingConfigError
from backend.util.request import Requests
from backend.util.settings import Config
from ._base import BaseWebhooksManager
from .utils import webhook_ingress_url
logger = logging.getLogger(__name__)
class TelegramWebhookType(StrEnum):
    """Webhook categories for the Telegram provider.

    BOT is the only member: this manager registers webhooks at the bot level
    via Telegram's setWebhook API.
    """

    BOT = "bot"
class TelegramWebhooksManager(BaseWebhooksManager):
"""
Manages Telegram bot webhooks.
Telegram webhooks are registered via the setWebhook API method.
Incoming requests are validated using the secret_token header.
"""
PROVIDER_NAME = ProviderName.TELEGRAM
WebhookType = TelegramWebhookType
TELEGRAM_API_BASE = "https://api.telegram.org"
async def get_suitable_auto_webhook(
self,
user_id: str,
credentials: Credentials,
webhook_type: TelegramWebhookType,
resource: str,
events: list[str],
) -> integrations.Webhook:
"""
Telegram only supports one webhook per bot. Instead of creating a new
webhook object when events change (which causes the old one to be pruned
and deregistered — removing the ONLY webhook for the bot), we find the
existing webhook and update its events in place.
"""
app_config = Config()
if not app_config.platform_base_url:
raise MissingConfigError(
"PLATFORM_BASE_URL must be set to use Webhook functionality"
)
# Exact match — no re-registration needed
if webhook := await integrations.find_webhook_by_credentials_and_props(
user_id=user_id,
credentials_id=credentials.id,
webhook_type=webhook_type,
resource=resource,
events=events,
):
return webhook
# Find any existing webhook for the same bot, regardless of events
if existing := await integrations.find_webhook_by_credentials_and_props(
user_id=user_id,
credentials_id=credentials.id,
webhook_type=webhook_type,
resource=resource,
events=None, # Ignore events for this lookup
):
# Re-register with Telegram using the same URL but new allowed_updates
ingress_url = webhook_ingress_url(self.PROVIDER_NAME, existing.id)
_, config = await self._register_webhook(
credentials,
webhook_type,
resource,
events,
ingress_url,
existing.secret,
)
return await integrations.update_webhook(
existing.id, events=events, config=config
)
# No existing webhook at all — create a new one
return await self._create_webhook(
user_id=user_id,
webhook_type=webhook_type,
events=events,
resource=resource,
credentials=credentials,
)
@classmethod
async def validate_payload(
cls,
webhook: integrations.Webhook,
request: Request,
credentials: Credentials | None,
) -> tuple[dict, str]:
"""
Validates incoming Telegram webhook request.
Telegram sends X-Telegram-Bot-Api-Secret-Token header when secret_token
was set in setWebhook call.
Returns:
tuple: (payload dict, event_type string)
"""
# Verify secret token header
secret_header = request.headers.get("X-Telegram-Bot-Api-Secret-Token")
if not secret_header or not hmac.compare_digest(secret_header, webhook.secret):
raise HTTPException(
status_code=403,
detail="Invalid or missing X-Telegram-Bot-Api-Secret-Token",
)
payload = await request.json()
# Determine event type based on update content
if "message" in payload:
message = payload["message"]
if "text" in message:
event_type = "message.text"
elif "photo" in message:
event_type = "message.photo"
elif "voice" in message:
event_type = "message.voice"
elif "audio" in message:
event_type = "message.audio"
elif "document" in message:
event_type = "message.document"
elif "video" in message:
event_type = "message.video"
else:
logger.warning(
"Unknown Telegram webhook payload type; "
f"message.keys() = {message.keys()}"
)
event_type = "message.other"
elif "edited_message" in payload:
event_type = "message.edited_message"
elif "message_reaction" in payload:
event_type = "message_reaction"
else:
event_type = "unknown"
return payload, event_type
async def _register_webhook(
    self,
    credentials: Credentials,
    webhook_type: TelegramWebhookType,
    resource: str,
    events: list[str],
    ingress_url: str,
    secret: str,
) -> tuple[str, dict]:
    """
    Register this bot's webhook with Telegram via the setWebhook API.

    Args:
        credentials: Bot token credentials (must be APIKeyCredentials).
        webhook_type: Webhook kind (always BOT for Telegram).
        resource: Resource identifier (unused — Telegram bots are global).
        events: Event names to subscribe to, e.g. "message.text".
        ingress_url: Public URL Telegram should deliver updates to.
        secret: Secret token Telegram echoes back on every delivery.

    Returns:
        tuple: (provider_webhook_id — always "", config dict)

    Raises:
        ValueError: If credentials are not an API key, or Telegram
            rejects the setWebhook call.
    """
    if not isinstance(credentials, APIKeyCredentials):
        raise ValueError("API key (bot token) is required for Telegram webhooks")

    bot_token = credentials.api_key.get_secret_value()
    api_url = f"{self.TELEGRAM_API_BASE}/bot{bot_token}/setWebhook"

    # Translate our "<update>.<subtype>" event names into Telegram's
    # allowed_updates list; with no filter, use the default update set.
    if events:
        update_types = {event.split(".")[0] for event in events}
        # "message.edited_message" additionally requires the
        # "edited_message" update type.
        update_types.update(
            "edited_message" for event in events if "edited_message" in event
        )
        allowed_updates = sorted(update_types)
    else:
        allowed_updates = ["message", "message_reaction"]

    request_body = {
        "url": ingress_url,
        "secret_token": secret,
        "allowed_updates": allowed_updates,
    }
    response = await Requests().post(api_url, json=request_body)
    result = response.json()
    if not result.get("ok"):
        error_desc = result.get("description", "Unknown error")
        raise ValueError(f"Failed to set Telegram webhook: {error_desc}")

    # Telegram doesn't return a webhook ID, use empty string
    return "", {"url": ingress_url, "allowed_updates": allowed_updates}
async def _deregister_webhook(
    self, webhook: integrations.Webhook, credentials: Credentials
) -> None:
    """
    Remove the webhook from Telegram's servers.

    Telegram has no dedicated delete endpoint; calling setWebhook with
    an empty URL drops the registration. Failures are logged rather
    than raised, so cleanup is best-effort.
    """
    if not isinstance(credentials, APIKeyCredentials):
        raise ValueError("API key (bot token) is required for Telegram webhooks")

    bot_token = credentials.api_key.get_secret_value()
    endpoint = f"{self.TELEGRAM_API_BASE}/bot{bot_token}/setWebhook"

    # An empty "url" tells Telegram to remove the webhook entirely.
    response = await Requests().post(endpoint, json={"url": ""})
    result = response.json()
    if not result.get("ok"):
        error_desc = result.get("description", "Unknown error")
        logger.warning(f"Failed to deregister Telegram webhook: {error_desc}")

View File

@@ -372,8 +372,8 @@ class Config(UpdateTrackingModel["Config"], BaseSettings):
description="The port for the Agent Generator service",
)
agentgenerator_timeout: int = Field(
default=1800,
description="The timeout in seconds for Agent Generator service requests (includes retries for rate limits)",
default=30,
description="The timeout in seconds for individual Agent Generator HTTP requests (submit and poll)",
)
agentgenerator_use_dummy: bool = Field(
default=False,

View File

@@ -1,33 +0,0 @@
-- Migration: introduce user-defined library folders and link agents to them.

-- AlterTable
-- Nullable pointer from a library agent to its containing folder (NULL = root).
ALTER TABLE "LibraryAgent" ADD COLUMN "folderId" TEXT;

-- CreateTable
-- Per-user folder tree; "parentId" self-references for arbitrary nesting.
CREATE TABLE "LibraryFolder" (
    "id" TEXT NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "userId" TEXT NOT NULL,
    "name" TEXT NOT NULL,
    "icon" TEXT,
    "color" TEXT,
    "parentId" TEXT,
    "isDeleted" BOOLEAN NOT NULL DEFAULT false,

    CONSTRAINT "LibraryFolder_pkey" PRIMARY KEY ("id")
);

-- CreateIndex
-- Folder names unique among siblings for a given user.
-- NOTE(review): "parentId" is nullable and Postgres treats NULLs as distinct
-- in unique indexes, so duplicate names at the root level are NOT prevented
-- by this constraint — confirm whether that is intended.
CREATE UNIQUE INDEX "LibraryFolder_userId_parentId_name_key" ON "LibraryFolder"("userId", "parentId", "name");

-- CreateIndex
-- Speeds up listing the agents inside a folder.
CREATE INDEX "LibraryAgent_folderId_idx" ON "LibraryAgent"("folderId");

-- AddForeignKey
-- RESTRICT: a folder cannot be deleted while it still contains agents.
ALTER TABLE "LibraryAgent" ADD CONSTRAINT "LibraryAgent_folderId_fkey" FOREIGN KEY ("folderId") REFERENCES "LibraryFolder"("id") ON DELETE RESTRICT ON UPDATE CASCADE;

-- AddForeignKey
-- Folders disappear with their owning user.
ALTER TABLE "LibraryFolder" ADD CONSTRAINT "LibraryFolder_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;

-- AddForeignKey
-- Deleting a parent folder cascades to its child folders.
ALTER TABLE "LibraryFolder" ADD CONSTRAINT "LibraryFolder_parentId_fkey" FOREIGN KEY ("parentId") REFERENCES "LibraryFolder"("id") ON DELETE CASCADE ON UPDATE CASCADE;

View File

@@ -1,97 +0,0 @@
-- This migration creates a materialized view for suggested blocks based on execution counts
-- The view aggregates execution counts per block for the last 14 days
--
-- IMPORTANT: For production environments, pg_cron is REQUIRED for automatic refresh
-- Prerequisites for production:
--   1. pg_cron extension must be installed: CREATE EXTENSION pg_cron;
--   2. pg_cron must be configured in postgresql.conf:
--      shared_preload_libraries = 'pg_cron'
--      cron.database_name = 'your_database_name'
--
-- For development environments without pg_cron:
--   The migration will succeed but you must manually refresh views with:
--   SET search_path TO platform;
--   SELECT refresh_suggested_blocks_view();

-- Check if pg_cron extension is installed
-- (warning only — the rest of the migration proceeds either way)
DO $$
DECLARE
    has_pg_cron BOOLEAN;
BEGIN
    SELECT EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'pg_cron') INTO has_pg_cron;

    IF NOT has_pg_cron THEN
        RAISE WARNING 'pg_cron is not installed. Materialized view will be created but will NOT refresh automatically. For production, install pg_cron. For development, manually refresh with: SELECT refresh_suggested_blocks_view();';
    END IF;
END
$$;

-- Create materialized view for suggested blocks based on execution counts in last 14 days
-- The 14-day threshold is hardcoded to ensure consistent behavior
CREATE MATERIALIZED VIEW IF NOT EXISTS "mv_suggested_blocks" AS
SELECT
    agent_node."agentBlockId" AS block_id,
    COUNT(execution.id) AS execution_count
FROM "AgentNodeExecution" execution
JOIN "AgentNode" agent_node ON execution."agentNodeId" = agent_node.id
WHERE execution."endedTime" >= (NOW() - INTERVAL '14 days')
GROUP BY agent_node."agentBlockId"
ORDER BY execution_count DESC;

-- Create unique index for concurrent refresh support
-- (REFRESH ... CONCURRENTLY requires at least one unique index on the view)
CREATE UNIQUE INDEX IF NOT EXISTS "idx_mv_suggested_blocks_block_id" ON "mv_suggested_blocks"("block_id");

-- Create refresh function
CREATE OR REPLACE FUNCTION refresh_suggested_blocks_view()
RETURNS void
LANGUAGE plpgsql
AS $$
DECLARE
    target_schema text := current_schema();
BEGIN
    -- Use CONCURRENTLY for better performance during refresh
    -- NOTE(review): REFRESH MATERIALIZED VIEW CONCURRENTLY cannot run inside
    -- a transaction block, and a plpgsql function always executes within one,
    -- so this call likely always errors and falls through to the
    -- non-concurrent fallback below — verify on the target PostgreSQL version.
    REFRESH MATERIALIZED VIEW CONCURRENTLY "mv_suggested_blocks";
    RAISE NOTICE 'Suggested blocks materialized view refreshed in schema % at %', target_schema, NOW();
EXCEPTION
    WHEN OTHERS THEN
        -- Fallback to non-concurrent refresh if concurrent fails
        REFRESH MATERIALIZED VIEW "mv_suggested_blocks";
        RAISE NOTICE 'Suggested blocks materialized view refreshed (non-concurrent) in schema % at %. Concurrent refresh failed due to: %', target_schema, NOW(), SQLERRM;
END;
$$;

-- Initial refresh of the materialized view
SELECT refresh_suggested_blocks_view();

-- Schedule automatic refresh every hour (only if pg_cron is available)
DO $$
DECLARE
    has_pg_cron BOOLEAN;
    current_schema_name text := current_schema();
    job_name text;
BEGIN
    -- Check if pg_cron extension exists
    SELECT EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'pg_cron') INTO has_pg_cron;

    IF has_pg_cron THEN
        -- Job name is schema-qualified so multiple schemas don't collide.
        job_name := format('refresh-suggested-blocks_%s', current_schema_name);

        -- Try to unschedule existing job (ignore errors if it doesn't exist)
        BEGIN
            PERFORM cron.unschedule(job_name);
        EXCEPTION WHEN OTHERS THEN
            NULL;
        END;

        -- Schedule the new job to run every hour
        PERFORM cron.schedule(
            job_name,
            '0 * * * *', -- Every hour at minute 0
            format('SET search_path TO %I; SELECT refresh_suggested_blocks_view();', current_schema_name)
        );
        RAISE NOTICE 'Scheduled job %; runs every hour for schema %', job_name, current_schema_name;
    ELSE
        RAISE WARNING 'Automatic refresh NOT configured - pg_cron is not available. Manually refresh with: SELECT refresh_suggested_blocks_view();';
    END IF;
END;
$$;

View File

@@ -1,7 +0,0 @@
-- This migration adds more than one value to an enum.
-- With PostgreSQL versions 11 and earlier, this is not possible
-- in a single migration. This can be worked around by creating
-- multiple migrations, each migration adding only one value to
-- the enum.

-- New permissions: create/update agent graphs, and add agents to the library.
-- NOTE(review): on PostgreSQL < 12, ALTER TYPE ... ADD VALUE cannot run
-- inside a transaction block — ensure the migration runner executes these
-- statements outside an explicit transaction there.
ALTER TYPE "APIKeyPermission" ADD VALUE 'WRITE_GRAPH';
ALTER TYPE "APIKeyPermission" ADD VALUE 'WRITE_LIBRARY';

View File

@@ -1610,101 +1610,6 @@ mccabe = ">=0.7.0,<0.8.0"
pycodestyle = ">=2.14.0,<2.15.0"
pyflakes = ">=3.4.0,<3.5.0"
[[package]]
name = "fonttools"
version = "4.61.1"
description = "Tools to manipulate font files"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "fonttools-4.61.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c7db70d57e5e1089a274cbb2b1fd635c9a24de809a231b154965d415d6c6d24"},
{file = "fonttools-4.61.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5fe9fd43882620017add5eabb781ebfbc6998ee49b35bd7f8f79af1f9f99a958"},
{file = "fonttools-4.61.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d8db08051fc9e7d8bc622f2112511b8107d8f27cd89e2f64ec45e9825e8288da"},
{file = "fonttools-4.61.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a76d4cb80f41ba94a6691264be76435e5f72f2cb3cab0b092a6212855f71c2f6"},
{file = "fonttools-4.61.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a13fc8aeb24bad755eea8f7f9d409438eb94e82cf86b08fe77a03fbc8f6a96b1"},
{file = "fonttools-4.61.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b846a1fcf8beadeb9ea4f44ec5bdde393e2f1569e17d700bfc49cd69bde75881"},
{file = "fonttools-4.61.1-cp310-cp310-win32.whl", hash = "sha256:78a7d3ab09dc47ac1a363a493e6112d8cabed7ba7caad5f54dbe2f08676d1b47"},
{file = "fonttools-4.61.1-cp310-cp310-win_amd64.whl", hash = "sha256:eff1ac3cc66c2ac7cda1e64b4e2f3ffef474b7335f92fc3833fc632d595fcee6"},
{file = "fonttools-4.61.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c6604b735bb12fef8e0efd5578c9fb5d3d8532d5001ea13a19cddf295673ee09"},
{file = "fonttools-4.61.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5ce02f38a754f207f2f06557523cd39a06438ba3aafc0639c477ac409fc64e37"},
{file = "fonttools-4.61.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:77efb033d8d7ff233385f30c62c7c79271c8885d5c9657d967ede124671bbdfb"},
{file = "fonttools-4.61.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:75c1a6dfac6abd407634420c93864a1e274ebc1c7531346d9254c0d8f6ca00f9"},
{file = "fonttools-4.61.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0de30bfe7745c0d1ffa2b0b7048fb7123ad0d71107e10ee090fa0b16b9452e87"},
{file = "fonttools-4.61.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:58b0ee0ab5b1fc9921eccfe11d1435added19d6494dde14e323f25ad2bc30c56"},
{file = "fonttools-4.61.1-cp311-cp311-win32.whl", hash = "sha256:f79b168428351d11e10c5aeb61a74e1851ec221081299f4cf56036a95431c43a"},
{file = "fonttools-4.61.1-cp311-cp311-win_amd64.whl", hash = "sha256:fe2efccb324948a11dd09d22136fe2ac8a97d6c1347cf0b58a911dcd529f66b7"},
{file = "fonttools-4.61.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:f3cb4a569029b9f291f88aafc927dd53683757e640081ca8c412781ea144565e"},
{file = "fonttools-4.61.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:41a7170d042e8c0024703ed13b71893519a1a6d6e18e933e3ec7507a2c26a4b2"},
{file = "fonttools-4.61.1-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:10d88e55330e092940584774ee5e8a6971b01fc2f4d3466a1d6c158230880796"},
{file = "fonttools-4.61.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:15acc09befd16a0fb8a8f62bc147e1a82817542d72184acca9ce6e0aeda9fa6d"},
{file = "fonttools-4.61.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e6bcdf33aec38d16508ce61fd81838f24c83c90a1d1b8c68982857038673d6b8"},
{file = "fonttools-4.61.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5fade934607a523614726119164ff621e8c30e8fa1ffffbbd358662056ba69f0"},
{file = "fonttools-4.61.1-cp312-cp312-win32.whl", hash = "sha256:75da8f28eff26defba42c52986de97b22106cb8f26515b7c22443ebc9c2d3261"},
{file = "fonttools-4.61.1-cp312-cp312-win_amd64.whl", hash = "sha256:497c31ce314219888c0e2fce5ad9178ca83fe5230b01a5006726cdf3ac9f24d9"},
{file = "fonttools-4.61.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8c56c488ab471628ff3bfa80964372fc13504ece601e0d97a78ee74126b2045c"},
{file = "fonttools-4.61.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dc492779501fa723b04d0ab1f5be046797fee17d27700476edc7ee9ae535a61e"},
{file = "fonttools-4.61.1-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:64102ca87e84261419c3747a0d20f396eb024bdbeb04c2bfb37e2891f5fadcb5"},
{file = "fonttools-4.61.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c1b526c8d3f615a7b1867f38a9410849c8f4aef078535742198e942fba0e9bd"},
{file = "fonttools-4.61.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:41ed4b5ec103bd306bb68f81dc166e77409e5209443e5773cb4ed837bcc9b0d3"},
{file = "fonttools-4.61.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b501c862d4901792adaec7c25b1ecc749e2662543f68bb194c42ba18d6eec98d"},
{file = "fonttools-4.61.1-cp313-cp313-win32.whl", hash = "sha256:4d7092bb38c53bbc78e9255a59158b150bcdc115a1e3b3ce0b5f267dc35dd63c"},
{file = "fonttools-4.61.1-cp313-cp313-win_amd64.whl", hash = "sha256:21e7c8d76f62ab13c9472ccf74515ca5b9a761d1bde3265152a6dc58700d895b"},
{file = "fonttools-4.61.1-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:fff4f534200a04b4a36e7ae3cb74493afe807b517a09e99cb4faa89a34ed6ecd"},
{file = "fonttools-4.61.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:d9203500f7c63545b4ce3799319fe4d9feb1a1b89b28d3cb5abd11b9dd64147e"},
{file = "fonttools-4.61.1-cp314-cp314-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fa646ecec9528bef693415c79a86e733c70a4965dd938e9a226b0fc64c9d2e6c"},
{file = "fonttools-4.61.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:11f35ad7805edba3aac1a3710d104592df59f4b957e30108ae0ba6c10b11dd75"},
{file = "fonttools-4.61.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b931ae8f62db78861b0ff1ac017851764602288575d65b8e8ff1963fed419063"},
{file = "fonttools-4.61.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b148b56f5de675ee16d45e769e69f87623a4944f7443850bf9a9376e628a89d2"},
{file = "fonttools-4.61.1-cp314-cp314-win32.whl", hash = "sha256:9b666a475a65f4e839d3d10473fad6d47e0a9db14a2f4a224029c5bfde58ad2c"},
{file = "fonttools-4.61.1-cp314-cp314-win_amd64.whl", hash = "sha256:4f5686e1fe5fce75d82d93c47a438a25bf0d1319d2843a926f741140b2b16e0c"},
{file = "fonttools-4.61.1-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:e76ce097e3c57c4bcb67c5aa24a0ecdbd9f74ea9219997a707a4061fbe2707aa"},
{file = "fonttools-4.61.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:9cfef3ab326780c04d6646f68d4b4742aae222e8b8ea1d627c74e38afcbc9d91"},
{file = "fonttools-4.61.1-cp314-cp314t-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:a75c301f96db737e1c5ed5fd7d77d9c34466de16095a266509e13da09751bd19"},
{file = "fonttools-4.61.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:91669ccac46bbc1d09e9273546181919064e8df73488ea087dcac3e2968df9ba"},
{file = "fonttools-4.61.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:c33ab3ca9d3ccd581d58e989d67554e42d8d4ded94ab3ade3508455fe70e65f7"},
{file = "fonttools-4.61.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:664c5a68ec406f6b1547946683008576ef8b38275608e1cee6c061828171c118"},
{file = "fonttools-4.61.1-cp314-cp314t-win32.whl", hash = "sha256:aed04cabe26f30c1647ef0e8fbb207516fd40fe9472e9439695f5c6998e60ac5"},
{file = "fonttools-4.61.1-cp314-cp314t-win_amd64.whl", hash = "sha256:2180f14c141d2f0f3da43f3a81bc8aa4684860f6b0e6f9e165a4831f24e6a23b"},
{file = "fonttools-4.61.1-py3-none-any.whl", hash = "sha256:17d2bf5d541add43822bcf0c43d7d847b160c9bb01d15d5007d84e2217aaa371"},
{file = "fonttools-4.61.1.tar.gz", hash = "sha256:6675329885c44657f826ef01d9e4fb33b9158e9d93c537d84ad8399539bc6f69"},
]
[package.extras]
all = ["brotli (>=1.0.1) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\"", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres ; platform_python_implementation == \"PyPy\"", "pycairo", "scipy ; platform_python_implementation != \"PyPy\"", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.45.0)", "unicodedata2 (>=17.0.0) ; python_version <= \"3.14\"", "xattr ; sys_platform == \"darwin\"", "zopfli (>=0.1.4)"]
graphite = ["lz4 (>=1.7.4.2)"]
interpolatable = ["munkres ; platform_python_implementation == \"PyPy\"", "pycairo", "scipy ; platform_python_implementation != \"PyPy\""]
lxml = ["lxml (>=4.0)"]
pathops = ["skia-pathops (>=0.5.0)"]
plot = ["matplotlib"]
repacker = ["uharfbuzz (>=0.45.0)"]
symfont = ["sympy"]
type1 = ["xattr ; sys_platform == \"darwin\""]
unicode = ["unicodedata2 (>=17.0.0) ; python_version <= \"3.14\""]
woff = ["brotli (>=1.0.1) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\"", "zopfli (>=0.1.4)"]
[[package]]
name = "fpdf2"
version = "2.8.6"
description = "Simple & fast PDF generation for Python"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "fpdf2-2.8.6-py3-none-any.whl", hash = "sha256:464658b896c6b0fcbf883abb316b8f0a52d582eb959d71822ba254d6c790bfdd"},
{file = "fpdf2-2.8.6.tar.gz", hash = "sha256:5132f26bbeee69a7ca6a292e4da1eb3241147b5aea9348b35e780ecd02bf5fc2"},
]
[package.dependencies]
defusedxml = "*"
fonttools = ">=4.34.0"
Pillow = ">=8.3.2,<9.2.dev0 || >=9.3.dev0"
[package.extras]
dev = ["bandit", "black", "mypy", "pre-commit", "pylint", "pyright", "semgrep", "zizmor"]
docs = ["lxml", "mkdocs", "mkdocs-git-revision-date-localized-plugin", "mkdocs-include-markdown-plugin", "mkdocs-macros-plugin", "mkdocs-material", "mkdocs-minify-plugin", "mkdocs-redirects", "mkdocs-with-pdf", "mknotebooks", "pdoc3"]
test = ["brotli", "camelot-py[base]", "endesive[full]", "pytest", "pytest-cov", "qrcode", "tabula-py", "typing-extensions (>=4.0) ; python_version < \"3.11\"", "uharfbuzz"]
[[package]]
name = "frozenlist"
version = "1.8.0"
@@ -8625,4 +8530,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.14"
content-hash = "3869bc3fb8ea50e7101daffce13edbe563c8af568cb751adfa31fb9bb5c8318a"
content-hash = "3ef62836d8321b9a3b8e897dade8dc6ca9022fd9468c53f384b0871b521ab343"

View File

@@ -89,7 +89,6 @@ croniter = "^6.0.0"
stagehand = "^0.5.1"
gravitas-md2gdocs = "^0.1.0"
posthog = "^7.6.0"
fpdf2 = "^2.8.6"
[tool.poetry.group.dev.dependencies]
aiohappyeyeballs = "^2.6.1"

View File

@@ -51,7 +51,6 @@ model User {
ChatSessions ChatSession[]
AgentPresets AgentPreset[]
LibraryAgents LibraryAgent[]
LibraryFolders LibraryFolder[]
Profile Profile[]
UserOnboarding UserOnboarding?
@@ -396,9 +395,6 @@ model LibraryAgent {
creatorId String?
Creator Profile? @relation(fields: [creatorId], references: [id])
folderId String?
Folder LibraryFolder? @relation(fields: [folderId], references: [id], onDelete: Restrict)
useGraphIsActiveVersion Boolean @default(false)
isFavorite Boolean @default(false)
@@ -411,30 +407,6 @@ model LibraryAgent {
@@unique([userId, agentGraphId, agentGraphVersion])
@@index([agentGraphId, agentGraphVersion])
@@index([creatorId])
@@index([folderId])
}
model LibraryFolder {
id String @id @default(uuid())
createdAt DateTime @default(now())
updatedAt DateTime @default(now()) @updatedAt
userId String
User User @relation(fields: [userId], references: [id], onDelete: Cascade)
name String
icon String?
color String?
parentId String?
Parent LibraryFolder? @relation("FolderHierarchy", fields: [parentId], references: [id], onDelete: Cascade)
Children LibraryFolder[] @relation("FolderHierarchy")
isDeleted Boolean @default(false)
LibraryAgents LibraryAgent[]
@@unique([userId, parentId, name]) // Name unique per parent per user
}
////////////////////////////////////////////////////////////
@@ -948,17 +920,6 @@ view mv_review_stats {
// Refresh uses CONCURRENTLY to avoid blocking reads
}
// Note: This is actually a MATERIALIZED VIEW in the database
// Refreshed automatically every hour via pg_cron (with fallback to manual refresh)
view mv_suggested_blocks {
block_id String @unique
execution_count Int
// Pre-aggregated execution counts per block for the last 14 days
// Used by builder suggestions for ordering blocks by popularity
// Refresh uses CONCURRENTLY to avoid blocking reads
}
model StoreListing {
id String @id @default(uuid())
createdAt DateTime @default(now())
@@ -1130,11 +1091,9 @@ enum APIKeyPermission {
IDENTITY // Info about the authenticated user
EXECUTE_GRAPH // Can execute agent graphs
READ_GRAPH // Can get graph versions and details
WRITE_GRAPH // Can create and update agent graphs
EXECUTE_BLOCK // Can execute individual blocks
READ_BLOCK // Can get block information
READ_STORE // Can read store agents and creators
WRITE_LIBRARY // Can add agents to library
USE_TOOLS // Can use chat tools via external API
MANAGE_INTEGRATIONS // Can initiate OAuth flows and complete them
READ_INTEGRATIONS // Can list credentials and providers

View File

@@ -38,8 +38,6 @@
"can_access_graph": true,
"is_latest_version": true,
"is_favorite": false,
"folder_id": null,
"folder_name": null,
"recommended_schedule_cron": null,
"settings": {
"human_in_the_loop_safe_mode": true,
@@ -85,8 +83,6 @@
"can_access_graph": false,
"is_latest_version": true,
"is_favorite": false,
"folder_id": null,
"folder_name": null,
"recommended_schedule_cron": null,
"settings": {
"human_in_the_loop_safe_mode": true,

View File

@@ -2,7 +2,7 @@
Tests for the Agent Generator external service client.
This test suite verifies the external Agent Generator service integration,
including service detection, API calls, and error handling.
including service detection, async polling, and error handling.
"""
from unittest.mock import AsyncMock, MagicMock, patch
@@ -49,6 +49,292 @@ class TestServiceConfiguration:
assert url == "http://agent-generator.local:8000"
class TestSubmitAndPoll:
    """Test the _submit_and_poll helper that handles async job polling."""

    def setup_method(self):
        # Reset module-level singletons so each test builds a fresh
        # settings object and HTTP client.
        service._settings = None
        service._client = None

    @pytest.mark.asyncio
    async def test_successful_submit_and_poll(self):
        """Test normal submit -> poll -> completed flow."""
        submit_resp = MagicMock()
        submit_resp.json.return_value = {"job_id": "job-123", "status": "accepted"}
        submit_resp.raise_for_status = MagicMock()

        poll_resp = MagicMock()
        poll_resp.json.return_value = {
            "job_id": "job-123",
            "status": "completed",
            "result": {"type": "instructions", "steps": ["Step 1"]},
        }
        poll_resp.raise_for_status = MagicMock()

        mock_client = AsyncMock()
        mock_client.post.return_value = submit_resp
        mock_client.get.return_value = poll_resp

        # asyncio.sleep is stubbed so the poll loop runs instantly.
        with (
            patch.object(service, "_get_client", return_value=mock_client),
            patch("asyncio.sleep", new_callable=AsyncMock),
        ):
            result = await service._submit_and_poll("/api/test", {"key": "value"})

        assert result == {"type": "instructions", "steps": ["Step 1"]}
        mock_client.post.assert_called_once_with("/api/test", json={"key": "value"})
        mock_client.get.assert_called_once_with("/api/jobs/job-123")

    @pytest.mark.asyncio
    async def test_poll_returns_failed_job(self):
        """Test submit -> poll -> failed flow."""
        submit_resp = MagicMock()
        submit_resp.json.return_value = {"job_id": "job-456", "status": "accepted"}
        submit_resp.raise_for_status = MagicMock()

        poll_resp = MagicMock()
        poll_resp.json.return_value = {
            "job_id": "job-456",
            "status": "failed",
            "error": "Generation failed",
        }
        poll_resp.raise_for_status = MagicMock()

        mock_client = AsyncMock()
        mock_client.post.return_value = submit_resp
        mock_client.get.return_value = poll_resp

        with (
            patch.object(service, "_get_client", return_value=mock_client),
            patch("asyncio.sleep", new_callable=AsyncMock),
        ):
            result = await service._submit_and_poll("/api/test", {})

        assert result["type"] == "error"
        assert result["error_type"] == "job_failed"
        assert "Generation failed" in result["error"]

    @pytest.mark.asyncio
    async def test_submit_http_error(self):
        """Test HTTP error during job submission."""
        mock_response = MagicMock()
        mock_response.status_code = 500

        mock_client = AsyncMock()
        mock_client.post.side_effect = httpx.HTTPStatusError(
            "Server error", request=MagicMock(), response=mock_response
        )

        with patch.object(service, "_get_client", return_value=mock_client):
            result = await service._submit_and_poll("/api/test", {})

        assert result["type"] == "error"
        assert result["error_type"] == "http_error"

    @pytest.mark.asyncio
    async def test_submit_connection_error(self):
        """Test connection error during job submission."""
        mock_client = AsyncMock()
        mock_client.post.side_effect = httpx.RequestError("Connection failed")

        with patch.object(service, "_get_client", return_value=mock_client):
            result = await service._submit_and_poll("/api/test", {})

        assert result["type"] == "error"
        assert result["error_type"] == "connection_error"

    @pytest.mark.asyncio
    async def test_no_job_id_in_submit_response(self):
        """Test submit response missing job_id."""
        submit_resp = MagicMock()
        submit_resp.json.return_value = {"status": "accepted"}  # no job_id
        submit_resp.raise_for_status = MagicMock()

        mock_client = AsyncMock()
        mock_client.post.return_value = submit_resp

        with patch.object(service, "_get_client", return_value=mock_client):
            result = await service._submit_and_poll("/api/test", {})

        assert result["type"] == "error"
        assert result["error_type"] == "invalid_response"

    @pytest.mark.asyncio
    async def test_poll_retries_on_transient_network_error(self):
        """Test that transient network errors during polling are retried."""
        submit_resp = MagicMock()
        submit_resp.json.return_value = {"job_id": "job-789"}
        submit_resp.raise_for_status = MagicMock()

        ok_poll_resp = MagicMock()
        ok_poll_resp.json.return_value = {
            "job_id": "job-789",
            "status": "completed",
            "result": {"data": "ok"},
        }
        ok_poll_resp.raise_for_status = MagicMock()

        mock_client = AsyncMock()
        mock_client.post.return_value = submit_resp
        # First poll fails with transient error, second succeeds
        mock_client.get.side_effect = [
            httpx.RequestError("transient"),
            ok_poll_resp,
        ]

        with (
            patch.object(service, "_get_client", return_value=mock_client),
            patch("asyncio.sleep", new_callable=AsyncMock),
        ):
            result = await service._submit_and_poll("/api/test", {})

        assert result == {"data": "ok"}
        assert mock_client.get.call_count == 2

    @pytest.mark.asyncio
    async def test_poll_returns_404_for_expired_job(self):
        """Test that 404 during polling returns job_not_found error."""
        submit_resp = MagicMock()
        submit_resp.json.return_value = {"job_id": "job-expired"}
        submit_resp.raise_for_status = MagicMock()

        mock_404_response = MagicMock()
        mock_404_response.status_code = 404

        mock_client = AsyncMock()
        mock_client.post.return_value = submit_resp
        mock_client.get.side_effect = httpx.HTTPStatusError(
            "Not Found", request=MagicMock(), response=mock_404_response
        )

        with (
            patch.object(service, "_get_client", return_value=mock_client),
            patch("asyncio.sleep", new_callable=AsyncMock),
        ):
            result = await service._submit_and_poll("/api/test", {})

        assert result["type"] == "error"
        assert result["error_type"] == "job_not_found"

    @pytest.mark.asyncio
    async def test_poll_retries_on_transient_http_status(self):
        """Test that transient HTTP status codes (429, 503, etc.) are retried."""
        submit_resp = MagicMock()
        submit_resp.json.return_value = {"job_id": "job-transient"}
        submit_resp.raise_for_status = MagicMock()

        mock_429_response = MagicMock()
        mock_429_response.status_code = 429

        ok_poll_resp = MagicMock()
        ok_poll_resp.json.return_value = {
            "job_id": "job-transient",
            "status": "completed",
            "result": {"data": "recovered"},
        }
        ok_poll_resp.raise_for_status = MagicMock()

        mock_client = AsyncMock()
        mock_client.post.return_value = submit_resp
        mock_client.get.side_effect = [
            httpx.HTTPStatusError(
                "Too Many Requests", request=MagicMock(), response=mock_429_response
            ),
            ok_poll_resp,
        ]

        with (
            patch.object(service, "_get_client", return_value=mock_client),
            patch("asyncio.sleep", new_callable=AsyncMock),
        ):
            result = await service._submit_and_poll("/api/test", {})

        assert result == {"data": "recovered"}
        assert mock_client.get.call_count == 2

    @pytest.mark.asyncio
    async def test_poll_does_not_retry_non_transient_http_status(self):
        """Test that non-transient HTTP status codes (e.g. 500) fail immediately."""
        submit_resp = MagicMock()
        submit_resp.json.return_value = {"job_id": "job-500"}
        submit_resp.raise_for_status = MagicMock()

        mock_500_response = MagicMock()
        mock_500_response.status_code = 500

        mock_client = AsyncMock()
        mock_client.post.return_value = submit_resp
        mock_client.get.side_effect = httpx.HTTPStatusError(
            "Internal Server Error", request=MagicMock(), response=mock_500_response
        )

        with (
            patch.object(service, "_get_client", return_value=mock_client),
            patch("asyncio.sleep", new_callable=AsyncMock),
        ):
            result = await service._submit_and_poll("/api/test", {})

        assert result["type"] == "error"
        assert result["error_type"] == "http_error"
        # exactly one poll attempt — no retry on 500
        assert mock_client.get.call_count == 1

    @pytest.mark.asyncio
    async def test_poll_timeout(self):
        """Test that polling times out after MAX_POLL_TIME_SECONDS."""
        submit_resp = MagicMock()
        submit_resp.json.return_value = {"job_id": "job-slow"}
        submit_resp.raise_for_status = MagicMock()

        running_resp = MagicMock()
        running_resp.json.return_value = {"job_id": "job-slow", "status": "running"}
        running_resp.raise_for_status = MagicMock()

        mock_client = AsyncMock()
        mock_client.post.return_value = submit_resp
        mock_client.get.return_value = running_resp

        # Simulate time passing: first call returns 0.0 (start), then jumps past limit
        monotonic_values = iter([0.0, 0.0, 100.0])

        with (
            patch.object(service, "_get_client", return_value=mock_client),
            patch.object(service, "MAX_POLL_TIME_SECONDS", 50.0),
            patch.object(service, "POLL_INTERVAL_SECONDS", 0.01),
            patch("asyncio.sleep", new_callable=AsyncMock),
            patch("backend.copilot.tools.agent_generator.service.time") as mock_time,
        ):
            mock_time.monotonic.side_effect = monotonic_values
            result = await service._submit_and_poll("/api/test", {})

        assert result["type"] == "error"
        assert result["error_type"] == "timeout"

    @pytest.mark.asyncio
    async def test_poll_gives_up_after_consecutive_transient_errors(self):
        """Test that polling gives up after MAX_CONSECUTIVE_POLL_ERRORS."""
        submit_resp = MagicMock()
        submit_resp.json.return_value = {"job_id": "job-flaky"}
        submit_resp.raise_for_status = MagicMock()

        mock_client = AsyncMock()
        mock_client.post.return_value = submit_resp
        # Every poll attempt raises — the error budget must run out.
        mock_client.get.side_effect = httpx.RequestError("network down")

        # Ensure monotonic always returns 0 so timeout doesn't kick in
        with (
            patch.object(service, "_get_client", return_value=mock_client),
            patch.object(service, "MAX_POLL_TIME_SECONDS", 9999.0),
            patch.object(service, "POLL_INTERVAL_SECONDS", 0.01),
            patch("asyncio.sleep", new_callable=AsyncMock),
            patch("backend.copilot.tools.agent_generator.service.time") as mock_time,
        ):
            mock_time.monotonic.return_value = 0.0
            result = await service._submit_and_poll("/api/test", {})

        assert result["type"] == "error"
        assert result["error_type"] == "poll_error"
        assert mock_client.get.call_count == service.MAX_CONSECUTIVE_POLL_ERRORS
class TestDecomposeGoalExternal:
"""Test decompose_goal_external function."""
@@ -60,40 +346,37 @@ class TestDecomposeGoalExternal:
@pytest.mark.asyncio
async def test_decompose_goal_returns_instructions(self):
"""Test successful decomposition returning instructions."""
mock_response = MagicMock()
mock_response.json.return_value = {
"success": True,
"type": "instructions",
"steps": ["Step 1", "Step 2"],
}
mock_response.raise_for_status = MagicMock()
mock_client = AsyncMock()
mock_client.post.return_value = mock_response
with patch.object(service, "_get_client", return_value=mock_client):
with (
patch.object(service, "_is_dummy_mode", return_value=False),
patch.object(
service, "_submit_and_poll", new_callable=AsyncMock
) as mock_poll,
):
mock_poll.return_value = {
"type": "instructions",
"steps": ["Step 1", "Step 2"],
}
result = await service.decompose_goal_external("Build a chatbot")
assert result == {"type": "instructions", "steps": ["Step 1", "Step 2"]}
mock_client.post.assert_called_once_with(
"/api/decompose-description", json={"description": "Build a chatbot"}
mock_poll.assert_called_once_with(
"/api/decompose-description",
{"description": "Build a chatbot"},
)
@pytest.mark.asyncio
async def test_decompose_goal_returns_clarifying_questions(self):
"""Test decomposition returning clarifying questions."""
mock_response = MagicMock()
mock_response.json.return_value = {
"success": True,
"type": "clarifying_questions",
"questions": ["What platform?", "What language?"],
}
mock_response.raise_for_status = MagicMock()
mock_client = AsyncMock()
mock_client.post.return_value = mock_response
with patch.object(service, "_get_client", return_value=mock_client):
with (
patch.object(service, "_is_dummy_mode", return_value=False),
patch.object(
service, "_submit_and_poll", new_callable=AsyncMock
) as mock_poll,
):
mock_poll.return_value = {
"type": "clarifying_questions",
"questions": ["What platform?", "What language?"],
}
result = await service.decompose_goal_external("Build something")
assert result == {
@@ -104,18 +387,13 @@ class TestDecomposeGoalExternal:
@pytest.mark.asyncio
async def test_decompose_goal_with_context(self):
"""Test decomposition with additional context enriched into description."""
mock_response = MagicMock()
mock_response.json.return_value = {
"success": True,
"type": "instructions",
"steps": ["Step 1"],
}
mock_response.raise_for_status = MagicMock()
mock_client = AsyncMock()
mock_client.post.return_value = mock_response
with patch.object(service, "_get_client", return_value=mock_client):
with (
patch.object(service, "_is_dummy_mode", return_value=False),
patch.object(
service, "_submit_and_poll", new_callable=AsyncMock
) as mock_poll,
):
mock_poll.return_value = {"type": "instructions", "steps": ["Step 1"]}
await service.decompose_goal_external(
"Build a chatbot", context="Use Python"
)
@@ -123,27 +401,25 @@ class TestDecomposeGoalExternal:
expected_description = (
"Build a chatbot\n\nAdditional context from user:\nUse Python"
)
mock_client.post.assert_called_once_with(
mock_poll.assert_called_once_with(
"/api/decompose-description",
json={"description": expected_description},
{"description": expected_description},
)
@pytest.mark.asyncio
async def test_decompose_goal_returns_unachievable_goal(self):
"""Test decomposition returning unachievable goal response."""
mock_response = MagicMock()
mock_response.json.return_value = {
"success": True,
"type": "unachievable_goal",
"reason": "Cannot do X",
"suggested_goal": "Try Y instead",
}
mock_response.raise_for_status = MagicMock()
mock_client = AsyncMock()
mock_client.post.return_value = mock_response
with patch.object(service, "_get_client", return_value=mock_client):
with (
patch.object(service, "_is_dummy_mode", return_value=False),
patch.object(
service, "_submit_and_poll", new_callable=AsyncMock
) as mock_poll,
):
mock_poll.return_value = {
"type": "unachievable_goal",
"reason": "Cannot do X",
"suggested_goal": "Try Y instead",
}
result = await service.decompose_goal_external("Do something impossible")
assert result == {
@@ -153,58 +429,40 @@ class TestDecomposeGoalExternal:
}
@pytest.mark.asyncio
async def test_decompose_goal_handles_http_error(self):
"""Test decomposition handles HTTP errors gracefully."""
mock_response = MagicMock()
mock_response.status_code = 500
mock_client = AsyncMock()
mock_client.post.side_effect = httpx.HTTPStatusError(
"Server error", request=MagicMock(), response=mock_response
)
with patch.object(service, "_get_client", return_value=mock_client):
async def test_decompose_goal_handles_poll_error(self):
"""Test that errors from _submit_and_poll are passed through."""
with (
patch.object(service, "_is_dummy_mode", return_value=False),
patch.object(
service, "_submit_and_poll", new_callable=AsyncMock
) as mock_poll,
):
mock_poll.return_value = {
"type": "error",
"error": "HTTP error calling Agent Generator: Server error",
"error_type": "http_error",
}
result = await service.decompose_goal_external("Build a chatbot")
assert result is not None
assert result.get("type") == "error"
assert result.get("error_type") == "http_error"
assert "Server error" in result.get("error", "")
@pytest.mark.asyncio
async def test_decompose_goal_handles_request_error(self):
"""Test decomposition handles request errors gracefully."""
mock_client = AsyncMock()
mock_client.post.side_effect = httpx.RequestError("Connection failed")
with patch.object(service, "_get_client", return_value=mock_client):
async def test_decompose_goal_handles_unexpected_exception(self):
"""Test that unexpected exceptions are caught and returned as errors."""
with (
patch.object(service, "_is_dummy_mode", return_value=False),
patch.object(
service, "_submit_and_poll", new_callable=AsyncMock
) as mock_poll,
):
mock_poll.side_effect = RuntimeError("unexpected")
result = await service.decompose_goal_external("Build a chatbot")
assert result is not None
assert result.get("type") == "error"
assert result.get("error_type") == "connection_error"
assert "Connection failed" in result.get("error", "")
@pytest.mark.asyncio
async def test_decompose_goal_handles_service_error(self):
"""Test decomposition handles service returning error."""
mock_response = MagicMock()
mock_response.json.return_value = {
"success": False,
"error": "Internal error",
"error_type": "internal_error",
}
mock_response.raise_for_status = MagicMock()
mock_client = AsyncMock()
mock_client.post.return_value = mock_response
with patch.object(service, "_get_client", return_value=mock_client):
result = await service.decompose_goal_external("Build a chatbot")
assert result is not None
assert result.get("type") == "error"
assert result.get("error") == "Internal error"
assert result.get("error_type") == "internal_error"
assert result.get("error_type") == "unexpected_error"
class TestGenerateAgentExternal:
@@ -223,39 +481,59 @@ class TestGenerateAgentExternal:
"nodes": [],
"links": [],
}
mock_response = MagicMock()
mock_response.json.return_value = {
"success": True,
"agent_json": agent_json,
}
mock_response.raise_for_status = MagicMock()
mock_client = AsyncMock()
mock_client.post.return_value = mock_response
with (
patch.object(service, "_is_dummy_mode", return_value=False),
patch.object(
service, "_submit_and_poll", new_callable=AsyncMock
) as mock_poll,
):
mock_poll.return_value = {"success": True, "agent_json": agent_json}
instructions = {"type": "instructions", "steps": ["Step 1"]}
with patch.object(service, "_get_client", return_value=mock_client):
instructions = {"type": "instructions", "steps": ["Step 1"]}
result = await service.generate_agent_external(instructions)
assert result == agent_json
mock_client.post.assert_called_once_with(
"/api/generate-agent", json={"instructions": instructions}
mock_poll.assert_called_once_with(
"/api/generate-agent",
{"instructions": instructions},
)
@pytest.mark.asyncio
async def test_generate_agent_handles_error(self):
"""Test agent generation handles errors gracefully."""
mock_client = AsyncMock()
mock_client.post.side_effect = httpx.RequestError("Connection failed")
with patch.object(service, "_get_client", return_value=mock_client):
with (
patch.object(service, "_is_dummy_mode", return_value=False),
patch.object(
service, "_submit_and_poll", new_callable=AsyncMock
) as mock_poll,
):
mock_poll.return_value = {
"type": "error",
"error": "Connection failed",
"error_type": "connection_error",
}
result = await service.generate_agent_external({"steps": []})
assert result is not None
assert result.get("type") == "error"
assert result.get("error_type") == "connection_error"
assert "Connection failed" in result.get("error", "")
@pytest.mark.asyncio
async def test_generate_agent_missing_agent_json(self):
"""Test that missing agent_json in result returns an error."""
with (
patch.object(service, "_is_dummy_mode", return_value=False),
patch.object(
service, "_submit_and_poll", new_callable=AsyncMock
) as mock_poll,
):
mock_poll.return_value = {"success": True}
result = await service.generate_agent_external({"steps": ["Step 1"]})
assert result is not None
assert result.get("type") == "error"
assert result.get("error_type") == "invalid_response"
class TestGenerateAgentPatchExternal:
@@ -274,27 +552,24 @@ class TestGenerateAgentPatchExternal:
"nodes": [{"id": "1", "block_id": "test"}],
"links": [],
}
mock_response = MagicMock()
mock_response.json.return_value = {
"success": True,
"agent_json": updated_agent,
}
mock_response.raise_for_status = MagicMock()
mock_client = AsyncMock()
mock_client.post.return_value = mock_response
with (
patch.object(service, "_is_dummy_mode", return_value=False),
patch.object(
service, "_submit_and_poll", new_callable=AsyncMock
) as mock_poll,
):
mock_poll.return_value = {"success": True, "agent_json": updated_agent}
current_agent = {"name": "Old Agent", "nodes": [], "links": []}
with patch.object(service, "_get_client", return_value=mock_client):
current_agent = {"name": "Old Agent", "nodes": [], "links": []}
result = await service.generate_agent_patch_external(
"Add a new node", current_agent
)
assert result == updated_agent
mock_client.post.assert_called_once_with(
mock_poll.assert_called_once_with(
"/api/update-agent",
json={
{
"update_request": "Add a new node",
"current_agent_json": current_agent,
},
@@ -303,18 +578,16 @@ class TestGenerateAgentPatchExternal:
@pytest.mark.asyncio
async def test_generate_patch_returns_clarifying_questions(self):
"""Test patch generation returning clarifying questions."""
mock_response = MagicMock()
mock_response.json.return_value = {
"success": True,
"type": "clarifying_questions",
"questions": ["What type of node?"],
}
mock_response.raise_for_status = MagicMock()
mock_client = AsyncMock()
mock_client.post.return_value = mock_response
with patch.object(service, "_get_client", return_value=mock_client):
with (
patch.object(service, "_is_dummy_mode", return_value=False),
patch.object(
service, "_submit_and_poll", new_callable=AsyncMock
) as mock_poll,
):
mock_poll.return_value = {
"type": "clarifying_questions",
"questions": ["What type of node?"],
}
result = await service.generate_agent_patch_external(
"Add something", {"nodes": []}
)
@@ -355,9 +628,12 @@ class TestHealthCheck:
mock_client = AsyncMock()
mock_client.get.return_value = mock_response
with patch.object(service, "is_external_service_configured", return_value=True):
with patch.object(service, "_get_client", return_value=mock_client):
result = await service.health_check()
with (
patch.object(service, "is_external_service_configured", return_value=True),
patch.object(service, "_is_dummy_mode", return_value=False),
patch.object(service, "_get_client", return_value=mock_client),
):
result = await service.health_check()
assert result is True
mock_client.get.assert_called_once_with("/health")
@@ -375,9 +651,12 @@ class TestHealthCheck:
mock_client = AsyncMock()
mock_client.get.return_value = mock_response
with patch.object(service, "is_external_service_configured", return_value=True):
with patch.object(service, "_get_client", return_value=mock_client):
result = await service.health_check()
with (
patch.object(service, "is_external_service_configured", return_value=True),
patch.object(service, "_is_dummy_mode", return_value=False),
patch.object(service, "_get_client", return_value=mock_client),
):
result = await service.health_check()
assert result is False
@@ -387,9 +666,12 @@ class TestHealthCheck:
mock_client = AsyncMock()
mock_client.get.side_effect = httpx.RequestError("Connection failed")
with patch.object(service, "is_external_service_configured", return_value=True):
with patch.object(service, "_get_client", return_value=mock_client):
result = await service.health_check()
with (
patch.object(service, "is_external_service_configured", return_value=True),
patch.object(service, "_is_dummy_mode", return_value=False),
patch.object(service, "_get_client", return_value=mock_client),
):
result = await service.health_check()
assert result is False
@@ -419,7 +701,10 @@ class TestGetBlocksExternal:
mock_client = AsyncMock()
mock_client.get.return_value = mock_response
with patch.object(service, "_get_client", return_value=mock_client):
with (
patch.object(service, "_is_dummy_mode", return_value=False),
patch.object(service, "_get_client", return_value=mock_client),
):
result = await service.get_blocks_external()
assert result == blocks
@@ -431,7 +716,10 @@ class TestGetBlocksExternal:
mock_client = AsyncMock()
mock_client.get.side_effect = httpx.RequestError("Connection failed")
with patch.object(service, "_get_client", return_value=mock_client):
with (
patch.object(service, "_is_dummy_mode", return_value=False),
patch.object(service, "_get_client", return_value=mock_client),
):
result = await service.get_blocks_external()
assert result is None
@@ -459,26 +747,22 @@ class TestLibraryAgentsPassthrough:
},
]
mock_response = MagicMock()
mock_response.json.return_value = {
"success": True,
"type": "instructions",
"steps": ["Step 1"],
}
mock_response.raise_for_status = MagicMock()
mock_client = AsyncMock()
mock_client.post.return_value = mock_response
with patch.object(service, "_get_client", return_value=mock_client):
with (
patch.object(service, "_is_dummy_mode", return_value=False),
patch.object(
service, "_submit_and_poll", new_callable=AsyncMock
) as mock_poll,
):
mock_poll.return_value = {"type": "instructions", "steps": ["Step 1"]}
await service.decompose_goal_external(
"Send an email",
library_agents=library_agents,
)
# Verify library_agents was passed in the payload
call_args = mock_client.post.call_args
assert call_args[1]["json"]["library_agents"] == library_agents
call_args = mock_poll.call_args
payload = call_args[0][1]
assert payload["library_agents"] == library_agents
@pytest.mark.asyncio
async def test_generate_agent_passes_library_agents(self):
@@ -494,25 +778,24 @@ class TestLibraryAgentsPassthrough:
},
]
mock_response = MagicMock()
mock_response.json.return_value = {
"success": True,
"agent_json": {"name": "Test Agent", "nodes": []},
}
mock_response.raise_for_status = MagicMock()
mock_client = AsyncMock()
mock_client.post.return_value = mock_response
with patch.object(service, "_get_client", return_value=mock_client):
with (
patch.object(service, "_is_dummy_mode", return_value=False),
patch.object(
service, "_submit_and_poll", new_callable=AsyncMock
) as mock_poll,
):
mock_poll.return_value = {
"agent_json": {"name": "Test Agent", "nodes": []},
}
await service.generate_agent_external(
{"steps": ["Step 1"]},
library_agents=library_agents,
)
# Verify library_agents was passed in the payload
call_args = mock_client.post.call_args
assert call_args[1]["json"]["library_agents"] == library_agents
call_args = mock_poll.call_args
payload = call_args[0][1]
assert payload["library_agents"] == library_agents
@pytest.mark.asyncio
async def test_generate_agent_patch_passes_library_agents(self):
@@ -528,17 +811,15 @@ class TestLibraryAgentsPassthrough:
},
]
mock_response = MagicMock()
mock_response.json.return_value = {
"success": True,
"agent_json": {"name": "Updated Agent", "nodes": []},
}
mock_response.raise_for_status = MagicMock()
mock_client = AsyncMock()
mock_client.post.return_value = mock_response
with patch.object(service, "_get_client", return_value=mock_client):
with (
patch.object(service, "_is_dummy_mode", return_value=False),
patch.object(
service, "_submit_and_poll", new_callable=AsyncMock
) as mock_poll,
):
mock_poll.return_value = {
"agent_json": {"name": "Updated Agent", "nodes": []},
}
await service.generate_agent_patch_external(
"Add error handling",
{"name": "Original Agent", "nodes": []},
@@ -546,29 +827,26 @@ class TestLibraryAgentsPassthrough:
)
# Verify library_agents was passed in the payload
call_args = mock_client.post.call_args
assert call_args[1]["json"]["library_agents"] == library_agents
call_args = mock_poll.call_args
payload = call_args[0][1]
assert payload["library_agents"] == library_agents
@pytest.mark.asyncio
async def test_decompose_goal_without_library_agents(self):
"""Test that decompose goal works without library_agents."""
mock_response = MagicMock()
mock_response.json.return_value = {
"success": True,
"type": "instructions",
"steps": ["Step 1"],
}
mock_response.raise_for_status = MagicMock()
mock_client = AsyncMock()
mock_client.post.return_value = mock_response
with patch.object(service, "_get_client", return_value=mock_client):
with (
patch.object(service, "_is_dummy_mode", return_value=False),
patch.object(
service, "_submit_and_poll", new_callable=AsyncMock
) as mock_poll,
):
mock_poll.return_value = {"type": "instructions", "steps": ["Step 1"]}
await service.decompose_goal_external("Build a workflow")
# Verify library_agents was NOT passed when not provided
call_args = mock_client.post.call_args
assert "library_agents" not in call_args[1]["json"]
call_args = mock_poll.call_args
payload = call_args[0][1]
assert "library_agents" not in payload
if __name__ == "__main__":

View File

@@ -6,7 +6,6 @@ const config: StorybookConfig = {
"../src/components/tokens/**/*.stories.@(js|jsx|mjs|ts|tsx)",
"../src/components/atoms/**/*.stories.@(js|jsx|mjs|ts|tsx)",
"../src/components/molecules/**/*.stories.@(js|jsx|mjs|ts|tsx)",
"../src/components/ai-elements/**/*.stories.@(js|jsx|mjs|ts|tsx)",
],
addons: [
"@storybook/addon-a11y",

View File

@@ -32,7 +32,6 @@
"dependencies": {
"@ai-sdk/react": "3.0.61",
"@faker-js/faker": "10.0.0",
"@ferrucc-io/emoji-picker": "0.0.48",
"@hookform/resolvers": "5.2.2",
"@next/third-parties": "15.4.6",
"@phosphor-icons/react": "2.1.10",

View File

@@ -18,9 +18,6 @@ importers:
'@faker-js/faker':
specifier: 10.0.0
version: 10.0.0
'@ferrucc-io/emoji-picker':
specifier: 0.0.48
version: 0.0.48(@babel/core@7.28.5)(@babel/template@7.27.2)(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(tailwindcss@3.4.17)
'@hookform/resolvers':
specifier: 5.2.2
version: 5.2.2(react-hook-form@7.66.0(react@18.3.1))
@@ -1510,14 +1507,6 @@ packages:
resolution: {integrity: sha512-UollFEUkVXutsaP+Vndjxar40Gs5JL2HeLcl8xO1QAjJgOdhc3OmBFWyEylS+RddWaaBiAzH+5/17PLQJwDiLw==}
engines: {node: ^20.19.0 || ^22.13.0 || ^23.5.0 || >=24.0.0, npm: '>=10'}
'@ferrucc-io/emoji-picker@0.0.48':
resolution: {integrity: sha512-DJ5u+6VLF9OK7x+S/luwrVb5CHC6W16jL5b8vBUYNpxKWSuFgyliDHVtw1SGe6+dr5RUbf8WQwPJdKZmU3Ittg==}
engines: {node: '>=18'}
peerDependencies:
react: ^18.2.0 || ^19.0.0
react-dom: ^18.2.0 || ^19.0.0
tailwindcss: '>=3.0.0'
'@floating-ui/core@1.7.3':
resolution: {integrity: sha512-sGnvb5dmrJaKEZ+LDIpguvdX3bDlEllmv4/ClQ9awcmCZrlx5jQyyMWFM5kBI+EyNOCDDiKk8il0zeuX3Zlg/w==}
@@ -3125,10 +3114,6 @@ packages:
'@shikijs/vscode-textmate@10.0.2':
resolution: {integrity: sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg==}
'@sindresorhus/is@4.6.0':
resolution: {integrity: sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw==}
engines: {node: '>=10'}
'@standard-schema/spec@1.0.0':
resolution: {integrity: sha512-m2bOd0f2RT9k8QJx1JN85cZYyH1RqFBdlwtkSlf4tBDYLCiiZnv1fIIwacK6cqwXavOydf0NPToMQgpKq+dVlA==}
@@ -3391,19 +3376,10 @@ packages:
react: '>=16.8'
react-dom: '>=16.8'
'@tanstack/react-virtual@3.13.18':
resolution: {integrity: sha512-dZkhyfahpvlaV0rIKnvQiVoWPyURppl6w4m9IwMDpuIjcJ1sD9YGWrt0wISvgU7ewACXx2Ct46WPgI6qAD4v6A==}
peerDependencies:
react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0
react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0
'@tanstack/table-core@8.21.3':
resolution: {integrity: sha512-ldZXEhOBb8Is7xLs01fR3YEc3DERiz5silj8tnGkFZytt1abEvl/GhUmCE0PMLaMPTa3Jk4HbKmRlHmu+gCftg==}
engines: {node: '>=12'}
'@tanstack/virtual-core@3.13.18':
resolution: {integrity: sha512-Mx86Hqu1k39icq2Zusq+Ey2J6dDWTjDvEv43PJtRCoEYTLyfaPnxIQ6iy7YAOK0NV/qOEmZQ/uCufrppZxTgcg==}
'@testing-library/dom@10.4.1':
resolution: {integrity: sha512-o4PXJQidqJl82ckFaXUeoAW+XysPLauYI43Abki5hABd853iMhitooc6znOnczgbTYmEP6U6/y1ZyKAIsvMKGg==}
engines: {node: '>=18'}
@@ -4397,10 +4373,6 @@ packages:
resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==}
engines: {node: '>=10'}
char-regex@1.0.2:
resolution: {integrity: sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==}
engines: {node: '>=10'}
character-entities-html4@2.1.0:
resolution: {integrity: sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==}
@@ -5018,9 +4990,6 @@ packages:
emoji-regex@9.2.2:
resolution: {integrity: sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==}
emojilib@2.4.0:
resolution: {integrity: sha512-5U0rVMU5Y2n2+ykNLQqMoqklN9ICBT/KsvC1Gz6vqHbz2AXXGkG+Pm5rMWk/8Vjrr/mY9985Hi8DYzn1F09Nyw==}
emojis-list@3.0.0:
resolution: {integrity: sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==}
engines: {node: '>= 4'}
@@ -6001,24 +5970,6 @@ packages:
resolution: {integrity: sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==}
hasBin: true
jotai@2.17.1:
resolution: {integrity: sha512-TFNZZDa/0ewCLQyRC/Sq9crtixNj/Xdf/wmj9631xxMuKToVJZDbqcHIYN0OboH+7kh6P6tpIK7uKWClj86PKw==}
engines: {node: '>=12.20.0'}
peerDependencies:
'@babel/core': '>=7.0.0'
'@babel/template': '>=7.0.0'
'@types/react': '>=17.0.0'
react: '>=17.0.0'
peerDependenciesMeta:
'@babel/core':
optional: true
'@babel/template':
optional: true
'@types/react':
optional: true
react:
optional: true
js-tokens@4.0.0:
resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==}
@@ -6637,10 +6588,6 @@ packages:
node-abort-controller@3.1.1:
resolution: {integrity: sha512-AGK2yQKIjRuqnc6VkX2Xj5d+QW8xZ87pa1UK6yA6ouUyuxfHuMP6umE5QK7UmTeOAymo+Zx1Fxiuw9rVx8taHQ==}
node-emoji@2.2.0:
resolution: {integrity: sha512-Z3lTE9pLaJF47NyMhd4ww1yFTAP8YhYI8SleJiHzM46Fgpm5cnNzSl9XfzFNqbaz+VlJrIj3fXQ4DeN1Rjm6cw==}
engines: {node: '>=18'}
node-fetch-h2@2.3.0:
resolution: {integrity: sha512-ofRW94Ab0T4AOh5Fk8t0h8OBWrmjb0SSB20xh1H8YnPV9EJ+f5AMoYSUQ2zgJ4Iq2HAK0I2l5/Nequ8YzFS3Hg==}
engines: {node: 4.x || >=6.0.0}
@@ -7739,10 +7686,6 @@ packages:
resolution: {integrity: sha512-LH7FpTAkeD+y5xQC4fzS+tFtaNlvt3Ib1zKzvhjv/Y+cioV4zIuw4IZr2yhRLu67CWL7FR9/6KXKnjRoZTvGGQ==}
engines: {node: '>=12'}
skin-tone@2.0.0:
resolution: {integrity: sha512-kUMbT1oBJCpgrnKoSr0o6wPtvRWT9W9UKvGLwfJYO2WuahZRHOpEyL1ckyMGgMWh0UdpmaoFqKKD29WTomNEGA==}
engines: {node: '>=8'}
slash@3.0.0:
resolution: {integrity: sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==}
engines: {node: '>=8'}
@@ -8220,13 +8163,6 @@ packages:
resolution: {integrity: sha512-dA8WbNeb2a6oQzAQ55YlT5vQAWGV9WXOsi3SskE3bcCdM0P4SDd+24zS/OCacdRq5BkdsRj9q3Pg6YyQoxIGqg==}
engines: {node: '>=4'}
unicode-emoji-json@0.8.0:
resolution: {integrity: sha512-3wDXXvp6YGoKGhS2O2H7+V+bYduOBydN1lnI0uVfr1cIdY02uFFiEH1i3kE5CCE4l6UqbLKVmEFW9USxTAMD1g==}
unicode-emoji-modifier-base@1.0.0:
resolution: {integrity: sha512-yLSH4py7oFH3oG/9K+XWrz1pSi3dfUrWEnInbxMfArOfc1+33BlGPQtLsOYwvdMy11AwUBetYuaRxSPqgkq+8g==}
engines: {node: '>=4'}
unicode-match-property-ecmascript@2.0.0:
resolution: {integrity: sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==}
engines: {node: '>=4'}
@@ -9836,22 +9772,6 @@ snapshots:
'@faker-js/faker@10.0.0': {}
'@ferrucc-io/emoji-picker@0.0.48(@babel/core@7.28.5)(@babel/template@7.27.2)(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(tailwindcss@3.4.17)':
dependencies:
'@tanstack/react-virtual': 3.13.18(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
clsx: 2.1.1
jotai: 2.17.1(@babel/core@7.28.5)(@babel/template@7.27.2)(@types/react@18.3.17)(react@18.3.1)
node-emoji: 2.2.0
react: 18.3.1
react-dom: 18.3.1(react@18.3.1)
tailwind-merge: 2.6.0
tailwindcss: 3.4.17
unicode-emoji-json: 0.8.0
transitivePeerDependencies:
- '@babel/core'
- '@babel/template'
- '@types/react'
'@floating-ui/core@1.7.3':
dependencies:
'@floating-ui/utils': 0.2.10
@@ -11613,8 +11533,6 @@ snapshots:
'@shikijs/vscode-textmate@10.0.2': {}
'@sindresorhus/is@4.6.0': {}
'@standard-schema/spec@1.0.0': {}
'@standard-schema/spec@1.1.0': {}
@@ -12083,16 +12001,8 @@ snapshots:
react: 18.3.1
react-dom: 18.3.1(react@18.3.1)
'@tanstack/react-virtual@3.13.18(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
dependencies:
'@tanstack/virtual-core': 3.13.18
react: 18.3.1
react-dom: 18.3.1(react@18.3.1)
'@tanstack/table-core@8.21.3': {}
'@tanstack/virtual-core@3.13.18': {}
'@testing-library/dom@10.4.1':
dependencies:
'@babel/code-frame': 7.27.1
@@ -13184,8 +13094,6 @@ snapshots:
ansi-styles: 4.3.0
supports-color: 7.2.0
char-regex@1.0.2: {}
character-entities-html4@2.1.0: {}
character-entities-legacy@3.0.0: {}
@@ -13829,8 +13737,6 @@ snapshots:
emoji-regex@9.2.2: {}
emojilib@2.4.0: {}
emojis-list@3.0.0: {}
endent@2.1.0:
@@ -15112,13 +15018,6 @@ snapshots:
jiti@2.6.1: {}
jotai@2.17.1(@babel/core@7.28.5)(@babel/template@7.27.2)(@types/react@18.3.17)(react@18.3.1):
optionalDependencies:
'@babel/core': 7.28.5
'@babel/template': 7.27.2
'@types/react': 18.3.17
react: 18.3.1
js-tokens@4.0.0: {}
js-yaml@4.1.0:
@@ -15987,13 +15886,6 @@ snapshots:
node-abort-controller@3.1.1: {}
node-emoji@2.2.0:
dependencies:
'@sindresorhus/is': 4.6.0
char-regex: 1.0.2
emojilib: 2.4.0
skin-tone: 2.0.0
node-fetch-h2@2.3.0:
dependencies:
http2-client: 1.3.5
@@ -17294,10 +17186,6 @@ snapshots:
dependencies:
jsep: 1.4.0
skin-tone@2.0.0:
dependencies:
unicode-emoji-modifier-base: 1.0.0
slash@3.0.0: {}
sonner@2.0.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
@@ -17813,10 +17701,6 @@ snapshots:
unicode-canonical-property-names-ecmascript@2.0.1: {}
unicode-emoji-json@0.8.0: {}
unicode-emoji-modifier-base@1.0.0: {}
unicode-match-property-ecmascript@2.0.0:
dependencies:
unicode-canonical-property-names-ecmascript: 2.0.1

Binary file not shown.

Before

Width:  |  Height:  |  Size: 192 KiB

View File

@@ -19,8 +19,6 @@ const SCOPE_DESCRIPTIONS: { [key in APIKeyPermission]: string } = {
IDENTITY: "View your user ID, e-mail, and timezone",
EXECUTE_GRAPH: "Run your agents",
READ_GRAPH: "View your agents and their configurations",
WRITE_GRAPH: "Create agent graphs",
WRITE_LIBRARY: "Add agents to your library",
EXECUTE_BLOCK: "Execute individual blocks",
READ_BLOCK: "View available blocks",
READ_STORE: "Access the Marketplace",

View File

@@ -63,19 +63,8 @@ const CustomEdge = ({
return (
<>
<path
d={edgePath}
fill="none"
stroke="black"
strokeOpacity={0}
strokeWidth={20}
className="react-flow__edge-interaction cursor-pointer"
onMouseEnter={() => setIsHovered(true)}
onMouseLeave={() => setIsHovered(false)}
/>
<BaseEdge
path={edgePath}
interactionWidth={0}
markerEnd={markerEnd}
className={cn(
isStatic && "!stroke-[1.5px] [stroke-dasharray:6]",

View File

@@ -46,7 +46,7 @@ export const NodeDataRenderer = ({ nodeId }: { nodeId: string }) => {
<div className="space-y-2">
<Text variant="small-medium">Input</Text>
<ContentRenderer value={latestInputData} shortContent={true} />
<ContentRenderer value={latestInputData} shortContent={false} />
<div className="mt-1 flex justify-end gap-1">
<NodeDataViewer
@@ -98,7 +98,7 @@ export const NodeDataRenderer = ({ nodeId }: { nodeId: string }) => {
Data:
</Text>
<div className="relative space-y-2">
{value.slice(0, 3).map((item, index) => (
{value.map((item, index) => (
<div key={index}>
<ContentRenderer
value={item}

View File

@@ -37,15 +37,15 @@ export const ContentRenderer: React.FC<{
!shortContent
) {
return (
<div className="overflow-hidden [&>*]:rounded-xlarge [&>*]:!text-xs [&_pre]:whitespace-pre-wrap [&_pre]:break-words">
<div className="[&>*]:rounded-xlarge [&>*]:!text-xs">
{renderer?.render(value, metadata)}
</div>
);
}
return (
<div className="overflow-hidden [&>*]:rounded-xlarge [&>*]:!text-xs">
<TextRenderer value={value} truncateLengthLimit={200} />
<div className="[&>*]:rounded-xlarge [&>*]:!text-xs">
<TextRenderer value={value} truncateLengthLimit={100} />
</div>
);
};

View File

@@ -1,3 +1,4 @@
import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import {
@@ -163,119 +164,129 @@ export const NodeDataViewer: FC<NodeDataViewerProps> = ({
</div>
</div>
<div className="flex-1">
<div className="my-4">
{shouldGroupExecutions ? (
<div className="space-y-4">
{groupedExecutions.map((execution) => (
<div
key={execution.execId}
className="rounded-3xl border border-slate-200 bg-white p-4 shadow-sm"
>
<div className="flex items-center gap-2">
<Text variant="body" className="text-slate-600">
Execution ID:
</Text>
<Text
variant="body-medium"
className="rounded-full border border-gray-300 bg-gray-50 px-2 py-1 font-mono text-xs"
>
{execution.execId}
</Text>
</div>
<div className="mt-2 space-y-4">
{execution.outputItems.length > 0 ? (
execution.outputItems.map((item, index) => (
<div key={item.key} className="group">
<OutputItem
value={item.value}
metadata={item.metadata}
renderer={item.renderer}
/>
<div className="mt-2 flex gap-3">
<Button
variant="secondary"
className="min-w-0 p-1"
size="icon"
onClick={() =>
handleCopyGroupedItem(
execution.execId,
index,
item,
)
}
aria-label="Copy item"
>
{copiedKey ===
`${execution.execId}-${index}` ? (
<CheckIcon className="size-4 text-green-600" />
) : (
<CopyIcon className="size-4 text-black" />
)}
</Button>
<Button
variant="secondary"
size="icon"
className="min-w-0 p-1"
onClick={() => handleDownloadGroupedItem(item)}
aria-label="Download item"
>
<DownloadIcon className="size-4 text-black" />
</Button>
<div className="flex-1 overflow-hidden">
<ScrollArea className="h-full">
<div className="my-4">
{shouldGroupExecutions ? (
<div className="space-y-4">
{groupedExecutions.map((execution) => (
<div
key={execution.execId}
className="rounded-3xl border border-slate-200 bg-white p-4 shadow-sm"
>
<div className="flex items-center gap-2">
<Text variant="body" className="text-slate-600">
Execution ID:
</Text>
<Text
variant="body-medium"
className="rounded-full border border-gray-300 bg-gray-50 px-2 py-1 font-mono text-xs"
>
{execution.execId}
</Text>
</div>
<div className="mt-2 space-y-4">
{execution.outputItems.length > 0 ? (
execution.outputItems.map((item, index) => (
<div
key={item.key}
className="group flex items-start gap-4"
>
<div className="w-full flex-1">
<OutputItem
value={item.value}
metadata={item.metadata}
renderer={item.renderer}
/>
</div>
<div className="flex w-fit gap-3">
<Button
variant="secondary"
className="min-w-0 p-1"
size="icon"
onClick={() =>
handleCopyGroupedItem(
execution.execId,
index,
item,
)
}
aria-label="Copy item"
>
{copiedKey ===
`${execution.execId}-${index}` ? (
<CheckIcon className="size-4 text-green-600" />
) : (
<CopyIcon className="size-4 text-black" />
)}
</Button>
<Button
variant="secondary"
size="icon"
className="min-w-0 p-1"
onClick={() =>
handleDownloadGroupedItem(item)
}
aria-label="Download item"
>
<DownloadIcon className="size-4 text-black" />
</Button>
</div>
</div>
</div>
))
) : (
<div className="py-4 text-center text-gray-500">
No data available
</div>
)}
</div>
</div>
))}
</div>
) : dataArray.length > 0 ? (
<div className="space-y-4">
{outputItems.map((item, index) => (
<div key={item.key} className="group">
<OutputItem
value={item.value}
metadata={item.metadata}
renderer={item.renderer}
/>
<div className="mt-2 flex gap-3">
<Button
variant="secondary"
className="min-w-0 p-1"
size="icon"
onClick={() => handleCopyItem(index)}
aria-label="Copy item"
>
{copiedIndex === index ? (
<CheckIcon className="size-4 text-green-600" />
))
) : (
<CopyIcon className="size-4 text-black" />
<div className="py-4 text-center text-gray-500">
No data available
</div>
)}
</Button>
<Button
variant="secondary"
size="icon"
className="min-w-0 p-1"
onClick={() => handleDownloadItem(index)}
aria-label="Download item"
>
<DownloadIcon className="size-4 text-black" />
</Button>
</div>
</div>
</div>
))}
</div>
) : (
<div className="py-8 text-center text-gray-500">
No data available
</div>
)}
</div>
))}
</div>
) : dataArray.length > 0 ? (
<div className="space-y-4">
{outputItems.map((item, index) => (
<div key={item.key} className="group relative">
<OutputItem
value={item.value}
metadata={item.metadata}
renderer={item.renderer}
/>
<div className="absolute right-3 top-3 flex gap-3">
<Button
variant="secondary"
className="min-w-0 p-1"
size="icon"
onClick={() => handleCopyItem(index)}
aria-label="Copy item"
>
{copiedIndex === index ? (
<CheckIcon className="size-4 text-green-600" />
) : (
<CopyIcon className="size-4 text-black" />
)}
</Button>
<Button
variant="secondary"
size="icon"
className="min-w-0 p-1"
onClick={() => handleDownloadItem(index)}
aria-label="Download item"
>
<DownloadIcon className="size-4 text-black" />
</Button>
</div>
</div>
))}
</div>
) : (
<div className="py-8 text-center text-gray-500">
No data available
</div>
)}
</div>
</ScrollArea>
</div>
<div className="flex justify-end pt-4">

View File

@@ -127,10 +127,7 @@ export const Block: BlockComponent = ({
// preview when user drags it
const dragPreview = document.createElement("div");
dragPreview.style.cssText = blockDragPreviewStyle;
dragPreview.textContent = beautifyString(title || "").replace(
/ Block$/,
"",
);
dragPreview.textContent = beautifyString(title || "");
document.body.appendChild(dragPreview);
e.dataTransfer.setDragImage(dragPreview, 0, 0);
@@ -165,10 +162,7 @@ export const Block: BlockComponent = ({
"line-clamp-1 font-sans text-sm font-medium leading-[1.375rem] text-zinc-800 group-disabled:text-zinc-400",
)}
>
{highlightText(
beautifyString(title).replace(/ Block$/, ""),
highlightedText,
)}
{highlightText(beautifyString(title), highlightedText)}
</span>
)}
{description && (

View File

@@ -2,7 +2,7 @@ import { useBlockMenuStore } from "@/app/(platform)/build/stores/blockMenuStore"
import { FilterChip } from "../FilterChip";
import { categories } from "./constants";
import { FilterSheet } from "../FilterSheet/FilterSheet";
import { CategoryKey } from "./types";
import { GetV2BuilderSearchFilterAnyOfItem } from "@/app/api/__generated__/models/getV2BuilderSearchFilterAnyOfItem";
export const BlockMenuFilters = () => {
const {
@@ -15,7 +15,7 @@ export const BlockMenuFilters = () => {
removeCreator,
} = useBlockMenuStore();
const handleFilterClick = (filter: CategoryKey) => {
const handleFilterClick = (filter: GetV2BuilderSearchFilterAnyOfItem) => {
if (filters.includes(filter)) {
removeFilter(filter);
} else {

View File

@@ -1,15 +1,15 @@
import { SearchEntryFilterAnyOfItem } from "@/app/api/__generated__/models/searchEntryFilterAnyOfItem";
import { GetV2BuilderSearchFilterAnyOfItem } from "@/app/api/__generated__/models/getV2BuilderSearchFilterAnyOfItem";
import { CategoryKey } from "./types";
export const categories: Array<{ key: CategoryKey; name: string }> = [
{ key: SearchEntryFilterAnyOfItem.blocks, name: "Blocks" },
{ key: GetV2BuilderSearchFilterAnyOfItem.blocks, name: "Blocks" },
{
key: SearchEntryFilterAnyOfItem.integrations,
key: GetV2BuilderSearchFilterAnyOfItem.integrations,
name: "Integrations",
},
{
key: SearchEntryFilterAnyOfItem.marketplace_agents,
key: GetV2BuilderSearchFilterAnyOfItem.marketplace_agents,
name: "Marketplace agents",
},
{ key: SearchEntryFilterAnyOfItem.my_agents, name: "My agents" },
{ key: GetV2BuilderSearchFilterAnyOfItem.my_agents, name: "My agents" },
];

View File

@@ -1,4 +1,4 @@
import { SearchEntryFilterAnyOfItem } from "@/app/api/__generated__/models/searchEntryFilterAnyOfItem";
import { GetV2BuilderSearchFilterAnyOfItem } from "@/app/api/__generated__/models/getV2BuilderSearchFilterAnyOfItem";
export type DefaultStateType =
| "suggestion"
@@ -10,7 +10,7 @@ export type DefaultStateType =
| "marketplace_agents"
| "my_agents";
export type CategoryKey = SearchEntryFilterAnyOfItem;
export type CategoryKey = GetV2BuilderSearchFilterAnyOfItem;
export interface Filters {
categories: {

View File

@@ -23,7 +23,7 @@ import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import { getQueryClient } from "@/lib/react-query/queryClient";
import { useToast } from "@/components/molecules/Toast/use-toast";
import * as Sentry from "@sentry/nextjs";
import { CategoryCounts } from "../BlockMenuFilters/types";
import { GetV2BuilderSearchFilterAnyOfItem } from "@/app/api/__generated__/models/getV2BuilderSearchFilterAnyOfItem";
export const useBlockMenuSearchContent = () => {
const {
@@ -67,7 +67,7 @@ export const useBlockMenuSearchContent = () => {
page_size: 8,
search_query: searchQuery,
search_id: searchId,
filter: filters.length > 0 ? filters.join(",") : undefined,
filter: filters.length > 0 ? filters : undefined,
by_creator: creators.length > 0 ? creators : undefined,
},
{
@@ -117,7 +117,10 @@ export const useBlockMenuSearchContent = () => {
}
const latestData = okData(searchQueryData.pages.at(-1));
setCategoryCounts(
(latestData?.total_items as CategoryCounts) || {
(latestData?.total_items as Record<
GetV2BuilderSearchFilterAnyOfItem,
number
>) || {
blocks: 0,
integrations: 0,
marketplace_agents: 0,

View File

@@ -1,7 +1,7 @@
import { useBlockMenuStore } from "@/app/(platform)/build/stores/blockMenuStore";
import { useState } from "react";
import { INITIAL_CREATORS_TO_SHOW } from "./constant";
import { CategoryKey } from "../BlockMenuFilters/types";
import { GetV2BuilderSearchFilterAnyOfItem } from "@/app/api/__generated__/models/getV2BuilderSearchFilterAnyOfItem";
export const useFilterSheet = () => {
const { filters, creators_list, creators, setFilters, setCreators } =
@@ -9,13 +9,15 @@ export const useFilterSheet = () => {
const [isOpen, setIsOpen] = useState(false);
const [localCategories, setLocalCategories] =
useState<CategoryKey[]>(filters);
useState<GetV2BuilderSearchFilterAnyOfItem[]>(filters);
const [localCreators, setLocalCreators] = useState<string[]>(creators);
const [displayedCreatorsCount, setDisplayedCreatorsCount] = useState(
INITIAL_CREATORS_TO_SHOW,
);
const handleLocalCategoryChange = (category: CategoryKey) => {
const handleLocalCategoryChange = (
category: GetV2BuilderSearchFilterAnyOfItem,
) => {
setLocalCategories((prev) => {
if (prev.includes(category)) {
return prev.filter((c) => c !== category);

View File

@@ -61,10 +61,7 @@ export const IntegrationBlock: IntegrationBlockComponent = ({
// preview when user drags it
const dragPreview = document.createElement("div");
dragPreview.style.cssText = blockDragPreviewStyle;
dragPreview.textContent = beautifyString(title || "").replace(
/ Block$/,
"",
);
dragPreview.textContent = beautifyString(title || "");
document.body.appendChild(dragPreview);
e.dataTransfer.setDragImage(dragPreview, 0, 0);
@@ -103,10 +100,7 @@ export const IntegrationBlock: IntegrationBlockComponent = ({
"line-clamp-1 font-sans text-sm font-medium leading-[1.375rem] text-zinc-800 group-disabled:text-zinc-400",
)}
>
{highlightText(
beautifyString(title).replace(/ Block$/, ""),
highlightedText,
)}
{highlightText(beautifyString(title), highlightedText)}
</span>
)}
{description && (

View File

@@ -81,14 +81,6 @@ export const UGCAgentBlock: UGCAgentBlockComponent = ({
>
Version {version}
</span>
<span
className={cn(
"rounded-[0.75rem] bg-zinc-200 px-[0.5rem] font-sans text-xs leading-[1.25rem] text-zinc-500",
)}
>
Your Agent
</span>
</div>
</div>
<div

View File

@@ -3,29 +3,28 @@ import { DefaultStateType } from "../components/NewControlPanel/NewBlockMenu/typ
import { SearchResponseItemsItem } from "@/app/api/__generated__/models/searchResponseItemsItem";
import { getSearchItemType } from "../components/NewControlPanel/NewBlockMenu/BlockMenuSearchContent/helper";
import { StoreAgent } from "@/app/api/__generated__/models/storeAgent";
import {
CategoryKey,
CategoryCounts,
} from "../components/NewControlPanel/NewBlockMenu/BlockMenuFilters/types";
import { GetV2BuilderSearchFilterAnyOfItem } from "@/app/api/__generated__/models/getV2BuilderSearchFilterAnyOfItem";
type BlockMenuStore = {
searchQuery: string;
searchId: string | undefined;
defaultState: DefaultStateType;
integration: string | undefined;
filters: CategoryKey[];
filters: GetV2BuilderSearchFilterAnyOfItem[];
creators: string[];
creators_list: string[];
categoryCounts: CategoryCounts;
categoryCounts: Record<GetV2BuilderSearchFilterAnyOfItem, number>;
setCategoryCounts: (counts: CategoryCounts) => void;
setCategoryCounts: (
counts: Record<GetV2BuilderSearchFilterAnyOfItem, number>,
) => void;
setCreatorsList: (searchData: SearchResponseItemsItem[]) => void;
addCreator: (creator: string) => void;
setCreators: (creators: string[]) => void;
removeCreator: (creator: string) => void;
addFilter: (filter: CategoryKey) => void;
setFilters: (filters: CategoryKey[]) => void;
removeFilter: (filter: CategoryKey) => void;
addFilter: (filter: GetV2BuilderSearchFilterAnyOfItem) => void;
setFilters: (filters: GetV2BuilderSearchFilterAnyOfItem[]) => void;
removeFilter: (filter: GetV2BuilderSearchFilterAnyOfItem) => void;
setSearchQuery: (query: string) => void;
setSearchId: (id: string | undefined) => void;
setDefaultState: (state: DefaultStateType) => void;

View File

@@ -1,61 +0,0 @@
"use client";
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import { BookOpenIcon, PencilSimpleIcon } from "@phosphor-icons/react";
import Image from "next/image";
import sparklesImg from "../MiniGame/assets/sparkles.png";
interface Props {
agentName: string;
message: string;
libraryAgentLink: string;
agentPageLink: string;
}
export function AgentSavedCard({
agentName,
message,
libraryAgentLink,
agentPageLink,
}: Props) {
return (
<div className="rounded-xl border border-border/60 bg-card p-4 shadow-sm">
<div className="flex items-baseline gap-2">
<Image
src={sparklesImg}
alt="sparkles"
width={24}
height={24}
className="relative top-1"
/>
<Text variant="body-medium" className="mb-2 text-[16px] text-black">
Agent <span className="text-violet-600">{agentName}</span> {message}
</Text>
</div>
<div className="mt-3 flex flex-wrap gap-2">
<Button
size="small"
as="NextLink"
href={libraryAgentLink}
target="_blank"
rel="noopener noreferrer"
>
<BookOpenIcon size={14} weight="regular" />
Open in library
</Button>
<Button
as="NextLink"
variant="secondary"
size="small"
href={agentPageLink}
target="_blank"
rel="noopener noreferrer"
>
<PencilSimpleIcon size={14} weight="regular" />
Open in builder
</Button>
</div>
</div>
);
}

View File

@@ -10,7 +10,6 @@ import {
MessageResponse,
} from "@/components/ai-elements/message";
import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { ToolUIPart, UIDataTypes, UIMessage, UITools } from "ai";
import { useEffect, useState } from "react";
import { CreateAgentTool } from "../../tools/CreateAgent/CreateAgent";
@@ -28,68 +27,18 @@ import { GenericTool } from "../../tools/GenericTool/GenericTool";
import { ViewAgentOutputTool } from "../../tools/ViewAgentOutput/ViewAgentOutput";
// ---------------------------------------------------------------------------
// Special text parsing (error markers, workspace URLs, etc.)
// Workspace media support
// ---------------------------------------------------------------------------
// Special message prefixes for text-based markers (set by backend)
const COPILOT_ERROR_PREFIX = "[COPILOT_ERROR]";
const COPILOT_SYSTEM_PREFIX = "[COPILOT_SYSTEM]";
type MarkerType = "error" | "system" | null;
/**
* Parse special markers from message content (error, system).
*
* Detects markers added by the backend for special rendering:
* - `[COPILOT_ERROR] message` → ErrorCard
* - `[COPILOT_SYSTEM] message` → System info message
*
* Returns marker type, marker text, and cleaned text.
*/
function parseSpecialMarkers(text: string): {
markerType: MarkerType;
markerText: string;
cleanText: string;
} {
// Check for error marker
const errorMatch = text.match(
new RegExp(`\\${COPILOT_ERROR_PREFIX}\\s*(.+?)$`, "s"),
);
if (errorMatch) {
return {
markerType: "error",
markerText: errorMatch[1].trim(),
cleanText: text.replace(errorMatch[0], "").trim(),
};
}
// Check for system marker
const systemMatch = text.match(
new RegExp(`\\${COPILOT_SYSTEM_PREFIX}\\s*(.+?)$`, "s"),
);
if (systemMatch) {
return {
markerType: "system",
markerText: systemMatch[1].trim(),
cleanText: text.replace(systemMatch[0], "").trim(),
};
}
return { markerType: null, markerText: "", cleanText: text };
}
/**
* Resolve workspace:// URLs in markdown text to proxy download URLs.
*
* Handles both image syntax `![alt](workspace://id#mime)` and regular link
* syntax `[text](workspace://id)`. For images the MIME type hash fragment is
* inspected so that videos can be rendered with a `<video>` element via the
* custom img component.
* Detects MIME type from the hash fragment (e.g. workspace://id#video/mp4)
* and prefixes the alt text with "video:" so the custom img component can
* render a <video> element instead.
*/
function resolveWorkspaceUrls(text: string): string {
// Handle image links: ![alt](workspace://id#mime)
let resolved = text.replace(
/!\[([^\]]*)\]\(workspace:\/\/([^)#\s]+)(?:#([^)#\s]*))?\)/g,
return text.replace(
/!\[([^\]]*)\]\(workspace:\/\/([^)#\s]+)(?:#([^)\s]*))?\)/g,
(_match, alt: string, fileId: string, mimeHint?: string) => {
const apiPath = getGetWorkspaceDownloadFileByIdUrl(fileId);
const url = `/api/proxy${apiPath}`;
@@ -99,25 +48,6 @@ function resolveWorkspaceUrls(text: string): string {
return `![${alt || "Image"}](${url})`;
},
);
// Handle regular links: [text](workspace://id) — without the leading "!"
// These are blocked by Streamdown's rehype-harden sanitizer because
// "workspace://" is not in the allowed URL-scheme whitelist, which causes
// "[blocked]" to appear next to the link text.
// Use an absolute URL so Streamdown's "Copy link" button copies the full
// URL (including host) rather than just the path.
resolved = resolved.replace(
/(?<!!)\[([^\]]*)\]\(workspace:\/\/([^)#\s]+)(?:#[^)#\s]*)?\)/g,
(_match, linkText: string, fileId: string) => {
const apiPath = getGetWorkspaceDownloadFileByIdUrl(fileId);
const origin =
typeof window !== "undefined" ? window.location.origin : "";
const url = `${origin}/api/proxy${apiPath}`;
return `[${linkText || "Download file"}](${url})`;
},
);
return resolved;
}
/**
@@ -199,42 +129,24 @@ export const ChatMessagesContainer = ({
}: ChatMessagesContainerProps) => {
const [thinkingPhrase, setThinkingPhrase] = useState(getRandomPhrase);
const lastMessage = messages[messages.length - 1];
// Determine if something is visibly "in-flight" in the last assistant message:
// - Text is actively streaming (last part is non-empty text)
// - A tool call is pending (state is input-streaming or input-available)
const hasInflight = (() => {
if (lastMessage?.role !== "assistant") return false;
const parts = lastMessage.parts;
if (parts.length === 0) return false;
const lastPart = parts[parts.length - 1];
// Text is actively being written
if (lastPart.type === "text" && lastPart.text.trim().length > 0)
return true;
// A tool call is still pending (no output yet)
if (
lastPart.type.startsWith("tool-") &&
"state" in lastPart &&
(lastPart.state === "input-streaming" ||
lastPart.state === "input-available")
)
return true;
return false;
})();
const showThinking =
status === "submitted" || (status === "streaming" && !hasInflight);
useEffect(() => {
if (showThinking) {
if (status === "submitted") {
setThinkingPhrase(getRandomPhrase());
}
}, [showThinking]);
}, [status]);
const lastMessage = messages[messages.length - 1];
const lastAssistantHasVisibleContent =
lastMessage?.role === "assistant" &&
lastMessage.parts.some(
(p) =>
(p.type === "text" && p.text.trim().length > 0) ||
p.type.startsWith("tool-"),
);
const showThinking =
status === "submitted" ||
(status === "streaming" && !lastAssistantHasVisibleContent);
return (
<Conversation className="min-h-0 flex-1">
@@ -252,6 +164,11 @@ export const ChatMessagesContainer = ({
const isLastAssistant =
messageIndex === messages.length - 1 &&
message.role === "assistant";
const messageHasVisibleContent = message.parts.some(
(p) =>
(p.type === "text" && p.text.trim().length > 0) ||
p.type.startsWith("tool-"),
);
return (
<Message from={message.role} key={message.id}>
@@ -264,41 +181,15 @@ export const ChatMessagesContainer = ({
>
{message.parts.map((part, i) => {
switch (part.type) {
case "text": {
// Check for special markers (error, system)
const { markerType, markerText, cleanText } =
parseSpecialMarkers(part.text);
if (markerType === "error") {
return (
<ErrorCard
key={`${message.id}-${i}`}
responseError={{ message: markerText }}
context="execution"
/>
);
}
if (markerType === "system") {
return (
<div
key={`${message.id}-${i}`}
className="my-2 rounded-lg bg-neutral-100 px-3 py-2 text-sm italic text-neutral-600"
>
{markerText}
</div>
);
}
case "text":
return (
<MessageResponse
key={`${message.id}-${i}`}
components={STREAMDOWN_COMPONENTS}
>
{resolveWorkspaceUrls(cleanText)}
{resolveWorkspaceUrls(part.text)}
</MessageResponse>
);
}
case "tool-find_block":
return (
<FindBlocksTool
@@ -386,11 +277,13 @@ export const ChatMessagesContainer = ({
return null;
}
})}
{isLastAssistant && showThinking && (
<span className="inline-block animate-shimmer bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent">
{thinkingPhrase}
</span>
)}
{isLastAssistant &&
!messageHasVisibleContent &&
showThinking && (
<span className="inline-block animate-shimmer bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent">
{thinkingPhrase}
</span>
)}
</MessageContent>
</Message>
);

View File

@@ -1,69 +0,0 @@
"use client";
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import { WarningDiamondIcon } from "@phosphor-icons/react";
interface Props {
message?: string;
fallbackMessage: string;
error?: string;
details?: string;
actions: Array<{
label: string;
onClick: () => void;
variant?: "outline" | "ghost";
}>;
}
export function ToolErrorCard({
message,
fallbackMessage,
error,
details,
actions,
}: Props) {
return (
<div className="space-y-3 rounded-lg border border-red-200 bg-red-50 p-4">
<div className="flex items-start gap-2">
<WarningDiamondIcon
size={20}
weight="regular"
className="mt-0.5 shrink-0 text-red-500"
/>
<div className="flex-1 space-y-2">
<Text variant="body-medium" className="text-red-900">
{message || fallbackMessage}
</Text>
{error && (
<details className="text-xs text-red-700">
<summary className="cursor-pointer font-medium">
Technical details
</summary>
<pre className="mt-2 max-h-40 overflow-auto whitespace-pre-wrap break-words rounded bg-red-100 p-2">
{error}
</pre>
</details>
)}
{details && (
<pre className="max-h-40 overflow-auto whitespace-pre-wrap break-words rounded bg-red-100 p-2 text-xs text-red-700">
{details}
</pre>
)}
</div>
</div>
<div className="flex gap-2 pt-3">
{actions.map((action, i) => (
<Button
key={i}
variant={action.variant ?? "outline"}
size="small"
onClick={action.onClick}
>
{action.label}
</Button>
))}
</div>
</div>
);
}

View File

@@ -921,29 +921,26 @@ export default function StyleguidePage() {
output: {
type: ResponseType.agent_details,
agent: {
id: "agent-yt-1",
name: "YouTube Summarizer",
description:
"Summarizes YouTube videos into key points.",
inputs: {
type: "object",
properties: {
video_url: {
type: "string",
title: "Video URL",
description: "The YouTube video URL to summarize",
default: "https://youtube.com/watch?v=example",
},
language: {
type: "string",
title: "Output Language",
description:
"Language for the summary (default: English)",
default: "English",
},
inputs: [
{
name: "video_url",
title: "Video URL",
type: "string",
required: true,
description: "The YouTube video URL to summarize",
},
required: ["video_url"],
},
{
name: "language",
title: "Output Language",
type: "string",
required: false,
description:
"Language for the summary (default: English)",
},
],
},
message: "This agent requires inputs to run.",
},

View File

@@ -1,9 +1,16 @@
"use client";
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import {
BookOpenIcon,
PencilSimpleIcon,
WarningDiamondIcon,
} from "@phosphor-icons/react";
import type { ToolUIPart } from "ai";
import { AgentSavedCard } from "../../components/AgentSavedCard/AgentSavedCard";
import Image from "next/image";
import NextLink from "next/link";
import { useCopilotChatActions } from "../../components/CopilotChatActionsProvider/useCopilotChatActions";
import { ToolErrorCard } from "../../components/ToolErrorCard/ToolErrorCard";
import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
import {
ContentCardDescription,
@@ -13,7 +20,11 @@ import {
ContentMessage,
} from "../../components/ToolAccordion/AccordionContent";
import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion";
import { ClarificationQuestionsCard } from "./components/ClarificationQuestionsCard";
import {
ClarificationQuestionsCard,
ClarifyingQuestion,
} from "./components/ClarificationQuestionsCard";
import sparklesImg from "../../components/MiniGame/assets/sparkles.png";
import { MiniGame } from "../../components/MiniGame/MiniGame";
import { SuggestedGoalCard } from "./components/SuggestedGoalCard";
import {
@@ -28,7 +39,6 @@ import {
isSuggestedGoalOutput,
ToolIcon,
truncateText,
normalizeClarifyingQuestions,
type CreateAgentToolOutput,
} from "./helpers";
@@ -56,6 +66,9 @@ function getAccordionMeta(output: CreateAgentToolOutput | null) {
};
}
if (isAgentSavedOutput(output)) {
return { icon, title: output.agent_name, expanded: true };
}
if (isAgentPreviewOutput(output)) {
return {
icon,
@@ -79,7 +92,13 @@ function getAccordionMeta(output: CreateAgentToolOutput | null) {
expanded: true,
};
}
return { icon, title: "" };
return {
icon: (
<WarningDiamondIcon size={32} weight="light" className="text-red-500" />
),
title: "Error",
titleClassName: "text-red-500",
};
}
export function CreateAgentTool({ part }: Props) {
@@ -135,79 +154,154 @@ export function CreateAgentTool({ part }: Props) {
)}
{isError && output && isErrorOutput(output) && (
<ToolErrorCard
message={output.message}
fallbackMessage="Failed to generate the agent. Please try again."
error={output.error ? formatMaybeJson(output.error) : undefined}
details={output.details ? formatMaybeJson(output.details) : undefined}
actions={[
{
label: "Try again",
onClick: () => onSend("Please try creating the agent again."),
},
{
label: "Simplify goal",
variant: "ghost",
onClick: () => onSend("Can you help me simplify this goal?"),
},
]}
/>
<div className="space-y-3 rounded-lg border border-red-200 bg-red-50 p-4">
<div className="flex items-start gap-2">
<WarningDiamondIcon
size={20}
weight="regular"
className="mt-0.5 shrink-0 text-red-500"
/>
<div className="flex-1 space-y-2">
<Text variant="body-medium" className="text-red-900">
{output.message ||
"Failed to generate the agent. Please try again."}
</Text>
{output.error && (
<details className="text-xs text-red-700">
<summary className="cursor-pointer font-medium">
Technical details
</summary>
<pre className="mt-2 max-h-40 overflow-auto whitespace-pre-wrap break-words rounded bg-red-100 p-2">
{formatMaybeJson(output.error)}
</pre>
</details>
)}
{output.details && (
<pre className="max-h-40 overflow-auto whitespace-pre-wrap break-words rounded bg-red-100 p-2 text-xs text-red-700">
{formatMaybeJson(output.details)}
</pre>
)}
</div>
</div>
<div className="flex gap-2">
<Button
variant="outline"
size="small"
onClick={() => onSend("Please try creating the agent again.")}
>
Try again
</Button>
<Button
variant="outline"
size="small"
onClick={() => onSend("Can you help me simplify this goal?")}
>
Simplify goal
</Button>
</div>
</div>
)}
{hasExpandableContent &&
!(output && isClarificationNeededOutput(output)) &&
!(output && isAgentSavedOutput(output)) && (
<ToolAccordion {...getAccordionMeta(output)}>
{isOperating && (
<ContentGrid>
<MiniGame />
<ContentHint>
This could take a few minutes play while you wait!
</ContentHint>
</ContentGrid>
)}
{hasExpandableContent && (
<ToolAccordion {...getAccordionMeta(output)}>
{isOperating && (
<ContentGrid>
<MiniGame />
<ContentHint>
This could take a few minutes play while you wait!
</ContentHint>
</ContentGrid>
)}
{output && isAgentPreviewOutput(output) && (
<ContentGrid>
<ContentMessage>{output.message}</ContentMessage>
{output.description?.trim() && (
<ContentCardDescription>
{output.description}
</ContentCardDescription>
)}
<ContentCodeBlock>
{truncateText(formatMaybeJson(output.agent_json), 1600)}
</ContentCodeBlock>
</ContentGrid>
)}
{output && isAgentSavedOutput(output) && (
<div className="rounded-xl border border-border/60 bg-card p-4 shadow-sm">
<div className="flex items-baseline gap-2">
<Image
src={sparklesImg}
alt="sparkles"
width={24}
height={24}
className="relative top-1"
/>
<Text
variant="body-medium"
className="mb-2 text-[16px] text-black"
>
Agent{" "}
<span className="text-violet-600">{output.agent_name}</span>{" "}
has been saved to your library!
</Text>
</div>
<div className="mt-3 flex flex-wrap gap-4">
<Button variant="outline" size="small">
<NextLink
href={output.library_agent_link}
className="inline-flex items-center gap-1.5"
target="_blank"
rel="noopener noreferrer"
>
<BookOpenIcon size={14} weight="regular" />
Open in library
</NextLink>
</Button>
<Button variant="outline" size="small">
<NextLink
href={output.agent_page_link}
target="_blank"
rel="noopener noreferrer"
className="inline-flex items-center gap-1.5"
>
<PencilSimpleIcon size={14} weight="regular" />
Open in builder
</NextLink>
</Button>
</div>
</div>
)}
{output && isSuggestedGoalOutput(output) && (
<SuggestedGoalCard
message={output.message}
suggestedGoal={output.suggested_goal}
reason={output.reason}
goalType={output.goal_type ?? "vague"}
onUseSuggestedGoal={handleUseSuggestedGoal}
/>
)}
</ToolAccordion>
)}
{output && isAgentPreviewOutput(output) && (
<ContentGrid>
<ContentMessage>{output.message}</ContentMessage>
{output.description?.trim() && (
<ContentCardDescription>
{output.description}
</ContentCardDescription>
)}
<ContentCodeBlock>
{truncateText(formatMaybeJson(output.agent_json), 1600)}
</ContentCodeBlock>
</ContentGrid>
)}
{output && isAgentSavedOutput(output) && (
<AgentSavedCard
agentName={output.agent_name}
message="has been saved to your library!"
libraryAgentLink={output.library_agent_link}
agentPageLink={output.agent_page_link}
/>
)}
{output && isClarificationNeededOutput(output) && (
<ClarificationQuestionsCard
questions={(output.questions ?? []).map((q) => {
const item: ClarifyingQuestion = {
question: q.question,
keyword: q.keyword,
};
const example =
typeof q.example === "string" && q.example.trim()
? q.example.trim()
: null;
if (example) item.example = example;
return item;
})}
message={output.message}
onSubmitAnswers={handleClarificationAnswers}
/>
)}
{output && isClarificationNeededOutput(output) && (
<ClarificationQuestionsCard
questions={normalizeClarifyingQuestions(output.questions ?? [])}
message={output.message}
onSubmitAnswers={handleClarificationAnswers}
/>
{output && isSuggestedGoalOutput(output) && (
<SuggestedGoalCard
message={output.message}
suggestedGoal={output.suggested_goal}
reason={output.reason}
goalType={output.goal_type ?? "vague"}
onUseSuggestedGoal={handleUseSuggestedGoal}
/>
)}
</ToolAccordion>
)}
</div>
);

View File

@@ -5,9 +5,14 @@ import { Card } from "@/components/atoms/Card/Card";
import { Input } from "@/components/atoms/Input/Input";
import { Text } from "@/components/atoms/Text/Text";
import { cn } from "@/lib/utils";
import { ChatTeardropDotsIcon, CheckCircleIcon } from "@phosphor-icons/react";
import { CheckCircleIcon, QuestionIcon } from "@phosphor-icons/react";
import { useEffect, useRef, useState } from "react";
import type { ClarifyingQuestion } from "../helpers";
export interface ClarifyingQuestion {
question: string;
keyword: string;
example?: string;
}
interface Props {
questions: ClarifyingQuestion[];
@@ -128,26 +133,29 @@ export function ClarificationQuestionsCard({
return (
<div
className={cn(
"group relative flex w-full justify-start gap-3",
"group relative flex w-full justify-start gap-3 px-4 py-3",
className,
)}
>
<div className="flex w-full max-w-3xl gap-3">
<div className="flex-shrink-0">
<div className="flex h-7 w-7 items-center justify-center rounded-lg bg-indigo-500">
<QuestionIcon className="h-4 w-4 text-indigo-50" weight="bold" />
</div>
</div>
<div className="flex min-w-0 flex-1 flex-col">
<Card className="space-y-6 p-8">
<Card className="space-y-4 p-4">
<div>
<div className="flex gap-3">
<ChatTeardropDotsIcon className="size-6" />
<Text variant="h4" className="mb-1 text-slate-900">
I need more information
</Text>
</div>
<Text variant="body" className="text-slate-600">
<Text variant="h4" className="mb-1 text-slate-900">
I need more information
</Text>
<Text variant="small" className="text-slate-600">
{message}
</Text>
</div>
<div className="space-y-6">
<div className="space-y-3">
{questions.map((q, index) => {
const isAnswered = !!answers[q.keyword]?.trim();
@@ -155,34 +163,34 @@ export function ClarificationQuestionsCard({
<div
key={`${q.keyword}-${index}`}
className={cn(
"relative rounded-lg border border-dotted p-3",
"relative rounded-lg border p-3",
isAnswered
? "border-green-500 bg-green-50/50"
: "border-slate-100 bg-slate-50/50",
: "border-slate-200 bg-white/50",
)}
>
<div className="mb-2 flex items-start gap-2">
{isAnswered ? (
<CheckCircleIcon
size={20}
size={16}
className="mt-0.5 text-green-500"
weight="bold"
/>
) : (
<div className="mt-0 flex h-6 w-6 items-center justify-center rounded-full border border-slate-300 font-mono">
<div className="mt-0.5 flex h-4 w-4 items-center justify-center rounded-full border border-slate-300 bg-white text-xs text-slate-500">
{index + 1}
</div>
)}
<div className="flex-1">
<Text
variant="h5"
variant="small"
className="mb-2 font-semibold text-slate-900"
>
{q.question}
</Text>
{q.example && (
<Text
variant="body"
variant="small"
className="mb-2 italic text-slate-500"
>
Example: {q.example}
@@ -207,11 +215,11 @@ export function ClarificationQuestionsCard({
})}
</div>
<div className="flex max-w-[25rem] gap-2">
<div className="flex gap-2">
<Button
onClick={handleSubmit}
disabled={!allAnswered}
className="w-auto flex-1"
className="flex-1"
variant="primary"
>
Submit Answers

View File

@@ -157,41 +157,3 @@ export function truncateText(text: string, maxChars: number): string {
if (trimmed.length <= maxChars) return trimmed;
return `${trimmed.slice(0, maxChars).trimEnd()}`;
}
export interface ClarifyingQuestion {
question: string;
keyword: string;
example?: string;
}
export function normalizeClarifyingQuestions(
questions: Array<{ question: string; keyword: string; example?: unknown }>,
): ClarifyingQuestion[] {
const seen = new Set<string>();
return questions.map((q, index) => {
let keyword = q.keyword?.trim().toLowerCase() || "";
if (!keyword) {
keyword = `question-${index}`;
}
let unique = keyword;
let suffix = 1;
while (seen.has(unique)) {
unique = `${keyword}-${suffix}`;
suffix++;
}
seen.add(unique);
const item: ClarifyingQuestion = {
question: q.question,
keyword: unique,
};
const example =
typeof q.example === "string" && q.example.trim()
? q.example.trim()
: null;
if (example) item.example = example;
return item;
});
}

View File

@@ -1,9 +1,17 @@
"use client";
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import {
BookOpenIcon,
PencilSimpleIcon,
WarningDiamondIcon,
} from "@phosphor-icons/react";
import type { ToolUIPart } from "ai";
import { AgentSavedCard } from "../../components/AgentSavedCard/AgentSavedCard";
import Image from "next/image";
import NextLink from "next/link";
import { useCopilotChatActions } from "../../components/CopilotChatActionsProvider/useCopilotChatActions";
import { ToolErrorCard } from "../../components/ToolErrorCard/ToolErrorCard";
import sparklesImg from "../../components/MiniGame/assets/sparkles.png";
import { MiniGame } from "../../components/MiniGame/MiniGame";
import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
import {
@@ -14,8 +22,10 @@ import {
ContentMessage,
} from "../../components/ToolAccordion/AccordionContent";
import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion";
import { ClarificationQuestionsCard } from "../CreateAgent/components/ClarificationQuestionsCard";
import { normalizeClarifyingQuestions } from "../CreateAgent/helpers";
import {
ClarificationQuestionsCard,
ClarifyingQuestion,
} from "../CreateAgent/components/ClarificationQuestionsCard";
import {
AccordionIcon,
formatMaybeJson,
@@ -59,6 +69,9 @@ function getAccordionMeta(output: EditAgentToolOutput | null): {
};
}
if (isAgentSavedOutput(output)) {
return { icon, title: output.agent_name, expanded: true };
}
if (isAgentPreviewOutput(output)) {
return {
icon,
@@ -74,7 +87,13 @@ function getAccordionMeta(output: EditAgentToolOutput | null): {
description: `${questions.length} question${questions.length === 1 ? "" : "s"}`,
};
}
return { icon, title: "" };
return {
icon: (
<WarningDiamondIcon size={32} weight="light" className="text-red-500" />
),
title: "Error",
titleClassName: "text-red-500",
};
}
export function EditAgentTool({ part }: Props) {
@@ -124,64 +143,135 @@ export function EditAgentTool({ part }: Props) {
)}
{isError && output && isErrorOutput(output) && (
<ToolErrorCard
message={output.message}
fallbackMessage="Failed to edit the agent. Please try again."
error={output.error ? formatMaybeJson(output.error) : undefined}
details={output.details ? formatMaybeJson(output.details) : undefined}
actions={[
{
label: "Try again",
onClick: () => onSend("Please try editing the agent again."),
},
]}
/>
<div className="space-y-3 rounded-lg border border-red-200 bg-red-50 p-4">
<div className="flex items-start gap-2">
<WarningDiamondIcon
size={20}
weight="regular"
className="mt-0.5 shrink-0 text-red-500"
/>
<div className="flex-1 space-y-2">
<Text variant="body-medium" className="text-red-900">
{output.message ||
"Failed to edit the agent. Please try again."}
</Text>
{output.error && (
<details className="text-xs text-red-700">
<summary className="cursor-pointer font-medium">
Technical details
</summary>
<pre className="mt-2 max-h-40 overflow-auto whitespace-pre-wrap break-words rounded bg-red-100 p-2">
{formatMaybeJson(output.error)}
</pre>
</details>
)}
{output.details && (
<pre className="max-h-40 overflow-auto whitespace-pre-wrap break-words rounded bg-red-100 p-2 text-xs text-red-700">
{formatMaybeJson(output.details)}
</pre>
)}
</div>
</div>
<Button
variant="outline"
size="small"
onClick={() => onSend("Please try editing the agent again.")}
>
Try again
</Button>
</div>
)}
{hasExpandableContent &&
!(output && isClarificationNeededOutput(output)) &&
!(output && isAgentSavedOutput(output)) && (
<ToolAccordion {...getAccordionMeta(output)}>
{isOperating && (
<ContentGrid>
<MiniGame />
<ContentHint>
This could take a few minutes play while you wait!
</ContentHint>
</ContentGrid>
)}
{hasExpandableContent && (
<ToolAccordion {...getAccordionMeta(output)}>
{isOperating && (
<ContentGrid>
<MiniGame />
<ContentHint>
This could take a few minutes play while you wait!
</ContentHint>
</ContentGrid>
)}
{output && isAgentPreviewOutput(output) && (
<ContentGrid>
<ContentMessage>{output.message}</ContentMessage>
{output.description?.trim() && (
<ContentCardDescription>
{output.description}
</ContentCardDescription>
)}
<ContentCodeBlock>
{truncateText(formatMaybeJson(output.agent_json), 1600)}
</ContentCodeBlock>
</ContentGrid>
)}
</ToolAccordion>
)}
{output && isAgentSavedOutput(output) && (
<div className="rounded-xl border border-border/60 bg-card p-4 shadow-sm">
<div className="flex items-baseline gap-2">
<Image
src={sparklesImg}
alt="sparkles"
width={24}
height={24}
className="relative top-1"
/>
<Text
variant="body-medium"
className="mb-2 text-[16px] text-black"
>
Agent{" "}
<span className="text-violet-600">{output.agent_name}</span>{" "}
has been updated!
</Text>
</div>
<div className="mt-3 flex flex-wrap gap-4">
<Button variant="outline" size="small">
<NextLink
href={output.library_agent_link}
className="inline-flex items-center gap-1.5"
target="_blank"
rel="noopener noreferrer"
>
<BookOpenIcon size={14} weight="regular" />
Open in library
</NextLink>
</Button>
<Button variant="outline" size="small">
<NextLink
href={output.agent_page_link}
target="_blank"
rel="noopener noreferrer"
className="inline-flex items-center gap-1.5"
>
<PencilSimpleIcon size={14} weight="regular" />
Open in builder
</NextLink>
</Button>
</div>
</div>
)}
{output && isAgentSavedOutput(output) && (
<AgentSavedCard
agentName={output.agent_name}
message="has been updated!"
libraryAgentLink={output.library_agent_link}
agentPageLink={output.agent_page_link}
/>
)}
{output && isAgentPreviewOutput(output) && (
<ContentGrid>
<ContentMessage>{output.message}</ContentMessage>
{output.description?.trim() && (
<ContentCardDescription>
{output.description}
</ContentCardDescription>
)}
<ContentCodeBlock>
{truncateText(formatMaybeJson(output.agent_json), 1600)}
</ContentCodeBlock>
</ContentGrid>
)}
{output && isClarificationNeededOutput(output) && (
<ClarificationQuestionsCard
questions={normalizeClarifyingQuestions(output.questions ?? [])}
message={output.message}
onSubmitAnswers={handleClarificationAnswers}
/>
{output && isClarificationNeededOutput(output) && (
<ClarificationQuestionsCard
questions={(output.questions ?? []).map((q) => {
const item: ClarifyingQuestion = {
question: q.question,
keyword: q.keyword,
};
const example =
typeof q.example === "string" && q.example.trim()
? q.example.trim()
: null;
if (example) item.example = example;
return item;
})}
message={output.message}
onSubmitAnswers={handleClarificationAnswers}
/>
)}
</ToolAccordion>
)}
</div>
);

View File

@@ -501,79 +501,27 @@ function getFileAccordionData(
"path",
"pattern",
) ?? "File";
const content = getStringField(
output,
"content",
"text",
"preview",
"content_preview",
"_raw",
);
const content = getStringField(output, "content", "text", "_raw");
const message = getStringField(output, "message");
// Handle base64 content from workspace files
let displayContent = content;
if (output.content_base64 && typeof output.content_base64 === "string") {
try {
const bytes = Uint8Array.from(atob(output.content_base64), (c) =>
c.charCodeAt(0),
);
displayContent = new TextDecoder().decode(bytes);
} catch {
displayContent = "[Binary content]";
}
}
// Handle MCP-style content blocks from SDK tools (Read, Glob, Grep, Edit)
if (!displayContent) {
displayContent = extractMcpText(output);
}
// For Glob/list results, try to show file list
// Files can be either strings (from Glob) or objects (from list_workspace_files)
const files = Array.isArray(output.files) ? output.files : null;
// Format file list for display
let fileListText: string | null = null;
if (files && files.length > 0) {
const fileLines = files.map((f: unknown) => {
if (typeof f === "string") {
return f;
}
if (typeof f === "object" && f !== null) {
const fileObj = f as Record<string, unknown>;
// Workspace file format: path (size, mime_type)
const filePath =
typeof fileObj.path === "string"
? fileObj.path
: typeof fileObj.name === "string"
? fileObj.name
: "unknown";
const mimeType =
typeof fileObj.mime_type === "string" ? fileObj.mime_type : "unknown";
const size =
typeof fileObj.size_bytes === "number"
? ` (${(fileObj.size_bytes / 1024).toFixed(1)} KB, ${mimeType})`
: "";
return `${filePath}${size}`;
}
return String(f);
});
fileListText = fileLines.join("\n");
}
const files = Array.isArray(output.files)
? output.files.filter((f: unknown): f is string => typeof f === "string")
: null;
return {
title: message ?? "File output",
description: truncate(filePath, 80),
content: (
<div className="space-y-2">
{displayContent && (
<ContentCodeBlock>{truncate(displayContent, 2000)}</ContentCodeBlock>
{content && (
<ContentCodeBlock>{truncate(content, 2000)}</ContentCodeBlock>
)}
{fileListText && (
<ContentCodeBlock>{truncate(fileListText, 2000)}</ContentCodeBlock>
{files && files.length > 0 && (
<ContentCodeBlock>
{truncate(files.join("\n"), 2000)}
</ContentCodeBlock>
)}
{!displayContent && !fileListText && message && (
{!content && !files && message && (
<ContentMessage>{message}</ContentMessage>
)}
</div>

View File

@@ -2,11 +2,13 @@
import type { AgentDetailsResponse } from "@/app/api/__generated__/models/agentDetailsResponse";
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import { FormRenderer } from "@/components/renderers/InputRenderer/FormRenderer";
import { AnimatePresence, motion } from "framer-motion";
import { useState } from "react";
import { useCopilotChatActions } from "../../../../components/CopilotChatActionsProvider/useCopilotChatActions";
import { ContentMessage } from "../../../../components/ToolAccordion/AccordionContent";
import { buildInputSchema, extractDefaults, isFormValid } from "./helpers";
import { buildInputSchema } from "./helpers";
interface Props {
output: AgentDetailsResponse;
@@ -14,25 +16,16 @@ interface Props {
export function AgentDetailsCard({ output }: Props) {
const { onSend } = useCopilotChatActions();
const schema = buildInputSchema(output.agent.inputs);
const [showInputForm, setShowInputForm] = useState(false);
const [inputValues, setInputValues] = useState<Record<string, unknown>>({});
const defaults = schema ? extractDefaults(schema) : {};
const [inputValues, setInputValues] =
useState<Record<string, unknown>>(defaults);
const [valid, setValid] = useState(() =>
schema ? isFormValid(schema, defaults) : false,
);
function handleChange(v: { formData?: Record<string, unknown> }) {
const data = v.formData ?? {};
setInputValues(data);
if (schema) {
setValid(isFormValid(schema, data));
}
function handleRunWithExamples() {
onSend(
`Run the agent "${output.agent.name}" with placeholder/example values so I can test it.`,
);
}
function handleProceed() {
function handleRunWithInputs() {
const nonEmpty = Object.fromEntries(
Object.entries(inputValues).filter(
([, v]) => v !== undefined && v !== null && v !== "",
@@ -41,61 +34,83 @@ export function AgentDetailsCard({ output }: Props) {
onSend(
`Run the agent "${output.agent.name}" with these inputs: ${JSON.stringify(nonEmpty, null, 2)}`,
);
}
if (!schema) {
return (
<div className="grid gap-2">
<ContentMessage>This agent has no configurable inputs.</ContentMessage>
<div className="flex gap-2 pt-2">
<Button
size="small"
className="w-fit"
onClick={() =>
onSend(
`Run the agent "${output.agent.name}" with placeholder/example values so I can test it.`,
)
}
>
Proceed
</Button>
</div>
</div>
);
setShowInputForm(false);
setInputValues({});
}
return (
<div className="grid gap-2">
<ContentMessage>
Review the inputs below and press Proceed to run.
Run this agent with example values or your own inputs.
</ContentMessage>
<div className="mt-2 rounded-2xl border bg-background p-3 pt-4">
<FormRenderer
jsonSchema={schema}
handleChange={handleChange}
uiSchema={{
"ui:submitButtonOptions": { norender: true },
}}
initialValues={inputValues}
formContext={{
showHandles: false,
size: "small",
}}
/>
</div>
<div className="mt-4">
<div className="flex gap-2 pt-4">
<Button size="small" className="w-fit" onClick={handleRunWithExamples}>
Run with example values
</Button>
<Button
variant="primary"
variant="outline"
size="small"
className="w-fit"
disabled={!valid}
onClick={handleProceed}
onClick={() => setShowInputForm((prev) => !prev)}
>
Proceed
Run with my inputs
</Button>
</div>
<AnimatePresence initial={false}>
{showInputForm && buildInputSchema(output.agent.inputs) && (
<motion.div
initial={{ height: 0, opacity: 0, filter: "blur(6px)" }}
animate={{ height: "auto", opacity: 1, filter: "blur(0px)" }}
exit={{ height: 0, opacity: 0, filter: "blur(6px)" }}
transition={{
height: { type: "spring", bounce: 0.15, duration: 0.5 },
opacity: { duration: 0.25 },
filter: { duration: 0.2 },
}}
className="overflow-hidden"
style={{ willChange: "height, opacity, filter" }}
>
<div className="mt-4 rounded-2xl border bg-background p-3 pt-4">
<Text variant="body-medium">Enter your inputs</Text>
<FormRenderer
jsonSchema={buildInputSchema(output.agent.inputs)!}
handleChange={(v) => setInputValues(v.formData ?? {})}
uiSchema={{
"ui:submitButtonOptions": { norender: true },
}}
initialValues={inputValues}
formContext={{
showHandles: false,
size: "small",
}}
/>
<div className="-mt-8 flex gap-2">
<Button
variant="primary"
size="small"
className="w-fit"
onClick={handleRunWithInputs}
>
Run
</Button>
<Button
variant="secondary"
size="small"
className="w-fit"
onClick={() => {
setShowInputForm(false);
setInputValues({});
}}
>
Cancel
</Button>
</div>
</div>
</motion.div>
)}
</AnimatePresence>
</div>
);
}

View File

@@ -1,5 +1,4 @@
import type { RJSFSchema } from "@rjsf/utils";
import { customValidator } from "@/components/renderers/InputRenderer/utils/custom-validator";
export function buildInputSchema(inputs: unknown): RJSFSchema | null {
if (!inputs || typeof inputs !== "object") return null;
@@ -7,31 +6,3 @@ export function buildInputSchema(inputs: unknown): RJSFSchema | null {
if (!properties || Object.keys(properties).length === 0) return null;
return inputs as RJSFSchema;
}
/**
 * Collects initial form values from an RJSF schema's `properties`.
 *
 * For each property, prefers an explicit `default` (when defined),
 * falling back to the first entry of a non-empty `examples` array.
 * Properties with neither are omitted from the result.
 *
 * @param schema - JSON schema whose `properties` map is scanned.
 * @returns Map of property name to its seed value (possibly empty).
 */
export function extractDefaults(schema: RJSFSchema): Record<string, unknown> {
  const seed: Record<string, unknown> = {};
  const props = schema.properties;
  if (!props || typeof props !== "object") return seed;
  return Object.entries(props).reduce((acc, [field, spec]) => {
    // Boolean schemas / null entries carry no default information.
    if (typeof spec !== "object" || spec === null) return acc;
    const hasDefault = "default" in spec && spec.default !== undefined;
    if (hasDefault) {
      acc[field] = spec.default;
    } else if (
      "examples" in spec &&
      Array.isArray(spec.examples) &&
      spec.examples.length > 0
    ) {
      acc[field] = spec.examples[0];
    }
    return acc;
  }, seed);
}
/**
 * Returns true when `formData` satisfies `schema` according to the shared
 * RJSF custom validator, i.e. validation reports zero errors.
 *
 * @param schema - JSON schema to validate against.
 * @param formData - Current form values.
 */
export function isFormValid(
  schema: RJSFSchema,
  formData: Record<string, unknown>,
): boolean {
  // Only the error list matters here; other validator output is ignored.
  const { errors } = customValidator.validateFormData(formData, schema);
  return errors.length === 0;
}

View File

@@ -0,0 +1,188 @@
import type { Meta, StoryObj } from "@storybook/nextjs";
import { ResponseType } from "@/app/api/__generated__/models/responseType";
import type { BlockDetailsResponse } from "../../helpers";
import { BlockDetailsCard } from "./BlockDetailsCard";

// Storybook stories for BlockDetailsCard, the Copilot card that shows a
// block's inputs/outputs schema before running it.
const meta: Meta<typeof BlockDetailsCard> = {
  title: "Copilot/RunBlock/BlockDetailsCard",
  component: BlockDetailsCard,
  parameters: {
    layout: "centered",
  },
  tags: ["autodocs"],
  decorators: [
    // Constrain width so the card renders at a realistic chat-panel size.
    (Story) => (
      <div style={{ maxWidth: 480 }}>
        <Story />
      </div>
    ),
  ],
};
export default meta;
type Story = StoryObj<typeof meta>;

// Shared fixture: a GetWeather block with one required input and two outputs.
// Individual stories spread over this and override only what differs.
const baseBlock: BlockDetailsResponse = {
  type: ResponseType.block_details,
  message:
    "Here are the details for the GetWeather block. Provide the required inputs to run it.",
  session_id: "session-123",
  user_authenticated: true,
  block: {
    id: "block-abc-123",
    name: "GetWeather",
    description: "Fetches current weather data for a given location.",
    inputs: {
      type: "object",
      properties: {
        location: {
          title: "Location",
          type: "string",
          description:
            "City name or coordinates (e.g. 'London' or '51.5,-0.1')",
        },
        units: {
          title: "Units",
          type: "string",
          description: "Temperature units: 'metric' or 'imperial'",
        },
      },
      required: ["location"],
    },
    outputs: {
      type: "object",
      properties: {
        temperature: {
          title: "Temperature",
          type: "number",
          description: "Current temperature in the requested units",
        },
        condition: {
          title: "Condition",
          type: "string",
          description: "Weather condition description (e.g. 'Sunny', 'Rain')",
        },
      },
    },
    credentials: [],
  },
};

// Baseline: block with both inputs and outputs populated.
export const Default: Story = {
  args: {
    output: baseBlock,
  },
};

// Block whose outputs map is empty — exercises the inputs-only layout.
export const InputsOnly: Story = {
  args: {
    output: {
      ...baseBlock,
      message: "This block requires inputs. No outputs are defined.",
      block: {
        ...baseBlock.block,
        outputs: {},
      },
    },
  },
};

// Block whose inputs map is empty — exercises the outputs-only layout.
export const OutputsOnly: Story = {
  args: {
    output: {
      ...baseBlock,
      message: "This block has no required inputs.",
      block: {
        ...baseBlock.block,
        inputs: {},
      },
    },
  },
};

// Stress case: many input fields (three required) and multiple outputs.
export const ManyFields: Story = {
  args: {
    output: {
      ...baseBlock,
      message: "Block with many input and output fields.",
      block: {
        ...baseBlock.block,
        name: "SendEmail",
        description: "Sends an email via SMTP.",
        inputs: {
          type: "object",
          properties: {
            to: {
              title: "To",
              type: "string",
              description: "Recipient email address",
            },
            subject: {
              title: "Subject",
              type: "string",
              description: "Email subject line",
            },
            body: {
              title: "Body",
              type: "string",
              description: "Email body content",
            },
            cc: {
              title: "CC",
              type: "string",
              description: "CC recipients (comma-separated)",
            },
            bcc: {
              title: "BCC",
              type: "string",
              description: "BCC recipients (comma-separated)",
            },
          },
          required: ["to", "subject", "body"],
        },
        outputs: {
          type: "object",
          properties: {
            message_id: {
              title: "Message ID",
              type: "string",
              description: "Unique ID of the sent email",
            },
            status: {
              title: "Status",
              type: "string",
              description: "Delivery status",
            },
          },
        },
      },
    },
  },
};

// Fields that carry only title/type — verifies rendering without descriptions.
export const NoFieldDescriptions: Story = {
  args: {
    output: {
      ...baseBlock,
      message: "Fields without descriptions.",
      block: {
        ...baseBlock.block,
        name: "SimpleBlock",
        inputs: {
          type: "object",
          properties: {
            input_a: { title: "Input A", type: "string" },
            input_b: { title: "Input B", type: "number" },
          },
          required: ["input_a"],
        },
        outputs: {
          type: "object",
          properties: {
            result: { title: "Result", type: "string" },
          },
        },
      },
    },
  },
};

View File

@@ -134,7 +134,7 @@ export function SetupRequirementsCard({ output }: Props) {
<Button
variant="primary"
size="small"
className="mt-4 w-fit"
className="w-fit"
disabled={!canRun}
onClick={handleRun}
>

View File

@@ -18,7 +18,7 @@ export function useChatSession() {
const sessionQuery = useGetV2GetSession(sessionId ?? "", {
query: {
enabled: !!sessionId,
staleTime: Infinity, // Manual invalidation on session switch
staleTime: Infinity,
refetchOnWindowFocus: false,
refetchOnReconnect: true,
refetchOnMount: true,
@@ -47,7 +47,7 @@ export function useChatSession() {
const hasActiveStream = useMemo(() => {
if (sessionQuery.data?.status !== 200) return false;
return !!sessionQuery.data.data.active_stream;
}, [sessionQuery.data, sessionId]);
}, [sessionQuery.data]);
// Memoize so the effect in useCopilotPage doesn't infinite-loop on a new
// array reference every render. Re-derives only when query data changes.
@@ -119,6 +119,5 @@ export function useChatSession() {
isSessionError: sessionQuery.isError,
createSession,
isCreatingSession,
refetchSession: sessionQuery.refetch,
};
}

View File

@@ -15,9 +15,7 @@ import type { UIMessage } from "ai";
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
import { useChatSession } from "./useChatSession";
const RECONNECT_BASE_DELAY_MS = 1_000;
const RECONNECT_MAX_DELAY_MS = 30_000;
const RECONNECT_MAX_ATTEMPTS = 5;
const STREAM_START_TIMEOUT_MS = 12_000;
/** Mark any in-progress tool parts as completed/errored so spinners stop. */
function resolveInProgressTools(
@@ -37,12 +35,42 @@ function resolveInProgressTools(
}));
}
/** Simple ID-based deduplication - trust backend for correctness */
/** Build a fingerprint from a message's role + text/tool content for cross-boundary dedup. */
function messageFingerprint(msg: UIMessage): string {
const fragments = msg.parts.map((p) => {
if ("text" in p && typeof p.text === "string") return p.text;
if ("toolCallId" in p && typeof p.toolCallId === "string")
return `tool:${p.toolCallId}`;
return "";
});
return `${msg.role}::${fragments.join("\n")}`;
}
/**
* Deduplicate messages by ID *and* by content fingerprint.
* ID-based dedup catches duplicates within the same source (e.g. two
* identical stream events). Fingerprint-based dedup catches duplicates
* across the hydration/stream boundary where IDs differ (synthetic
* `${sessionId}-${index}` vs AI SDK nanoid).
*
* NOTE: Fingerprint dedup only applies to assistant messages, not user messages.
* Users should be able to send the same message multiple times.
*/
function deduplicateMessages(messages: UIMessage[]): UIMessage[] {
const seenIds = new Set<string>();
const seenFingerprints = new Set<string>();
return messages.filter((msg) => {
if (seenIds.has(msg.id)) return false;
seenIds.add(msg.id);
// Only apply fingerprint deduplication to assistant messages
// User messages should allow duplicates (same text sent multiple times)
if (msg.role === "assistant") {
const fp = messageFingerprint(msg);
if (fp !== "::" && seenFingerprints.has(fp)) return false;
seenFingerprints.add(fp);
}
return true;
});
}
@@ -66,7 +94,6 @@ export function useCopilotPage() {
isSessionError,
createSession,
isCreatingSession,
refetchSession,
} = useChatSession();
const { mutate: deleteSessionMutation, isPending: isDeleting } =
@@ -126,48 +153,6 @@ export function useCopilotPage() {
[sessionId],
);
// Reconnect state
const [reconnectAttempts, setReconnectAttempts] = useState(0);
const [isReconnectScheduled, setIsReconnectScheduled] = useState(false);
const reconnectTimerRef = useRef<ReturnType<typeof setTimeout>>();
const hasShownDisconnectToast = useRef(false);
// Consolidated reconnect logic
function handleReconnect(sid: string) {
if (isReconnectScheduled || !sid) return;
const nextAttempt = reconnectAttempts + 1;
if (nextAttempt > RECONNECT_MAX_ATTEMPTS) {
toast({
title: "Connection lost",
description: "Unable to reconnect. Please refresh the page.",
variant: "destructive",
});
return;
}
setIsReconnectScheduled(true);
setReconnectAttempts(nextAttempt);
if (!hasShownDisconnectToast.current) {
hasShownDisconnectToast.current = true;
toast({
title: "Connection lost",
description: "Reconnecting...",
});
}
const delay = Math.min(
RECONNECT_BASE_DELAY_MS * 2 ** reconnectAttempts,
RECONNECT_MAX_DELAY_MS,
);
reconnectTimerRef.current = setTimeout(() => {
setIsReconnectScheduled(false);
resumeStream();
}, delay);
}
const {
messages: rawMessages,
sendMessage,
@@ -179,32 +164,9 @@ export function useCopilotPage() {
} = useChat({
id: sessionId ?? undefined,
transport: transport ?? undefined,
onFinish: async ({ isDisconnect, isAbort }) => {
if (isAbort || !sessionId) return;
if (isDisconnect) {
handleReconnect(sessionId);
return;
}
// Check if backend executor is still running after clean close
const result = await refetchSession();
const backendActive =
result.data?.status === 200 && !!result.data.data.active_stream;
if (backendActive) {
handleReconnect(sessionId);
}
},
onError: (error) => {
if (!sessionId) return;
// Only reconnect on network errors (not HTTP errors)
const isNetworkError =
error.name === "TypeError" || error.name === "AbortError";
if (isNetworkError) {
handleReconnect(sessionId);
}
},
// Don't use resume: true — it fires before hydration completes, causing
// the hydrated messages to overwrite the resumed stream. Instead we
// call resumeStream() manually after hydration + active_stream detection.
});
// Deduplicate messages continuously to prevent duplicates when resuming streams
@@ -243,31 +205,51 @@ export function useCopilotPage() {
}
}
// Hydrate messages from REST API when not actively streaming
// Abort the stream if the backend doesn't start sending data within 12s.
const stopRef = useRef(stop);
stopRef.current = stop;
useEffect(() => {
if (status !== "submitted") return;
const timer = setTimeout(() => {
stopRef.current();
toast({
title: "Stream timed out",
description: "The server took too long to respond. Please try again.",
variant: "destructive",
});
}, STREAM_START_TIMEOUT_MS);
return () => clearTimeout(timer);
}, [status]);
// Hydrate messages from the REST session endpoint.
// Skip hydration while streaming to avoid overwriting the live stream.
useEffect(() => {
if (!hydratedMessages || hydratedMessages.length === 0) return;
if (status === "streaming" || status === "submitted") return;
if (isReconnectScheduled) return;
setMessages((prev) => {
if (prev.length >= hydratedMessages.length) return prev;
// Deduplicate to handle rare cases where duplicate streams might occur
return deduplicateMessages(hydratedMessages);
});
}, [hydratedMessages, setMessages, status, isReconnectScheduled]);
}, [hydratedMessages, setMessages, status]);
// Track resume state per session
// Ref: tracks whether we've already resumed for a given session.
// Format: Map<sessionId, hasResumed>
const hasResumedRef = useRef<Map<string, boolean>>(new Map());
// Clean up reconnect state on session switch
useEffect(() => {
clearTimeout(reconnectTimerRef.current);
reconnectTimerRef.current = undefined;
setReconnectAttempts(0);
setIsReconnectScheduled(false);
hasShownDisconnectToast.current = false;
prevStatusRef.current = status; // Reset to avoid cross-session state bleeding
}, [sessionId, status]);
// Invalidate session cache when stream completes
// When the stream ends (or drops), invalidate the session cache so the
// next hydration fetches fresh messages from the backend. Without this,
// staleTime: Infinity means the cache keeps the pre-stream data forever,
// and any messages added during streaming are lost on remount/navigation.
// Track status transitions for cache invalidation and auto-reconnect.
// Auto-reconnect: GCP's L7 load balancer kills SSE connections at ~5 min.
// When that happens the AI SDK goes "streaming" → "error". If the backend
// executor is still running (hasActiveStream), we call resumeStream() to
// reconnect via GET and replay from Redis.
const MAX_RECONNECT_ATTEMPTS = 3;
const reconnectAttemptsRef = useRef(0);
const prevStatusRef = useRef(status);
useEffect(() => {
const prev = prevStatusRef.current;
@@ -276,16 +258,43 @@ export function useCopilotPage() {
const wasActive = prev === "streaming" || prev === "submitted";
const isIdle = status === "ready" || status === "error";
if (wasActive && isIdle && sessionId && !isReconnectScheduled) {
// Invalidate session cache when stream ends so hydration fetches fresh data
if (wasActive && isIdle && sessionId) {
queryClient.invalidateQueries({
queryKey: getGetV2GetSessionQueryKey(sessionId),
});
if (status === "ready") {
setReconnectAttempts(0);
hasShownDisconnectToast.current = false;
}
// Auto-reconnect on mid-stream SSE drop
if (
prev === "streaming" &&
status === "error" &&
sessionId &&
hasActiveStream
) {
if (reconnectAttemptsRef.current < MAX_RECONNECT_ATTEMPTS) {
reconnectAttemptsRef.current += 1;
const attempt = reconnectAttemptsRef.current;
console.info(
`[copilot] SSE dropped mid-stream, reconnecting (attempt ${attempt}/${MAX_RECONNECT_ATTEMPTS})...`,
);
const timer = setTimeout(() => resumeStream(), 1_000);
return () => clearTimeout(timer);
} else {
toast({
title: "Connection lost",
description:
"Could not reconnect to the stream. Please refresh the page.",
variant: "destructive",
});
}
}
}, [status, sessionId, queryClient, isReconnectScheduled]);
// Reset reconnect counter when stream completes normally or resumes
if (status === "ready" || status === "streaming") {
reconnectAttemptsRef.current = 0;
}
}, [status, sessionId, hasActiveStream, queryClient, resumeStream]);
// Resume an active stream AFTER hydration completes.
// IMPORTANT: Only runs when page loads with existing active stream (reconnection).
@@ -382,16 +391,16 @@ export function useCopilotPage() {
}
}, [isDeleting]);
// True while reconnecting or backend has active stream but we haven't connected yet
// True while we know the backend has an active stream but haven't
// reconnected yet. Used to disable the send button and show stop UI.
const isReconnecting =
isReconnectScheduled ||
(hasActiveStream && status !== "streaming" && status !== "submitted");
hasActiveStream && status !== "streaming" && status !== "submitted";
return {
sessionId,
messages,
status,
error: isReconnecting ? undefined : error,
error,
stop,
isReconnecting,
isLoadingSession,

View File

@@ -1,31 +1,17 @@
"use client";
import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import { LibraryAgentSort } from "@/app/api/__generated__/models/libraryAgentSort";
import { Text } from "@/components/atoms/Text/Text";
import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
import { InfiniteScroll } from "@/components/contextual/InfiniteScroll/InfiniteScroll";
import { HeartIcon } from "@phosphor-icons/react";
import { useFavoriteAgents } from "../../hooks/useFavoriteAgents";
import { LibraryAgentCard } from "../LibraryAgentCard/LibraryAgentCard";
import { LibraryTabs, Tab } from "../LibraryTabs/LibraryTabs";
import { LibraryActionSubHeader } from "../LibraryActionSubHeader/LibraryActionSubHeader";
interface Props {
searchTerm: string;
tabs: Tab[];
activeTab: string;
onTabChange: (tabId: string) => void;
setLibrarySort: (value: LibraryAgentSort) => void;
}
export function FavoritesSection({
searchTerm,
tabs,
activeTab,
onTabChange,
setLibrarySort,
}: Props) {
export function FavoritesSection({ searchTerm }: Props) {
const {
allAgents: favoriteAgents,
agentLoading: isLoading,
@@ -35,33 +21,38 @@ export function FavoritesSection({
isFetchingNextPage,
} = useFavoriteAgents({ searchTerm });
return (
<>
<LibraryActionSubHeader
agentCount={agentCount}
setLibrarySort={setLibrarySort}
/>
<LibraryTabs
tabs={tabs}
activeTab={activeTab}
onTabChange={onTabChange}
/>
if (isLoading || favoriteAgents.length === 0) {
return null;
}
{isLoading ? (
<div className="flex h-[200px] items-center justify-center">
<LoadingSpinner size="large" />
return (
<div className="!mb-8">
<div className="mb-3 flex items-center gap-2 p-2">
<HeartIcon className="h-5 w-5" weight="fill" />
<div className="flex items-baseline gap-2">
<Text variant="h4">Favorites</Text>
{!isLoading && (
<Text
variant="body"
data-testid="agents-count"
className="relative bottom-px text-zinc-500"
>
{agentCount}
</Text>
)}
</div>
) : favoriteAgents.length === 0 ? (
<div className="flex h-[200px] flex-col items-center justify-center gap-2 text-zinc-500">
<HeartIcon className="h-10 w-10" />
<Text variant="body">No favorite agents yet</Text>
</div>
) : (
</div>
<div className="relative">
<InfiniteScroll
isFetchingNextPage={isFetchingNextPage}
fetchNextPage={fetchNextPage}
hasNextPage={hasNextPage}
loader={<LoadingSpinner size="medium" />}
loader={
<div className="flex h-8 w-full items-center justify-center">
<div className="h-6 w-6 animate-spin rounded-full border-b-2 border-t-2 border-neutral-800" />
</div>
}
>
<div className="grid grid-cols-1 gap-4 sm:grid-cols-2 lg:grid-cols-3 xl:grid-cols-4">
{favoriteAgents.map((agent: LibraryAgent) => (
@@ -69,7 +60,9 @@ export function FavoritesSection({
))}
</div>
</InfiniteScroll>
)}
</>
</div>
{favoriteAgents.length > 0 && <div className="!mt-10 border-t" />}
</div>
);
}

View File

@@ -1,71 +0,0 @@
"use client";
import { motion, AnimatePresence, useReducedMotion } from "framer-motion";
import { HeartIcon } from "@phosphor-icons/react";
import { useEffect, useState } from "react";

// Props for the one-shot "heart flies to favorites" overlay.
interface FlyingHeartProps {
  // Viewport coordinates the heart starts from; null suppresses rendering.
  startPosition: { x: number; y: number } | null;
  // Viewport coordinates the heart flies toward; null suppresses rendering.
  targetPosition: { x: number; y: number } | null;
  // Invoked once after the fly animation completes and the heart is hidden.
  onAnimationComplete: () => void;
}

/**
 * Renders a fixed-position heart icon that animates from `startPosition`
 * to `targetPosition`, shrinking (scale 0.5) and fading out, then hides
 * itself and calls `onAnimationComplete`. When the user prefers reduced
 * motion the transition duration collapses to 0 so the move is instant.
 */
export function FlyingHeart({
  startPosition,
  targetPosition,
  onAnimationComplete,
}: FlyingHeartProps) {
  const [isVisible, setIsVisible] = useState(false);
  const shouldReduceMotion = useReducedMotion();

  // Becoming visible is driven by both positions being set; visibility is
  // cleared again in onAnimationComplete below.
  useEffect(() => {
    if (startPosition && targetPosition) {
      setIsVisible(true);
    }
  }, [startPosition, targetPosition]);

  if (!startPosition || !targetPosition) return null;

  return (
    <AnimatePresence>
      {isVisible && (
        <motion.div
          className="pointer-events-none fixed z-50"
          initial={{
            x: startPosition.x,
            y: startPosition.y,
            scale: 1,
            opacity: 1,
          }}
          animate={{
            // NOTE(review): both ternary branches are identical — reduced
            // motion is actually handled by the transition below, so these
            // could read plain targetPosition.x / targetPosition.y.
            x: shouldReduceMotion ? targetPosition.x : targetPosition.x,
            y: shouldReduceMotion ? targetPosition.y : targetPosition.y,
            scale: 0.5,
            opacity: 0,
          }}
          exit={{ opacity: 0 }}
          transition={
            shouldReduceMotion
              ? { duration: 0 }
              : {
                  type: "spring",
                  damping: 20,
                  stiffness: 200,
                  duration: 0.5,
                }
          }
          onAnimationComplete={() => {
            // Hide first so AnimatePresence unmounts, then notify the caller.
            setIsVisible(false);
            onAnimationComplete();
          }}
        >
          <HeartIcon
            size={24}
            weight="fill"
            className="text-red-500 drop-shadow-md"
          />
        </motion.div>
      )}
    </AnimatePresence>
  );
}

View File

@@ -1,25 +0,0 @@
"use client";
import { EmojiPicker } from "@ferrucc-io/emoji-picker";

// Props for the emoji-picker wrapper component.
interface Props {
  // Receives the chosen emoji character when the user picks one.
  onEmojiSelect: (emoji: string) => void;
  // Height (px) of the scrollable emoji list; defaults to 295.
  containerHeight?: number;
}

/**
 * Thin wrapper around @ferrucc-io/emoji-picker that renders a single
 * grouped emoji list with sticky headers hidden, at a fixed emoji size
 * of 32px, filling its container's width.
 */
export function LazyEmojiPicker({
  onEmojiSelect,
  containerHeight = 295,
}: Props) {
  return (
    <EmojiPicker
      onEmojiSelect={onEmojiSelect}
      emojiSize={32}
      className="w-full rounded-2xl px-2"
    >
      <EmojiPicker.Group>
        <EmojiPicker.List hideStickyHeader containerHeight={containerHeight} />
      </EmojiPicker.Group>
    </EmojiPicker>
  );
}

View File

@@ -13,7 +13,7 @@ export function LibraryActionSubHeader({ agentCount, setLibrarySort }: Props) {
return (
<div className="flex items-baseline justify-between">
<div className="flex items-baseline gap-4">
<Text variant="h5">My agents</Text>
<Text variant="h4">My agents</Text>
<Text
variant="body"
data-testid="agents-count"

View File

@@ -4,7 +4,6 @@ import { Text } from "@/components/atoms/Text/Text";
import { CaretCircleRightIcon } from "@phosphor-icons/react";
import Image from "next/image";
import NextLink from "next/link";
import { motion } from "framer-motion";
import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import Avatar, {
@@ -15,21 +14,13 @@ import { Link } from "@/components/atoms/Link/Link";
import { AgentCardMenu } from "./components/AgentCardMenu";
import { FavoriteButton } from "./components/FavoriteButton";
import { useLibraryAgentCard } from "./useLibraryAgentCard";
import { useFavoriteAnimation } from "../../context/FavoriteAnimationContext";
interface Props {
agent: LibraryAgent;
draggable?: boolean;
}
export function LibraryAgentCard({ agent, draggable = true }: Props) {
export function LibraryAgentCard({ agent }: Props) {
const { id, name, graph_id, can_access_graph, image_url } = agent;
const { triggerFavoriteAnimation } = useFavoriteAnimation();
function handleDragStart(e: React.DragEvent<HTMLDivElement>) {
e.dataTransfer.setData("application/agent-id", id);
e.dataTransfer.effectAllowed = "move";
}
const {
isFromMarketplace,
@@ -37,119 +28,103 @@ export function LibraryAgentCard({ agent, draggable = true }: Props) {
profile,
creator_image_url,
handleToggleFavorite,
} = useLibraryAgentCard({
agent,
onFavoriteAdd: triggerFavoriteAnimation,
});
} = useLibraryAgentCard({ agent });
return (
<div
draggable={draggable}
onDragStart={handleDragStart}
className="[@media(pointer:fine)]:cursor-grab [@media(pointer:fine)]:active:cursor-grabbing"
data-testid="library-agent-card"
data-agent-id={id}
className="group relative inline-flex h-[10.625rem] w-full max-w-[25rem] flex-col items-start justify-start gap-2.5 rounded-medium border border-zinc-100 bg-white transition-all duration-300 hover:shadow-md"
>
<motion.div
layoutId={`agent-card-${id}`}
data-testid="library-agent-card"
data-agent-id={id}
className="group relative inline-flex h-[10.625rem] w-full max-w-[25rem] flex-col items-start justify-start gap-2.5 rounded-medium border border-zinc-100 bg-white hover:shadow-md"
transition={{
type: "spring",
damping: 25,
stiffness: 300,
}}
style={{ willChange: "transform" }}
>
<NextLink href={`/library/agents/${id}`} className="flex-shrink-0">
<div className="relative flex items-center gap-2 px-4 pt-3">
<Avatar className="h-4 w-4 rounded-full">
<AvatarImage
src={
isFromMarketplace
? creator_image_url || "/avatar-placeholder.png"
: profile?.avatar_url || "/avatar-placeholder.png"
}
alt={`${name} creator avatar`}
/>
<AvatarFallback size={48}>{name.charAt(0)}</AvatarFallback>
</Avatar>
<Text
variant="small-medium"
className="uppercase tracking-wide text-zinc-400"
>
{isFromMarketplace ? "FROM MARKETPLACE" : "Built by you"}
</Text>
</div>
</NextLink>
<FavoriteButton
isFavorite={isFavorite}
onClick={handleToggleFavorite}
className="absolute right-10 top-0"
/>
<AgentCardMenu agent={agent} />
<NextLink href={`/library/agents/${id}`} className="flex-shrink-0">
<div className="relative flex items-center gap-2 px-4 pt-3">
<Avatar className="h-4 w-4 rounded-full">
<AvatarImage
src={
isFromMarketplace
? creator_image_url || "/avatar-placeholder.png"
: profile?.avatar_url || "/avatar-placeholder.png"
}
alt={`${name} creator avatar`}
/>
<AvatarFallback size={48}>{name.charAt(0)}</AvatarFallback>
</Avatar>
<Text
variant="small-medium"
className="uppercase tracking-wide text-zinc-400"
>
{isFromMarketplace ? "FROM MARKETPLACE" : "Built by you"}
</Text>
</div>
</NextLink>
<FavoriteButton
isFavorite={isFavorite}
onClick={handleToggleFavorite}
className="absolute right-10 top-0"
/>
<AgentCardMenu agent={agent} />
<div className="flex w-full flex-1 flex-col px-4 pb-2">
<div className="flex w-full flex-1 flex-col px-4 pb-2">
<Link
href={`/library/agents/${id}`}
className="flex w-full items-start justify-between gap-2 no-underline hover:no-underline"
>
<Text
variant="h5"
data-testid="library-agent-card-name"
className="line-clamp-3 hyphens-auto break-words no-underline hover:no-underline"
>
{name}
</Text>
{!image_url ? (
<div
className={`h-[3.64rem] w-[6.70rem] flex-shrink-0 rounded-small ${
[
"bg-gradient-to-r from-green-200 to-blue-200",
"bg-gradient-to-r from-pink-200 to-purple-200",
"bg-gradient-to-r from-yellow-200 to-orange-200",
"bg-gradient-to-r from-blue-200 to-cyan-200",
"bg-gradient-to-r from-indigo-200 to-purple-200",
][parseInt(id.slice(0, 8), 16) % 5]
}`}
style={{
backgroundSize: "200% 200%",
animation: "gradient 15s ease infinite",
}}
/>
) : (
<Image
src={image_url}
alt={`${name} preview image`}
width={107}
height={58}
className="flex-shrink-0 rounded-small object-cover"
/>
)}
</Link>
<div className="mt-auto flex w-full justify-start gap-6 border-t border-zinc-100 pb-1 pt-3">
<Link
href={`/library/agents/${id}`}
className="flex w-full items-start justify-between gap-2 no-underline hover:no-underline focus:ring-0"
data-testid="library-agent-card-see-runs-link"
className="flex items-center gap-1 text-[13px]"
>
<Text
variant="h5"
data-testid="library-agent-card-name"
className="line-clamp-3 hyphens-auto break-words no-underline hover:no-underline"
>
{name}
</Text>
{!image_url ? (
<div
className={`h-[3.64rem] w-[6.70rem] flex-shrink-0 rounded-small ${
[
"bg-gradient-to-r from-green-200 to-blue-200",
"bg-gradient-to-r from-pink-200 to-purple-200",
"bg-gradient-to-r from-yellow-200 to-orange-200",
"bg-gradient-to-r from-blue-200 to-cyan-200",
"bg-gradient-to-r from-indigo-200 to-purple-200",
][parseInt(id.slice(0, 8), 16) % 5]
}`}
style={{
backgroundSize: "200% 200%",
animation: "gradient 15s ease infinite",
}}
/>
) : (
<Image
src={image_url}
alt={`${name} preview image`}
width={107}
height={58}
className="flex-shrink-0 rounded-small object-cover"
/>
)}
See runs <CaretCircleRightIcon size={20} />
</Link>
<div className="mt-auto flex w-full justify-start gap-6 border-t border-zinc-100 pb-1 pt-3">
{can_access_graph && (
<Link
href={`/library/agents/${id}`}
data-testid="library-agent-card-see-runs-link"
href={`/build?flowID=${graph_id}`}
data-testid="library-agent-card-open-in-builder-link"
className="flex items-center gap-1 text-[13px]"
isExternal
>
See runs <CaretCircleRightIcon size={20} />
Open in builder <CaretCircleRightIcon size={20} />
</Link>
{can_access_graph && (
<Link
href={`/build?flowID=${graph_id}`}
data-testid="library-agent-card-open-in-builder-link"
className="flex items-center gap-1 text-[13px]"
isExternal
>
Open in builder <CaretCircleRightIcon size={20} />
</Link>
)}
</div>
)}
</div>
</motion.div>
</div>
</div>
);
}

View File

@@ -5,10 +5,6 @@ import {
useDeleteV2DeleteLibraryAgent,
usePostV2ForkLibraryAgent,
} from "@/app/api/__generated__/endpoints/library/library";
import {
usePostV2BulkMoveAgents,
getGetV2ListLibraryFoldersQueryKey,
} from "@/app/api/__generated__/endpoints/folders/folders";
import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
@@ -26,7 +22,6 @@ import { useQueryClient } from "@tanstack/react-query";
import Link from "next/link";
import { useRouter } from "next/navigation";
import { useState } from "react";
import { MoveToFolderDialog } from "../../MoveToFolderDialog/MoveToFolderDialog";
interface AgentCardMenuProps {
agent: LibraryAgent;
@@ -37,25 +32,11 @@ export function AgentCardMenu({ agent }: AgentCardMenuProps) {
const queryClient = useQueryClient();
const router = useRouter();
const [showDeleteDialog, setShowDeleteDialog] = useState(false);
const [showMoveDialog, setShowMoveDialog] = useState(false);
const [isDeletingAgent, setIsDeletingAgent] = useState(false);
const [isDuplicatingAgent, setIsDuplicatingAgent] = useState(false);
const [isRemovingFromFolder, setIsRemovingFromFolder] = useState(false);
const { mutateAsync: deleteAgent } = useDeleteV2DeleteLibraryAgent();
const { mutateAsync: forkAgent } = usePostV2ForkLibraryAgent();
const { mutateAsync: bulkMoveAgents } = usePostV2BulkMoveAgents({
mutation: {
onSuccess: () => {
queryClient.invalidateQueries({
queryKey: getGetV2ListLibraryAgentsQueryKey(),
});
queryClient.invalidateQueries({
queryKey: getGetV2ListLibraryFoldersQueryKey(),
});
},
},
});
async function handleDuplicateAgent() {
if (!agent.id) return;
@@ -89,37 +70,6 @@ export function AgentCardMenu({ agent }: AgentCardMenuProps) {
}
}
async function handleRemoveFromFolder() {
if (!agent.id) return;
setIsRemovingFromFolder(true);
try {
await bulkMoveAgents({
data: {
agent_ids: [agent.id],
folder_id: null,
},
});
toast({
title: "Removed from folder",
description: "Agent has been moved back to your library.",
});
} catch (error: unknown) {
toast({
title: "Failed to remove from folder",
description:
error instanceof Error
? error.message
: "An unexpected error occurred.",
variant: "destructive",
});
} finally {
setIsRemovingFromFolder(false);
}
}
async function handleDeleteAgent() {
if (!agent.id) return;
@@ -188,31 +138,6 @@ export function AgentCardMenu({ agent }: AgentCardMenuProps) {
Duplicate agent
</DropdownMenuItem>
<DropdownMenuSeparator />
<DropdownMenuItem
onClick={(e) => {
e.stopPropagation();
setShowMoveDialog(true);
}}
className="flex items-center gap-2"
>
Move to folder
</DropdownMenuItem>
{agent.folder_id && (
<>
<DropdownMenuSeparator />
<DropdownMenuItem
onClick={(e) => {
e.stopPropagation();
handleRemoveFromFolder();
}}
disabled={isRemovingFromFolder}
className="flex items-center gap-2"
>
Remove from folder
</DropdownMenuItem>
</>
)}
<DropdownMenuSeparator />
<DropdownMenuItem
onClick={(e) => {
e.stopPropagation();
@@ -258,14 +183,6 @@ export function AgentCardMenu({ agent }: AgentCardMenuProps) {
</div>
</Dialog.Content>
</Dialog>
<MoveToFolderDialog
agentId={agent.id}
agentName={agent.name}
currentFolderId={agent.folder_id}
isOpen={showMoveDialog}
setIsOpen={setShowMoveDialog}
/>
</>
);
}

View File

@@ -3,15 +3,10 @@
import { cn } from "@/lib/utils";
import { HeartIcon } from "@phosphor-icons/react";
import type { MouseEvent } from "react";
import { useRef } from "react";
import { motion, AnimatePresence } from "framer-motion";
interface FavoriteButtonProps {
isFavorite: boolean;
onClick: (
e: MouseEvent<HTMLButtonElement>,
position: { x: number; y: number },
) => void;
onClick: (e: MouseEvent<HTMLButtonElement>) => void;
className?: string;
}
@@ -20,49 +15,25 @@ export function FavoriteButton({
onClick,
className,
}: FavoriteButtonProps) {
const buttonRef = useRef<HTMLButtonElement>(null);
function handleClick(e: MouseEvent<HTMLButtonElement>) {
const rect = buttonRef.current?.getBoundingClientRect();
const position = rect
? {
x: rect.left + rect.width / 2 - 12,
y: rect.top + rect.height / 2 - 12,
}
: { x: 0, y: 0 };
onClick(e, position);
}
return (
<button
ref={buttonRef}
onClick={handleClick}
onClick={onClick}
className={cn(
"rounded-full p-2 transition-all duration-200",
"hover:scale-110 active:scale-95",
"hover:scale-110",
!isFavorite && "opacity-0 group-hover:opacity-100",
className,
)}
aria-label={isFavorite ? "Remove from favorites" : "Add to favorites"}
>
<AnimatePresence mode="wait" initial={false}>
<motion.div
key={isFavorite ? "filled" : "empty"}
initial={{ scale: 0.5, opacity: 0 }}
animate={{ scale: 1, opacity: 1 }}
exit={{ scale: 0.5, opacity: 0 }}
transition={{ type: "spring", damping: 15, stiffness: 300 }}
>
<HeartIcon
size={20}
weight={isFavorite ? "fill" : "regular"}
className={cn(
"transition-colors duration-200",
isFavorite ? "text-red-500" : "text-gray-600 hover:text-red-500",
)}
/>
</motion.div>
</AnimatePresence>
<HeartIcon
size={20}
weight={isFavorite ? "fill" : "regular"}
className={cn(
"transition-colors duration-200",
isFavorite ? "text-red-500" : "text-gray-600 hover:text-red-500",
)}
/>
</button>
);
}

View File

@@ -14,11 +14,11 @@ import { updateFavoriteInQueries } from "./helpers";
interface Props {
agent: LibraryAgent;
onFavoriteAdd?: (position: { x: number; y: number }) => void;
}
export function useLibraryAgentCard({ agent, onFavoriteAdd }: Props) {
const { id, is_favorite, creator_image_url, marketplace_listing } = agent;
export function useLibraryAgentCard({ agent }: Props) {
const { id, name, is_favorite, creator_image_url, marketplace_listing } =
agent;
const isFromMarketplace = Boolean(marketplace_listing);
const [isFavorite, setIsFavorite] = useState(is_favorite);
@@ -49,31 +49,26 @@ export function useLibraryAgentCard({ agent, onFavoriteAdd }: Props) {
});
}
async function handleToggleFavorite(
e: React.MouseEvent,
position: { x: number; y: number },
) {
async function handleToggleFavorite(e: React.MouseEvent) {
e.preventDefault();
e.stopPropagation();
const newIsFavorite = !isFavorite;
// Optimistic update - update UI immediately
setIsFavorite(newIsFavorite);
updateQueryData(newIsFavorite);
// Trigger animation immediately for adding to favorites
if (newIsFavorite && onFavoriteAdd) {
onFavoriteAdd(position);
}
try {
await updateLibraryAgent({
libraryAgentId: id,
data: { is_favorite: newIsFavorite },
});
toast({
title: newIsFavorite ? "Added to favorites" : "Removed from favorites",
description: `${name} has been ${newIsFavorite ? "added to" : "removed from"} your favorites.`,
});
} catch {
// Revert on failure
setIsFavorite(!newIsFavorite);
updateQueryData(!newIsFavorite);

View File

@@ -1,123 +1,30 @@
"use client";
import { LibraryAgentSort } from "@/app/api/__generated__/models/libraryAgentSort";
import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
import { InfiniteScroll } from "@/components/contextual/InfiniteScroll/InfiniteScroll";
import { LibraryActionSubHeader } from "../LibraryActionSubHeader/LibraryActionSubHeader";
import { LibraryAgentCard } from "../LibraryAgentCard/LibraryAgentCard";
import { LibraryFolder } from "../LibraryFolder/LibraryFolder";
import { LibrarySubSection } from "../LibrarySubSection/LibrarySubSection";
import { Button } from "@/components/atoms/Button/Button";
import { ArrowLeftIcon, HeartIcon } from "@phosphor-icons/react";
import { Text } from "@/components/atoms/Text/Text";
import { Tab } from "../LibraryTabs/LibraryTabs";
import {
AnimatePresence,
LayoutGroup,
motion,
useReducedMotion,
} from "framer-motion";
import { LibraryFolderEditDialog } from "../LibraryFolderEditDialog/LibraryFolderEditDialog";
import { LibraryFolderDeleteDialog } from "../LibraryFolderDeleteDialog/LibraryFolderDeleteDialog";
import { useLibraryAgentList } from "./useLibraryAgentList";
// cancels the current spring and starts a new one from current state.
const containerVariants = {
hidden: {},
show: {},
exit: {
opacity: 0,
filter: "blur(4px)",
transition: { duration: 0.12 },
},
};
const reducedContainerVariants = {
hidden: {},
show: {},
exit: {
opacity: 0,
transition: { duration: 0.12 },
},
};
const itemInitial = {
opacity: 0,
filter: "blur(4px)",
};
const itemAnimate = {
opacity: 1,
filter: "blur(0px)",
};
const itemTransition = {
type: "spring" as const,
stiffness: 300,
damping: 25,
opacity: { duration: 0.2 },
filter: { duration: 0.15 },
};
const reducedItemInitial = { opacity: 0 };
const reducedItemAnimate = { opacity: 1 };
const reducedItemTransition = { duration: 0.15 };
interface Props {
searchTerm: string;
librarySort: LibraryAgentSort;
setLibrarySort: (value: LibraryAgentSort) => void;
selectedFolderId: string | null;
onFolderSelect: (folderId: string | null) => void;
tabs: Tab[];
activeTab: string;
onTabChange: (tabId: string) => void;
}
export function LibraryAgentList({
searchTerm,
librarySort,
setLibrarySort,
selectedFolderId,
onFolderSelect,
tabs,
activeTab,
onTabChange,
}: Props) {
const shouldReduceMotion = useReducedMotion();
const activeContainerVariants = shouldReduceMotion
? reducedContainerVariants
: containerVariants;
const activeInitial = shouldReduceMotion ? reducedItemInitial : itemInitial;
const activeAnimate = shouldReduceMotion ? reducedItemAnimate : itemAnimate;
const activeTransition = shouldReduceMotion
? reducedItemTransition
: itemTransition;
const {
isFavoritesTab,
agentLoading,
agentCount,
agents,
allAgents: agents,
hasNextPage,
isFetchingNextPage,
fetchNextPage,
foldersData,
currentFolder,
showFolders,
editingFolder,
setEditingFolder,
deletingFolder,
setDeletingFolder,
handleAgentDrop,
handleFolderDeleted,
} = useLibraryAgentList({
searchTerm,
librarySort,
selectedFolderId,
onFolderSelect,
activeTab,
});
} = useLibraryAgentList({ searchTerm, librarySort });
return (
<>
@@ -125,47 +32,11 @@ export function LibraryAgentList({
agentCount={agentCount}
setLibrarySort={setLibrarySort}
/>
{!selectedFolderId && (
<LibrarySubSection
tabs={tabs}
activeTab={activeTab}
onTabChange={onTabChange}
/>
)}
<div>
{selectedFolderId && (
<div className="mb-4 flex items-center gap-2">
<Button
variant="ghost"
size="small"
onClick={() => onFolderSelect(null)}
className="gap-1 text-zinc-500 hover:text-zinc-900"
>
<ArrowLeftIcon className="h-4 w-4" />
My Library
</Button>
{currentFolder && (
<>
<Text variant="small" className="text-zinc-400">
/
</Text>
<Text variant="h4" className="text-zinc-700">
{currentFolder.icon} {currentFolder.name}
</Text>
</>
)}
</div>
)}
<div className="px-2">
{agentLoading ? (
<div className="flex h-[200px] items-center justify-center">
<LoadingSpinner size="large" />
</div>
) : isFavoritesTab && agents.length === 0 ? (
<div className="flex h-[200px] flex-col items-center justify-center gap-2 text-zinc-500">
<HeartIcon className="h-10 w-10" />
<Text variant="body">No favorite agents yet</Text>
</div>
) : (
<InfiniteScroll
isFetchingNextPage={isFetchingNextPage}
@@ -173,85 +44,14 @@ export function LibraryAgentList({
hasNextPage={hasNextPage}
loader={<LoadingSpinner size="medium" />}
>
<LayoutGroup>
<AnimatePresence mode="popLayout">
<motion.div
key={`${activeTab}-${selectedFolderId || "all"}`}
className="grid grid-cols-1 gap-6 sm:grid-cols-2 md:grid-cols-2 lg:grid-cols-3 xl:grid-cols-4"
variants={activeContainerVariants}
initial="hidden"
animate="show"
exit="exit"
>
{showFolders &&
foldersData?.folders.map((folder, i) => (
<motion.div
key={folder.id}
initial={activeInitial}
animate={activeAnimate}
transition={{
...activeTransition,
delay: i * 0.04,
}}
>
<LibraryFolder
id={folder.id}
name={folder.name}
agentCount={folder.agent_count ?? 0}
color={folder.color ?? undefined}
icon={folder.icon ?? "📁"}
onAgentDrop={handleAgentDrop}
onClick={() => onFolderSelect(folder.id)}
onEdit={() => setEditingFolder(folder)}
onDelete={() => setDeletingFolder(folder)}
/>
</motion.div>
))}
{agents.map((agent, i) => (
<motion.div
key={agent.id}
initial={activeInitial}
animate={activeAnimate}
transition={{
...activeTransition,
delay:
((showFolders
? (foldersData?.folders.length ?? 0)
: 0) +
i) *
0.04,
}}
>
<LibraryAgentCard agent={agent} />
</motion.div>
))}
</motion.div>
</AnimatePresence>
</LayoutGroup>
<div className="grid grid-cols-1 gap-6 sm:grid-cols-2 md:grid-cols-2 lg:grid-cols-3 xl:grid-cols-4">
{agents.map((agent) => (
<LibraryAgentCard key={agent.id} agent={agent} />
))}
</div>
</InfiniteScroll>
)}
</div>
{editingFolder && (
<LibraryFolderEditDialog
folder={editingFolder}
isOpen={!!editingFolder}
setIsOpen={(open) => {
if (!open) setEditingFolder(null);
}}
/>
)}
{deletingFolder && (
<LibraryFolderDeleteDialog
folder={deletingFolder}
isOpen={!!deletingFolder}
setIsOpen={(open) => {
if (!open) setDeletingFolder(null);
}}
onDeleted={handleFolderDeleted}
/>
)}
</>
);
}

View File

@@ -1,69 +1,36 @@
"use client";
import { useGetV2ListLibraryAgentsInfinite } from "@/app/api/__generated__/endpoints/library/library";
import { getGetV2ListLibraryAgentsQueryKey } from "@/app/api/__generated__/endpoints/library/library";
import {
useGetV2ListLibraryFolders,
usePostV2BulkMoveAgents,
getGetV2ListLibraryFoldersQueryKey,
} from "@/app/api/__generated__/endpoints/folders/folders";
import type { getV2ListLibraryFoldersResponseSuccess } from "@/app/api/__generated__/endpoints/folders/folders";
import type { LibraryFolder } from "@/app/api/__generated__/models/libraryFolder";
import { LibraryAgentSort } from "@/app/api/__generated__/models/libraryAgentSort";
import {
okData,
getPaginatedTotalCount,
getPaginationNextPageNumber,
unpaginate,
} from "@/app/api/helpers";
import { useToast } from "@/components/molecules/Toast/use-toast";
import { useFavoriteAgents } from "../../hooks/useFavoriteAgents";
import { getQueryClient } from "@/lib/react-query/queryClient";
import { useQueryClient } from "@tanstack/react-query";
import { useEffect, useRef, useState } from "react";
import { useEffect, useRef } from "react";
interface Props {
searchTerm: string;
librarySort: LibraryAgentSort;
selectedFolderId: string | null;
onFolderSelect: (folderId: string | null) => void;
activeTab: string;
}
export function useLibraryAgentList({
searchTerm,
librarySort,
selectedFolderId,
onFolderSelect,
activeTab,
}: Props) {
const isFavoritesTab = activeTab === "favorites";
const { toast } = useToast();
const stableQueryClient = getQueryClient();
const queryClient = useQueryClient();
export function useLibraryAgentList({ searchTerm, librarySort }: Props) {
const queryClient = getQueryClient();
const prevSortRef = useRef<LibraryAgentSort | null>(null);
const [editingFolder, setEditingFolder] = useState<LibraryFolder | null>(
null,
);
const [deletingFolder, setDeletingFolder] = useState<LibraryFolder | null>(
null,
);
const {
data: agentsQueryData,
fetchNextPage,
hasNextPage,
isFetchingNextPage,
isLoading: allAgentsLoading,
isLoading: agentLoading,
} = useGetV2ListLibraryAgentsInfinite(
{
page: 1,
page_size: 20,
search_term: searchTerm || undefined,
sort_by: librarySort,
folder_id: selectedFolderId ?? undefined,
include_root_only: selectedFolderId === null ? true : undefined,
},
{
query: {
@@ -72,147 +39,28 @@ export function useLibraryAgentList({
},
);
// Reset queries when sort changes to ensure fresh data with correct sorting
useEffect(() => {
if (prevSortRef.current !== null && prevSortRef.current !== librarySort) {
stableQueryClient.resetQueries({
// Reset all library agent queries to ensure fresh fetch with new sort
queryClient.resetQueries({
queryKey: ["/api/library/agents"],
});
}
prevSortRef.current = librarySort;
}, [librarySort, stableQueryClient]);
}, [librarySort, queryClient]);
const allAgentsList = agentsQueryData
const allAgents = agentsQueryData
? unpaginate(agentsQueryData, "agents")
: [];
const allAgentsCount = getPaginatedTotalCount(agentsQueryData);
const favoriteAgentsData = useFavoriteAgents({ searchTerm });
const {
agentLoading,
agentCount,
allAgents: agents,
hasNextPage: agentsHasNextPage,
isFetchingNextPage: agentsIsFetchingNextPage,
fetchNextPage: agentsFetchNextPage,
} = isFavoritesTab
? favoriteAgentsData
: {
agentLoading: allAgentsLoading,
agentCount: allAgentsCount,
allAgents: allAgentsList,
hasNextPage: hasNextPage,
isFetchingNextPage: isFetchingNextPage,
fetchNextPage: fetchNextPage,
};
const { data: rawFoldersData } = useGetV2ListLibraryFolders(undefined, {
query: { select: okData },
});
const foldersData = searchTerm ? undefined : rawFoldersData;
const { mutate: moveAgentToFolder } = usePostV2BulkMoveAgents({
mutation: {
onMutate: async ({ data }) => {
await queryClient.cancelQueries({
queryKey: getGetV2ListLibraryFoldersQueryKey(),
});
await queryClient.cancelQueries({
queryKey: getGetV2ListLibraryAgentsQueryKey(),
});
const previousFolders =
queryClient.getQueriesData<getV2ListLibraryFoldersResponseSuccess>({
queryKey: getGetV2ListLibraryFoldersQueryKey(),
});
if (data.folder_id) {
queryClient.setQueriesData<getV2ListLibraryFoldersResponseSuccess>(
{ queryKey: getGetV2ListLibraryFoldersQueryKey() },
(old) => {
if (!old?.data?.folders) return old;
return {
...old,
data: {
...old.data,
folders: old.data.folders.map((f) =>
f.id === data.folder_id
? {
...f,
agent_count:
(f.agent_count ?? 0) + data.agent_ids.length,
}
: f,
),
},
};
},
);
}
return { previousFolders };
},
onError: (_error, _variables, context) => {
if (context?.previousFolders) {
for (const [queryKey, data] of context.previousFolders) {
queryClient.setQueryData(queryKey, data);
}
}
toast({
title: "Error",
description: "Failed to move agent. Please try again.",
variant: "destructive",
});
},
onSettled: () => {
queryClient.invalidateQueries({
queryKey: getGetV2ListLibraryFoldersQueryKey(),
});
queryClient.invalidateQueries({
queryKey: getGetV2ListLibraryAgentsQueryKey(),
});
},
},
});
function handleAgentDrop(agentId: string, folderId: string) {
moveAgentToFolder({
data: {
agent_ids: [agentId],
folder_id: folderId,
},
});
}
const currentFolder = selectedFolderId
? foldersData?.folders.find((f) => f.id === selectedFolderId)
: null;
const showFolders = !isFavoritesTab && !selectedFolderId;
function handleFolderDeleted() {
if (selectedFolderId === deletingFolder?.id) {
onFolderSelect(null);
}
}
const agentCount = getPaginatedTotalCount(agentsQueryData);
return {
isFavoritesTab,
allAgents,
agentLoading,
hasNextPage,
agentCount,
agents,
hasNextPage: agentsHasNextPage,
isFetchingNextPage: agentsIsFetchingNextPage,
fetchNextPage: agentsFetchNextPage,
foldersData,
currentFolder,
showFolders,
editingFolder,
setEditingFolder,
deletingFolder,
setDeletingFolder,
handleAgentDrop,
handleFolderDeleted,
isFetchingNextPage,
fetchNextPage,
};
}

View File

@@ -1,255 +0,0 @@
import { motion } from "framer-motion";
import { Text } from "@/components/atoms/Text/Text";
// Preset sizes for the folder graphic; see sizeMap for the scale factors.
type FolderSize = "xs" | "sm" | "md" | "lg" | "xl";

// Named palette entries supported by colorMap / folderCardStyles.
export type FolderColorName = "blue" | "purple" | "emerald" | "orange" | "pink";

// Either a named color or an arbitrary string (e.g. a stored hex value).
// `string & {}` keeps literal autocomplete while still accepting any string.
export type FolderColor = FolderColorName | (string & {});

// Maps known hex color values to their named palette entry. Upper- and
// lower-case spellings are both listed so lookup tolerates either casing
// for these specific codes.
const hexToColorName: Record<string, FolderColorName> = {
  "#3B82F6": "blue",
  "#3b82f6": "blue",
  "#A855F7": "purple",
  "#a855f7": "purple",
  "#10B981": "emerald",
  "#10b981": "emerald",
  "#F97316": "orange",
  "#f97316": "orange",
  "#EC4899": "pink",
  "#ec4899": "pink",
};
/**
 * Normalizes any FolderColor — a named palette entry, a known hex value, or
 * an arbitrary string — to a FolderColorName, falling back to "blue".
 */
export function resolveColor(color: FolderColor | undefined): FolderColorName {
  if (!color) return "blue";
  const fromHex = hexToColorName[color];
  if (fromHex) return fromHex;
  return color in colorMap ? (color as FolderColorName) : "blue";
}
// Props for FolderIcon.
interface Props {
  className?: string;
  // Preset name, or an explicit numeric scale factor (1 = 320x208 px base).
  size?: FolderSize | number;
  color?: FolderColor;
  // Short string (typically an emoji) rendered centered on the folder front.
  icon?: string;
  // When true, the pages animate out of the folder and the front flap tilts.
  isOpen?: boolean;
}

// Scale factor applied to the 320x208 base folder for each preset size.
const sizeMap: Record<FolderSize, number> = {
  xs: 0.4,
  sm: 0.75,
  md: 1,
  lg: 1.25,
  xl: 1.5,
};
// Tailwind class fragments per named color, used by the folder graphic
// itself: back panel (bg/border), page borders (borderLight), and the
// front-flap SVG path (fill/stroke).
const colorMap: Record<
  FolderColorName,
  {
    bg: string;
    border: string;
    borderLight: string;
    fill: string;
    stroke: string;
  }
> = {
  blue: {
    bg: "bg-blue-300",
    border: "border-blue-300",
    borderLight: "border-blue-200",
    fill: "fill-blue-300",
    stroke: "stroke-blue-400",
  },
  purple: {
    bg: "bg-purple-200",
    border: "border-purple-200",
    borderLight: "border-purple-200",
    fill: "fill-purple-200",
    stroke: "stroke-purple-400",
  },
  emerald: {
    bg: "bg-emerald-300",
    border: "border-emerald-300",
    borderLight: "border-emerald-200",
    fill: "fill-emerald-300",
    stroke: "stroke-emerald-400",
  },
  orange: {
    bg: "bg-orange-200",
    border: "border-orange-200",
    borderLight: "border-orange-200",
    fill: "fill-orange-200",
    stroke: "stroke-orange-400",
  },
  pink: {
    bg: "bg-pink-300",
    border: "border-pink-300",
    borderLight: "border-pink-200",
    fill: "fill-pink-300",
    stroke: "stroke-pink-400",
  },
};

// Card-level Tailwind styles per named color. Exported — not referenced in
// this file; presumably consumed by folder card components elsewhere.
export const folderCardStyles: Record<
  FolderColorName,
  { bg: string; border: string; buttonBase: string; buttonHover: string }
> = {
  blue: {
    bg: "bg-blue-50",
    border: "border-blue-200",
    buttonBase: "border-zinc-600 text-black",
    buttonHover: "hover:bg-blue-200",
  },
  purple: {
    bg: "bg-purple-50",
    border: "border-purple-200",
    // NOTE(review): trailing space in this class string looks accidental —
    // harmless to Tailwind, but confirm before normalizing.
    buttonBase: "border-zinc-600 text-black ",
    buttonHover: "hover:bg-purple-200",
  },
  emerald: {
    bg: "bg-emerald-50",
    border: "border-emerald-200",
    buttonBase: "border-zinc-600 text-black",
    buttonHover: "hover:bg-emerald-200",
  },
  orange: {
    bg: "bg-orange-50",
    border: "border-orange-200",
    buttonBase: "border-zinc-600 text-black",
    buttonHover: "hover:bg-orange-200",
  },
  pink: {
    bg: "bg-pink-50",
    border: "border-pink-200",
    buttonBase: "border-zinc-600 text-black",
    buttonHover: "hover:bg-pink-200",
  },
};
/**
 * Animated folder illustration: a folder back panel holding three paper
 * "pages", with a folder-front flap drawn as an SVG on top. When `isOpen`
 * is true the pages spring out to their open positions and the front flap
 * tilts back (rotateX).
 */
export function FolderIcon({
  className = "",
  size = "xs",
  color = "blue",
  icon,
  isOpen = false,
}: Props) {
  // Numeric sizes are used directly; preset names go through sizeMap.
  const scale = typeof size === "number" ? size : sizeMap[size];
  const resolvedColor = resolveColor(color);
  const colors = colorMap[resolvedColor];

  return (
    <div
      className={`group relative cursor-pointer ${className}`}
      style={{
        // Outer box reserves the scaled footprint; the fixed-size 320x208
        // (w-80 h-52) folder below is shrunk/grown via transform to fit.
        width: 320 * scale,
        height: 208 * scale,
      }}
    >
      <div
        className="h-52 w-80 origin-top-left"
        style={{ transform: `scale(${scale})`, perspective: "500px" }}
      >
        {/* Folder back panel containing the three animated pages. */}
        <div
          className={`folder-back relative mx-auto flex h-full w-[87.5%] justify-center overflow-visible rounded-3xl ${colors.bg} ${colors.border}`}
        >
          {/* Per-page closed ("initial") and open poses plus the spring
              used between them. The middle page sits on top (z-20) of the
              two tilted side pages (z-10). */}
          {[
            {
              initial: { rotate: -3, x: -38, y: 2 },
              open: { rotate: -8, x: -70, y: -75 },
              transition: {
                type: "spring" as const,
                bounce: 0.15,
                stiffness: 160,
                damping: 22,
              },
              className: "z-10",
            },
            {
              initial: { rotate: 0, x: 0, y: 0 },
              open: { rotate: 1, x: 2, y: -95 },
              transition: {
                type: "spring" as const,
                duration: 0.55,
                bounce: 0.12,
                stiffness: 190,
                damping: 24,
              },
              className: "z-20",
            },
            {
              initial: { rotate: 3.5, x: 42, y: 1 },
              open: { rotate: 9, x: 75, y: -80 },
              transition: {
                type: "spring" as const,
                duration: 0.58,
                bounce: 0.17,
                stiffness: 170,
                damping: 21,
              },
              className: "z-10",
            },
          ].map((page, i) => (
            <motion.div
              key={i}
              initial={page.initial}
              animate={isOpen ? page.open : page.initial}
              transition={page.transition}
              className={`absolute top-2 h-fit w-32 rounded-xl shadow-lg ${page.className}`}
            >
              <Page color={resolvedColor} />
            </motion.div>
          ))}
        </div>
        {/* Folder front flap: tilts back -15deg on the X axis when open. */}
        <motion.div
          animate={{
            rotateX: isOpen ? -15 : 0,
          }}
          transition={{ type: "spring", duration: 0.5, bounce: 0.25 }}
          className="absolute inset-x-0 -bottom-px z-30 mx-auto flex h-44 w-[87.5%] origin-bottom items-end justify-center overflow-visible"
          style={{ transformStyle: "preserve-3d" }}
        >
          <svg
            className="h-auto w-full"
            viewBox="0 0 173 109"
            fill="none"
            xmlns="http://www.w3.org/2000/svg"
            preserveAspectRatio="none"
          >
            {/* Folder-front outline with a tab along the top edge. */}
            <path
              className={`${colors.fill} ${colors.stroke}`}
              d="M15.0423 0.500003C0.5 0.500009 0.5 14.2547 0.5 14.2547V92.5C0.5 101.337 7.66344 108.5 16.5 108.5H156.5C165.337 108.5 172.5 101.337 172.5 92.5V34.3302C172.5 25.4936 165.355 18.3302 156.519 18.3302H108.211C98.1341 18.3302 91.2921 5.57144 82.0156 1.63525C80.3338 0.921645 78.2634 0.500002 75.7187 0.500003H15.0423Z"
            />
          </svg>
          {/* Icon (usually an emoji) centered over the front flap. */}
          <div className="absolute inset-0 flex items-center justify-center text-7xl">
            {icon}
          </div>
        </motion.div>
      </div>
    </div>
  );
}
// Props for the decorative Page rendered inside the folder.
interface PageProps {
  // Named palette entry controlling the page's border color.
  color: FolderColorName;
}
/**
 * Decorative "document" shown inside the folder: an "agent.json" title
 * above eight rows of two placeholder bars, bordered in the given color.
 */
function Page({ color = "blue" }: PageProps) {
  const { borderLight } = colorMap[color];

  // Eight identical two-bar placeholder rows.
  const rows = Array.from({ length: 8 }, (_, rowIndex) => (
    <div key={rowIndex} className="flex gap-2">
      <div className="h-1.5 flex-1 rounded-full bg-neutral-100" />
      <div className="h-1.5 flex-1 rounded-full bg-neutral-100" />
    </div>
  ));

  return (
    <div
      className={`h-full w-full rounded-xl border bg-white p-4 ${borderLight}`}
    >
      <div className="flex flex-col gap-2">
        <Text variant="h5" className="text-black">
          agent.json
        </Text>
        {rows}
      </div>
    </div>
  );
}
}

Some files were not shown because too many files have changed in this diff Show More