mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-02-13 00:05:02 -05:00
Compare commits
87 Commits
abhimanyuy
...
feat/copit
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
43884a7322 | ||
|
|
cb45e7957b | ||
|
|
f1d02fb8f3 | ||
|
|
47de6b6420 | ||
|
|
62cd2eea89 | ||
|
|
ae61ec692e | ||
|
|
9296bd8736 | ||
|
|
308113c03d | ||
|
|
51abf13254 | ||
|
|
54b03d3a29 | ||
|
|
239dff5ebd | ||
|
|
1dd53db21c | ||
|
|
06c16ee2fe | ||
|
|
8d2a649ee5 | ||
|
|
cb166dd6fb | ||
|
|
9589474709 | ||
|
|
3d31f62bf1 | ||
|
|
b8b6c9de23 | ||
|
|
749a78723a | ||
|
|
bec2e1ddee | ||
|
|
ec1ab06e0d | ||
|
|
f31cb49557 | ||
|
|
fd28c386f4 | ||
|
|
3bea584659 | ||
|
|
4f6055f494 | ||
|
|
695a185fa1 | ||
|
|
113e87a23c | ||
|
|
d7f7a2747f | ||
|
|
68849e197c | ||
|
|
211478bb29 | ||
|
|
0e88dd15b2 | ||
|
|
7f3c227f0a | ||
|
|
40b58807ab | ||
|
|
d0e2e6f013 | ||
|
|
efdc8d73cc | ||
|
|
a34810d8a2 | ||
|
|
038b7d5841 | ||
|
|
cac93b0cc9 | ||
|
|
2025aaf5f2 | ||
|
|
ae9bce3bae | ||
|
|
3107d889fc | ||
|
|
f174fb6303 | ||
|
|
920a4c5f15 | ||
|
|
e95fadbb86 | ||
|
|
b14b3803ad | ||
|
|
82c483d6c8 | ||
|
|
7cffa1895f | ||
|
|
9791bdd724 | ||
|
|
750a674c78 | ||
|
|
960c7980a3 | ||
|
|
e85d437bb2 | ||
|
|
44f9536bd6 | ||
|
|
1c1085a227 | ||
|
|
d7ef70469e | ||
|
|
1926127ddd | ||
|
|
8b509e56de | ||
|
|
acb2d0bd1b | ||
|
|
51aa369c80 | ||
|
|
6403ffe353 | ||
|
|
c40a98ba3c | ||
|
|
a31fc8b162 | ||
|
|
0f2d1a6553 | ||
|
|
87d817b83b | ||
|
|
acf932bf4f | ||
|
|
f562d9a277 | ||
|
|
3c92a96504 | ||
|
|
8b8e1df739 | ||
|
|
602a0a4fb1 | ||
|
|
8d7d531ae0 | ||
|
|
43153a12e0 | ||
|
|
587e11c60a | ||
|
|
57da545e02 | ||
|
|
626980bf27 | ||
|
|
e42b27af3c | ||
|
|
34face15d2 | ||
|
|
7d32c83f95 | ||
|
|
6e2a45b84e | ||
|
|
32f6532e9c | ||
|
|
0bbe8a184d | ||
|
|
7592deed63 | ||
|
|
b9c759ce4f | ||
|
|
5efb80d47b | ||
|
|
b49d8e2cba | ||
|
|
452544530d | ||
|
|
32ee7e6cf8 | ||
|
|
670663c406 | ||
|
|
0dbe4cf51e |
@@ -62,12 +62,18 @@ ENV POETRY_HOME=/opt/poetry \
|
||||
DEBIAN_FRONTEND=noninteractive
|
||||
ENV PATH=/opt/poetry/bin:$PATH
|
||||
|
||||
# Install Python, FFmpeg, and ImageMagick (required for video processing blocks)
|
||||
# Install Python, FFmpeg, ImageMagick, and CLI tools for agent use.
|
||||
# bubblewrap provides OS-level sandbox (whitelist-only FS + no network)
|
||||
# for the bash_exec MCP tool.
|
||||
RUN apt-get update && apt-get install -y \
|
||||
python3.13 \
|
||||
python3-pip \
|
||||
ffmpeg \
|
||||
imagemagick \
|
||||
jq \
|
||||
ripgrep \
|
||||
tree \
|
||||
bubblewrap \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Copy only necessary files from builder
|
||||
|
||||
@@ -27,12 +27,11 @@ class ChatConfig(BaseSettings):
|
||||
session_ttl: int = Field(default=43200, description="Session TTL in seconds")
|
||||
|
||||
# Streaming Configuration
|
||||
max_context_messages: int = Field(
|
||||
default=50, ge=1, le=200, description="Maximum context messages"
|
||||
)
|
||||
|
||||
stream_timeout: int = Field(default=300, description="Stream timeout in seconds")
|
||||
max_retries: int = Field(default=3, description="Maximum number of retries")
|
||||
max_retries: int = Field(
|
||||
default=3,
|
||||
description="Max retries for fallback path (SDK handles retries internally)",
|
||||
)
|
||||
max_agent_runs: int = Field(default=30, description="Maximum number of agent runs")
|
||||
max_agent_schedules: int = Field(
|
||||
default=30, description="Maximum number of agent schedules"
|
||||
@@ -93,6 +92,26 @@ class ChatConfig(BaseSettings):
|
||||
description="Name of the prompt in Langfuse to fetch",
|
||||
)
|
||||
|
||||
# Claude Agent SDK Configuration
|
||||
use_claude_agent_sdk: bool = Field(
|
||||
default=True,
|
||||
description="Use Claude Agent SDK for chat completions",
|
||||
)
|
||||
claude_agent_model: str | None = Field(
|
||||
default=None,
|
||||
description="Model for the Claude Agent SDK path. If None, derives from "
|
||||
"the `model` field by stripping the OpenRouter provider prefix.",
|
||||
)
|
||||
claude_agent_max_buffer_size: int = Field(
|
||||
default=10 * 1024 * 1024, # 10MB (default SDK is 1MB)
|
||||
description="Max buffer size in bytes for Claude Agent SDK JSON message parsing. "
|
||||
"Increase if tool outputs exceed the limit.",
|
||||
)
|
||||
claude_agent_max_subtasks: int = Field(
|
||||
default=10,
|
||||
description="Max number of sub-agent Tasks the SDK can spawn per session.",
|
||||
)
|
||||
|
||||
# Extended thinking configuration for Claude models
|
||||
thinking_enabled: bool = Field(
|
||||
default=True,
|
||||
@@ -138,6 +157,17 @@ class ChatConfig(BaseSettings):
|
||||
v = os.getenv("CHAT_INTERNAL_API_KEY")
|
||||
return v
|
||||
|
||||
@field_validator("use_claude_agent_sdk", mode="before")
|
||||
@classmethod
|
||||
def get_use_claude_agent_sdk(cls, v):
|
||||
"""Get use_claude_agent_sdk from environment if not provided."""
|
||||
# Check environment variable - default to True if not set
|
||||
env_val = os.getenv("CHAT_USE_CLAUDE_AGENT_SDK", "").lower()
|
||||
if env_val:
|
||||
return env_val in ("true", "1", "yes", "on")
|
||||
# Default to True (SDK enabled by default)
|
||||
return True if v is None else v
|
||||
|
||||
# Prompt paths for different contexts
|
||||
PROMPT_PATHS: dict[str, str] = {
|
||||
"default": "prompts/chat_system.md",
|
||||
|
||||
@@ -334,9 +334,8 @@ async def _get_session_from_cache(session_id: str) -> ChatSession | None:
|
||||
try:
|
||||
session = ChatSession.model_validate_json(raw_session)
|
||||
logger.info(
|
||||
f"Loading session {session_id} from cache: "
|
||||
f"message_count={len(session.messages)}, "
|
||||
f"roles={[m.role for m in session.messages]}"
|
||||
f"[CACHE] Loaded session {session_id}: {len(session.messages)} messages, "
|
||||
f"last_roles={[m.role for m in session.messages[-3:]]}" # Last 3 roles
|
||||
)
|
||||
return session
|
||||
except Exception as e:
|
||||
@@ -378,11 +377,9 @@ async def _get_session_from_db(session_id: str) -> ChatSession | None:
|
||||
return None
|
||||
|
||||
messages = prisma_session.Messages
|
||||
logger.info(
|
||||
f"Loading session {session_id} from DB: "
|
||||
f"has_messages={messages is not None}, "
|
||||
f"message_count={len(messages) if messages else 0}, "
|
||||
f"roles={[m.role for m in messages] if messages else []}"
|
||||
logger.debug(
|
||||
f"[DB] Loaded session {session_id}: {len(messages) if messages else 0} messages, "
|
||||
f"roles={[m.role for m in messages[-3:]] if messages else []}" # Last 3 roles
|
||||
)
|
||||
|
||||
return ChatSession.from_db(prisma_session, messages)
|
||||
@@ -433,10 +430,9 @@ async def _save_session_to_db(
|
||||
"function_call": msg.function_call,
|
||||
}
|
||||
)
|
||||
logger.info(
|
||||
f"Saving {len(new_messages)} new messages to DB for session {session.session_id}: "
|
||||
f"roles={[m['role'] for m in messages_data]}, "
|
||||
f"start_sequence={existing_message_count}"
|
||||
logger.debug(
|
||||
f"[DB] Saving {len(new_messages)} messages to session {session.session_id}, "
|
||||
f"roles={[m['role'] for m in messages_data]}"
|
||||
)
|
||||
await chat_db.add_chat_messages_batch(
|
||||
session_id=session.session_id,
|
||||
@@ -476,7 +472,7 @@ async def get_chat_session(
|
||||
logger.warning(f"Unexpected cache error for session {session_id}: {e}")
|
||||
|
||||
# Fall back to database
|
||||
logger.info(f"Session {session_id} not in cache, checking database")
|
||||
logger.debug(f"Session {session_id} not in cache, checking database")
|
||||
session = await _get_session_from_db(session_id)
|
||||
|
||||
if session is None:
|
||||
@@ -493,7 +489,6 @@ async def get_chat_session(
|
||||
# Cache the session from DB
|
||||
try:
|
||||
await _cache_session(session)
|
||||
logger.info(f"Cached session {session_id} from database")
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to cache session {session_id}: {e}")
|
||||
|
||||
@@ -558,6 +553,40 @@ async def upsert_chat_session(
|
||||
return session
|
||||
|
||||
|
||||
async def append_and_save_message(session_id: str, message: ChatMessage) -> ChatSession:
|
||||
"""Atomically append a message to a session and persist it.
|
||||
|
||||
Acquires the session lock, re-fetches the latest session state,
|
||||
appends the message, and saves — preventing message loss when
|
||||
concurrent requests modify the same session.
|
||||
"""
|
||||
lock = await _get_session_lock(session_id)
|
||||
|
||||
async with lock:
|
||||
session = await get_chat_session(session_id)
|
||||
if session is None:
|
||||
raise ValueError(f"Session {session_id} not found")
|
||||
|
||||
session.messages.append(message)
|
||||
existing_message_count = await chat_db.get_chat_session_message_count(
|
||||
session_id
|
||||
)
|
||||
|
||||
try:
|
||||
await _save_session_to_db(session, existing_message_count)
|
||||
except Exception as e:
|
||||
raise DatabaseError(
|
||||
f"Failed to persist message to session {session_id}"
|
||||
) from e
|
||||
|
||||
try:
|
||||
await _cache_session(session)
|
||||
except Exception as e:
|
||||
logger.warning(f"Cache write failed for session {session_id}: {e}")
|
||||
|
||||
return session
|
||||
|
||||
|
||||
async def create_chat_session(user_id: str) -> ChatSession:
|
||||
"""Create a new chat session and persist it.
|
||||
|
||||
@@ -664,13 +693,19 @@ async def update_session_title(session_id: str, title: str) -> bool:
|
||||
logger.warning(f"Session {session_id} not found for title update")
|
||||
return False
|
||||
|
||||
# Invalidate cache so next fetch gets updated title
|
||||
# Update title in cache if it exists (instead of invalidating).
|
||||
# This prevents race conditions where cache invalidation causes
|
||||
# the frontend to see stale DB data while streaming is still in progress.
|
||||
try:
|
||||
redis_key = _get_session_cache_key(session_id)
|
||||
async_redis = await get_redis_async()
|
||||
await async_redis.delete(redis_key)
|
||||
cached = await _get_session_from_cache(session_id)
|
||||
if cached:
|
||||
cached.title = title
|
||||
await _cache_session(cached)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to invalidate cache for session {session_id}: {e}")
|
||||
# Not critical - title will be correct on next full cache refresh
|
||||
logger.warning(
|
||||
f"Failed to update title in cache for session {session_id}: {e}"
|
||||
)
|
||||
|
||||
return True
|
||||
except Exception as e:
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
"""Chat API routes for chat session management and streaming via SSE."""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import uuid as uuid_module
|
||||
from collections.abc import AsyncGenerator
|
||||
@@ -11,13 +12,22 @@ from fastapi.responses import StreamingResponse
|
||||
from pydantic import BaseModel
|
||||
|
||||
from backend.util.exceptions import NotFoundError
|
||||
from backend.util.feature_flag import Flag, is_feature_enabled
|
||||
|
||||
from . import service as chat_service
|
||||
from . import stream_registry
|
||||
from .completion_handler import process_operation_failure, process_operation_success
|
||||
from .config import ChatConfig
|
||||
from .model import ChatSession, create_chat_session, get_chat_session, get_user_sessions
|
||||
from .response_model import StreamFinish, StreamHeartbeat
|
||||
from .model import (
|
||||
ChatMessage,
|
||||
ChatSession,
|
||||
append_and_save_message,
|
||||
create_chat_session,
|
||||
get_chat_session,
|
||||
get_user_sessions,
|
||||
)
|
||||
from .response_model import StreamError, StreamFinish, StreamHeartbeat, StreamStart
|
||||
from .sdk import service as sdk_service
|
||||
from .tools.models import (
|
||||
AgentDetailsResponse,
|
||||
AgentOutputResponse,
|
||||
@@ -40,6 +50,7 @@ from .tools.models import (
|
||||
SetupRequirementsResponse,
|
||||
UnderstandingUpdatedResponse,
|
||||
)
|
||||
from .tracking import track_user_message
|
||||
|
||||
config = ChatConfig()
|
||||
|
||||
@@ -231,6 +242,10 @@ async def get_session(
|
||||
active_task, last_message_id = await stream_registry.get_active_task_for_session(
|
||||
session_id, user_id
|
||||
)
|
||||
logger.info(
|
||||
f"[GET_SESSION] session={session_id}, active_task={active_task is not None}, "
|
||||
f"msg_count={len(messages)}, last_role={messages[-1].get('role') if messages else 'none'}"
|
||||
)
|
||||
if active_task:
|
||||
# Filter out the in-progress assistant message from the session response.
|
||||
# The client will receive the complete assistant response through the SSE
|
||||
@@ -300,10 +315,9 @@ async def stream_chat_post(
|
||||
f"user={user_id}, message_len={len(request.message)}",
|
||||
extra={"json_fields": log_meta},
|
||||
)
|
||||
|
||||
session = await _validate_and_get_session(session_id, user_id)
|
||||
logger.info(
|
||||
f"[TIMING] session validated in {(time.perf_counter() - stream_start_time)*1000:.1f}ms",
|
||||
f"[TIMING] session validated in {(time.perf_counter() - stream_start_time) * 1000:.1f}ms",
|
||||
extra={
|
||||
"json_fields": {
|
||||
**log_meta,
|
||||
@@ -312,6 +326,25 @@ async def stream_chat_post(
|
||||
},
|
||||
)
|
||||
|
||||
# Atomically append user message to session BEFORE creating task to avoid
|
||||
# race condition where GET_SESSION sees task as "running" but message isn't
|
||||
# saved yet. append_and_save_message re-fetches inside a lock to prevent
|
||||
# message loss from concurrent requests.
|
||||
if request.message:
|
||||
message = ChatMessage(
|
||||
role="user" if request.is_user_message else "assistant",
|
||||
content=request.message,
|
||||
)
|
||||
if request.is_user_message:
|
||||
track_user_message(
|
||||
user_id=user_id,
|
||||
session_id=session_id,
|
||||
message_length=len(request.message),
|
||||
)
|
||||
logger.info(f"[STREAM] Saving user message to session {session_id}")
|
||||
session = await append_and_save_message(session_id, message)
|
||||
logger.info(f"[STREAM] User message saved for session {session_id}")
|
||||
|
||||
# Create a task in the stream registry for reconnection support
|
||||
task_id = str(uuid_module.uuid4())
|
||||
operation_id = str(uuid_module.uuid4())
|
||||
@@ -327,7 +360,7 @@ async def stream_chat_post(
|
||||
operation_id=operation_id,
|
||||
)
|
||||
logger.info(
|
||||
f"[TIMING] create_task completed in {(time.perf_counter() - task_create_start)*1000:.1f}ms",
|
||||
f"[TIMING] create_task completed in {(time.perf_counter() - task_create_start) * 1000:.1f}ms",
|
||||
extra={
|
||||
"json_fields": {
|
||||
**log_meta,
|
||||
@@ -348,15 +381,47 @@ async def stream_chat_post(
|
||||
first_chunk_time, ttfc = None, None
|
||||
chunk_count = 0
|
||||
try:
|
||||
async for chunk in chat_service.stream_chat_completion(
|
||||
# Emit a start event with task_id for reconnection
|
||||
start_chunk = StreamStart(messageId=task_id, taskId=task_id)
|
||||
await stream_registry.publish_chunk(task_id, start_chunk)
|
||||
logger.info(
|
||||
f"[TIMING] StreamStart published at {(time_module.perf_counter() - gen_start_time) * 1000:.1f}ms",
|
||||
extra={
|
||||
"json_fields": {
|
||||
**log_meta,
|
||||
"elapsed_ms": (time_module.perf_counter() - gen_start_time)
|
||||
* 1000,
|
||||
}
|
||||
},
|
||||
)
|
||||
|
||||
# Choose service based on LaunchDarkly flag (falls back to config default)
|
||||
use_sdk = await is_feature_enabled(
|
||||
Flag.COPILOT_SDK,
|
||||
user_id or "anonymous",
|
||||
default=config.use_claude_agent_sdk,
|
||||
)
|
||||
stream_fn = (
|
||||
sdk_service.stream_chat_completion_sdk
|
||||
if use_sdk
|
||||
else chat_service.stream_chat_completion
|
||||
)
|
||||
logger.info(
|
||||
f"[TIMING] Calling {'sdk' if use_sdk else 'standard'} stream_chat_completion",
|
||||
extra={"json_fields": log_meta},
|
||||
)
|
||||
# Pass message=None since we already added it to the session above
|
||||
async for chunk in stream_fn(
|
||||
session_id,
|
||||
request.message,
|
||||
None, # Message already in session
|
||||
is_user_message=request.is_user_message,
|
||||
user_id=user_id,
|
||||
session=session, # Pass pre-fetched session to avoid double-fetch
|
||||
session=session, # Pass session with message already added
|
||||
context=request.context,
|
||||
_task_id=task_id, # Pass task_id so service emits start with taskId for reconnection
|
||||
):
|
||||
# Skip duplicate StreamStart — we already published one above
|
||||
if isinstance(chunk, StreamStart):
|
||||
continue
|
||||
chunk_count += 1
|
||||
if first_chunk_time is None:
|
||||
first_chunk_time = time_module.perf_counter()
|
||||
@@ -377,7 +442,7 @@ async def stream_chat_post(
|
||||
gen_end_time = time_module.perf_counter()
|
||||
total_time = (gen_end_time - gen_start_time) * 1000
|
||||
logger.info(
|
||||
f"[TIMING] run_ai_generation FINISHED in {total_time/1000:.1f}s; "
|
||||
f"[TIMING] run_ai_generation FINISHED in {total_time / 1000:.1f}s; "
|
||||
f"task={task_id}, session={session_id}, "
|
||||
f"ttfc={ttfc or -1:.2f}s, n_chunks={chunk_count}",
|
||||
extra={
|
||||
@@ -404,6 +469,17 @@ async def stream_chat_post(
|
||||
}
|
||||
},
|
||||
)
|
||||
# Publish a StreamError so the frontend can display an error message
|
||||
try:
|
||||
await stream_registry.publish_chunk(
|
||||
task_id,
|
||||
StreamError(
|
||||
errorText="An error occurred. Please try again.",
|
||||
code="stream_error",
|
||||
),
|
||||
)
|
||||
except Exception:
|
||||
pass # Best-effort; mark_task_completed will publish StreamFinish
|
||||
await stream_registry.mark_task_completed(task_id, "failed")
|
||||
|
||||
# Start the AI generation in a background task
|
||||
@@ -506,8 +582,14 @@ async def stream_chat_post(
|
||||
"json_fields": {**log_meta, "elapsed_ms": elapsed, "error": str(e)}
|
||||
},
|
||||
)
|
||||
# Surface error to frontend so it doesn't appear stuck
|
||||
yield StreamError(
|
||||
errorText="An error occurred. Please try again.",
|
||||
code="stream_error",
|
||||
).to_sse()
|
||||
yield StreamFinish().to_sse()
|
||||
finally:
|
||||
# Unsubscribe when client disconnects or stream ends to prevent resource leak
|
||||
# Unsubscribe when client disconnects or stream ends
|
||||
if subscriber_queue is not None:
|
||||
try:
|
||||
await stream_registry.unsubscribe_from_task(
|
||||
@@ -751,8 +833,6 @@ async def stream_task(
|
||||
)
|
||||
|
||||
async def event_generator() -> AsyncGenerator[str, None]:
|
||||
import asyncio
|
||||
|
||||
heartbeat_interval = 15.0 # Send heartbeat every 15 seconds
|
||||
try:
|
||||
while True:
|
||||
|
||||
@@ -0,0 +1,14 @@
|
||||
"""Claude Agent SDK integration for CoPilot.
|
||||
|
||||
This module provides the integration layer between the Claude Agent SDK
|
||||
and the existing CoPilot tool system, enabling drop-in replacement of
|
||||
the current LLM orchestration with the battle-tested Claude Agent SDK.
|
||||
"""
|
||||
|
||||
from .service import stream_chat_completion_sdk
|
||||
from .tool_adapter import create_copilot_mcp_server
|
||||
|
||||
__all__ = [
|
||||
"stream_chat_completion_sdk",
|
||||
"create_copilot_mcp_server",
|
||||
]
|
||||
@@ -0,0 +1,198 @@
|
||||
"""Response adapter for converting Claude Agent SDK messages to Vercel AI SDK format.
|
||||
|
||||
This module provides the adapter layer that converts streaming messages from
|
||||
the Claude Agent SDK into the Vercel AI SDK UI Stream Protocol format that
|
||||
the frontend expects.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import uuid
|
||||
|
||||
from claude_agent_sdk import (
|
||||
AssistantMessage,
|
||||
Message,
|
||||
ResultMessage,
|
||||
SystemMessage,
|
||||
TextBlock,
|
||||
ToolResultBlock,
|
||||
ToolUseBlock,
|
||||
UserMessage,
|
||||
)
|
||||
|
||||
from backend.api.features.chat.response_model import (
|
||||
StreamBaseResponse,
|
||||
StreamError,
|
||||
StreamFinish,
|
||||
StreamFinishStep,
|
||||
StreamStart,
|
||||
StreamStartStep,
|
||||
StreamTextDelta,
|
||||
StreamTextEnd,
|
||||
StreamTextStart,
|
||||
StreamToolInputAvailable,
|
||||
StreamToolInputStart,
|
||||
StreamToolOutputAvailable,
|
||||
)
|
||||
from backend.api.features.chat.sdk.tool_adapter import (
|
||||
MCP_TOOL_PREFIX,
|
||||
pop_pending_tool_output,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SDKResponseAdapter:
|
||||
"""Adapter for converting Claude Agent SDK messages to Vercel AI SDK format.
|
||||
|
||||
This class maintains state during a streaming session to properly track
|
||||
text blocks, tool calls, and message lifecycle.
|
||||
"""
|
||||
|
||||
def __init__(self, message_id: str | None = None):
|
||||
self.message_id = message_id or str(uuid.uuid4())
|
||||
self.text_block_id = str(uuid.uuid4())
|
||||
self.has_started_text = False
|
||||
self.has_ended_text = False
|
||||
self.current_tool_calls: dict[str, dict[str, str]] = {}
|
||||
self.task_id: str | None = None
|
||||
self.step_open = False
|
||||
|
||||
def set_task_id(self, task_id: str) -> None:
|
||||
"""Set the task ID for reconnection support."""
|
||||
self.task_id = task_id
|
||||
|
||||
def convert_message(self, sdk_message: Message) -> list[StreamBaseResponse]:
|
||||
"""Convert a single SDK message to Vercel AI SDK format."""
|
||||
responses: list[StreamBaseResponse] = []
|
||||
|
||||
if isinstance(sdk_message, SystemMessage):
|
||||
if sdk_message.subtype == "init":
|
||||
responses.append(
|
||||
StreamStart(messageId=self.message_id, taskId=self.task_id)
|
||||
)
|
||||
# Open the first step (matches non-SDK: StreamStart then StreamStartStep)
|
||||
responses.append(StreamStartStep())
|
||||
self.step_open = True
|
||||
|
||||
elif isinstance(sdk_message, AssistantMessage):
|
||||
# After tool results, the SDK sends a new AssistantMessage for the
|
||||
# next LLM turn. Open a new step if the previous one was closed.
|
||||
if not self.step_open:
|
||||
responses.append(StreamStartStep())
|
||||
self.step_open = True
|
||||
|
||||
for block in sdk_message.content:
|
||||
if isinstance(block, TextBlock):
|
||||
if block.text:
|
||||
self._ensure_text_started(responses)
|
||||
responses.append(
|
||||
StreamTextDelta(id=self.text_block_id, delta=block.text)
|
||||
)
|
||||
|
||||
elif isinstance(block, ToolUseBlock):
|
||||
self._end_text_if_open(responses)
|
||||
|
||||
# Strip MCP prefix so frontend sees "find_block"
|
||||
# instead of "mcp__copilot__find_block".
|
||||
tool_name = block.name.removeprefix(MCP_TOOL_PREFIX)
|
||||
|
||||
responses.append(
|
||||
StreamToolInputStart(toolCallId=block.id, toolName=tool_name)
|
||||
)
|
||||
responses.append(
|
||||
StreamToolInputAvailable(
|
||||
toolCallId=block.id,
|
||||
toolName=tool_name,
|
||||
input=block.input,
|
||||
)
|
||||
)
|
||||
self.current_tool_calls[block.id] = {"name": tool_name}
|
||||
|
||||
elif isinstance(sdk_message, UserMessage):
|
||||
# UserMessage carries tool results back from tool execution.
|
||||
content = sdk_message.content
|
||||
blocks = content if isinstance(content, list) else []
|
||||
for block in blocks:
|
||||
if isinstance(block, ToolResultBlock) and block.tool_use_id:
|
||||
tool_info = self.current_tool_calls.get(block.tool_use_id, {})
|
||||
tool_name = tool_info.get("name", "unknown")
|
||||
|
||||
# Prefer the stashed full output over the SDK's
|
||||
# (potentially truncated) ToolResultBlock content.
|
||||
# The SDK truncates large results, writing them to disk,
|
||||
# which breaks frontend widget parsing.
|
||||
output = pop_pending_tool_output(tool_name) or (
|
||||
_extract_tool_output(block.content)
|
||||
)
|
||||
|
||||
responses.append(
|
||||
StreamToolOutputAvailable(
|
||||
toolCallId=block.tool_use_id,
|
||||
toolName=tool_name,
|
||||
output=output,
|
||||
success=not (block.is_error or False),
|
||||
)
|
||||
)
|
||||
|
||||
# Close the current step after tool results — the next
|
||||
# AssistantMessage will open a new step for the continuation.
|
||||
if self.step_open:
|
||||
responses.append(StreamFinishStep())
|
||||
self.step_open = False
|
||||
|
||||
elif isinstance(sdk_message, ResultMessage):
|
||||
self._end_text_if_open(responses)
|
||||
# Close the step before finishing.
|
||||
if self.step_open:
|
||||
responses.append(StreamFinishStep())
|
||||
self.step_open = False
|
||||
|
||||
if sdk_message.subtype == "success":
|
||||
responses.append(StreamFinish())
|
||||
elif sdk_message.subtype in ("error", "error_during_execution"):
|
||||
error_msg = getattr(sdk_message, "result", None) or "Unknown error"
|
||||
responses.append(
|
||||
StreamError(errorText=str(error_msg), code="sdk_error")
|
||||
)
|
||||
responses.append(StreamFinish())
|
||||
|
||||
else:
|
||||
logger.debug(f"Unhandled SDK message type: {type(sdk_message).__name__}")
|
||||
|
||||
return responses
|
||||
|
||||
def _ensure_text_started(self, responses: list[StreamBaseResponse]) -> None:
|
||||
"""Start (or restart) a text block if needed."""
|
||||
if not self.has_started_text or self.has_ended_text:
|
||||
if self.has_ended_text:
|
||||
self.text_block_id = str(uuid.uuid4())
|
||||
self.has_ended_text = False
|
||||
responses.append(StreamTextStart(id=self.text_block_id))
|
||||
self.has_started_text = True
|
||||
|
||||
def _end_text_if_open(self, responses: list[StreamBaseResponse]) -> None:
|
||||
"""End the current text block if one is open."""
|
||||
if self.has_started_text and not self.has_ended_text:
|
||||
responses.append(StreamTextEnd(id=self.text_block_id))
|
||||
self.has_ended_text = True
|
||||
|
||||
|
||||
def _extract_tool_output(content: str | list[dict[str, str]] | None) -> str:
|
||||
"""Extract a string output from a ToolResultBlock's content field."""
|
||||
if isinstance(content, str):
|
||||
return content
|
||||
if isinstance(content, list):
|
||||
parts = [item.get("text", "") for item in content if item.get("type") == "text"]
|
||||
if parts:
|
||||
return "".join(parts)
|
||||
try:
|
||||
return json.dumps(content)
|
||||
except (TypeError, ValueError):
|
||||
return str(content)
|
||||
if content is None:
|
||||
return ""
|
||||
try:
|
||||
return json.dumps(content)
|
||||
except (TypeError, ValueError):
|
||||
return str(content)
|
||||
@@ -0,0 +1,366 @@
|
||||
"""Unit tests for the SDK response adapter."""
|
||||
|
||||
from claude_agent_sdk import (
|
||||
AssistantMessage,
|
||||
ResultMessage,
|
||||
SystemMessage,
|
||||
TextBlock,
|
||||
ToolResultBlock,
|
||||
ToolUseBlock,
|
||||
UserMessage,
|
||||
)
|
||||
|
||||
from backend.api.features.chat.response_model import (
|
||||
StreamBaseResponse,
|
||||
StreamError,
|
||||
StreamFinish,
|
||||
StreamFinishStep,
|
||||
StreamStart,
|
||||
StreamStartStep,
|
||||
StreamTextDelta,
|
||||
StreamTextEnd,
|
||||
StreamTextStart,
|
||||
StreamToolInputAvailable,
|
||||
StreamToolInputStart,
|
||||
StreamToolOutputAvailable,
|
||||
)
|
||||
|
||||
from .response_adapter import SDKResponseAdapter
|
||||
from .tool_adapter import MCP_TOOL_PREFIX
|
||||
|
||||
|
||||
def _adapter() -> SDKResponseAdapter:
|
||||
a = SDKResponseAdapter(message_id="msg-1")
|
||||
a.set_task_id("task-1")
|
||||
return a
|
||||
|
||||
|
||||
# -- SystemMessage -----------------------------------------------------------
|
||||
|
||||
|
||||
def test_system_init_emits_start_and_step():
|
||||
adapter = _adapter()
|
||||
results = adapter.convert_message(SystemMessage(subtype="init", data={}))
|
||||
assert len(results) == 2
|
||||
assert isinstance(results[0], StreamStart)
|
||||
assert results[0].messageId == "msg-1"
|
||||
assert results[0].taskId == "task-1"
|
||||
assert isinstance(results[1], StreamStartStep)
|
||||
|
||||
|
||||
def test_system_non_init_emits_nothing():
|
||||
adapter = _adapter()
|
||||
results = adapter.convert_message(SystemMessage(subtype="other", data={}))
|
||||
assert results == []
|
||||
|
||||
|
||||
# -- AssistantMessage with TextBlock -----------------------------------------
|
||||
|
||||
|
||||
def test_text_block_emits_step_start_and_delta():
|
||||
adapter = _adapter()
|
||||
msg = AssistantMessage(content=[TextBlock(text="hello")], model="test")
|
||||
results = adapter.convert_message(msg)
|
||||
assert len(results) == 3
|
||||
assert isinstance(results[0], StreamStartStep)
|
||||
assert isinstance(results[1], StreamTextStart)
|
||||
assert isinstance(results[2], StreamTextDelta)
|
||||
assert results[2].delta == "hello"
|
||||
|
||||
|
||||
def test_empty_text_block_emits_only_step():
|
||||
adapter = _adapter()
|
||||
msg = AssistantMessage(content=[TextBlock(text="")], model="test")
|
||||
results = adapter.convert_message(msg)
|
||||
# Empty text skipped, but step still opens
|
||||
assert len(results) == 1
|
||||
assert isinstance(results[0], StreamStartStep)
|
||||
|
||||
|
||||
def test_multiple_text_deltas_reuse_block_id():
|
||||
adapter = _adapter()
|
||||
msg1 = AssistantMessage(content=[TextBlock(text="a")], model="test")
|
||||
msg2 = AssistantMessage(content=[TextBlock(text="b")], model="test")
|
||||
r1 = adapter.convert_message(msg1)
|
||||
r2 = adapter.convert_message(msg2)
|
||||
# First gets step+start+delta, second only delta (block & step already started)
|
||||
assert len(r1) == 3
|
||||
assert isinstance(r1[0], StreamStartStep)
|
||||
assert isinstance(r1[1], StreamTextStart)
|
||||
assert len(r2) == 1
|
||||
assert isinstance(r2[0], StreamTextDelta)
|
||||
assert r1[1].id == r2[0].id # same block ID
|
||||
|
||||
|
||||
# -- AssistantMessage with ToolUseBlock --------------------------------------
|
||||
|
||||
|
||||
def test_tool_use_emits_input_start_and_available():
|
||||
"""Tool names arrive with MCP prefix and should be stripped for the frontend."""
|
||||
adapter = _adapter()
|
||||
msg = AssistantMessage(
|
||||
content=[
|
||||
ToolUseBlock(
|
||||
id="tool-1",
|
||||
name=f"{MCP_TOOL_PREFIX}find_agent",
|
||||
input={"q": "x"},
|
||||
)
|
||||
],
|
||||
model="test",
|
||||
)
|
||||
results = adapter.convert_message(msg)
|
||||
assert len(results) == 3
|
||||
assert isinstance(results[0], StreamStartStep)
|
||||
assert isinstance(results[1], StreamToolInputStart)
|
||||
assert results[1].toolCallId == "tool-1"
|
||||
assert results[1].toolName == "find_agent" # prefix stripped
|
||||
assert isinstance(results[2], StreamToolInputAvailable)
|
||||
assert results[2].toolName == "find_agent" # prefix stripped
|
||||
assert results[2].input == {"q": "x"}
|
||||
|
||||
|
||||
def test_text_then_tool_ends_text_block():
|
||||
adapter = _adapter()
|
||||
text_msg = AssistantMessage(content=[TextBlock(text="thinking...")], model="test")
|
||||
tool_msg = AssistantMessage(
|
||||
content=[ToolUseBlock(id="t1", name=f"{MCP_TOOL_PREFIX}tool", input={})],
|
||||
model="test",
|
||||
)
|
||||
adapter.convert_message(text_msg) # opens step + text
|
||||
results = adapter.convert_message(tool_msg)
|
||||
# Step already open, so: TextEnd, ToolInputStart, ToolInputAvailable
|
||||
assert len(results) == 3
|
||||
assert isinstance(results[0], StreamTextEnd)
|
||||
assert isinstance(results[1], StreamToolInputStart)
|
||||
|
||||
|
||||
# -- UserMessage with ToolResultBlock ----------------------------------------
|
||||
|
||||
|
||||
def test_tool_result_emits_output_and_finish_step():
|
||||
adapter = _adapter()
|
||||
# First register the tool call (opens step) — SDK sends prefixed name
|
||||
tool_msg = AssistantMessage(
|
||||
content=[ToolUseBlock(id="t1", name=f"{MCP_TOOL_PREFIX}find_agent", input={})],
|
||||
model="test",
|
||||
)
|
||||
adapter.convert_message(tool_msg)
|
||||
|
||||
# Now send tool result
|
||||
result_msg = UserMessage(
|
||||
content=[ToolResultBlock(tool_use_id="t1", content="found 3 agents")]
|
||||
)
|
||||
results = adapter.convert_message(result_msg)
|
||||
assert len(results) == 2
|
||||
assert isinstance(results[0], StreamToolOutputAvailable)
|
||||
assert results[0].toolCallId == "t1"
|
||||
assert results[0].toolName == "find_agent" # prefix stripped
|
||||
assert results[0].output == "found 3 agents"
|
||||
assert results[0].success is True
|
||||
assert isinstance(results[1], StreamFinishStep)
|
||||
|
||||
|
||||
def test_tool_result_error():
|
||||
adapter = _adapter()
|
||||
adapter.convert_message(
|
||||
AssistantMessage(
|
||||
content=[
|
||||
ToolUseBlock(id="t1", name=f"{MCP_TOOL_PREFIX}run_agent", input={})
|
||||
],
|
||||
model="test",
|
||||
)
|
||||
)
|
||||
result_msg = UserMessage(
|
||||
content=[ToolResultBlock(tool_use_id="t1", content="timeout", is_error=True)]
|
||||
)
|
||||
results = adapter.convert_message(result_msg)
|
||||
assert isinstance(results[0], StreamToolOutputAvailable)
|
||||
assert results[0].success is False
|
||||
assert isinstance(results[1], StreamFinishStep)
|
||||
|
||||
|
||||
def test_tool_result_list_content():
|
||||
adapter = _adapter()
|
||||
adapter.convert_message(
|
||||
AssistantMessage(
|
||||
content=[ToolUseBlock(id="t1", name=f"{MCP_TOOL_PREFIX}tool", input={})],
|
||||
model="test",
|
||||
)
|
||||
)
|
||||
result_msg = UserMessage(
|
||||
content=[
|
||||
ToolResultBlock(
|
||||
tool_use_id="t1",
|
||||
content=[
|
||||
{"type": "text", "text": "line1"},
|
||||
{"type": "text", "text": "line2"},
|
||||
],
|
||||
)
|
||||
]
|
||||
)
|
||||
results = adapter.convert_message(result_msg)
|
||||
assert isinstance(results[0], StreamToolOutputAvailable)
|
||||
assert results[0].output == "line1line2"
|
||||
assert isinstance(results[1], StreamFinishStep)
|
||||
|
||||
|
||||
def test_string_user_message_ignored():
|
||||
"""A plain string UserMessage (not tool results) produces no output."""
|
||||
adapter = _adapter()
|
||||
results = adapter.convert_message(UserMessage(content="hello"))
|
||||
assert results == []
|
||||
|
||||
|
||||
# -- ResultMessage -----------------------------------------------------------
|
||||
|
||||
|
||||
def test_result_success_emits_finish_step_and_finish():
|
||||
adapter = _adapter()
|
||||
# Start some text first (opens step)
|
||||
adapter.convert_message(
|
||||
AssistantMessage(content=[TextBlock(text="done")], model="test")
|
||||
)
|
||||
msg = ResultMessage(
|
||||
subtype="success",
|
||||
duration_ms=100,
|
||||
duration_api_ms=50,
|
||||
is_error=False,
|
||||
num_turns=1,
|
||||
session_id="s1",
|
||||
)
|
||||
results = adapter.convert_message(msg)
|
||||
# TextEnd + FinishStep + StreamFinish
|
||||
assert len(results) == 3
|
||||
assert isinstance(results[0], StreamTextEnd)
|
||||
assert isinstance(results[1], StreamFinishStep)
|
||||
assert isinstance(results[2], StreamFinish)
|
||||
|
||||
|
||||
def test_result_error_emits_error_and_finish():
|
||||
adapter = _adapter()
|
||||
msg = ResultMessage(
|
||||
subtype="error",
|
||||
duration_ms=100,
|
||||
duration_api_ms=50,
|
||||
is_error=True,
|
||||
num_turns=0,
|
||||
session_id="s1",
|
||||
result="API rate limited",
|
||||
)
|
||||
results = adapter.convert_message(msg)
|
||||
# No step was open, so no FinishStep — just Error + Finish
|
||||
assert len(results) == 2
|
||||
assert isinstance(results[0], StreamError)
|
||||
assert "API rate limited" in results[0].errorText
|
||||
assert isinstance(results[1], StreamFinish)
|
||||
|
||||
|
||||
# -- Text after tools (new block ID) ----------------------------------------
|
||||
|
||||
|
||||
def test_text_after_tool_gets_new_block_id():
|
||||
adapter = _adapter()
|
||||
# Text -> Tool -> ToolResult -> Text should get a new text block ID and step
|
||||
adapter.convert_message(
|
||||
AssistantMessage(content=[TextBlock(text="before")], model="test")
|
||||
)
|
||||
adapter.convert_message(
|
||||
AssistantMessage(
|
||||
content=[ToolUseBlock(id="t1", name=f"{MCP_TOOL_PREFIX}tool", input={})],
|
||||
model="test",
|
||||
)
|
||||
)
|
||||
# Send tool result (closes step)
|
||||
adapter.convert_message(
|
||||
UserMessage(content=[ToolResultBlock(tool_use_id="t1", content="ok")])
|
||||
)
|
||||
results = adapter.convert_message(
|
||||
AssistantMessage(content=[TextBlock(text="after")], model="test")
|
||||
)
|
||||
# Should get StreamStartStep (new step) + StreamTextStart (new block) + StreamTextDelta
|
||||
assert len(results) == 3
|
||||
assert isinstance(results[0], StreamStartStep)
|
||||
assert isinstance(results[1], StreamTextStart)
|
||||
assert isinstance(results[2], StreamTextDelta)
|
||||
assert results[2].delta == "after"
|
||||
|
||||
|
||||
# -- Full conversation flow --------------------------------------------------
|
||||
|
||||
|
||||
def test_full_conversation_flow():
|
||||
"""Simulate a complete conversation: init -> text -> tool -> result -> text -> finish."""
|
||||
adapter = _adapter()
|
||||
all_responses: list[StreamBaseResponse] = []
|
||||
|
||||
# 1. Init
|
||||
all_responses.extend(
|
||||
adapter.convert_message(SystemMessage(subtype="init", data={}))
|
||||
)
|
||||
# 2. Assistant text
|
||||
all_responses.extend(
|
||||
adapter.convert_message(
|
||||
AssistantMessage(content=[TextBlock(text="Let me search")], model="test")
|
||||
)
|
||||
)
|
||||
# 3. Tool use
|
||||
all_responses.extend(
|
||||
adapter.convert_message(
|
||||
AssistantMessage(
|
||||
content=[
|
||||
ToolUseBlock(
|
||||
id="t1",
|
||||
name=f"{MCP_TOOL_PREFIX}find_agent",
|
||||
input={"query": "email"},
|
||||
)
|
||||
],
|
||||
model="test",
|
||||
)
|
||||
)
|
||||
)
|
||||
# 4. Tool result
|
||||
all_responses.extend(
|
||||
adapter.convert_message(
|
||||
UserMessage(
|
||||
content=[ToolResultBlock(tool_use_id="t1", content="Found 2 agents")]
|
||||
)
|
||||
)
|
||||
)
|
||||
# 5. More text
|
||||
all_responses.extend(
|
||||
adapter.convert_message(
|
||||
AssistantMessage(content=[TextBlock(text="I found 2")], model="test")
|
||||
)
|
||||
)
|
||||
# 6. Result
|
||||
all_responses.extend(
|
||||
adapter.convert_message(
|
||||
ResultMessage(
|
||||
subtype="success",
|
||||
duration_ms=500,
|
||||
duration_api_ms=400,
|
||||
is_error=False,
|
||||
num_turns=2,
|
||||
session_id="s1",
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
types = [type(r).__name__ for r in all_responses]
|
||||
assert types == [
|
||||
"StreamStart",
|
||||
"StreamStartStep", # step 1: text + tool call
|
||||
"StreamTextStart",
|
||||
"StreamTextDelta", # "Let me search"
|
||||
"StreamTextEnd", # closed before tool
|
||||
"StreamToolInputStart",
|
||||
"StreamToolInputAvailable",
|
||||
"StreamToolOutputAvailable", # tool result
|
||||
"StreamFinishStep", # step 1 closed after tool result
|
||||
"StreamStartStep", # step 2: continuation text
|
||||
"StreamTextStart", # new block after tool
|
||||
"StreamTextDelta", # "I found 2"
|
||||
"StreamTextEnd", # closed by result
|
||||
"StreamFinishStep", # step 2 closed
|
||||
"StreamFinish",
|
||||
]
|
||||
@@ -0,0 +1,296 @@
|
||||
"""Security hooks for Claude Agent SDK integration.
|
||||
|
||||
This module provides security hooks that validate tool calls before execution,
|
||||
ensuring multi-user isolation and preventing unauthorized operations.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
from typing import Any, cast
|
||||
|
||||
from backend.api.features.chat.sdk.tool_adapter import MCP_TOOL_PREFIX
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Tools that are blocked entirely (CLI/system access).
|
||||
# "Bash" (capital) is the SDK built-in — it's NOT in allowed_tools but blocked
|
||||
# here as defence-in-depth. The agent uses mcp__copilot__bash_exec instead,
|
||||
# which has kernel-level network isolation (unshare --net).
|
||||
BLOCKED_TOOLS = {
|
||||
"Bash",
|
||||
"bash",
|
||||
"shell",
|
||||
"exec",
|
||||
"terminal",
|
||||
"command",
|
||||
}
|
||||
|
||||
# Tools allowed only when their path argument stays within the SDK workspace.
|
||||
# The SDK uses these to handle oversized tool results (writes to tool-results/
|
||||
# files, then reads them back) and for workspace file operations.
|
||||
WORKSPACE_SCOPED_TOOLS = {"Read", "Write", "Edit", "Glob", "Grep"}
|
||||
|
||||
# Dangerous patterns in tool inputs
|
||||
DANGEROUS_PATTERNS = [
|
||||
r"sudo",
|
||||
r"rm\s+-rf",
|
||||
r"dd\s+if=",
|
||||
r"/etc/passwd",
|
||||
r"/etc/shadow",
|
||||
r"chmod\s+777",
|
||||
r"curl\s+.*\|.*sh",
|
||||
r"wget\s+.*\|.*sh",
|
||||
r"eval\s*\(",
|
||||
r"exec\s*\(",
|
||||
r"__import__",
|
||||
r"os\.system",
|
||||
r"subprocess",
|
||||
]
|
||||
|
||||
|
||||
def _deny(reason: str) -> dict[str, Any]:
|
||||
"""Return a hook denial response."""
|
||||
return {
|
||||
"hookSpecificOutput": {
|
||||
"hookEventName": "PreToolUse",
|
||||
"permissionDecision": "deny",
|
||||
"permissionDecisionReason": reason,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def _validate_workspace_path(
|
||||
tool_name: str, tool_input: dict[str, Any], sdk_cwd: str | None
|
||||
) -> dict[str, Any]:
|
||||
"""Validate that a workspace-scoped tool only accesses allowed paths.
|
||||
|
||||
Allowed directories:
|
||||
- The SDK working directory (``/tmp/copilot-<session>/``)
|
||||
- The SDK tool-results directory (``~/.claude/projects/…/tool-results/``)
|
||||
"""
|
||||
path = tool_input.get("file_path") or tool_input.get("path") or ""
|
||||
if not path:
|
||||
# Glob/Grep without a path default to cwd which is already sandboxed
|
||||
return {}
|
||||
|
||||
# Resolve relative paths against sdk_cwd (the SDK sets cwd so the LLM
|
||||
# naturally uses relative paths like "test.txt" instead of absolute ones).
|
||||
if not os.path.isabs(path) and sdk_cwd:
|
||||
resolved = os.path.normpath(os.path.join(sdk_cwd, path))
|
||||
else:
|
||||
resolved = os.path.normpath(os.path.expanduser(path))
|
||||
|
||||
# Allow access within the SDK working directory
|
||||
if sdk_cwd:
|
||||
norm_cwd = os.path.normpath(sdk_cwd)
|
||||
if resolved.startswith(norm_cwd + os.sep) or resolved == norm_cwd:
|
||||
return {}
|
||||
|
||||
# Allow access to ~/.claude/projects/*/tool-results/ (big tool results)
|
||||
claude_dir = os.path.normpath(os.path.expanduser("~/.claude/projects"))
|
||||
if resolved.startswith(claude_dir + os.sep) and "tool-results" in resolved:
|
||||
return {}
|
||||
|
||||
logger.warning(
|
||||
f"Blocked {tool_name} outside workspace: {path} (resolved={resolved})"
|
||||
)
|
||||
workspace_hint = f" Allowed workspace: {sdk_cwd}" if sdk_cwd else ""
|
||||
return _deny(
|
||||
f"[SECURITY] Tool '{tool_name}' can only access files within the workspace "
|
||||
f"directory.{workspace_hint} "
|
||||
"This is enforced by the platform and cannot be bypassed."
|
||||
)
|
||||
|
||||
|
||||
def _validate_tool_access(
|
||||
tool_name: str, tool_input: dict[str, Any], sdk_cwd: str | None = None
|
||||
) -> dict[str, Any]:
|
||||
"""Validate that a tool call is allowed.
|
||||
|
||||
Returns:
|
||||
Empty dict to allow, or dict with hookSpecificOutput to deny
|
||||
"""
|
||||
# Block forbidden tools
|
||||
if tool_name in BLOCKED_TOOLS:
|
||||
logger.warning(f"Blocked tool access attempt: {tool_name}")
|
||||
return _deny(
|
||||
f"[SECURITY] Tool '{tool_name}' is blocked for security. "
|
||||
"This is enforced by the platform and cannot be bypassed. "
|
||||
"Use the CoPilot-specific MCP tools instead."
|
||||
)
|
||||
|
||||
# Workspace-scoped tools: allowed only within the SDK workspace directory
|
||||
if tool_name in WORKSPACE_SCOPED_TOOLS:
|
||||
return _validate_workspace_path(tool_name, tool_input, sdk_cwd)
|
||||
|
||||
# Check for dangerous patterns in tool input
|
||||
# Use json.dumps for predictable format (str() produces Python repr)
|
||||
input_str = json.dumps(tool_input) if tool_input else ""
|
||||
|
||||
for pattern in DANGEROUS_PATTERNS:
|
||||
if re.search(pattern, input_str, re.IGNORECASE):
|
||||
logger.warning(
|
||||
f"Blocked dangerous pattern in tool input: {pattern} in {tool_name}"
|
||||
)
|
||||
return _deny(
|
||||
"[SECURITY] Input contains a blocked pattern. "
|
||||
"This is enforced by the platform and cannot be bypassed."
|
||||
)
|
||||
|
||||
return {}
|
||||
|
||||
|
||||
def _validate_user_isolation(
|
||||
tool_name: str, tool_input: dict[str, Any], user_id: str | None
|
||||
) -> dict[str, Any]:
|
||||
"""Validate that tool calls respect user isolation."""
|
||||
# For workspace file tools, ensure path doesn't escape
|
||||
if "workspace" in tool_name.lower():
|
||||
path = tool_input.get("path", "") or tool_input.get("file_path", "")
|
||||
if path:
|
||||
# Check for path traversal
|
||||
if ".." in path or path.startswith("/"):
|
||||
logger.warning(
|
||||
f"Blocked path traversal attempt: {path} by user {user_id}"
|
||||
)
|
||||
return {
|
||||
"hookSpecificOutput": {
|
||||
"hookEventName": "PreToolUse",
|
||||
"permissionDecision": "deny",
|
||||
"permissionDecisionReason": "Path traversal not allowed",
|
||||
}
|
||||
}
|
||||
|
||||
return {}
|
||||
|
||||
|
||||
def create_security_hooks(
|
||||
user_id: str | None,
|
||||
sdk_cwd: str | None = None,
|
||||
max_subtasks: int = 3,
|
||||
) -> dict[str, Any]:
|
||||
"""Create the security hooks configuration for Claude Agent SDK.
|
||||
|
||||
Includes security validation and observability hooks:
|
||||
- PreToolUse: Security validation before tool execution
|
||||
- PostToolUse: Log successful tool executions
|
||||
- PostToolUseFailure: Log and handle failed tool executions
|
||||
- PreCompact: Log context compaction events (SDK handles compaction automatically)
|
||||
|
||||
Args:
|
||||
user_id: Current user ID for isolation validation
|
||||
sdk_cwd: SDK working directory for workspace-scoped tool validation
|
||||
max_subtasks: Maximum Task (sub-agent) spawns allowed per session
|
||||
|
||||
Returns:
|
||||
Hooks configuration dict for ClaudeAgentOptions
|
||||
"""
|
||||
try:
|
||||
from claude_agent_sdk import HookMatcher
|
||||
from claude_agent_sdk.types import HookContext, HookInput, SyncHookJSONOutput
|
||||
|
||||
# Per-session counter for Task sub-agent spawns
|
||||
task_spawn_count = 0
|
||||
|
||||
async def pre_tool_use_hook(
|
||||
input_data: HookInput,
|
||||
tool_use_id: str | None,
|
||||
context: HookContext,
|
||||
) -> SyncHookJSONOutput:
|
||||
"""Combined pre-tool-use validation hook."""
|
||||
nonlocal task_spawn_count
|
||||
_ = context # unused but required by signature
|
||||
tool_name = cast(str, input_data.get("tool_name", ""))
|
||||
tool_input = cast(dict[str, Any], input_data.get("tool_input", {}))
|
||||
|
||||
# Rate-limit Task (sub-agent) spawns per session
|
||||
if tool_name == "Task":
|
||||
task_spawn_count += 1
|
||||
if task_spawn_count > max_subtasks:
|
||||
logger.warning(
|
||||
f"[SDK] Task limit reached ({max_subtasks}), user={user_id}"
|
||||
)
|
||||
return cast(
|
||||
SyncHookJSONOutput,
|
||||
_deny(
|
||||
f"Maximum {max_subtasks} sub-tasks per session. "
|
||||
"Please continue in the main conversation."
|
||||
),
|
||||
)
|
||||
|
||||
# Strip MCP prefix for consistent validation
|
||||
is_copilot_tool = tool_name.startswith(MCP_TOOL_PREFIX)
|
||||
clean_name = tool_name.removeprefix(MCP_TOOL_PREFIX)
|
||||
|
||||
# Only block non-CoPilot tools; our MCP-registered tools
|
||||
# (including Read for oversized results) are already sandboxed.
|
||||
if not is_copilot_tool:
|
||||
result = _validate_tool_access(clean_name, tool_input, sdk_cwd)
|
||||
if result:
|
||||
return cast(SyncHookJSONOutput, result)
|
||||
|
||||
# Validate user isolation
|
||||
result = _validate_user_isolation(clean_name, tool_input, user_id)
|
||||
if result:
|
||||
return cast(SyncHookJSONOutput, result)
|
||||
|
||||
logger.debug(f"[SDK] Tool start: {tool_name}, user={user_id}")
|
||||
return cast(SyncHookJSONOutput, {})
|
||||
|
||||
async def post_tool_use_hook(
|
||||
input_data: HookInput,
|
||||
tool_use_id: str | None,
|
||||
context: HookContext,
|
||||
) -> SyncHookJSONOutput:
|
||||
"""Log successful tool executions for observability."""
|
||||
_ = context
|
||||
tool_name = cast(str, input_data.get("tool_name", ""))
|
||||
logger.debug(f"[SDK] Tool success: {tool_name}, tool_use_id={tool_use_id}")
|
||||
return cast(SyncHookJSONOutput, {})
|
||||
|
||||
async def post_tool_failure_hook(
|
||||
input_data: HookInput,
|
||||
tool_use_id: str | None,
|
||||
context: HookContext,
|
||||
) -> SyncHookJSONOutput:
|
||||
"""Log failed tool executions for debugging."""
|
||||
_ = context
|
||||
tool_name = cast(str, input_data.get("tool_name", ""))
|
||||
error = input_data.get("error", "Unknown error")
|
||||
logger.warning(
|
||||
f"[SDK] Tool failed: {tool_name}, error={error}, "
|
||||
f"user={user_id}, tool_use_id={tool_use_id}"
|
||||
)
|
||||
return cast(SyncHookJSONOutput, {})
|
||||
|
||||
async def pre_compact_hook(
|
||||
input_data: HookInput,
|
||||
tool_use_id: str | None,
|
||||
context: HookContext,
|
||||
) -> SyncHookJSONOutput:
|
||||
"""Log when SDK triggers context compaction.
|
||||
|
||||
The SDK automatically compacts conversation history when it grows too large.
|
||||
This hook provides visibility into when compaction happens.
|
||||
"""
|
||||
_ = context, tool_use_id
|
||||
trigger = input_data.get("trigger", "auto")
|
||||
logger.info(
|
||||
f"[SDK] Context compaction triggered: {trigger}, user={user_id}"
|
||||
)
|
||||
return cast(SyncHookJSONOutput, {})
|
||||
|
||||
return {
|
||||
"PreToolUse": [HookMatcher(matcher="*", hooks=[pre_tool_use_hook])],
|
||||
"PostToolUse": [HookMatcher(matcher="*", hooks=[post_tool_use_hook])],
|
||||
"PostToolUseFailure": [
|
||||
HookMatcher(matcher="*", hooks=[post_tool_failure_hook])
|
||||
],
|
||||
"PreCompact": [HookMatcher(matcher="*", hooks=[pre_compact_hook])],
|
||||
}
|
||||
except ImportError:
|
||||
# Fallback for when SDK isn't available - return empty hooks
|
||||
logger.warning("claude-agent-sdk not available, security hooks disabled")
|
||||
return {}
|
||||
@@ -0,0 +1,165 @@
|
||||
"""Unit tests for SDK security hooks."""
|
||||
|
||||
import os
|
||||
|
||||
from .security_hooks import _validate_tool_access, _validate_user_isolation
|
||||
|
||||
SDK_CWD = "/tmp/copilot-abc123"
|
||||
|
||||
|
||||
def _is_denied(result: dict) -> bool:
|
||||
hook = result.get("hookSpecificOutput", {})
|
||||
return hook.get("permissionDecision") == "deny"
|
||||
|
||||
|
||||
# -- Blocked tools -----------------------------------------------------------
|
||||
|
||||
|
||||
def test_blocked_tools_denied():
|
||||
for tool in ("bash", "shell", "exec", "terminal", "command"):
|
||||
result = _validate_tool_access(tool, {})
|
||||
assert _is_denied(result), f"{tool} should be blocked"
|
||||
|
||||
|
||||
def test_unknown_tool_allowed():
|
||||
result = _validate_tool_access("SomeCustomTool", {})
|
||||
assert result == {}
|
||||
|
||||
|
||||
# -- Workspace-scoped tools --------------------------------------------------
|
||||
|
||||
|
||||
def test_read_within_workspace_allowed():
|
||||
result = _validate_tool_access(
|
||||
"Read", {"file_path": f"{SDK_CWD}/file.txt"}, sdk_cwd=SDK_CWD
|
||||
)
|
||||
assert result == {}
|
||||
|
||||
|
||||
def test_write_within_workspace_allowed():
|
||||
result = _validate_tool_access(
|
||||
"Write", {"file_path": f"{SDK_CWD}/output.json"}, sdk_cwd=SDK_CWD
|
||||
)
|
||||
assert result == {}
|
||||
|
||||
|
||||
def test_edit_within_workspace_allowed():
|
||||
result = _validate_tool_access(
|
||||
"Edit", {"file_path": f"{SDK_CWD}/src/main.py"}, sdk_cwd=SDK_CWD
|
||||
)
|
||||
assert result == {}
|
||||
|
||||
|
||||
def test_glob_within_workspace_allowed():
|
||||
result = _validate_tool_access("Glob", {"path": f"{SDK_CWD}/src"}, sdk_cwd=SDK_CWD)
|
||||
assert result == {}
|
||||
|
||||
|
||||
def test_grep_within_workspace_allowed():
|
||||
result = _validate_tool_access("Grep", {"path": f"{SDK_CWD}/src"}, sdk_cwd=SDK_CWD)
|
||||
assert result == {}
|
||||
|
||||
|
||||
def test_read_outside_workspace_denied():
|
||||
result = _validate_tool_access(
|
||||
"Read", {"file_path": "/etc/passwd"}, sdk_cwd=SDK_CWD
|
||||
)
|
||||
assert _is_denied(result)
|
||||
|
||||
|
||||
def test_write_outside_workspace_denied():
|
||||
result = _validate_tool_access(
|
||||
"Write", {"file_path": "/home/user/secrets.txt"}, sdk_cwd=SDK_CWD
|
||||
)
|
||||
assert _is_denied(result)
|
||||
|
||||
|
||||
def test_traversal_attack_denied():
|
||||
result = _validate_tool_access(
|
||||
"Read",
|
||||
{"file_path": f"{SDK_CWD}/../../etc/passwd"},
|
||||
sdk_cwd=SDK_CWD,
|
||||
)
|
||||
assert _is_denied(result)
|
||||
|
||||
|
||||
def test_no_path_allowed():
|
||||
"""Glob/Grep without a path argument defaults to cwd — should pass."""
|
||||
result = _validate_tool_access("Glob", {}, sdk_cwd=SDK_CWD)
|
||||
assert result == {}
|
||||
|
||||
|
||||
def test_read_no_cwd_denies_absolute():
|
||||
"""If no sdk_cwd is set, absolute paths are denied."""
|
||||
result = _validate_tool_access("Read", {"file_path": "/tmp/anything"})
|
||||
assert _is_denied(result)
|
||||
|
||||
|
||||
# -- Tool-results directory --------------------------------------------------
|
||||
|
||||
|
||||
def test_read_tool_results_allowed():
|
||||
home = os.path.expanduser("~")
|
||||
path = f"{home}/.claude/projects/-tmp-copilot-abc123/tool-results/12345.txt"
|
||||
result = _validate_tool_access("Read", {"file_path": path}, sdk_cwd=SDK_CWD)
|
||||
assert result == {}
|
||||
|
||||
|
||||
def test_read_claude_projects_without_tool_results_denied():
|
||||
home = os.path.expanduser("~")
|
||||
path = f"{home}/.claude/projects/-tmp-copilot-abc123/settings.json"
|
||||
result = _validate_tool_access("Read", {"file_path": path}, sdk_cwd=SDK_CWD)
|
||||
assert _is_denied(result)
|
||||
|
||||
|
||||
# -- Built-in Bash is blocked (use bash_exec MCP tool instead) ---------------
|
||||
|
||||
|
||||
def test_bash_builtin_always_blocked():
|
||||
"""SDK built-in Bash is blocked — bash_exec MCP tool with bubblewrap is used instead."""
|
||||
result = _validate_tool_access("Bash", {"command": "echo hello"}, sdk_cwd=SDK_CWD)
|
||||
assert _is_denied(result)
|
||||
|
||||
|
||||
# -- Dangerous patterns ------------------------------------------------------
|
||||
|
||||
|
||||
def test_dangerous_pattern_blocked():
|
||||
result = _validate_tool_access("SomeTool", {"cmd": "sudo rm -rf /"})
|
||||
assert _is_denied(result)
|
||||
|
||||
|
||||
def test_subprocess_pattern_blocked():
|
||||
result = _validate_tool_access("SomeTool", {"code": "subprocess.run(...)"})
|
||||
assert _is_denied(result)
|
||||
|
||||
|
||||
# -- User isolation ----------------------------------------------------------
|
||||
|
||||
|
||||
def test_workspace_path_traversal_blocked():
|
||||
result = _validate_user_isolation(
|
||||
"workspace_read", {"path": "../../../etc/shadow"}, user_id="user-1"
|
||||
)
|
||||
assert _is_denied(result)
|
||||
|
||||
|
||||
def test_workspace_absolute_path_blocked():
|
||||
result = _validate_user_isolation(
|
||||
"workspace_read", {"path": "/etc/passwd"}, user_id="user-1"
|
||||
)
|
||||
assert _is_denied(result)
|
||||
|
||||
|
||||
def test_workspace_normal_path_allowed():
|
||||
result = _validate_user_isolation(
|
||||
"workspace_read", {"path": "src/main.py"}, user_id="user-1"
|
||||
)
|
||||
assert result == {}
|
||||
|
||||
|
||||
def test_non_workspace_tool_passes_isolation():
|
||||
result = _validate_user_isolation(
|
||||
"find_agent", {"query": "email"}, user_id="user-1"
|
||||
)
|
||||
assert result == {}
|
||||
@@ -0,0 +1,668 @@
|
||||
"""Claude Agent SDK service layer for CoPilot chat completions."""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import uuid
|
||||
from collections.abc import AsyncGenerator
|
||||
from typing import Any
|
||||
|
||||
from backend.util.exceptions import NotFoundError
|
||||
|
||||
from .. import stream_registry
|
||||
from ..config import ChatConfig
|
||||
from ..model import (
|
||||
ChatMessage,
|
||||
ChatSession,
|
||||
get_chat_session,
|
||||
update_session_title,
|
||||
upsert_chat_session,
|
||||
)
|
||||
from ..response_model import (
|
||||
StreamBaseResponse,
|
||||
StreamError,
|
||||
StreamFinish,
|
||||
StreamStart,
|
||||
StreamTextDelta,
|
||||
StreamToolInputAvailable,
|
||||
StreamToolOutputAvailable,
|
||||
)
|
||||
from ..service import (
|
||||
_build_system_prompt,
|
||||
_execute_long_running_tool_with_streaming,
|
||||
_generate_session_title,
|
||||
)
|
||||
from ..tools.models import OperationPendingResponse, OperationStartedResponse
|
||||
from ..tools.sandbox import WORKSPACE_PREFIX, make_session_path
|
||||
from ..tracking import track_user_message
|
||||
from .response_adapter import SDKResponseAdapter
|
||||
from .security_hooks import create_security_hooks
|
||||
from .tool_adapter import (
|
||||
COPILOT_TOOL_NAMES,
|
||||
LongRunningCallback,
|
||||
create_copilot_mcp_server,
|
||||
set_execution_context,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
config = ChatConfig()
|
||||
|
||||
# Set to hold background tasks to prevent garbage collection
|
||||
_background_tasks: set[asyncio.Task[Any]] = set()
|
||||
|
||||
|
||||
_SDK_CWD_PREFIX = WORKSPACE_PREFIX
|
||||
|
||||
# Appended to the system prompt to inform the agent about available tools.
|
||||
# The SDK built-in Bash is NOT available — use mcp__copilot__bash_exec instead,
|
||||
# which has kernel-level network isolation (unshare --net).
|
||||
_SDK_TOOL_SUPPLEMENT = """
|
||||
|
||||
## Tool notes
|
||||
|
||||
- The SDK built-in Bash tool is NOT available. Use the `bash_exec` MCP tool
|
||||
for shell commands — it runs in a network-isolated sandbox.
|
||||
- **Shared workspace**: The SDK Read/Write tools and `bash_exec` share the
|
||||
same working directory. Files created by one are readable by the other.
|
||||
These files are **ephemeral** — they exist only for the current session.
|
||||
- **Persistent storage**: Use `write_workspace_file` / `read_workspace_file`
|
||||
for files that should persist across sessions (stored in cloud storage).
|
||||
- Long-running tools (create_agent, edit_agent, etc.) are handled
|
||||
asynchronously. You will receive an immediate response; the actual result
|
||||
is delivered to the user via a background stream.
|
||||
"""
|
||||
|
||||
|
||||
def _build_long_running_callback(user_id: str | None) -> LongRunningCallback:
|
||||
"""Build a callback that delegates long-running tools to the non-SDK infrastructure.
|
||||
|
||||
Long-running tools (create_agent, edit_agent, etc.) are delegated to the
|
||||
existing background infrastructure: stream_registry (Redis Streams),
|
||||
database persistence, and SSE reconnection. This means results survive
|
||||
page refreshes / pod restarts, and the frontend shows the proper loading
|
||||
widget with progress updates.
|
||||
|
||||
The returned callback matches the ``LongRunningCallback`` signature:
|
||||
``(tool_name, args, session) -> MCP response dict``.
|
||||
"""
|
||||
|
||||
async def _callback(
|
||||
tool_name: str, args: dict[str, Any], session: ChatSession
|
||||
) -> dict[str, Any]:
|
||||
operation_id = str(uuid.uuid4())
|
||||
task_id = str(uuid.uuid4())
|
||||
tool_call_id = f"sdk-{uuid.uuid4().hex[:12]}"
|
||||
session_id = session.session_id
|
||||
|
||||
# --- Build user-friendly messages (matches non-SDK service) ---
|
||||
if tool_name == "create_agent":
|
||||
desc = args.get("description", "")
|
||||
desc_preview = (desc[:100] + "...") if len(desc) > 100 else desc
|
||||
pending_msg = (
|
||||
f"Creating your agent: {desc_preview}"
|
||||
if desc_preview
|
||||
else "Creating agent... This may take a few minutes."
|
||||
)
|
||||
started_msg = (
|
||||
"Agent creation started. You can close this tab - "
|
||||
"check your library in a few minutes."
|
||||
)
|
||||
elif tool_name == "edit_agent":
|
||||
changes = args.get("changes", "")
|
||||
changes_preview = (changes[:100] + "...") if len(changes) > 100 else changes
|
||||
pending_msg = (
|
||||
f"Editing agent: {changes_preview}"
|
||||
if changes_preview
|
||||
else "Editing agent... This may take a few minutes."
|
||||
)
|
||||
started_msg = (
|
||||
"Agent edit started. You can close this tab - "
|
||||
"check your library in a few minutes."
|
||||
)
|
||||
else:
|
||||
pending_msg = f"Running {tool_name}... This may take a few minutes."
|
||||
started_msg = (
|
||||
f"{tool_name} started. You can close this tab - "
|
||||
"check back in a few minutes."
|
||||
)
|
||||
|
||||
# --- Register task in Redis for SSE reconnection ---
|
||||
await stream_registry.create_task(
|
||||
task_id=task_id,
|
||||
session_id=session_id,
|
||||
user_id=user_id,
|
||||
tool_call_id=tool_call_id,
|
||||
tool_name=tool_name,
|
||||
operation_id=operation_id,
|
||||
)
|
||||
|
||||
# --- Save OperationPendingResponse to chat history ---
|
||||
pending_message = ChatMessage(
|
||||
role="tool",
|
||||
content=OperationPendingResponse(
|
||||
message=pending_msg,
|
||||
operation_id=operation_id,
|
||||
tool_name=tool_name,
|
||||
).model_dump_json(),
|
||||
tool_call_id=tool_call_id,
|
||||
)
|
||||
session.messages.append(pending_message)
|
||||
await upsert_chat_session(session)
|
||||
|
||||
# --- Spawn background task (reuses non-SDK infrastructure) ---
|
||||
bg_task = asyncio.create_task(
|
||||
_execute_long_running_tool_with_streaming(
|
||||
tool_name=tool_name,
|
||||
parameters=args,
|
||||
tool_call_id=tool_call_id,
|
||||
operation_id=operation_id,
|
||||
task_id=task_id,
|
||||
session_id=session_id,
|
||||
user_id=user_id,
|
||||
)
|
||||
)
|
||||
_background_tasks.add(bg_task)
|
||||
bg_task.add_done_callback(_background_tasks.discard)
|
||||
await stream_registry.set_task_asyncio_task(task_id, bg_task)
|
||||
|
||||
logger.info(
|
||||
f"[SDK] Long-running tool {tool_name} delegated to background "
|
||||
f"(operation_id={operation_id}, task_id={task_id})"
|
||||
)
|
||||
|
||||
# --- Return OperationStartedResponse as MCP tool result ---
|
||||
# This flows through SDK → response adapter → frontend, triggering
|
||||
# the loading widget with SSE reconnection support.
|
||||
started_json = OperationStartedResponse(
|
||||
message=started_msg,
|
||||
operation_id=operation_id,
|
||||
tool_name=tool_name,
|
||||
task_id=task_id,
|
||||
).model_dump_json()
|
||||
|
||||
return {
|
||||
"content": [{"type": "text", "text": started_json}],
|
||||
"isError": False,
|
||||
}
|
||||
|
||||
return _callback
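

# Hedged sketch (not part of the original change): the shape of the MCP-style
# payload the callback resolves to, built with placeholder IDs so the contract
# with the response adapter is visible at a glance. The UUID strings below are
# illustrative only.
_EXAMPLE_STARTED_PAYLOAD = {
    "content": [
        {
            "type": "text",
            "text": OperationStartedResponse(
                message="Agent creation started.",
                operation_id="00000000-0000-0000-0000-000000000000",
                tool_name="create_agent",
                task_id="00000000-0000-0000-0000-000000000001",
            ).model_dump_json(),
        }
    ],
    "isError": False,
}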
|
||||
|
||||
|
||||
def _resolve_sdk_model() -> str | None:
|
||||
"""Resolve the model name for the Claude Agent SDK CLI.
|
||||
|
||||
Uses ``config.claude_agent_model`` if set, otherwise derives from
|
||||
``config.model`` by stripping the OpenRouter provider prefix (e.g.,
|
||||
``"anthropic/claude-opus-4.6"`` → ``"claude-opus-4.6"``).
|
||||
"""
|
||||
if config.claude_agent_model:
|
||||
return config.claude_agent_model
|
||||
model = config.model
|
||||
if "/" in model:
|
||||
return model.split("/", 1)[1]
|
||||
return model
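

# Hedged usage sketch (not part of the original change): the fallback branch's
# prefix-stripping rule applied to literal model names, so the mapping in the
# docstring is easy to verify. The model strings are examples, not guarantees
# about which models are configured.
def _demo_strip_provider_prefix(model: str) -> str:
    # Mirrors the fallback branch of _resolve_sdk_model() above.
    return model.split("/", 1)[1] if "/" in model else model


assert _demo_strip_provider_prefix("anthropic/claude-opus-4.6") == "claude-opus-4.6"
assert _demo_strip_provider_prefix("claude-opus-4.6") == "claude-opus-4.6"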
|
||||
|
||||
|
||||
def _build_sdk_env() -> dict[str, str]:
|
||||
"""Build env vars for the SDK CLI process.
|
||||
|
||||
Routes API calls through OpenRouter (or a custom base_url) using
|
||||
the same ``config.api_key`` / ``config.base_url`` as the non-SDK path.
|
||||
This gives per-call token and cost tracking on the OpenRouter dashboard.
|
||||
|
||||
Only overrides ``ANTHROPIC_API_KEY`` when a valid proxy URL and auth
|
||||
token are both present — otherwise returns an empty dict so the SDK
|
||||
falls back to its default credentials.
|
||||
"""
|
||||
env: dict[str, str] = {}
|
||||
if config.api_key and config.base_url:
|
||||
# Strip /v1 suffix — SDK expects the base URL without a version path
|
||||
base = config.base_url.rstrip("/")
|
||||
if base.endswith("/v1"):
|
||||
base = base[:-3]
|
||||
if not base or not base.startswith("http"):
|
||||
# Invalid base_url — don't override SDK defaults
|
||||
return env
|
||||
env["ANTHROPIC_BASE_URL"] = base
|
||||
env["ANTHROPIC_AUTH_TOKEN"] = config.api_key
|
||||
# Must be explicitly empty so the CLI uses AUTH_TOKEN instead
|
||||
env["ANTHROPIC_API_KEY"] = ""
|
||||
return env
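

# Hedged sketch (not part of the original change): the /v1-stripping rule from
# _build_sdk_env() applied to a hypothetical proxy base URL, shown in isolation
# so the normalisation is easy to check.
def _demo_normalise_base_url(base_url: str) -> str:
    # Mirrors the base-URL normalisation inside _build_sdk_env().
    base = base_url.rstrip("/")
    return base[:-3] if base.endswith("/v1") else base


assert _demo_normalise_base_url("https://openrouter.ai/api/v1/") == "https://openrouter.ai/api"
assert _demo_normalise_base_url("https://example.test/proxy") == "https://example.test/proxy"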
|
||||
|
||||
|
||||
def _make_sdk_cwd(session_id: str) -> str:
|
||||
"""Create a safe, session-specific working directory path.
|
||||
|
||||
Delegates to :func:`~backend.api.features.chat.tools.sandbox.make_session_path`
|
||||
(single source of truth for path sanitization) and adds a defence-in-depth
|
||||
assertion.
|
||||
"""
|
||||
cwd = make_session_path(session_id)
|
||||
# Defence-in-depth: normpath + startswith is a CodeQL-recognised sanitizer
|
||||
cwd = os.path.normpath(cwd)
|
||||
if not cwd.startswith(_SDK_CWD_PREFIX):
|
||||
raise ValueError(f"SDK cwd escaped prefix: {cwd}")
|
||||
return cwd
|
||||
|
||||
|
||||
def _cleanup_sdk_tool_results(cwd: str) -> None:
|
||||
"""Remove SDK tool-result files for a specific session working directory.
|
||||
|
||||
The SDK creates tool-result files under ~/.claude/projects/<encoded-cwd>/tool-results/.
|
||||
We clean only the specific cwd's results to avoid race conditions between
|
||||
concurrent sessions.
|
||||
|
||||
Security: cwd MUST be created by _make_sdk_cwd() which sanitizes session_id.
|
||||
"""
|
||||
import shutil
|
||||
|
||||
# Security check 1: Validate cwd is under the expected prefix
|
||||
normalized = os.path.normpath(cwd)
|
||||
if not normalized.startswith(_SDK_CWD_PREFIX):
|
||||
logger.warning(f"[SDK] Rejecting cleanup for invalid path: {cwd}")
|
||||
return
|
||||
|
||||
# Security check 2: Ensure no path traversal in the normalized path
|
||||
if ".." in normalized:
|
||||
logger.warning(f"[SDK] Rejecting cleanup for traversal attempt: {cwd}")
|
||||
return
|
||||
|
||||
# SDK encodes the cwd path by replacing '/' with '-'
|
||||
encoded_cwd = normalized.replace("/", "-")
|
||||
|
||||
# Construct the project directory path (known-safe home expansion)
|
||||
claude_projects = os.path.expanduser("~/.claude/projects")
|
||||
project_dir = os.path.join(claude_projects, encoded_cwd)
|
||||
|
||||
# Security check 3: Validate project_dir is under ~/.claude/projects
|
||||
project_dir = os.path.normpath(project_dir)
|
||||
if not project_dir.startswith(claude_projects):
|
||||
logger.warning(
|
||||
f"[SDK] Rejecting cleanup for escaped project path: {project_dir}"
|
||||
)
|
||||
return
|
||||
|
||||
results_dir = os.path.join(project_dir, "tool-results")
|
||||
if os.path.isdir(results_dir):
|
||||
for filename in os.listdir(results_dir):
|
||||
file_path = os.path.join(results_dir, filename)
|
||||
try:
|
||||
if os.path.isfile(file_path):
|
||||
os.remove(file_path)
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
# Also clean up the temp cwd directory itself
|
||||
try:
|
||||
shutil.rmtree(normalized, ignore_errors=True)
|
||||
except OSError:
|
||||
pass
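

# Hedged illustration (not part of the original change): the SDK's project-dir
# encoding replaces '/' with '-', so a session cwd maps to a predictable
# directory under ~/.claude/projects/. The session ID below is a placeholder.
_demo_encoded_cwd = "/tmp/copilot-demo-session".replace("/", "-")
assert _demo_encoded_cwd == "-tmp-copilot-demo-session"
_demo_project_dir = os.path.join(
    os.path.expanduser("~/.claude/projects"), _demo_encoded_cwd
)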
|
||||
|
||||
|
||||
async def _compress_conversation_history(
|
||||
session: ChatSession,
|
||||
) -> list[ChatMessage]:
|
||||
"""Compress prior conversation messages if they exceed the token threshold.
|
||||
|
||||
Uses the shared compress_context() from prompt.py which supports:
|
||||
- LLM summarization of old messages (keeps recent ones intact)
|
||||
- Progressive content truncation as fallback
|
||||
- Middle-out deletion as last resort
|
||||
|
||||
Returns the compressed prior messages (everything except the current message).
|
||||
"""
|
||||
prior = session.messages[:-1]
|
||||
if len(prior) < 2:
|
||||
return prior
|
||||
|
||||
from backend.util.prompt import compress_context
|
||||
|
||||
# Convert ChatMessages to dicts for compress_context
|
||||
messages_dict = []
|
||||
for msg in prior:
|
||||
msg_dict: dict[str, Any] = {"role": msg.role}
|
||||
if msg.content:
|
||||
msg_dict["content"] = msg.content
|
||||
if msg.tool_calls:
|
||||
msg_dict["tool_calls"] = msg.tool_calls
|
||||
if msg.tool_call_id:
|
||||
msg_dict["tool_call_id"] = msg.tool_call_id
|
||||
messages_dict.append(msg_dict)
|
||||
|
||||
try:
|
||||
import openai
|
||||
|
||||
async with openai.AsyncOpenAI(
|
||||
api_key=config.api_key, base_url=config.base_url, timeout=30.0
|
||||
) as client:
|
||||
result = await compress_context(
|
||||
messages=messages_dict,
|
||||
model=config.model,
|
||||
client=client,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warning(f"[SDK] Context compression with LLM failed: {e}")
|
||||
# Fall back to truncation-only (no LLM summarization)
|
||||
result = await compress_context(
|
||||
messages=messages_dict,
|
||||
model=config.model,
|
||||
client=None,
|
||||
)
|
||||
|
||||
if result.was_compacted:
|
||||
logger.info(
|
||||
f"[SDK] Context compacted: {result.original_token_count} -> "
|
||||
f"{result.token_count} tokens "
|
||||
f"({result.messages_summarized} summarized, "
|
||||
f"{result.messages_dropped} dropped)"
|
||||
)
|
||||
# Convert compressed dicts back to ChatMessages
|
||||
return [
|
||||
ChatMessage(
|
||||
role=m["role"],
|
||||
content=m.get("content"),
|
||||
tool_calls=m.get("tool_calls"),
|
||||
tool_call_id=m.get("tool_call_id"),
|
||||
)
|
||||
for m in result.messages
|
||||
]
|
||||
|
||||
return prior
|
||||
|
||||
|
||||
def _format_conversation_context(messages: list[ChatMessage]) -> str | None:
|
||||
"""Format conversation messages into a context prefix for the user message.
|
||||
|
||||
Returns a string like:
|
||||
<conversation_history>
|
||||
User: hello
|
||||
You responded: Hi! How can I help?
|
||||
</conversation_history>
|
||||
|
||||
Returns None if there are no messages to format.
|
||||
"""
|
||||
if not messages:
|
||||
return None
|
||||
|
||||
lines: list[str] = []
|
||||
for msg in messages:
|
||||
if not msg.content:
|
||||
continue
|
||||
if msg.role == "user":
|
||||
lines.append(f"User: {msg.content}")
|
||||
elif msg.role == "assistant":
|
||||
lines.append(f"You responded: {msg.content}")
|
||||
# Skip tool messages — they're internal details
|
||||
|
||||
if not lines:
|
||||
return None
|
||||
|
||||
return "<conversation_history>\n" + "\n".join(lines) + "\n</conversation_history>"
|
||||
|
||||
|
||||
async def stream_chat_completion_sdk(
|
||||
session_id: str,
|
||||
message: str | None = None,
|
||||
tool_call_response: str | None = None, # noqa: ARG001
|
||||
is_user_message: bool = True,
|
||||
user_id: str | None = None,
|
||||
retry_count: int = 0, # noqa: ARG001
|
||||
session: ChatSession | None = None,
|
||||
context: dict[str, str] | None = None, # noqa: ARG001
|
||||
) -> AsyncGenerator[StreamBaseResponse, None]:
|
||||
"""Stream chat completion using Claude Agent SDK.
|
||||
|
||||
Drop-in replacement for stream_chat_completion with improved reliability.
|
||||
"""
|
||||
|
||||
if session is None:
|
||||
session = await get_chat_session(session_id, user_id)
|
||||
|
||||
if not session:
|
||||
raise NotFoundError(
|
||||
f"Session {session_id} not found. Please create a new session first."
|
||||
)
|
||||
|
||||
if message:
|
||||
session.messages.append(
|
||||
ChatMessage(
|
||||
role="user" if is_user_message else "assistant", content=message
|
||||
)
|
||||
)
|
||||
if is_user_message:
|
||||
track_user_message(
|
||||
user_id=user_id, session_id=session_id, message_length=len(message)
|
||||
)
|
||||
|
||||
session = await upsert_chat_session(session)
|
||||
|
||||
# Generate title for new sessions (first user message)
|
||||
if is_user_message and not session.title:
|
||||
user_messages = [m for m in session.messages if m.role == "user"]
|
||||
if len(user_messages) == 1:
|
||||
first_message = user_messages[0].content or message or ""
|
||||
if first_message:
|
||||
task = asyncio.create_task(
|
||||
_update_title_async(session_id, first_message, user_id)
|
||||
)
|
||||
_background_tasks.add(task)
|
||||
task.add_done_callback(_background_tasks.discard)
|
||||
|
||||
# Build system prompt (reuses non-SDK path with Langfuse support)
|
||||
has_history = len(session.messages) > 1
|
||||
system_prompt, _ = await _build_system_prompt(
|
||||
user_id, has_conversation_history=has_history
|
||||
)
|
||||
system_prompt += _SDK_TOOL_SUPPLEMENT
|
||||
message_id = str(uuid.uuid4())
|
||||
task_id = str(uuid.uuid4())
|
||||
|
||||
yield StreamStart(messageId=message_id, taskId=task_id)
|
||||
|
||||
stream_completed = False
|
||||
# Initialise sdk_cwd before the try so the finally can reference it
|
||||
# even if _make_sdk_cwd raises (in that case it stays as "").
|
||||
sdk_cwd = ""
|
||||
|
||||
try:
|
||||
# Use a session-specific temp dir to avoid cleanup race conditions
|
||||
# between concurrent sessions.
|
||||
sdk_cwd = _make_sdk_cwd(session_id)
|
||||
os.makedirs(sdk_cwd, exist_ok=True)
|
||||
|
||||
set_execution_context(
|
||||
user_id,
|
||||
session,
|
||||
long_running_callback=_build_long_running_callback(user_id),
|
||||
)
|
||||
try:
|
||||
from claude_agent_sdk import ClaudeAgentOptions, ClaudeSDKClient
|
||||
|
||||
# Fail fast when no API credentials are available at all
|
||||
sdk_env = _build_sdk_env()
|
||||
if not sdk_env and not os.environ.get("ANTHROPIC_API_KEY"):
|
||||
raise RuntimeError(
|
||||
"No API key configured. Set OPEN_ROUTER_API_KEY "
|
||||
"(or CHAT_API_KEY) for OpenRouter routing, "
|
||||
"or ANTHROPIC_API_KEY for direct Anthropic access."
|
||||
)
|
||||
|
||||
mcp_server = create_copilot_mcp_server()
|
||||
|
||||
sdk_model = _resolve_sdk_model()
|
||||
|
||||
security_hooks = create_security_hooks(
|
||||
user_id,
|
||||
sdk_cwd=sdk_cwd,
|
||||
max_subtasks=config.claude_agent_max_subtasks,
|
||||
)
|
||||
|
||||
options = ClaudeAgentOptions(
|
||||
system_prompt=system_prompt,
|
||||
mcp_servers={"copilot": mcp_server}, # type: ignore[arg-type]
|
||||
allowed_tools=COPILOT_TOOL_NAMES,
|
||||
hooks=security_hooks, # type: ignore[arg-type]
|
||||
cwd=sdk_cwd,
|
||||
max_buffer_size=config.claude_agent_max_buffer_size,
|
||||
# Only pass model/env when OpenRouter is configured
|
||||
**({"model": sdk_model, "env": sdk_env} if sdk_env else {}),
|
||||
)
|
||||
|
||||
adapter = SDKResponseAdapter(message_id=message_id)
|
||||
adapter.set_task_id(task_id)
|
||||
|
||||
async with ClaudeSDKClient(options=options) as client:
|
||||
current_message = message or ""
|
||||
if not current_message and session.messages:
|
||||
last_user = [m for m in session.messages if m.role == "user"]
|
||||
if last_user:
|
||||
current_message = last_user[-1].content or ""
|
||||
|
||||
if not current_message.strip():
|
||||
yield StreamError(
|
||||
errorText="Message cannot be empty.",
|
||||
code="empty_prompt",
|
||||
)
|
||||
yield StreamFinish()
|
||||
return
|
||||
|
||||
# Build query with conversation history context.
|
||||
# Compress history first to handle long conversations.
|
||||
query_message = current_message
|
||||
if len(session.messages) > 1:
|
||||
compressed = await _compress_conversation_history(session)
|
||||
history_context = _format_conversation_context(compressed)
|
||||
if history_context:
|
||||
query_message = (
|
||||
f"{history_context}\n\n"
|
||||
f"Now, the user says:\n{current_message}"
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"[SDK] Sending query: {current_message[:80]!r}"
|
||||
f" ({len(session.messages)} msgs in session)"
|
||||
)
|
||||
await client.query(query_message, session_id=session_id)
|
||||
|
||||
assistant_response = ChatMessage(role="assistant", content="")
|
||||
accumulated_tool_calls: list[dict[str, Any]] = []
|
||||
has_appended_assistant = False
|
||||
has_tool_results = False
|
||||
|
||||
async for sdk_msg in client.receive_messages():
|
||||
logger.debug(
|
||||
f"[SDK] Received: {type(sdk_msg).__name__} "
|
||||
f"{getattr(sdk_msg, 'subtype', '')}"
|
||||
)
|
||||
for response in adapter.convert_message(sdk_msg):
|
||||
if isinstance(response, StreamStart):
|
||||
continue
|
||||
|
||||
yield response
|
||||
|
||||
if isinstance(response, StreamTextDelta):
|
||||
delta = response.delta or ""
|
||||
# After tool results, start a new assistant
|
||||
# message for the post-tool text.
|
||||
if has_tool_results and has_appended_assistant:
|
||||
assistant_response = ChatMessage(
|
||||
role="assistant", content=delta
|
||||
)
|
||||
accumulated_tool_calls = []
|
||||
has_appended_assistant = False
|
||||
has_tool_results = False
|
||||
session.messages.append(assistant_response)
|
||||
has_appended_assistant = True
|
||||
else:
|
||||
assistant_response.content = (
|
||||
assistant_response.content or ""
|
||||
) + delta
|
||||
if not has_appended_assistant:
|
||||
session.messages.append(assistant_response)
|
||||
has_appended_assistant = True
|
||||
|
||||
elif isinstance(response, StreamToolInputAvailable):
|
||||
accumulated_tool_calls.append(
|
||||
{
|
||||
"id": response.toolCallId,
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": response.toolName,
|
||||
"arguments": json.dumps(response.input or {}),
|
||||
},
|
||||
}
|
||||
)
|
||||
assistant_response.tool_calls = accumulated_tool_calls
|
||||
if not has_appended_assistant:
|
||||
session.messages.append(assistant_response)
|
||||
has_appended_assistant = True
|
||||
|
||||
elif isinstance(response, StreamToolOutputAvailable):
|
||||
session.messages.append(
|
||||
ChatMessage(
|
||||
role="tool",
|
||||
content=(
|
||||
response.output
|
||||
if isinstance(response.output, str)
|
||||
else str(response.output)
|
||||
),
|
||||
tool_call_id=response.toolCallId,
|
||||
)
|
||||
)
|
||||
has_tool_results = True
|
||||
|
||||
elif isinstance(response, StreamFinish):
|
||||
stream_completed = True
|
||||
|
||||
if stream_completed:
|
||||
break
|
||||
|
||||
if (
|
||||
assistant_response.content or assistant_response.tool_calls
|
||||
) and not has_appended_assistant:
|
||||
session.messages.append(assistant_response)
|
||||
|
||||
except ImportError:
|
||||
raise RuntimeError(
|
||||
"claude-agent-sdk is not installed. "
|
||||
"Disable SDK mode (CHAT_USE_CLAUDE_AGENT_SDK=false) "
|
||||
"to use the OpenAI-compatible fallback."
|
||||
)
|
||||
|
||||
await upsert_chat_session(session)
|
||||
logger.debug(
|
||||
f"[SDK] Session {session_id} saved with {len(session.messages)} messages"
|
||||
)
|
||||
if not stream_completed:
|
||||
yield StreamFinish()
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"[SDK] Error: {e}", exc_info=True)
|
||||
try:
|
||||
await upsert_chat_session(session)
|
||||
except Exception as save_err:
|
||||
logger.error(f"[SDK] Failed to save session on error: {save_err}")
|
||||
yield StreamError(
|
||||
errorText="An error occurred. Please try again.",
|
||||
code="sdk_error",
|
||||
)
|
||||
yield StreamFinish()
|
||||
finally:
|
||||
if sdk_cwd:
|
||||
_cleanup_sdk_tool_results(sdk_cwd)
|
||||
|
||||
|
||||
async def _update_title_async(
|
||||
session_id: str, message: str, user_id: str | None = None
|
||||
) -> None:
|
||||
"""Background task to update session title."""
|
||||
try:
|
||||
title = await _generate_session_title(
|
||||
message, user_id=user_id, session_id=session_id
|
||||
)
|
||||
if title:
|
||||
await update_session_title(session_id, title)
|
||||
logger.debug(f"[SDK] Generated title for {session_id}: {title}")
|
||||
except Exception as e:
|
||||
logger.warning(f"[SDK] Failed to update session title: {e}")
|
||||
@@ -0,0 +1,320 @@
|
||||
"""Tool adapter for wrapping existing CoPilot tools as Claude Agent SDK MCP tools.
|
||||
|
||||
This module provides the adapter layer that converts existing BaseTool implementations
|
||||
into in-process MCP tools that can be used with the Claude Agent SDK.
|
||||
|
||||
Long-running tools (``is_long_running=True``) are delegated to the non-SDK
|
||||
background infrastructure (stream_registry, Redis persistence, SSE reconnection)
|
||||
via a callback provided by the service layer. This avoids wasteful SDK polling
|
||||
and makes results survive page refreshes.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import uuid
|
||||
from collections.abc import Awaitable, Callable
|
||||
from contextvars import ContextVar
|
||||
from typing import Any
|
||||
|
||||
from backend.api.features.chat.model import ChatSession
|
||||
from backend.api.features.chat.tools import TOOL_REGISTRY
|
||||
from backend.api.features.chat.tools.base import BaseTool
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Allowed base directory for the Read tool (SDK saves oversized tool results here).
|
||||
# Restricted to ~/.claude/projects/ and further validated to require "tool-results"
|
||||
# in the path — prevents reading settings, credentials, or other sensitive files.
|
||||
_SDK_PROJECTS_DIR = os.path.expanduser("~/.claude/projects/")
|
||||
|
||||
# MCP server naming - the SDK prefixes tool names as "mcp__{server_name}__{tool}"
|
||||
MCP_SERVER_NAME = "copilot"
|
||||
MCP_TOOL_PREFIX = f"mcp__{MCP_SERVER_NAME}__"
|
||||
|
||||
# Context variables to pass user/session info to tool execution
|
||||
_current_user_id: ContextVar[str | None] = ContextVar("current_user_id", default=None)
|
||||
_current_session: ContextVar[ChatSession | None] = ContextVar(
|
||||
"current_session", default=None
|
||||
)
|
||||
# Stash for MCP tool outputs before the SDK potentially truncates them.
|
||||
# Keyed by tool_name → full output string. Consumed (popped) by the
|
||||
# response adapter when it builds StreamToolOutputAvailable.
|
||||
_pending_tool_outputs: ContextVar[dict[str, str]] = ContextVar(
|
||||
"pending_tool_outputs", default=None # type: ignore[arg-type]
|
||||
)
|
||||
|
||||
# Callback type for delegating long-running tools to the non-SDK infrastructure.
|
||||
# Args: (tool_name, arguments, session) → MCP-formatted response dict.
|
||||
LongRunningCallback = Callable[
|
||||
[str, dict[str, Any], ChatSession], Awaitable[dict[str, Any]]
|
||||
]
|
||||
|
||||
# ContextVar so the service layer can inject the callback per-request.
|
||||
_long_running_callback: ContextVar[LongRunningCallback | None] = ContextVar(
|
||||
"long_running_callback", default=None
|
||||
)
|
||||
|
||||
|
||||
def set_execution_context(
|
||||
user_id: str | None,
|
||||
session: ChatSession,
|
||||
long_running_callback: LongRunningCallback | None = None,
|
||||
) -> None:
|
||||
"""Set the execution context for tool calls.
|
||||
|
||||
This must be called before streaming begins to ensure tools have access
|
||||
to user_id and session information.
|
||||
|
||||
Args:
|
||||
user_id: Current user's ID.
|
||||
session: Current chat session.
|
||||
long_running_callback: Optional callback to delegate long-running tools
|
||||
to the non-SDK background infrastructure (stream_registry + Redis).
|
||||
"""
|
||||
_current_user_id.set(user_id)
|
||||
_current_session.set(session)
|
||||
_pending_tool_outputs.set({})
|
||||
_long_running_callback.set(long_running_callback)
|
||||
|
||||
|
||||
def get_execution_context() -> tuple[str | None, ChatSession | None]:
|
||||
"""Get the current execution context."""
|
||||
return (
|
||||
_current_user_id.get(),
|
||||
_current_session.get(),
|
||||
)
|
||||
|
||||
|
||||
def pop_pending_tool_output(tool_name: str) -> str | None:
|
||||
"""Pop and return the stashed full output for *tool_name*.
|
||||
|
||||
The SDK CLI may truncate large tool results (writing them to disk and
|
||||
replacing the content with a file reference). This stash keeps the
|
||||
original MCP output so the response adapter can forward it to the
|
||||
frontend for proper widget rendering.
|
||||
|
||||
Returns ``None`` if nothing was stashed for *tool_name*.
|
||||
"""
|
||||
pending = _pending_tool_outputs.get(None)
|
||||
if pending is None:
|
||||
return None
|
||||
return pending.pop(tool_name, None)
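

# Hedged sketch (not part of the original change): the stash is write-once /
# read-once per tool name; the adapter stores the full output during execution
# and the response adapter pops it exactly once. Illustrated with a plain dict
# here, while the real stash lives in the ContextVar above.
_demo_pending = {"run_block": '{"result": "ok"}'}
assert _demo_pending.pop("run_block", None) == '{"result": "ok"}'
assert _demo_pending.pop("run_block", None) is None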
|
||||
|
||||
|
||||
async def _execute_tool_sync(
|
||||
base_tool: BaseTool,
|
||||
user_id: str | None,
|
||||
session: ChatSession,
|
||||
args: dict[str, Any],
|
||||
) -> dict[str, Any]:
|
||||
"""Execute a tool synchronously and return MCP-formatted response."""
|
||||
effective_id = f"sdk-{uuid.uuid4().hex[:12]}"
|
||||
result = await base_tool.execute(
|
||||
user_id=user_id,
|
||||
session=session,
|
||||
tool_call_id=effective_id,
|
||||
**args,
|
||||
)
|
||||
|
||||
text = (
|
||||
result.output if isinstance(result.output, str) else json.dumps(result.output)
|
||||
)
|
||||
|
||||
# Stash the full output before the SDK potentially truncates it.
|
||||
pending = _pending_tool_outputs.get(None)
|
||||
if pending is not None:
|
||||
pending[base_tool.name] = text
|
||||
|
||||
return {
|
||||
"content": [{"type": "text", "text": text}],
|
||||
"isError": not result.success,
|
||||
}
|
||||
|
||||
|
||||
def _mcp_error(message: str) -> dict[str, Any]:
|
||||
return {
|
||||
"content": [
|
||||
{"type": "text", "text": json.dumps({"error": message, "type": "error"})}
|
||||
],
|
||||
"isError": True,
|
||||
}
|
||||
|
||||
|
||||
def create_tool_handler(base_tool: BaseTool):
|
||||
"""Create an async handler function for a BaseTool.
|
||||
|
||||
This wraps the existing BaseTool._execute method to be compatible
|
||||
with the Claude Agent SDK MCP tool format.
|
||||
|
||||
Long-running tools (``is_long_running=True``) are delegated to the
|
||||
non-SDK background infrastructure via a callback set in the execution
|
||||
context. The callback persists the operation in Redis (stream_registry)
|
||||
so results survive page refreshes and pod restarts.
|
||||
"""
|
||||
|
||||
async def tool_handler(args: dict[str, Any]) -> dict[str, Any]:
|
||||
"""Execute the wrapped tool and return MCP-formatted response."""
|
||||
user_id, session = get_execution_context()
|
||||
|
||||
if session is None:
|
||||
return _mcp_error("No session context available")
|
||||
|
||||
# --- Long-running: delegate to non-SDK background infrastructure ---
|
||||
if base_tool.is_long_running:
|
||||
callback = _long_running_callback.get(None)
|
||||
if callback:
|
||||
try:
|
||||
return await callback(base_tool.name, args, session)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Long-running callback failed for {base_tool.name}: {e}",
|
||||
exc_info=True,
|
||||
)
|
||||
return _mcp_error(f"Failed to start {base_tool.name}: {e}")
|
||||
# No callback — fall through to synchronous execution
|
||||
logger.warning(
|
||||
f"[SDK] No long-running callback for {base_tool.name}, "
|
||||
f"executing synchronously (may block)"
|
||||
)
|
||||
|
||||
# --- Normal (fast) tool: execute synchronously ---
|
||||
try:
|
||||
return await _execute_tool_sync(base_tool, user_id, session, args)
|
||||
except Exception as e:
|
||||
logger.error(f"Error executing tool {base_tool.name}: {e}", exc_info=True)
|
||||
return _mcp_error(f"Failed to execute {base_tool.name}: {e}")
|
||||
|
||||
return tool_handler
|
||||
|
||||
|
||||
def _build_input_schema(base_tool: BaseTool) -> dict[str, Any]:
|
||||
"""Build a JSON Schema input schema for a tool."""
|
||||
return {
|
||||
"type": "object",
|
||||
"properties": base_tool.parameters.get("properties", {}),
|
||||
"required": base_tool.parameters.get("required", []),
|
||||
}
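

# Hedged usage sketch (not part of the original change): the schema produced
# for one of the registered tools. bash_exec declares required=["command"] in
# its parameters, and that carries straight through to the MCP input schema.
_demo_schema = _build_input_schema(TOOL_REGISTRY["bash_exec"])
assert _demo_schema["type"] == "object"
assert _demo_schema["required"] == ["command"]
assert "timeout" in _demo_schema["properties"]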
|
||||
|
||||
|
||||
async def _read_file_handler(args: dict[str, Any]) -> dict[str, Any]:
|
||||
"""Read a file with optional offset/limit. Restricted to SDK working directory.
|
||||
|
||||
After reading, the file is deleted to prevent accumulation in long-running pods.
|
||||
"""
|
||||
file_path = args.get("file_path", "")
|
||||
offset = args.get("offset", 0)
|
||||
limit = args.get("limit", 2000)
|
||||
|
||||
# Security: only allow reads under ~/.claude/projects/**/tool-results/
|
||||
real_path = os.path.realpath(file_path)
|
||||
if not real_path.startswith(_SDK_PROJECTS_DIR) or "tool-results" not in real_path:
|
||||
return {
|
||||
"content": [{"type": "text", "text": f"Access denied: {file_path}"}],
|
||||
"isError": True,
|
||||
}
|
||||
|
||||
try:
|
||||
with open(real_path) as f:
|
||||
lines = f.readlines()
|
||||
selected = lines[offset : offset + limit]
|
||||
content = "".join(selected)
|
||||
return {"content": [{"type": "text", "text": content}], "isError": False}
|
||||
except FileNotFoundError:
|
||||
return {
|
||||
"content": [{"type": "text", "text": f"File not found: {file_path}"}],
|
||||
"isError": True,
|
||||
}
|
||||
except Exception as e:
|
||||
return {
|
||||
"content": [{"type": "text", "text": f"Error reading file: {e}"}],
|
||||
"isError": True,
|
||||
}
|
||||
|
||||
|
||||
_READ_TOOL_NAME = "Read"
|
||||
_READ_TOOL_DESCRIPTION = (
|
||||
"Read a file from the local filesystem. "
|
||||
"Use offset and limit to read specific line ranges for large files."
|
||||
)
|
||||
_READ_TOOL_SCHEMA = {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"file_path": {
|
||||
"type": "string",
|
||||
"description": "The absolute path to the file to read",
|
||||
},
|
||||
"offset": {
|
||||
"type": "integer",
|
||||
"description": "Line number to start reading from (0-indexed). Default: 0",
|
||||
},
|
||||
"limit": {
|
||||
"type": "integer",
|
||||
"description": "Number of lines to read. Default: 2000",
|
||||
},
|
||||
},
|
||||
"required": ["file_path"],
|
||||
}
|
||||
|
||||
|
||||
# Create the MCP server configuration
|
||||
def create_copilot_mcp_server():
|
||||
"""Create an in-process MCP server configuration for CoPilot tools.
|
||||
|
||||
This can be passed to ClaudeAgentOptions.mcp_servers.
|
||||
|
||||
Note: The actual SDK MCP server creation depends on the claude-agent-sdk
|
||||
package being available. This function returns the configuration that
|
||||
can be used with the SDK.
|
||||
"""
|
||||
try:
|
||||
from claude_agent_sdk import create_sdk_mcp_server, tool
|
||||
|
||||
# Create decorated tool functions
|
||||
sdk_tools = []
|
||||
|
||||
for tool_name, base_tool in TOOL_REGISTRY.items():
|
||||
handler = create_tool_handler(base_tool)
|
||||
decorated = tool(
|
||||
tool_name,
|
||||
base_tool.description,
|
||||
_build_input_schema(base_tool),
|
||||
)(handler)
|
||||
sdk_tools.append(decorated)
|
||||
|
||||
# Add the Read tool so the SDK can read back oversized tool results
|
||||
read_tool = tool(
|
||||
_READ_TOOL_NAME,
|
||||
_READ_TOOL_DESCRIPTION,
|
||||
_READ_TOOL_SCHEMA,
|
||||
)(_read_file_handler)
|
||||
sdk_tools.append(read_tool)
|
||||
|
||||
server = create_sdk_mcp_server(
|
||||
name=MCP_SERVER_NAME,
|
||||
version="1.0.0",
|
||||
tools=sdk_tools,
|
||||
)
|
||||
|
||||
return server
|
||||
|
||||
except ImportError:
|
||||
# Let ImportError propagate so service.py handles the fallback
|
||||
raise
|
||||
|
||||
|
||||
# SDK built-in tools allowed within the workspace directory.
|
||||
# Security hooks validate that file paths stay within sdk_cwd.
|
||||
# Bash is NOT included — use the sandboxed MCP bash_exec tool instead,
|
||||
# which provides kernel-level network isolation via unshare --net.
|
||||
# Task allows spawning sub-agents (rate-limited by security hooks).
|
||||
_SDK_BUILTIN_TOOLS = ["Read", "Write", "Edit", "Glob", "Grep", "Task"]
|
||||
|
||||
# List of tool names for allowed_tools configuration
|
||||
# Include MCP tools, the MCP Read tool for oversized results,
|
||||
# and SDK built-in file tools for workspace operations.
|
||||
COPILOT_TOOL_NAMES = [
|
||||
*[f"{MCP_TOOL_PREFIX}{name}" for name in TOOL_REGISTRY.keys()],
|
||||
f"{MCP_TOOL_PREFIX}{_READ_TOOL_NAME}",
|
||||
*_SDK_BUILTIN_TOOLS,
|
||||
]
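

# Hedged illustration (not part of the original change): with the "copilot"
# server name, a registry tool such as bash_exec is exposed to the SDK as
# "mcp__copilot__bash_exec", while SDK built-ins keep their bare names and the
# built-in Bash is deliberately absent from the allow-list.
assert f"{MCP_TOOL_PREFIX}bash_exec" in COPILOT_TOOL_NAMES
assert "Read" in _SDK_BUILTIN_TOOLS
assert "Bash" not in COPILOT_TOOL_NAMES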
|
||||
@@ -245,12 +245,16 @@ async def _get_system_prompt_template(context: str) -> str:
|
||||
return DEFAULT_SYSTEM_PROMPT.format(users_information=context)
|
||||
|
||||
|
||||
async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]:
|
||||
async def _build_system_prompt(
|
||||
user_id: str | None, has_conversation_history: bool = False
|
||||
) -> tuple[str, Any]:
|
||||
"""Build the full system prompt including business understanding if available.
|
||||
|
||||
Args:
|
||||
user_id: The user ID for fetching business understanding
|
||||
If "default" and this is the user's first session, will use "onboarding" instead.
|
||||
user_id: The user ID for fetching business understanding.
|
||||
has_conversation_history: Whether there's existing conversation history.
|
||||
If True, we don't tell the model to greet/introduce (since they're
|
||||
already in a conversation).
|
||||
|
||||
Returns:
|
||||
Tuple of (compiled prompt string, business understanding object)
|
||||
@@ -266,6 +270,8 @@ async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]:
|
||||
|
||||
if understanding:
|
||||
context = format_understanding_for_prompt(understanding)
|
||||
elif has_conversation_history:
|
||||
context = "No prior understanding saved yet. Continue the existing conversation naturally."
|
||||
else:
|
||||
context = "This is the first time you are meeting the user. Greet them and introduce them to the platform"
|
||||
|
||||
@@ -374,7 +380,6 @@ async def stream_chat_completion(
|
||||
|
||||
Raises:
|
||||
NotFoundError: If session_id is invalid
|
||||
ValueError: If max_context_messages is exceeded
|
||||
|
||||
"""
|
||||
completion_start = time.monotonic()
|
||||
@@ -459,8 +464,9 @@ async def stream_chat_completion(
|
||||
|
||||
# Generate title for new sessions on first user message (non-blocking)
|
||||
# Check: is_user_message, no title yet, and this is the first user message
|
||||
if is_user_message and message and not session.title:
|
||||
user_messages = [m for m in session.messages if m.role == "user"]
|
||||
user_messages = [m for m in session.messages if m.role == "user"]
|
||||
first_user_msg = message or (user_messages[0].content if user_messages else None)
|
||||
if is_user_message and first_user_msg and not session.title:
|
||||
if len(user_messages) == 1:
|
||||
# First user message - generate title in background
|
||||
import asyncio
|
||||
@@ -468,7 +474,7 @@ async def stream_chat_completion(
|
||||
# Capture only the values we need (not the session object) to avoid
|
||||
# stale data issues when the main flow modifies the session
|
||||
captured_session_id = session_id
|
||||
captured_message = message
|
||||
captured_message = first_user_msg
|
||||
captured_user_id = user_id
|
||||
|
||||
async def _update_title():
|
||||
@@ -1237,7 +1243,7 @@ async def _stream_chat_chunks(
|
||||
|
||||
total_time = (time_module.perf_counter() - stream_chunks_start) * 1000
|
||||
logger.info(
|
||||
f"[TIMING] _stream_chat_chunks COMPLETED in {total_time/1000:.1f}s; "
|
||||
f"[TIMING] _stream_chat_chunks COMPLETED in {total_time / 1000:.1f}s; "
|
||||
f"session={session.session_id}, user={session.user_id}",
|
||||
extra={"json_fields": {**log_meta, "total_time_ms": total_time}},
|
||||
)
|
||||
|
||||
@@ -814,6 +814,28 @@ async def get_active_task_for_session(
|
||||
if task_user_id and user_id != task_user_id:
|
||||
continue
|
||||
|
||||
# Auto-expire stale tasks that exceeded stream_timeout
|
||||
created_at_str = meta.get("created_at", "")
|
||||
if created_at_str:
|
||||
try:
|
||||
created_at = datetime.fromisoformat(created_at_str)
|
||||
age_seconds = (
|
||||
datetime.now(timezone.utc) - created_at
|
||||
).total_seconds()
|
||||
if age_seconds > config.stream_timeout:
|
||||
logger.warning(
|
||||
f"[TASK_LOOKUP] Auto-expiring stale task {task_id[:8]}... "
|
||||
f"(age={age_seconds:.0f}s > timeout={config.stream_timeout}s)"
|
||||
)
|
||||
await mark_task_completed(task_id, "failed")
|
||||
continue
|
||||
except (ValueError, TypeError):
|
||||
pass
|
||||
|
||||
logger.info(
|
||||
f"[TASK_LOOKUP] Found running task {task_id[:8]}... for session {session_id[:8]}..."
|
||||
)
|
||||
|
||||
# Get the last message ID from Redis Stream
|
||||
stream_key = _get_task_stream_key(task_id)
|
||||
last_id = "0-0"
|
||||
|
||||
@@ -9,6 +9,8 @@ from backend.api.features.chat.tracking import track_tool_called
from .add_understanding import AddUnderstandingTool
from .agent_output import AgentOutputTool
from .base import BaseTool
from .bash_exec import BashExecTool
from .check_operation_status import CheckOperationStatusTool
from .create_agent import CreateAgentTool
from .customize_agent import CustomizeAgentTool
from .edit_agent import EditAgentTool
@@ -19,6 +21,7 @@ from .get_doc_page import GetDocPageTool
from .run_agent import RunAgentTool
from .run_block import RunBlockTool
from .search_docs import SearchDocsTool
from .web_fetch import WebFetchTool
from .workspace_files import (
    DeleteWorkspaceFileTool,
    ListWorkspaceFilesTool,
@@ -43,9 +46,14 @@ TOOL_REGISTRY: dict[str, BaseTool] = {
    "run_agent": RunAgentTool(),
    "run_block": RunBlockTool(),
    "view_agent_output": AgentOutputTool(),
    "check_operation_status": CheckOperationStatusTool(),
    "search_docs": SearchDocsTool(),
    "get_doc_page": GetDocPageTool(),
    # Workspace tools for CoPilot file operations
    # Web fetch for safe URL retrieval
    "web_fetch": WebFetchTool(),
    # Sandboxed code execution (bubblewrap)
    "bash_exec": BashExecTool(),
    # Persistent workspace tools (cloud storage, survives across sessions)
    "list_workspace_files": ListWorkspaceFilesTool(),
    "read_workspace_file": ReadWorkspaceFileTool(),
    "write_workspace_file": WriteWorkspaceFileTool(),
@@ -0,0 +1,131 @@
|
||||
"""Bash execution tool — run shell commands in a bubblewrap sandbox.
|
||||
|
||||
Full Bash scripting is allowed (loops, conditionals, pipes, functions, etc.).
|
||||
Safety comes from OS-level isolation (bubblewrap): only system dirs visible
|
||||
read-only, writable workspace only, clean env, no network.
|
||||
|
||||
Requires bubblewrap (``bwrap``) — the tool is disabled when bwrap is not
|
||||
available (e.g. macOS development).
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from backend.api.features.chat.model import ChatSession
|
||||
from backend.api.features.chat.tools.base import BaseTool
|
||||
from backend.api.features.chat.tools.models import (
|
||||
BashExecResponse,
|
||||
ErrorResponse,
|
||||
ToolResponseBase,
|
||||
)
|
||||
from backend.api.features.chat.tools.sandbox import (
|
||||
get_workspace_dir,
|
||||
has_full_sandbox,
|
||||
run_sandboxed,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BashExecTool(BaseTool):
|
||||
"""Execute Bash commands in a bubblewrap sandbox."""
|
||||
|
||||
@property
|
||||
def name(self) -> str:
|
||||
return "bash_exec"
|
||||
|
||||
@property
|
||||
def description(self) -> str:
|
||||
if not has_full_sandbox():
|
||||
return (
|
||||
"Bash execution is DISABLED — bubblewrap sandbox is not "
|
||||
"available on this platform. Do not call this tool."
|
||||
)
|
||||
return (
|
||||
"Execute a Bash command or script in a bubblewrap sandbox. "
|
||||
"Full Bash scripting is supported (loops, conditionals, pipes, "
|
||||
"functions, etc.). "
|
||||
"The sandbox shares the same working directory as the SDK Read/Write "
|
||||
"tools — files created by either are accessible to both. "
|
||||
"SECURITY: Only system directories (/usr, /bin, /lib, /etc) are "
|
||||
"visible read-only, the per-session workspace is the only writable "
|
||||
"path, environment variables are wiped (no secrets), all network "
|
||||
"access is blocked at the kernel level, and resource limits are "
|
||||
"enforced (max 64 processes, 512MB memory, 50MB file size). "
|
||||
"Application code, configs, and other directories are NOT accessible. "
|
||||
"To fetch web content, use the web_fetch tool instead. "
|
||||
"Execution is killed after the timeout (default 30s, max 120s). "
|
||||
"Returns stdout and stderr. "
|
||||
"Useful for file manipulation, data processing with Unix tools "
|
||||
"(grep, awk, sed, jq, etc.), and running shell scripts."
|
||||
)
|
||||
|
||||
@property
|
||||
def parameters(self) -> dict[str, Any]:
|
||||
return {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"command": {
|
||||
"type": "string",
|
||||
"description": "Bash command or script to execute.",
|
||||
},
|
||||
"timeout": {
|
||||
"type": "integer",
|
||||
"description": (
|
||||
"Max execution time in seconds (default 30, max 120)."
|
||||
),
|
||||
"default": 30,
|
||||
},
|
||||
},
|
||||
"required": ["command"],
|
||||
}
|
||||
|
||||
@property
|
||||
def requires_auth(self) -> bool:
|
||||
return False
|
||||
|
||||
async def _execute(
|
||||
self,
|
||||
user_id: str | None,
|
||||
session: ChatSession,
|
||||
**kwargs: Any,
|
||||
) -> ToolResponseBase:
|
||||
session_id = session.session_id if session else None
|
||||
|
||||
if not has_full_sandbox():
|
||||
return ErrorResponse(
|
||||
message="bash_exec requires bubblewrap sandbox (Linux only).",
|
||||
error="sandbox_unavailable",
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
command: str = (kwargs.get("command") or "").strip()
|
||||
timeout: int = kwargs.get("timeout", 30)
|
||||
|
||||
if not command:
|
||||
return ErrorResponse(
|
||||
message="No command provided.",
|
||||
error="empty_command",
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
workspace = get_workspace_dir(session_id or "default")
|
||||
|
||||
stdout, stderr, exit_code, timed_out = await run_sandboxed(
|
||||
command=["bash", "-c", command],
|
||||
cwd=workspace,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
return BashExecResponse(
|
||||
message=(
|
||||
"Execution timed out"
|
||||
if timed_out
|
||||
else f"Command executed (exit {exit_code})"
|
||||
),
|
||||
stdout=stdout,
|
||||
stderr=stderr,
|
||||
exit_code=exit_code,
|
||||
timed_out=timed_out,
|
||||
session_id=session_id,
|
||||
)
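

# Hedged usage sketch (not part of the original change): how the tool is
# expected to behave on a Linux host with bubblewrap installed. `session` is a
# placeholder ChatSession supplied by the caller; the returned response carries
# stdout, stderr, exit_code, and timed_out as defined above.
async def _demo_bash_exec(session: ChatSession) -> ToolResponseBase:
    # Returns an ErrorResponse instead of raising when the sandbox is missing.
    tool = BashExecTool()
    return await tool._execute(
        user_id=None, session=session, command="echo hi", timeout=10
    )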
|
||||
@@ -0,0 +1,127 @@
|
||||
"""CheckOperationStatusTool — query the status of a long-running operation."""
|
||||
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from backend.api.features.chat.model import ChatSession
|
||||
from backend.api.features.chat.tools.base import BaseTool
|
||||
from backend.api.features.chat.tools.models import (
|
||||
ErrorResponse,
|
||||
ResponseType,
|
||||
ToolResponseBase,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class OperationStatusResponse(ToolResponseBase):
|
||||
"""Response for check_operation_status tool."""
|
||||
|
||||
type: ResponseType = ResponseType.OPERATION_STATUS
|
||||
task_id: str
|
||||
operation_id: str
|
||||
status: str # "running", "completed", "failed"
|
||||
tool_name: str | None = None
|
||||
message: str = ""
|
||||
|
||||
|
||||
class CheckOperationStatusTool(BaseTool):
|
||||
"""Check the status of a long-running operation (create_agent, edit_agent, etc.).
|
||||
|
||||
The CoPilot uses this tool to report back to the user whether an
|
||||
operation that was started earlier has completed, failed, or is still
|
||||
running.
|
||||
"""
|
||||
|
||||
@property
|
||||
def name(self) -> str:
|
||||
return "check_operation_status"
|
||||
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"Check the current status of a long-running operation such as "
|
||||
"create_agent or edit_agent. Accepts either an operation_id or "
|
||||
"task_id from a previous operation_started response. "
|
||||
"Returns the current status: running, completed, or failed."
|
||||
)
|
||||
|
||||
@property
|
||||
def parameters(self) -> dict[str, Any]:
|
||||
return {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"operation_id": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"The operation_id from an operation_started response."
|
||||
),
|
||||
},
|
||||
"task_id": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"The task_id from an operation_started response. "
|
||||
"Used as fallback if operation_id is not provided."
|
||||
),
|
||||
},
|
||||
},
|
||||
"required": [],
|
||||
}
|
||||
|
||||
@property
|
||||
def requires_auth(self) -> bool:
|
||||
return False
|
||||
|
||||
async def _execute(
|
||||
self,
|
||||
user_id: str | None,
|
||||
session: ChatSession,
|
||||
**kwargs,
|
||||
) -> ToolResponseBase:
|
||||
from backend.api.features.chat import stream_registry
|
||||
|
||||
operation_id: str = kwargs.get("operation_id", "").strip()
|
||||
task_id: str = kwargs.get("task_id", "").strip()
|
||||
|
||||
if not operation_id and not task_id:
|
||||
return ErrorResponse(
|
||||
message="Please provide an operation_id or task_id.",
|
||||
error="missing_parameter",
|
||||
)
|
||||
|
||||
task = None
|
||||
if operation_id:
|
||||
task = await stream_registry.find_task_by_operation_id(operation_id)
|
||||
if task is None and task_id:
|
||||
task = await stream_registry.get_task(task_id)
|
||||
|
||||
if task is None:
|
||||
# Task not in Redis — it may have already expired (TTL).
|
||||
# Check conversation history for the result instead.
|
||||
return ErrorResponse(
|
||||
message=(
|
||||
"Operation not found — it may have already completed and "
|
||||
"expired from the status tracker. Check the conversation "
|
||||
"history for the result."
|
||||
),
|
||||
error="not_found",
|
||||
)
|
||||
|
||||
status_messages = {
|
||||
"running": (
|
||||
f"The {task.tool_name or 'operation'} is still running. "
|
||||
"Please wait for it to complete."
|
||||
),
|
||||
"completed": (
|
||||
f"The {task.tool_name or 'operation'} has completed successfully."
|
||||
),
|
||||
"failed": f"The {task.tool_name or 'operation'} has failed.",
|
||||
}
|
||||
|
||||
return OperationStatusResponse(
|
||||
task_id=task.task_id,
|
||||
operation_id=task.operation_id,
|
||||
status=task.status,
|
||||
tool_name=task.tool_name,
|
||||
message=status_messages.get(task.status, f"Status: {task.status}"),
|
||||
)
|
||||
@@ -40,6 +40,12 @@ class ResponseType(str, Enum):
    OPERATION_IN_PROGRESS = "operation_in_progress"
    # Input validation
    INPUT_VALIDATION_ERROR = "input_validation_error"
    # Web fetch
    WEB_FETCH = "web_fetch"
    # Code execution
    BASH_EXEC = "bash_exec"
    # Operation status check
    OPERATION_STATUS = "operation_status"


# Base response model
@@ -335,11 +341,17 @@ class BlockInfoSummary(BaseModel):
    name: str
    description: str
    categories: list[str]
    input_schema: dict[str, Any]
    output_schema: dict[str, Any]
    input_schema: dict[str, Any] = Field(
        default_factory=dict,
        description="Full JSON schema for block inputs",
    )
    output_schema: dict[str, Any] = Field(
        default_factory=dict,
        description="Full JSON schema for block outputs",
    )
    required_inputs: list[BlockInputFieldInfo] = Field(
        default_factory=list,
        description="List of required input fields for this block",
        description="List of input fields for this block",
    )


@@ -352,7 +364,7 @@ class BlockListResponse(ToolResponseBase):
    query: str
    usage_hint: str = Field(
        default="To execute a block, call run_block with block_id set to the block's "
        "'id' field and input_data containing the required fields from input_schema."
        "'id' field and input_data containing the fields listed in required_inputs."
    )


@@ -421,3 +433,24 @@ class AsyncProcessingResponse(ToolResponseBase):
    status: str = "accepted"  # Must be "accepted" for detection
    operation_id: str | None = None
    task_id: str | None = None


class WebFetchResponse(ToolResponseBase):
    """Response for web_fetch tool."""

    type: ResponseType = ResponseType.WEB_FETCH
    url: str
    status_code: int
    content_type: str
    content: str
    truncated: bool = False


class BashExecResponse(ToolResponseBase):
    """Response for bash_exec tool."""

    type: ResponseType = ResponseType.BASH_EXEC
    stdout: str
    stderr: str
    exit_code: int
    timed_out: bool = False
@@ -0,0 +1,267 @@
|
||||
"""Sandbox execution utilities for code execution tools.
|
||||
|
||||
Provides filesystem + network isolated command execution using **bubblewrap**
|
||||
(``bwrap``): whitelist-only filesystem (only system dirs visible read-only),
|
||||
writable workspace only, clean environment, network blocked.
|
||||
|
||||
Tools that call :func:`run_sandboxed` must first check :func:`has_full_sandbox`
|
||||
and refuse to run if bubblewrap is not available.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import os
|
||||
import platform
|
||||
import shutil
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Output limits — prevent blowing up LLM context
|
||||
_MAX_OUTPUT_CHARS = 50_000
|
||||
_DEFAULT_TIMEOUT = 30
|
||||
_MAX_TIMEOUT = 120
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Sandbox capability detection (cached at first call)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_BWRAP_AVAILABLE: bool | None = None
|
||||
|
||||
|
||||
def has_full_sandbox() -> bool:
|
||||
"""Return True if bubblewrap is available (filesystem + network isolation).
|
||||
|
||||
On non-Linux platforms (macOS), always returns False.
|
||||
"""
|
||||
global _BWRAP_AVAILABLE
|
||||
if _BWRAP_AVAILABLE is None:
|
||||
_BWRAP_AVAILABLE = (
|
||||
platform.system() == "Linux" and shutil.which("bwrap") is not None
|
||||
)
|
||||
return _BWRAP_AVAILABLE
|
||||
|
||||
|
||||
WORKSPACE_PREFIX = "/tmp/copilot-"
|
||||
|
||||
|
||||
def make_session_path(session_id: str) -> str:
|
||||
"""Build a sanitized, session-specific path under :data:`WORKSPACE_PREFIX`.
|
||||
|
||||
Shared by both the SDK working-directory setup and the sandbox tools so
|
||||
they always resolve to the same directory for a given session.
|
||||
|
||||
Steps:
|
||||
1. Strip all characters except ``[A-Za-z0-9-]``.
|
||||
2. Construct ``/tmp/copilot-<safe_id>``.
|
||||
3. Validate via ``os.path.normpath`` + ``startswith`` (CodeQL-recognised
|
||||
sanitizer) to prevent path traversal.
|
||||
|
||||
Raises:
|
||||
ValueError: If the resulting path escapes the prefix.
|
||||
"""
|
||||
import re
|
||||
|
||||
safe_id = re.sub(r"[^A-Za-z0-9-]", "", session_id)
|
||||
if not safe_id:
|
||||
safe_id = "default"
|
||||
path = os.path.normpath(f"{WORKSPACE_PREFIX}{safe_id}")
|
||||
if not path.startswith(WORKSPACE_PREFIX):
|
||||
raise ValueError(f"Session path escaped prefix: {path}")
|
||||
return path
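

# Hedged usage sketch (not part of the original change): traversal characters
# are stripped before the path is built, so hostile session IDs still resolve
# under the /tmp/copilot- prefix. The IDs below are placeholders.
assert make_session_path("abc-123") == "/tmp/copilot-abc-123"
assert make_session_path("../../etc") == "/tmp/copilot-etc"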
|
||||
|
||||
|
||||
def get_workspace_dir(session_id: str) -> str:
|
||||
"""Get or create the workspace directory for a session.
|
||||
|
||||
    Uses :func:`make_session_path` — the same path the SDK uses — so that
    bash_exec shares the workspace with the SDK file tools.
    """
    workspace = make_session_path(session_id)
    os.makedirs(workspace, exist_ok=True)
    return workspace


# ---------------------------------------------------------------------------
# Bubblewrap command builder
# ---------------------------------------------------------------------------

# System directories mounted read-only inside the sandbox.
# ONLY these are visible — /app, /root, /home, /opt, /var etc. are NOT accessible.
_SYSTEM_RO_BINDS = [
    "/usr",  # binaries, libraries, Python interpreter
    "/etc",  # system config: ld.so, locale, passwd, alternatives
]

# Compat paths: symlinks to /usr/* on modern Debian, real dirs on older systems.
# On Debian 13 these are symlinks (e.g. /bin -> usr/bin). bwrap --ro-bind
# can't create a symlink target, so we detect and use --symlink instead.
# /lib64 is critical: the ELF dynamic linker lives at /lib64/ld-linux-x86-64.so.2.
_COMPAT_PATHS = [
    ("/bin", "usr/bin"),  # -> /usr/bin on Debian 13
    ("/sbin", "usr/sbin"),  # -> /usr/sbin on Debian 13
    ("/lib", "usr/lib"),  # -> /usr/lib on Debian 13
    ("/lib64", "usr/lib64"),  # 64-bit libraries / ELF interpreter
]

# Resource limits to prevent fork bombs, memory exhaustion, and disk abuse.
# Applied via ulimit inside the sandbox before exec'ing the user command.
_RESOURCE_LIMITS = (
    "ulimit -u 64"  # max 64 processes (prevents fork bombs)
    " -v 524288"  # 512 MB virtual memory
    " -f 51200"  # 50 MB max file size (1024-byte blocks)
    " -n 256"  # 256 open file descriptors
    " 2>/dev/null"
)


def _build_bwrap_command(
    command: list[str], cwd: str, env: dict[str, str]
) -> list[str]:
    """Build a bubblewrap command with strict filesystem + network isolation.

    Security model:
    - **Whitelist-only filesystem**: only system directories (``/usr``, ``/etc``,
      ``/bin``, ``/lib``) are mounted read-only. Application code (``/app``),
      home directories, ``/var``, ``/opt``, etc. are NOT accessible at all.
    - **Writable workspace only**: the per-session workspace is the sole
      writable path.
    - **Clean environment**: ``--clearenv`` wipes all inherited env vars.
      Only the explicitly-passed safe env vars are set inside the sandbox.
    - **Network isolation**: ``--unshare-net`` blocks all network access.
    - **Resource limits**: ulimit caps on processes (64), memory (512MB),
      file size (50MB), and open FDs (256) to prevent fork bombs and abuse.
    - **New session**: prevents terminal control escape.
    - **Die with parent**: prevents orphaned sandbox processes.
    """
    cmd = [
        "bwrap",
        # Create a new user namespace so bwrap can set up sandboxing
        # inside unprivileged Docker containers (no CAP_SYS_ADMIN needed).
        "--unshare-user",
        # Wipe all inherited environment variables (API keys, secrets, etc.)
        "--clearenv",
    ]

    # Set only the safe env vars inside the sandbox
    for key, value in env.items():
        cmd.extend(["--setenv", key, value])

    # System directories: read-only
    for path in _SYSTEM_RO_BINDS:
        cmd.extend(["--ro-bind", path, path])

    # Compat paths: use --symlink when host path is a symlink (Debian 13),
    # --ro-bind when it's a real directory (older distros).
    for path, symlink_target in _COMPAT_PATHS:
        if os.path.islink(path):
            cmd.extend(["--symlink", symlink_target, path])
        elif os.path.exists(path):
            cmd.extend(["--ro-bind", path, path])

    # Wrap the user command with resource limits:
    #   sh -c 'ulimit ...; exec "$@"' -- <original command>
    # `exec "$@"` replaces the shell so there's no extra process overhead,
    # and properly handles arguments with spaces.
    limited_command = [
        "sh",
        "-c",
        f'{_RESOURCE_LIMITS}; exec "$@"',
        "--",
        *command,
    ]

    cmd.extend(
        [
            # Fresh virtual filesystems
            "--dev",
            "/dev",
            "--proc",
            "/proc",
            "--tmpfs",
            "/tmp",
            # Workspace bind AFTER --tmpfs /tmp so it's visible through the tmpfs.
            # (workspace lives under /tmp/copilot-<session>)
            "--bind",
            cwd,
            cwd,
            # Isolation
            "--unshare-net",
            "--die-with-parent",
            "--new-session",
            "--chdir",
            cwd,
            "--",
            *limited_command,
        ]
    )

    return cmd


# ---------------------------------------------------------------------------
# Public API
# ---------------------------------------------------------------------------


async def run_sandboxed(
    command: list[str],
    cwd: str,
    timeout: int = _DEFAULT_TIMEOUT,
    env: dict[str, str] | None = None,
) -> tuple[str, str, int, bool]:
    """Run a command inside a bubblewrap sandbox.

    Callers **must** check :func:`has_full_sandbox` before calling this
    function. If bubblewrap is not available, this function raises
    :class:`RuntimeError` rather than running unsandboxed.

    Returns:
        (stdout, stderr, exit_code, timed_out)
    """
    if not has_full_sandbox():
        raise RuntimeError(
            "run_sandboxed() requires bubblewrap but bwrap is not available. "
            "Callers must check has_full_sandbox() before calling this function."
        )

    timeout = min(max(timeout, 1), _MAX_TIMEOUT)

    safe_env = {
        "PATH": "/usr/local/bin:/usr/bin:/bin",
        "HOME": cwd,
        "TMPDIR": cwd,
        "LANG": "en_US.UTF-8",
        "PYTHONDONTWRITEBYTECODE": "1",
        "PYTHONIOENCODING": "utf-8",
    }
    if env:
        safe_env.update(env)

    full_command = _build_bwrap_command(command, cwd, safe_env)

    try:
        proc = await asyncio.create_subprocess_exec(
            *full_command,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            cwd=cwd,
            env=safe_env,
        )

        try:
            stdout_bytes, stderr_bytes = await asyncio.wait_for(
                proc.communicate(), timeout=timeout
            )
            stdout = stdout_bytes.decode("utf-8", errors="replace")[:_MAX_OUTPUT_CHARS]
            stderr = stderr_bytes.decode("utf-8", errors="replace")[:_MAX_OUTPUT_CHARS]
            return stdout, stderr, proc.returncode or 0, False
        except asyncio.TimeoutError:
            proc.kill()
            await proc.communicate()
            return "", f"Execution timed out after {timeout}s", -1, True

    except RuntimeError:
        raise
    except Exception as e:
        return "", f"Sandbox error: {e}", -1, False
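A minimal usage sketch of the public API above (illustrative only: the session id and the echoed command are placeholders, and the call assumes the module-level helpers shown in this file):

    import asyncio

    async def demo() -> None:
        workspace = make_session_path("demo-session")  # hypothetical session id
        os.makedirs(workspace, exist_ok=True)

        if not has_full_sandbox():
            raise RuntimeError("bwrap unavailable; bash_exec must refuse to run")

        stdout, stderr, exit_code, timed_out = await run_sandboxed(
            ["python3", "-c", "print('hello from the sandbox')"],
            cwd=workspace,
            timeout=30,
        )
        print(exit_code, timed_out, stdout, stderr)

    asyncio.run(demo())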
@@ -0,0 +1,156 @@
"""Web fetch tool — safely retrieve public web page content."""

import logging
from typing import Any

import aiohttp
import html2text

from backend.api.features.chat.model import ChatSession
from backend.api.features.chat.tools.base import BaseTool
from backend.api.features.chat.tools.models import (
    ErrorResponse,
    ToolResponseBase,
    WebFetchResponse,
)
from backend.util.request import Requests

logger = logging.getLogger(__name__)

# Limits
_MAX_CONTENT_BYTES = 102_400  # 100 KB download cap
_MAX_OUTPUT_CHARS = 50_000  # 50K char truncation for LLM context
_REQUEST_TIMEOUT = aiohttp.ClientTimeout(total=15)

# Content types we'll read as text
_TEXT_CONTENT_TYPES = {
    "text/html",
    "text/plain",
    "text/xml",
    "text/csv",
    "text/markdown",
    "application/json",
    "application/xml",
    "application/xhtml+xml",
    "application/rss+xml",
    "application/atom+xml",
}


def _is_text_content(content_type: str) -> bool:
    base = content_type.split(";")[0].strip().lower()
    return base in _TEXT_CONTENT_TYPES or base.startswith("text/")


def _html_to_text(html: str) -> str:
    h = html2text.HTML2Text()
    h.ignore_links = False
    h.ignore_images = True
    h.body_width = 0
    return h.handle(html)


class WebFetchTool(BaseTool):
    """Safely fetch content from a public URL using SSRF-protected HTTP."""

    @property
    def name(self) -> str:
        return "web_fetch"

    @property
    def description(self) -> str:
        return (
            "Fetch the content of a public web page by URL. "
            "Returns readable text extracted from HTML by default. "
            "Useful for reading documentation, articles, and API responses. "
            "Only supports HTTP/HTTPS GET requests to public URLs "
            "(private/internal network addresses are blocked)."
        )

    @property
    def parameters(self) -> dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "url": {
                    "type": "string",
                    "description": "The public HTTP/HTTPS URL to fetch.",
                },
                "extract_text": {
                    "type": "boolean",
                    "description": (
                        "If true (default), extract readable text from HTML. "
                        "If false, return raw content."
                    ),
                    "default": True,
                },
            },
            "required": ["url"],
        }

    @property
    def requires_auth(self) -> bool:
        return False

    async def _execute(
        self,
        user_id: str | None,
        session: ChatSession,
        **kwargs: Any,
    ) -> ToolResponseBase:
        url: str = (kwargs.get("url") or "").strip()
        extract_text: bool = kwargs.get("extract_text", True)
        session_id = session.session_id if session else None

        if not url:
            return ErrorResponse(
                message="Please provide a URL to fetch.",
                error="missing_url",
                session_id=session_id,
            )

        try:
            client = Requests(raise_for_status=False, retry_max_attempts=1)
            response = await client.get(url, timeout=_REQUEST_TIMEOUT)
        except ValueError as e:
            # validate_url raises ValueError for SSRF / blocked IPs
            return ErrorResponse(
                message=f"URL blocked: {e}",
                error="url_blocked",
                session_id=session_id,
            )
        except Exception as e:
            logger.warning(f"[web_fetch] Request failed for {url}: {e}")
            return ErrorResponse(
                message=f"Failed to fetch URL: {e}",
                error="fetch_failed",
                session_id=session_id,
            )

        content_type = response.headers.get("content-type", "")
        if not _is_text_content(content_type):
            return ErrorResponse(
                message=f"Non-text content type: {content_type.split(';')[0]}",
                error="unsupported_content_type",
                session_id=session_id,
            )

        raw = response.content[:_MAX_CONTENT_BYTES]
        text = raw.decode("utf-8", errors="replace")

        if extract_text and "html" in content_type.lower():
            text = _html_to_text(text)

        truncated = len(text) > _MAX_OUTPUT_CHARS
        if truncated:
            text = text[:_MAX_OUTPUT_CHARS]

        return WebFetchResponse(
            message=f"Fetched {url}" + (" (truncated)" if truncated else ""),
            url=response.url,
            status_code=response.status,
            content_type=content_type.split(";")[0].strip(),
            content=text,
            truncated=truncated,
            session_id=session_id,
        )
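The HTML-to-text step above leans on html2text; a self-contained sketch of the same conversion and truncation behaviour (the sample HTML string is made up for illustration):

    import html2text

    MAX_OUTPUT_CHARS = 50_000  # mirrors _MAX_OUTPUT_CHARS above

    def html_to_text(html: str) -> str:
        h = html2text.HTML2Text()
        h.ignore_links = False  # keep link targets in the output
        h.ignore_images = True  # drop images entirely
        h.body_width = 0        # no hard line wrapping
        return h.handle(html)

    sample = "<h1>Docs</h1><p>See <a href='https://example.com'>the example site</a>.</p>"
    text = html_to_text(sample)
    truncated = len(text) > MAX_OUTPUT_CHARS
    print(text[:MAX_OUTPUT_CHARS] + (" (truncated)" if truncated else ""))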
@@ -88,7 +88,9 @@ class ListWorkspaceFilesTool(BaseTool):
     @property
     def description(self) -> str:
         return (
-            "List files in the user's workspace. "
+            "List files in the user's persistent workspace (cloud storage). "
+            "These files survive across sessions. "
+            "For ephemeral session files, use the SDK Read/Glob tools instead. "
             "Returns file names, paths, sizes, and metadata. "
             "Optionally filter by path prefix."
         )
@@ -204,7 +206,9 @@ class ReadWorkspaceFileTool(BaseTool):
     @property
     def description(self) -> str:
         return (
-            "Read a file from the user's workspace. "
+            "Read a file from the user's persistent workspace (cloud storage). "
+            "These files survive across sessions. "
+            "For ephemeral session files, use the SDK Read tool instead. "
             "Specify either file_id or path to identify the file. "
             "For small text files, returns content directly. "
             "For large or binary files, returns metadata and a download URL. "
@@ -378,7 +382,9 @@ class WriteWorkspaceFileTool(BaseTool):
     @property
     def description(self) -> str:
         return (
-            "Write or create a file in the user's workspace. "
+            "Write or create a file in the user's persistent workspace (cloud storage). "
+            "These files survive across sessions. "
+            "For ephemeral session files, use the SDK Write tool instead. "
             "Provide the content as a base64-encoded string. "
             f"Maximum file size is {Config().max_file_size_mb}MB. "
             "Files are saved to the current session's folder by default. "
@@ -523,7 +529,7 @@ class DeleteWorkspaceFileTool(BaseTool):
     @property
     def description(self) -> str:
         return (
-            "Delete a file from the user's workspace. "
+            "Delete a file from the user's persistent workspace (cloud storage). "
             "Specify either file_id or path to identify the file. "
             "Paths are scoped to the current session by default. "
             "Use /sessions/<session_id>/... for cross-session access."
@@ -1,10 +1,10 @@
|
||||
import json
|
||||
import shlex
|
||||
import uuid
|
||||
from typing import Literal, Optional
|
||||
from typing import TYPE_CHECKING, Literal, Optional
|
||||
|
||||
from e2b import AsyncSandbox as BaseAsyncSandbox
|
||||
from pydantic import BaseModel, SecretStr
|
||||
from pydantic import SecretStr
|
||||
|
||||
from backend.blocks._base import (
|
||||
Block,
|
||||
@@ -20,6 +20,13 @@ from backend.data.model import (
|
||||
SchemaField,
|
||||
)
|
||||
from backend.integrations.providers import ProviderName
|
||||
from backend.util.sandbox_files import (
|
||||
SandboxFileOutput,
|
||||
extract_and_store_sandbox_files,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from backend.executor.utils import ExecutionContext
|
||||
|
||||
|
||||
class ClaudeCodeExecutionError(Exception):
|
||||
@@ -174,22 +181,15 @@ class ClaudeCodeBlock(Block):
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class FileOutput(BaseModel):
|
||||
"""A file extracted from the sandbox."""
|
||||
|
||||
path: str
|
||||
relative_path: str # Path relative to working directory (for GitHub, etc.)
|
||||
name: str
|
||||
content: str
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
response: str = SchemaField(
|
||||
description="The output/response from Claude Code execution"
|
||||
)
|
||||
files: list["ClaudeCodeBlock.FileOutput"] = SchemaField(
|
||||
files: list[SandboxFileOutput] = SchemaField(
|
||||
description=(
|
||||
"List of text files created/modified by Claude Code during this execution. "
|
||||
"Each file has 'path', 'relative_path', 'name', and 'content' fields."
|
||||
"Each file has 'path', 'relative_path', 'name', 'content', and 'workspace_ref' fields. "
|
||||
"workspace_ref contains a workspace:// URI if the file was stored to workspace."
|
||||
)
|
||||
)
|
||||
conversation_history: str = SchemaField(
|
||||
@@ -252,6 +252,7 @@ class ClaudeCodeBlock(Block):
|
||||
"relative_path": "index.html",
|
||||
"name": "index.html",
|
||||
"content": "<html>Hello World</html>",
|
||||
"workspace_ref": None,
|
||||
}
|
||||
],
|
||||
),
|
||||
@@ -267,11 +268,12 @@ class ClaudeCodeBlock(Block):
|
||||
"execute_claude_code": lambda *args, **kwargs: (
|
||||
"Created index.html with hello world content", # response
|
||||
[
|
||||
ClaudeCodeBlock.FileOutput(
|
||||
SandboxFileOutput(
|
||||
path="/home/user/index.html",
|
||||
relative_path="index.html",
|
||||
name="index.html",
|
||||
content="<html>Hello World</html>",
|
||||
workspace_ref=None,
|
||||
)
|
||||
], # files
|
||||
"User: Create a hello world HTML file\n"
|
||||
@@ -294,7 +296,8 @@ class ClaudeCodeBlock(Block):
|
||||
existing_sandbox_id: str,
|
||||
conversation_history: str,
|
||||
dispose_sandbox: bool,
|
||||
) -> tuple[str, list["ClaudeCodeBlock.FileOutput"], str, str, str]:
|
||||
execution_context: "ExecutionContext",
|
||||
) -> tuple[str, list[SandboxFileOutput], str, str, str]:
|
||||
"""
|
||||
Execute Claude Code in an E2B sandbox.
|
||||
|
||||
@@ -449,14 +452,18 @@ class ClaudeCodeBlock(Block):
|
||||
else:
|
||||
new_conversation_history = turn_entry
|
||||
|
||||
# Extract files created/modified during this run
|
||||
files = await self._extract_files(
|
||||
sandbox, working_directory, start_timestamp
|
||||
# Extract files created/modified during this run and store to workspace
|
||||
sandbox_files = await extract_and_store_sandbox_files(
|
||||
sandbox=sandbox,
|
||||
working_directory=working_directory,
|
||||
execution_context=execution_context,
|
||||
since_timestamp=start_timestamp,
|
||||
text_only=True,
|
||||
)
|
||||
|
||||
return (
|
||||
response,
|
||||
files,
|
||||
sandbox_files, # Already SandboxFileOutput objects
|
||||
new_conversation_history,
|
||||
current_session_id,
|
||||
sandbox_id,
|
||||
@@ -471,140 +478,6 @@ class ClaudeCodeBlock(Block):
|
||||
if dispose_sandbox and sandbox:
|
||||
await sandbox.kill()
|
||||
|
||||
async def _extract_files(
|
||||
self,
|
||||
sandbox: BaseAsyncSandbox,
|
||||
working_directory: str,
|
||||
since_timestamp: str | None = None,
|
||||
) -> list["ClaudeCodeBlock.FileOutput"]:
|
||||
"""
|
||||
Extract text files created/modified during this Claude Code execution.
|
||||
|
||||
Args:
|
||||
sandbox: The E2B sandbox instance
|
||||
working_directory: Directory to search for files
|
||||
since_timestamp: ISO timestamp - only return files modified after this time
|
||||
|
||||
Returns:
|
||||
List of FileOutput objects with path, relative_path, name, and content
|
||||
"""
|
||||
files: list[ClaudeCodeBlock.FileOutput] = []
|
||||
|
||||
# Text file extensions we can safely read as text
|
||||
text_extensions = {
|
||||
".txt",
|
||||
".md",
|
||||
".html",
|
||||
".htm",
|
||||
".css",
|
||||
".js",
|
||||
".ts",
|
||||
".jsx",
|
||||
".tsx",
|
||||
".json",
|
||||
".xml",
|
||||
".yaml",
|
||||
".yml",
|
||||
".toml",
|
||||
".ini",
|
||||
".cfg",
|
||||
".conf",
|
||||
".py",
|
||||
".rb",
|
||||
".php",
|
||||
".java",
|
||||
".c",
|
||||
".cpp",
|
||||
".h",
|
||||
".hpp",
|
||||
".cs",
|
||||
".go",
|
||||
".rs",
|
||||
".swift",
|
||||
".kt",
|
||||
".scala",
|
||||
".sh",
|
||||
".bash",
|
||||
".zsh",
|
||||
".sql",
|
||||
".graphql",
|
||||
".env",
|
||||
".gitignore",
|
||||
".dockerfile",
|
||||
"Dockerfile",
|
||||
".vue",
|
||||
".svelte",
|
||||
".astro",
|
||||
".mdx",
|
||||
".rst",
|
||||
".tex",
|
||||
".csv",
|
||||
".log",
|
||||
}
|
||||
|
||||
try:
|
||||
# List files recursively using find command
|
||||
# Exclude node_modules and .git directories, but allow hidden files
|
||||
# like .env and .gitignore (they're filtered by text_extensions later)
|
||||
# Filter by timestamp to only get files created/modified during this run
|
||||
safe_working_dir = shlex.quote(working_directory)
|
||||
timestamp_filter = ""
|
||||
if since_timestamp:
|
||||
timestamp_filter = f"-newermt {shlex.quote(since_timestamp)} "
|
||||
find_result = await sandbox.commands.run(
|
||||
f"find {safe_working_dir} -type f "
|
||||
f"{timestamp_filter}"
|
||||
f"-not -path '*/node_modules/*' "
|
||||
f"-not -path '*/.git/*' "
|
||||
f"2>/dev/null"
|
||||
)
|
||||
|
||||
if find_result.stdout:
|
||||
for file_path in find_result.stdout.strip().split("\n"):
|
||||
if not file_path:
|
||||
continue
|
||||
|
||||
# Check if it's a text file we can read
|
||||
is_text = any(
|
||||
file_path.endswith(ext) for ext in text_extensions
|
||||
) or file_path.endswith("Dockerfile")
|
||||
|
||||
if is_text:
|
||||
try:
|
||||
content = await sandbox.files.read(file_path)
|
||||
# Handle bytes or string
|
||||
if isinstance(content, bytes):
|
||||
content = content.decode("utf-8", errors="replace")
|
||||
|
||||
# Extract filename from path
|
||||
file_name = file_path.split("/")[-1]
|
||||
|
||||
# Calculate relative path by stripping working directory
|
||||
relative_path = file_path
|
||||
if file_path.startswith(working_directory):
|
||||
relative_path = file_path[len(working_directory) :]
|
||||
# Remove leading slash if present
|
||||
if relative_path.startswith("/"):
|
||||
relative_path = relative_path[1:]
|
||||
|
||||
files.append(
|
||||
ClaudeCodeBlock.FileOutput(
|
||||
path=file_path,
|
||||
relative_path=relative_path,
|
||||
name=file_name,
|
||||
content=content,
|
||||
)
|
||||
)
|
||||
except Exception:
|
||||
# Skip files that can't be read
|
||||
pass
|
||||
|
||||
except Exception:
|
||||
# If file extraction fails, return empty results
|
||||
pass
|
||||
|
||||
return files
|
||||
|
||||
def _escape_prompt(self, prompt: str) -> str:
|
||||
"""Escape the prompt for safe shell execution."""
|
||||
# Use single quotes and escape any single quotes in the prompt
|
||||
@@ -617,6 +490,7 @@ class ClaudeCodeBlock(Block):
|
||||
*,
|
||||
e2b_credentials: APIKeyCredentials,
|
||||
anthropic_credentials: APIKeyCredentials,
|
||||
execution_context: "ExecutionContext",
|
||||
**kwargs,
|
||||
) -> BlockOutput:
|
||||
try:
|
||||
@@ -637,6 +511,7 @@ class ClaudeCodeBlock(Block):
|
||||
existing_sandbox_id=input_data.sandbox_id,
|
||||
conversation_history=input_data.conversation_history,
|
||||
dispose_sandbox=input_data.dispose_sandbox,
|
||||
execution_context=execution_context,
|
||||
)
|
||||
|
||||
yield "response", response
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
from enum import Enum
|
||||
from typing import Any, Literal, Optional
|
||||
from typing import TYPE_CHECKING, Any, Literal, Optional
|
||||
|
||||
from e2b_code_interpreter import AsyncSandbox
|
||||
from e2b_code_interpreter import Result as E2BExecutionResult
|
||||
@@ -20,6 +20,13 @@ from backend.data.model import (
|
||||
SchemaField,
|
||||
)
|
||||
from backend.integrations.providers import ProviderName
|
||||
from backend.util.sandbox_files import (
|
||||
SandboxFileOutput,
|
||||
extract_and_store_sandbox_files,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from backend.executor.utils import ExecutionContext
|
||||
|
||||
TEST_CREDENTIALS = APIKeyCredentials(
|
||||
id="01234567-89ab-cdef-0123-456789abcdef",
|
||||
@@ -85,6 +92,9 @@ class CodeExecutionResult(MainCodeExecutionResult):
|
||||
class BaseE2BExecutorMixin:
|
||||
"""Shared implementation methods for E2B executor blocks."""
|
||||
|
||||
# Default working directory in E2B sandboxes
|
||||
WORKING_DIR = "/home/user"
|
||||
|
||||
async def execute_code(
|
||||
self,
|
||||
api_key: str,
|
||||
@@ -95,14 +105,21 @@ class BaseE2BExecutorMixin:
|
||||
timeout: Optional[int] = None,
|
||||
sandbox_id: Optional[str] = None,
|
||||
dispose_sandbox: bool = False,
|
||||
execution_context: Optional["ExecutionContext"] = None,
|
||||
extract_files: bool = False,
|
||||
):
|
||||
"""
|
||||
Unified code execution method that handles all three use cases:
|
||||
1. Create new sandbox and execute (ExecuteCodeBlock)
|
||||
2. Create new sandbox, execute, and return sandbox_id (InstantiateCodeSandboxBlock)
|
||||
3. Connect to existing sandbox and execute (ExecuteCodeStepBlock)
|
||||
|
||||
Args:
|
||||
extract_files: If True and execution_context provided, extract files
|
||||
created/modified during execution and store to workspace.
|
||||
""" # noqa
|
||||
sandbox = None
|
||||
files: list[SandboxFileOutput] = []
|
||||
try:
|
||||
if sandbox_id:
|
||||
# Connect to existing sandbox (ExecuteCodeStepBlock case)
|
||||
@@ -118,6 +135,12 @@ class BaseE2BExecutorMixin:
|
||||
for cmd in setup_commands:
|
||||
await sandbox.commands.run(cmd)
|
||||
|
||||
# Capture timestamp before execution to scope file extraction
|
||||
start_timestamp = None
|
||||
if extract_files:
|
||||
ts_result = await sandbox.commands.run("date -u +%Y-%m-%dT%H:%M:%S")
|
||||
start_timestamp = ts_result.stdout.strip() if ts_result.stdout else None
|
||||
|
||||
# Execute the code
|
||||
execution = await sandbox.run_code(
|
||||
code,
|
||||
@@ -133,7 +156,24 @@ class BaseE2BExecutorMixin:
|
||||
stdout_logs = "".join(execution.logs.stdout)
|
||||
stderr_logs = "".join(execution.logs.stderr)
|
||||
|
||||
return results, text_output, stdout_logs, stderr_logs, sandbox.sandbox_id
|
||||
# Extract files created/modified during this execution
|
||||
if extract_files and execution_context:
|
||||
files = await extract_and_store_sandbox_files(
|
||||
sandbox=sandbox,
|
||||
working_directory=self.WORKING_DIR,
|
||||
execution_context=execution_context,
|
||||
since_timestamp=start_timestamp,
|
||||
text_only=False, # Include binary files too
|
||||
)
|
||||
|
||||
return (
|
||||
results,
|
||||
text_output,
|
||||
stdout_logs,
|
||||
stderr_logs,
|
||||
sandbox.sandbox_id,
|
||||
files,
|
||||
)
|
||||
finally:
|
||||
# Dispose of sandbox if requested to reduce usage costs
|
||||
if dispose_sandbox and sandbox:
|
||||
@@ -238,6 +278,12 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
|
||||
description="Standard output logs from execution"
|
||||
)
|
||||
stderr_logs: str = SchemaField(description="Standard error logs from execution")
|
||||
files: list[SandboxFileOutput] = SchemaField(
|
||||
description=(
|
||||
"Files created or modified during execution. "
|
||||
"Each file has path, name, content, and workspace_ref (if stored)."
|
||||
),
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
@@ -259,23 +305,30 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
|
||||
("results", []),
|
||||
("response", "Hello World"),
|
||||
("stdout_logs", "Hello World\n"),
|
||||
("files", []),
|
||||
],
|
||||
test_mock={
|
||||
"execute_code": lambda api_key, code, language, template_id, setup_commands, timeout, dispose_sandbox: ( # noqa
|
||||
"execute_code": lambda api_key, code, language, template_id, setup_commands, timeout, dispose_sandbox, execution_context, extract_files: ( # noqa
|
||||
[], # results
|
||||
"Hello World", # text_output
|
||||
"Hello World\n", # stdout_logs
|
||||
"", # stderr_logs
|
||||
"sandbox_id", # sandbox_id
|
||||
[], # files
|
||||
),
|
||||
},
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
self,
|
||||
input_data: Input,
|
||||
*,
|
||||
credentials: APIKeyCredentials,
|
||||
execution_context: "ExecutionContext",
|
||||
**kwargs,
|
||||
) -> BlockOutput:
|
||||
try:
|
||||
results, text_output, stdout, stderr, _ = await self.execute_code(
|
||||
results, text_output, stdout, stderr, _, files = await self.execute_code(
|
||||
api_key=credentials.api_key.get_secret_value(),
|
||||
code=input_data.code,
|
||||
language=input_data.language,
|
||||
@@ -283,6 +336,8 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
|
||||
setup_commands=input_data.setup_commands,
|
||||
timeout=input_data.timeout,
|
||||
dispose_sandbox=input_data.dispose_sandbox,
|
||||
execution_context=execution_context,
|
||||
extract_files=True,
|
||||
)
|
||||
|
||||
# Determine result object shape & filter out empty formats
|
||||
@@ -296,6 +351,8 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
|
||||
yield "stdout_logs", stdout
|
||||
if stderr:
|
||||
yield "stderr_logs", stderr
|
||||
# Always yield files (empty list if none)
|
||||
yield "files", [f.model_dump() for f in files]
|
||||
except Exception as e:
|
||||
yield "error", str(e)
|
||||
|
||||
@@ -393,6 +450,7 @@ class InstantiateCodeSandboxBlock(Block, BaseE2BExecutorMixin):
|
||||
"Hello World\n", # stdout_logs
|
||||
"", # stderr_logs
|
||||
"sandbox_id", # sandbox_id
|
||||
[], # files
|
||||
),
|
||||
},
|
||||
)
|
||||
@@ -401,7 +459,7 @@ class InstantiateCodeSandboxBlock(Block, BaseE2BExecutorMixin):
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
try:
|
||||
_, text_output, stdout, stderr, sandbox_id = await self.execute_code(
|
||||
_, text_output, stdout, stderr, sandbox_id, _ = await self.execute_code(
|
||||
api_key=credentials.api_key.get_secret_value(),
|
||||
code=input_data.setup_code,
|
||||
language=input_data.language,
|
||||
@@ -500,6 +558,7 @@ class ExecuteCodeStepBlock(Block, BaseE2BExecutorMixin):
|
||||
"Hello World\n", # stdout_logs
|
||||
"", # stderr_logs
|
||||
sandbox_id, # sandbox_id
|
||||
[], # files
|
||||
),
|
||||
},
|
||||
)
|
||||
@@ -508,7 +567,7 @@ class ExecuteCodeStepBlock(Block, BaseE2BExecutorMixin):
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
try:
|
||||
results, text_output, stdout, stderr, _ = await self.execute_code(
|
||||
results, text_output, stdout, stderr, _, _ = await self.execute_code(
|
||||
api_key=credentials.api_key.get_secret_value(),
|
||||
code=input_data.step_code,
|
||||
language=input_data.language,
|
||||
|
||||
@@ -38,6 +38,7 @@ class Flag(str, Enum):
|
||||
AGENT_ACTIVITY = "agent-activity"
|
||||
ENABLE_PLATFORM_PAYMENT = "enable-platform-payment"
|
||||
CHAT = "chat"
|
||||
COPILOT_SDK = "copilot-sdk"
|
||||
|
||||
|
||||
def is_configured() -> bool:
|
||||
|
||||
autogpt_platform/backend/backend/util/sandbox_files.py (new file, 288 lines)
@@ -0,0 +1,288 @@
"""
Shared utilities for extracting and storing files from E2B sandboxes.

This module provides common file extraction and workspace storage functionality
for blocks that run code in E2B sandboxes (Claude Code, Code Executor, etc.).
"""

import base64
import logging
import mimetypes
import shlex
from dataclasses import dataclass
from typing import TYPE_CHECKING

from pydantic import BaseModel

from backend.util.file import store_media_file
from backend.util.type import MediaFileType

if TYPE_CHECKING:
    from e2b import AsyncSandbox as BaseAsyncSandbox

    from backend.executor.utils import ExecutionContext

logger = logging.getLogger(__name__)

# Text file extensions that can be safely read and stored as text
TEXT_EXTENSIONS = {
    ".txt",
    ".md",
    ".html",
    ".htm",
    ".css",
    ".js",
    ".ts",
    ".jsx",
    ".tsx",
    ".json",
    ".xml",
    ".yaml",
    ".yml",
    ".toml",
    ".ini",
    ".cfg",
    ".conf",
    ".py",
    ".rb",
    ".php",
    ".java",
    ".c",
    ".cpp",
    ".h",
    ".hpp",
    ".cs",
    ".go",
    ".rs",
    ".swift",
    ".kt",
    ".scala",
    ".sh",
    ".bash",
    ".zsh",
    ".sql",
    ".graphql",
    ".env",
    ".gitignore",
    ".dockerfile",
    "Dockerfile",
    ".vue",
    ".svelte",
    ".astro",
    ".mdx",
    ".rst",
    ".tex",
    ".csv",
    ".log",
}


class SandboxFileOutput(BaseModel):
    """A file extracted from a sandbox and optionally stored in workspace."""

    path: str
    """Full path in the sandbox."""

    relative_path: str
    """Path relative to the working directory."""

    name: str
    """Filename only."""

    content: str
    """File content as text (for backward compatibility)."""

    workspace_ref: str | None = None
    """Workspace reference (workspace://{id}#mime) if stored, None otherwise."""


@dataclass
class ExtractedFile:
    """Internal representation of an extracted file before storage."""

    path: str
    relative_path: str
    name: str
    content: bytes
    is_text: bool


async def extract_sandbox_files(
    sandbox: "BaseAsyncSandbox",
    working_directory: str,
    since_timestamp: str | None = None,
    text_only: bool = True,
) -> list[ExtractedFile]:
    """
    Extract files from an E2B sandbox.

    Args:
        sandbox: The E2B sandbox instance
        working_directory: Directory to search for files
        since_timestamp: ISO timestamp - only return files modified after this time
        text_only: If True, only extract text files (default). If False, extract all files.

    Returns:
        List of ExtractedFile objects with path, content, and metadata
    """
    files: list[ExtractedFile] = []

    try:
        # Build find command
        safe_working_dir = shlex.quote(working_directory)
        timestamp_filter = ""
        if since_timestamp:
            timestamp_filter = f"-newermt {shlex.quote(since_timestamp)} "

        find_result = await sandbox.commands.run(
            f"find {safe_working_dir} -type f "
            f"{timestamp_filter}"
            f"-not -path '*/node_modules/*' "
            f"-not -path '*/.git/*' "
            f"2>/dev/null"
        )

        if not find_result.stdout:
            return files

        for file_path in find_result.stdout.strip().split("\n"):
            if not file_path:
                continue

            # Check if it's a text file
            is_text = any(file_path.endswith(ext) for ext in TEXT_EXTENSIONS)

            # Skip non-text files if text_only mode
            if text_only and not is_text:
                continue

            try:
                # Read file content as bytes
                content = await sandbox.files.read(file_path, format="bytes")
                if isinstance(content, str):
                    content = content.encode("utf-8")
                elif isinstance(content, bytearray):
                    content = bytes(content)

                # Extract filename from path
                file_name = file_path.split("/")[-1]

                # Calculate relative path
                relative_path = file_path
                if file_path.startswith(working_directory):
                    relative_path = file_path[len(working_directory) :]
                    if relative_path.startswith("/"):
                        relative_path = relative_path[1:]

                files.append(
                    ExtractedFile(
                        path=file_path,
                        relative_path=relative_path,
                        name=file_name,
                        content=content,
                        is_text=is_text,
                    )
                )
            except Exception as e:
                logger.debug(f"Failed to read file {file_path}: {e}")
                continue

    except Exception as e:
        logger.warning(f"File extraction failed: {e}")

    return files


async def store_sandbox_files(
    extracted_files: list[ExtractedFile],
    execution_context: "ExecutionContext",
) -> list[SandboxFileOutput]:
    """
    Store extracted sandbox files to workspace and return output objects.

    Args:
        extracted_files: List of files extracted from sandbox
        execution_context: Execution context for workspace storage

    Returns:
        List of SandboxFileOutput objects with workspace refs
    """
    outputs: list[SandboxFileOutput] = []

    for file in extracted_files:
        # Decode content for text files (for backward compat content field)
        if file.is_text:
            try:
                content_str = file.content.decode("utf-8", errors="replace")
            except Exception:
                content_str = ""
        else:
            content_str = f"[Binary file: {len(file.content)} bytes]"

        # Build data URI (needed for storage and as binary fallback)
        mime_type = mimetypes.guess_type(file.name)[0] or "application/octet-stream"
        data_uri = f"data:{mime_type};base64,{base64.b64encode(file.content).decode()}"

        # Try to store in workspace
        workspace_ref: str | None = None
        try:
            result = await store_media_file(
                file=MediaFileType(data_uri),
                execution_context=execution_context,
                return_format="for_block_output",
            )
            if result.startswith("workspace://"):
                workspace_ref = result
            elif not file.is_text:
                # Non-workspace context (graph execution): store_media_file
                # returned a data URI — use it as content so binary data isn't lost.
                content_str = result
        except Exception as e:
            logger.warning(f"Failed to store file {file.name} to workspace: {e}")
            # For binary files, fall back to data URI to prevent data loss
            if not file.is_text:
                content_str = data_uri

        outputs.append(
            SandboxFileOutput(
                path=file.path,
                relative_path=file.relative_path,
                name=file.name,
                content=content_str,
                workspace_ref=workspace_ref,
            )
        )

    return outputs


async def extract_and_store_sandbox_files(
    sandbox: "BaseAsyncSandbox",
    working_directory: str,
    execution_context: "ExecutionContext",
    since_timestamp: str | None = None,
    text_only: bool = True,
) -> list[SandboxFileOutput]:
    """
    Extract files from sandbox and store them in workspace.

    This is the main entry point combining extraction and storage.

    Args:
        sandbox: The E2B sandbox instance
        working_directory: Directory to search for files
        execution_context: Execution context for workspace storage
        since_timestamp: ISO timestamp - only return files modified after this time
        text_only: If True, only extract text files

    Returns:
        List of SandboxFileOutput objects with content and workspace refs
    """
    extracted = await extract_sandbox_files(
        sandbox=sandbox,
        working_directory=working_directory,
        since_timestamp=since_timestamp,
        text_only=text_only,
    )

    return await store_sandbox_files(extracted, execution_context)
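A self-contained sketch of the data-URI step used by store_sandbox_files above (the file name and bytes are made-up examples; the closing comment paraphrases the fallback behaviour handled in the code above):

    import base64
    import mimetypes

    name = "report.csv"                # hypothetical extracted file
    content = b"col_a,col_b\n1,2\n"    # hypothetical bytes read from the sandbox

    mime_type = mimetypes.guess_type(name)[0] or "application/octet-stream"
    data_uri = f"data:{mime_type};base64,{base64.b64encode(content).decode()}"
    print(data_uri)  # data:text/csv;base64,Y29sX2EsY29sX2IKMSwyCg==

    # store_media_file() then either returns a workspace://<id>#<mime> reference
    # (workspace context) or echoes a data URI back (plain graph execution),
    # which ends up in SandboxFileOutput.workspace_ref / .content respectively.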
autogpt_platform/backend/poetry.lock (generated, 94 lines changed)
@@ -897,6 +897,29 @@ files = [
|
||||
{file = "charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "claude-agent-sdk"
|
||||
version = "0.1.35"
|
||||
description = "Python SDK for Claude Code"
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "claude_agent_sdk-0.1.35-py3-none-macosx_11_0_arm64.whl", hash = "sha256:df67f4deade77b16a9678b3a626c176498e40417f33b04beda9628287f375591"},
|
||||
{file = "claude_agent_sdk-0.1.35-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:14963944f55ded7c8ed518feebfa5b4284aa6dd8d81aeff2e5b21a962ce65097"},
|
||||
{file = "claude_agent_sdk-0.1.35-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:84344dcc535d179c1fc8a11c6f34c37c3b583447bdf09d869effb26514fd7a65"},
|
||||
{file = "claude_agent_sdk-0.1.35-py3-none-win_amd64.whl", hash = "sha256:1b3d54b47448c93f6f372acd4d1757f047c3c1e8ef5804be7a1e3e53e2c79a5f"},
|
||||
{file = "claude_agent_sdk-0.1.35.tar.gz", hash = "sha256:0f98e2b3c71ca85abfc042e7a35c648df88e87fda41c52e6779ef7b038dcbb52"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
anyio = ">=4.0.0"
|
||||
mcp = ">=0.1.0"
|
||||
typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""}
|
||||
|
||||
[package.extras]
|
||||
dev = ["anyio[trio] (>=4.0.0)", "mypy (>=1.0.0)", "pytest (>=7.0.0)", "pytest-asyncio (>=0.20.0)", "pytest-cov (>=4.0.0)", "ruff (>=0.1.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "cleo"
|
||||
version = "2.1.0"
|
||||
@@ -2593,6 +2616,18 @@ http2 = ["h2 (>=3,<5)"]
|
||||
socks = ["socksio (==1.*)"]
|
||||
zstd = ["zstandard (>=0.18.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "httpx-sse"
|
||||
version = "0.4.3"
|
||||
description = "Consume Server-Sent Event (SSE) messages with HTTPX."
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "httpx_sse-0.4.3-py3-none-any.whl", hash = "sha256:0ac1c9fe3c0afad2e0ebb25a934a59f4c7823b60792691f779fad2c5568830fc"},
|
||||
{file = "httpx_sse-0.4.3.tar.gz", hash = "sha256:9b1ed0127459a66014aec3c56bebd93da3c1bc8bb6618c8082039a44889a755d"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "huggingface-hub"
|
||||
version = "1.4.1"
|
||||
@@ -3310,6 +3345,39 @@ files = [
|
||||
{file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mcp"
|
||||
version = "1.26.0"
|
||||
description = "Model Context Protocol SDK"
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "mcp-1.26.0-py3-none-any.whl", hash = "sha256:904a21c33c25aa98ddbeb47273033c435e595bbacfdb177f4bd87f6dceebe1ca"},
|
||||
{file = "mcp-1.26.0.tar.gz", hash = "sha256:db6e2ef491eecc1a0d93711a76f28dec2e05999f93afd48795da1c1137142c66"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
anyio = ">=4.5"
|
||||
httpx = ">=0.27.1"
|
||||
httpx-sse = ">=0.4"
|
||||
jsonschema = ">=4.20.0"
|
||||
pydantic = ">=2.11.0,<3.0.0"
|
||||
pydantic-settings = ">=2.5.2"
|
||||
pyjwt = {version = ">=2.10.1", extras = ["crypto"]}
|
||||
python-multipart = ">=0.0.9"
|
||||
pywin32 = {version = ">=310", markers = "sys_platform == \"win32\""}
|
||||
sse-starlette = ">=1.6.1"
|
||||
starlette = ">=0.27"
|
||||
typing-extensions = ">=4.9.0"
|
||||
typing-inspection = ">=0.4.1"
|
||||
uvicorn = {version = ">=0.31.1", markers = "sys_platform != \"emscripten\""}
|
||||
|
||||
[package.extras]
|
||||
cli = ["python-dotenv (>=1.0.0)", "typer (>=0.16.0)"]
|
||||
rich = ["rich (>=13.9.4)"]
|
||||
ws = ["websockets (>=15.0.1)"]
|
||||
|
||||
[[package]]
|
||||
name = "mdurl"
|
||||
version = "0.1.2"
|
||||
@@ -5994,7 +6062,7 @@ description = "Python for Window Extensions"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
markers = "platform_system == \"Windows\""
|
||||
markers = "sys_platform == \"win32\" or platform_system == \"Windows\""
|
||||
files = [
|
||||
{file = "pywin32-311-cp310-cp310-win32.whl", hash = "sha256:d03ff496d2a0cd4a5893504789d4a15399133fe82517455e78bad62efbb7f0a3"},
|
||||
{file = "pywin32-311-cp310-cp310-win_amd64.whl", hash = "sha256:797c2772017851984b97180b0bebe4b620bb86328e8a884bb626156295a63b3b"},
|
||||
@@ -6974,6 +7042,28 @@ postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"]
|
||||
pymysql = ["pymysql"]
|
||||
sqlcipher = ["sqlcipher3_binary"]
|
||||
|
||||
[[package]]
|
||||
name = "sse-starlette"
|
||||
version = "3.2.0"
|
||||
description = "SSE plugin for Starlette"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "sse_starlette-3.2.0-py3-none-any.whl", hash = "sha256:5876954bd51920fc2cd51baee47a080eb88a37b5b784e615abb0b283f801cdbf"},
|
||||
{file = "sse_starlette-3.2.0.tar.gz", hash = "sha256:8127594edfb51abe44eac9c49e59b0b01f1039d0c7461c6fd91d4e03b70da422"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
anyio = ">=4.7.0"
|
||||
starlette = ">=0.49.1"
|
||||
|
||||
[package.extras]
|
||||
daphne = ["daphne (>=4.2.0)"]
|
||||
examples = ["aiosqlite (>=0.21.0)", "fastapi (>=0.115.12)", "sqlalchemy[asyncio] (>=2.0.41)", "uvicorn (>=0.34.0)"]
|
||||
granian = ["granian (>=2.3.1)"]
|
||||
uvicorn = ["uvicorn (>=0.34.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "stagehand"
|
||||
version = "0.5.9"
|
||||
@@ -8440,4 +8530,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
|
||||
[metadata]
|
||||
lock-version = "2.1"
|
||||
python-versions = ">=3.10,<3.14"
|
||||
content-hash = "c06e96ad49388ba7a46786e9ea55ea2c1a57408e15613237b4bee40a592a12af"
|
||||
content-hash = "942dea6daf671c3be65a22f3445feda26c1af9409d7173765e9a0742f0aa05dc"
|
||||
|
||||
@@ -16,6 +16,7 @@ anthropic = "^0.79.0"
|
||||
apscheduler = "^3.11.1"
|
||||
autogpt-libs = { path = "../autogpt_libs", develop = true }
|
||||
bleach = { extras = ["css"], version = "^6.2.0" }
|
||||
claude-agent-sdk = "^0.1.0"
|
||||
click = "^8.2.0"
|
||||
cryptography = "^46.0"
|
||||
discord-py = "^2.5.2"
|
||||
|
||||
autogpt_platform/backend/test/chat/__init__.py (new file, empty)
autogpt_platform/backend/test/chat/test_security_hooks.py (new file, 133 lines)
@@ -0,0 +1,133 @@
"""Tests for SDK security hooks — workspace paths, tool access, and deny messages.

These are pure unit tests with no external dependencies (no SDK, no DB, no server).
They validate that the security hooks correctly block unauthorized paths,
tool access, and dangerous input patterns.

Note: Bash command validation was removed — the SDK built-in Bash tool is not in
allowed_tools, and the bash_exec MCP tool has kernel-level network isolation
(unshare --net) making command-level parsing unnecessary.
"""

from backend.api.features.chat.sdk.security_hooks import (
    _validate_tool_access,
    _validate_workspace_path,
)

SDK_CWD = "/tmp/copilot-test-session"


def _is_denied(result: dict) -> bool:
    hook = result.get("hookSpecificOutput", {})
    return hook.get("permissionDecision") == "deny"


def _reason(result: dict) -> str:
    return result.get("hookSpecificOutput", {}).get("permissionDecisionReason", "")


# ============================================================
# Workspace path validation (Read, Write, Edit, etc.)
# ============================================================


class TestWorkspacePathValidation:
    def test_path_in_workspace(self):
        result = _validate_workspace_path(
            "Read", {"file_path": f"{SDK_CWD}/file.txt"}, SDK_CWD
        )
        assert not _is_denied(result)

    def test_path_outside_workspace(self):
        result = _validate_workspace_path("Read", {"file_path": "/etc/passwd"}, SDK_CWD)
        assert _is_denied(result)

    def test_tool_results_allowed(self):
        result = _validate_workspace_path(
            "Read",
            {"file_path": "~/.claude/projects/abc/tool-results/out.txt"},
            SDK_CWD,
        )
        assert not _is_denied(result)

    def test_claude_settings_blocked(self):
        result = _validate_workspace_path(
            "Read", {"file_path": "~/.claude/settings.json"}, SDK_CWD
        )
        assert _is_denied(result)

    def test_claude_projects_without_tool_results(self):
        result = _validate_workspace_path(
            "Read", {"file_path": "~/.claude/projects/abc/credentials.json"}, SDK_CWD
        )
        assert _is_denied(result)

    def test_no_path_allowed(self):
        """Glob/Grep without path defaults to cwd — should be allowed."""
        result = _validate_workspace_path("Grep", {"pattern": "foo"}, SDK_CWD)
        assert not _is_denied(result)

    def test_path_traversal_with_dotdot(self):
        result = _validate_workspace_path(
            "Read", {"file_path": f"{SDK_CWD}/../../../etc/passwd"}, SDK_CWD
        )
        assert _is_denied(result)


# ============================================================
# Tool access validation
# ============================================================


class TestToolAccessValidation:
    def test_blocked_tools(self):
        for tool in ("bash", "shell", "exec", "terminal", "command"):
            result = _validate_tool_access(tool, {})
            assert _is_denied(result), f"Tool '{tool}' should be blocked"

    def test_bash_builtin_blocked(self):
        """SDK built-in Bash (capital) is blocked as defence-in-depth."""
        result = _validate_tool_access("Bash", {"command": "echo hello"}, SDK_CWD)
        assert _is_denied(result)
        assert "Bash" in _reason(result)

    def test_workspace_tools_delegate(self):
        result = _validate_tool_access(
            "Read", {"file_path": f"{SDK_CWD}/file.txt"}, SDK_CWD
        )
        assert not _is_denied(result)

    def test_dangerous_pattern_blocked(self):
        result = _validate_tool_access("SomeUnknownTool", {"data": "sudo rm -rf /"})
        assert _is_denied(result)

    def test_safe_unknown_tool_allowed(self):
        result = _validate_tool_access("SomeSafeTool", {"data": "hello world"})
        assert not _is_denied(result)


# ============================================================
# Deny message quality (ntindle feedback)
# ============================================================


class TestDenyMessageClarity:
    """Deny messages must include [SECURITY] and 'cannot be bypassed'
    so the model knows the restriction is enforced, not a suggestion."""

    def test_blocked_tool_message(self):
        reason = _reason(_validate_tool_access("bash", {}))
        assert "[SECURITY]" in reason
        assert "cannot be bypassed" in reason

    def test_bash_builtin_blocked_message(self):
        reason = _reason(_validate_tool_access("Bash", {"command": "echo hello"}))
        assert "[SECURITY]" in reason
        assert "cannot be bypassed" in reason

    def test_workspace_path_message(self):
        reason = _reason(
            _validate_workspace_path("Read", {"file_path": "/etc/passwd"}, SDK_CWD)
        )
        assert "[SECURITY]" in reason
        assert "cannot be bypassed" in reason
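For reference, the deny decision shape these tests exercise looks like the following (a sketch; the exact reason wording is illustrative, only the [SECURITY] prefix and the "cannot be bypassed" phrase are asserted above):

    deny_result = {
        "hookSpecificOutput": {
            "permissionDecision": "deny",
            "permissionDecisionReason": (
                "[SECURITY] Tool 'bash' is blocked. "
                "This restriction cannot be bypassed."
            ),
        }
    }
    assert deny_result["hookSpecificOutput"]["permissionDecision"] == "deny"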
@@ -1,30 +1,100 @@
|
||||
// import { Separator } from "@/components/__legacy__/ui/separator";
|
||||
import { cn } from "@/lib/utils";
|
||||
import React, { memo } from "react";
|
||||
import { BlockMenu } from "./NewBlockMenu/BlockMenu/BlockMenu";
|
||||
import { useNewControlPanel } from "./useNewControlPanel";
|
||||
// import { NewSaveControl } from "../SaveControl/NewSaveControl";
|
||||
import { GraphExecutionID } from "@/lib/autogpt-server-api";
|
||||
// import { ControlPanelButton } from "../ControlPanelButton";
|
||||
// import { GraphSearchMenu } from "../GraphMenu/GraphMenu";
|
||||
import { Separator } from "@/components/__legacy__/ui/separator";
|
||||
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
|
||||
import { CustomNode } from "../FlowEditor/nodes/CustomNode/CustomNode";
|
||||
import { NewSaveControl } from "./NewSaveControl/NewSaveControl";
|
||||
import { UndoRedoButtons } from "./UndoRedoButtons";
|
||||
|
||||
export const NewControlPanel = memo(() => {
|
||||
useNewControlPanel({});
|
||||
export type Control = {
|
||||
icon: React.ReactNode;
|
||||
label: string;
|
||||
disabled?: boolean;
|
||||
onClick: () => void;
|
||||
};
|
||||
|
||||
return (
|
||||
<section
|
||||
className={cn(
|
||||
"absolute left-4 top-10 z-10 overflow-hidden rounded-[1rem] border-none bg-white p-0 shadow-[0_1px_5px_0_rgba(0,0,0,0.1)]",
|
||||
)}
|
||||
>
|
||||
<div className="flex flex-col items-center justify-center rounded-[1rem] p-0">
|
||||
<BlockMenu />
|
||||
<Separator className="text-[#E1E1E1]" />
|
||||
<NewSaveControl />
|
||||
<Separator className="text-[#E1E1E1]" />
|
||||
<UndoRedoButtons />
|
||||
</div>
|
||||
</section>
|
||||
);
|
||||
});
|
||||
export type NewControlPanelProps = {
|
||||
flowExecutionID?: GraphExecutionID | undefined;
|
||||
visualizeBeads?: "no" | "static" | "animate";
|
||||
pinSavePopover?: boolean;
|
||||
pinBlocksPopover?: boolean;
|
||||
nodes?: CustomNode[];
|
||||
onNodeSelect?: (nodeId: string) => void;
|
||||
onNodeHover?: (nodeId: string) => void;
|
||||
};
|
||||
export const NewControlPanel = memo(
|
||||
({
|
||||
flowExecutionID: _flowExecutionID,
|
||||
visualizeBeads: _visualizeBeads,
|
||||
pinSavePopover: _pinSavePopover,
|
||||
pinBlocksPopover: _pinBlocksPopover,
|
||||
nodes: _nodes,
|
||||
onNodeSelect: _onNodeSelect,
|
||||
onNodeHover: _onNodeHover,
|
||||
}: NewControlPanelProps) => {
|
||||
const _isGraphSearchEnabled = useGetFlag(Flag.GRAPH_SEARCH);
|
||||
|
||||
const {
|
||||
// agentDescription,
|
||||
// setAgentDescription,
|
||||
// saveAgent,
|
||||
// agentName,
|
||||
// setAgentName,
|
||||
// savedAgent,
|
||||
// isSaving,
|
||||
// isRunning,
|
||||
// isStopping,
|
||||
} = useNewControlPanel({});
|
||||
|
||||
return (
|
||||
<section
|
||||
className={cn(
|
||||
"absolute left-4 top-10 z-10 overflow-hidden rounded-[1rem] border-none bg-white p-0 shadow-[0_1px_5px_0_rgba(0,0,0,0.1)]",
|
||||
)}
|
||||
>
|
||||
<div className="flex flex-col items-center justify-center rounded-[1rem] p-0">
|
||||
<BlockMenu />
|
||||
{/* <Separator className="text-[#E1E1E1]" />
|
||||
{isGraphSearchEnabled && (
|
||||
<>
|
||||
<GraphSearchMenu
|
||||
nodes={nodes}
|
||||
blockMenuSelected={blockMenuSelected}
|
||||
setBlockMenuSelected={setBlockMenuSelected}
|
||||
onNodeSelect={onNodeSelect}
|
||||
onNodeHover={onNodeHover}
|
||||
/>
|
||||
<Separator className="text-[#E1E1E1]" />
|
||||
</>
|
||||
)}
|
||||
{controls.map((control, index) => (
|
||||
<ControlPanelButton
|
||||
key={index}
|
||||
onClick={() => control.onClick()}
|
||||
data-id={`control-button-${index}`}
|
||||
data-testid={`blocks-control-${control.label.toLowerCase()}-button`}
|
||||
disabled={control.disabled || false}
|
||||
className="rounded-none"
|
||||
>
|
||||
{control.icon}
|
||||
</ControlPanelButton>
|
||||
))} */}
|
||||
<Separator className="text-[#E1E1E1]" />
|
||||
<NewSaveControl />
|
||||
<Separator className="text-[#E1E1E1]" />
|
||||
<UndoRedoButtons />
|
||||
</div>
|
||||
</section>
|
||||
);
|
||||
},
|
||||
);
|
||||
|
||||
export default NewControlPanel;
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { CustomNode } from "../../../FlowEditor/nodes/CustomNode/CustomNode";
|
||||
import { CustomNode } from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
|
||||
import {
|
||||
Popover,
|
||||
PopoverContent,
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { useGraphSearch } from "../GraphMenuSearchBar/useGraphMenuSearchBar";
|
||||
import { CustomNode } from "../../../FlowEditor/nodes/CustomNode/CustomNode";
|
||||
import { CustomNode } from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
|
||||
|
||||
interface UseGraphMenuProps {
|
||||
nodes: CustomNode[];
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import React from "react";
|
||||
import { Separator } from "@/components/__legacy__/ui/separator";
|
||||
import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
|
||||
import { beautifyString, categoryColorMap } from "@/lib/utils";
|
||||
import { beautifyString, getPrimaryCategoryColor } from "@/lib/utils";
|
||||
import { SearchableNode } from "../GraphMenuSearchBar/useGraphMenuSearchBar";
|
||||
import { TextRenderer } from "@/components/__legacy__/ui/render";
|
||||
import {
|
||||
@@ -73,12 +73,14 @@ export const GraphSearchContent: React.FC<GraphSearchContentProps> = ({
|
||||
}
|
||||
|
||||
const nodeTitle =
|
||||
(node.data?.metadata?.customized_name as string) ||
|
||||
beautifyString(node.data?.title || "").replace(/ Block$/, "");
|
||||
const nodeType = beautifyString(node.data?.title || "").replace(
|
||||
/ Block$/,
|
||||
"",
|
||||
);
|
||||
node.data?.metadata?.customized_name ||
|
||||
beautifyString(node.data?.blockType || "").replace(
|
||||
/ Block$/,
|
||||
"",
|
||||
);
|
||||
const nodeType = beautifyString(
|
||||
node.data?.blockType || "",
|
||||
).replace(/ Block$/, "");
|
||||
|
||||
return (
|
||||
<TooltipProvider key={node.id}>
|
||||
@@ -98,13 +100,7 @@ export const GraphSearchContent: React.FC<GraphSearchContentProps> = ({
|
||||
onMouseLeave={() => onNodeHover?.(null)}
|
||||
>
|
||||
<div
|
||||
className={`h-full w-3 rounded-l-[7px] ${
|
||||
(node.data?.categories?.[0]?.category &&
|
||||
categoryColorMap[
|
||||
node.data.categories[0].category
|
||||
]) ||
|
||||
"bg-gray-300 dark:bg-slate-700"
|
||||
}`}
|
||||
className={`h-full w-3 rounded-l-[7px] ${getPrimaryCategoryColor(node.data?.categories)}`}
|
||||
/>
|
||||
<div className="mx-3 flex flex-1 items-center justify-between">
|
||||
<div className="mr-2 min-w-0">
|
||||
@@ -133,10 +129,9 @@ export const GraphSearchContent: React.FC<GraphSearchContentProps> = ({
|
||||
<div className="font-semibold">
|
||||
Node Type: {nodeType}
|
||||
</div>
|
||||
{!!node.data?.metadata?.customized_name && (
|
||||
{node.data?.metadata?.customized_name && (
|
||||
<div className="text-xs text-gray-500">
|
||||
Custom Name:{" "}
|
||||
{String(node.data.metadata.customized_name)}
|
||||
Custom Name: {node.data.metadata.customized_name}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { useState, useMemo, useDeferredValue } from "react";
|
||||
import { CustomNode } from "../../../FlowEditor/nodes/CustomNode/CustomNode";
|
||||
import { CustomNode } from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
|
||||
import { beautifyString } from "@/lib/utils";
|
||||
import jaro from "jaro-winkler";
|
||||
|
||||
@@ -67,10 +67,10 @@ function calculateNodeScore(
|
||||
const nodeTitle = (node.data?.title || "").toLowerCase(); // This includes the ID
|
||||
const nodeId = (node.id || "").toLowerCase();
|
||||
const nodeDescription = (node.data?.description || "").toLowerCase();
|
||||
const blockType = (node.data?.title || "").toLowerCase();
|
||||
const blockType = (node.data?.blockType || "").toLowerCase();
|
||||
const beautifiedBlockType = beautifyString(blockType).toLowerCase();
|
||||
const customizedName = String(
|
||||
node.data?.metadata?.customized_name || "",
|
||||
const customizedName = (
|
||||
node.data?.metadata?.customized_name || ""
|
||||
).toLowerCase();
|
||||
|
||||
// Get input and output names with defensive checks
|
||||
|
||||
@@ -1,18 +1,54 @@
|
||||
import { GraphID } from "@/lib/autogpt-server-api";
|
||||
import { useSearchParams } from "next/navigation";
|
||||
import { useState } from "react";
|
||||
|
||||
export interface NewControlPanelProps {
|
||||
// flowExecutionID: GraphExecutionID | undefined;
|
||||
visualizeBeads?: "no" | "static" | "animate";
|
||||
}
|
||||
|
||||
export const useNewControlPanel = ({
|
||||
// flowExecutionID,
|
||||
visualizeBeads: _visualizeBeads,
|
||||
}: NewControlPanelProps) => {
|
||||
const [blockMenuSelected, setBlockMenuSelected] = useState<
|
||||
"save" | "block" | "search" | ""
|
||||
>("");
|
||||
const query = useSearchParams();
|
||||
const _graphVersion = query.get("flowVersion");
|
||||
const _graphVersionParsed = _graphVersion
|
||||
? parseInt(_graphVersion)
|
||||
: undefined;
|
||||
|
||||
const _flowID = (query.get("flowID") as GraphID | null) ?? undefined;
|
||||
// const {
|
||||
// agentDescription,
|
||||
// setAgentDescription,
|
||||
// saveAgent,
|
||||
// agentName,
|
||||
// setAgentName,
|
||||
// savedAgent,
|
||||
// isSaving,
|
||||
// isRunning,
|
||||
// isStopping,
|
||||
// } = useAgentGraph(
|
||||
// flowID,
|
||||
// graphVersion,
|
||||
// flowExecutionID,
|
||||
// visualizeBeads !== "no",
|
||||
// );
|
||||
|
||||
return {
|
||||
blockMenuSelected,
|
||||
setBlockMenuSelected,
|
||||
// agentDescription,
|
||||
// setAgentDescription,
|
||||
// saveAgent,
|
||||
// agentName,
|
||||
// setAgentName,
|
||||
// savedAgent,
|
||||
// isSaving,
|
||||
// isRunning,
|
||||
// isStopping,
|
||||
};
|
||||
};
|
||||
|
||||
@@ -0,0 +1,443 @@
|
||||
import React, { useCallback, useMemo, useState, useDeferredValue } from "react";
|
||||
import { Card, CardContent, CardHeader } from "@/components/__legacy__/ui/card";
|
||||
import { Label } from "@/components/__legacy__/ui/label";
|
||||
import { Button } from "@/components/__legacy__/ui/button";
|
||||
import { Input } from "@/components/__legacy__/ui/input";
|
||||
import { TextRenderer } from "@/components/__legacy__/ui/render";
|
||||
import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
|
||||
import { CustomNode } from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
|
||||
import { beautifyString } from "@/lib/utils";
|
||||
import {
|
||||
Popover,
|
||||
PopoverContent,
|
||||
PopoverTrigger,
|
||||
} from "@/components/__legacy__/ui/popover";
|
||||
import {
|
||||
Block,
|
||||
BlockIORootSchema,
|
||||
BlockUIType,
|
||||
GraphInputSchema,
|
||||
GraphOutputSchema,
|
||||
SpecialBlockID,
|
||||
} from "@/lib/autogpt-server-api";
|
||||
import { MagnifyingGlassIcon, PlusIcon } from "@radix-ui/react-icons";
|
||||
import { IconToyBrick } from "@/components/__legacy__/ui/icons";
|
||||
import { getPrimaryCategoryColor } from "@/lib/utils";
|
||||
import {
|
||||
Tooltip,
|
||||
TooltipContent,
|
||||
TooltipTrigger,
|
||||
} from "@/components/atoms/Tooltip/BaseTooltip";
|
||||
import { GraphMeta } from "@/lib/autogpt-server-api";
|
||||
import jaro from "jaro-winkler";
|
||||
import { getV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
|
||||
import { okData } from "@/app/api/helpers";
|
||||
|
||||
type _Block = Omit<Block, "inputSchema" | "outputSchema"> & {
|
||||
uiKey?: string;
|
||||
inputSchema: BlockIORootSchema | GraphInputSchema;
|
||||
outputSchema: BlockIORootSchema | GraphOutputSchema;
|
||||
hardcodedValues?: Record<string, any>;
|
||||
_cached?: {
|
||||
blockName: string;
|
||||
beautifiedName: string;
|
||||
description: string;
|
||||
};
|
||||
};
|
||||
|
||||
// Hook to preprocess blocks with cached expensive operations
|
||||
const useSearchableBlocks = (blocks: _Block[]): _Block[] => {
|
||||
return useMemo(
|
||||
() =>
|
||||
blocks.map((block) => {
|
||||
if (!block._cached) {
|
||||
block._cached = {
|
||||
blockName: block.name.toLowerCase(),
|
||||
beautifiedName: beautifyString(block.name).toLowerCase(),
|
||||
description: block.description.toLowerCase(),
|
||||
};
|
||||
}
|
||||
return block;
|
||||
}),
|
||||
[blocks],
|
||||
);
|
||||
};
|
||||
|
||||
interface BlocksControlProps {
|
||||
blocks: _Block[];
|
||||
addBlock: (
|
||||
id: string,
|
||||
name: string,
|
||||
hardcodedValues: Record<string, any>,
|
||||
) => void;
|
||||
pinBlocksPopover: boolean;
|
||||
flows: GraphMeta[];
|
||||
nodes: CustomNode[];
|
||||
}
|
||||
|
||||
/**
|
||||
* A React functional component that displays a control for managing blocks.
|
||||
*
|
||||
* @component
|
||||
* @param {Object} BlocksControlProps - The properties for the BlocksControl component.
|
||||
* @param {Block[]} BlocksControlProps.blocks - An array of blocks to be displayed and filtered.
|
||||
* @param {(id: string, name: string) => void} BlocksControlProps.addBlock - A function to call when a block is added.
|
||||
* @returns The rendered BlocksControl component.
|
||||
*/
|
||||
export function BlocksControl({
|
||||
blocks: _blocks,
|
||||
addBlock,
|
||||
pinBlocksPopover,
|
||||
flows,
|
||||
nodes,
|
||||
}: BlocksControlProps) {
|
||||
const [searchQuery, setSearchQuery] = useState("");
|
||||
const deferredSearchQuery = useDeferredValue(searchQuery);
|
||||
const [selectedCategory, setSelectedCategory] = useState<string | null>(null);
|
||||
|
||||
const blocks = useSearchableBlocks(_blocks);
|
||||
|
||||
const graphHasWebhookNodes = nodes.some((n) =>
|
||||
[BlockUIType.WEBHOOK, BlockUIType.WEBHOOK_MANUAL].includes(n.data.uiType),
|
||||
);
|
||||
const graphHasInputNodes = nodes.some(
|
||||
(n) => n.data.uiType == BlockUIType.INPUT,
|
||||
);
|
||||
|
||||
const filteredAvailableBlocks = useMemo(() => {
|
||||
const blockList = blocks
|
||||
.filter((b) => b.uiType !== BlockUIType.AGENT)
|
||||
.sort((a, b) => a.name.localeCompare(b.name));
|
||||
|
||||
// Agent blocks are created from GraphMeta which doesn't include schemas.
|
||||
// Schemas will be fetched on-demand when the block is actually added.
|
||||
const agentBlockList = flows
|
||||
.map((flow): _Block => {
|
||||
return {
|
||||
id: SpecialBlockID.AGENT,
|
||||
name: flow.name,
|
||||
description:
|
||||
`Ver.${flow.version}` +
|
||||
(flow.description ? ` | ${flow.description}` : ""),
|
||||
categories: [{ category: "AGENT", description: "" }],
|
||||
// Empty schemas - will be populated when block is added
|
||||
inputSchema: { type: "object", properties: {} },
|
||||
outputSchema: { type: "object", properties: {} },
|
||||
staticOutput: false,
|
||||
uiType: BlockUIType.AGENT,
|
||||
costs: [],
|
||||
uiKey: flow.id,
|
||||
hardcodedValues: {
|
||||
graph_id: flow.id,
|
||||
graph_version: flow.version,
|
||||
// Schemas will be fetched on-demand when block is added
|
||||
},
|
||||
};
|
||||
})
|
||||
.map(
|
||||
(agentBlock): _Block => ({
|
||||
...agentBlock,
|
||||
_cached: {
|
||||
blockName: agentBlock.name.toLowerCase(),
|
||||
beautifiedName: beautifyString(agentBlock.name).toLowerCase(),
|
||||
description: agentBlock.description.toLowerCase(),
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
return blockList
|
||||
.concat(agentBlockList)
|
||||
.map((block) => ({
|
||||
block,
|
||||
score: blockScoreForQuery(block, deferredSearchQuery),
|
||||
}))
|
||||
.filter(
|
||||
({ block, score }) =>
|
||||
score > 0 &&
|
||||
(!selectedCategory ||
|
||||
block.categories.some((cat) => cat.category === selectedCategory)),
|
||||
)
|
||||
.sort((a, b) => b.score - a.score)
|
||||
.map(({ block }) => ({
|
||||
...block,
|
||||
notAvailable:
|
||||
(block.uiType == BlockUIType.WEBHOOK &&
|
||||
graphHasWebhookNodes &&
|
||||
"Agents can only have one webhook-triggered block") ||
|
||||
(block.uiType == BlockUIType.WEBHOOK &&
|
||||
graphHasInputNodes &&
|
||||
"Webhook-triggered blocks can't be used together with input blocks") ||
|
||||
(block.uiType == BlockUIType.INPUT &&
|
||||
graphHasWebhookNodes &&
|
||||
"Input blocks can't be used together with a webhook-triggered block") ||
|
||||
null,
|
||||
}));
|
||||
}, [
|
||||
blocks,
|
||||
flows,
|
||||
selectedCategory,
|
||||
deferredSearchQuery,
|
||||
graphHasInputNodes,
|
||||
graphHasWebhookNodes,
|
||||
]);
|
||||
|
||||
const resetFilters = useCallback(() => {
|
||||
setSearchQuery("");
|
||||
setSelectedCategory(null);
|
||||
}, []);
|
||||
|
||||
// Handler to add a block, fetching graph data on-demand for agent blocks
|
||||
const handleAddBlock = useCallback(
|
||||
async (block: _Block & { notAvailable: string | null }) => {
|
||||
if (block.notAvailable) return;
|
||||
|
||||
// For agent blocks, fetch the full graph to get schemas
|
||||
if (block.uiType === BlockUIType.AGENT && block.hardcodedValues) {
|
||||
const graphID = block.hardcodedValues.graph_id as string;
|
||||
const graphVersion = block.hardcodedValues.graph_version as number;
|
||||
const graphData = okData(
|
||||
await getV1GetSpecificGraph(graphID, { version: graphVersion }),
|
||||
);
|
||||
|
||||
if (graphData) {
|
||||
addBlock(block.id, block.name, {
|
||||
...block.hardcodedValues,
|
||||
input_schema: graphData.input_schema,
|
||||
output_schema: graphData.output_schema,
|
||||
});
|
||||
} else {
|
||||
// Fallback: add without schemas (will be incomplete)
|
||||
console.error("Failed to fetch graph data for agent block");
|
||||
addBlock(block.id, block.name, block.hardcodedValues || {});
|
||||
}
|
||||
} else {
|
||||
addBlock(block.id, block.name, block.hardcodedValues || {});
|
||||
}
|
||||
},
|
||||
[addBlock],
|
||||
);
|
||||
|
||||
// Extract unique categories from blocks
|
||||
const categories = useMemo(() => {
|
||||
return Array.from(
|
||||
new Set([
|
||||
null,
|
||||
...blocks
|
||||
.flatMap((block) => block.categories.map((cat) => cat.category))
|
||||
.sort(),
|
||||
]),
|
||||
);
|
||||
}, [blocks]);
|
||||
|
||||
return (
|
||||
<Popover
|
||||
open={pinBlocksPopover ? true : undefined}
|
||||
onOpenChange={(open) => open || resetFilters()}
|
||||
>
|
||||
<Tooltip delayDuration={500}>
|
||||
<TooltipTrigger asChild>
|
||||
<PopoverTrigger asChild>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="icon"
|
||||
data-id="blocks-control-popover-trigger"
|
||||
data-testid="blocks-control-blocks-button"
|
||||
name="Blocks"
|
||||
className="dark:hover:bg-slate-800"
|
||||
>
|
||||
<IconToyBrick />
|
||||
</Button>
|
||||
</PopoverTrigger>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent side="right">Blocks</TooltipContent>
|
||||
</Tooltip>
|
||||
<PopoverContent
|
||||
side="right"
|
||||
sideOffset={22}
|
||||
align="start"
|
||||
className="absolute -top-3 w-[17rem] rounded-xl border-none p-0 shadow-none md:w-[30rem]"
|
||||
data-id="blocks-control-popover-content"
|
||||
>
|
||||
<Card className="p-3 pb-0 dark:bg-slate-900">
|
||||
<CardHeader className="flex flex-col gap-x-8 gap-y-1 p-3 px-2">
|
||||
<div className="items-center justify-between">
|
||||
<Label
|
||||
htmlFor="search-blocks"
|
||||
className="whitespace-nowrap text-base font-bold text-black dark:text-white 2xl:text-xl"
|
||||
data-id="blocks-control-label"
|
||||
data-testid="blocks-control-blocks-label"
|
||||
>
|
||||
Blocks
|
||||
</Label>
|
||||
</div>
|
||||
<div className="relative flex items-center">
|
||||
<MagnifyingGlassIcon className="absolute m-2 h-5 w-5 text-gray-500 dark:text-gray-400" />
|
||||
<Input
|
||||
id="search-blocks"
|
||||
type="text"
|
||||
placeholder="Search blocks"
|
||||
value={searchQuery}
|
||||
onChange={(e) => setSearchQuery(e.target.value)}
|
||||
className="rounded-lg px-8 py-5 dark:bg-slate-800 dark:text-white"
|
||||
data-id="blocks-control-search-input"
|
||||
autoComplete="off"
|
||||
/>
|
||||
</div>
|
||||
<div
|
||||
className="mt-2 flex flex-wrap gap-2"
|
||||
data-testid="blocks-categories-list"
|
||||
>
|
||||
{categories.map((category) => {
|
||||
const color = getPrimaryCategoryColor([
|
||||
{ category: category || "All", description: "" },
|
||||
]);
|
||||
const colorClass =
|
||||
selectedCategory === category ? `${color}` : "";
|
||||
return (
|
||||
<div
|
||||
key={category}
|
||||
data-testid="blocks-category"
|
||||
role="button"
|
||||
className={`cursor-pointer rounded-xl border px-2 py-2 text-xs font-medium dark:border-slate-700 dark:text-white ${colorClass}`}
|
||||
onClick={() =>
|
||||
setSelectedCategory(
|
||||
selectedCategory === category ? null : category,
|
||||
)
|
||||
}
|
||||
>
|
||||
{beautifyString((category || "All").toLowerCase())}
|
||||
</div>
|
||||
);
|
||||
})}
|
||||
</div>
|
||||
</CardHeader>
|
||||
<CardContent className="overflow-scroll border-t border-t-gray-200 p-0 dark:border-t-slate-700">
|
||||
<ScrollArea
|
||||
className="h-[60vh] w-full"
|
||||
data-id="blocks-control-scroll-area"
|
||||
>
|
||||
{filteredAvailableBlocks.map((block) => (
|
||||
<Card
|
||||
key={block.uiKey || block.id}
|
||||
className={`m-2 my-4 flex h-20 shadow-none dark:border-slate-700 dark:bg-slate-800 dark:text-slate-100 dark:hover:bg-slate-700 ${
|
||||
block.notAvailable
|
||||
? "cursor-not-allowed opacity-50"
|
||||
: "cursor-move hover:shadow-lg"
|
||||
}`}
|
||||
data-id={`block-card-${block.id}`}
|
||||
draggable={!block.notAvailable}
|
||||
onDragStart={(e) => {
|
||||
if (block.notAvailable) return;
|
||||
e.dataTransfer.effectAllowed = "copy";
|
||||
e.dataTransfer.setData(
|
||||
"application/reactflow",
|
||||
JSON.stringify({
|
||||
blockId: block.id,
|
||||
blockName: block.name,
|
||||
hardcodedValues: block?.hardcodedValues || {},
|
||||
}),
|
||||
);
|
||||
}}
|
||||
onClick={() => handleAddBlock(block)}
|
||||
title={block.notAvailable ?? undefined}
|
||||
>
|
||||
<div
|
||||
className={`-ml-px h-full w-3 rounded-l-xl ${getPrimaryCategoryColor(block.categories)}`}
|
||||
></div>
|
||||
|
||||
<div className="mx-3 flex flex-1 items-center justify-between">
|
||||
<div className="mr-2 min-w-0">
|
||||
<span
|
||||
className="block truncate pb-1 text-sm font-semibold dark:text-white"
|
||||
data-id={`block-name-${block.id}`}
|
||||
data-type={block.uiType}
|
||||
data-testid={`block-name-${block.id}`}
|
||||
>
|
||||
<TextRenderer
|
||||
value={beautifyString(block.name).replace(
|
||||
/ Block$/,
|
||||
"",
|
||||
)}
|
||||
truncateLengthLimit={45}
|
||||
/>
|
||||
</span>
|
||||
<span
|
||||
className="block break-all text-xs font-normal text-gray-500 dark:text-gray-400"
|
||||
data-testid={`block-description-${block.id}`}
|
||||
>
|
||||
<TextRenderer
|
||||
value={block.description}
|
||||
truncateLengthLimit={165}
|
||||
/>
|
||||
</span>
|
||||
</div>
|
||||
<div
|
||||
className="flex flex-shrink-0 items-center gap-1"
|
||||
data-id={`block-tooltip-${block.id}`}
|
||||
data-testid={`block-add`}
|
||||
>
|
||||
<PlusIcon className="h-6 w-6 rounded-lg bg-gray-200 stroke-black stroke-[0.5px] p-1 dark:bg-gray-700 dark:stroke-white" />
|
||||
</div>
|
||||
</div>
|
||||
</Card>
|
||||
))}
|
||||
</ScrollArea>
|
||||
</CardContent>
|
||||
</Card>
|
||||
</PopoverContent>
|
||||
</Popover>
|
||||
);
|
||||
}
|
||||
|
||||
/**
* Evaluates how well a block matches the search query and returns a relevance score.
* The scoring algorithm works as follows:
* - Returns 1 if there is no query (all blocks match equally)
* - Normalizes the query for case-insensitive matching
* - Returns 3 for exact substring matches in the block name (highest priority)
* - Returns 2 when all query words appear in the block name (regardless of order)
* - Returns 1.X for blocks whose names are similar to the query by Jaro-Winkler distance (X is the similarity score)
* - Returns 0.5 when all query words appear in the block description (lowest priority)
* - Returns 0 for no match
*
* Higher scores appear first in search results.
*/
function blockScoreForQuery(block: _Block, query: string): number {
if (!query) return 1;
const normalizedQuery = query.toLowerCase().trim();
const queryWords = normalizedQuery.split(/\s+/);

// Use cached values for performance
const { blockName, beautifiedName, description } = block._cached!;

// 1. Exact match in name (highest priority)
if (
blockName.includes(normalizedQuery) ||
beautifiedName.includes(normalizedQuery)
) {
return 3;
}

// 2. All query words in name (regardless of order)
const allWordsInName = queryWords.every(
(word) => blockName.includes(word) || beautifiedName.includes(word),
);
if (allWordsInName) return 2;

// 3. Similarity with name (Jaro-Winkler)
const similarityThreshold = 0.65;
const nameSimilarity = jaro(blockName, normalizedQuery);
const beautifiedSimilarity = jaro(beautifiedName, normalizedQuery);
const maxSimilarity = Math.max(nameSimilarity, beautifiedSimilarity);
if (maxSimilarity > similarityThreshold) {
return 1 + maxSimilarity; // Score between 1 and 2
}

// 4. All query words in description (lower priority)
const allWordsInDescription = queryWords.every((word) =>
description.includes(word),
);
if (allWordsInDescription) return 0.5;

return 0;
}
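For context (not part of the diff): a minimal usage sketch of how these score tiers order search results, assuming the blocks have already been run through useSearchableBlocks so that _cached is populated; the query string is only an illustrative example.

const results = blocks
  .map((block) => ({ block, score: blockScoreForQuery(block, "send email") }))
  .filter(({ score }) => score > 0) // drop non-matches entirely
  .sort((a, b) => b.score - a.score); // exact name hits (3) before all-words name hits (2),
                                      // fuzzy Jaro-Winkler hits (1–2), then description-only hits (0.5)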
@@ -0,0 +1,119 @@
|
||||
import React from "react";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { Button } from "@/components/__legacy__/ui/button";
|
||||
import { LogOut } from "lucide-react";
|
||||
import { ClockIcon, WarningIcon } from "@phosphor-icons/react";
|
||||
import { IconPlay, IconSquare } from "@/components/__legacy__/ui/icons";
|
||||
|
||||
interface Props {
|
||||
onClickAgentOutputs?: () => void;
|
||||
onClickRunAgent?: () => void;
|
||||
onClickStopRun: () => void;
|
||||
onClickScheduleButton?: () => void;
|
||||
isRunning: boolean;
|
||||
isDisabled: boolean;
|
||||
className?: string;
|
||||
resolutionModeActive?: boolean;
|
||||
}
|
||||
|
||||
export const BuildActionBar: React.FC<Props> = ({
|
||||
onClickAgentOutputs,
|
||||
onClickRunAgent,
|
||||
onClickStopRun,
|
||||
onClickScheduleButton,
|
||||
isRunning,
|
||||
isDisabled,
|
||||
className,
|
||||
resolutionModeActive = false,
|
||||
}) => {
|
||||
const buttonClasses =
|
||||
"flex items-center gap-2 text-sm font-medium md:text-lg";
|
||||
|
||||
// Show resolution mode message instead of action buttons
|
||||
if (resolutionModeActive) {
|
||||
return (
|
||||
<div
|
||||
className={cn(
|
||||
"flex w-fit select-none items-center justify-center p-4",
|
||||
className,
|
||||
)}
|
||||
>
|
||||
<div className="flex items-center gap-3 rounded-lg border border-amber-300 bg-amber-50 px-4 py-3 dark:border-amber-700 dark:bg-amber-900/30">
|
||||
<WarningIcon className="size-5 text-amber-600 dark:text-amber-400" />
|
||||
<span className="text-sm font-medium text-amber-800 dark:text-amber-200">
|
||||
Remove incompatible connections to continue
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<div
|
||||
className={cn(
|
||||
"flex w-fit select-none items-center justify-center p-4",
|
||||
className,
|
||||
)}
|
||||
>
|
||||
<div className="flex gap-1 md:gap-4">
|
||||
{onClickAgentOutputs && (
|
||||
<Button
|
||||
className={buttonClasses}
|
||||
variant="outline"
|
||||
size="primary"
|
||||
onClick={onClickAgentOutputs}
|
||||
title="View agent outputs"
|
||||
>
|
||||
<LogOut className="hidden size-5 md:flex" /> Agent Outputs
|
||||
</Button>
|
||||
)}
|
||||
|
||||
{!isRunning ? (
|
||||
<Button
|
||||
className={cn(
|
||||
buttonClasses,
|
||||
onClickRunAgent && isDisabled
|
||||
? "cursor-default opacity-50 hover:bg-accent"
|
||||
: "",
|
||||
)}
|
||||
variant="accent"
|
||||
size="primary"
|
||||
onClick={onClickRunAgent}
|
||||
disabled={!onClickRunAgent}
|
||||
title="Run the agent"
|
||||
aria-label="Run the agent"
|
||||
data-testid="primary-action-run-agent"
|
||||
data-tutorial-id="primary-action-run-agent"
|
||||
>
|
||||
<IconPlay /> Run
|
||||
</Button>
|
||||
) : (
|
||||
<Button
|
||||
className={buttonClasses}
|
||||
variant="destructive"
|
||||
size="primary"
|
||||
onClick={onClickStopRun}
|
||||
title="Stop the agent"
|
||||
data-id="primary-action-stop-agent"
|
||||
>
|
||||
<IconSquare /> Stop
|
||||
</Button>
|
||||
)}
|
||||
|
||||
{onClickScheduleButton && (
|
||||
<Button
|
||||
className={buttonClasses}
|
||||
variant="outline"
|
||||
size="primary"
|
||||
onClick={onClickScheduleButton}
|
||||
title="Set up a run schedule for the agent"
|
||||
data-id="primary-action-schedule-agent"
|
||||
>
|
||||
<ClockIcon className="hidden h-5 w-5 md:flex" />
|
||||
Schedule Run
|
||||
</Button>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
@@ -0,0 +1,33 @@
import {
BaseEdge,
ConnectionLineComponentProps,
Node,
getBezierPath,
Position,
} from "@xyflow/react";

export default function ConnectionLine<NodeType extends Node>({
fromPosition,
fromHandle,
fromX,
fromY,
toPosition,
toX,
toY,
}: ConnectionLineComponentProps<NodeType>) {
const sourceX =
fromPosition === Position.Right
? fromX + ((fromHandle?.width ?? 0) / 2 - 5)
: fromX - ((fromHandle?.width ?? 0) / 2 - 5);

const [path] = getBezierPath({
sourceX: sourceX,
sourceY: fromY,
sourcePosition: fromPosition,
targetX: toX,
targetY: toY,
targetPosition: toPosition,
});

return <BaseEdge path={path} style={{ strokeWidth: 2, stroke: "#555" }} />;
}
@@ -0,0 +1,86 @@
|
||||
import { Card, CardContent } from "@/components/__legacy__/ui/card";
|
||||
import {
|
||||
Tooltip,
|
||||
TooltipContent,
|
||||
TooltipTrigger,
|
||||
} from "@/components/atoms/Tooltip/BaseTooltip";
|
||||
import { Button } from "@/components/__legacy__/ui/button";
|
||||
import { Separator } from "@/components/__legacy__/ui/separator";
|
||||
import { cn } from "@/lib/utils";
|
||||
import React from "react";
|
||||
|
||||
/**
|
||||
* Represents a control element for the ControlPanel Component.
|
||||
* @type {Object} Control
|
||||
* @property {React.ReactNode} icon - The icon of the control from lucide-react https://lucide.dev/icons/
|
||||
* @property {string} label - The label of the control, to be leveraged by ToolTip.
|
||||
* @property {onclick} onClick - The function to be executed when the control is clicked.
|
||||
*/
|
||||
export type Control = {
|
||||
icon: React.ReactNode;
|
||||
label: string;
|
||||
disabled?: boolean;
|
||||
onClick: () => void;
|
||||
};
|
||||
|
||||
interface ControlPanelProps {
|
||||
controls: Control[];
|
||||
topChildren?: React.ReactNode;
|
||||
botChildren?: React.ReactNode;
|
||||
className?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* ControlPanel component displays a panel of icon controls and can render additional children above and below them.
|
||||
* @param {Object} ControlPanelProps - The properties of the control panel component.
|
||||
* @param {Array} ControlPanelProps.controls - An array of control objects representing actions to be performed.
|
||||
* @param {Array} ControlPanelProps.children - The child components of the control panel.
|
||||
* @param {string} ControlPanelProps.className - Additional CSS class names for the control panel.
|
||||
* @returns The rendered control panel component.
|
||||
*/
|
||||
export const ControlPanel = ({
|
||||
controls,
|
||||
topChildren,
|
||||
botChildren,
|
||||
className,
|
||||
}: ControlPanelProps) => {
|
||||
return (
|
||||
<Card className={cn("m-4 mt-24 w-14 dark:bg-slate-900", className)}>
|
||||
<CardContent className="p-0">
|
||||
<div className="flex flex-col items-center gap-3 rounded-xl py-3">
|
||||
{topChildren}
|
||||
<Separator className="dark:bg-slate-700" />
|
||||
{controls.map((control, index) => (
|
||||
<Tooltip key={index} delayDuration={500}>
|
||||
<TooltipTrigger asChild>
|
||||
<div>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="icon"
|
||||
onClick={() => control.onClick()}
|
||||
data-id={`control-button-${index}`}
|
||||
data-testid={`blocks-control-${control.label.toLowerCase()}-button`}
|
||||
disabled={control.disabled || false}
|
||||
className="dark:bg-slate-900 dark:text-slate-100 dark:hover:bg-slate-800"
|
||||
>
|
||||
{control.icon}
|
||||
<span className="sr-only">{control.label}</span>
|
||||
</Button>
|
||||
</div>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent
|
||||
side="right"
|
||||
className="dark:bg-slate-800 dark:text-slate-100"
|
||||
>
|
||||
{control.label}
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
))}
|
||||
<Separator className="dark:bg-slate-700" />
|
||||
{botChildren}
|
||||
</div>
|
||||
</CardContent>
|
||||
</Card>
|
||||
);
|
||||
};
|
||||
export default ControlPanel;
|
||||
@@ -0,0 +1,240 @@
|
||||
import React, {
|
||||
useCallback,
|
||||
useContext,
|
||||
useEffect,
|
||||
useState,
|
||||
useRef,
|
||||
} from "react";
|
||||
import {
|
||||
BaseEdge,
|
||||
EdgeLabelRenderer,
|
||||
EdgeProps,
|
||||
useReactFlow,
|
||||
XYPosition,
|
||||
Edge,
|
||||
Node,
|
||||
} from "@xyflow/react";
|
||||
import "./customedge.css";
|
||||
import { X } from "lucide-react";
|
||||
import { BuilderContext } from "../Flow/Flow";
|
||||
import { NodeExecutionResult } from "@/lib/autogpt-server-api";
|
||||
import { useCustomEdge } from "./useCustomEdge";
|
||||
|
||||
export type CustomEdgeData = {
|
||||
edgeColor: string;
|
||||
sourcePos?: XYPosition;
|
||||
isStatic?: boolean;
|
||||
beadUp: number;
|
||||
beadDown: number;
|
||||
beadData?: Map<string, NodeExecutionResult["status"]>;
|
||||
};
|
||||
|
||||
type Bead = {
|
||||
t: number;
|
||||
targetT: number;
|
||||
startTime: number;
|
||||
};
|
||||
|
||||
export type CustomEdge = Edge<CustomEdgeData, "custom">;
|
||||
|
||||
export function CustomEdge({
|
||||
id,
|
||||
data,
|
||||
selected,
|
||||
sourceX,
|
||||
sourceY,
|
||||
targetX,
|
||||
targetY,
|
||||
markerEnd,
|
||||
}: EdgeProps<CustomEdge>) {
|
||||
const [beads, setBeads] = useState<{
|
||||
beads: Bead[];
|
||||
created: number;
|
||||
destroyed: number;
|
||||
}>({ beads: [], created: 0, destroyed: 0 });
|
||||
const beadsRef = useRef(beads);
|
||||
const { svgPath, length, getPointForT, getTForDistance } = useCustomEdge(
|
||||
sourceX - 5,
|
||||
sourceY - 5,
|
||||
targetX + 3,
|
||||
targetY - 5,
|
||||
);
|
||||
const { deleteElements } = useReactFlow<Node, CustomEdge>();
|
||||
const builderContext = useContext(BuilderContext);
|
||||
const { visualizeBeads } = builderContext ?? {
|
||||
visualizeBeads: "no",
|
||||
};
|
||||
|
||||
// Check if this edge is broken (during resolution mode)
|
||||
const isBroken =
|
||||
builderContext?.resolutionMode?.active &&
|
||||
builderContext?.resolutionMode?.brokenEdgeIds?.includes(id);
|
||||
|
||||
const onEdgeRemoveClick = () => {
|
||||
deleteElements({ edges: [{ id }] });
|
||||
};
|
||||
|
||||
const animationDuration = 500; // Duration in milliseconds for bead to travel the curve
|
||||
const beadDiameter = 12;
|
||||
const deltaTime = 16;
|
||||
|
||||
const setTargetPositions = useCallback(
|
||||
(beads: Bead[]) => {
|
||||
const distanceBetween = Math.min(
|
||||
(length - beadDiameter) / (beads.length + 1),
|
||||
beadDiameter,
|
||||
);
|
||||
|
||||
return beads.map((bead, index) => {
|
||||
const distanceFromEnd = beadDiameter * 1.35;
|
||||
const targetPosition = distanceBetween * index + distanceFromEnd;
|
||||
const t = getTForDistance(-targetPosition);
|
||||
|
||||
return {
|
||||
...bead,
|
||||
t: visualizeBeads === "animate" ? bead.t : t,
|
||||
targetT: t,
|
||||
} as Bead;
|
||||
});
|
||||
},
|
||||
[getTForDistance, length, visualizeBeads],
|
||||
);
|
||||
|
||||
beadsRef.current = beads;
|
||||
useEffect(() => {
|
||||
const beadUp: number = data?.beadUp ?? 0;
|
||||
const beadDown: number = data?.beadDown ?? 0;
|
||||
|
||||
if (
|
||||
beadUp === 0 &&
|
||||
beadDown === 0 &&
|
||||
(beads.created > 0 || beads.destroyed > 0)
|
||||
) {
|
||||
setBeads({ beads: [], created: 0, destroyed: 0 });
|
||||
return;
|
||||
}
|
||||
|
||||
// Add beads
|
||||
if (beadUp > beads.created) {
|
||||
setBeads(({ beads, created, destroyed }) => {
|
||||
const newBeads = [];
|
||||
for (let i = 0; i < beadUp - created; i++) {
|
||||
newBeads.push({ t: 0, targetT: 0, startTime: Date.now() });
|
||||
}
|
||||
|
||||
const b = setTargetPositions([...beads, ...newBeads]);
|
||||
return { beads: b, created: beadUp, destroyed };
|
||||
});
|
||||
}
|
||||
|
||||
// Animate and remove beads
|
||||
const interval = setInterval(
|
||||
({ current: beads }) => {
|
||||
// If there are no beads visible or moving, stop re-rendering
|
||||
if (
|
||||
(beadUp === beads.created && beads.created === beads.destroyed) ||
|
||||
beads.beads.every((bead) => bead.t >= bead.targetT)
|
||||
) {
|
||||
clearInterval(interval);
|
||||
return;
|
||||
}
|
||||
|
||||
setBeads(({ beads, created, destroyed }) => {
|
||||
let destroyedCount = 0;
|
||||
|
||||
const newBeads = beads
|
||||
.map((bead) => {
|
||||
const progressIncrement = deltaTime / animationDuration;
|
||||
const t = Math.min(
|
||||
bead.t + bead.targetT * progressIncrement,
|
||||
bead.targetT,
|
||||
);
|
||||
|
||||
return { ...bead, t };
|
||||
})
|
||||
.filter((bead, index) => {
|
||||
const removeCount = beadDown - destroyed;
|
||||
if (bead.t >= bead.targetT && index < removeCount) {
|
||||
destroyedCount++;
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
});
|
||||
|
||||
return {
|
||||
beads: setTargetPositions(newBeads),
|
||||
created,
|
||||
destroyed: destroyed + destroyedCount,
|
||||
};
|
||||
});
|
||||
},
|
||||
deltaTime,
|
||||
beadsRef,
|
||||
);
|
||||
|
||||
return () => clearInterval(interval);
|
||||
}, [data?.beadUp, data?.beadDown, setTargetPositions, visualizeBeads]);
|
||||
|
||||
const middle = getPointForT(0.5);
|
||||
|
||||
// Determine edge color - red for broken edges
|
||||
const baseColor = data?.edgeColor ?? "#555555";
|
||||
const edgeColor = isBroken ? "#ef4444" : baseColor;
|
||||
// Add opacity to hex color (99 = 60% opacity, 80 = 50% opacity)
|
||||
const strokeColor = isBroken
|
||||
? `${edgeColor}99`
|
||||
: selected
|
||||
? edgeColor
|
||||
: `${edgeColor}80`;
|
||||
|
||||
return (
|
||||
<>
|
||||
<BaseEdge
|
||||
path={svgPath}
|
||||
markerEnd={markerEnd}
|
||||
style={{
|
||||
stroke: strokeColor,
|
||||
strokeWidth: data?.isStatic ? 2.5 : 2,
|
||||
strokeDasharray: data?.isStatic ? "5 3" : undefined,
|
||||
}}
|
||||
className="data-sentry-unmask transition-all duration-200"
|
||||
/>
|
||||
<path
|
||||
d={svgPath}
|
||||
fill="none"
|
||||
strokeOpacity={0}
|
||||
strokeWidth={20}
|
||||
className="data-sentry-unmask react-flow__edge-interaction"
|
||||
/>
|
||||
<EdgeLabelRenderer>
|
||||
<div
|
||||
style={{
|
||||
position: "absolute",
|
||||
transform: `translate(-50%, -50%) translate(${middle.x}px,${middle.y}px)`,
|
||||
pointerEvents: "all",
|
||||
}}
|
||||
className="edge-label-renderer"
|
||||
>
|
||||
<button
|
||||
className="edge-label-button opacity-0 transition-opacity duration-200 hover:opacity-100"
|
||||
onClick={onEdgeRemoveClick}
|
||||
>
|
||||
<X className="size-4" />
|
||||
</button>
|
||||
</div>
|
||||
</EdgeLabelRenderer>
|
||||
{beads.beads.map((bead, index) => {
|
||||
const pos = getPointForT(bead.t);
|
||||
return (
|
||||
<circle
|
||||
key={index}
|
||||
cx={pos.x}
|
||||
cy={pos.y}
|
||||
r={beadDiameter / 2} // Bead radius
|
||||
fill={data?.edgeColor ?? "#555555"}
|
||||
/>
|
||||
);
|
||||
})}
|
||||
</>
|
||||
);
|
||||
}
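As an aside (not part of the diff): a minimal standalone sketch of the per-tick update the interval above performs on each bead. The Bead shape and the deltaTime/animationDuration values mirror the constants in CustomEdge; advanceBeads is a hypothetical helper name, not from the source.

type Bead = { t: number; targetT: number; startTime: number };

function advanceBeads(
  beads: Bead[],
  deltaTime = 16, // ms per tick, as in CustomEdge
  animationDuration = 500, // ms for a bead to travel the curve, as in CustomEdge
): Bead[] {
  const progressIncrement = deltaTime / animationDuration;
  // Each tick moves a bead a fraction of its target distance, clamped at the target.
  return beads.map((bead) => ({
    ...bead,
    t: Math.min(bead.t + bead.targetT * progressIncrement, bead.targetT),
  }));
}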
|
||||
@@ -0,0 +1,48 @@
|
||||
.edge-label-renderer {
|
||||
position: absolute;
|
||||
pointer-events: all;
|
||||
}
|
||||
|
||||
.edge-label-button {
|
||||
width: 20px;
|
||||
height: 20px;
|
||||
background: #eee;
|
||||
border: 1px solid #fff;
|
||||
cursor: pointer;
|
||||
border-radius: 50%;
|
||||
display: flex;
|
||||
justify-content: center;
|
||||
align-items: center;
|
||||
padding: 0;
|
||||
color: #555;
|
||||
opacity: 0;
|
||||
transition:
|
||||
opacity 0.2s ease-in-out,
|
||||
background-color 0.2s ease-in-out;
|
||||
}
|
||||
|
||||
.edge-label-button.visible {
|
||||
opacity: 1;
|
||||
}
|
||||
|
||||
.edge-label-button:hover {
|
||||
box-shadow: 0 0 6px 2px rgba(0, 0, 0, 0.08);
|
||||
background: #f0f0f0;
|
||||
}
|
||||
|
||||
.edge-label-button svg {
|
||||
width: 14px;
|
||||
height: 14px;
|
||||
}
|
||||
|
||||
.react-flow__edge-interaction {
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
.react-flow__edges > svg:has(> g.selected) {
|
||||
z-index: 10 !important;
|
||||
}
|
||||
|
||||
.react-flow__edgelabel-renderer {
|
||||
z-index: 11 !important;
|
||||
}
|
||||
@@ -0,0 +1,157 @@
|
||||
import { useCallback, useMemo } from "react";
|
||||
|
||||
type XYPosition = {
|
||||
x: number;
|
||||
y: number;
|
||||
};
|
||||
|
||||
export type BezierPath = {
|
||||
sourcePosition: XYPosition;
|
||||
control1: XYPosition;
|
||||
control2: XYPosition;
|
||||
targetPosition: XYPosition;
|
||||
};
|
||||
|
||||
export function useCustomEdge(
|
||||
sourceX: number,
|
||||
sourceY: number,
|
||||
targetX: number,
|
||||
targetY: number,
|
||||
) {
|
||||
const path: BezierPath = useMemo(() => {
|
||||
const xDifference = Math.abs(sourceX - targetX);
|
||||
const yDifference = Math.abs(sourceY - targetY);
|
||||
const xControlDistance =
|
||||
sourceX < targetX ? 64 : Math.max(xDifference / 2, 64);
|
||||
const yControlDistance = yDifference < 128 && sourceX > targetX ? -64 : 0;
|
||||
|
||||
return {
|
||||
sourcePosition: { x: sourceX, y: sourceY },
|
||||
control1: {
|
||||
x: sourceX + xControlDistance,
|
||||
y: sourceY + yControlDistance,
|
||||
},
|
||||
control2: {
|
||||
x: targetX - xControlDistance,
|
||||
y: targetY + yControlDistance,
|
||||
},
|
||||
targetPosition: { x: targetX, y: targetY },
|
||||
};
|
||||
}, [sourceX, sourceY, targetX, targetY]);
|
||||
|
||||
const svgPath = useMemo(
|
||||
() =>
|
||||
`M ${path.sourcePosition.x} ${path.sourcePosition.y} ` +
|
||||
`C ${path.control1.x} ${path.control1.y} ${path.control2.x} ${path.control2.y} ` +
|
||||
`${path.targetPosition.x}, ${path.targetPosition.y}`,
|
||||
[path],
|
||||
);
|
||||
|
||||
const getPointForT = useCallback(
|
||||
(t: number) => {
|
||||
// Bezier formula: (1-t)^3 * p0 + 3*(1-t)^2*t*p1 + 3*(1-t)*t^2*p2 + t^3*p3
|
||||
const x =
|
||||
Math.pow(1 - t, 3) * path.sourcePosition.x +
|
||||
3 * Math.pow(1 - t, 2) * t * path.control1.x +
|
||||
3 * (1 - t) * Math.pow(t, 2) * path.control2.x +
|
||||
Math.pow(t, 3) * path.targetPosition.x;
|
||||
|
||||
const y =
|
||||
Math.pow(1 - t, 3) * path.sourcePosition.y +
|
||||
3 * Math.pow(1 - t, 2) * t * path.control1.y +
|
||||
3 * (1 - t) * Math.pow(t, 2) * path.control2.y +
|
||||
Math.pow(t, 3) * path.targetPosition.y;
|
||||
|
||||
return { x, y };
|
||||
},
|
||||
[path],
|
||||
);
|
||||
|
||||
const getArcLength = useCallback(
|
||||
(t: number, samples: number = 100) => {
|
||||
let length = 0;
|
||||
let prevPoint = getPointForT(0);
|
||||
|
||||
for (let i = 1; i <= samples; i++) {
|
||||
const currT = (i / samples) * t;
|
||||
const currPoint = getPointForT(currT);
|
||||
length += Math.sqrt(
|
||||
Math.pow(currPoint.x - prevPoint.x, 2) +
|
||||
Math.pow(currPoint.y - prevPoint.y, 2),
|
||||
);
|
||||
prevPoint = currPoint;
|
||||
}
|
||||
|
||||
return length;
|
||||
},
|
||||
[getPointForT],
|
||||
);
|
||||
|
||||
const length = useMemo(() => {
|
||||
return getArcLength(1);
|
||||
}, [getArcLength]);
|
||||
|
||||
const getBezierDerivative = useCallback(
|
||||
(t: number) => {
|
||||
const mt = 1 - t;
|
||||
const x =
|
||||
3 *
|
||||
(mt * mt * (path.control1.x - path.sourcePosition.x) +
|
||||
2 * mt * t * (path.control2.x - path.control1.x) +
|
||||
t * t * (path.targetPosition.x - path.control2.x));
|
||||
const y =
|
||||
3 *
|
||||
(mt * mt * (path.control1.y - path.sourcePosition.y) +
|
||||
2 * mt * t * (path.control2.y - path.control1.y) +
|
||||
t * t * (path.targetPosition.y - path.control2.y));
|
||||
return { x, y };
|
||||
},
|
||||
[path],
|
||||
);
|
||||
|
||||
const getTForDistance = useCallback(
|
||||
(distance: number, epsilon: number = 0.0001) => {
|
||||
if (distance < 0) {
|
||||
distance = length + distance; // If distance is negative, calculate from the end of the curve
|
||||
}
|
||||
|
||||
let t = distance / getArcLength(1);
|
||||
let prevT = 0;
|
||||
|
||||
while (Math.abs(t - prevT) > epsilon) {
|
||||
prevT = t;
|
||||
const length = getArcLength(t);
|
||||
const derivative = Math.sqrt(
|
||||
Math.pow(getBezierDerivative(t).x, 2) +
|
||||
Math.pow(getBezierDerivative(t).y, 2),
|
||||
);
|
||||
t -= (length - distance) / derivative;
|
||||
t = Math.max(0, Math.min(1, t)); // Clamp t between 0 and 1
|
||||
}
|
||||
|
||||
return t;
|
||||
},
|
||||
[getArcLength, getBezierDerivative, length],
|
||||
);
|
||||
|
||||
const getPointAtDistance = useCallback(
|
||||
(distance: number) => {
|
||||
if (distance < 0) {
|
||||
distance = length + distance; // If distance is negative, calculate from the end of the curve
|
||||
}
|
||||
|
||||
const t = getTForDistance(distance);
|
||||
return getPointForT(t);
|
||||
},
|
||||
[getTForDistance, getPointForT, length],
|
||||
);
|
||||
|
||||
return {
|
||||
path,
|
||||
svgPath,
|
||||
length,
|
||||
getPointForT,
|
||||
getTForDistance,
|
||||
getPointAtDistance,
|
||||
};
|
||||
}
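For reference (not part of the diff): a standalone sketch of the arc-length lookup strategy getTForDistance uses above — approximate arc length by sampling, then refine t with Newton's method using the curve's speed so that arcLength(t) ≈ distance. The function and parameter names here are illustrative, not from the source.

function tForDistance(
  arcLength: (t: number) => number, // e.g. a sampled estimate like getArcLength above
  speed: (t: number) => number, // |dB/dt|, the magnitude of the Bezier derivative
  totalLength: number, // arcLength(1)
  distance: number,
  epsilon = 0.0001,
): number {
  if (distance < 0) distance = totalLength + distance; // negative distance = measured from the end
  let t = distance / totalLength; // first guess: assume uniform speed
  let prevT = Infinity;
  while (Math.abs(t - prevT) > epsilon) {
    prevT = t;
    t -= (arcLength(t) - distance) / speed(t); // Newton step on f(t) = arcLength(t) - distance
    t = Math.max(0, Math.min(1, t)); // clamp t onto the curve
  }
  return t;
}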
|
||||
File diff suppressed because it is too large
@@ -0,0 +1,244 @@
|
||||
import React from "react";
|
||||
import {
|
||||
Dialog,
|
||||
DialogContent,
|
||||
DialogDescription,
|
||||
DialogFooter,
|
||||
DialogHeader,
|
||||
DialogTitle,
|
||||
} from "@/components/__legacy__/ui/dialog";
|
||||
import { Button } from "@/components/__legacy__/ui/button";
|
||||
import { AlertTriangle, XCircle, PlusCircle } from "lucide-react";
|
||||
import { IncompatibilityInfo } from "../../../hooks/useSubAgentUpdate/types";
|
||||
import { beautifyString } from "@/lib/utils";
|
||||
import { Alert, AlertDescription } from "@/components/molecules/Alert/Alert";
|
||||
|
||||
interface IncompatibilityDialogProps {
|
||||
isOpen: boolean;
|
||||
onClose: () => void;
|
||||
onConfirm: () => void;
|
||||
currentVersion: number;
|
||||
latestVersion: number;
|
||||
agentName: string;
|
||||
incompatibilities: IncompatibilityInfo;
|
||||
}
|
||||
|
||||
export const IncompatibilityDialog: React.FC<IncompatibilityDialogProps> = ({
|
||||
isOpen,
|
||||
onClose,
|
||||
onConfirm,
|
||||
currentVersion,
|
||||
latestVersion,
|
||||
agentName,
|
||||
incompatibilities,
|
||||
}) => {
|
||||
const hasMissingInputs = incompatibilities.missingInputs.length > 0;
|
||||
const hasMissingOutputs = incompatibilities.missingOutputs.length > 0;
|
||||
const hasNewInputs = incompatibilities.newInputs.length > 0;
|
||||
const hasNewOutputs = incompatibilities.newOutputs.length > 0;
|
||||
const hasNewRequired = incompatibilities.newRequiredInputs.length > 0;
|
||||
const hasTypeMismatches = incompatibilities.inputTypeMismatches.length > 0;
|
||||
|
||||
const hasInputChanges = hasMissingInputs || hasNewInputs;
|
||||
const hasOutputChanges = hasMissingOutputs || hasNewOutputs;
|
||||
|
||||
return (
|
||||
<Dialog open={isOpen} onOpenChange={(open) => !open && onClose()}>
|
||||
<DialogContent className="max-w-lg">
|
||||
<DialogHeader>
|
||||
<DialogTitle className="flex items-center gap-2">
|
||||
<AlertTriangle className="h-5 w-5 text-amber-500" />
|
||||
Incompatible Update
|
||||
</DialogTitle>
|
||||
<DialogDescription>
|
||||
Updating <strong>{beautifyString(agentName)}</strong> from v
|
||||
{currentVersion} to v{latestVersion} will break some connections.
|
||||
</DialogDescription>
|
||||
</DialogHeader>
|
||||
|
||||
<div className="space-y-4 py-2">
|
||||
{/* Input changes - two column layout */}
|
||||
{hasInputChanges && (
|
||||
<TwoColumnSection
|
||||
title="Input Changes"
|
||||
leftIcon={<XCircle className="h-4 w-4 text-red-500" />}
|
||||
leftTitle="Removed"
|
||||
leftItems={incompatibilities.missingInputs}
|
||||
rightIcon={<PlusCircle className="h-4 w-4 text-green-500" />}
|
||||
rightTitle="Added"
|
||||
rightItems={incompatibilities.newInputs}
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* Output changes - two column layout */}
|
||||
{hasOutputChanges && (
|
||||
<TwoColumnSection
|
||||
title="Output Changes"
|
||||
leftIcon={<XCircle className="h-4 w-4 text-red-500" />}
|
||||
leftTitle="Removed"
|
||||
leftItems={incompatibilities.missingOutputs}
|
||||
rightIcon={<PlusCircle className="h-4 w-4 text-green-500" />}
|
||||
rightTitle="Added"
|
||||
rightItems={incompatibilities.newOutputs}
|
||||
/>
|
||||
)}
|
||||
|
||||
{hasTypeMismatches && (
|
||||
<SingleColumnSection
|
||||
icon={<XCircle className="h-4 w-4 text-red-500" />}
|
||||
title="Type Changed"
|
||||
description="These connected inputs have a different type:"
|
||||
items={incompatibilities.inputTypeMismatches.map(
|
||||
(m) => `${m.name} (${m.oldType} → ${m.newType})`,
|
||||
)}
|
||||
/>
|
||||
)}
|
||||
|
||||
{hasNewRequired && (
|
||||
<SingleColumnSection
|
||||
icon={<PlusCircle className="h-4 w-4 text-amber-500" />}
|
||||
title="New Required Inputs"
|
||||
description="These inputs are now required:"
|
||||
items={incompatibilities.newRequiredInputs}
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
|
||||
<Alert variant="warning">
|
||||
<AlertDescription>
|
||||
If you proceed, you'll need to remove the broken connections
|
||||
before you can save or run your agent.
|
||||
</AlertDescription>
|
||||
</Alert>
|
||||
|
||||
<DialogFooter className="gap-2 sm:gap-0">
|
||||
<Button variant="outline" onClick={onClose}>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button
|
||||
variant="destructive"
|
||||
onClick={onConfirm}
|
||||
className="bg-amber-600 hover:bg-amber-700"
|
||||
>
|
||||
Update Anyway
|
||||
</Button>
|
||||
</DialogFooter>
|
||||
</DialogContent>
|
||||
</Dialog>
|
||||
);
|
||||
};
|
||||
|
||||
interface TwoColumnSectionProps {
|
||||
title: string;
|
||||
leftIcon: React.ReactNode;
|
||||
leftTitle: string;
|
||||
leftItems: string[];
|
||||
rightIcon: React.ReactNode;
|
||||
rightTitle: string;
|
||||
rightItems: string[];
|
||||
}
|
||||
|
||||
const TwoColumnSection: React.FC<TwoColumnSectionProps> = ({
|
||||
title,
|
||||
leftIcon,
|
||||
leftTitle,
|
||||
leftItems,
|
||||
rightIcon,
|
||||
rightTitle,
|
||||
rightItems,
|
||||
}) => (
|
||||
<div className="rounded-md border border-gray-200 p-3 dark:border-gray-700">
|
||||
<span className="font-medium">{title}</span>
|
||||
<div className="mt-2 grid grid-cols-2 items-start gap-4">
|
||||
{/* Left column - Breaking changes */}
|
||||
<div className="min-w-0">
|
||||
<div className="flex items-center gap-1.5 text-sm text-gray-500 dark:text-gray-400">
|
||||
{leftIcon}
|
||||
<span>{leftTitle}</span>
|
||||
</div>
|
||||
<ul className="mt-1.5 space-y-1">
|
||||
{leftItems.length > 0 ? (
|
||||
leftItems.map((item) => (
|
||||
<li
|
||||
key={item}
|
||||
className="text-sm text-gray-700 dark:text-gray-300"
|
||||
>
|
||||
<code className="rounded bg-red-50 px-1 py-0.5 font-mono text-xs text-red-700 dark:bg-red-900/30 dark:text-red-300">
|
||||
{item}
|
||||
</code>
|
||||
</li>
|
||||
))
|
||||
) : (
|
||||
<li className="text-sm italic text-gray-400 dark:text-gray-500">
|
||||
None
|
||||
</li>
|
||||
)}
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
{/* Right column - Possible solutions */}
|
||||
<div className="min-w-0">
|
||||
<div className="flex items-center gap-1.5 text-sm text-gray-500 dark:text-gray-400">
|
||||
{rightIcon}
|
||||
<span>{rightTitle}</span>
|
||||
</div>
|
||||
<ul className="mt-1.5 space-y-1">
|
||||
{rightItems.length > 0 ? (
|
||||
rightItems.map((item) => (
|
||||
<li
|
||||
key={item}
|
||||
className="text-sm text-gray-700 dark:text-gray-300"
|
||||
>
|
||||
<code className="rounded bg-green-50 px-1 py-0.5 font-mono text-xs text-green-700 dark:bg-green-900/30 dark:text-green-300">
|
||||
{item}
|
||||
</code>
|
||||
</li>
|
||||
))
|
||||
) : (
|
||||
<li className="text-sm italic text-gray-400 dark:text-gray-500">
|
||||
None
|
||||
</li>
|
||||
)}
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
|
||||
interface SingleColumnSectionProps {
|
||||
icon: React.ReactNode;
|
||||
title: string;
|
||||
description: string;
|
||||
items: string[];
|
||||
}
|
||||
|
||||
const SingleColumnSection: React.FC<SingleColumnSectionProps> = ({
|
||||
icon,
|
||||
title,
|
||||
description,
|
||||
items,
|
||||
}) => (
|
||||
<div className="rounded-md border border-gray-200 p-3 dark:border-gray-700">
|
||||
<div className="flex items-center gap-2">
|
||||
{icon}
|
||||
<span className="font-medium">{title}</span>
|
||||
</div>
|
||||
<p className="mt-1 text-sm text-gray-500 dark:text-gray-400">
|
||||
{description}
|
||||
</p>
|
||||
<ul className="mt-2 space-y-1">
|
||||
{items.map((item) => (
|
||||
<li
|
||||
key={item}
|
||||
className="ml-4 list-disc text-sm text-gray-700 dark:text-gray-300"
|
||||
>
|
||||
<code className="rounded bg-gray-100 px-1 py-0.5 font-mono text-xs dark:bg-gray-800">
|
||||
{item}
|
||||
</code>
|
||||
</li>
|
||||
))}
|
||||
</ul>
|
||||
</div>
|
||||
);
|
||||
|
||||
export default IncompatibilityDialog;
|
||||
@@ -0,0 +1,130 @@
|
||||
import React from "react";
|
||||
import { Button } from "@/components/__legacy__/ui/button";
|
||||
import { ArrowUp, AlertTriangle, Info } from "lucide-react";
|
||||
import {
|
||||
Tooltip,
|
||||
TooltipContent,
|
||||
TooltipTrigger,
|
||||
} from "@/components/atoms/Tooltip/BaseTooltip";
|
||||
import { IncompatibilityInfo } from "../../../hooks/useSubAgentUpdate/types";
|
||||
import { cn } from "@/lib/utils";
|
||||
|
||||
interface SubAgentUpdateBarProps {
|
||||
currentVersion: number;
|
||||
latestVersion: number;
|
||||
isCompatible: boolean;
|
||||
incompatibilities: IncompatibilityInfo | null;
|
||||
onUpdate: () => void;
|
||||
isInResolutionMode?: boolean;
|
||||
}
|
||||
|
||||
export const SubAgentUpdateBar: React.FC<SubAgentUpdateBarProps> = ({
|
||||
currentVersion,
|
||||
latestVersion,
|
||||
isCompatible,
|
||||
incompatibilities,
|
||||
onUpdate,
|
||||
isInResolutionMode = false,
|
||||
}) => {
|
||||
if (isInResolutionMode) {
|
||||
return <ResolutionModeBar incompatibilities={incompatibilities} />;
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="flex items-center justify-between gap-2 rounded-t-lg bg-blue-50 px-3 py-2 dark:bg-blue-900/30">
|
||||
<div className="flex items-center gap-2">
|
||||
<ArrowUp className="h-4 w-4 text-blue-600 dark:text-blue-400" />
|
||||
<span className="text-sm text-blue-700 dark:text-blue-300">
|
||||
Update available (v{currentVersion} → v{latestVersion})
|
||||
</span>
|
||||
{!isCompatible && (
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<AlertTriangle className="h-4 w-4 text-amber-500" />
|
||||
</TooltipTrigger>
|
||||
<TooltipContent className="max-w-xs">
|
||||
<p className="font-medium">Incompatible changes detected</p>
|
||||
<p className="text-xs text-gray-400">
|
||||
Click Update to see details
|
||||
</p>
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
)}
|
||||
</div>
|
||||
<Button
|
||||
size="sm"
|
||||
variant={isCompatible ? "default" : "outline"}
|
||||
onClick={onUpdate}
|
||||
className={cn(
|
||||
"h-7 text-xs",
|
||||
!isCompatible && "border-amber-500 text-amber-600 hover:bg-amber-50",
|
||||
)}
|
||||
>
|
||||
Update
|
||||
</Button>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
interface ResolutionModeBarProps {
|
||||
incompatibilities: IncompatibilityInfo | null;
|
||||
}
|
||||
|
||||
const ResolutionModeBar: React.FC<ResolutionModeBarProps> = ({
|
||||
incompatibilities,
|
||||
}) => {
|
||||
const formatIncompatibilities = () => {
|
||||
if (!incompatibilities) return "No incompatibilities";
|
||||
|
||||
const items: string[] = [];
|
||||
|
||||
if (incompatibilities.missingInputs.length > 0) {
|
||||
items.push(
|
||||
`Missing inputs: ${incompatibilities.missingInputs.join(", ")}`,
|
||||
);
|
||||
}
|
||||
if (incompatibilities.missingOutputs.length > 0) {
|
||||
items.push(
|
||||
`Missing outputs: ${incompatibilities.missingOutputs.join(", ")}`,
|
||||
);
|
||||
}
|
||||
if (incompatibilities.newRequiredInputs.length > 0) {
|
||||
items.push(
|
||||
`New required inputs: ${incompatibilities.newRequiredInputs.join(", ")}`,
|
||||
);
|
||||
}
|
||||
if (incompatibilities.inputTypeMismatches.length > 0) {
|
||||
const mismatches = incompatibilities.inputTypeMismatches
|
||||
.map((m) => `${m.name} (${m.oldType} → ${m.newType})`)
|
||||
.join(", ");
|
||||
items.push(`Type changed: ${mismatches}`);
|
||||
}
|
||||
|
||||
return items.join("\n");
|
||||
};
|
||||
|
||||
return (
|
||||
<div className="flex items-center justify-between gap-2 rounded-t-lg bg-amber-50 px-3 py-2 dark:bg-amber-900/30">
|
||||
<div className="flex items-center gap-2">
|
||||
<AlertTriangle className="h-4 w-4 text-amber-600 dark:text-amber-400" />
|
||||
<span className="text-sm text-amber-700 dark:text-amber-300">
|
||||
Remove incompatible connections
|
||||
</span>
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<Info className="h-4 w-4 cursor-help text-amber-500" />
|
||||
</TooltipTrigger>
|
||||
<TooltipContent className="max-w-sm whitespace-pre-line">
|
||||
<p className="font-medium">Incompatible changes:</p>
|
||||
<p className="mt-1 text-xs">{formatIncompatibilities()}</p>
|
||||
<p className="mt-2 text-xs text-gray-400">
|
||||
Delete the red connections to continue
|
||||
</p>
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
export default SubAgentUpdateBar;
|
||||
@@ -0,0 +1,131 @@
|
||||
.custom-node {
|
||||
color: #000000;
|
||||
box-sizing: border-box;
|
||||
transition: border-color 0.3s ease-in-out;
|
||||
}
|
||||
|
||||
.custom-node .custom-switch {
|
||||
padding: 0.5rem 1.25rem;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
}
|
||||
|
||||
.error-message {
|
||||
color: #d9534f;
|
||||
font-size: 13px;
|
||||
padding-left: 0.5rem;
|
||||
}
|
||||
|
||||
/* Existing styles */
|
||||
.handle-container {
|
||||
display: flex;
|
||||
position: relative;
|
||||
margin-bottom: 0px;
|
||||
padding: 5px;
|
||||
min-height: 44px;
|
||||
height: 100%;
|
||||
}
|
||||
|
||||
.react-flow__handle {
|
||||
background: transparent;
|
||||
width: auto;
|
||||
height: auto;
|
||||
border: 0;
|
||||
position: relative;
|
||||
transform: none;
|
||||
}
|
||||
|
||||
.border-error {
|
||||
border: 1px solid #d9534f;
|
||||
}
|
||||
|
||||
.select-input {
|
||||
width: 100%;
|
||||
padding: 5px;
|
||||
border-radius: 4px;
|
||||
border: 1px solid #000;
|
||||
background: #fff;
|
||||
color: #000;
|
||||
}
|
||||
|
||||
.radio-label {
|
||||
display: block;
|
||||
margin: 5px 0;
|
||||
color: #000;
|
||||
}
|
||||
|
||||
.number-input {
|
||||
width: 100%;
|
||||
padding: 5px;
|
||||
border-radius: 4px;
|
||||
background: #fff;
|
||||
color: #000;
|
||||
}
|
||||
|
||||
.array-item-container {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
margin-bottom: 5px;
|
||||
}
|
||||
|
||||
.array-item-input {
|
||||
flex-grow: 1;
|
||||
padding: 5px;
|
||||
border-radius: 4px;
|
||||
border: 1px solid #000;
|
||||
background: #fff;
|
||||
color: #000;
|
||||
}
|
||||
|
||||
.array-item-remove {
|
||||
background: #d9534f;
|
||||
border: none;
|
||||
color: white;
|
||||
cursor: pointer;
|
||||
margin-left: 5px;
|
||||
border-radius: 4px;
|
||||
padding: 5px 10px;
|
||||
}
|
||||
|
||||
.array-item-add {
|
||||
background: #5bc0de;
|
||||
border: none;
|
||||
color: white;
|
||||
cursor: pointer;
|
||||
border-radius: 4px;
|
||||
padding: 5px 10px;
|
||||
margin-top: 5px;
|
||||
}
|
||||
|
||||
.error-message {
|
||||
color: #d9534f;
|
||||
font-size: 13px;
|
||||
margin-top: 5px;
|
||||
margin-left: 5px;
|
||||
}
|
||||
|
||||
/* Styles for node states */
|
||||
.completed {
|
||||
border-color: #27ae60; /* Green border for completed nodes */
|
||||
}
|
||||
|
||||
.running {
|
||||
border-color: #f39c12; /* Orange border for running nodes */
|
||||
}
|
||||
|
||||
.failed {
|
||||
border-color: #c0392b; /* Red border for failed nodes */
|
||||
}
|
||||
|
||||
.incomplete {
|
||||
border-color: #9f14ab; /* Purple border for incomplete nodes */
|
||||
}
|
||||
|
||||
.queued {
|
||||
border-color: #25e6e6; /* Cyan border for queued nodes */
|
||||
}
|
||||
|
||||
.custom-switch {
|
||||
padding-left: 2px;
|
||||
}
|
||||
@@ -0,0 +1,166 @@
|
||||
import { beautifyString } from "@/lib/utils";
|
||||
import { Clipboard, Maximize2 } from "lucide-react";
|
||||
import React, { useMemo, useState } from "react";
|
||||
import { Button } from "../../../../../components/__legacy__/ui/button";
|
||||
import { ContentRenderer } from "../../../../../components/__legacy__/ui/render";
|
||||
import {
|
||||
Table,
|
||||
TableBody,
|
||||
TableCell,
|
||||
TableHead,
|
||||
TableHeader,
|
||||
TableRow,
|
||||
} from "../../../../../components/__legacy__/ui/table";
|
||||
import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
|
||||
import {
|
||||
globalRegistry,
|
||||
OutputItem,
|
||||
} from "@/components/contextual/OutputRenderers";
|
||||
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
|
||||
import { useToast } from "../../../../../components/molecules/Toast/use-toast";
|
||||
import ExpandableOutputDialog from "./ExpandableOutputDialog";
|
||||
|
||||
type DataTableProps = {
|
||||
title?: string;
|
||||
truncateLongData?: boolean;
|
||||
data: { [key: string]: Array<any> };
|
||||
};
|
||||
|
||||
export default function DataTable({
|
||||
title,
|
||||
truncateLongData,
|
||||
data,
|
||||
}: DataTableProps) {
|
||||
const { toast } = useToast();
|
||||
const enableEnhancedOutputHandling = useGetFlag(
|
||||
Flag.ENABLE_ENHANCED_OUTPUT_HANDLING,
|
||||
);
|
||||
const [expandedDialog, setExpandedDialog] = useState<{
|
||||
isOpen: boolean;
|
||||
execId: string;
|
||||
pinName: string;
|
||||
data: any[];
|
||||
} | null>(null);
|
||||
|
||||
// Prepare renderers for each item when enhanced mode is enabled
|
||||
const getItemRenderer = useMemo(() => {
|
||||
if (!enableEnhancedOutputHandling) return null;
|
||||
return (item: unknown) => {
|
||||
const metadata: OutputMetadata = {};
|
||||
return globalRegistry.getRenderer(item, metadata);
|
||||
};
|
||||
}, [enableEnhancedOutputHandling]);
|
||||
|
||||
const copyData = (pin: string, data: string) => {
|
||||
navigator.clipboard.writeText(data).then(() => {
|
||||
toast({
|
||||
title: `"${pin}" data copied to clipboard!`,
|
||||
duration: 2000,
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
const openExpandedView = (pinName: string, pinData: any[]) => {
|
||||
setExpandedDialog({
|
||||
isOpen: true,
|
||||
execId: title || "Unknown Execution",
|
||||
pinName,
|
||||
data: pinData,
|
||||
});
|
||||
};
|
||||
|
||||
const closeExpandedView = () => {
|
||||
setExpandedDialog(null);
|
||||
};
|
||||
|
||||
return (
|
||||
<>
|
||||
{title && <strong className="mt-2 flex justify-center">{title}</strong>}
|
||||
<Table className="cursor-default select-text">
|
||||
<TableHeader>
|
||||
<TableRow>
|
||||
<TableHead>Pin</TableHead>
|
||||
<TableHead>Data</TableHead>
|
||||
</TableRow>
|
||||
</TableHeader>
|
||||
<TableBody>
|
||||
{Object.entries(data).map(([key, value]) => (
|
||||
<TableRow className="group" key={key}>
|
||||
<TableCell className="cursor-text">
|
||||
{beautifyString(key)}
|
||||
</TableCell>
|
||||
<TableCell className="cursor-text">
|
||||
<div className="flex min-h-9 items-center whitespace-pre-wrap">
|
||||
<div className="absolute right-1 top-auto m-1 hidden gap-1 group-hover:flex">
|
||||
<Button
|
||||
variant="outline"
|
||||
size="icon"
|
||||
onClick={() => openExpandedView(key, value)}
|
||||
title="Expand Full View"
|
||||
>
|
||||
<Maximize2 size={18} />
|
||||
</Button>
|
||||
<Button
|
||||
variant="outline"
|
||||
size="icon"
|
||||
onClick={() =>
|
||||
copyData(
|
||||
beautifyString(key),
|
||||
value
|
||||
.map((i) =>
|
||||
typeof i === "object"
|
||||
? JSON.stringify(i, null, 2)
|
||||
: String(i),
|
||||
)
|
||||
.join(", "),
|
||||
)
|
||||
}
|
||||
title="Copy Data"
|
||||
>
|
||||
<Clipboard size={18} />
|
||||
</Button>
|
||||
</div>
|
||||
{value.map((item, index) => {
|
||||
const renderer = getItemRenderer?.(item);
|
||||
if (enableEnhancedOutputHandling && renderer) {
|
||||
const metadata: OutputMetadata = {};
|
||||
return (
|
||||
<React.Fragment key={index}>
|
||||
<OutputItem
|
||||
value={item}
|
||||
metadata={metadata}
|
||||
renderer={renderer}
|
||||
/>
|
||||
{index < value.length - 1 && ", "}
|
||||
</React.Fragment>
|
||||
);
|
||||
}
|
||||
return (
|
||||
<React.Fragment key={index}>
|
||||
<ContentRenderer
|
||||
value={item}
|
||||
truncateLongData={truncateLongData}
|
||||
/>
|
||||
{index < value.length - 1 && ", "}
|
||||
</React.Fragment>
|
||||
);
|
||||
})}
|
||||
</div>
|
||||
</TableCell>
|
||||
</TableRow>
|
||||
))}
|
||||
</TableBody>
|
||||
</Table>
|
||||
|
||||
{expandedDialog && (
|
||||
<ExpandableOutputDialog
|
||||
isOpen={expandedDialog.isOpen}
|
||||
onClose={closeExpandedView}
|
||||
execId={expandedDialog.execId}
|
||||
pinName={expandedDialog.pinName}
|
||||
data={expandedDialog.data}
|
||||
/>
|
||||
)}
|
||||
</>
|
||||
);
|
||||
}
|
||||
@@ -0,0 +1,269 @@
|
||||
import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
|
||||
import {
|
||||
globalRegistry,
|
||||
OutputActions,
|
||||
OutputItem,
|
||||
} from "@/components/contextual/OutputRenderers";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import { beautifyString } from "@/lib/utils";
|
||||
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
|
||||
import { Clipboard, Maximize2 } from "lucide-react";
|
||||
import React, { FC, useMemo, useState } from "react";
|
||||
import { Button } from "../../../../../components/__legacy__/ui/button";
|
||||
import { ContentRenderer } from "../../../../../components/__legacy__/ui/render";
|
||||
import { ScrollArea } from "../../../../../components/__legacy__/ui/scroll-area";
|
||||
import { Separator } from "../../../../../components/__legacy__/ui/separator";
|
||||
import { Switch } from "../../../../../components/atoms/Switch/Switch";
|
||||
import { useToast } from "../../../../../components/molecules/Toast/use-toast";
|
||||
|
||||
interface ExpandableOutputDialogProps {
|
||||
isOpen: boolean;
|
||||
onClose: () => void;
|
||||
execId: string;
|
||||
pinName: string;
|
||||
data: any[];
|
||||
}
|
||||
|
||||
const ExpandableOutputDialog: FC<ExpandableOutputDialogProps> = ({
|
||||
isOpen,
|
||||
onClose,
|
||||
execId,
|
||||
pinName,
|
||||
data,
|
||||
}) => {
|
||||
const { toast } = useToast();
|
||||
const enableEnhancedOutputHandling = useGetFlag(
|
||||
Flag.ENABLE_ENHANCED_OUTPUT_HANDLING,
|
||||
);
|
||||
const [useEnhancedRenderer, setUseEnhancedRenderer] = useState(false);
|
||||
|
||||
// Prepare items for the enhanced renderer system
|
||||
const outputItems = useMemo(() => {
|
||||
if (!data || !useEnhancedRenderer) return [];
|
||||
|
||||
const items: Array<{
|
||||
key: string;
|
||||
label: string;
|
||||
value: unknown;
|
||||
metadata?: OutputMetadata;
|
||||
renderer: any;
|
||||
}> = [];
|
||||
|
||||
data.forEach((value, index) => {
|
||||
const metadata: OutputMetadata = {};
|
||||
|
||||
// Extract metadata from the value if it's an object
|
||||
if (
|
||||
typeof value === "object" &&
|
||||
value !== null &&
|
||||
!React.isValidElement(value)
|
||||
) {
|
||||
const objValue = value as any;
|
||||
if (objValue.type) metadata.type = objValue.type;
|
||||
if (objValue.mimeType) metadata.mimeType = objValue.mimeType;
|
||||
if (objValue.filename) metadata.filename = objValue.filename;
|
||||
if (objValue.language) metadata.language = objValue.language;
|
||||
}
|
||||
|
||||
const renderer = globalRegistry.getRenderer(value, metadata);
|
||||
if (renderer) {
|
||||
items.push({
|
||||
key: `item-${index}`,
|
||||
label: index === 0 ? beautifyString(pinName) : "",
|
||||
value,
|
||||
metadata,
|
||||
renderer,
|
||||
});
|
||||
} else {
|
||||
// Fallback to text renderer
|
||||
const textRenderer = globalRegistry
|
||||
.getAllRenderers()
|
||||
.find((r) => r.name === "TextRenderer");
|
||||
if (textRenderer) {
|
||||
items.push({
|
||||
key: `item-${index}`,
|
||||
label: index === 0 ? beautifyString(pinName) : "",
|
||||
value:
|
||||
typeof value === "string"
|
||||
? value
|
||||
: JSON.stringify(value, null, 2),
|
||||
metadata,
|
||||
renderer: textRenderer,
|
||||
});
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
return items;
|
||||
}, [data, useEnhancedRenderer, pinName]);
|
||||
|
||||
const copyData = () => {
|
||||
const formattedData = data
|
||||
.map((item) =>
|
||||
typeof item === "object" ? JSON.stringify(item, null, 2) : String(item),
|
||||
)
|
||||
.join("\n\n");
|
||||
|
||||
navigator.clipboard.writeText(formattedData).then(() => {
|
||||
toast({
|
||||
title: `"${beautifyString(pinName)}" data copied to clipboard!`,
|
||||
duration: 2000,
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
return (
|
||||
<Dialog
|
||||
title={
|
||||
<div className="flex items-center justify-between pr-8">
|
||||
<div className="flex items-center gap-2">
|
||||
<Maximize2 size={20} />
|
||||
Full Output Preview
|
||||
</div>
|
||||
{enableEnhancedOutputHandling && (
|
||||
<div className="flex items-center gap-3">
|
||||
<label
|
||||
htmlFor="enhanced-rendering-toggle"
|
||||
className="cursor-pointer select-none text-sm font-normal text-gray-600"
|
||||
>
|
||||
Enhanced Rendering
|
||||
</label>
|
||||
<Switch
|
||||
id="enhanced-rendering-toggle"
|
||||
checked={useEnhancedRenderer}
|
||||
onCheckedChange={setUseEnhancedRenderer}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
}
|
||||
controlled={{
|
||||
isOpen,
|
||||
set: (open) => {
|
||||
if (!open) onClose();
|
||||
},
|
||||
}}
|
||||
onClose={onClose}
|
||||
styling={{
|
||||
maxWidth: "56rem",
|
||||
width: "90vw",
|
||||
height: "90vh",
|
||||
}}
|
||||
>
|
||||
<Dialog.Content>
|
||||
<div className="flex h-full flex-col">
|
||||
<div className="pb-4">
|
||||
<p className="text-sm text-zinc-600">
|
||||
Execution ID: <span className="font-mono text-xs">{execId}</span>
|
||||
<br />
|
||||
Pin:{" "}
|
||||
<span className="font-semibold">{beautifyString(pinName)}</span>
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<div className="flex flex-1 flex-col overflow-hidden">
|
||||
{useEnhancedRenderer && outputItems.length > 0 && (
|
||||
<div className="border-b px-4 py-2">
|
||||
<OutputActions
|
||||
items={outputItems.map((item) => ({
|
||||
value: item.value,
|
||||
metadata: item.metadata,
|
||||
renderer: item.renderer,
|
||||
}))}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
<ScrollArea className="h-full">
|
||||
<div className="p-4">
|
||||
{data.length > 0 ? (
|
||||
useEnhancedRenderer ? (
|
||||
<div className="space-y-4">
|
||||
{outputItems.map((item) => (
|
||||
<OutputItem
|
||||
key={item.key}
|
||||
value={item.value}
|
||||
metadata={item.metadata}
|
||||
renderer={item.renderer}
|
||||
label={item.label}
|
||||
/>
|
||||
))}
|
||||
</div>
|
||||
) : (
|
||||
<div className="space-y-4">
|
||||
{data.map((item, index) => (
|
||||
<div
|
||||
key={index}
|
||||
className="rounded-lg border bg-gray-50 p-4"
|
||||
>
|
||||
<div className="mb-2 flex items-center justify-between">
|
||||
<span className="text-sm font-medium text-gray-600">
|
||||
Item {index + 1} of {data.length}
|
||||
</span>
|
||||
<Button
|
||||
variant="outline"
|
||||
size="sm"
|
||||
onClick={() => {
|
||||
const itemData =
|
||||
typeof item === "object"
|
||||
? JSON.stringify(item, null, 2)
|
||||
: String(item);
|
||||
navigator.clipboard
|
||||
.writeText(itemData)
|
||||
.then(() => {
|
||||
toast({
|
||||
title: `Item ${index + 1} copied to clipboard!`,
|
||||
duration: 2000,
|
||||
});
|
||||
});
|
||||
}}
|
||||
className="flex items-center gap-1"
|
||||
>
|
||||
<Clipboard size={14} />
|
||||
Copy Item
|
||||
</Button>
|
||||
</div>
|
||||
<Separator className="mb-3" />
|
||||
<div className="whitespace-pre-wrap break-words font-mono text-sm">
|
||||
<ContentRenderer
|
||||
value={item}
|
||||
truncateLongData={false}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
)
|
||||
) : (
|
||||
<div className="py-8 text-center text-gray-500">
|
||||
No data available
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</ScrollArea>
|
||||
</div>
|
||||
|
||||
<Dialog.Footer className="flex justify-between">
|
||||
<div className="text-sm text-gray-600">
|
||||
{data.length} item{data.length !== 1 ? "s" : ""} total
|
||||
</div>
|
||||
<div className="flex gap-2">
|
||||
{!useEnhancedRenderer && (
|
||||
<Button
|
||||
variant="outline"
|
||||
onClick={copyData}
|
||||
className="flex items-center gap-1"
|
||||
>
|
||||
<Clipboard size={16} />
|
||||
Copy All
|
||||
</Button>
|
||||
)}
|
||||
<Button onClick={onClose}>Close</Button>
|
||||
</div>
|
||||
</Dialog.Footer>
|
||||
</div>
|
||||
</Dialog.Content>
|
||||
</Dialog>
|
||||
);
|
||||
};
|
||||
|
||||
export default ExpandableOutputDialog;
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,103 @@
|
||||
/* flow.css or index.css */
|
||||
|
||||
body {
|
||||
font-family:
|
||||
-apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen", "Ubuntu",
|
||||
"Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue", sans-serif;
|
||||
}
|
||||
|
||||
code {
|
||||
font-family:
|
||||
source-code-pro, Menlo, Monaco, Consolas, "Courier New", monospace;
|
||||
}
|
||||
|
||||
.modal {
|
||||
position: absolute;
|
||||
top: 50%;
|
||||
left: 50%;
|
||||
right: auto;
|
||||
bottom: auto;
|
||||
margin-right: -50%;
|
||||
transform: translate(-50%, -50%);
|
||||
background: #ffffff;
|
||||
padding: 20px;
|
||||
border: 1px solid #ccc;
|
||||
border-radius: 4px;
|
||||
color: #000000;
|
||||
}
|
||||
|
||||
.overlay {
|
||||
position: fixed;
|
||||
top: 0;
|
||||
left: 0;
|
||||
right: 0;
|
||||
bottom: 0;
|
||||
background-color: rgba(0, 0, 0, 0.75);
|
||||
}
|
||||
|
||||
.modal h2 {
|
||||
margin-top: 0;
|
||||
}
|
||||
|
||||
.modal button {
|
||||
margin-right: 10px;
|
||||
}
|
||||
|
||||
.modal form {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
}
|
||||
|
||||
.modal form div {
|
||||
margin-bottom: 15px;
|
||||
}
|
||||
|
||||
.sidebar {
|
||||
position: fixed;
|
||||
top: 0;
|
||||
left: -600px;
|
||||
width: 350px;
|
||||
height: calc(100vh - 68px); /* Full height minus top offset */
|
||||
background-color: #ffffff;
|
||||
color: #000000;
|
||||
padding: 20px;
|
||||
transition: left 0.3s ease;
|
||||
z-index: 1000;
|
||||
overflow-y: auto;
|
||||
margin-top: 68px; /* Margin to push content below the top fixed area */
|
||||
}
|
||||
|
||||
.sidebar.open {
|
||||
left: 0;
|
||||
}
|
||||
|
||||
.sidebar h3 {
|
||||
margin: 0 0 10px;
|
||||
}
|
||||
|
||||
.sidebar input {
|
||||
margin: 0 0 10px;
|
||||
}
|
||||
|
||||
.sidebarNodeRowStyle {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
background-color: #e2e2e2;
|
||||
padding: 10px;
|
||||
margin-bottom: 10px;
|
||||
border-radius: 10px;
|
||||
cursor: grab;
|
||||
}
|
||||
|
||||
.sidebarNodeRowStyle.dragging {
|
||||
opacity: 0.5;
|
||||
}
|
||||
|
||||
.flow-container {
|
||||
position: absolute;
|
||||
top: 0;
|
||||
left: 0;
|
||||
width: 100vw;
|
||||
height: 100vh;
|
||||
}
|
||||
@@ -0,0 +1,82 @@
|
||||
import React from "react";
|
||||
import {
|
||||
Popover,
|
||||
PopoverContent,
|
||||
PopoverTrigger,
|
||||
} from "@/components/__legacy__/ui/popover";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { MagnifyingGlassIcon } from "@radix-ui/react-icons";
|
||||
import { CustomNode } from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
|
||||
import { GraphSearchContent } from "../NewControlPanel/NewSearchGraph/GraphMenuContent/GraphContent";
|
||||
import {
|
||||
Tooltip,
|
||||
TooltipContent,
|
||||
TooltipTrigger,
|
||||
} from "@/components/atoms/Tooltip/BaseTooltip";
|
||||
import { useGraphMenu } from "../NewControlPanel/NewSearchGraph/GraphMenu/useGraphMenu";
|
||||
|
||||
interface GraphSearchControlProps {
|
||||
nodes: CustomNode[];
|
||||
onNodeSelect: (nodeId: string) => void;
|
||||
onNodeHover?: (nodeId: string | null) => void;
|
||||
}
|
||||
|
||||
export function GraphSearchControl({
|
||||
nodes,
|
||||
onNodeSelect,
|
||||
onNodeHover,
|
||||
}: GraphSearchControlProps) {
|
||||
// Use the same hook as GraphSearchMenu for consistency
|
||||
const {
|
||||
open,
|
||||
searchQuery,
|
||||
setSearchQuery,
|
||||
filteredNodes,
|
||||
handleNodeSelect,
|
||||
handleOpenChange,
|
||||
} = useGraphMenu({
|
||||
nodes,
|
||||
blockMenuSelected: "", // We don't need to track this in the old control panel
|
||||
setBlockMenuSelected: () => {}, // Not needed in this context
|
||||
onNodeSelect,
|
||||
});
|
||||
|
||||
return (
|
||||
<Popover open={open} onOpenChange={handleOpenChange}>
|
||||
<Tooltip delayDuration={500}>
|
||||
<TooltipTrigger asChild>
|
||||
<PopoverTrigger asChild>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="icon"
|
||||
data-id="graph-search-control-trigger"
|
||||
data-testid="graph-search-control-button"
|
||||
name="Search"
|
||||
className="dark:hover:bg-slate-800"
|
||||
>
|
||||
<MagnifyingGlassIcon className="h-5 w-5" />
|
||||
</Button>
|
||||
</PopoverTrigger>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent side="right">Search Graph</TooltipContent>
|
||||
</Tooltip>
|
||||
|
||||
<PopoverContent
|
||||
side="right"
|
||||
sideOffset={22}
|
||||
align="start"
|
||||
alignOffset={-50} // Offset upward to align with control panel top
|
||||
className="absolute -top-3 w-[17rem] rounded-xl border-none p-0 shadow-none md:w-[30rem]"
|
||||
data-id="graph-search-popover-content"
|
||||
>
|
||||
<GraphSearchContent
|
||||
searchQuery={searchQuery}
|
||||
onSearchChange={setSearchQuery}
|
||||
filteredNodes={filteredNodes}
|
||||
onNodeSelect={handleNodeSelect}
|
||||
onNodeHover={onNodeHover}
|
||||
/>
|
||||
</PopoverContent>
|
||||
</Popover>
|
||||
);
|
||||
}
|
||||
@@ -0,0 +1,107 @@
|
||||
import React, { FC, useEffect, useState } from "react";
|
||||
import { Button } from "../../../../../components/__legacy__/ui/button";
|
||||
import { Textarea } from "../../../../../components/__legacy__/ui/textarea";
|
||||
import { Maximize2, Minimize2, Clipboard } from "lucide-react";
|
||||
import { createPortal } from "react-dom";
|
||||
import { toast } from "../../../../../components/molecules/Toast/use-toast";
|
||||
|
||||
interface ModalProps {
|
||||
isOpen: boolean;
|
||||
onClose: () => void;
|
||||
onSave: (value: string) => void;
|
||||
title?: string;
|
||||
defaultValue: string;
|
||||
}
|
||||
|
||||
const InputModalComponent: FC<ModalProps> = ({
|
||||
isOpen,
|
||||
onClose,
|
||||
onSave,
|
||||
title,
|
||||
defaultValue,
|
||||
}) => {
|
||||
const [tempValue, setTempValue] = useState(defaultValue);
|
||||
const [isMaximized, setIsMaximized] = useState(false);
|
||||
|
||||
useEffect(() => {
|
||||
if (isOpen) {
|
||||
setTempValue(defaultValue);
|
||||
setIsMaximized(false);
|
||||
}
|
||||
}, [isOpen, defaultValue]);
|
||||
|
||||
const handleSave = () => {
|
||||
onSave(tempValue);
|
||||
onClose();
|
||||
};
|
||||
|
||||
const toggleSize = () => {
|
||||
setIsMaximized(!isMaximized);
|
||||
};
|
||||
|
||||
const copyValue = () => {
|
||||
navigator.clipboard.writeText(tempValue).then(() => {
|
||||
toast({
|
||||
title: "Input value copied to clipboard!",
|
||||
duration: 2000,
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
if (!isOpen) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const modalContent = (
|
||||
<div
|
||||
id="modal-content"
|
||||
className={`fixed rounded-lg border-[1.5px] bg-white p-5 ${
|
||||
isMaximized ? "inset-[128px] flex flex-col" : `w-[90%] max-w-[800px]`
|
||||
}`}
|
||||
>
|
||||
<h2 className="mb-4 text-center text-lg font-semibold">
|
||||
{title || "Enter input text"}
|
||||
</h2>
|
||||
<div className="nowheel relative flex-grow">
|
||||
<Textarea
|
||||
className="h-full min-h-[200px] w-full resize-none"
|
||||
value={tempValue}
|
||||
onChange={(e) => setTempValue(e.target.value)}
|
||||
/>
|
||||
<div className="absolute bottom-2 right-2 flex space-x-2">
|
||||
<Button onClick={copyValue} size="icon" variant="outline">
|
||||
<Clipboard size={18} />
|
||||
</Button>
|
||||
<Button onClick={toggleSize} size="icon" variant="outline">
|
||||
{isMaximized ? <Minimize2 size={18} /> : <Maximize2 size={18} />}
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
<div className="mt-4 flex justify-end space-x-2">
|
||||
<Button onClick={onClose} variant="outline">
|
||||
Cancel
|
||||
</Button>
|
||||
<Button onClick={handleSave}>Save</Button>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
|
||||
return (
|
||||
<>
|
||||
{isMaximized ? (
|
||||
createPortal(
|
||||
<div className="fixed inset-0 flex items-center justify-center bg-white bg-opacity-60">
|
||||
{modalContent}
|
||||
</div>,
|
||||
document.body,
|
||||
)
|
||||
) : (
|
||||
<div className="nodrag fixed inset-0 flex items-center justify-center bg-white bg-opacity-60">
|
||||
{modalContent}
|
||||
</div>
|
||||
)}
|
||||
</>
|
||||
);
|
||||
};
|
||||
|
||||
export default InputModalComponent;
|
||||
@@ -0,0 +1,163 @@
|
||||
import { BlockIOSubSchema } from "@/lib/autogpt-server-api/types";
|
||||
import {
|
||||
cn,
|
||||
beautifyString,
|
||||
getTypeBgColor,
|
||||
getTypeTextColor,
|
||||
getEffectiveType,
|
||||
} from "@/lib/utils";
|
||||
import { FC, memo, useCallback } from "react";
|
||||
import { Handle, Position } from "@xyflow/react";
|
||||
import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip";
|
||||
|
||||
type HandleProps = {
|
||||
keyName: string;
|
||||
schema: BlockIOSubSchema;
|
||||
isConnected: boolean;
|
||||
isRequired?: boolean;
|
||||
side: "left" | "right";
|
||||
title?: string;
|
||||
className?: string;
|
||||
isBroken?: boolean;
|
||||
};
|
||||
|
||||
// Move the constant out of the component to avoid re-creation on every render.
|
||||
const TYPE_NAME: Record<string, string> = {
|
||||
string: "text",
|
||||
number: "number",
|
||||
integer: "integer",
|
||||
boolean: "true/false",
|
||||
object: "object",
|
||||
array: "list",
|
||||
null: "null",
|
||||
};
|
||||
|
||||
// Extract and memoize the Dot component so that it doesn't re-render unnecessarily.
|
||||
const Dot: FC<{ isConnected: boolean; type?: string; isBroken?: boolean }> =
|
||||
memo(({ isConnected, type, isBroken }) => {
|
||||
const color = isBroken
|
||||
? "border-red-500 bg-red-100 dark:bg-red-900/30"
|
||||
: isConnected
|
||||
? getTypeBgColor(type || "any")
|
||||
: "border-gray-300 dark:border-gray-600";
|
||||
return (
|
||||
<div
|
||||
className={cn(
|
||||
"m-1 h-4 w-4 rounded-full border-2 bg-white transition-colors duration-100 group-hover:bg-gray-300 dark:bg-slate-800 dark:group-hover:bg-gray-700",
|
||||
color,
|
||||
isBroken && "opacity-50",
|
||||
)}
|
||||
/>
|
||||
);
|
||||
});
|
||||
Dot.displayName = "Dot";
|
||||
|
||||
const NodeHandle: FC<HandleProps> = ({
|
||||
keyName,
|
||||
schema,
|
||||
isConnected,
|
||||
isRequired,
|
||||
side,
|
||||
title,
|
||||
className,
|
||||
isBroken = false,
|
||||
}) => {
|
||||
// Extract effective type from schema (handles anyOf/oneOf/allOf wrappers)
|
||||
const effectiveType = getEffectiveType(schema);
|
||||
|
||||
const typeClass = `text-sm ${getTypeTextColor(effectiveType || "any")} ${
|
||||
side === "left" ? "text-left" : "text-right"
|
||||
}`;
|
||||
|
||||
const label = (
|
||||
<div className={cn("flex flex-grow flex-row", isBroken && "opacity-50")}>
|
||||
<span
|
||||
className={cn(
|
||||
"data-sentry-unmask text-m green flex items-end pr-2 text-gray-900 dark:text-gray-100",
|
||||
className,
|
||||
isBroken && "text-red-500 line-through",
|
||||
)}
|
||||
>
|
||||
{title || schema.title || beautifyString(keyName.toLowerCase())}
|
||||
{isRequired ? "*" : ""}
|
||||
</span>
|
||||
<span
|
||||
className={cn(
|
||||
`${typeClass} data-sentry-unmask flex items-end`,
|
||||
isBroken && "text-red-400",
|
||||
)}
|
||||
>
|
||||
({TYPE_NAME[effectiveType as keyof typeof TYPE_NAME] || "any"})
|
||||
</span>
|
||||
</div>
|
||||
);
|
||||
|
||||
// Use a native HTML onContextMenu handler instead of wrapping a large node with a Radix ContextMenu trigger.
|
||||
const handleContextMenu = useCallback(
|
||||
(e: React.MouseEvent<HTMLDivElement>) => {
|
||||
e.preventDefault();
|
||||
// Optionally, you can trigger a custom, lightweight context menu here.
|
||||
},
|
||||
[],
|
||||
);
|
||||
|
||||
if (side === "left") {
|
||||
return (
|
||||
<div
|
||||
key={keyName}
|
||||
className={cn("handle-container", isBroken && "pointer-events-none")}
|
||||
onContextMenu={handleContextMenu}
|
||||
>
|
||||
<Handle
|
||||
type="target"
|
||||
data-testid={`input-handle-${keyName}`}
|
||||
position={Position.Left}
|
||||
id={keyName}
|
||||
className={cn("group -ml-[38px]", isBroken && "cursor-not-allowed")}
|
||||
isConnectable={!isBroken}
|
||||
>
|
||||
<div className="pointer-events-none flex items-center">
|
||||
<Dot
|
||||
isConnected={isConnected}
|
||||
type={effectiveType}
|
||||
isBroken={isBroken}
|
||||
/>
|
||||
{label}
|
||||
</div>
|
||||
</Handle>
|
||||
<InformationTooltip description={schema.description} />
|
||||
</div>
|
||||
);
|
||||
} else {
|
||||
return (
|
||||
<div
|
||||
key={keyName}
|
||||
className={cn(
|
||||
"handle-container justify-end",
|
||||
isBroken && "pointer-events-none",
|
||||
)}
|
||||
onContextMenu={handleContextMenu}
|
||||
>
|
||||
<Handle
|
||||
type="source"
|
||||
data-testid={`output-handle-${keyName}`}
|
||||
position={Position.Right}
|
||||
id={keyName}
|
||||
className={cn("group -mr-[38px]", isBroken && "cursor-not-allowed")}
|
||||
isConnectable={!isBroken}
|
||||
>
|
||||
<div className="pointer-events-none flex items-center">
|
||||
{label}
|
||||
<Dot
|
||||
isConnected={isConnected}
|
||||
type={effectiveType}
|
||||
isBroken={isBroken}
|
||||
/>
|
||||
</div>
|
||||
</Handle>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
export default memo(NodeHandle);
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,158 @@
|
||||
import React, { useContext, useMemo, useState } from "react";
|
||||
import { Button } from "@/components/__legacy__/ui/button";
|
||||
import { Maximize2 } from "lucide-react";
|
||||
import * as Separator from "@radix-ui/react-separator";
|
||||
import { ContentRenderer } from "@/components/__legacy__/ui/render";
|
||||
import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
|
||||
import {
|
||||
globalRegistry,
|
||||
OutputItem,
|
||||
} from "@/components/contextual/OutputRenderers";
|
||||
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
|
||||
|
||||
import { beautifyString } from "@/lib/utils";
|
||||
|
||||
import { BuilderContext } from "./Flow/Flow";
|
||||
import ExpandableOutputDialog from "./ExpandableOutputDialog";
|
||||
|
||||
type NodeOutputsProps = {
|
||||
title?: string;
|
||||
truncateLongData?: boolean;
|
||||
data: { [key: string]: Array<any> };
|
||||
};
|
||||
|
||||
export default function NodeOutputs({
|
||||
title,
|
||||
truncateLongData,
|
||||
data,
|
||||
}: NodeOutputsProps) {
|
||||
const builderContext = useContext(BuilderContext);
|
||||
const enableEnhancedOutputHandling = useGetFlag(
|
||||
Flag.ENABLE_ENHANCED_OUTPUT_HANDLING,
|
||||
);
|
||||
|
||||
const [expandedDialog, setExpandedDialog] = useState<{
|
||||
isOpen: boolean;
|
||||
execId: string;
|
||||
pinName: string;
|
||||
data: any[];
|
||||
} | null>(null);
|
||||
|
||||
if (!builderContext) {
|
||||
throw new Error(
|
||||
"BuilderContext consumer must be inside FlowEditor component",
|
||||
);
|
||||
}
|
||||
|
||||
const { getNodeTitle } = builderContext;
|
||||
|
||||
// Prepare renderers for each item when enhanced mode is enabled
|
||||
const getItemRenderer = useMemo(() => {
|
||||
if (!enableEnhancedOutputHandling) return null;
|
||||
return (item: unknown) => {
|
||||
const metadata: OutputMetadata = {};
|
||||
return globalRegistry.getRenderer(item, metadata);
|
||||
};
|
||||
}, [enableEnhancedOutputHandling]);
|
||||
|
||||
const getBeautifiedPinName = (pin: string) => {
|
||||
if (!pin.startsWith("tools_^_")) {
|
||||
return beautifyString(pin);
|
||||
}
|
||||
// Special handling for tool pins: replace node ID with node title
|
||||
const toolNodeID = pin.slice(8).split("_~_")[0]; // tools_^_{node_id}_~_{field}
|
||||
const toolNodeTitle = getNodeTitle(toolNodeID);
|
||||
return toolNodeTitle
|
||||
? beautifyString(pin.replace(toolNodeID, toolNodeTitle))
|
||||
: beautifyString(pin);
|
||||
};
|
||||
|
||||
const openExpandedView = (pinName: string, pinData: any[]) => {
|
||||
setExpandedDialog({
|
||||
isOpen: true,
|
||||
execId: title || "Node Output",
|
||||
pinName,
|
||||
data: pinData,
|
||||
});
|
||||
};
|
||||
|
||||
const closeExpandedView = () => {
|
||||
setExpandedDialog(null);
|
||||
};
|
||||
return (
|
||||
<div className="m-4 space-y-4">
|
||||
{title && <strong className="mt-2flex">{title}</strong>}
|
||||
{Object.entries(data).map(([pin, dataArray]) => (
|
||||
<div key={pin} className="group">
|
||||
<div className="flex items-center justify-between">
|
||||
<div className="flex items-center">
|
||||
<strong className="mr-2">Pin:</strong>
|
||||
<span>{getBeautifiedPinName(pin)}</span>
|
||||
</div>
|
||||
{(truncateLongData || dataArray.length > 10) && (
|
||||
<Button
|
||||
variant="outline"
|
||||
size="sm"
|
||||
onClick={() => openExpandedView(pin, dataArray)}
|
||||
className="hidden items-center gap-1 group-hover:flex"
|
||||
title="Expand Full View"
|
||||
>
|
||||
<Maximize2 size={14} />
|
||||
Expand
|
||||
</Button>
|
||||
)}
|
||||
</div>
|
||||
<div className="mt-2">
|
||||
<strong className="mr-2">Data:</strong>
|
||||
<div className="mt-1">
|
||||
{dataArray.slice(0, 10).map((item, index) => {
|
||||
const renderer = getItemRenderer?.(item);
|
||||
if (enableEnhancedOutputHandling && renderer) {
|
||||
const metadata: OutputMetadata = {};
|
||||
return (
|
||||
<React.Fragment key={index}>
|
||||
<OutputItem
|
||||
value={item}
|
||||
metadata={metadata}
|
||||
renderer={renderer}
|
||||
/>
|
||||
{index < Math.min(dataArray.length, 10) - 1 && ", "}
|
||||
</React.Fragment>
|
||||
);
|
||||
}
|
||||
return (
|
||||
<React.Fragment key={index}>
|
||||
<ContentRenderer
|
||||
value={item}
|
||||
truncateLongData={truncateLongData}
|
||||
/>
|
||||
{index < Math.min(dataArray.length, 10) - 1 && ", "}
|
||||
</React.Fragment>
|
||||
);
|
||||
})}
|
||||
{dataArray.length > 10 && (
|
||||
<span style={{ color: "#888" }}>
|
||||
<br />
|
||||
<b>⋮</b>
|
||||
<br />
|
||||
<span>and {dataArray.length - 10} more</span>
|
||||
</span>
|
||||
)}
|
||||
</div>
|
||||
<Separator.Root className="my-4 h-[1px] bg-gray-300" />
|
||||
</div>
|
||||
</div>
|
||||
))}
|
||||
|
||||
{expandedDialog && (
|
||||
<ExpandableOutputDialog
|
||||
isOpen={expandedDialog.isOpen}
|
||||
onClose={closeExpandedView}
|
||||
execId={expandedDialog.execId}
|
||||
pinName={expandedDialog.pinName}
|
||||
data={expandedDialog.data}
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -0,0 +1,205 @@
|
||||
import { FC, useCallback, useEffect, useState } from "react";
|
||||
|
||||
import NodeHandle from "@/app/(platform)/build/components/legacy-builder/NodeHandle";
|
||||
import type {
|
||||
BlockIOTableSubSchema,
|
||||
TableCellValue,
|
||||
TableRow,
|
||||
} from "@/lib/autogpt-server-api/types";
|
||||
import type { ConnectedEdge } from "./CustomNode/CustomNode";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { PlusIcon, XIcon } from "@phosphor-icons/react";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { Input } from "@/components/atoms/Input/Input";
|
||||
|
||||
interface NodeTableInputProps {
|
||||
/** Unique identifier for the node in the builder graph */
|
||||
nodeId: string;
|
||||
/** Key identifier for this specific input field within the node */
|
||||
selfKey: string;
|
||||
/** Schema definition for the table structure */
|
||||
schema: BlockIOTableSubSchema;
|
||||
/** Column headers for the table */
|
||||
headers: string[];
|
||||
/** Initial row data for the table */
|
||||
rows?: TableRow[];
|
||||
/** Validation errors mapped by field key */
|
||||
errors: { [key: string]: string | undefined };
|
||||
/** Graph connections between nodes in the builder */
|
||||
connections: ConnectedEdge[];
|
||||
/** Callback when table data changes */
|
||||
handleInputChange: (key: string, value: TableRow[]) => void;
|
||||
/** Callback when input field is clicked (for builder selection) */
|
||||
handleInputClick: (key: string) => void;
|
||||
/** Additional CSS classes */
|
||||
className?: string;
|
||||
/** Display name for the input field */
|
||||
displayName?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Table input component for the workflow builder interface.
|
||||
*
|
||||
* This component is specifically designed for use in the agent builder where users
|
||||
* design workflows with connected nodes. It includes graph connection capabilities
|
||||
* via NodeHandle and is tightly integrated with the builder's state management.
|
||||
*
|
||||
* @warning Do NOT use this component in runtime/execution contexts (like RunAgentInputs).
|
||||
* For runtime table inputs, use a simpler implementation without builder-specific features.
|
||||
*
|
||||
* @example
|
||||
* ```tsx
|
||||
* <NodeTableInput
|
||||
* nodeId="node-123"
|
||||
* selfKey="table_data"
|
||||
* schema={tableSchema}
|
||||
* headers={["Name", "Value"]}
|
||||
* rows={existingData}
|
||||
* connections={graphConnections}
|
||||
* handleInputChange={handleChange}
|
||||
* handleInputClick={handleClick}
|
||||
* errors={{}}
|
||||
* />
|
||||
* ```
|
||||
*
|
||||
* @see Used exclusively in: `/app/(platform)/build/components/legacy-builder/NodeInputs.tsx`
|
||||
*/
|
||||
export const NodeTableInput: FC<NodeTableInputProps> = ({
|
||||
nodeId,
|
||||
selfKey,
|
||||
schema,
|
||||
headers,
|
||||
rows = [],
|
||||
errors,
|
||||
connections,
|
||||
handleInputChange,
|
||||
handleInputClick: _handleInputClick,
|
||||
className,
|
||||
displayName,
|
||||
}) => {
|
||||
const [tableData, setTableData] = useState<TableRow[]>(rows);
|
||||
|
||||
// Sync with parent state when rows change
|
||||
useEffect(() => {
|
||||
setTableData(rows);
|
||||
}, [rows]);
|
||||
|
||||
const isConnected = (key: string) =>
|
||||
connections.some((c) => c.targetHandle === key && c.target === nodeId);
|
||||
|
||||
const updateTableData = useCallback(
|
||||
(newData: TableRow[]) => {
|
||||
setTableData(newData);
|
||||
handleInputChange(selfKey, newData);
|
||||
},
|
||||
[selfKey, handleInputChange],
|
||||
);
|
||||
|
||||
const updateCell = (
|
||||
rowIndex: number,
|
||||
header: string,
|
||||
value: TableCellValue,
|
||||
) => {
|
||||
const newData = [...tableData];
|
||||
if (!newData[rowIndex]) {
|
||||
newData[rowIndex] = {};
|
||||
}
|
||||
newData[rowIndex][header] = value;
|
||||
updateTableData(newData);
|
||||
};
|
||||
|
||||
const addRow = () => {
|
||||
if (!headers || headers.length === 0) {
|
||||
return;
|
||||
}
|
||||
const newRow: TableRow = {};
|
||||
headers.forEach((header) => {
|
||||
newRow[header] = "";
|
||||
});
|
||||
updateTableData([...tableData, newRow]);
|
||||
};
|
||||
|
||||
const removeRow = (index: number) => {
|
||||
const newData = tableData.filter((_, i) => i !== index);
|
||||
updateTableData(newData);
|
||||
};
|
||||
|
||||
return (
|
||||
<div className={cn("w-full space-y-2", className)}>
|
||||
<NodeHandle
|
||||
title={displayName || selfKey}
|
||||
keyName={selfKey}
|
||||
schema={schema}
|
||||
isConnected={isConnected(selfKey)}
|
||||
isRequired={false}
|
||||
side="left"
|
||||
/>
|
||||
|
||||
{!isConnected(selfKey) && (
|
||||
<div className="nodrag overflow-x-auto">
|
||||
<table className="w-full border-collapse">
|
||||
<thead>
|
||||
<tr>
|
||||
{headers.map((header, index) => (
|
||||
<th
|
||||
key={index}
|
||||
className="border border-gray-300 bg-gray-100 px-2 py-1 text-left text-sm font-medium dark:border-gray-600 dark:bg-gray-800"
|
||||
>
|
||||
{header}
|
||||
</th>
|
||||
))}
|
||||
<th className="w-10"></th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{tableData.map((row, rowIndex) => (
|
||||
<tr key={rowIndex}>
|
||||
{headers.map((header, colIndex) => (
|
||||
<td
|
||||
key={colIndex}
|
||||
className="border border-gray-300 p-1 dark:border-gray-600"
|
||||
>
|
||||
<Input
|
||||
id={`${selfKey}-${rowIndex}-${header}`}
|
||||
label={header}
|
||||
type="text"
|
||||
value={String(row[header] || "")}
|
||||
onChange={(e) =>
|
||||
updateCell(rowIndex, header, e.target.value)
|
||||
}
|
||||
className="h-8 w-full"
|
||||
placeholder={`Enter ${header}`}
|
||||
/>
|
||||
</td>
|
||||
))}
|
||||
<td className="p-1">
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="small"
|
||||
onClick={() => removeRow(rowIndex)}
|
||||
className="h-8 w-8 p-0"
|
||||
>
|
||||
<XIcon />
|
||||
</Button>
|
||||
</td>
|
||||
</tr>
|
||||
))}
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
<Button
|
||||
className="mt-2 bg-gray-200 font-normal text-black hover:text-white dark:bg-gray-700 dark:text-white dark:hover:bg-gray-600"
|
||||
onClick={addRow}
|
||||
size="small"
|
||||
>
|
||||
<PlusIcon className="mr-2" /> Add Row
|
||||
</Button>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{errors[selfKey] && (
|
||||
<span className="text-sm text-red-500">{errors[selfKey]}</span>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
};
|
||||
@@ -0,0 +1,311 @@
|
||||
"use client";
|
||||
|
||||
import React, { useEffect, useState, useRef } from "react";
|
||||
import ReactMarkdown from "react-markdown";
|
||||
|
||||
import type { GraphID } from "@/lib/autogpt-server-api/types";
|
||||
import { askOtto } from "@/app/(platform)/build/actions";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { environment } from "@/services/environment";
|
||||
|
||||
interface Message {
|
||||
type: "user" | "assistant";
|
||||
content: string;
|
||||
}
|
||||
|
||||
export default function OttoChatWidget({
|
||||
graphID,
|
||||
className,
|
||||
}: {
|
||||
graphID?: GraphID;
|
||||
className?: string;
|
||||
}): React.ReactNode {
|
||||
const [isOpen, setIsOpen] = useState(false);
|
||||
const [messages, setMessages] = useState<Message[]>([]);
|
||||
const [inputValue, setInputValue] = useState("");
|
||||
const [isProcessing, setIsProcessing] = useState(false);
|
||||
const [includeGraphData, setIncludeGraphData] = useState(false);
|
||||
const messagesEndRef = useRef<HTMLDivElement>(null);
|
||||
|
||||
useEffect(() => {
|
||||
// Add welcome message when component mounts
|
||||
if (messages.length === 0) {
|
||||
setMessages([
|
||||
{
|
||||
type: "assistant",
|
||||
content: "Hello, I am Otto! Ask me anything about AutoGPT!",
|
||||
},
|
||||
]);
|
||||
}
|
||||
}, [messages.length]);
|
||||
|
||||
useEffect(() => {
|
||||
// Scroll to bottom whenever messages change
|
||||
messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
|
||||
}, [messages]);
|
||||
|
||||
const handleSubmit = async (e: React.FormEvent) => {
|
||||
e.preventDefault();
|
||||
if (!inputValue.trim() || isProcessing) return;
|
||||
|
||||
const userMessage = inputValue.trim();
|
||||
setInputValue("");
|
||||
setIsProcessing(true);
|
||||
|
||||
// Add user message to chat
|
||||
setMessages((prev) => [...prev, { type: "user", content: userMessage }]);
|
||||
|
||||
// Add temporary processing message
|
||||
setMessages((prev) => [
|
||||
...prev,
|
||||
{ type: "assistant", content: "Processing your question..." },
|
||||
]);
|
||||
|
||||
const conversationHistory = messages.reduce<
|
||||
{ query: string; response: string }[]
|
||||
>((acc, msg, i, arr) => {
|
||||
if (
|
||||
msg.type === "user" &&
|
||||
i + 1 < arr.length &&
|
||||
arr[i + 1].type === "assistant" &&
|
||||
arr[i + 1].content !== "Processing your question..."
|
||||
) {
|
||||
acc.push({
|
||||
query: msg.content,
|
||||
response: arr[i + 1].content,
|
||||
});
|
||||
}
|
||||
return acc;
|
||||
}, []);
|
||||
|
||||
try {
|
||||
const data = await askOtto(
|
||||
userMessage,
|
||||
conversationHistory,
|
||||
includeGraphData,
|
||||
graphID,
|
||||
);
|
||||
|
||||
// Check if the response contains an error
|
||||
if ("error" in data && data.error === true) {
|
||||
// Handle different error types
|
||||
let errorMessage =
|
||||
"Sorry, there was an error processing your message. Please try again.";
|
||||
|
||||
if (data.answer === "Authentication required") {
|
||||
errorMessage = "Please sign in to use the chat feature.";
|
||||
} else if (data.answer === "Failed to connect to Otto service") {
|
||||
errorMessage =
|
||||
"Otto service is currently unavailable. Please try again later.";
|
||||
} else if (data.answer.includes("timed out")) {
|
||||
errorMessage = "Request timed out. Please try again later.";
|
||||
}
|
||||
|
||||
// Remove processing message and add error message
|
||||
setMessages((prev) => [
|
||||
...prev.slice(0, -1),
|
||||
{ type: "assistant", content: errorMessage },
|
||||
]);
|
||||
} else {
|
||||
// Remove processing message and add actual response
|
||||
setMessages((prev) => [
|
||||
...prev.slice(0, -1),
|
||||
{ type: "assistant", content: data.answer },
|
||||
]);
|
||||
}
|
||||
} catch (error) {
|
||||
console.error("Unexpected error in chat widget:", error);
|
||||
setMessages((prev) => [
|
||||
...prev.slice(0, -1),
|
||||
{
|
||||
type: "assistant",
|
||||
content:
|
||||
"An unexpected error occurred. Please refresh the page and try again.",
|
||||
},
|
||||
]);
|
||||
} finally {
|
||||
setIsProcessing(false);
|
||||
setIncludeGraphData(false);
|
||||
}
|
||||
};
|
||||
|
||||
// Don't render the chat widget if we're not on the build page or in local mode
|
||||
if (environment.isLocal()) {
|
||||
return null;
|
||||
}
|
||||
|
||||
if (!isOpen) {
|
||||
return (
|
||||
<div className={className}>
|
||||
<button
|
||||
onClick={() => setIsOpen(true)}
|
||||
className="inline-flex h-14 w-14 items-center justify-center whitespace-nowrap rounded-2xl bg-[rgba(65,65,64,1)] text-neutral-50 shadow transition-colors hover:bg-neutral-900/90 focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-neutral-950 disabled:pointer-events-none disabled:opacity-50 dark:bg-neutral-50 dark:text-neutral-900 dark:hover:bg-neutral-50/90 dark:focus-visible:ring-neutral-300"
|
||||
aria-label="Open chat widget"
|
||||
>
|
||||
<svg
|
||||
viewBox="0 0 24 24"
|
||||
className="h-6 w-6"
|
||||
stroke="currentColor"
|
||||
strokeWidth="2"
|
||||
fill="none"
|
||||
strokeLinecap="round"
|
||||
strokeLinejoin="round"
|
||||
>
|
||||
<path d="M21 15a2 2 0 0 1-2 2H7l-4 4V5a2 2 0 0 1 2-2h14a2 2 0 0 1 2 2z" />
|
||||
</svg>
|
||||
</button>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<div
|
||||
className={cn(
|
||||
"flex h-[600px] w-[600px] flex-col rounded-lg border bg-background shadow-xl",
|
||||
className,
|
||||
"z-40",
|
||||
)}
|
||||
>
|
||||
{/* Header */}
|
||||
<div className="flex items-center justify-between border-b p-4">
|
||||
<h2 className="font-semibold">Otto Assistant</h2>
|
||||
<button
|
||||
onClick={() => setIsOpen(false)}
|
||||
className="text-muted-foreground transition-colors hover:text-foreground"
|
||||
aria-label="Close chat"
|
||||
>
|
||||
<svg
|
||||
viewBox="0 0 24 24"
|
||||
className="h-5 w-5"
|
||||
stroke="currentColor"
|
||||
strokeWidth="2"
|
||||
fill="none"
|
||||
strokeLinecap="round"
|
||||
strokeLinejoin="round"
|
||||
>
|
||||
<line x1="18" y1="6" x2="6" y2="18" />
|
||||
<line x1="6" y1="6" x2="18" y2="18" />
|
||||
</svg>
|
||||
</button>
|
||||
</div>
|
||||
|
||||
{/* Messages */}
|
||||
<div className="flex-1 space-y-4 overflow-y-auto p-4">
|
||||
{messages.map((message, index) => (
|
||||
<div
|
||||
key={index}
|
||||
className={`flex ${message.type === "user" ? "justify-end" : "justify-start"}`}
|
||||
>
|
||||
<div
|
||||
className={`max-w-[80%] rounded-lg p-3 ${
|
||||
message.type === "user"
|
||||
? "ml-4 bg-black text-white"
|
||||
: "mr-4 bg-[#8b5cf6] text-white"
|
||||
}`}
|
||||
>
|
||||
{message.type === "user" ? (
|
||||
message.content
|
||||
) : (
|
||||
<ReactMarkdown
|
||||
className="prose prose-sm dark:prose-invert max-w-none"
|
||||
components={{
|
||||
p: ({ children }) => (
|
||||
<p className="mb-2 last:mb-0">{children}</p>
|
||||
),
|
||||
code(props) {
|
||||
const { children, className, node: _, ...rest } = props;
|
||||
const match = /language-(\w+)/.exec(className || "");
|
||||
return match ? (
|
||||
<pre className="overflow-x-auto rounded-md bg-muted-foreground/20 p-3">
|
||||
<code className="font-mono text-sm" {...rest}>
|
||||
{children}
|
||||
</code>
|
||||
</pre>
|
||||
) : (
|
||||
<code
|
||||
className="rounded-md bg-muted-foreground/20 px-1 py-0.5 font-mono text-sm"
|
||||
{...rest}
|
||||
>
|
||||
{children}
|
||||
</code>
|
||||
);
|
||||
},
|
||||
ul: ({ children }) => (
|
||||
<ul className="mb-2 list-disc pl-4 last:mb-0">
|
||||
{children}
|
||||
</ul>
|
||||
),
|
||||
ol: ({ children }) => (
|
||||
<ol className="mb-2 list-decimal pl-4 last:mb-0">
|
||||
{children}
|
||||
</ol>
|
||||
),
|
||||
li: ({ children }) => (
|
||||
<li className="mb-1 last:mb-0">{children}</li>
|
||||
),
|
||||
}}
|
||||
>
|
||||
{message.content}
|
||||
</ReactMarkdown>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
))}
|
||||
<div ref={messagesEndRef} />
|
||||
</div>
|
||||
|
||||
{/* Input */}
|
||||
<form onSubmit={handleSubmit} className="border-t p-4">
|
||||
<div className="flex flex-col gap-2">
|
||||
<div className="flex gap-2">
|
||||
<input
|
||||
type="text"
|
||||
value={inputValue}
|
||||
onChange={(e) => setInputValue(e.target.value)}
|
||||
placeholder="Type your message..."
|
||||
className="flex-1 rounded-md border bg-background px-3 py-2 focus:outline-none focus:ring-2 focus:ring-primary"
|
||||
disabled={isProcessing}
|
||||
/>
|
||||
<button
|
||||
type="submit"
|
||||
disabled={isProcessing}
|
||||
className="rounded-md bg-primary px-4 py-2 text-primary-foreground transition-colors hover:bg-primary/90 disabled:opacity-50"
|
||||
>
|
||||
Send
|
||||
</button>
|
||||
</div>
|
||||
{graphID && (
|
||||
<button
|
||||
type="button"
|
||||
onClick={() => {
|
||||
setIncludeGraphData((prev) => !prev);
|
||||
}}
|
||||
className={`flex items-center gap-2 rounded border px-2 py-1.5 text-sm transition-all duration-200 ${
|
||||
includeGraphData
|
||||
? "border-primary/30 bg-primary/10 text-primary hover:shadow-[0_0_10px_3px_rgba(139,92,246,0.3)]"
|
||||
: "border-transparent bg-muted text-muted-foreground hover:bg-muted/80 hover:shadow-[0_0_10px_3px_rgba(139,92,246,0.15)]"
|
||||
}`}
|
||||
>
|
||||
<svg
|
||||
viewBox="0 0 24 24"
|
||||
className="h-4 w-4"
|
||||
stroke="currentColor"
|
||||
strokeWidth="2"
|
||||
fill="none"
|
||||
strokeLinecap="round"
|
||||
strokeLinejoin="round"
|
||||
>
|
||||
<rect x="3" y="3" width="18" height="18" rx="2" ry="2" />
|
||||
<circle cx="8.5" cy="8.5" r="1.5" />
|
||||
<polyline points="21 15 16 10 5 21" />
|
||||
</svg>
|
||||
{includeGraphData
|
||||
? "Graph data will be included"
|
||||
: "Include graph data"}
|
||||
</button>
|
||||
)}
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -0,0 +1,50 @@
|
||||
import React, { FC } from "react";
|
||||
import { Button } from "../../../../../components/__legacy__/ui/button";
|
||||
import { NodeExecutionResult } from "@/lib/autogpt-server-api/types";
|
||||
import DataTable from "./DataTable";
|
||||
import { Separator } from "@/components/__legacy__/ui/separator";
|
||||
|
||||
interface OutputModalProps {
|
||||
isOpen: boolean;
|
||||
onClose: () => void;
|
||||
executionResults: {
|
||||
execId: string;
|
||||
data: NodeExecutionResult["output_data"];
|
||||
}[];
|
||||
}
|
||||
|
||||
const OutputModalComponent: FC<OutputModalProps> = ({
|
||||
isOpen,
|
||||
onClose,
|
||||
executionResults,
|
||||
}) => {
|
||||
if (!isOpen) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="nodrag nowheel fixed inset-0 flex items-center justify-center bg-white bg-opacity-60">
|
||||
<div className="w-[500px] max-w-[90%] rounded-lg border-[1.5px] bg-white p-5">
|
||||
<strong>Output Data History</strong>
|
||||
<div className="my-2 max-h-[384px] flex-grow overflow-y-auto rounded-md p-2">
|
||||
{executionResults.map((data, i) => (
|
||||
<>
|
||||
<DataTable
|
||||
key={i}
|
||||
title={data.execId}
|
||||
data={data.data}
|
||||
truncateLongData={true}
|
||||
/>
|
||||
<Separator />
|
||||
</>
|
||||
))}
|
||||
</div>
|
||||
<div className="mt-2.5 flex justify-end gap-2.5">
|
||||
<Button onClick={onClose}>Close</Button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
export default OutputModalComponent;
|
||||
@@ -0,0 +1,96 @@
|
||||
import { useCallback } from "react";
|
||||
|
||||
import { AgentRunDraftView } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import type {
|
||||
CredentialsMetaInput,
|
||||
Graph,
|
||||
} from "@/lib/autogpt-server-api/types";
|
||||
|
||||
interface RunInputDialogProps {
|
||||
isOpen: boolean;
|
||||
doClose: () => void;
|
||||
graph: Graph;
|
||||
doRun?: (
|
||||
inputs: Record<string, any>,
|
||||
credentialsInputs: Record<string, CredentialsMetaInput>,
|
||||
) => Promise<void> | void;
|
||||
doCreateSchedule?: (
|
||||
cronExpression: string,
|
||||
scheduleName: string,
|
||||
inputs: Record<string, any>,
|
||||
credentialsInputs: Record<string, CredentialsMetaInput>,
|
||||
) => Promise<void> | void;
|
||||
}
|
||||
|
||||
export function RunnerInputDialog({
|
||||
isOpen,
|
||||
doClose,
|
||||
graph,
|
||||
doRun,
|
||||
doCreateSchedule,
|
||||
}: RunInputDialogProps) {
|
||||
const handleRun = useCallback(
|
||||
doRun
|
||||
? async (
|
||||
inputs: Record<string, any>,
|
||||
credentials_inputs: Record<string, CredentialsMetaInput>,
|
||||
) => {
|
||||
await doRun(inputs, credentials_inputs);
|
||||
doClose();
|
||||
}
|
||||
: async () => {},
|
||||
[doRun, doClose],
|
||||
);
|
||||
|
||||
const handleSchedule = useCallback(
|
||||
doCreateSchedule
|
||||
? async (
|
||||
cronExpression: string,
|
||||
scheduleName: string,
|
||||
inputs: Record<string, any>,
|
||||
credentialsInputs: Record<string, CredentialsMetaInput>,
|
||||
) => {
|
||||
await doCreateSchedule(
|
||||
cronExpression,
|
||||
scheduleName,
|
||||
inputs,
|
||||
credentialsInputs,
|
||||
);
|
||||
doClose();
|
||||
}
|
||||
: async () => {},
|
||||
[doCreateSchedule, doClose],
|
||||
);
|
||||
|
||||
return (
|
||||
<Dialog
|
||||
title="Run your agent"
|
||||
controlled={{
|
||||
isOpen,
|
||||
set: (open) => {
|
||||
if (!open) doClose();
|
||||
},
|
||||
}}
|
||||
onClose={doClose}
|
||||
styling={{
|
||||
maxWidth: "56rem",
|
||||
width: "90vw",
|
||||
}}
|
||||
>
|
||||
<Dialog.Content>
|
||||
<div className="flex flex-col p-10">
|
||||
<p className="mt-2 text-sm text-zinc-600">{graph.name}</p>
|
||||
<AgentRunDraftView
|
||||
className="p-0"
|
||||
graph={graph}
|
||||
doRun={doRun ? handleRun : undefined}
|
||||
onRun={doRun ? undefined : doClose}
|
||||
doCreateSchedule={doCreateSchedule ? handleSchedule : undefined}
|
||||
onCreateSchedule={doCreateSchedule ? undefined : doClose}
|
||||
/>
|
||||
</div>
|
||||
</Dialog.Content>
|
||||
</Dialog>
|
||||
);
|
||||
}
|
||||
@@ -0,0 +1,156 @@
|
||||
import React from "react";
|
||||
import {
|
||||
Sheet,
|
||||
SheetContent,
|
||||
SheetHeader,
|
||||
SheetTitle,
|
||||
SheetDescription,
|
||||
} from "@/components/__legacy__/ui/sheet";
|
||||
import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
|
||||
import { Label } from "@/components/__legacy__/ui/label";
|
||||
import { Textarea } from "@/components/__legacy__/ui/textarea";
|
||||
import { Button } from "@/components/__legacy__/ui/button";
|
||||
import { Clipboard } from "lucide-react";
|
||||
import { useToast } from "@/components/molecules/Toast/use-toast";
|
||||
|
||||
export type OutputNodeInfo = {
|
||||
metadata: {
|
||||
name: string;
|
||||
description: string;
|
||||
};
|
||||
result?: any;
|
||||
};
|
||||
|
||||
interface OutputModalProps {
|
||||
isOpen: boolean;
|
||||
doClose: () => void;
|
||||
outputs: OutputNodeInfo[];
|
||||
graphExecutionError?: string | null;
|
||||
}
|
||||
|
||||
const formatOutput = (output: any): string => {
|
||||
if (typeof output === "object") {
|
||||
try {
|
||||
if (
|
||||
Array.isArray(output) &&
|
||||
output.every((item) => typeof item === "string")
|
||||
) {
|
||||
return output.join("\n").replace(/\\n/g, "\n");
|
||||
}
|
||||
return JSON.stringify(output, null, 2);
|
||||
} catch (error) {
|
||||
return `Error formatting output: ${(error as Error).message}`;
|
||||
}
|
||||
}
|
||||
if (typeof output === "string") {
|
||||
return output.replace(/\\n/g, "\n");
|
||||
}
|
||||
return String(output);
|
||||
};
|
||||
|
||||
export function RunnerOutputUI({
|
||||
isOpen,
|
||||
doClose,
|
||||
outputs,
|
||||
graphExecutionError,
|
||||
}: OutputModalProps) {
|
||||
const { toast } = useToast();
|
||||
|
||||
const copyOutput = (name: string, output: any) => {
|
||||
const formattedOutput = formatOutput(output);
|
||||
navigator.clipboard.writeText(formattedOutput).then(() => {
|
||||
toast({
|
||||
title: `"${name}" output copied to clipboard!`,
|
||||
duration: 2000,
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
const adjustTextareaHeight = (textarea: HTMLTextAreaElement) => {
|
||||
textarea.style.height = "auto";
|
||||
textarea.style.height = `${textarea.scrollHeight}px`;
|
||||
};
|
||||
|
||||
return (
|
||||
<Sheet open={isOpen} onOpenChange={doClose}>
|
||||
<SheetContent
|
||||
side="right"
|
||||
className="flex h-full w-full flex-col overflow-hidden sm:max-w-[600px]"
|
||||
>
|
||||
<SheetHeader className="px-2 py-2">
|
||||
<SheetTitle className="text-xl">Run Outputs</SheetTitle>
|
||||
<SheetDescription className="mt-1 text-sm">
|
||||
View the outputs from your agent run.
|
||||
</SheetDescription>
|
||||
</SheetHeader>
|
||||
<div className="flex-grow overflow-y-auto px-2 py-2">
|
||||
<ScrollArea className="h-full overflow-auto pr-4">
|
||||
<div className="space-y-4">
|
||||
{graphExecutionError && (
|
||||
<div className="rounded-md border border-red-200 bg-red-50 p-3 dark:border-red-800 dark:bg-red-900/20">
|
||||
<p className="text-sm text-red-800 dark:text-red-200">
|
||||
<strong>Error:</strong> {graphExecutionError}
|
||||
</p>
|
||||
</div>
|
||||
)}
|
||||
{outputs && outputs.length > 0 ? (
|
||||
outputs.map((output, i) => (
|
||||
<div key={i} className="space-y-1">
|
||||
<Label className="text-base font-semibold">
|
||||
{output.metadata.name || "Unnamed Output"}
|
||||
</Label>
|
||||
|
||||
{output.metadata.description && (
|
||||
<Label className="block text-sm text-gray-600">
|
||||
{output.metadata.description}
|
||||
</Label>
|
||||
)}
|
||||
|
||||
<div className="group relative rounded-md bg-gray-100 p-2">
|
||||
<Button
|
||||
className="absolute right-1 top-1 z-10 m-1 hidden p-2 group-hover:block"
|
||||
variant="outline"
|
||||
size="icon"
|
||||
onClick={() =>
|
||||
copyOutput(
|
||||
output.metadata.name || "Unnamed Output",
|
||||
output.result,
|
||||
)
|
||||
}
|
||||
title="Copy Output"
|
||||
>
|
||||
<Clipboard size={18} />
|
||||
</Button>
|
||||
<Textarea
|
||||
readOnly
|
||||
value={formatOutput(output.result ?? "No output yet")}
|
||||
className="w-full resize-none whitespace-pre-wrap break-words border-none bg-transparent text-sm"
|
||||
style={{
|
||||
height: "auto",
|
||||
minHeight: "2.5rem",
|
||||
maxHeight: "400px",
|
||||
}}
|
||||
ref={(el) => {
|
||||
if (el) {
|
||||
adjustTextareaHeight(el);
|
||||
if (el.scrollHeight > 400) {
|
||||
el.style.height = "400px";
|
||||
}
|
||||
}
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
))
|
||||
) : (
|
||||
<p>No output blocks available.</p>
|
||||
)}
|
||||
</div>
|
||||
</ScrollArea>
|
||||
</div>
|
||||
</SheetContent>
|
||||
</Sheet>
|
||||
);
|
||||
}
|
||||
|
||||
export default RunnerOutputUI;
|
||||
@@ -0,0 +1,117 @@
|
||||
import React, {
|
||||
useState,
|
||||
forwardRef,
|
||||
useImperativeHandle,
|
||||
useMemo,
|
||||
} from "react";
|
||||
import { Node } from "@xyflow/react";
|
||||
import { CustomNodeData } from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
|
||||
import {
|
||||
BlockUIType,
|
||||
CredentialsMetaInput,
|
||||
Graph,
|
||||
} from "@/lib/autogpt-server-api/types";
|
||||
import RunnerOutputUI, { OutputNodeInfo } from "./RunnerOutputUI";
|
||||
import { RunnerInputDialog } from "./RunnerInputUI";
|
||||
|
||||
interface RunnerUIWrapperProps {
|
||||
graph: Graph;
|
||||
nodes: Node<CustomNodeData>[];
|
||||
graphExecutionError?: string | null;
|
||||
saveAndRun: (
|
||||
inputs: Record<string, any>,
|
||||
credentialsInputs: Record<string, CredentialsMetaInput>,
|
||||
) => void;
|
||||
createRunSchedule: (
|
||||
cronExpression: string,
|
||||
scheduleName: string,
|
||||
inputs: Record<string, any>,
|
||||
credentialsInputs: Record<string, CredentialsMetaInput>,
|
||||
) => Promise<void>;
|
||||
}
|
||||
|
||||
export interface RunnerUIWrapperRef {
|
||||
openRunInputDialog: () => void;
|
||||
openRunnerOutput: () => void;
|
||||
runOrOpenInput: () => void;
|
||||
}
|
||||
|
||||
const RunnerUIWrapper = forwardRef<RunnerUIWrapperRef, RunnerUIWrapperProps>(
|
||||
(
|
||||
{ graph, nodes, graphExecutionError, saveAndRun, createRunSchedule },
|
||||
ref,
|
||||
) => {
|
||||
const [isRunInputDialogOpen, setIsRunInputDialogOpen] = useState(false);
|
||||
const [isRunnerOutputOpen, setIsRunnerOutputOpen] = useState(false);
|
||||
|
||||
const graphInputs = graph.input_schema.properties;
|
||||
|
||||
const graphOutputs = useMemo((): OutputNodeInfo[] => {
|
||||
const outputNodes = nodes.filter(
|
||||
(node) => node.data.uiType === BlockUIType.OUTPUT,
|
||||
);
|
||||
|
||||
return outputNodes.map(
|
||||
(node) =>
|
||||
({
|
||||
metadata: {
|
||||
name: node.data.hardcodedValues.name || "Output",
|
||||
description:
|
||||
node.data.hardcodedValues.description ||
|
||||
"Output from the agent",
|
||||
},
|
||||
result:
|
||||
(node.data.executionResults as any)
|
||||
?.map((result: any) => result?.data?.output)
|
||||
.join("\n--\n") || "No output yet",
|
||||
}) satisfies OutputNodeInfo,
|
||||
);
|
||||
}, [nodes]);
|
||||
|
||||
const openRunInputDialog = () => setIsRunInputDialogOpen(true);
|
||||
const openRunnerOutput = () => setIsRunnerOutputOpen(true);
|
||||
|
||||
const runOrOpenInput = () => {
|
||||
if (
|
||||
Object.keys(graphInputs).length > 0 ||
|
||||
Object.keys(graph.credentials_input_schema.properties).length > 0
|
||||
) {
|
||||
openRunInputDialog();
|
||||
} else {
|
||||
saveAndRun({}, {});
|
||||
}
|
||||
};
|
||||
|
||||
useImperativeHandle(
|
||||
ref,
|
||||
() =>
|
||||
({
|
||||
openRunInputDialog,
|
||||
openRunnerOutput,
|
||||
runOrOpenInput,
|
||||
}) satisfies RunnerUIWrapperRef,
|
||||
);
|
||||
|
||||
return (
|
||||
<>
|
||||
<RunnerInputDialog
|
||||
isOpen={isRunInputDialogOpen}
|
||||
doClose={() => setIsRunInputDialogOpen(false)}
|
||||
graph={graph}
|
||||
doRun={saveAndRun}
|
||||
doCreateSchedule={createRunSchedule}
|
||||
/>
|
||||
<RunnerOutputUI
|
||||
isOpen={isRunnerOutputOpen}
|
||||
doClose={() => setIsRunnerOutputOpen(false)}
|
||||
outputs={graphOutputs}
|
||||
graphExecutionError={graphExecutionError}
|
||||
/>
|
||||
</>
|
||||
);
|
||||
},
|
||||
);
|
||||
|
||||
RunnerUIWrapper.displayName = "RunnerUIWrapper";
|
||||
|
||||
export default RunnerUIWrapper;
|
||||
@@ -0,0 +1,217 @@
|
||||
import React, { useEffect, useState } from "react";
|
||||
import {
|
||||
Popover,
|
||||
PopoverContent,
|
||||
PopoverTrigger,
|
||||
} from "@/components/__legacy__/ui/popover";
|
||||
import { Card, CardContent, CardFooter } from "@/components/__legacy__/ui/card";
|
||||
import { Input } from "@/components/__legacy__/ui/input";
|
||||
import { Button } from "@/components/__legacy__/ui/button";
|
||||
import { GraphMeta } from "@/lib/autogpt-server-api";
|
||||
import { Label } from "@/components/__legacy__/ui/label";
|
||||
import { IconSave } from "@/components/__legacy__/ui/icons";
|
||||
import {
|
||||
Tooltip,
|
||||
TooltipContent,
|
||||
TooltipTrigger,
|
||||
} from "@/components/atoms/Tooltip/BaseTooltip";
|
||||
import { useToast } from "@/components/molecules/Toast/use-toast";
|
||||
import { useQueryClient } from "@tanstack/react-query";
|
||||
import { getGetV2ListMySubmissionsQueryKey } from "@/app/api/__generated__/endpoints/store/store";
|
||||
import { CronExpressionDialog } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/cron-scheduler-dialog";
|
||||
import { humanizeCronExpression } from "@/lib/cron-expression-utils";
|
||||
import { CalendarClockIcon } from "lucide-react";
|
||||
|
||||
interface SaveControlProps {
|
||||
agentMeta: GraphMeta | null;
|
||||
agentName: string;
|
||||
agentDescription: string;
|
||||
agentRecommendedScheduleCron: string;
|
||||
canSave: boolean;
|
||||
onSave: () => Promise<void>;
|
||||
onNameChange: (name: string) => void;
|
||||
onDescriptionChange: (description: string) => void;
|
||||
onRecommendedScheduleCronChange: (cron: string) => void;
|
||||
pinSavePopover: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* A SaveControl component to be used within the ControlPanel. It allows the user to save the agent.
|
||||
* @param {Object} SaveControlProps - The properties of the SaveControl component.
|
||||
* @param {GraphMeta | null} SaveControlProps.agentMeta - The agent's metadata, or null if creating a new agent.
|
||||
* @param {string} SaveControlProps.agentName - The agent's name.
|
||||
* @param {string} SaveControlProps.agentDescription - The agent's description.
|
||||
* @param {boolean} SaveControlProps.canSave - Whether the button to save the agent should be enabled.
|
||||
* @param {() => void} SaveControlProps.onSave - Function to save the agent.
|
||||
* @param {(name: string) => void} SaveControlProps.onNameChange - Function to handle name changes.
|
||||
* @param {(description: string) => void} SaveControlProps.onDescriptionChange - Function to handle description changes.
|
||||
* @returns The SaveControl component.
|
||||
*/
|
||||
export const SaveControl = ({
|
||||
agentMeta,
|
||||
canSave,
|
||||
onSave,
|
||||
agentName,
|
||||
onNameChange,
|
||||
agentDescription,
|
||||
onDescriptionChange,
|
||||
agentRecommendedScheduleCron,
|
||||
onRecommendedScheduleCronChange,
|
||||
pinSavePopover,
|
||||
}: SaveControlProps) => {
|
||||
/**
|
||||
* Note for improvement:
|
||||
* At the moment we are leveraging onDescriptionChange and onNameChange to handle the changes in the description and name of the agent.
|
||||
* We should migrate this to be handled with form controls and a form library.
|
||||
*/
|
||||
|
||||
const { toast } = useToast();
|
||||
const queryClient = useQueryClient();
|
||||
const [cronScheduleDialogOpen, setCronScheduleDialogOpen] = useState(false);
|
||||
|
||||
const handleScheduleChange = (cronExpression: string) => {
|
||||
onRecommendedScheduleCronChange(cronExpression);
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
const handleKeyDown = async (event: KeyboardEvent) => {
|
||||
if ((event.ctrlKey || event.metaKey) && event.key === "s") {
|
||||
event.preventDefault(); // Stop the browser default action
|
||||
await onSave(); // Call your save function
|
||||
queryClient.invalidateQueries({
|
||||
queryKey: getGetV2ListMySubmissionsQueryKey(),
|
||||
});
|
||||
toast({
|
||||
duration: 2000,
|
||||
title: "All changes saved successfully!",
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
window.addEventListener("keydown", handleKeyDown);
|
||||
|
||||
return () => {
|
||||
window.removeEventListener("keydown", handleKeyDown);
|
||||
};
|
||||
}, [onSave, toast, queryClient]);
|
||||
|
||||
return (
|
||||
<Popover open={pinSavePopover ? true : undefined}>
|
||||
<Tooltip delayDuration={500}>
|
||||
<TooltipTrigger asChild>
|
||||
<PopoverTrigger asChild>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="icon"
|
||||
data-id="save-control-popover-trigger"
|
||||
data-testid="blocks-control-save-button"
|
||||
name="Save"
|
||||
>
|
||||
<IconSave className="dark:text-gray-300" />
|
||||
</Button>
|
||||
</PopoverTrigger>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent side="right">Save</TooltipContent>
|
||||
</Tooltip>
|
||||
<PopoverContent
|
||||
side="right"
|
||||
sideOffset={15}
|
||||
align="start"
|
||||
data-id="save-control-popover-content"
|
||||
className="w-96 max-w-[400px]"
|
||||
>
|
||||
<Card className="border-none shadow-none dark:bg-slate-900">
|
||||
<CardContent className="p-4">
|
||||
<div className="space-y-3">
|
||||
<div>
|
||||
<Label htmlFor="name" className="dark:text-gray-300">
|
||||
Name
|
||||
</Label>
|
||||
<Input
|
||||
id="name"
|
||||
placeholder="Enter your agent name"
|
||||
value={agentName}
|
||||
onChange={(e) => onNameChange(e.target.value)}
|
||||
data-id="save-control-name-input"
|
||||
data-testid="save-control-name-input"
|
||||
maxLength={100}
|
||||
className="mt-1"
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<Label htmlFor="description" className="dark:text-gray-300">
|
||||
Description
|
||||
</Label>
|
||||
<Input
|
||||
id="description"
|
||||
placeholder="Your agent description"
|
||||
value={agentDescription}
|
||||
onChange={(e) => onDescriptionChange(e.target.value)}
|
||||
data-id="save-control-description-input"
|
||||
data-testid="save-control-description-input"
|
||||
maxLength={500}
|
||||
className="mt-1"
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<Label className="dark:text-gray-300">
|
||||
Recommended Schedule
|
||||
</Label>
|
||||
<Button
|
||||
variant="outline"
|
||||
onClick={() => setCronScheduleDialogOpen(true)}
|
||||
className="mt-1 w-full min-w-0 justify-start text-sm"
|
||||
data-id="save-control-recommended-schedule-button"
|
||||
data-testid="save-control-recommended-schedule-button"
|
||||
>
|
||||
<CalendarClockIcon className="mr-2 h-4 w-4 flex-shrink-0" />
|
||||
<span className="min-w-0 flex-1 truncate">
|
||||
{agentRecommendedScheduleCron
|
||||
? humanizeCronExpression(agentRecommendedScheduleCron)
|
||||
: "Set schedule"}
|
||||
</span>
|
||||
</Button>
|
||||
</div>
|
||||
|
||||
{agentMeta?.version && (
|
||||
<div>
|
||||
<Label htmlFor="version" className="dark:text-gray-300">
|
||||
Version
|
||||
</Label>
|
||||
<Input
|
||||
id="version"
|
||||
placeholder="Version"
|
||||
value={agentMeta?.version || "-"}
|
||||
disabled
|
||||
data-testid="save-control-version-output"
|
||||
className="mt-1"
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</CardContent>
|
||||
<CardFooter className="flex flex-col items-stretch gap-2">
|
||||
<Button
|
||||
className="w-full dark:bg-slate-700 dark:text-slate-100 dark:hover:bg-slate-800"
|
||||
onClick={onSave}
|
||||
data-id="save-control-save-agent"
|
||||
data-testid="save-control-save-agent-button"
|
||||
disabled={!canSave}
|
||||
>
|
||||
Save Agent
|
||||
</Button>
|
||||
</CardFooter>
|
||||
</Card>
|
||||
</PopoverContent>
|
||||
<CronExpressionDialog
|
||||
open={cronScheduleDialogOpen}
|
||||
setOpen={setCronScheduleDialogOpen}
|
||||
onSubmit={handleScheduleChange}
|
||||
defaultCronExpression={agentRecommendedScheduleCron}
|
||||
title="Recommended Schedule"
|
||||
/>
|
||||
</Popover>
|
||||
);
|
||||
};
|
||||
@@ -0,0 +1,95 @@
|
||||
import { CustomNodeData } from "./CustomNode/CustomNode";
|
||||
import { CustomEdgeData } from "./CustomEdge/CustomEdge";
|
||||
import { Edge } from "@xyflow/react";
|
||||
|
||||
type ActionType =
|
||||
| "ADD_NODE"
|
||||
| "DELETE_NODE"
|
||||
| "ADD_EDGE"
|
||||
| "DELETE_EDGE"
|
||||
| "UPDATE_NODE"
|
||||
| "MOVE_NODE"
|
||||
| "UPDATE_INPUT"
|
||||
| "UPDATE_NODE_POSITION";
|
||||
|
||||
type AddNodePayload = { node: CustomNodeData };
|
||||
type DeleteNodePayload = { nodeId: string };
|
||||
type AddEdgePayload = { edge: Edge<CustomEdgeData> };
|
||||
type DeleteEdgePayload = { edgeId: string };
|
||||
type UpdateNodePayload = { nodeId: string; newData: Partial<CustomNodeData> };
|
||||
type MoveNodePayload = { nodeId: string; position: { x: number; y: number } };
|
||||
type UpdateInputPayload = {
|
||||
nodeId: string;
|
||||
oldValues: { [key: string]: any };
|
||||
newValues: { [key: string]: any };
|
||||
};
|
||||
type UpdateNodePositionPayload = {
|
||||
nodeId: string;
|
||||
oldPosition: { x: number; y: number };
|
||||
newPosition: { x: number; y: number };
|
||||
};
|
||||
|
||||
type ActionPayload =
|
||||
| AddNodePayload
|
||||
| DeleteNodePayload
|
||||
| AddEdgePayload
|
||||
| DeleteEdgePayload
|
||||
| UpdateNodePayload
|
||||
| MoveNodePayload
|
||||
| UpdateInputPayload
|
||||
| UpdateNodePositionPayload;
|
||||
|
||||
type Action = {
|
||||
type: ActionType;
|
||||
payload: ActionPayload;
|
||||
undo: () => void;
|
||||
redo: () => void;
|
||||
};
|
||||
|
||||
class History {
|
||||
private past: Action[] = [];
|
||||
private future: Action[] = [];
|
||||
|
||||
push(action: Action) {
|
||||
this.past.push(action);
|
||||
this.future = [];
|
||||
}
|
||||
|
||||
undo() {
|
||||
const action = this.past.pop();
|
||||
if (action) {
|
||||
action.undo();
|
||||
this.future.push(action);
|
||||
}
|
||||
}
|
||||
|
||||
redo() {
|
||||
const action = this.future.pop();
|
||||
if (action) {
|
||||
action.redo();
|
||||
this.past.push(action);
|
||||
}
|
||||
}
|
||||
|
||||
canUndo(): boolean {
|
||||
return this.past.length > 0;
|
||||
}
|
||||
|
||||
canRedo(): boolean {
|
||||
return this.future.length > 0;
|
||||
}
|
||||
|
||||
clear() {
|
||||
this.past = [];
|
||||
this.future = [];
|
||||
}
|
||||
|
||||
getHistoryState() {
|
||||
return {
|
||||
past: [...this.past],
|
||||
future: [...this.future],
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
export const history = new History();
|
||||
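The History class above records undo/redo thunks per action rather than snapshotting state. A minimal usage sketch, assuming a hypothetical setNodePosition helper on the caller's side (only the history singleton and the Action shape come from this file, and the import path is an assumption):

// Illustrative only: recording a node move so it can be undone/redone.
// setNodePosition is an assumed helper, not part of this diff.
import { history } from "./history";

function recordNodeMove(
  nodeId: string,
  oldPos: { x: number; y: number },
  newPos: { x: number; y: number },
  setNodePosition: (id: string, pos: { x: number; y: number }) => void,
) {
  history.push({
    type: "MOVE_NODE",
    payload: { nodeId, position: newPos },
    undo: () => setNodePosition(nodeId, oldPos),
    redo: () => setNodePosition(nodeId, newPos),
  });
}

// Later, e.g. from keyboard shortcuts:
// if (history.canUndo()) history.undo();
// if (history.canRedo()) history.redo();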
@@ -0,0 +1,569 @@
|
||||
import Shepherd from "shepherd.js";
|
||||
import "shepherd.js/dist/css/shepherd.css";
|
||||
import { Key, storage } from "@/services/storage/local-storage";
|
||||
import { analytics } from "@/services/analytics";
|
||||
|
||||
export const startTutorial = (
|
||||
emptyNodeList: (forceEmpty: boolean) => boolean,
|
||||
setPinBlocksPopover: (value: boolean) => void,
|
||||
setPinSavePopover: (value: boolean) => void,
|
||||
) => {
|
||||
const tour = new Shepherd.Tour({
|
||||
useModalOverlay: true,
|
||||
defaultStepOptions: {
|
||||
cancelIcon: { enabled: true },
|
||||
scrollTo: { behavior: "smooth", block: "center" },
|
||||
},
|
||||
});
|
||||
|
||||
// CSS classes for disabling and highlighting blocks
|
||||
const disableClass = "disable-blocks";
|
||||
const highlightClass = "highlight-block";
|
||||
let isConnecting = false;
|
||||
|
||||
// Helper function to disable all blocks except the target block
|
||||
const disableOtherBlocks = (targetBlockSelector: string) => {
|
||||
document.querySelectorAll('[data-id^="block-card-"]').forEach((block) => {
|
||||
block.classList.toggle(disableClass, !block.matches(targetBlockSelector));
|
||||
block.classList.toggle(
|
||||
highlightClass,
|
||||
block.matches(targetBlockSelector),
|
||||
);
|
||||
});
|
||||
};
|
||||
|
||||
// Helper function to enable all blocks
|
||||
const enableAllBlocks = () => {
|
||||
document.querySelectorAll('[data-id^="block-card-"]').forEach((block) => {
|
||||
block.classList.remove(disableClass, highlightClass);
|
||||
});
|
||||
};
|
||||
|
||||
// Inject CSS for disabling and highlighting blocks
|
||||
const injectStyles = () => {
|
||||
const style = document.createElement("style");
|
||||
style.textContent = `
|
||||
.${disableClass} {
|
||||
pointer-events: none;
|
||||
opacity: 0.5;
|
||||
}
|
||||
.${highlightClass} {
|
||||
background-color: #ffeb3b;
|
||||
border: 2px solid #fbc02d;
|
||||
transition: background-color 0.3s, border-color 0.3s;
|
||||
}
|
||||
`;
|
||||
document.head.appendChild(style);
|
||||
};
|
||||
|
||||
// Helper function to check if an element is present in the DOM
|
||||
const waitForElement = (selector: string): Promise<void> => {
|
||||
return new Promise((resolve) => {
|
||||
const checkElement = () => {
|
||||
if (document.querySelector(selector)) {
|
||||
resolve();
|
||||
} else {
|
||||
setTimeout(checkElement, 10);
|
||||
}
|
||||
};
|
||||
checkElement();
|
||||
});
|
||||
};
|
||||
|
||||
// Function to detect the correct connection and advance the tour
|
||||
const detectConnection = () => {
|
||||
const checkForConnection = () => {
|
||||
const correctConnection = document.querySelector(
|
||||
'[data-testid^="rf__edge-"]',
|
||||
);
|
||||
if (correctConnection) {
|
||||
tour.show("press-run-again");
|
||||
} else {
|
||||
setTimeout(checkForConnection, 100);
|
||||
}
|
||||
};
|
||||
|
||||
checkForConnection();
|
||||
};
|
||||
|
||||
// Define state management functions to handle connection state
|
||||
function startConnecting() {
|
||||
isConnecting = true;
|
||||
}
|
||||
|
||||
function stopConnecting() {
|
||||
isConnecting = false;
|
||||
}
|
||||
|
||||
// Reset connection state when revisiting the step
|
||||
function resetConnectionState() {
|
||||
stopConnecting();
|
||||
}
|
||||
|
||||
// Event handlers for mouse down and up to manage connection state
|
||||
function handleMouseDown() {
|
||||
startConnecting();
|
||||
setTimeout(() => {
|
||||
if (isConnecting) {
|
||||
tour.next();
|
||||
}
|
||||
}, 100);
|
||||
}
|
||||
// Event handler for mouse up to check if the connection was successful
|
||||
function handleMouseUp(event: { target: any }) {
|
||||
const target = event.target;
|
||||
const validConnectionPoint = document.querySelector(
|
||||
'[data-testid^="rf__node-"]:nth-child(2) [data-id$="-a-target"]',
|
||||
);
|
||||
|
||||
if (validConnectionPoint && !validConnectionPoint.contains(target)) {
|
||||
setTimeout(() => {
|
||||
if (!document.querySelector('[data-testid^="rf__edge-"]')) {
|
||||
stopConnecting();
|
||||
tour.show("connect-blocks-output");
|
||||
}
|
||||
}, 200);
|
||||
} else {
|
||||
stopConnecting();
|
||||
}
|
||||
}
|
||||
|
||||
// Define the fitViewToScreen function
|
||||
const fitViewToScreen = () => {
|
||||
const fitViewButton = document.querySelector(
|
||||
".react-flow__controls-fitview",
|
||||
) as HTMLButtonElement;
|
||||
if (fitViewButton) {
|
||||
fitViewButton.click();
|
||||
}
|
||||
};
|
||||
|
||||
injectStyles();
|
||||
|
||||
const warningText = emptyNodeList(false)
|
||||
? ""
|
||||
: "<br/><br/><b>Caution: Clicking next will start a tutorial and will clear the current flow.</b>";
|
||||
|
||||
tour.addStep({
|
||||
id: "starting-step",
|
||||
title: "Welcome to the Tutorial",
|
||||
text: `This is the AutoGPT builder! ${warningText}`,
|
||||
buttons: [
|
||||
{
|
||||
text: "Skip Tutorial",
|
||||
action: () => {
|
||||
tour.cancel(); // Ends the tour
|
||||
storage.set(Key.SHEPHERD_TOUR, "skipped"); // Set the tutorial as skipped in local storage
|
||||
},
|
||||
classes: "shepherd-button-secondary", // Optionally add a class for styling the skip button differently
|
||||
},
|
||||
{
|
||||
text: "Next",
|
||||
action: () => {
|
||||
emptyNodeList(true);
|
||||
tour.next();
|
||||
},
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
tour.addStep({
|
||||
id: "open-block-step",
|
||||
title: "Open Blocks Menu",
|
||||
text: "Please click the block button to open the blocks menu.",
|
||||
attachTo: {
|
||||
element: '[data-id="blocks-control-popover-trigger"]',
|
||||
on: "right",
|
||||
},
|
||||
advanceOn: {
|
||||
selector: '[data-id="blocks-control-popover-trigger"]',
|
||||
event: "click",
|
||||
},
|
||||
buttons: [],
|
||||
});
|
||||
|
||||
tour.addStep({
|
||||
id: "scroll-block-menu",
|
||||
title: "Scroll Down or Search",
|
||||
text: 'Scroll down or search in the blocks menu for the "Calculator Block" and press the block to add it.',
|
||||
attachTo: {
|
||||
element: '[data-id="blocks-control-popover-content"]',
|
||||
on: "right",
|
||||
},
|
||||
buttons: [],
|
||||
beforeShowPromise: () =>
|
||||
waitForElement('[data-id="blocks-control-popover-content"]').then(() => {
|
||||
disableOtherBlocks(
|
||||
'[data-id="block-card-b1ab9b19-67a6-406d-abf5-2dba76d00c79"]',
|
||||
);
|
||||
}),
|
||||
advanceOn: {
|
||||
selector: '[data-id="block-card-b1ab9b19-67a6-406d-abf5-2dba76d00c79"]',
|
||||
event: "click",
|
||||
},
|
||||
when: {
|
||||
show: () => setPinBlocksPopover(true),
|
||||
hide: enableAllBlocks,
|
||||
},
|
||||
});
|
||||
|
||||
tour.addStep({
|
||||
id: "focus-new-block",
|
||||
title: "New Block",
|
||||
text: "This is the Calculator Block! Let's go over how it works.",
|
||||
attachTo: { element: `[data-id="custom-node-1"]`, on: "left" },
|
||||
beforeShowPromise: () => waitForElement('[data-id="custom-node-1"]'),
|
||||
buttons: [
|
||||
{
|
||||
text: "Next",
|
||||
action: tour.next,
|
||||
},
|
||||
],
|
||||
when: {
|
||||
show: () => {
|
||||
setPinBlocksPopover(false);
|
||||
setTimeout(() => {
|
||||
fitViewToScreen();
|
||||
}, 100);
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
tour.addStep({
|
||||
id: "input-to-block",
|
||||
title: "Input to the Block",
|
||||
text: "This is the input pin for the block. You can input the output of other blocks here; this block takes numbers as input.",
|
||||
attachTo: { element: '[data-nodeid="1"]', on: "left" },
|
||||
buttons: [
|
||||
{
|
||||
text: "Back",
|
||||
action: tour.back,
|
||||
},
|
||||
{
|
||||
text: "Next",
|
||||
action: tour.next,
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
tour.addStep({
|
||||
id: "output-from-block",
|
||||
title: "Output from the Block",
|
||||
text: "This is the output pin for the block. You can connect this to another block to pass the output along.",
|
||||
attachTo: { element: '[data-handlepos="right"]', on: "right" },
|
||||
buttons: [
|
||||
{
|
||||
text: "Back",
|
||||
action: tour.back,
|
||||
},
|
||||
{
|
||||
text: "Next",
|
||||
action: tour.next,
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
tour.addStep({
|
||||
id: "select-operation-and-input",
|
||||
title: "Select Operation and Input Numbers",
|
||||
text: "Select any mathematical operation you'd like to perform, and enter numbers in both input fields.",
|
||||
attachTo: { element: '[data-id="input-handles"]', on: "right" },
|
||||
buttons: [
|
||||
{
|
||||
text: "Back",
|
||||
action: tour.back,
|
||||
},
|
||||
{
|
||||
text: "Next",
|
||||
action: tour.next,
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
tour.addStep({
|
||||
id: "press-initial-save-button",
|
||||
title: "Press Save",
|
||||
text: "First we need to save the flow before we can run it!",
|
||||
attachTo: {
|
||||
element: '[data-id="save-control-popover-trigger"]',
|
||||
on: "left",
|
||||
},
|
||||
advanceOn: {
|
||||
selector: '[data-id="save-control-popover-trigger"]',
|
||||
event: "click",
|
||||
},
|
||||
buttons: [
|
||||
{
|
||||
text: "Back",
|
||||
action: tour.back,
|
||||
},
|
||||
],
|
||||
when: {
|
||||
hide: () => setPinSavePopover(true),
|
||||
},
|
||||
});
|
||||
|
||||
tour.addStep({
|
||||
id: "save-agent-details",
|
||||
title: "Save the Agent",
|
||||
text: "Enter a name for your agent, add an optional description, and then click 'Save agent' to save your flow.",
|
||||
attachTo: {
|
||||
element: '[data-id="save-control-popover-content"]',
|
||||
on: "top",
|
||||
},
|
||||
buttons: [],
|
||||
beforeShowPromise: () =>
|
||||
waitForElement('[data-id="save-control-popover-content"]'),
|
||||
advanceOn: {
|
||||
selector: '[data-id="save-control-save-agent"]',
|
||||
event: "click",
|
||||
},
|
||||
when: {
|
||||
hide: () => setPinSavePopover(false),
|
||||
},
|
||||
});
|
||||
|
||||
tour.addStep({
|
||||
id: "press-run",
|
||||
title: "Press Run",
|
||||
text: "Start your first flow by pressing the Run button!",
|
||||
attachTo: {
|
||||
element: '[data-tutorial-id="primary-action-run-agent"]',
|
||||
on: "top",
|
||||
},
|
||||
advanceOn: {
|
||||
selector: '[data-tutorial-id="primary-action-run-agent"]',
|
||||
event: "click",
|
||||
},
|
||||
buttons: [],
|
||||
beforeShowPromise: () =>
|
||||
waitForElement('[data-tutorial-id="primary-action-run-agent"]'),
|
||||
when: {
|
||||
hide: () => {
|
||||
setTimeout(() => {
|
||||
fitViewToScreen();
|
||||
}, 500);
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
tour.addStep({
|
||||
id: "wait-for-processing",
|
||||
title: "Processing",
|
||||
text: "Let's wait for the block to finish being processed...",
|
||||
attachTo: {
|
||||
element: '[data-id^="badge-"][data-id$="-QUEUED"]',
|
||||
on: "bottom",
|
||||
},
|
||||
buttons: [],
|
||||
beforeShowPromise: () =>
|
||||
waitForElement('[data-id^="badge-"][data-id$="-QUEUED"]').then(
|
||||
fitViewToScreen,
|
||||
),
|
||||
when: {
|
||||
show: () => {
|
||||
waitForElement('[data-id^="badge-"][data-id$="-COMPLETED"]').then(
|
||||
() => {
|
||||
tour.next();
|
||||
},
|
||||
);
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
tour.addStep({
|
||||
id: "check-output",
|
||||
title: "Check the Output",
|
||||
text: "Check here to see the output of the block after running the flow.",
|
||||
attachTo: { element: '[data-id="latest-output"]', on: "top" },
|
||||
beforeShowPromise: () =>
|
||||
new Promise((resolve) => {
|
||||
setTimeout(() => {
|
||||
waitForElement('[data-id="latest-output"]').then(resolve);
|
||||
}, 100);
|
||||
}),
|
||||
buttons: [
|
||||
{
|
||||
text: "Next",
|
||||
action: tour.next,
|
||||
},
|
||||
],
|
||||
when: {
|
||||
show: () => {
|
||||
fitViewToScreen();
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
tour.addStep({
|
||||
id: "copy-paste-block",
|
||||
title: "Copy and Paste the Block",
|
||||
text: "Let’s duplicate this block. Click and hold the block with your mouse, then press Ctrl+C (Cmd+C on Mac) to copy and Ctrl+V (Cmd+V on Mac) to paste.",
|
||||
attachTo: { element: '[data-testid^="rf__node-"]', on: "top" },
|
||||
buttons: [
|
||||
{
|
||||
text: "Back",
|
||||
action: tour.back,
|
||||
},
|
||||
],
|
||||
when: {
|
||||
show: () => {
|
||||
fitViewToScreen();
|
||||
waitForElement('[data-testid^="rf__node-"]:nth-child(2)').then(() => {
|
||||
tour.next();
|
||||
});
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
tour.addStep({
|
||||
id: "focus-second-block",
|
||||
title: "Focus on the New Block",
|
||||
text: "This is your copied Calculator Block. Now, let’s move it to the side of the first block.",
|
||||
attachTo: { element: '[data-testid^="rf__node-"]:nth-child(2)', on: "top" },
|
||||
beforeShowPromise: () =>
|
||||
waitForElement('[data-testid^="rf__node-"]:nth-child(2)'),
|
||||
buttons: [
|
||||
{
|
||||
text: "Next",
|
||||
action: tour.next,
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
tour.addStep({
|
||||
id: "connect-blocks-output",
|
||||
title: "Connect the Blocks: Output",
|
||||
text: "Now, let's connect the output of the first Calculator Block to the input of the second Calculator Block. Drag from the output pin of the first block to the input pin (A) of the second block.",
|
||||
attachTo: {
|
||||
element:
|
||||
'[data-testid^="rf__node-"]:first-child [data-id$="-result-source"]',
|
||||
on: "bottom",
|
||||
},
|
||||
|
||||
buttons: [
|
||||
{
|
||||
text: "Back",
|
||||
action: tour.back,
|
||||
},
|
||||
],
|
||||
beforeShowPromise: () => {
|
||||
return waitForElement(
|
||||
'[data-testid^="rf__node-"]:first-child [data-id$="-result-source"]',
|
||||
);
|
||||
},
|
||||
when: {
|
||||
show: () => {
|
||||
fitViewToScreen();
|
||||
resetConnectionState(); // Reset state when revisiting this step
|
||||
tour.modal.show();
|
||||
const outputPin = document.querySelector(
|
||||
'[data-testid^="rf__node-"]:first-child [data-id$="-result-source"]',
|
||||
);
|
||||
if (outputPin) {
|
||||
outputPin.addEventListener("mousedown", handleMouseDown);
|
||||
}
|
||||
},
|
||||
hide: () => {
|
||||
const outputPin = document.querySelector(
|
||||
'[data-testid^="rf__node-"]:first-child [data-id$="-result-source"]',
|
||||
);
|
||||
if (outputPin) {
|
||||
outputPin.removeEventListener("mousedown", handleMouseDown);
|
||||
}
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
tour.addStep({
|
||||
id: "connect-blocks-input",
|
||||
title: "Connect the Blocks: Input",
|
||||
text: "Now, connect the output to the input pin of the second block (A).",
|
||||
attachTo: {
|
||||
element: '[data-testid^="rf__node-"]:nth-child(2) [data-id$="-a-target"]',
|
||||
on: "top",
|
||||
},
|
||||
buttons: [],
|
||||
beforeShowPromise: () => {
|
||||
return waitForElement(
|
||||
'[data-testid^="rf__node-"]:nth-child(2) [data-id$="-a-target"]',
|
||||
).then(() => {
|
||||
detectConnection();
|
||||
});
|
||||
},
|
||||
when: {
|
||||
show: () => {
|
||||
tour.modal.show();
|
||||
document.addEventListener("mouseup", handleMouseUp, true);
|
||||
},
|
||||
hide: () => {
|
||||
tour.modal.hide();
|
||||
document.removeEventListener("mouseup", handleMouseUp, true);
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
tour.addStep({
|
||||
id: "press-run-again",
|
||||
title: "Press Run Again",
|
||||
text: "Now, press the Run button again to execute the flow with the new Calculator Block added!",
|
||||
attachTo: {
|
||||
element: '[data-tutorial-id="primary-action-run-agent"]',
|
||||
on: "top",
|
||||
},
|
||||
advanceOn: {
|
||||
selector: '[data-tutorial-id="primary-action-run-agent"]',
|
||||
event: "click",
|
||||
},
|
||||
buttons: [],
|
||||
beforeShowPromise: () =>
|
||||
waitForElement('[data-tutorial-id="primary-action-run-agent"]'),
|
||||
when: {
|
||||
hide: () => {
|
||||
setTimeout(() => {
|
||||
fitViewToScreen();
|
||||
}, 500);
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
tour.addStep({
|
||||
id: "congratulations",
|
||||
title: "Congratulations!",
|
||||
text: "You have successfully created your first flow. Watch for the outputs in the blocks!",
|
||||
beforeShowPromise: () => waitForElement('[data-id="latest-output"]'),
|
||||
when: {
|
||||
show: () => tour.modal.hide(),
|
||||
},
|
||||
buttons: [
|
||||
{
|
||||
text: "Finish",
|
||||
action: tour.complete,
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
// Unpin blocks and save menu when the tour is completed or canceled
|
||||
tour.on("complete", () => {
|
||||
setPinBlocksPopover(false);
|
||||
setPinSavePopover(false);
|
||||
storage.set(Key.SHEPHERD_TOUR, "completed"); // Optionally mark the tutorial as completed
|
||||
});
|
||||
|
||||
for (const step of tour.steps) {
step.on("show", () => {
console.debug("sendTutorialStep");

analytics.sendGAEvent("event", "tutorial_step_shown", { value: step.id });
});
}

tour.on("cancel", () => {
|
||||
setPinBlocksPopover(false);
|
||||
setPinSavePopover(false);
|
||||
storage.set(Key.SHEPHERD_TOUR, "canceled"); // Optionally mark the tutorial as canceled
|
||||
});
|
||||
|
||||
tour.start();
|
||||
};
|
||||
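A hedged sketch of how a builder page might invoke this tour; the caller-side helpers (nodesAreEmpty and the two popover pinning setters) are assumptions about the consumer, and only startTutorial, storage, and Key.SHEPHERD_TOUR come from this diff (the tutorial import path is an assumption):

// Illustrative only: start the tour once for users who haven't completed or skipped it.
import { startTutorial } from "./tutorial";
import { Key, storage } from "@/services/storage/local-storage";

function maybeStartTutorial(
  nodesAreEmpty: (forceEmpty: boolean) => boolean,
  setPinBlocksPopover: (pinned: boolean) => void,
  setPinSavePopover: (pinned: boolean) => void,
) {
  // The tour writes "completed", "skipped", or "canceled" to this key when it ends.
  if (!storage.get(Key.SHEPHERD_TOUR)) {
    startTutorial(nodesAreEmpty, setPinBlocksPopover, setPinSavePopover);
  }
}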
@@ -0,0 +1,142 @@
|
||||
import { useCallback } from "react";
|
||||
import { Node, Edge, useReactFlow } from "@xyflow/react";
|
||||
import { Key, storage } from "@/services/storage/local-storage";
|
||||
import { ConnectedEdge } from "./CustomNode/CustomNode";
|
||||
|
||||
interface CopyableData {
|
||||
nodes: Node[];
|
||||
edges: Edge[];
|
||||
}
|
||||
|
||||
export function useCopyPaste(getNextNodeId: () => string) {
|
||||
const { setNodes, addEdges, getNodes, getEdges, getViewport } =
|
||||
useReactFlow();
|
||||
|
||||
const handleCopyPaste = useCallback(
|
||||
(event: KeyboardEvent) => {
|
||||
if (event.ctrlKey || event.metaKey) {
|
||||
if (event.key === "c" || event.key === "C") {
|
||||
const selectedNodes = getNodes().filter((node) => node.selected);
|
||||
const selectedNodeIds = new Set(selectedNodes.map((node) => node.id));
|
||||
|
||||
// Only copy edges where both source and target nodes are selected
|
||||
const selectedEdges = getEdges().filter(
|
||||
(edge) =>
|
||||
edge.selected &&
|
||||
selectedNodeIds.has(edge.source) &&
|
||||
selectedNodeIds.has(edge.target),
|
||||
);
|
||||
|
||||
const copiedData: CopyableData = {
|
||||
nodes: selectedNodes.map((node) => ({
|
||||
...node,
|
||||
data: {
|
||||
...node.data,
|
||||
connections: node.data.connections || [], // Preserve connections
|
||||
},
|
||||
})),
|
||||
edges: selectedEdges,
|
||||
};
|
||||
|
||||
storage.set(Key.COPIED_FLOW_DATA, JSON.stringify(copiedData));
|
||||
}
|
||||
if (event.key === "v" || event.key === "V") {
|
||||
const copiedDataString = storage.get(Key.COPIED_FLOW_DATA);
|
||||
if (copiedDataString) {
|
||||
const copiedData = JSON.parse(copiedDataString) as CopyableData;
|
||||
const oldToNewIdMap: Record<string, string> = {};
|
||||
|
||||
// Get fresh viewport values at paste time to ensure correct positioning
|
||||
const { x, y, zoom } = getViewport();
|
||||
const viewportCenter = {
|
||||
x: (window.innerWidth / 2 - x) / zoom,
|
||||
y: (window.innerHeight / 2 - y) / zoom,
|
||||
};
|
||||
|
||||
let minX = Infinity,
|
||||
minY = Infinity,
|
||||
maxX = -Infinity,
|
||||
maxY = -Infinity;
|
||||
copiedData.nodes.forEach((node: Node) => {
|
||||
minX = Math.min(minX, node.position.x);
|
||||
minY = Math.min(minY, node.position.y);
|
||||
maxX = Math.max(maxX, node.position.x);
|
||||
maxY = Math.max(maxY, node.position.y);
|
||||
});
|
||||
|
||||
const offsetX = viewportCenter.x - (minX + maxX) / 2;
|
||||
const offsetY = viewportCenter.y - (minY + maxY) / 2;
|
||||
|
||||
const pastedNodes = copiedData.nodes.map((node: Node) => {
|
||||
const newNodeId = getNextNodeId();
|
||||
oldToNewIdMap[node.id] = newNodeId;
|
||||
return {
|
||||
...node,
|
||||
id: newNodeId, // Generate unique ID for the pasted node
|
||||
selected: true, // Select the pasted nodes so they're visible
|
||||
position: {
|
||||
x: node.position.x + offsetX,
|
||||
y: node.position.y + offsetY,
|
||||
},
|
||||
data: {
|
||||
...node.data,
|
||||
backend_id: undefined, // Clear backend_id so the new node.id is used when saving
|
||||
connections: node.data.connections || [], // Preserve connections
|
||||
status: undefined,
|
||||
executionResults: undefined,
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
const pastedEdges = copiedData.edges.map((edge) => {
|
||||
const newSourceId = oldToNewIdMap[edge.source] ?? edge.source;
|
||||
const newTargetId = oldToNewIdMap[edge.target] ?? edge.target;
|
||||
return {
|
||||
...edge,
|
||||
id: `${newSourceId}_${edge.sourceHandle}_${newTargetId}_${edge.targetHandle}_${Date.now()}`,
|
||||
source: newSourceId,
|
||||
target: newTargetId,
|
||||
};
|
||||
});
|
||||
|
||||
setNodes((existingNodes) => [
|
||||
...existingNodes.map((node) => ({ ...node, selected: false })),
|
||||
...pastedNodes,
|
||||
]);
|
||||
addEdges(pastedEdges);
|
||||
|
||||
setNodes((nodes) => {
|
||||
return nodes.map((node) => {
|
||||
const nodeConnections = getEdges()
|
||||
.filter(
|
||||
(edge: Edge) =>
|
||||
edge.source === node.id || edge.target === node.id,
|
||||
)
|
||||
.map(
|
||||
(edge: Edge): ConnectedEdge => ({
|
||||
id: edge.id,
|
||||
source: edge.source,
|
||||
target: edge.target,
|
||||
sourceHandle: edge.sourceHandle!,
|
||||
targetHandle: edge.targetHandle!,
|
||||
}),
|
||||
);
|
||||
|
||||
return {
|
||||
...node,
|
||||
data: {
|
||||
...node.data,
|
||||
connections: nodeConnections,
|
||||
},
|
||||
};
|
||||
});
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
[setNodes, addEdges, getNodes, getEdges, getNextNodeId, getViewport],
|
||||
);
|
||||
|
||||
return handleCopyPaste;
|
||||
}
|
||||
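The paste branch above centres the pasted selection by converting the screen centre into flow coordinates with flowCoord = (screenCoord - viewportOffset) / zoom. A standalone sketch of the same conversion, with made-up numbers for illustration:

// Illustrative only: mirrors the viewport-centre math used in useCopyPaste above.
function screenCenterToFlowPosition(viewport: { x: number; y: number; zoom: number }) {
  return {
    x: (window.innerWidth / 2 - viewport.x) / viewport.zoom,
    y: (window.innerHeight / 2 - viewport.y) / viewport.zoom,
  };
}

// e.g. in a 1600x900 window with viewport { x: -200, y: 50, zoom: 0.5 }:
// x = (800 - (-200)) / 0.5 = 2000,  y = (450 - 50) / 0.5 = 800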
@@ -20,6 +20,7 @@ import { FindBlocksTool } from "../../tools/FindBlocks/FindBlocks";
|
||||
import { RunAgentTool } from "../../tools/RunAgent/RunAgent";
|
||||
import { RunBlockTool } from "../../tools/RunBlock/RunBlock";
|
||||
import { SearchDocsTool } from "../../tools/SearchDocs/SearchDocs";
|
||||
import { GenericTool } from "../../tools/GenericTool/GenericTool";
|
||||
import { ViewAgentOutputTool } from "../../tools/ViewAgentOutput/ViewAgentOutput";
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
@@ -255,6 +256,16 @@ export const ChatMessagesContainer = ({
|
||||
/>
|
||||
);
|
||||
default:
|
||||
// Render a generic tool indicator for SDK built-in
|
||||
// tools (Read, Glob, Grep, etc.) or any unrecognized tool
|
||||
if (part.type.startsWith("tool-")) {
|
||||
return (
|
||||
<GenericTool
|
||||
key={`${message.id}-${i}`}
|
||||
part={part as ToolUIPart}
|
||||
/>
|
||||
);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
})}
|
||||
|
||||
@@ -0,0 +1,63 @@
|
||||
"use client";
|
||||
|
||||
import { ToolUIPart } from "ai";
|
||||
import { GearIcon } from "@phosphor-icons/react";
|
||||
import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
|
||||
|
||||
interface Props {
|
||||
part: ToolUIPart;
|
||||
}
|
||||
|
||||
function extractToolName(part: ToolUIPart): string {
|
||||
// ToolUIPart.type is "tool-{name}", extract the name portion.
|
||||
return part.type.replace(/^tool-/, "");
|
||||
}
|
||||
|
||||
function formatToolName(name: string): string {
|
||||
// "search_docs" → "Search docs", "Read" → "Read"
|
||||
return name.replace(/_/g, " ").replace(/^\w/, (c) => c.toUpperCase());
|
||||
}
|
||||
|
||||
function getAnimationText(part: ToolUIPart): string {
|
||||
const label = formatToolName(extractToolName(part));
|
||||
|
||||
switch (part.state) {
|
||||
case "input-streaming":
|
||||
case "input-available":
|
||||
return `Running ${label}…`;
|
||||
case "output-available":
|
||||
return `${label} completed`;
|
||||
case "output-error":
|
||||
return `${label} failed`;
|
||||
default:
|
||||
return `Running ${label}…`;
|
||||
}
|
||||
}
|
||||
|
||||
export function GenericTool({ part }: Props) {
|
||||
const isStreaming =
|
||||
part.state === "input-streaming" || part.state === "input-available";
|
||||
const isError = part.state === "output-error";
|
||||
|
||||
return (
|
||||
<div className="py-2">
|
||||
<div className="flex items-center gap-2 text-sm text-muted-foreground">
|
||||
<GearIcon
|
||||
size={14}
|
||||
weight="regular"
|
||||
className={
|
||||
isError
|
||||
? "text-red-500"
|
||||
: isStreaming
|
||||
? "animate-spin text-neutral-500"
|
||||
: "text-neutral-400"
|
||||
}
|
||||
/>
|
||||
<MorphingTextAnimation
|
||||
text={getAnimationText(part)}
|
||||
className={isError ? "text-red-500" : undefined}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
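For reference, the helpers above turn a ToolUIPart type into a human-readable label; for a part of type "tool-search_docs" the values resolve as follows (illustrative walkthrough, not additional code in the diff):

// extractToolName(part)                      -> "search_docs"
// formatToolName("search_docs")              -> "Search docs"
// getAnimationText(part) while streaming     -> "Running Search docs…"
// getAnimationText(part) on output-available -> "Search docs completed"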
@@ -0,0 +1,180 @@
|
||||
import { GraphExecutionMeta, LibraryAgent } from "@/lib/autogpt-server-api";
|
||||
import React from "react";
|
||||
import {
|
||||
Card,
|
||||
CardContent,
|
||||
CardHeader,
|
||||
CardTitle,
|
||||
} from "@/components/__legacy__/ui/card";
|
||||
import { Button } from "@/components/__legacy__/ui/button";
|
||||
import { TextRenderer } from "@/components/__legacy__/ui/render";
|
||||
import Link from "next/link";
|
||||
import {
|
||||
Dialog,
|
||||
DialogContent,
|
||||
DialogHeader,
|
||||
DialogTrigger,
|
||||
} from "@/components/__legacy__/ui/dialog";
|
||||
import {
|
||||
DropdownMenu,
|
||||
DropdownMenuContent,
|
||||
DropdownMenuItem,
|
||||
DropdownMenuTrigger,
|
||||
} from "@/components/__legacy__/ui/dropdown-menu";
|
||||
import { ChevronDownIcon, EnterIcon } from "@radix-ui/react-icons";
|
||||
import {
|
||||
Table,
|
||||
TableBody,
|
||||
TableCell,
|
||||
TableHead,
|
||||
TableHeader,
|
||||
TableRow,
|
||||
} from "@/components/__legacy__/ui/table";
|
||||
import { formatDistanceToNow } from "date-fns";
|
||||
import { DialogTitle } from "@/components/__legacy__/ui/dialog";
|
||||
import { AgentImportForm } from "./AgentImportForm";
|
||||
|
||||
export const AgentFlowList = ({
|
||||
flows,
|
||||
executions,
|
||||
selectedFlow,
|
||||
onSelectFlow,
|
||||
className,
|
||||
}: {
|
||||
flows: LibraryAgent[];
|
||||
executions?: GraphExecutionMeta[];
|
||||
selectedFlow: LibraryAgent | null;
|
||||
onSelectFlow: (f: LibraryAgent) => void;
|
||||
className?: string;
|
||||
}) => {
|
||||
return (
|
||||
<Card className={className}>
|
||||
<CardHeader className="flex-row items-center justify-between space-x-3 space-y-0">
|
||||
<CardTitle>Agents</CardTitle>
|
||||
|
||||
<div className="flex items-center">
|
||||
{/* Split "Create" button */}
|
||||
<Button variant="outline" className="rounded-r-none">
|
||||
<Link href="/build">Create</Link>
|
||||
</Button>
|
||||
<Dialog>
|
||||
{/* https://ui.shadcn.com/docs/components/dialog#notes */}
|
||||
<DropdownMenu>
|
||||
<DropdownMenuTrigger asChild>
|
||||
<Button
|
||||
variant="outline"
|
||||
className={"rounded-l-none border-l-0 px-2"}
|
||||
data-testid="create-agent-dropdown"
|
||||
>
|
||||
<ChevronDownIcon />
|
||||
</Button>
|
||||
</DropdownMenuTrigger>
|
||||
|
||||
<DropdownMenuContent>
|
||||
<DialogTrigger asChild>
|
||||
<DropdownMenuItem data-testid="import-agent-from-file">
|
||||
<EnterIcon className="mr-2" /> Import from file
|
||||
</DropdownMenuItem>
|
||||
</DialogTrigger>
|
||||
</DropdownMenuContent>
|
||||
</DropdownMenu>
|
||||
|
||||
<DialogContent>
|
||||
<DialogHeader>
|
||||
<DialogTitle className="sr-only">Import Agent</DialogTitle>
|
||||
<h2 className="text-lg font-semibold">
|
||||
Import an Agent from a file
|
||||
</h2>
|
||||
</DialogHeader>
|
||||
<AgentImportForm />
|
||||
</DialogContent>
|
||||
</Dialog>
|
||||
</div>
|
||||
</CardHeader>
|
||||
|
||||
<CardContent>
|
||||
<Table>
|
||||
<TableHeader>
|
||||
<TableRow>
|
||||
<TableHead>Name</TableHead>
|
||||
{/* <TableHead>Status</TableHead> */}
|
||||
{/* <TableHead>Last updated</TableHead> */}
|
||||
{executions && (
|
||||
<TableHead className="md:hidden lg:table-cell">
|
||||
# of runs
|
||||
</TableHead>
|
||||
)}
|
||||
{executions && <TableHead>Last run</TableHead>}
|
||||
</TableRow>
|
||||
</TableHeader>
|
||||
<TableBody data-testid="agent-flow-list-body">
|
||||
{flows
|
||||
.map((flow) => {
|
||||
let runCount = 0,
|
||||
lastRun: GraphExecutionMeta | null = null;
|
||||
if (executions) {
|
||||
const _flowRuns = executions.filter(
|
||||
(r) => r.graph_id == flow.graph_id,
|
||||
);
|
||||
runCount = _flowRuns.length;
|
||||
lastRun =
|
||||
runCount == 0
|
||||
? null
|
||||
: _flowRuns.reduce((a, c) => {
|
||||
const aTime = a.started_at?.getTime() ?? 0;
|
||||
const cTime = c.started_at?.getTime() ?? 0;
|
||||
return aTime > cTime ? a : c;
|
||||
});
|
||||
}
|
||||
return { flow, runCount, lastRun };
|
||||
})
|
||||
.sort((a, b) => {
|
||||
if (!a.lastRun && !b.lastRun) return 0;
|
||||
if (!a.lastRun) return 1;
|
||||
if (!b.lastRun) return -1;
|
||||
const bTime = b.lastRun.started_at?.getTime() ?? 0;
|
||||
const aTime = a.lastRun.started_at?.getTime() ?? 0;
|
||||
return bTime - aTime;
|
||||
})
|
||||
.map(({ flow, runCount, lastRun }) => (
|
||||
<TableRow
|
||||
key={flow.id}
|
||||
data-testid={flow.id}
|
||||
data-name={flow.name}
|
||||
className="cursor-pointer"
|
||||
onClick={() => onSelectFlow(flow)}
|
||||
data-state={selectedFlow?.id == flow.id ? "selected" : null}
|
||||
>
|
||||
<TableCell>
|
||||
<TextRenderer value={flow.name} truncateLengthLimit={30} />
|
||||
</TableCell>
|
||||
{/* <TableCell><FlowStatusBadge status={flow.status ?? "active"} /></TableCell> */}
|
||||
{/* <TableCell>
|
||||
{flow.updatedAt ?? "???"}
|
||||
</TableCell> */}
|
||||
{executions && (
|
||||
<TableCell className="md:hidden lg:table-cell">
|
||||
{runCount}
|
||||
</TableCell>
|
||||
)}
|
||||
{executions &&
|
||||
(!lastRun ? (
|
||||
<TableCell />
|
||||
) : (
|
||||
<TableCell title={lastRun.started_at?.toString() ?? ""}>
|
||||
{lastRun.started_at
|
||||
? formatDistanceToNow(lastRun.started_at, {
|
||||
addSuffix: true,
|
||||
})
|
||||
: "—"}
|
||||
</TableCell>
|
||||
))}
|
||||
</TableRow>
|
||||
))}
|
||||
</TableBody>
|
||||
</Table>
|
||||
</CardContent>
|
||||
</Card>
|
||||
);
|
||||
};
|
||||
export default AgentFlowList;
|
||||
@@ -0,0 +1,175 @@
|
||||
import { z } from "zod";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { useForm } from "react-hook-form";
|
||||
import { zodResolver } from "@hookform/resolvers/zod";
|
||||
import React, { useState } from "react";
|
||||
import {
|
||||
Form,
|
||||
FormControl,
|
||||
FormField,
|
||||
FormItem,
|
||||
FormLabel,
|
||||
FormMessage,
|
||||
} from "@/components/__legacy__/ui/form";
|
||||
import { Input } from "@/components/__legacy__/ui/input";
|
||||
import { Button } from "@/components/__legacy__/ui/button";
|
||||
import { Textarea } from "@/components/__legacy__/ui/textarea";
|
||||
import { EnterIcon } from "@radix-ui/react-icons";
|
||||
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
|
||||
import {
|
||||
Graph,
|
||||
GraphCreatable,
|
||||
sanitizeImportedGraph,
|
||||
} from "@/lib/autogpt-server-api";
|
||||
|
||||
// Add this custom schema for File type
|
||||
const fileSchema = z.custom<File>((val) => val instanceof File, {
|
||||
message: "Must be a File object",
|
||||
});
|
||||
|
||||
const formSchema = z.object({
|
||||
agentFile: fileSchema,
|
||||
agentName: z.string().min(1, "Agent name is required"),
|
||||
agentDescription: z.string(),
|
||||
importAsTemplate: z.boolean(),
|
||||
});
|
||||
|
||||
export const AgentImportForm: React.FC<
|
||||
React.FormHTMLAttributes<HTMLFormElement>
|
||||
> = ({ className, ...props }) => {
|
||||
const [agentObject, setAgentObject] = useState<GraphCreatable | null>(null);
|
||||
const api = useBackendAPI();
|
||||
|
||||
const form = useForm<z.infer<typeof formSchema>>({
|
||||
resolver: zodResolver(formSchema),
|
||||
defaultValues: {
|
||||
agentName: "",
|
||||
agentDescription: "",
|
||||
importAsTemplate: false,
|
||||
},
|
||||
});
|
||||
|
||||
function onSubmit(values: z.infer<typeof formSchema>) {
|
||||
if (!agentObject) {
|
||||
form.setError("root", { message: "No Agent object to save" });
|
||||
return;
|
||||
}
|
||||
const payload: GraphCreatable = {
|
||||
...agentObject,
|
||||
name: values.agentName,
|
||||
description: values.agentDescription,
|
||||
is_active: !values.importAsTemplate,
|
||||
};
|
||||
|
||||
api
|
||||
.createGraph(payload, "upload")
|
||||
.then((response) => {
|
||||
const qID = "flowID";
|
||||
window.location.href = `/build?${qID}=${response.id}`;
|
||||
})
|
||||
.catch((error) => {
|
||||
const entity_type = "agent";
|
||||
form.setError("root", {
|
||||
message: `Could not create ${entity_type}: ${error}`,
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
return (
|
||||
<Form {...form}>
|
||||
<form
|
||||
onSubmit={form.handleSubmit(onSubmit)}
|
||||
className={cn("space-y-4", className)}
|
||||
{...props}
|
||||
>
|
||||
<FormField
|
||||
control={form.control}
|
||||
name="agentFile"
|
||||
render={({ field }) => (
|
||||
<FormItem>
|
||||
<FormLabel>Agent file</FormLabel>
|
||||
<FormControl className="cursor-pointer">
|
||||
<Input
|
||||
type="file"
|
||||
accept="application/json"
|
||||
data-testid="import-agent-file-input"
|
||||
onChange={(e) => {
|
||||
const file = e.target.files?.[0];
|
||||
if (file) {
|
||||
field.onChange(file);
|
||||
const reader = new FileReader();
|
||||
// Attach parser to file reader
|
||||
reader.onload = (event) => {
|
||||
try {
|
||||
const obj = JSON.parse(
|
||||
event.target?.result as string,
|
||||
);
|
||||
if (
|
||||
!["name", "description", "nodes", "links"].every(
|
||||
(key) => key in obj && obj[key] != null,
|
||||
)
|
||||
) {
|
||||
throw new Error(
|
||||
"Invalid agent object in file: " +
|
||||
JSON.stringify(obj, null, 2),
|
||||
);
|
||||
}
|
||||
const graph = obj as Graph;
|
||||
sanitizeImportedGraph(graph);
|
||||
setAgentObject(graph);
|
||||
form.setValue("agentName", graph.name);
|
||||
form.setValue("agentDescription", graph.description);
|
||||
} catch (error) {
|
||||
console.error("Error loading agent file:", error);
|
||||
}
|
||||
};
|
||||
// Load file
|
||||
reader.readAsText(file);
|
||||
}
|
||||
}}
|
||||
/>
|
||||
</FormControl>
|
||||
<FormMessage />
|
||||
</FormItem>
|
||||
)}
|
||||
/>
|
||||
<FormField
|
||||
control={form.control}
|
||||
name="agentName"
|
||||
disabled={!agentObject}
|
||||
render={({ field }) => (
|
||||
<FormItem>
|
||||
<FormLabel>Agent name</FormLabel>
|
||||
<FormControl>
|
||||
<Input {...field} data-testid="agent-name-input" />
|
||||
</FormControl>
|
||||
<FormMessage />
|
||||
</FormItem>
|
||||
)}
|
||||
/>
|
||||
<FormField
|
||||
control={form.control}
|
||||
name="agentDescription"
|
||||
disabled={!agentObject}
|
||||
render={({ field }) => (
|
||||
<FormItem>
|
||||
<FormLabel>Agent description</FormLabel>
|
||||
<FormControl>
|
||||
<Textarea {...field} data-testid="agent-description-input" />
|
||||
</FormControl>
|
||||
<FormMessage />
|
||||
</FormItem>
|
||||
)}
|
||||
/>
|
||||
<Button
|
||||
type="submit"
|
||||
className="w-full"
|
||||
disabled={!agentObject}
|
||||
data-testid="import-agent-submit"
|
||||
>
|
||||
<EnterIcon className="mr-2" /> Import & Edit
|
||||
</Button>
|
||||
</form>
|
||||
</Form>
|
||||
);
|
||||
};
|
||||
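The file handler above only accepts JSON whose top-level object has non-null name, description, nodes, and links keys before it is sanitized and loaded into the form. A minimal illustrative payload that passes that check (real exports contain full node and link definitions):

// Illustrative only, not a real agent export.
const exampleAgentJson = {
  name: "My agent",
  description: "Does something useful",
  nodes: [],
  links: [],
};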
@@ -0,0 +1,243 @@
|
||||
import React, { useEffect, useState } from "react";
|
||||
import {
|
||||
Graph,
|
||||
GraphExecutionMeta,
|
||||
LibraryAgent,
|
||||
} from "@/lib/autogpt-server-api";
|
||||
import {
|
||||
Card,
|
||||
CardContent,
|
||||
CardHeader,
|
||||
CardTitle,
|
||||
} from "@/components/__legacy__/ui/card";
|
||||
import {
|
||||
DropdownMenu,
|
||||
DropdownMenuContent,
|
||||
DropdownMenuLabel,
|
||||
DropdownMenuRadioGroup,
|
||||
DropdownMenuRadioItem,
|
||||
DropdownMenuSeparator,
|
||||
DropdownMenuTrigger,
|
||||
} from "@/components/__legacy__/ui/dropdown-menu";
|
||||
import { Button, buttonVariants } from "@/components/__legacy__/ui/button";
|
||||
import {
|
||||
ClockIcon,
|
||||
ExitIcon,
|
||||
Pencil2Icon,
|
||||
PlayIcon,
|
||||
TrashIcon,
|
||||
} from "@radix-ui/react-icons";
|
||||
import Link from "next/link";
|
||||
import { exportAsJSONFile } from "@/lib/utils";
|
||||
import {
|
||||
Dialog,
|
||||
DialogContent,
|
||||
DialogHeader,
|
||||
DialogTitle,
|
||||
DialogDescription,
|
||||
DialogFooter,
|
||||
} from "@/components/__legacy__/ui/dialog";
|
||||
import useAgentGraph from "@/hooks/useAgentGraph";
|
||||
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
|
||||
import { FlowRunsStatus } from "./FlowRunsStatus";
|
||||
import { RunnerInputDialog } from "../../build/components/legacy-builder/RunnerInputUI";
|
||||
|
||||
export const FlowInfo: React.FC<
|
||||
React.HTMLAttributes<HTMLDivElement> & {
|
||||
flow: LibraryAgent;
|
||||
executions: GraphExecutionMeta[];
|
||||
flowVersion?: number | "all";
|
||||
refresh: () => void;
|
||||
}
|
||||
> = ({ flow, executions, flowVersion, refresh, ...props }) => {
|
||||
const { savedAgent, saveAndRun, stopRun, isRunning } = useAgentGraph(
|
||||
flow.graph_id,
|
||||
flow.graph_version,
|
||||
undefined,
|
||||
false,
|
||||
);
|
||||
|
||||
const api = useBackendAPI();
|
||||
|
||||
const [flowVersions, setFlowVersions] = useState<Graph[] | null>(null);
|
||||
const [selectedVersion, setSelectedFlowVersion] = useState(
|
||||
flowVersion ?? "all",
|
||||
);
|
||||
const selectedFlowVersion: Graph | undefined = flowVersions?.find(
|
||||
(v) =>
|
||||
v.version ==
|
||||
(selectedVersion == "all" ? flow.graph_version : selectedVersion),
|
||||
);
|
||||
|
||||
const hasInputs = Object.keys(flow.input_schema.properties).length > 0;
|
||||
const hasCredentialsInputs =
|
||||
Object.keys(flow.credentials_input_schema.properties).length > 0;
|
||||
|
||||
const [isDeleteModalOpen, setIsDeleteModalOpen] = useState(false);
|
||||
const [isRunDialogOpen, setIsRunDialogOpen] = useState(false);
|
||||
const isDisabled = !selectedFlowVersion;
|
||||
|
||||
useEffect(() => {
|
||||
api
|
||||
.getGraphAllVersions(flow.graph_id)
|
||||
.then((result) => setFlowVersions(result));
|
||||
}, [flow.graph_id, api]);
|
||||
|
||||
const openRunDialog = () => setIsRunDialogOpen(true);
|
||||
|
||||
const runOrOpenInput = () => {
|
||||
if (hasInputs || hasCredentialsInputs) {
|
||||
openRunDialog();
|
||||
} else {
|
||||
saveAndRun({}, {});
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
<Card {...props}>
|
||||
<CardHeader className="">
|
||||
<CardTitle>
|
||||
{flow.name} <span className="font-light">v{flow.graph_version}</span>
|
||||
</CardTitle>
|
||||
<div className="flex flex-col space-y-2 py-6">
|
||||
{(flowVersions?.length ?? 0) > 1 && (
|
||||
<DropdownMenu>
|
||||
<DropdownMenuTrigger asChild>
|
||||
<Button variant="outline">
|
||||
<ClockIcon className="mr-2" />
|
||||
{selectedVersion == "all"
|
||||
? "All versions"
|
||||
: `Version ${selectedVersion}`}
|
||||
</Button>
|
||||
</DropdownMenuTrigger>
|
||||
<DropdownMenuContent className="w-56">
|
||||
<DropdownMenuLabel>Choose a version</DropdownMenuLabel>
|
||||
<DropdownMenuSeparator />
|
||||
<DropdownMenuRadioGroup
|
||||
value={String(selectedVersion)}
|
||||
onValueChange={(choice: string) =>
|
||||
setSelectedFlowVersion(
|
||||
choice == "all" ? choice : Number(choice),
|
||||
)
|
||||
}
|
||||
>
|
||||
<DropdownMenuRadioItem value="all">
|
||||
All versions
|
||||
</DropdownMenuRadioItem>
|
||||
{flowVersions?.map((v) => (
|
||||
<DropdownMenuRadioItem
|
||||
key={v.version}
|
||||
value={v.version.toString()}
|
||||
>
|
||||
Version {v.version}
|
||||
{v.is_active ? " (active)" : ""}
|
||||
</DropdownMenuRadioItem>
|
||||
))}
|
||||
</DropdownMenuRadioGroup>
|
||||
</DropdownMenuContent>
|
||||
</DropdownMenu>
|
||||
)}
|
||||
{flow.can_access_graph && (
|
||||
<Link
|
||||
className={buttonVariants({ variant: "default" })}
|
||||
href={`/build?flowID=${flow.graph_id}&flowVersion=${flow.graph_version}`}
|
||||
>
|
||||
<Pencil2Icon className="mr-2" />
|
||||
Open in Builder
|
||||
</Link>
|
||||
)}
|
||||
{flow.can_access_graph && (
|
||||
<Button
|
||||
variant="outline"
|
||||
className="px-2.5"
|
||||
title="Export to a JSON-file"
|
||||
data-testid="export-button"
|
||||
onClick={() =>
|
||||
api
|
||||
.getGraph(flow.graph_id, selectedFlowVersion!.version, true)
|
||||
.then((graph) =>
|
||||
exportAsJSONFile(
|
||||
graph,
|
||||
`${flow.name}_v${selectedFlowVersion!.version}.json`,
|
||||
),
|
||||
)
|
||||
}
|
||||
>
|
||||
<ExitIcon className="mr-2" /> Export
|
||||
</Button>
|
||||
)}
|
||||
<Button
|
||||
variant="secondary"
|
||||
className="bg-purple-500 text-white hover:bg-purple-700"
|
||||
onClick={!isRunning ? runOrOpenInput : stopRun}
|
||||
disabled={isDisabled}
|
||||
title={!isRunning ? "Run Agent" : "Stop Agent"}
|
||||
>
|
||||
<PlayIcon className="mr-2" />
|
||||
{isRunning ? "Stop Agent" : "Run Agent"}
|
||||
</Button>
|
||||
{flow.can_access_graph && (
|
||||
<Button
|
||||
variant="destructive"
|
||||
onClick={() => setIsDeleteModalOpen(true)}
|
||||
data-testid="delete-button"
|
||||
>
|
||||
<TrashIcon className="mr-2" />
|
||||
Delete Agent
|
||||
</Button>
|
||||
)}
|
||||
</div>
|
||||
</CardHeader>
|
||||
<CardContent>
|
||||
<FlowRunsStatus
|
||||
flows={[flow]}
|
||||
executions={executions.filter(
|
||||
(execution) =>
|
||||
execution.graph_id == flow.graph_id &&
|
||||
(selectedVersion == "all" ||
|
||||
execution.graph_version == selectedVersion),
|
||||
)}
|
||||
/>
|
||||
</CardContent>
|
||||
<Dialog open={isDeleteModalOpen} onOpenChange={setIsDeleteModalOpen}>
|
||||
<DialogContent>
|
||||
<DialogHeader>
|
||||
<DialogTitle>Delete Agent</DialogTitle>
|
||||
<DialogDescription>
|
||||
Are you sure you want to delete this agent? <br />
|
||||
This action cannot be undone.
|
||||
</DialogDescription>
|
||||
</DialogHeader>
|
||||
<DialogFooter>
|
||||
<Button
|
||||
variant="outline"
|
||||
onClick={() => setIsDeleteModalOpen(false)}
|
||||
>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button
|
||||
variant="destructive"
|
||||
onClick={() => {
|
||||
api.deleteLibraryAgent(flow.id).then(() => {
|
||||
setIsDeleteModalOpen(false);
|
||||
refresh();
|
||||
});
|
||||
}}
|
||||
>
|
||||
Delete
|
||||
</Button>
|
||||
</DialogFooter>
|
||||
</DialogContent>
|
||||
</Dialog>
|
||||
{savedAgent && (
|
||||
<RunnerInputDialog
|
||||
isOpen={isRunDialogOpen}
|
||||
doClose={() => setIsRunDialogOpen(false)}
|
||||
graph={savedAgent}
|
||||
doRun={saveAndRun}
|
||||
/>
|
||||
)}
|
||||
</Card>
|
||||
);
|
||||
};
|
||||
export default FlowInfo;
|
||||
@@ -0,0 +1,142 @@
|
||||
import React, { useCallback, useEffect, useState } from "react";
|
||||
import { GraphExecutionMeta, LibraryAgent } from "@/lib/autogpt-server-api";
|
||||
import {
|
||||
Card,
|
||||
CardContent,
|
||||
CardHeader,
|
||||
CardTitle,
|
||||
} from "@/components/__legacy__/ui/card";
|
||||
import Link from "next/link";
|
||||
import { Button, buttonVariants } from "@/components/__legacy__/ui/button";
|
||||
import { IconSquare } from "@/components/__legacy__/ui/icons";
|
||||
import { ExitIcon, Pencil2Icon } from "@radix-ui/react-icons";
|
||||
import { format } from "date-fns";
|
||||
import { FlowRunStatusBadge } from "@/app/(platform)/monitoring/components/FlowRunStatusBadge";
|
||||
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
|
||||
import RunnerOutputUI, {
|
||||
OutputNodeInfo,
|
||||
} from "../../build/components/legacy-builder/RunnerOutputUI";
|
||||
|
||||
export const FlowRunInfo: React.FC<
|
||||
React.HTMLAttributes<HTMLDivElement> & {
|
||||
agent: LibraryAgent;
|
||||
execution: GraphExecutionMeta;
|
||||
}
|
||||
> = ({ agent, execution, ...props }) => {
|
||||
const [isOutputOpen, setIsOutputOpen] = useState(false);
|
||||
const [blockOutputs, setBlockOutputs] = useState<OutputNodeInfo[]>([]);
|
||||
const api = useBackendAPI();
|
||||
|
||||
const fetchBlockResults = useCallback(async () => {
|
||||
const graph = await api.getGraph(agent.graph_id, agent.graph_version);
|
||||
const graphExecution = await api.getGraphExecutionInfo(
|
||||
agent.graph_id,
|
||||
execution.id,
|
||||
);
|
||||
|
||||
// Transform results to BlockOutput format
|
||||
setBlockOutputs(
|
||||
Object.entries(graphExecution.outputs).flatMap(([key, values]) =>
|
||||
values.map(
|
||||
(value) =>
|
||||
({
|
||||
metadata: {
|
||||
name: graph.output_schema.properties[key].title || "Output",
|
||||
description:
|
||||
graph.output_schema.properties[key].description ||
|
||||
"Output from the agent",
|
||||
},
|
||||
result: value,
|
||||
}) satisfies OutputNodeInfo,
|
||||
),
|
||||
),
|
||||
);
|
||||
}, [api, agent.graph_id, agent.graph_version, execution.id]);
|
||||
|
||||
// Fetch graph and execution data
|
||||
useEffect(() => {
|
||||
if (!isOutputOpen) return;
|
||||
fetchBlockResults();
|
||||
}, [isOutputOpen, fetchBlockResults]);
|
||||
|
||||
if (execution.graph_id != agent.graph_id) {
|
||||
throw new Error(
|
||||
`FlowRunInfo can't be used with non-matching execution.graph_id and flow.id`,
|
||||
);
|
||||
}
|
||||
|
||||
const handleStopRun = useCallback(() => {
|
||||
api.stopGraphExecution(agent.graph_id, execution.id);
|
||||
}, [api, agent.graph_id, execution.id]);
|
||||
|
||||
return (
|
||||
<>
|
||||
<Card {...props}>
|
||||
<CardHeader className="flex-row items-center justify-between space-x-3 space-y-0">
|
||||
<div>
|
||||
<CardTitle>
|
||||
{agent.name}{" "}
|
||||
<span className="font-light">v{execution.graph_version}</span>
|
||||
</CardTitle>
|
||||
</div>
|
||||
<div className="flex space-x-2">
|
||||
{execution.status === "RUNNING" && (
|
||||
<Button onClick={handleStopRun} variant="destructive">
|
||||
<IconSquare className="mr-2" /> Stop Run
|
||||
</Button>
|
||||
)}
|
||||
<Button onClick={() => setIsOutputOpen(true)} variant="outline">
|
||||
<ExitIcon className="mr-2" /> View Outputs
|
||||
</Button>
|
||||
{agent.can_access_graph && (
|
||||
<Link
|
||||
className={buttonVariants({ variant: "default" })}
|
||||
href={`/build?flowID=${execution.graph_id}&flowVersion=${execution.graph_version}&flowExecutionID=${execution.id}`}
|
||||
>
|
||||
<Pencil2Icon className="mr-2" /> Open in Builder
|
||||
</Link>
|
||||
)}
|
||||
</div>
|
||||
</CardHeader>
|
||||
<CardContent>
|
||||
<p className="hidden">
|
||||
<strong>Agent ID:</strong> <code>{agent.graph_id}</code>
|
||||
</p>
|
||||
<p className="hidden">
|
||||
<strong>Run ID:</strong> <code>{execution.id}</code>
|
||||
</p>
|
||||
<div>
|
||||
<strong>Status:</strong>{" "}
|
||||
<FlowRunStatusBadge status={execution.status} />
|
||||
</div>
|
||||
<p>
|
||||
<strong>Started:</strong>{" "}
|
||||
{execution.started_at
|
||||
? format(execution.started_at, "yyyy-MM-dd HH:mm:ss")
|
||||
: "—"}
|
||||
</p>
|
||||
<p>
|
||||
<strong>Finished:</strong>{" "}
|
||||
{execution.ended_at
|
||||
? format(execution.ended_at, "yyyy-MM-dd HH:mm:ss")
|
||||
: "—"}
|
||||
</p>
|
||||
{execution.stats && (
|
||||
<p>
|
||||
<strong>Duration (run time):</strong>{" "}
|
||||
{execution.stats.duration.toFixed(1)} (
|
||||
{execution.stats.node_exec_time.toFixed(1)}) seconds
|
||||
</p>
|
||||
)}
|
||||
</CardContent>
|
||||
</Card>
|
||||
<RunnerOutputUI
|
||||
isOpen={isOutputOpen}
|
||||
doClose={() => setIsOutputOpen(false)}
|
||||
outputs={blockOutputs}
|
||||
/>
|
||||
</>
|
||||
);
|
||||
};
|
||||
|
||||
export default FlowRunInfo;
|
||||
@@ -0,0 +1,25 @@
import React from "react";
import { Badge } from "@/components/__legacy__/ui/badge";
import { cn } from "@/lib/utils";
import { GraphExecutionMeta } from "@/lib/autogpt-server-api";

export const FlowRunStatusBadge: React.FC<{
  status: GraphExecutionMeta["status"];
  className?: string;
}> = ({ status, className }) => (
  <Badge
    variant="default"
    className={cn(
      status === "RUNNING"
        ? "bg-blue-500 dark:bg-blue-700"
        : status === "QUEUED"
          ? "bg-yellow-500 dark:bg-yellow-600"
          : status === "COMPLETED"
            ? "bg-green-500 dark:bg-green-600"
            : "bg-red-500 dark:bg-red-700",
      className,
    )}
  >
    {status}
  </Badge>
);
@@ -0,0 +1,92 @@
import React from "react";
import { GraphExecutionMeta, LibraryAgent } from "@/lib/autogpt-server-api";
import {
Card,
CardContent,
CardHeader,
CardTitle,
} from "@/components/__legacy__/ui/card";
import {
Table,
TableBody,
TableCell,
TableHead,
TableHeader,
TableRow,
} from "@/components/__legacy__/ui/table";
import { format } from "date-fns";
import { FlowRunStatusBadge } from "@/app/(platform)/monitoring/components/FlowRunStatusBadge";
import { TextRenderer } from "../../../../components/__legacy__/ui/render";

export const FlowRunsList: React.FC<{
flows: LibraryAgent[];
executions: GraphExecutionMeta[];
className?: string;
selectedRun?: GraphExecutionMeta | null;
onSelectRun: (r: GraphExecutionMeta) => void;
}> = ({ flows, executions, selectedRun, onSelectRun, className }) => (
<Card className={className}>
<CardHeader>
<CardTitle>Runs</CardTitle>
</CardHeader>
<CardContent>
<Table>
<TableHeader>
<TableRow>
<TableHead>Agent</TableHead>
<TableHead>Started</TableHead>
<TableHead>Status</TableHead>
<TableHead>Duration</TableHead>
</TableRow>
</TableHeader>
<TableBody data-testid="flow-runs-list-body">
{executions.map((execution) => (
<TableRow
key={execution.id}
data-testid={`flow-run-${execution.id}-graph-${execution.graph_id}`}
data-runid={execution.id}
data-graphid={execution.graph_id}
className="cursor-pointer"
onClick={() => onSelectRun(execution)}
data-state={selectedRun?.id == execution.id ? "selected" : null}
>
<TableCell>
<TextRenderer
value={
flows.find((f) => f.graph_id == execution.graph_id)?.name
}
truncateLengthLimit={30}
/>
</TableCell>
<TableCell>
{execution.started_at
? format(execution.started_at, "HH:mm")
: "—"}
</TableCell>
<TableCell>
<FlowRunStatusBadge
status={execution.status}
className="w-full justify-center"
/>
</TableCell>
<TableCell>
{execution.stats
? formatDuration(execution.stats.duration)
: ""}
</TableCell>
</TableRow>
))}
</TableBody>
</Table>
</CardContent>
</Card>
);

function formatDuration(seconds: number): string {
return (
(seconds < 100 ? seconds.toPrecision(2) : Math.round(seconds)).toString() +
"s"
);
}

export default FlowRunsList;
@@ -0,0 +1,131 @@
import React, { useState } from "react";
import { GraphExecutionMeta, LibraryAgent } from "@/lib/autogpt-server-api";
import { CardTitle } from "@/components/__legacy__/ui/card";
import { Button } from "@/components/__legacy__/ui/button";
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "@/components/__legacy__/ui/popover";
import { Calendar } from "@/components/__legacy__/ui/calendar";
import { FlowRunsTimeline } from "@/app/(platform)/monitoring/components/FlowRunsTimeline";

export const FlowRunsStatus: React.FC<{
flows: LibraryAgent[];
executions: GraphExecutionMeta[];
title?: string;
className?: string;
}> = ({ flows, executions: executions, title, className }) => {
/* "dateMin": since the first flow in the dataset
* number > 0: custom date (unix timestamp)
* number < 0: offset relative to Date.now() (in seconds) */
const [selected, setSelected] = useState<Date>();
const [statsSince, setStatsSince] = useState<number | "dataMin">(-24 * 3600);
const statsSinceTimestamp = // unix timestamp or null
typeof statsSince == "string"
? null
: statsSince < 0
? Date.now() + statsSince * 1000
: statsSince;
const filteredFlowRuns =
statsSinceTimestamp != null
? executions.filter(
(fr) =>
fr.started_at && fr.started_at.getTime() > statsSinceTimestamp,
)
: executions;

return (
<div className={className}>
<div className="flex flex-row items-center justify-between">
<CardTitle>{title || "Stats"}</CardTitle>
<div className="flex flex-wrap space-x-2">
<Button
variant="outline"
size="sm"
onClick={() => setStatsSince(-2 * 3600)}
>
2h
</Button>
<Button
variant="outline"
size="sm"
onClick={() => setStatsSince(-8 * 3600)}
>
8h
</Button>
<Button
variant="outline"
size="sm"
onClick={() => setStatsSince(-24 * 3600)}
>
24h
</Button>
<Button
variant="outline"
size="sm"
onClick={() => setStatsSince(-7 * 24 * 3600)}
>
7d
</Button>
<Popover>
<PopoverTrigger asChild>
<Button variant={"outline"} size="sm">
Custom
</Button>
</PopoverTrigger>
<PopoverContent className="w-auto p-0" align="start">
<Calendar
mode="single"
selected={selected}
onSelect={(_, selectedDay) => {
setSelected(selectedDay);
setStatsSince(selectedDay.getTime());
}}
/>
</PopoverContent>
</Popover>
<Button
variant="outline"
size="sm"
onClick={() => setStatsSince("dataMin")}
>
All
</Button>
</div>
</div>
<FlowRunsTimeline
flows={flows}
executions={executions}
dataMin={statsSince}
className="mt-3"
/>
<hr className="my-4" />
<div>
<p>
<strong>Total runs:</strong> {filteredFlowRuns.length}
</p>
<p>
<strong>Total run time:</strong>{" "}
{filteredFlowRuns.reduce(
(total, run) => total + (run.stats?.node_exec_time ?? 0),
0,
)}{" "}
seconds
</p>
{filteredFlowRuns.some((r) => r.stats) && (
<p>
<strong>Total cost:</strong> $
{(
filteredFlowRuns.reduce(
(total, run) => total + (run.stats?.cost ?? 0),
0,
) / 100
).toFixed(2)}
</p>
)}
</div>
</div>
);
};
export default FlowRunsStatus;
@@ -0,0 +1,189 @@
|
||||
import { GraphExecutionMeta, LibraryAgent } from "@/lib/autogpt-server-api";
|
||||
import {
|
||||
ComposedChart,
|
||||
DefaultLegendContentProps,
|
||||
Legend,
|
||||
Line,
|
||||
ResponsiveContainer,
|
||||
Scatter,
|
||||
Tooltip,
|
||||
XAxis,
|
||||
YAxis,
|
||||
} from "recharts";
|
||||
import { differenceInHours, format } from "date-fns";
|
||||
import { Card } from "@/components/__legacy__/ui/card";
|
||||
import { cn, hashString } from "@/lib/utils";
|
||||
import React from "react";
|
||||
import { FlowRunStatusBadge } from "@/app/(platform)/monitoring/components/FlowRunStatusBadge";
|
||||
|
||||
export const FlowRunsTimeline = ({
|
||||
flows,
|
||||
executions,
|
||||
dataMin,
|
||||
className,
|
||||
}: {
|
||||
flows: LibraryAgent[];
|
||||
executions: GraphExecutionMeta[];
|
||||
dataMin: "dataMin" | number;
|
||||
className?: string;
|
||||
}) => (
|
||||
/* TODO: make logarithmic? */
|
||||
<ResponsiveContainer width="100%" height={120} className={className}>
|
||||
<ComposedChart>
|
||||
<XAxis
|
||||
dataKey="time"
|
||||
type="number"
|
||||
domain={[
|
||||
typeof dataMin == "string"
|
||||
? dataMin
|
||||
: dataMin < 0
|
||||
? Date.now() + dataMin * 1000
|
||||
: dataMin,
|
||||
Date.now(),
|
||||
]}
|
||||
allowDataOverflow={true}
|
||||
tickFormatter={(unixTime) => {
|
||||
const now = new Date();
|
||||
const time = new Date(unixTime);
|
||||
return differenceInHours(now, time) < 24
|
||||
? format(time, "HH:mm")
|
||||
: format(time, "yyyy-MM-dd HH:mm");
|
||||
}}
|
||||
name="Time"
|
||||
scale="time"
|
||||
/>
|
||||
<YAxis
|
||||
dataKey="_duration"
|
||||
name="Duration (s)"
|
||||
tickFormatter={(s) => (s > 90 ? `${Math.round(s / 60)}m` : `${s}s`)}
|
||||
/>
|
||||
<Tooltip
|
||||
content={({ payload }) => {
|
||||
if (payload && payload.length) {
|
||||
const data: GraphExecutionMeta & {
|
||||
time: number;
|
||||
_duration: number;
|
||||
} = payload[0].payload;
|
||||
const flow = flows.find((f) => f.graph_id === data.graph_id);
|
||||
return (
|
||||
<Card className="p-2 text-xs leading-normal">
|
||||
<p>
|
||||
<strong>Agent:</strong> {flow ? flow.name : "Unknown"}
|
||||
</p>
|
||||
<div>
|
||||
<strong>Status:</strong>
|
||||
<FlowRunStatusBadge
|
||||
status={data.status}
|
||||
className="px-1.5 py-0"
|
||||
/>
|
||||
</div>
|
||||
<p>
|
||||
<strong>Started:</strong>{" "}
|
||||
{data.started_at
|
||||
? format(data.started_at, "yyyy-MM-dd HH:mm:ss")
|
||||
: "—"}
|
||||
</p>
|
||||
{data.stats && (
|
||||
<p>
|
||||
<strong>Duration / run time:</strong>{" "}
|
||||
{formatDuration(data.stats.duration)} /{" "}
|
||||
{formatDuration(data.stats.node_exec_time)}
|
||||
</p>
|
||||
)}
|
||||
</Card>
|
||||
);
|
||||
}
|
||||
return null;
|
||||
}}
|
||||
/>
|
||||
{flows.map((flow) => (
|
||||
<Scatter
|
||||
key={flow.id}
|
||||
data={executions
|
||||
.filter((e) => e.graph_id == flow.graph_id && e.started_at)
|
||||
.map((e) => ({
|
||||
...e,
|
||||
time:
|
||||
(e.started_at?.getTime() ?? 0) +
|
||||
(e.stats?.node_exec_time ?? 0) * 1000,
|
||||
_duration: e.stats?.node_exec_time ?? 0,
|
||||
}))}
|
||||
name={flow.name}
|
||||
fill={`hsl(${(hashString(flow.id) * 137.5) % 360}, 70%, 50%)`}
|
||||
/>
|
||||
))}
|
||||
{executions
|
||||
.filter((e) => e.started_at && e.ended_at)
|
||||
.map((execution) => (
|
||||
<Line
|
||||
key={execution.id}
|
||||
type="linear"
|
||||
dataKey="_duration"
|
||||
data={[
|
||||
{
|
||||
...execution,
|
||||
time: execution.started_at!.getTime(),
|
||||
_duration: 0,
|
||||
},
|
||||
{
|
||||
...execution,
|
||||
time: execution.ended_at!.getTime(),
|
||||
_duration: execution.stats?.node_exec_time ?? 0,
|
||||
},
|
||||
]}
|
||||
stroke={`hsl(${(hashString(execution.graph_id) * 137.5) % 360}, 70%, 50%)`}
|
||||
strokeWidth={2}
|
||||
dot={false}
|
||||
legendType="none"
|
||||
/>
|
||||
))}
|
||||
<Legend
|
||||
content={<ScrollableLegend />}
|
||||
wrapperStyle={{
|
||||
bottom: 0,
|
||||
left: 0,
|
||||
right: 0,
|
||||
width: "100%",
|
||||
display: "flex",
|
||||
justifyContent: "center",
|
||||
}}
|
||||
/>
|
||||
</ComposedChart>
|
||||
</ResponsiveContainer>
|
||||
);
|
||||
|
||||
export default FlowRunsTimeline;
|
||||
|
||||
const ScrollableLegend: React.FC<
|
||||
DefaultLegendContentProps & { className?: string }
|
||||
> = ({ payload, className }) => {
|
||||
return (
|
||||
<div
|
||||
className={cn(
|
||||
"space-x-3 overflow-x-auto whitespace-nowrap px-4 text-sm",
|
||||
className,
|
||||
)}
|
||||
style={{ scrollbarWidth: "none" }}
|
||||
>
|
||||
{payload?.map((entry, index) => {
|
||||
if (entry.type == "none") return;
|
||||
return (
|
||||
<span key={`item-${index}`} className="inline-flex items-center">
|
||||
<span
|
||||
className="mr-1 inline-block size-2.5 rounded-full"
|
||||
style={{ backgroundColor: entry.color }}
|
||||
/>
|
||||
<span>{entry.value}</span>
|
||||
</span>
|
||||
);
|
||||
})}
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
function formatDuration(seconds: number): string {
|
||||
return (
|
||||
(seconds < 100 ? seconds.toPrecision(2) : Math.round(seconds)).toString() +
|
||||
"s"
|
||||
);
|
||||
}
|
||||
@@ -0,0 +1,285 @@
|
||||
import { LibraryAgent } from "@/lib/autogpt-server-api";
|
||||
import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo";
|
||||
import { Button } from "@/components/__legacy__/ui/button";
|
||||
import { Card } from "@/components/__legacy__/ui/card";
|
||||
import {
|
||||
Table,
|
||||
TableBody,
|
||||
TableCell,
|
||||
TableHead,
|
||||
TableHeader,
|
||||
TableRow,
|
||||
} from "@/components/__legacy__/ui/table";
|
||||
import { Badge } from "@/components/__legacy__/ui/badge";
|
||||
import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
|
||||
import { ClockIcon, Loader2 } from "lucide-react";
|
||||
import { useToast } from "@/components/molecules/Toast/use-toast";
|
||||
import { humanizeCronExpression } from "@/lib/cron-expression-utils";
|
||||
import { useUserTimezone } from "@/lib/hooks/useUserTimezone";
|
||||
import {
|
||||
formatScheduleTime,
|
||||
getTimezoneAbbreviation,
|
||||
} from "@/lib/timezone-utils";
|
||||
import {
|
||||
Select,
|
||||
SelectContent,
|
||||
SelectItem,
|
||||
SelectTrigger,
|
||||
SelectValue,
|
||||
} from "@/components/__legacy__/ui/select";
|
||||
import { useRouter } from "next/navigation";
|
||||
import { useState } from "react";
|
||||
import {
|
||||
Dialog,
|
||||
DialogContent,
|
||||
DialogHeader,
|
||||
DialogTitle,
|
||||
} from "@/components/__legacy__/ui/dialog";
|
||||
import { TextRenderer } from "../../../../components/__legacy__/ui/render";
|
||||
import { Input } from "../../../../components/__legacy__/ui/input";
|
||||
import { Label } from "../../../../components/__legacy__/ui/label";
|
||||
|
||||
interface SchedulesTableProps {
|
||||
schedules: GraphExecutionJobInfo[];
|
||||
agents: LibraryAgent[];
|
||||
onRemoveSchedule: (scheduleId: string, enabled: boolean) => void;
|
||||
sortColumn: keyof GraphExecutionJobInfo;
|
||||
sortDirection: "asc" | "desc";
|
||||
onSort: (column: keyof GraphExecutionJobInfo) => void;
|
||||
}
|
||||
|
||||
export const SchedulesTable = ({
|
||||
schedules,
|
||||
agents,
|
||||
onRemoveSchedule,
|
||||
sortColumn,
|
||||
sortDirection,
|
||||
onSort,
|
||||
}: SchedulesTableProps) => {
|
||||
const { toast } = useToast();
|
||||
const router = useRouter();
|
||||
const [selectedAgent, setSelectedAgent] = useState<string>(""); // Library Agent ID
|
||||
const [selectedVersion, setSelectedVersion] = useState<number>(0); // Graph version
|
||||
const [maxVersion, setMaxVersion] = useState<number>(0);
|
||||
const [isDialogOpen, setIsDialogOpen] = useState(false);
|
||||
const [isLoading, setIsLoading] = useState(false);
|
||||
const [selectedFilter, setSelectedFilter] = useState<string>(""); // Graph ID
|
||||
|
||||
// Get user's timezone for displaying schedule times
|
||||
const userTimezone = useUserTimezone() ?? "UTC";
|
||||
|
||||
const filteredAndSortedSchedules = [...schedules]
|
||||
.filter(
|
||||
(schedule) => !selectedFilter || schedule.graph_id === selectedFilter,
|
||||
)
|
||||
.sort((a, b) => {
|
||||
const aValue = a[sortColumn];
|
||||
const bValue = b[sortColumn];
|
||||
if (sortDirection === "asc") {
|
||||
return String(aValue).localeCompare(String(bValue));
|
||||
}
|
||||
return String(bValue).localeCompare(String(aValue));
|
||||
});
|
||||
|
||||
const handleToggleSchedule = (scheduleId: string, enabled: boolean) => {
|
||||
onRemoveSchedule(scheduleId, enabled);
|
||||
if (!enabled) {
|
||||
toast({
|
||||
title: "Schedule Disabled",
|
||||
description: "The schedule has been successfully disabled.",
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
const handleNewSchedule = () => {
|
||||
setIsDialogOpen(true);
|
||||
};
|
||||
|
||||
const handleAgentSelect = (agentId: string) => {
|
||||
setSelectedAgent(agentId);
|
||||
const agent = agents.find((a) => a.id === agentId);
|
||||
setMaxVersion(agent!.graph_version);
|
||||
setSelectedVersion(agent!.graph_version);
|
||||
};
|
||||
|
||||
const handleVersionSelect = (version: string) => {
|
||||
setSelectedVersion(parseInt(version));
|
||||
};
|
||||
|
||||
const handleSchedule = async () => {
|
||||
if (!selectedAgent || !selectedVersion) {
|
||||
toast({
|
||||
title: "Invalid Input",
|
||||
description: "Please select an agent and a version.",
|
||||
variant: "destructive",
|
||||
});
|
||||
return;
|
||||
}
|
||||
if (selectedVersion < 1 || selectedVersion > maxVersion) {
|
||||
toast({
|
||||
title: "Invalid Version",
|
||||
description: `Please select a version between 1 and ${maxVersion}.`,
|
||||
variant: "destructive",
|
||||
});
|
||||
return;
|
||||
}
|
||||
setIsLoading(true);
|
||||
const agent = agents.find((a) => a.id == selectedAgent)!;
|
||||
try {
|
||||
await new Promise((resolve) => setTimeout(resolve, 100));
|
||||
router.push(
|
||||
`/build?flowID=${agent.graph_id}&flowVersion=${agent.graph_version}&open_scheduling=true`,
|
||||
);
|
||||
} catch (error) {
|
||||
console.error("Navigation error:", error);
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
<Card className="h-fit p-4">
|
||||
<Dialog open={isDialogOpen} onOpenChange={setIsDialogOpen}>
|
||||
<DialogContent>
|
||||
<DialogHeader>
|
||||
<DialogTitle>Select Agent for New Schedule</DialogTitle>
|
||||
</DialogHeader>
|
||||
<Select onValueChange={handleAgentSelect}>
|
||||
<SelectTrigger className="w-full">
|
||||
<SelectValue placeholder="Select an agent" />
|
||||
</SelectTrigger>
|
||||
<SelectContent>
|
||||
{agents.map((agent, i) => (
|
||||
<SelectItem key={agent.id + i} value={agent.id}>
|
||||
<TextRenderer value={agent.name} truncateLengthLimit={30} />
|
||||
</SelectItem>
|
||||
))}
|
||||
</SelectContent>
|
||||
</Select>
|
||||
<Label className="mt-4">
|
||||
Select version between 1 and {maxVersion}
|
||||
</Label>
|
||||
<Input
|
||||
type="number"
|
||||
min={1}
|
||||
max={selectedAgent ? maxVersion : 0}
|
||||
value={selectedVersion}
|
||||
onChange={(e) => handleVersionSelect(e.target.value)}
|
||||
placeholder="Select version"
|
||||
className="w-full"
|
||||
/>
|
||||
<Button
|
||||
onClick={handleSchedule}
|
||||
disabled={isLoading || !selectedAgent}
|
||||
className="mt-4"
|
||||
>
|
||||
{isLoading ? (
|
||||
<>
|
||||
<Loader2 className="mr-2 h-4 w-4 animate-spin" />
|
||||
Loading...
|
||||
</>
|
||||
) : (
|
||||
"Schedule"
|
||||
)}
|
||||
</Button>
|
||||
</DialogContent>
|
||||
</Dialog>
|
||||
|
||||
<div className="mb-4 flex items-center justify-between">
|
||||
<h3 className="text-lg font-semibold">Schedules</h3>
|
||||
<div className="flex flex-wrap gap-2">
|
||||
<Select onValueChange={setSelectedFilter}>
|
||||
<SelectTrigger className="h-8 w-[180px] rounded-md px-3 text-xs">
|
||||
<SelectValue placeholder="Filter by graph" />
|
||||
</SelectTrigger>
|
||||
<SelectContent className="text-xs">
|
||||
{agents.map((agent) => (
|
||||
<SelectItem key={agent.id} value={agent.graph_id}>
|
||||
{agent.name}
|
||||
</SelectItem>
|
||||
))}
|
||||
</SelectContent>
|
||||
</Select>
|
||||
<Button size="sm" variant="outline" onClick={handleNewSchedule}>
|
||||
<ClockIcon className="mr-2 h-4 w-4" />
|
||||
New Schedule
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
<ScrollArea className="max-h-[400px]">
|
||||
<Table>
|
||||
<TableHeader>
|
||||
<TableRow>
|
||||
<TableHead
|
||||
onClick={() => onSort("graph_id")}
|
||||
className="cursor-pointer"
|
||||
>
|
||||
Graph Name
|
||||
</TableHead>
|
||||
<TableHead className="cursor-pointer">Graph Version</TableHead>
|
||||
<TableHead
|
||||
onClick={() => onSort("next_run_time")}
|
||||
className="cursor-pointer"
|
||||
>
|
||||
Next Execution
|
||||
</TableHead>
|
||||
<TableHead
|
||||
onClick={() => onSort("cron")}
|
||||
className="cursor-pointer"
|
||||
>
|
||||
Schedule
|
||||
</TableHead>
|
||||
<TableHead>Timezone</TableHead>
|
||||
<TableHead>Actions</TableHead>
|
||||
</TableRow>
|
||||
</TableHeader>
|
||||
<TableBody>
|
||||
{filteredAndSortedSchedules.length === 0 ? (
|
||||
<TableRow>
|
||||
<TableCell
|
||||
colSpan={6}
|
||||
className="py-8 text-center text-lg text-gray-400"
|
||||
>
|
||||
No schedules are available
|
||||
</TableCell>
|
||||
</TableRow>
|
||||
) : (
|
||||
filteredAndSortedSchedules.map((schedule) => (
|
||||
<TableRow key={schedule.id}>
|
||||
<TableCell className="font-medium">
|
||||
{agents.find((a) => a.graph_id === schedule.graph_id)
|
||||
?.name || schedule.graph_id}
|
||||
</TableCell>
|
||||
<TableCell>{schedule.graph_version}</TableCell>
|
||||
<TableCell>
|
||||
{formatScheduleTime(schedule.next_run_time, userTimezone)}
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
<Badge variant="secondary">
|
||||
{humanizeCronExpression(schedule.cron)}
|
||||
</Badge>
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
<span className="text-sm text-muted-foreground">
|
||||
{schedule.timezone
|
||||
? getTimezoneAbbreviation(schedule.timezone)
|
||||
: userTimezone && getTimezoneAbbreviation(userTimezone)}
|
||||
</span>
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
<div className="flex space-x-2">
|
||||
<Button
|
||||
variant={"destructive"}
|
||||
onClick={() => handleToggleSchedule(schedule.id, false)}
|
||||
>
|
||||
Remove
|
||||
</Button>
|
||||
</div>
|
||||
</TableCell>
|
||||
</TableRow>
|
||||
))
|
||||
)}
|
||||
</TableBody>
|
||||
</Table>
|
||||
</ScrollArea>
|
||||
</Card>
|
||||
);
|
||||
};
|
||||
@@ -0,0 +1,24 @@
export default function AgentsFlowListSkeleton() {
return (
<div className="mx-auto max-w-4xl p-4">
<div className="mb-4 flex items-center justify-between">
<h1 className="text-2xl font-bold">Agents</h1>
<div className="h-10 w-24 animate-pulse rounded bg-gray-200"></div>
</div>
<div className="rounded-lg bg-white p-4 shadow">
<div className="mb-4 grid grid-cols-3 gap-4 font-medium text-gray-500">
<div>Name</div>
<div># of runs</div>
<div>Last run</div>
</div>
{[...Array(3)].map((_, index) => (
<div key={index} className="mb-4 grid grid-cols-3 gap-4">
<div className="h-6 animate-pulse rounded bg-gray-200"></div>
<div className="h-6 animate-pulse rounded bg-gray-200"></div>
<div className="h-6 animate-pulse rounded bg-gray-200"></div>
</div>
))}
</div>
</div>
);
}
@@ -0,0 +1,23 @@
export default function FlowRunsListSkeleton() {
return (
<div className="mx-auto max-w-4xl p-4">
<div className="rounded-lg bg-white p-4 shadow">
<h2 className="mb-4 text-xl font-semibold">Runs</h2>
<div className="mb-4 grid grid-cols-4 gap-4 text-sm font-medium text-gray-500">
<div>Agent</div>
<div>Started</div>
<div>Status</div>
<div>Duration</div>
</div>
{[...Array(4)].map((_, index) => (
<div key={index} className="mb-4 grid grid-cols-4 gap-4">
<div className="h-5 animate-pulse rounded bg-gray-200"></div>
<div className="h-5 animate-pulse rounded bg-gray-200"></div>
<div className="h-5 animate-pulse rounded bg-gray-200"></div>
<div className="h-5 animate-pulse rounded bg-gray-200"></div>
</div>
))}
</div>
</div>
);
}
@@ -0,0 +1,28 @@
export default function FlowRunsStatusSkeleton() {
return (
<div className="mx-auto max-w-4xl p-4">
<div className="rounded-lg bg-white p-4 shadow">
<div className="mb-6 flex items-center justify-between">
<h2 className="text-xl font-semibold">Stats</h2>
<div className="flex space-x-2">
{["2h", "8h", "24h", "7d", "Custom", "All"].map((btn) => (
<div
key={btn}
className="h-8 w-16 animate-pulse rounded bg-gray-200"
></div>
))}
</div>
</div>

{/* Placeholder for the line chart */}
<div className="mb-6 h-64 w-full animate-pulse rounded bg-gray-200"></div>

{/* Placeholders for total runs and total run time */}
<div className="space-y-2">
<div className="h-6 w-1/3 animate-pulse rounded bg-gray-200"></div>
<div className="h-6 w-1/2 animate-pulse rounded bg-gray-200"></div>
</div>
</div>
</div>
);
}
@@ -0,0 +1,21 @@
import AgentFlowListSkeleton from "@/app/(platform)/monitoring/components/skeletons/AgentFlowListSkeleton";
import React from "react";
import FlowRunsListSkeleton from "@/app/(platform)/monitoring/components/skeletons/FlowRunsListSkeleton";
import FlowRunsStatusSkeleton from "@/app/(platform)/monitoring/components/skeletons/FlowRunsStatusSkeleton";

export default function MonitorLoadingSkeleton() {
return (
<div className="space-y-4 p-4">
<div className="grid grid-cols-1 gap-4 md:grid-cols-3">
{/* Agents Section */}
<AgentFlowListSkeleton />

{/* Runs Section */}
<FlowRunsListSkeleton />

{/* Stats Section */}
<FlowRunsStatusSkeleton />
</div>
</div>
);
}
151  autogpt_platform/frontend/src/app/(platform)/monitoring/page.tsx  Normal file
@@ -0,0 +1,151 @@
"use client";
import React, { useCallback, useEffect, useState } from "react";

import { GraphExecutionMeta, LibraryAgent } from "@/lib/autogpt-server-api";
import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo";
import {
useGetV1ListExecutionSchedulesForAUser,
useDeleteV1DeleteExecutionSchedule,
} from "@/app/api/__generated__/endpoints/schedules/schedules";
import { okData } from "@/app/api/helpers";

import { Card } from "@/components/__legacy__/ui/card";
import { SchedulesTable } from "@/app/(platform)/monitoring/components/SchedulesTable";
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
import AgentFlowList from "./components/AgentFlowList";
import FlowRunsList from "./components/FlowRunsList";
import FlowRunInfo from "./components/FlowRunInfo";
import FlowInfo from "./components/FlowInfo";
import FlowRunsStatus from "./components/FlowRunsStatus";

const Monitor = () => {
const [flows, setFlows] = useState<LibraryAgent[]>([]);
const [executions, setExecutions] = useState<GraphExecutionMeta[]>([]);
const [selectedFlow, setSelectedFlow] = useState<LibraryAgent | null>(null);
const [selectedRun, setSelectedRun] = useState<GraphExecutionMeta | null>(
null,
);
const [sortColumn, setSortColumn] =
useState<keyof GraphExecutionJobInfo>("id");
const [sortDirection, setSortDirection] = useState<"asc" | "desc">("asc");
const api = useBackendAPI();

// Use generated API hooks for schedules
const { data: schedulesResponse, refetch: refetchSchedules } =
useGetV1ListExecutionSchedulesForAUser();
const deleteScheduleMutation = useDeleteV1DeleteExecutionSchedule();

const schedules = okData(schedulesResponse) ?? [];

const removeSchedule = useCallback(
async (scheduleId: string) => {
await deleteScheduleMutation.mutateAsync({ scheduleId });
refetchSchedules();
},
[deleteScheduleMutation, refetchSchedules],
);

const fetchAgents = useCallback(() => {
api.listLibraryAgents().then((response) => {
setFlows(response.agents);
});
api.getExecutions().then((executions) => {
setExecutions(executions);
});
}, [api]);

useEffect(() => {
fetchAgents();
}, [fetchAgents]);

useEffect(() => {
const intervalId = setInterval(() => fetchAgents(), 5000);
return () => clearInterval(intervalId);
}, [fetchAgents, flows]);

const column1 = "md:col-span-2 xl:col-span-3 xxl:col-span-2";
const column2 = "md:col-span-3 lg:col-span-2 xl:col-span-3";
const column3 = "col-span-full xl:col-span-4 xxl:col-span-5";

const handleSort = (column: keyof GraphExecutionJobInfo) => {
if (sortColumn === column) {
setSortDirection(sortDirection === "asc" ? "desc" : "asc");
} else {
setSortColumn(column);
setSortDirection("asc");
}
};

return (
<div
className="grid grid-cols-1 gap-4 p-4 md:grid-cols-5 lg:grid-cols-4 xl:grid-cols-10"
data-testid="monitor-page"
>
<AgentFlowList
className={column1}
flows={flows}
executions={executions}
selectedFlow={selectedFlow}
onSelectFlow={(f) => {
setSelectedRun(null);
setSelectedFlow(f.id == selectedFlow?.id ? null : f);
}}
/>
<FlowRunsList
className={column2}
flows={flows}
executions={[
...(selectedFlow
? executions.filter((v) => v.graph_id == selectedFlow.graph_id)
: executions),
].sort((a, b) => {
const aTime = a.started_at?.getTime() ?? 0;
const bTime = b.started_at?.getTime() ?? 0;
return bTime - aTime;
})}
selectedRun={selectedRun}
onSelectRun={(r) => setSelectedRun(r.id == selectedRun?.id ? null : r)}
/>
{(selectedRun && (
<FlowRunInfo
agent={
selectedFlow ||
flows.find((f) => f.graph_id == selectedRun.graph_id)!
}
execution={selectedRun}
className={column3}
/>
)) ||
(selectedFlow && (
<FlowInfo
flow={selectedFlow}
executions={executions.filter(
(e) => e.graph_id == selectedFlow.graph_id,
)}
className={column3}
refresh={() => {
fetchAgents();
setSelectedFlow(null);
setSelectedRun(null);
}}
/>
)) || (
<Card className={`p-6 ${column3}`}>
<FlowRunsStatus flows={flows} executions={executions} />
</Card>
)}
<div className="col-span-full xl:col-span-6">
<SchedulesTable
schedules={schedules} // all schedules
agents={flows} // for filtering purpose
onRemoveSchedule={removeSchedule}
sortColumn={sortColumn}
sortDirection={sortDirection}
onSort={handleSort}
/>
</div>
</div>
);
};

export default Monitor;
@@ -7022,29 +7022,24 @@
"input_schema": {
"additionalProperties": true,
"type": "object",
"title": "Input Schema"
"title": "Input Schema",
"description": "Full JSON schema for block inputs"
},
"output_schema": {
"additionalProperties": true,
"type": "object",
"title": "Output Schema"
"title": "Output Schema",
"description": "Full JSON schema for block outputs"
},
"required_inputs": {
"items": { "$ref": "#/components/schemas/BlockInputFieldInfo" },
"type": "array",
"title": "Required Inputs",
"description": "List of required input fields for this block"
"description": "List of input fields for this block"
}
},
"type": "object",
"required": [
"id",
"name",
"description",
"categories",
"input_schema",
"output_schema"
],
"required": ["id", "name", "description", "categories"],
"title": "BlockInfoSummary",
"description": "Summary of a block for search results."
},
@@ -7090,7 +7085,7 @@
"usage_hint": {
"type": "string",
"title": "Usage Hint",
"default": "To execute a block, call run_block with block_id set to the block's 'id' field and input_data containing the required fields from input_schema."
"default": "To execute a block, call run_block with block_id set to the block's 'id' field and input_data containing the fields listed in required_inputs."
}
},
"type": "object",
@@ -10495,7 +10490,10 @@
"operation_started",
"operation_pending",
"operation_in_progress",
"input_validation_error"
"input_validation_error",
"web_fetch",
"bash_exec",
"operation_status"
],
"title": "ResponseType",
"description": "Types of tool responses."
1043  autogpt_platform/frontend/src/hooks/useAgentGraph.tsx  Normal file
File diff suppressed because it is too large
@@ -12,6 +12,7 @@ export const PROTECTED_PAGES = [
"/onboarding",
"/profile",
"/library",
"/monitoring",
] as const;

export const ADMIN_PAGES = ["/admin"] as const;
@@ -2,6 +2,7 @@ import { type ClassValue, clsx } from "clsx";
import _isEmpty from "lodash/isEmpty";
import { twMerge } from "tailwind-merge";

import { NodeDimension } from "@/app/(platform)/build/components/legacy-builder/Flow/Flow";
import {
BlockIOObjectSubSchema,
BlockIORootSchema,
@@ -331,6 +332,81 @@ export function getPrimaryCategoryColor(categories: Category[]): string {
);
}

function rectanglesOverlap(
rect1: { x: number; y: number; width: number; height?: number },
rect2: { x: number; y: number; width: number; height?: number },
): boolean {
const x1 = rect1.x,
y1 = rect1.y,
w1 = rect1.width,
h1 = rect1.height ?? 100;
const x2 = rect2.x,
y2 = rect2.y,
w2 = rect2.width,
h2 = rect2.height ?? 100;

// Check if the rectangles do not overlap
return !(x1 + w1 <= x2 || x1 >= x2 + w2 || y1 + h1 <= y2 || y1 >= y2 + h2);
}

export function findNewlyAddedBlockCoordinates(
nodeDimensions: NodeDimension,
newWidth: number,
margin: number,
zoom: number,
) {
const nodeDimensionArray = Object.values(nodeDimensions);

for (let i = nodeDimensionArray.length - 1; i >= 0; i--) {
const lastNode = nodeDimensionArray[i];
const lastNodeHeight = lastNode.height ?? 100;

// Right of the last node
let newX = lastNode.x + lastNode.width + margin;
let newY = lastNode.y;
let newRect = { x: newX, y: newY, width: newWidth, height: 100 / zoom };

const collisionRight = nodeDimensionArray.some((node) =>
rectanglesOverlap(newRect, node),
);

if (!collisionRight) {
return { x: newX, y: newY };
}

// Left of the last node
newX = lastNode.x - newWidth - margin;
newRect = { x: newX, y: newY, width: newWidth, height: 100 / zoom };

const collisionLeft = nodeDimensionArray.some((node) =>
rectanglesOverlap(newRect, node),
);

if (!collisionLeft) {
return { x: newX, y: newY };
}

// Below the last node
newX = lastNode.x;
newY = lastNode.y + lastNodeHeight + margin;
newRect = { x: newX, y: newY, width: newWidth, height: 100 / zoom };

const collisionBelow = nodeDimensionArray.some((node) =>
rectanglesOverlap(newRect, node),
);

if (!collisionBelow) {
return { x: newX, y: newY };
}
}

// Default position if no space is found
return {
x: 0,
y: 0,
};
}

export function hasNonNullNonObjectValue(obj: any): boolean {
if (obj !== null && typeof obj === "object") {
return Object.values(obj).some((value) => hasNonNullNonObjectValue(value));
140  autogpt_platform/frontend/src/tests/monitor.spec.ts  Normal file
@@ -0,0 +1,140 @@
|
||||
import test, { expect, TestInfo } from "@playwright/test";
|
||||
|
||||
import { BuildPage } from "./pages/build.page";
|
||||
import { MonitorPage } from "./pages/monitor.page";
|
||||
import { v4 as uuidv4 } from "uuid";
|
||||
import * as fs from "fs/promises";
|
||||
import path from "path";
|
||||
import { LoginPage } from "./pages/login.page";
|
||||
import { getTestUser } from "./utils/auth";
|
||||
import { hasUrl } from "./utils/assertion";
|
||||
import {
|
||||
navigateToLibrary,
|
||||
clickFirstAgent,
|
||||
runAgent,
|
||||
waitForAgentPageLoad,
|
||||
} from "./pages/library.page";
|
||||
|
||||
test.describe.configure({
|
||||
mode: "parallel",
|
||||
timeout: 30000,
|
||||
});
|
||||
// --8<-- [start:AttachAgentId]
|
||||
test.beforeEach(async ({ page }, testInfo: TestInfo) => {
|
||||
const loginPage = new LoginPage(page);
|
||||
const testUser = await getTestUser();
|
||||
const monitorPage = new MonitorPage(page);
|
||||
|
||||
// Start each test with login using worker auth
|
||||
await page.goto("/login");
|
||||
await loginPage.login(testUser.email, testUser.password);
|
||||
await hasUrl(page, "/marketplace");
|
||||
|
||||
// Navigate to library and run the first agent
|
||||
await navigateToLibrary(page);
|
||||
await clickFirstAgent(page);
|
||||
await waitForAgentPageLoad(page);
|
||||
await runAgent(page);
|
||||
|
||||
// Navigate to monitoring page
|
||||
await page.goto("/monitoring");
|
||||
await test.expect(monitorPage.isLoaded()).resolves.toBeTruthy();
|
||||
|
||||
// Generate a test ID for tracking
|
||||
const id = uuidv4();
|
||||
testInfo.attach("agent-id", { body: id });
|
||||
});
|
||||
// --8<-- [end:AttachAgentId]
|
||||
|
||||
test.afterAll(async () => {
|
||||
// clear out the downloads folder
|
||||
const downloadsFolder = process.cwd() + "/downloads";
|
||||
console.log(`clearing out the downloads folder ${downloadsFolder}/monitor`);
|
||||
|
||||
await fs.rm(`${downloadsFolder}/monitor`, {
|
||||
recursive: true,
|
||||
force: true,
|
||||
});
|
||||
});
|
||||
|
||||
test.skip("user can export and import agents", async ({
|
||||
page,
|
||||
}, testInfo: TestInfo) => {
|
||||
const monitorPage = new MonitorPage(page);
|
||||
const buildPage = new BuildPage(page);
|
||||
|
||||
// --8<-- [start:ReadAgentId]
|
||||
if (testInfo.attachments.length === 0 || !testInfo.attachments[0].body) {
|
||||
throw new Error("No agent id attached to the test");
|
||||
}
|
||||
|
||||
const testAttachName = testInfo.attachments[0].body.toString();
|
||||
// --8<-- [end:ReadAgentId]
|
||||
const agents = await monitorPage.listAgents();
|
||||
|
||||
const downloadPromise = page.waitForEvent("download");
|
||||
|
||||
const agent = agents.find(
|
||||
(a: any) => a.name === `test-agent-${testAttachName}`,
|
||||
);
|
||||
|
||||
if (!agent) throw new Error(`Agent ${testAttachName} not found`);
|
||||
|
||||
await monitorPage.exportToFile(agent);
|
||||
const download = await downloadPromise;
|
||||
|
||||
// Wait for the download process to complete and save the downloaded file somewhere.
|
||||
await download.saveAs(
|
||||
`${monitorPage.downloadsFolder}/monitor/${download.suggestedFilename()}`,
|
||||
);
|
||||
|
||||
console.log(`downloaded file to ${download.suggestedFilename()}`);
|
||||
|
||||
expect(download.suggestedFilename()).toBeDefined();
|
||||
expect(download.suggestedFilename()).toContain("test-agent-");
|
||||
expect(download.suggestedFilename()).toContain("v1.json");
|
||||
|
||||
// import the agent
|
||||
const preImportAgents = await monitorPage.listAgents();
|
||||
|
||||
const filesInFolder = await fs.readdir(
|
||||
`${monitorPage.downloadsFolder}/monitor`,
|
||||
);
|
||||
|
||||
const importFile = filesInFolder.find((f) => f.includes(testAttachName));
|
||||
if (!importFile) {
|
||||
throw new Error(`No import file found for agent ${testAttachName}`);
|
||||
}
|
||||
|
||||
const baseName = importFile.split(".")[0];
|
||||
|
||||
await monitorPage.importFromFile(
|
||||
path.resolve(monitorPage.downloadsFolder, "monitor"),
|
||||
importFile,
|
||||
baseName + "-imported",
|
||||
);
|
||||
|
||||
// You'll be dropped at the build page, so hit run and then go back to monitor
|
||||
await buildPage.runAgent();
|
||||
await monitorPage.navbar.clickMonitorLink();
|
||||
|
||||
const postImportAgents = await monitorPage.listAgents();
|
||||
|
||||
expect(postImportAgents.length).toBeGreaterThan(preImportAgents.length);
|
||||
|
||||
console.log(`postImportAgents: ${JSON.stringify(postImportAgents)}`);
|
||||
|
||||
const importedAgent = postImportAgents.find(
|
||||
(a: any) => a.name === `${baseName}-imported`,
|
||||
);
|
||||
|
||||
expect(importedAgent).toBeDefined();
|
||||
});
|
||||
|
||||
test.skip("user can view runs and agents", async ({ page }) => {
|
||||
const monitorPage = new MonitorPage(page);
|
||||
// const runs = await monitorPage.listRuns();
|
||||
const agents = await monitorPage.listAgents();
|
||||
|
||||
expect(agents.length).toBeGreaterThan(0);
|
||||
});
|
||||
@@ -27,7 +27,7 @@ export class BuildPage extends BasePage {
await this.page
.getByRole("button", { name: "Skip Tutorial", exact: true })
.click({ timeout: 3000 });
} catch (error) {
} catch (_error) {
console.info("Tutorial not shown or already dismissed");
}
}
@@ -252,6 +252,21 @@ export class LibraryPage extends BasePage {
]);
}

async clickMonitoringLink(): Promise<void> {
console.log(`clicking monitoring link in alert`);
await this.page.getByRole("link", { name: "here" }).click();
}

async isMonitoringAlertVisible(): Promise<boolean> {
console.log(`checking if monitoring alert is visible`);
try {
const alertText = this.page.locator("text=/Prefer the old experience/");
return await alertText.isVisible();
} catch {
return false;
}
}

async getSearchValue(): Promise<string> {
console.log(`getting search input value`);
try {
237  autogpt_platform/frontend/src/tests/pages/monitor.page.ts  Normal file
@@ -0,0 +1,237 @@
|
||||
import { Page } from "@playwright/test";
|
||||
import { BasePage } from "./base.page";
|
||||
import path from "path";
|
||||
|
||||
interface Agent {
|
||||
id: string;
|
||||
name: string;
|
||||
runCount: number;
|
||||
lastRun: string;
|
||||
}
|
||||
|
||||
interface Run {
|
||||
id: string;
|
||||
agentId: string;
|
||||
agentName: string;
|
||||
started: string;
|
||||
duration: number;
|
||||
status: string;
|
||||
}
|
||||
|
||||
interface Schedule {
|
||||
id: string;
|
||||
graphName: string;
|
||||
nextExecution: string;
|
||||
schedule: string;
|
||||
actions: string[];
|
||||
}
|
||||
|
||||
enum ImportType {
|
||||
AGENT = "agent",
|
||||
TEMPLATE = "template",
|
||||
}
|
||||
|
||||
export class MonitorPage extends BasePage {
|
||||
constructor(page: Page) {
|
||||
super(page);
|
||||
}
|
||||
|
||||
async isLoaded(): Promise<boolean> {
|
||||
console.log(`checking if monitor page is loaded`);
|
||||
try {
|
||||
// Wait for the monitor page
|
||||
await this.page.getByTestId("monitor-page").waitFor({
|
||||
state: "visible",
|
||||
timeout: 10_000,
|
||||
});
|
||||
|
||||
// Wait for table headers to be visible (indicates table structure is ready)
|
||||
await this.page.locator("thead th").first().waitFor({
|
||||
state: "visible",
|
||||
timeout: 15_000,
|
||||
});
|
||||
|
||||
// Wait for either a table row or an empty tbody to be present
|
||||
await Promise.race([
|
||||
// Wait for at least one row
|
||||
this.page.locator("tbody tr[data-testid]").first().waitFor({
|
||||
state: "visible",
|
||||
timeout: 15_000,
|
||||
}),
|
||||
// OR wait for an empty tbody (indicating no agents but table is loaded)
|
||||
this.page
|
||||
.locator("tbody[data-testid='agent-flow-list-body']:empty")
|
||||
.waitFor({
|
||||
state: "visible",
|
||||
timeout: 15_000,
|
||||
}),
|
||||
]);
|
||||
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
async listAgents(): Promise<Agent[]> {
|
||||
console.log(`listing agents`);
|
||||
// Wait for table rows to be available
|
||||
const rows = await this.page.locator("tbody tr[data-testid]").all();
|
||||
|
||||
const agents: Agent[] = [];
|
||||
|
||||
for (const row of rows) {
|
||||
// Get the id from data-testid attribute
|
||||
const id = (await row.getAttribute("data-testid")) || "";
|
||||
|
||||
// Get columns - there are 3 cells per row (name, run count, last run)
|
||||
const cells = await row.locator("td").all();
|
||||
|
||||
// Extract name from first cell
|
||||
const name = (await row.getAttribute("data-name")) || "";
|
||||
|
||||
// Extract run count from second cell
|
||||
const runCountText = (await cells[1].textContent()) || "0";
|
||||
const runCount = parseInt(runCountText, 10);
|
||||
|
||||
// Extract last run from third cell's title attribute (contains full timestamp)
|
||||
// If no title, the cell will be empty indicating no last run
|
||||
const lastRunCell = cells[2];
|
||||
const lastRun = (await lastRunCell.getAttribute("title")) || "";
|
||||
|
||||
agents.push({
|
||||
id,
|
||||
name,
|
||||
runCount,
|
||||
lastRun,
|
||||
});
|
||||
}
|
||||
|
||||
agents.reduce((acc, agent) => {
|
||||
if (!agent.id.includes("flow-run")) {
|
||||
acc.push(agent);
|
||||
}
|
||||
return acc;
|
||||
}, [] as Agent[]);
|
||||
|
||||
return agents;
|
||||
}
|
||||
|
||||
async listRuns(filter?: Agent): Promise<Run[]> {
|
||||
console.log(`listing runs`);
|
||||
// Wait for the runs table to be loaded - look for table header "Agent"
|
||||
await this.page.locator("[data-testid='flow-runs-list-body']").waitFor({
|
||||
timeout: 10000,
|
||||
});
|
||||
|
||||
// Get all run rows
|
||||
const rows = await this.page
|
||||
.locator('tbody tr[data-testid^="flow-run-"]')
|
||||
.all();
|
||||
|
||||
const runs: Run[] = [];
|
||||
|
||||
for (const row of rows) {
|
||||
const runId = (await row.getAttribute("data-runid")) || "";
|
||||
const agentId = (await row.getAttribute("data-graphid")) || "";
|
||||
|
||||
// Get columns
|
||||
const cells = await row.locator("td").all();
|
||||
|
||||
// Parse data from cells
|
||||
const agentName = (await cells[0].textContent()) || "";
|
||||
const started = (await cells[1].textContent()) || "";
|
||||
const status = (await cells[2].locator("div").textContent()) || "";
|
||||
const duration = (await cells[3].textContent()) || "";
|
||||
|
||||
// Only add if no filter or if matches filter
|
||||
if (!filter || filter.id === agentId) {
|
||||
runs.push({
|
||||
id: runId,
|
||||
agentId: agentId,
|
||||
agentName: agentName.trim(),
|
||||
started: started.trim(),
|
||||
duration: parseFloat(duration.replace("s", "")),
|
||||
status: status.toLowerCase().trim(),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return runs;
|
||||
}
|
||||
async listSchedules(): Promise<Schedule[]> {
|
||||
console.log(`listing schedules`);
|
||||
return [];
|
||||
}
|
||||
|
||||
async clickAgent(id: string) {
|
||||
console.log(`selecting agent ${id}`);
|
||||
await this.page.getByTestId(id).click();
|
||||
}
|
||||
|
||||
async clickCreateAgent(): Promise<void> {
|
||||
console.log(`clicking create agent`);
|
||||
await this.page.getByRole("link", { name: "Create" }).click();
|
||||
}
|
||||
|
||||
async importFromFile(
|
||||
directory: string,
|
||||
file: string,
|
||||
name?: string,
|
||||
description?: string,
|
||||
importType: ImportType = ImportType.AGENT,
|
||||
) {
|
||||
console.log(
|
||||
`importing from directory: ${directory} file: ${file} name: ${name} description: ${description} importType: ${importType}`,
|
||||
);
|
||||
await this.page.getByTestId("create-agent-dropdown").click();
|
||||
await this.page.getByTestId("import-agent-from-file").click();
|
||||
|
||||
await this.page
|
||||
.getByTestId("import-agent-file-input")
|
||||
.setInputFiles(path.join(directory, file));
|
||||
if (name) {
|
||||
console.log(`filling agent name: ${name}`);
|
||||
await this.page.getByTestId("agent-name-input").fill(name);
|
||||
}
|
||||
if (description) {
|
||||
console.log(`filling agent description: ${description}`);
|
||||
await this.page.getByTestId("agent-description-input").fill(description);
|
||||
}
|
||||
if (importType === ImportType.TEMPLATE) {
|
||||
console.log(`clicking import as template switch`);
|
||||
await this.page.getByTestId("import-as-template-switch").click();
|
||||
}
|
||||
console.log(`clicking import agent submit`);
|
||||
await this.page.getByTestId("import-agent-submit").click();
|
||||
}
|
||||
|
||||
async deleteAgent(agent: Agent) {
|
||||
console.log(`deleting agent ${agent.id} ${agent.name}`);
|
||||
}
|
||||
|
||||
async clickAllVersions(agent: Agent) {
|
||||
console.log(`clicking all versions for agent ${agent.id} ${agent.name}`);
|
||||
}
|
||||
|
||||
async openInBuilder(agent: Agent) {
|
||||
console.log(`opening agent ${agent.id} ${agent.name} in builder`);
|
||||
}
|
||||
|
||||
async exportToFile(agent: Agent) {
|
||||
await this.clickAgent(agent.id);
|
||||
|
||||
console.log(`exporting agent id: ${agent.id} name: ${agent.name} to file`);
|
||||
await this.page.getByTestId("export-button").click();
|
||||
}
|
||||
|
||||
async selectRun(agent: Agent, run: Run) {
|
||||
console.log(`selecting run ${run.id} for agent ${agent.id} ${agent.name}`);
|
||||
}
|
||||
|
||||
async openOutputs(agent: Agent, run: Run) {
|
||||
console.log(
|
||||
`opening outputs for run ${run.id} of agent ${agent.id} ${agent.name}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -8,6 +8,10 @@ export class NavBar {
await this.page.getByRole("link", { name: "Edit profile" }).click();
}

async clickMonitorLink() {
await this.page.getByTestId("navbar-link-library").click();
}

async clickBuildLink() {
const link = this.page.getByTestId("navbar-link-build");
await link.waitFor({ state: "visible", timeout: 15000 });
@@ -563,7 +563,7 @@ The block supports conversation continuation through three mechanisms:
|--------|-------------|------|
| error | Error message if execution failed | str |
| response | The output/response from Claude Code execution | str |
| files | List of text files created/modified by Claude Code during this execution. Each file has 'path', 'relative_path', 'name', and 'content' fields. | List[FileOutput] |
| files | List of text files created/modified by Claude Code during this execution. Each file has 'path', 'relative_path', 'name', 'content', and 'workspace_ref' fields. workspace_ref contains a workspace:// URI if the file was stored to workspace. | List[SandboxFileOutput] |
| conversation_history | Full conversation history including this turn. Pass this to conversation_history input to continue on a fresh sandbox if the previous sandbox timed out. | str |
| session_id | Session ID for this conversation. Pass this back along with sandbox_id to continue the conversation. | str |
| sandbox_id | ID of the sandbox instance. Pass this back along with session_id to continue the conversation. This is None if dispose_sandbox was True (sandbox was disposed). | str |

@@ -215,6 +215,7 @@ The sandbox includes pip and npm pre-installed. Set timeout to limit execution t
| response | Text output (if any) of the main execution result | str |
| stdout_logs | Standard output logs from execution | str |
| stderr_logs | Standard error logs from execution | str |
| files | Files created or modified during execution. Each file has path, name, content, and workspace_ref (if stored). | List[SandboxFileOutput] |

### Possible use case
<!-- MANUAL: use_case -->