Compare commits

...

62 Commits

Author SHA1 Message Date
Swifty
e8beb042a7 refactor chat container 2025-11-04 17:21:29 +01:00
Swifty
097d706e4f Merge branch 'swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy' into swiftyos/chat-ui 2025-11-04 16:31:44 +01:00
Swifty
568e0bafff Merge branch 'swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy' of github.com:Significant-Gravitas/AutoGPT into swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy 2025-11-04 16:31:26 +01:00
Swifty
ecec417bb5 update backend 2025-11-04 16:31:19 +01:00
Swifty
a6941cad40 update frontend 2025-11-04 16:30:14 +01:00
Swifty
6c4d528fda update frontend 2025-11-04 14:51:46 +01:00
Swifty
844ae09440 updated prompt and tool naming 2025-11-04 10:55:33 +01:00
Swifty
93551a905c working on credentials flow 2025-11-04 10:54:39 +01:00
Swifty
3e68615b33 feat(frontend): Add markdown rendering and credentials setup to chat
This commit implements markdown rendering for chat messages and adds a
comprehensive credentials setup flow for the chat interface.

Key Changes:

Chat Credentials Setup:
- Add ChatCredentialsSetup component for managing multiple credentials
- Support all credential types (API key, OAuth2, password, host-scoped)
- Auto-detect existing credentials and mark as configured
- Auto-complete when all credentials are set up
- Reuse existing credential modals from AgentRunsView
- Add comprehensive Storybook stories

Markdown Rendering:
- Create MarkdownContent atom component for chat messages
- Support GitHub Flavored Markdown (tables, task lists, strikethrough)
- Add explicit HTML sanitization (skipHtml) for XSS protection
- Include comprehensive JSDoc documentation
- Add 16 Storybook story variants covering edge cases
- Apply to both ChatMessage and StreamingMessage components

Message Flow Improvements:
- Fix duplicate tool response messages in chat
- Remove unnecessary backend refresh after stream ends
- Local state now source of truth for messages during streaming
- Update message types to support multiple credentials
- Extract all missing credentials, not just first one

Type Safety & Code Quality:
- Replace all 'any' types with proper TypeScript interfaces
- Add proper type definitions for ReactMarkdown component props
- Fix color inconsistency (violet → purple) in StreamingMessage
- Pass onSendMessage callback through MessageList to ChatMessage

All changes follow frontend/CONTRIBUTING.md guidelines and pass
TypeScript compilation checks.
2025-11-04 10:17:54 +01:00
Swifty
606c92f8d0 feat(frontend): Enhance chat functionality and session management
- Added support for "example.com" in the Next.js configuration for external resources.
- Refactored `useChatPage` to improve session creation logic, ensuring sessions are only created when necessary.
- Introduced new helper functions for validating message structures and parsing tool responses in `ChatContainer`.
- Updated `useChatContainer` to utilize new helper functions for better message handling and response parsing.
- Implemented local storage management for session IDs, improving session persistence and validation.
- Added error handling for OAuth flows in `CredentialsInputs`, enhancing user experience during authentication.

These changes aim to streamline chat interactions and improve overall session management.
2025-11-04 09:24:50 +01:00
Swifty
465c7eebdf Merge remote-tracking branch 'origin/swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy' into swiftyos/chat-ui 2025-11-03 16:32:55 +01:00
Swifty
456feabf5a Merge branch 'dev' into swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy 2025-11-03 16:27:25 +01:00
Swifty
2390811181 Merge branch 'swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy' of github.com:Significant-Gravitas/AutoGPT into swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy 2025-11-03 16:16:42 +01:00
Swifty
6ade1c955d update openapi json 2025-11-03 16:16:36 +01:00
Swifty
b803911976 updated backend to auto restream after tool calls 2025-11-03 16:16:07 +01:00
Swifty
b72c6118ee Merge remote-tracking branch 'origin/swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy' into swiftyos/chat-ui 2025-11-03 11:05:48 +01:00
Swifty
4f4916bb85 Merge branch 'dev' into swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy 2025-11-03 11:05:25 +01:00
Swifty
cfd8131b63 Merge branch 'swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy' into swiftyos/chat-ui 2025-11-03 10:57:23 +01:00
Swifty
2832b28c8e enable all tools 2025-11-03 10:45:13 +01:00
Swifty
141e5d9b2d delete chat tests 2025-10-31 13:45:12 +01:00
Swifty
348991464a possibly fix tests 2025-10-31 13:26:08 +01:00
Swifty
2535046ce6 updated openai spec 2025-10-31 12:30:35 +01:00
Swifty
5a2d8c0f2d fmt 2025-10-31 10:12:10 +01:00
Swifty
84505565bb fix useChatSession typing 2025-10-31 10:08:24 +01:00
Swifty
3386295ad7 Merge branch 'swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy' into swiftyos/chat-ui 2025-10-31 09:55:18 +01:00
Swifty
c49ce3460b update openapi schema 2025-10-31 09:53:48 +01:00
Swifty
1351ca02b4 fix(frontend): Prevent duplicate tool response UI elements
- Replace all local messages with just completed assistant message on stream_end
- This prevents tool responses from showing twice when initialMessages updates
- Remove unused withFeatureFlag import from chat page
2025-10-30 17:08:38 +01:00
Swifty
922135239d chore(frontend): Update OpenAPI spec with relaxed sorting constraints 2025-10-30 17:01:08 +01:00
Swifty
6800af59bd chat ui 2025-10-30 16:56:25 +01:00
Swifty
91084bf051 fmt 2025-10-30 16:42:15 +01:00
Swifty
ac3ade977a Merge branch 'swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy' of github.com:Significant-Gravitas/AutoGPT into swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy 2025-10-30 16:32:53 +01:00
Swifty
92fe57d83f update backend to use feature flag 2025-10-30 16:32:31 +01:00
Swifty
6113807f23 Merge branch 'dev' into swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy 2025-10-30 15:13:41 +01:00
Swifty
a570d8ccdf Merge branch 'swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy' of github.com:Significant-Gravitas/AutoGPT into swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy 2025-10-30 15:10:05 +01:00
Swifty
c136e08321 feat(chat): Enhance chat session management and error handling
- Added chat routes to the REST API for improved session management.
- Introduced RedisError exception for better error handling when interacting with Redis.
- Updated ChatSession model to include credentials and improved session validation.
- Enhanced error logging and handling in chat streaming functions to ensure robustness.
- Removed unused LOGIN_NEEDED response type from models for cleaner code.
2025-10-30 15:09:22 +01:00
Swifty
82977886f9 Merge branch 'dev' into swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy 2025-10-29 16:13:10 +01:00
Swifty
8d9bcb620d Merge branch 'swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy' of github.com:Significant-Gravitas/AutoGPT into swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy 2025-10-29 12:50:51 +01:00
Swifty
70a7674748 refactor(chat): Remove unused parameters from stream_chat function and improve session error handling
- Removed `model` and `max_context` parameters from `stream_chat` function as they were not utilized.
- Updated error handling in `stream_chat_completion` to raise a `NotFoundError` with a descriptive message when a session is not found.
2025-10-29 12:50:34 +01:00
Swifty
320427d438 Merge branch 'dev' into swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy 2025-10-29 12:38:29 +01:00
Swifty
246594b0e6 added tests 2025-10-29 12:35:09 +01:00
Swifty
ade058d456 add test fixtures 2025-10-29 11:52:14 +01:00
Swifty
82e1542812 Merge branch 'swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy' of github.com:Significant-Gravitas/AutoGPT into swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy 2025-10-29 11:51:56 +01:00
Swifty
bac80f6312 add setup agent tool for scheduled agents 2025-10-29 11:51:50 +01:00
Swifty
59ccb7f5de add run agent tool 2025-10-29 11:51:31 +01:00
Swifty
4841010d9a update tool paramerter 2025-10-29 11:51:18 +01:00
Swifty
1e58aa4327 Merge branch 'dev' into swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy 2025-10-28 15:51:29 +01:00
Swifty
75d7591a7c added get setup info tool 2025-10-27 16:21:04 +01:00
Swifty
6e1e1131b5 Merge branch 'dev' into swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy 2025-10-27 10:44:28 +01:00
Swifty
cda25ec1b5 Merge branch 'dev' into swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy 2025-10-24 16:45:19 +02:00
Swifty
8c96ee9b00 trim down agent details 2025-10-24 16:42:44 +02:00
Swifty
5bb6d6a09b added get_agent_details tool 2025-10-24 16:41:28 +02:00
Swifty
20cd99da9c tested streaming service with tool calling 2025-10-24 16:02:05 +02:00
Swifty
52a4e545d5 fmt 2025-10-24 13:55:59 +02:00
Swifty
5fd168a5d1 added tool execution 2025-10-24 13:54:12 +02:00
Swifty
0025f1b779 added inital tool calling 2025-10-24 12:06:11 +02:00
Swifty
ec3a528c19 update routes to use chat service 2025-10-24 11:49:29 +02:00
Swifty
b26b0cd23e update service to process chunks 2025-10-24 11:26:20 +02:00
Swifty
821d4cde21 use session as the key object 2025-10-24 10:48:32 +02:00
Swifty
3dbc9aeb1c added redis data storage 2025-10-24 10:16:30 +02:00
Swifty
4fe81924ff Merge branch 'dev' into swiftyos/secrt-1646-review-chat-logic-and-route-models-in-chatpy-and-modelspy 2025-10-24 09:37:19 +02:00
Swifty
2aa6a0668d yield pydantic models in the pure streaming function 2025-10-21 15:32:49 +02:00
Swifty
4a37bf93c5 chat system core 2025-10-21 15:26:41 +02:00
79 changed files with 10947 additions and 34 deletions

View File

@@ -27,6 +27,7 @@ import backend.server.v2.admin.credit_admin_routes
import backend.server.v2.admin.store_admin_routes
import backend.server.v2.builder
import backend.server.v2.builder.routes
import backend.server.v2.chat.routes as chat_routes
import backend.server.v2.library.db
import backend.server.v2.library.model
import backend.server.v2.library.routes
@@ -49,7 +50,12 @@ from backend.util.exceptions import (
NotAuthorizedError,
NotFoundError,
)
from backend.util.feature_flag import initialize_launchdarkly, shutdown_launchdarkly
from backend.util.feature_flag import (
Flag,
create_feature_flag_dependency,
initialize_launchdarkly,
shutdown_launchdarkly,
)
from backend.util.service import UnhealthyServiceError
settings = backend.util.settings.Settings()
@@ -284,6 +290,14 @@ app.include_router(
tags=["v1", "email"],
prefix="/api/email",
)
app.include_router(
chat_routes.router,
tags=["v2", "chat"],
prefix="/api/chat",
dependencies=[
fastapi.Depends(create_feature_flag_dependency(Flag.CHAT, default=True))
],
)
app.mount("/external-api", external_app)

View File

@@ -0,0 +1,114 @@
"""Configuration management for chat system."""
import os
from pathlib import Path
from pydantic import Field, field_validator
from pydantic_settings import BaseSettings
class ChatConfig(BaseSettings):
    """Configuration for the chat system.

    Values are read from the environment (``.env`` supported) via
    pydantic-settings; the validators below add explicit fallback chains
    across several well-known environment variable names.
    """

    # OpenAI API Configuration
    model: str = Field(
        default="qwen/qwen3-235b-a22b-2507", description="Default model to use"
    )
    api_key: str | None = Field(default=None, description="OpenAI API key")
    base_url: str | None = Field(
        default="https://openrouter.ai/api/v1",
        description="Base URL for API (e.g., for OpenRouter)",
    )

    # Session TTL Configuration - 12 hours
    session_ttl: int = Field(default=43200, description="Session TTL in seconds")

    # System Prompt Configuration
    system_prompt_path: str = Field(
        default="prompts/chat_system.md",
        description="Path to system prompt file relative to chat module",
    )

    # Streaming Configuration
    max_context_messages: int = Field(
        default=50, ge=1, le=200, description="Maximum context messages"
    )
    stream_timeout: int = Field(default=300, description="Stream timeout in seconds")
    max_retries: int = Field(default=3, description="Maximum number of retries")

    @field_validator("api_key", mode="before")
    @classmethod
    def get_api_key(cls, v):
        """Get API key from environment if not provided.

        NOTE(review): in pydantic v2 a "before" validator does not run on
        the field default unless ``validate_default=True`` is set, so with
        ``default=None`` this fallback chain may never fire — confirm
        against the pydantic-settings version in use.
        """
        if v is None:
            # Try to get from environment variables
            # First check for CHAT_API_KEY (Pydantic prefix)
            v = os.getenv("CHAT_API_KEY")
            if not v:
                # Fall back to OPEN_ROUTER_API_KEY
                v = os.getenv("OPEN_ROUTER_API_KEY")
            if not v:
                # Fall back to OPENAI_API_KEY
                v = os.getenv("OPENAI_API_KEY")
        return v

    @field_validator("base_url", mode="before")
    @classmethod
    def get_base_url(cls, v):
        """Get base URL from environment if not provided.

        NOTE(review): same caveat as ``get_api_key`` — with a non-None
        default this validator only runs when a value is explicitly
        supplied, making the env fallbacks likely dead code.
        """
        if v is None:
            # Check for OpenRouter or custom base URL
            v = os.getenv("CHAT_BASE_URL")
            if not v:
                v = os.getenv("OPENROUTER_BASE_URL")
            if not v:
                v = os.getenv("OPENAI_BASE_URL")
            if not v:
                v = "https://openrouter.ai/api/v1"
        return v

    def get_system_prompt(self, **template_vars) -> str:
        """Load and render the system prompt from file.

        Args:
            **template_vars: Variables to substitute in the template

        Returns:
            Rendered system prompt string

        Raises:
            FileNotFoundError: If neither a ``.j2`` template nor the
                plain prompt file exists at the configured path.
        """
        # Get the path relative to this module
        module_dir = Path(__file__).parent
        prompt_path = module_dir / self.system_prompt_path
        # Check for .j2 extension first (Jinja2 template)
        j2_path = Path(str(prompt_path) + ".j2")
        if j2_path.exists():
            try:
                from jinja2 import Template

                template = Template(j2_path.read_text())
                return template.render(**template_vars)
            except ImportError:
                # Jinja2 not installed, fall back to reading as plain text
                return j2_path.read_text()
        # Check for markdown file
        if prompt_path.exists():
            content = prompt_path.read_text()
            # Simple variable substitution if Jinja2 is not available
            for key, value in template_vars.items():
                placeholder = f"{{{key}}}"
                content = content.replace(placeholder, str(value))
            return content
        raise FileNotFoundError(f"System prompt file not found: {prompt_path}")

    class Config:
        """Pydantic config (legacy inner-class style; pydantic v2 prefers model_config)."""

        env_file = ".env"
        env_file_encoding = "utf-8"
        extra = "ignore"  # Ignore extra environment variables

View File

@@ -0,0 +1,199 @@
import logging
import uuid
from datetime import UTC, datetime
from openai.types.chat import (
ChatCompletionAssistantMessageParam,
ChatCompletionDeveloperMessageParam,
ChatCompletionFunctionMessageParam,
ChatCompletionMessageParam,
ChatCompletionSystemMessageParam,
ChatCompletionToolMessageParam,
ChatCompletionUserMessageParam,
)
from openai.types.chat.chat_completion_assistant_message_param import FunctionCall
from openai.types.chat.chat_completion_message_tool_call_param import (
ChatCompletionMessageToolCallParam,
Function,
)
from pydantic import BaseModel
from backend.server.v2.chat.config import ChatConfig
from backend.util.cache import async_redis
from backend.util.exceptions import RedisError
logger = logging.getLogger(__name__)
config = ChatConfig()
class ChatMessage(BaseModel):
    """One message in a chat session, loosely mirroring OpenAI's schema.

    The optional fields apply only to certain roles (e.g. tool_call_id
    for role="tool"; tool_calls/function_call for role="assistant") —
    see ChatSession.to_openai_messages for the exact mapping.
    """

    role: str  # "developer" | "system" | "user" | "assistant" | "tool" | "function"
    content: str
    name: str | None = None
    tool_call_id: str | None = None
    refusal: str | None = None
    tool_calls: list[dict] | None = None  # stored as {id, type, function: {name, arguments}}
    function_call: dict | None = None  # legacy function-calling payload {name, arguments}
class Usage(BaseModel):
    """Token usage for a single completion round-trip."""

    prompt_tokens: int
    completion_tokens: int
    total_tokens: int
class ChatSession(BaseModel):
    """A chat conversation persisted in Redis.

    Holds the ordered message history, per-completion token usage, and
    credential metadata collected during the conversation. ``user_id``
    is None for anonymous sessions until a user claims them.
    """

    session_id: str
    user_id: str | None  # None for anonymous sessions
    messages: list[ChatMessage]
    usage: list[Usage]
    credentials: dict[str, dict] = {}  # Map of provider -> credential metadata
    started_at: datetime
    updated_at: datetime

    @staticmethod
    def new(user_id: str | None) -> "ChatSession":
        # Create an empty session with a random UUID and UTC timestamps.
        return ChatSession(
            session_id=str(uuid.uuid4()),
            user_id=user_id,
            messages=[],
            usage=[],
            credentials={},
            started_at=datetime.now(UTC),
            updated_at=datetime.now(UTC),
        )

    def to_openai_messages(self) -> list[ChatCompletionMessageParam]:
        """Convert stored messages into OpenAI SDK message params.

        Maps each ChatMessage to the role-specific TypedDict expected by
        the completions API. Optional fields (name, refusal, function
        call, tool calls) are attached only when present; messages with
        an unrecognized role are silently skipped (no else branch).
        """
        messages = []
        for message in self.messages:
            if message.role == "developer":
                m = ChatCompletionDeveloperMessageParam(
                    role="developer",
                    content=message.content,
                )
                if message.name:
                    m["name"] = message.name
                messages.append(m)
            elif message.role == "system":
                m = ChatCompletionSystemMessageParam(
                    role="system",
                    content=message.content,
                )
                if message.name:
                    m["name"] = message.name
                messages.append(m)
            elif message.role == "user":
                m = ChatCompletionUserMessageParam(
                    role="user",
                    content=message.content,
                )
                if message.name:
                    m["name"] = message.name
                messages.append(m)
            elif message.role == "assistant":
                m = ChatCompletionAssistantMessageParam(
                    role="assistant",
                    content=message.content,
                )
                if message.function_call:
                    m["function_call"] = FunctionCall(
                        arguments=message.function_call["arguments"],
                        name=message.function_call["name"],
                    )
                if message.refusal:
                    m["refusal"] = message.refusal
                if message.tool_calls:
                    t: list[ChatCompletionMessageToolCallParam] = []
                    for tool_call in message.tool_calls:
                        # Tool calls are stored with nested structure: {id, type, function: {name, arguments}}
                        function_data = tool_call.get("function", {})
                        # Skip tool calls that are missing required fields
                        if "id" not in tool_call or "name" not in function_data:
                            logger.warning(
                                f"Skipping invalid tool call: missing required fields. "
                                f"Got: {tool_call.keys()}, function keys: {function_data.keys()}"
                            )
                            continue
                        # Arguments are stored as a JSON string
                        arguments_str = function_data.get("arguments", "{}")
                        t.append(
                            ChatCompletionMessageToolCallParam(
                                id=tool_call["id"],
                                type="function",
                                function=Function(
                                    arguments=arguments_str,
                                    name=function_data["name"],
                                ),
                            )
                        )
                    m["tool_calls"] = t
                if message.name:
                    m["name"] = message.name
                messages.append(m)
            elif message.role == "tool":
                messages.append(
                    ChatCompletionToolMessageParam(
                        role="tool",
                        content=message.content,
                        tool_call_id=message.tool_call_id or "",
                    )
                )
            elif message.role == "function":
                messages.append(
                    ChatCompletionFunctionMessageParam(
                        role="function",
                        content=message.content,
                        name=message.name or "",
                    )
                )
        return messages
async def get_chat_session(
    session_id: str,
    user_id: str | None,
) -> ChatSession | None:
    """Load a chat session from Redis by its ID.

    Returns None when the session does not exist or is owned by a
    different user; raises RedisError when the stored payload cannot
    be deserialized.
    """
    key = f"chat:session:{session_id}"
    payload: bytes | None = await async_redis.get(key)
    if payload is None:
        logger.warning(f"Session {session_id} not found in Redis")
        return None

    try:
        session = ChatSession.model_validate_json(payload)
    except Exception as err:
        logger.error(f"Failed to deserialize session {session_id}: {err}", exc_info=True)
        raise RedisError(f"Corrupted session data for {session_id}") from err

    # Ownership check: anonymous sessions (owner None) are readable by
    # anyone; owned sessions only by their owner.
    owner = session.user_id
    if owner is not None and owner != user_id:
        logger.warning(
            f"Session {session_id} user id mismatch: {owner} != {user_id}"
        )
        return None
    return session
async def upsert_chat_session(
    session: ChatSession,
) -> ChatSession:
    """Persist a chat session to Redis under the configured TTL."""
    key = f"chat:session:{session.session_id}"
    # SETEX both writes the payload and (re)arms the expiry in one call.
    ok = await async_redis.setex(key, config.session_ttl, session.model_dump_json())
    if not ok:
        raise RedisError(
            f"Failed to persist chat session {session.session_id} to Redis: {ok}"
        )
    return session

View File

@@ -0,0 +1,70 @@
import pytest
from backend.server.v2.chat.data import (
ChatMessage,
ChatSession,
Usage,
get_chat_session,
upsert_chat_session,
)
# Shared fixture: a three-message conversation covering the shapes the
# round-trip tests need — plain user text, an assistant message carrying
# a function tool call, and the tool's response linked back to it via
# tool_call_id ("t123").
messages = [
    ChatMessage(content="Hello, how are you?", role="user"),
    ChatMessage(
        content="I'm fine, thank you!",
        role="assistant",
        tool_calls=[
            {
                "id": "t123",
                "type": "function",
                "function": {
                    "name": "get_weather",
                    "arguments": '{"city": "New York"}',
                },
            }
        ],
    ),
    ChatMessage(
        content="I'm using the tool to get the weather",
        role="tool",
        tool_call_id="t123",
    ),
]
@pytest.mark.asyncio
async def test_chatsession_serialization_deserialization():
    # JSON round-trip must preserve the full session payload, including
    # nested tool-call dicts and usage entries.
    s = ChatSession.new(user_id="abc123")
    s.messages = messages
    s.usage = [Usage(prompt_tokens=100, completion_tokens=200, total_tokens=300)]
    serialized = s.model_dump_json()
    s2 = ChatSession.model_validate_json(serialized)
    assert s2.model_dump() == s.model_dump()
@pytest.mark.asyncio
async def test_chatsession_redis_storage():
    # Store an anonymous session in Redis, then read it back by ID.
    s = ChatSession.new(user_id=None)
    s.messages = messages
    s = await upsert_chat_session(s)
    s2 = await get_chat_session(
        session_id=s.session_id,
        user_id=s.user_id,
    )
    assert s2 == s
@pytest.mark.asyncio
async def test_chatsession_redis_storage_user_id_mismatch():
    # A session owned by a user must not be readable anonymously:
    # get_chat_session returns None on owner mismatch.
    s = ChatSession.new(user_id="abc123")
    s.messages = messages
    s = await upsert_chat_session(s)
    s2 = await get_chat_session(s.session_id, None)
    assert s2 is None

View File

@@ -0,0 +1,100 @@
from enum import Enum
from typing import Any
from pydantic import BaseModel, Field
class ResponseType(str, Enum):
    """Types of streaming responses (the `type` discriminator in SSE frames)."""

    TEXT_CHUNK = "text_chunk"  # incremental assistant text
    TEXT_ENDED = "text_ended"  # assistant text segment finished
    TOOL_CALL = "tool_call"  # full tool invocation (name + arguments)
    TOOL_CALL_START = "tool_call_start"  # tool call began (id only)
    TOOL_RESPONSE = "tool_response"  # result of executing a tool
    ERROR = "error"  # error surfaced to the client
    USAGE = "usage"  # token usage statistics
    STREAM_END = "stream_end"  # terminal marker for the stream
class StreamBaseResponse(BaseModel):
    """Base response model for all streaming responses."""

    type: ResponseType
    # Optional; format is not enforced here — set by producers when relevant.
    timestamp: str | None = None

    def to_sse(self) -> str:
        """Convert to SSE format."""
        # SSE frames are "data: <json>" terminated by a blank line.
        return f"data: {self.model_dump_json()}\n\n"
class StreamTextChunk(StreamBaseResponse):
    """Streaming text content from the assistant (one incremental chunk)."""

    type: ResponseType = ResponseType.TEXT_CHUNK
    content: str = Field(..., description="Text content chunk")
class StreamToolCallStart(StreamBaseResponse):
    """Tool call started notification (emitted before arguments are known)."""

    type: ResponseType = ResponseType.TOOL_CALL_START
    tool_id: str = Field(..., description="Unique tool call ID")
class StreamToolCall(StreamBaseResponse):
    """Tool invocation notification with the fully-assembled arguments."""

    type: ResponseType = ResponseType.TOOL_CALL
    tool_id: str = Field(..., description="Unique tool call ID")
    tool_name: str = Field(..., description="Name of the tool being called")
    arguments: dict[str, Any] = Field(
        default_factory=dict, description="Tool arguments"
    )
class StreamToolExecutionResult(StreamBaseResponse):
    """Tool execution result, correlated to its call via tool_id."""

    type: ResponseType = ResponseType.TOOL_RESPONSE
    tool_id: str = Field(..., description="Tool call ID this responds to")
    tool_name: str = Field(..., description="Name of the tool that was executed")
    result: str | dict[str, Any] = Field(..., description="Tool execution result")
    success: bool = Field(
        default=True, description="Whether the tool execution succeeded"
    )
class StreamUsage(StreamBaseResponse):
    """Token usage statistics for the completed request."""

    type: ResponseType = ResponseType.USAGE
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int
class StreamError(StreamBaseResponse):
    """Error response delivered in-band over the stream."""

    type: ResponseType = ResponseType.ERROR
    message: str = Field(..., description="Error message")
    code: str | None = Field(default=None, description="Error code")
    details: dict[str, Any] | None = Field(
        default=None, description="Additional error details"
    )
class StreamTextEnded(StreamBaseResponse):
    """Text streaming completed marker (no payload beyond the type)."""

    type: ResponseType = ResponseType.TEXT_ENDED
class StreamEnd(StreamBaseResponse):
    """End of stream marker; always the final frame."""

    type: ResponseType = ResponseType.STREAM_END
    summary: dict[str, Any] | None = Field(
        default=None, description="Stream summary statistics"
    )

View File

@@ -0,0 +1,76 @@
# AutoGPT Agent Setup Assistant
Your name is Otto.
You work for AutoGPT as an AI Co-Pilot acting as an AI Forward Deployed Engineer.
You were made by AutoGPT.
AutoGPT is an AI business automation tool; it helps businesses capture the value from AI to accelerate their growth!
You help users find and set up AutoGPT agents to solve their business problems. **Bias toward action** - move quickly to get agents running.
## THE FLOW (Always Follow This Order)
1. **find_agent** → Search for agents that solve their problem
2. **get_agent_details** → Get comprehensive info about chosen agent
3. **get_required_setup_info** → Verify user has required credentials (MANDATORY before next step)
4. **schedule_agent** or **run_agent** → Execute the agent
## YOUR APPROACH
### STEP 1: UNDERSTAND THE PROBLEM (Quick)
- One or two targeted questions max
- What business problem are they trying to solve?
- Move quickly to searching for solutions
### STEP 2: FIND AGENTS
- Use `find_agent` immediately with relevant keywords
- Suggest the best option based on what you know
- Explain briefly how it solves their problem
- Ask them if they would like to use it; if they do, move to step 3
### STEP 3: GET DETAILS
- Use `get_agent_details` on their chosen agent
- Explain what the agent does and its requirements
- Keep explanations brief and outcome-focused
### STEP 4: VERIFY SETUP (CRITICAL)
- **ALWAYS** use `get_required_setup_info` before proceeding
- This checks if user has all required credentials
- Tell user what credentials they need (if any)
- Explain credentials are added via the frontend interface
### STEP 5: EXECUTE
- Once credentials are verified, use `schedule_agent` for scheduled and triggered runs OR `run_agent` for immediate execution
- Confirm successful setup/run
- Provide clear next steps
## KEY RULES
### What You DON'T Do:
- Don't help with login (frontend handles this)
- Don't help add credentials (frontend handles this)
- Don't skip `get_required_setup_info` (it's mandatory)
- Don't over-explain technical details
### What You DO:
- Act fast - get to agent discovery quickly
- Use tools proactively without asking permission
- Keep explanations short and business-focused
- Always verify credentials before setup/run
- Focus on outcomes and value
### Error Handling:
- If authentication needed → Tell user to sign in via the interface
- If credentials missing → Tell user what's needed and where to add them in the frontend
- If setup fails → Identify issue, provide clear fix
## SUCCESS LOOKS LIKE:
- User has an agent running within minutes
- User understands what their agent does
- User knows how to use their agent going forward
- Minimal back-and-forth, maximum action
**Remember: Speed to value. Find agent → Get details → Verify credentials → Run. Keep it simple, keep it moving.**

View File

@@ -0,0 +1,236 @@
"""Chat API routes for chat session management and streaming via SSE."""
import logging
from collections.abc import AsyncGenerator
from typing import Annotated
from autogpt_libs import auth
from fastapi import APIRouter, Depends, Query, Security
from fastapi.responses import StreamingResponse
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from pydantic import BaseModel
import backend.server.v2.chat.service as chat_service
from backend.server.v2.chat.config import ChatConfig
from backend.util.exceptions import NotFoundError
config = ChatConfig()
logger = logging.getLogger(__name__)
# Optional bearer token authentication
optional_bearer = HTTPBearer(auto_error=False)
router = APIRouter(
tags=["chat"],
)
def get_optional_user_id(
    credentials: HTTPAuthorizationCredentials | None = Security(optional_bearer),
) -> str | None:
    """Return the `sub` claim from a bearer JWT when one is supplied.

    Requests without credentials — or with a token that fails to parse —
    resolve to None so endpoints can serve anonymous users as well.
    """
    if not credentials:
        return None
    try:
        # Imported lazily; any parse/validation failure falls through to
        # anonymous access rather than rejecting the request.
        from autogpt_libs.auth.jwt_utils import parse_jwt_token

        return parse_jwt_token(credentials.credentials).get("sub")
    except Exception as exc:
        logger.debug(f"Auth token validation failed (anonymous access): {exc}")
        return None
# ========== Request/Response Models ==========
class CreateSessionResponse(BaseModel):
    """Response model containing information on a newly created chat session."""

    id: str
    created_at: str  # ISO-8601 string (datetime.isoformat())
    user_id: str | None  # None for anonymous sessions
class SessionDetailResponse(BaseModel):
    """Response model providing complete details for a chat session, including messages."""

    id: str
    created_at: str  # ISO-8601 string
    updated_at: str  # ISO-8601 string
    user_id: str | None  # None for anonymous sessions
    messages: list[dict]  # ChatMessage.model_dump() payloads
# ========== Routes ==========
@router.post(
    "/sessions",
)
async def create_session(
    user_id: Annotated[str | None, Depends(get_optional_user_id)],
) -> CreateSessionResponse:
    """Create a new chat session.

    Works for both authenticated and anonymous callers; anonymous
    sessions are created with no owner and can be claimed later.

    Args:
        user_id: Optional user ID parsed from the bearer JWT, or None.

    Returns:
        CreateSessionResponse: Details of the created session.
    """
    logger.info(f"Creating session with user_id: {user_id}")
    new_session = await chat_service.create_chat_session(user_id)
    return CreateSessionResponse(
        id=new_session.session_id,
        created_at=new_session.started_at.isoformat(),
        user_id=new_session.user_id or None,
    )
@router.get(
    "/sessions/{session_id}",
)
async def get_session(
    session_id: str,
    user_id: Annotated[str | None, Depends(get_optional_user_id)],
) -> SessionDetailResponse:
    """Retrieve a chat session, including its full message history.

    Args:
        session_id: Identifier of the session to fetch.
        user_id: Optional authenticated user ID, or None for anonymous access.

    Returns:
        SessionDetailResponse for the session.

    Raises:
        NotFoundError: When the session is missing or owned by another user.
    """
    found = await chat_service.get_session(session_id, user_id)
    if not found:
        raise NotFoundError(f"Session {session_id} not found")

    history = [m.model_dump() for m in found.messages]
    return SessionDetailResponse(
        id=found.session_id,
        created_at=found.started_at.isoformat(),
        updated_at=found.updated_at.isoformat(),
        user_id=found.user_id or None,
        messages=history,
    )
@router.get(
    "/sessions/{session_id}/stream",
)
async def stream_chat(
    session_id: str,
    message: Annotated[str, Query(min_length=1, max_length=10000)],
    user_id: str | None = Depends(get_optional_user_id),
    is_user_message: bool = Query(default=True),
):
    """
    Stream chat responses for a session.

    Streams the AI/completion responses in real time over Server-Sent Events (SSE), including:
    - Text fragments as they are generated
    - Tool call UI elements (if invoked)
    - Tool execution results

    Args:
        session_id: The chat session identifier to associate with the streamed messages.
        message: The user's new message to process.
        user_id: Optional authenticated user ID.
        is_user_message: Whether the message is a user message.

    Returns:
        StreamingResponse: SSE-formatted response chunks.

    Raises:
        NotFoundError: If the session does not exist (raised before
            streaming starts, so the client gets a proper 404).
    """
    # Validate session exists before starting the stream
    # This prevents errors after the response has already started
    session = await chat_service.get_session(session_id, user_id)
    if not session:
        raise NotFoundError(f"Session {session_id} not found. ")
    # Claim a previously-anonymous session for the now-authenticated user.
    if session.user_id is None and user_id is not None:
        session = await chat_service.assign_user_to_session(session_id, user_id)

    async def event_generator() -> AsyncGenerator[str, None]:
        # Fix: removed leftover debug code that opened "chunks.log" and
        # performed blocking synchronous file I/O for every streamed
        # chunk inside this async generator. Use logging instead.
        async for chunk in chat_service.stream_chat_completion(
            session_id, message, is_user_message=is_user_message, user_id=user_id
        ):
            logger.debug(f"Streaming chunk for session {session_id}: {chunk}")
            yield chunk.to_sse()

    return StreamingResponse(
        event_generator(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "X-Accel-Buffering": "no",  # Disable nginx buffering
        },
    )
@router.patch(
    "/sessions/{session_id}/assign-user",
    dependencies=[Security(auth.requires_user)],
    status_code=200,
)
async def session_assign_user(
    session_id: str,
    user_id: Annotated[str, Security(auth.get_user_id)],
) -> dict:
    """
    Assign an authenticated user to a chat session.

    Used (typically post-login) to claim an existing anonymous session as the current authenticated user.
    Requires authentication (enforced by the route's Security dependency).

    Args:
        session_id: The identifier for the (previously anonymous) session.
        user_id: The authenticated user's ID to associate with the session.

    Returns:
        dict: Status of the assignment.
    """
    # The service layer performs the actual ownership update.
    await chat_service.assign_user_to_session(session_id, user_id)
    return {"status": "ok"}
# ========== Health Check ==========
@router.get("/health", status_code=200)
async def health_check() -> dict:
    """
    Health check endpoint for the chat service.

    Performs a full round trip through the data layer: creates an anonymous
    session, assigns a user to it, and reads it back. Fails loudly if any
    step does not succeed, so callers see an error response rather than a
    false "healthy".

    Returns:
        dict: A status dictionary indicating health, service name, and API version.
    """
    # NOTE(review): each probe persists a session that is never cleaned up;
    # consider deleting it here once a delete operation exists.
    session = await chat_service.create_chat_session(None)
    await chat_service.assign_user_to_session(session.session_id, "test_user")
    # Bug fix: the retrieval result used to be discarded, so a broken read
    # path still reported healthy. Verify the session actually came back.
    retrieved = await chat_service.get_session(session.session_id, "test_user")
    if retrieved is None:
        raise RuntimeError(
            f"Health check failed: session {session.session_id} could not be read back"
        )
    return {
        "status": "healthy",
        "service": "chat",
        "version": "0.1.0",
    }

View File

@@ -0,0 +1,472 @@
import logging
from collections.abc import AsyncGenerator
from datetime import UTC, datetime
from typing import Any
import orjson
from openai import AsyncOpenAI
from openai.types.chat import ChatCompletionChunk, ChatCompletionToolParam
import backend.server.v2.chat.config
from backend.server.v2.chat.data import (
ChatMessage,
ChatSession,
Usage,
get_chat_session,
upsert_chat_session,
)
from backend.server.v2.chat.models import (
StreamBaseResponse,
StreamEnd,
StreamError,
StreamTextChunk,
StreamTextEnded,
StreamToolCall,
StreamToolCallStart,
StreamToolExecutionResult,
StreamUsage,
)
from backend.server.v2.chat.tools import execute_tool, tools
from backend.util.exceptions import NotFoundError
logger = logging.getLogger(__name__)

# Module-level singletons shared by every request: the chat configuration
# and one async OpenAI-compatible client built from its api_key/base_url.
config = backend.server.v2.chat.config.ChatConfig()
client = AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)
async def create_chat_session(
    user_id: str | None = None,
) -> ChatSession:
    """
    Create a fresh chat session and write it to the database.

    Args:
        user_id: Owner of the session, or None for an anonymous session.

    Returns:
        The persisted ChatSession.
    """
    # Persist right away so the session can immediately back a stream.
    return await upsert_chat_session(ChatSession.new(user_id))
async def get_session(
    session_id: str,
    user_id: str | None = None,
) -> ChatSession | None:
    """
    Look up a chat session by its ID.

    Args:
        session_id: Session identifier.
        user_id: Requesting user, or None for anonymous access.

    Returns:
        The matching ChatSession, or None when it does not exist.
    """
    session = await get_chat_session(session_id, user_id)
    return session
async def assign_user_to_session(
    session_id: str,
    user_id: str,
) -> ChatSession:
    """
    Assign a user to a chat session.

    Args:
        session_id: Session to claim.
        user_id: User taking ownership of the session.

    Returns:
        The updated, persisted ChatSession.

    Raises:
        NotFoundError: If the session does not exist.
        ValueError: If the session is already owned by a different user.
    """
    session = await get_chat_session(session_id, None)
    if not session:
        raise NotFoundError(f"Session {session_id} not found")
    # Bug fix: previously any caller could overwrite the owner of an
    # already-claimed session. Only anonymous sessions (or a re-assignment
    # by the same user, which is a no-op) may be claimed.
    if session.user_id is not None and session.user_id != user_id:
        raise ValueError(f"Session {session_id} is already owned by another user")
    session.user_id = user_id
    return await upsert_chat_session(session)
async def stream_chat_completion(
    session_id: str,
    message: str | None = None,
    is_user_message: bool = True,
    user_id: str | None = None,
    retry_count: int = 0,
) -> AsyncGenerator[StreamBaseResponse, None]:
    """Main entry point for streaming chat completions with database handling.

    This function handles all database operations and delegates streaming
    to the internal _stream_chat_chunks function. After a completed tool
    call it recurses once (with no new message) so the model can respond to
    the tool output; on retryable parse errors it recurses with an
    incremented retry_count, up to config.max_retries.

    Args:
        session_id: Chat session ID
        message: Optional message appended to the session before streaming
        is_user_message: Whether `message` is stored with the "user" role
            (otherwise it is stored with the "assistant" role)
        user_id: User ID for authentication (None for anonymous)
        retry_count: Internal recursion counter for retryable errors

    Yields:
        StreamBaseResponse objects formatted as SSE

    Raises:
        NotFoundError: If session_id is invalid
        ValueError: If max_context_messages is exceeded
    """
    logger.info(
        f"Streaming chat completion for session {session_id} for message {message} and user id {user_id}. Message is user message: {is_user_message}"
    )
    session = await get_chat_session(session_id, user_id)
    if not session:
        raise NotFoundError(
            f"Session {session_id} not found. Please create a new session first."
        )
    if message:
        session.messages.append(
            ChatMessage(
                role="user" if is_user_message else "assistant", content=message
            )
        )
    if len(session.messages) > config.max_context_messages:
        raise ValueError(f"Max messages exceeded: {config.max_context_messages}")
    logger.info(
        f"Upserting session: {session.session_id} with user id {session.user_id}"
    )
    # Persist the (possibly extended) conversation before streaming starts.
    session = await upsert_chat_session(session)
    assert session, "Session not found"
    # Accumulates the assistant's streamed text for persistence at the end.
    assistant_response = ChatMessage(
        role="assistant",
        content="",
    )
    # Stream bookkeeping flags.
    has_yielded_end = False  # StreamEnd already forwarded to the client
    has_yielded_error = False  # StreamError already forwarded
    has_done_tool_call = False  # at least one tool executed this round
    has_received_text = False  # some text arrived (gates StreamTextEnded)
    text_streaming_ended = False  # StreamTextEnded already emitted
    messages_to_add: list[ChatMessage] = []  # tool-result messages to persist
    should_retry = False
    try:
        async for chunk in _stream_chat_chunks(
            session=session,
            tools=tools,
        ):
            if isinstance(chunk, StreamTextChunk):
                assistant_response.content += chunk.content
                has_received_text = True
                yield chunk
            elif isinstance(chunk, StreamToolCallStart):
                # Emit text_ended before first tool call, but only if we've received text
                if has_received_text and not text_streaming_ended:
                    yield StreamTextEnded()
                    text_streaming_ended = True
                yield chunk
            elif isinstance(chunk, StreamToolCall):
                # Just pass on the tool call notification
                # NOTE(review): the tool call is never copied onto
                # assistant_response.tool_calls, yet matching "tool"-role
                # messages are persisted below — verify the stored history
                # still forms a valid OpenAI message sequence.
                pass
            elif isinstance(chunk, StreamToolExecutionResult):
                # Tool results may be plain strings or structured data;
                # serialize the latter before persisting as a tool message.
                result_content = (
                    chunk.result
                    if isinstance(chunk.result, str)
                    else orjson.dumps(chunk.result).decode("utf-8")
                )
                messages_to_add.append(
                    ChatMessage(
                        role="tool",
                        content=result_content,
                        tool_call_id=chunk.tool_id,
                    )
                )
                has_done_tool_call = True
                # Track if any tool execution failed
                if not chunk.success:
                    logger.warning(
                        f"Tool {chunk.tool_name} (ID: {chunk.tool_id}) execution failed"
                    )
                yield chunk
            elif isinstance(chunk, StreamEnd):
                # Suppress StreamEnd when a tool ran: the post-tool recursion
                # at the bottom emits the final StreamEnd instead.
                if not has_done_tool_call:
                    has_yielded_end = True
                    yield chunk
            elif isinstance(chunk, StreamError):
                has_yielded_error = True
            elif isinstance(chunk, StreamUsage):
                # Record token usage on the session for later persistence.
                session.usage.append(
                    Usage(
                        prompt_tokens=chunk.prompt_tokens,
                        completion_tokens=chunk.completion_tokens,
                        total_tokens=chunk.total_tokens,
                    )
                )
            else:
                logger.error(f"Unknown chunk type: {type(chunk)}", exc_info=True)
    except Exception as e:
        logger.error(f"Error during stream: {e!s}", exc_info=True)
        # Check if this is a retryable error (JSON parsing, incomplete tool calls, etc.)
        is_retryable = isinstance(e, (orjson.JSONDecodeError, KeyError, TypeError))
        if is_retryable and retry_count < config.max_retries:
            logger.info(
                f"Retryable error encountered. Attempt {retry_count + 1}/{config.max_retries}"
            )
            should_retry = True
        else:
            # Non-retryable error or max retries exceeded
            # Save any partial progress before reporting error
            if assistant_response.content or assistant_response.tool_calls:
                messages_to_add.append(assistant_response)
            session.messages.extend(messages_to_add)
            await upsert_chat_session(session)
            if not has_yielded_error:
                error_message = str(e)
                if not is_retryable:
                    error_message = f"Non-retryable error: {error_message}"
                elif retry_count >= config.max_retries:
                    error_message = (
                        f"Max retries ({config.max_retries}) exceeded: {error_message}"
                    )
                error_response = StreamError(
                    message=error_message,
                    timestamp=datetime.now(UTC).isoformat(),
                )
                yield error_response
            if not has_yielded_end:
                yield StreamEnd(
                    timestamp=datetime.now(UTC).isoformat(),
                )
            return
    # Handle retry outside of exception handler to avoid nesting
    if should_retry and retry_count < config.max_retries:
        # NOTE(review): the retry path does not persist messages_to_add or
        # partial assistant content first — confirm dropping them is intended.
        logger.info(
            f"Retrying stream_chat_completion for session {session_id}, attempt {retry_count + 1}"
        )
        async for chunk in stream_chat_completion(
            session_id=session.session_id,
            user_id=user_id,
            retry_count=retry_count + 1,
        ):
            yield chunk
        return  # Exit after retry to avoid double-saving in finally block
    # Normal completion path - save session and handle tool call continuation
    logger.info(
        f"Upserting session: {session.session_id} with user id {session.user_id}"
    )
    # Only append assistant response if it has content or tool calls
    # to avoid saving empty messages on errors
    if assistant_response.content or assistant_response.tool_calls:
        messages_to_add.append(assistant_response)
    session.messages.extend(messages_to_add)
    await upsert_chat_session(session)
    # If we did a tool call, stream the chat completion again to get the next response
    if has_done_tool_call:
        logger.info(
            "Tool call executed, streaming chat completion again to get assistant response"
        )
        async for chunk in stream_chat_completion(
            session_id=session.session_id, user_id=user_id
        ):
            yield chunk
async def _stream_chat_chunks(
    session: ChatSession,
    tools: list[ChatCompletionToolParam],
) -> AsyncGenerator[StreamBaseResponse, None]:
    """
    Pure streaming function for OpenAI chat completions with tool calling.

    This function is database-agnostic and focuses only on streaming logic:
    it forwards text and usage chunks as they arrive, accumulates tool-call
    deltas until the stream completes, then executes each tool call via
    _yield_tool_call.

    Args:
        session: Chat session supplying the conversation context.
        tools: Tool schemas offered to the model for function calling.

    Yields:
        StreamBaseResponse objects (text chunks, tool events, usage,
        errors, and a terminating StreamEnd).

    Raises:
        orjson.JSONDecodeError: If tool call arguments are not valid JSON.
        KeyError: If expected tool call fields are missing.
        TypeError: If the tool call structure is invalid.
            (These propagate so stream_chat_completion can retry.)
    """
    model = config.model
    logger.info("Starting pure chat stream")
    # Loop to handle tool calls and continue conversation
    while True:
        try:
            logger.info("Creating OpenAI chat completion stream...")
            # Create the stream with proper types
            stream = await client.chat.completions.create(
                model=model,
                messages=session.to_openai_messages(),
                tools=tools,
                tool_choice="auto",
                stream=True,
            )
            # Variables to accumulate tool calls
            tool_calls: list[dict[str, Any]] = []
            active_tool_call_idx: int | None = None
            finish_reason: str | None = None
            # Track which tool call indices have had their start event emitted
            emitted_start_for_idx: set[int] = set()
            # Process the stream
            chunk: ChatCompletionChunk
            async for chunk in stream:
                if chunk.usage:
                    yield StreamUsage(
                        prompt_tokens=chunk.usage.prompt_tokens,
                        completion_tokens=chunk.usage.completion_tokens,
                        total_tokens=chunk.usage.total_tokens,
                    )
                if chunk.choices:
                    choice = chunk.choices[0]
                    delta = choice.delta
                    # Capture finish reason
                    if choice.finish_reason:
                        finish_reason = choice.finish_reason
                        logger.info(f"Finish reason: {finish_reason}")
                    # Handle content streaming
                    if delta.content:
                        # Stream the text chunk
                        text_response = StreamTextChunk(
                            content=delta.content,
                            timestamp=datetime.now(UTC).isoformat(),
                        )
                        yield text_response
                    # Handle tool calls: deltas arrive in fragments keyed by
                    # index, so accumulate id/name/arguments until complete.
                    if delta.tool_calls:
                        for tc_chunk in delta.tool_calls:
                            idx = tc_chunk.index
                            # Update active tool call index if needed
                            if (
                                active_tool_call_idx is None
                                or active_tool_call_idx != idx
                            ):
                                active_tool_call_idx = idx
                            # Ensure we have a tool call object at this index
                            while len(tool_calls) <= idx:
                                tool_calls.append(
                                    {
                                        "id": "",
                                        "type": "function",
                                        "function": {
                                            "name": "",
                                            "arguments": "",
                                        },
                                    },
                                )
                            # Accumulate the tool call data
                            if tc_chunk.id:
                                tool_calls[idx]["id"] = tc_chunk.id
                            if tc_chunk.function:
                                if tc_chunk.function.name:
                                    tool_calls[idx]["function"][
                                        "name"
                                    ] = tc_chunk.function.name
                                if tc_chunk.function.arguments:
                                    tool_calls[idx]["function"][
                                        "arguments"
                                    ] += tc_chunk.function.arguments
                            # Emit StreamToolCallStart only after we have the tool call ID
                            if (
                                idx not in emitted_start_for_idx
                                and tool_calls[idx]["id"]
                            ):
                                yield StreamToolCallStart(
                                    tool_id=tool_calls[idx]["id"],
                                    timestamp=datetime.now(UTC).isoformat(),
                                )
                                emitted_start_for_idx.add(idx)
            logger.info(f"Stream complete. Finish reason: {finish_reason}")
            # Yield all accumulated tool calls after the stream is complete
            # This ensures all tool call arguments have been fully received
            for idx, tool_call in enumerate(tool_calls):
                try:
                    async for tc in _yield_tool_call(tool_calls, idx, session):
                        yield tc
                except (orjson.JSONDecodeError, KeyError, TypeError) as e:
                    logger.error(
                        f"Failed to parse tool call {idx}: {e}",
                        exc_info=True,
                        extra={"tool_call": tool_call},
                    )
                    yield StreamError(
                        message=f"Invalid tool call arguments for tool {tool_call.get('function', {}).get('name', 'unknown')}: {e}",
                        timestamp=datetime.now(UTC).isoformat(),
                    )
                    # Re-raise to trigger retry logic in the parent function
                    raise
            yield StreamEnd(
                timestamp=datetime.now(UTC).isoformat(),
            )
            return
        except (orjson.JSONDecodeError, KeyError, TypeError):
            # Bug fix: these retryable parse errors are deliberately re-raised
            # by the tool-call loop above so stream_chat_completion can retry,
            # but the broad `except Exception` below used to swallow them and
            # emit a terminal StreamEnd, so the retry never ran. Let them
            # propagate to the caller instead.
            raise
        except Exception as e:
            logger.error(f"Error in stream: {e!s}", exc_info=True)
            error_response = StreamError(
                message=str(e),
                timestamp=datetime.now(UTC).isoformat(),
            )
            yield error_response
            yield StreamEnd(
                timestamp=datetime.now(UTC).isoformat(),
            )
            return
async def _yield_tool_call(
    tool_calls: list[dict[str, Any]],
    yield_idx: int,
    session: ChatSession,
) -> AsyncGenerator[StreamBaseResponse, None]:
    """
    Emit the accumulated tool call at `yield_idx`, execute it, and emit
    the execution result.

    Raises:
        orjson.JSONDecodeError: If tool call arguments cannot be parsed as JSON
        KeyError: If expected tool call fields are missing
        TypeError: If tool call structure is invalid
    """
    call = tool_calls[yield_idx]
    logger.info(f"Yielding tool call: {call}")
    # Parse the accumulated argument string; parse errors propagate to the
    # caller, which treats them as retryable.
    arguments = orjson.loads(call["function"]["arguments"])
    yield StreamToolCall(
        tool_id=call["id"],
        tool_name=call["function"]["name"],
        arguments=arguments,
        timestamp=datetime.now(UTC).isoformat(),
    )
    # Run the tool and forward its result event.
    execution_result: StreamToolExecutionResult = await execute_tool(
        tool_name=call["function"]["name"],
        parameters=arguments,
        tool_call_id=call["id"],
        user_id=session.user_id,
        session_id=session.session_id,
    )
    logger.info(f"Yielding Tool execution response: {execution_result}")
    yield execution_result
if __name__ == "__main__":
    import asyncio

    async def main():
        """Manual smoke test: create a session and print every streamed chunk."""
        session = await create_chat_session()
        prompt = "Please find me an agent that can help me with my business. Call the tool twice once with the query 'money printing agent' and once with the query 'money generating agent'"
        async for chunk in stream_chat_completion(
            session.session_id,
            prompt,
            user_id=session.user_id,
        ):
            print(chunk)

    asyncio.run(main())

View File

@@ -0,0 +1,81 @@
import logging
from os import getenv
import pytest
import backend.server.v2.chat.service as chat_service
from backend.server.v2.chat.models import (
StreamEnd,
StreamError,
StreamTextChunk,
StreamToolExecutionResult,
)
logger = logging.getLogger(__name__)
@pytest.mark.asyncio(loop_scope="session")
async def test_stream_chat_completion():
    """
    Smoke-test stream_chat_completion end to end: the stream must finish,
    produce no errors, and yield non-empty assistant text.

    Skipped unless OPEN_ROUTER_API_KEY is set.
    """
    if not getenv("OPEN_ROUTER_API_KEY"):
        return pytest.skip("OPEN_ROUTER_API_KEY is not set, skipping test")
    session = await chat_service.create_chat_session()
    chunks = []
    async for chunk in chat_service.stream_chat_completion(
        session.session_id, "Hello, how are you?", user_id=session.user_id
    ):
        logger.info(chunk)
        chunks.append(chunk)
    assistant_message = "".join(
        c.content for c in chunks if isinstance(c, StreamTextChunk)
    )
    assert any(
        isinstance(c, StreamEnd) for c in chunks
    ), "Chat completion did not end"
    assert not any(
        isinstance(c, StreamError) for c in chunks
    ), "Error occurred while streaming chat completion"
    assert assistant_message, "Assistant message is empty"
@pytest.mark.asyncio(loop_scope="session")
async def test_stream_chat_completion_with_tool_calls():
    """
    Verify that a tool-invoking prompt completes cleanly, produces at least
    one tool execution result, and records token usage on the session.

    Skipped unless OPEN_ROUTER_API_KEY is set.
    """
    if not getenv("OPEN_ROUTER_API_KEY"):
        return pytest.skip("OPEN_ROUTER_API_KEY is not set, skipping test")
    session = await chat_service.create_chat_session()
    session = await chat_service.upsert_chat_session(session)
    seen_types: list[type] = []
    async for chunk in chat_service.stream_chat_completion(
        session.session_id,
        "Please find me an agent that can help me with my business. Use the query 'moneny printing agent'",
        user_id=session.user_id,
    ):
        logger.info(chunk)
        seen_types.append(type(chunk))
    assert StreamEnd in seen_types, "Chat completion did not end"
    assert (
        StreamError not in seen_types
    ), "Error occurred while streaming chat completion"
    assert StreamToolExecutionResult in seen_types, "Tool calls did not occur"
    # The persisted session must have accumulated usage records.
    session = await chat_service.get_session(session.session_id)
    assert session, "Session not found"
    assert session.usage, "Usage is empty"

View File

@@ -0,0 +1,51 @@
from typing import TYPE_CHECKING, Any
from openai.types.chat import ChatCompletionToolParam
from .base import BaseTool
from .find_agent import FindAgentTool
from .get_agent_details import GetAgentDetailsTool
from .get_required_setup_info import GetRequiredSetupInfoTool
from .run_agent import RunAgentTool
from .setup_agent import SetupAgentTool
if TYPE_CHECKING:
from backend.server.v2.chat.models import StreamToolExecutionResult
# Initialize tool instances
# NOTE(review): these are module-level singletons shared by every request;
# presumably each tool keeps no per-request state — confirm in each impl.
find_agent_tool = FindAgentTool()
get_agent_details_tool = GetAgentDetailsTool()
get_required_setup_info_tool = GetRequiredSetupInfoTool()
setup_agent_tool = SetupAgentTool()
run_agent_tool = RunAgentTool()
# Export tools as OpenAI format
# This list is passed as the `tools` parameter of chat completion requests.
tools: list[ChatCompletionToolParam] = [
    find_agent_tool.as_openai_tool(),
    get_agent_details_tool.as_openai_tool(),
    get_required_setup_info_tool.as_openai_tool(),
    setup_agent_tool.as_openai_tool(),
    run_agent_tool.as_openai_tool(),
]
async def execute_tool(
    tool_name: str,
    parameters: dict[str, Any],
    user_id: str | None,
    session_id: str,
    tool_call_id: str,
) -> "StreamToolExecutionResult":
    """
    Dispatch a tool invocation by name to the matching tool instance.

    Args:
        tool_name: Name of the registered tool to run.
        parameters: Keyword arguments forwarded to the tool.
        user_id: Calling user's ID, or None for anonymous.
        session_id: Chat session the call belongs to.
        tool_call_id: ID of the originating model tool call.

    Raises:
        ValueError: If `tool_name` does not match a registered tool.
    """
    registry: dict[str, BaseTool] = {
        "find_agent": find_agent_tool,
        "get_agent_details": get_agent_details_tool,
        "get_required_setup_info": get_required_setup_info_tool,
        "setup_agent": setup_agent_tool,
        "run_agent": run_agent_tool,
    }
    selected = registry.get(tool_name)
    if selected is None:
        raise ValueError(f"Tool {tool_name} not found")
    return await selected.execute(
        user_id, session_id, tool_call_id, **parameters
    )

View File

@@ -0,0 +1,449 @@
import uuid
from os import getenv
import pytest
from pydantic import SecretStr
from backend.blocks.firecrawl.scrape import FirecrawlScrapeBlock
from backend.blocks.io import AgentInputBlock, AgentOutputBlock
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.db import prisma
from backend.data.graph import Graph, Link, Node, create_graph
from backend.data.model import APIKeyCredentials
from backend.data.user import get_or_create_user
from backend.integrations.credentials_store import IntegrationCredentialsStore
from backend.server.v2.store import db as store_db
@pytest.fixture(scope="session")
async def setup_test_data():
    """
    Set up test data for run_agent tests:
    1. Create a test user
    2. Create a test graph (agent input -> agent output)
    3. Create a store listing and store listing version
    4. Approve the store listing version

    Returns:
        dict with keys "user", "graph", and "store_submission".
    """
    # 1. Create a test user (randomized sub/email avoid collisions across runs)
    user_data = {
        "sub": f"test-user-{uuid.uuid4()}",
        "email": f"test-{uuid.uuid4()}@example.com",
    }
    user = await get_or_create_user(user_data)
    # 1b. Create a profile with username for the user (required for store agent lookup)
    username = user.email.split("@")[0]
    await prisma.profile.create(
        data={
            "userId": user.id,
            "username": username,
            "name": f"Test User {username}",
            "description": "Test user profile",
            "links": [],  # Required field - empty array for test profiles
        }
    )
    # 2. Create a test graph with agent input -> agent output
    graph_id = str(uuid.uuid4())
    # Create input node
    input_node_id = str(uuid.uuid4())
    input_block = AgentInputBlock()
    input_node = Node(
        id=input_node_id,
        block_id=input_block.id,
        input_default={
            "name": "test_input",
            "title": "Test Input",
            "value": "",
            "advanced": False,
            "description": "Test input field",
            "placeholder_values": [],
        },
        metadata={"position": {"x": 0, "y": 0}},
    )
    # Create output node
    output_node_id = str(uuid.uuid4())
    output_block = AgentOutputBlock()
    output_node = Node(
        id=output_node_id,
        block_id=output_block.id,
        input_default={
            "name": "test_output",
            "title": "Test Output",
            "value": "",
            "format": "",
            "advanced": False,
            "description": "Test output field",
        },
        metadata={"position": {"x": 200, "y": 0}},
    )
    # Create link from input to output (input's "result" feeds output's "value")
    link = Link(
        source_id=input_node_id,
        sink_id=output_node_id,
        source_name="result",
        sink_name="value",
        is_static=True,
    )
    # Create the graph
    graph = Graph(
        id=graph_id,
        version=1,
        is_active=True,
        name="Test Agent",
        description="A simple test agent for testing",
        nodes=[input_node, output_node],
        links=[link],
    )
    created_graph = await create_graph(graph, user.id)
    # 3. Create a store listing and store listing version for the agent
    # Use unique slug to avoid constraint violations
    unique_slug = f"test-agent-{str(uuid.uuid4())[:8]}"
    store_submission = await store_db.create_store_submission(
        user_id=user.id,
        agent_id=created_graph.id,
        agent_version=created_graph.version,
        slug=unique_slug,
        name="Test Agent",
        description="A simple test agent",
        sub_heading="Test agent for unit tests",
        categories=["testing"],
        image_urls=["https://example.com/image.jpg"],
    )
    assert store_submission.store_listing_version_id is not None
    # 4. Approve the store listing version
    await store_db.review_store_submission(
        store_listing_version_id=store_submission.store_listing_version_id,
        is_approved=True,
        external_comments="Approved for testing",
        internal_comments="Test approval",
        reviewer_id=user.id,
    )
    return {
        "user": user,
        "graph": created_graph,
        "store_submission": store_submission,
    }
@pytest.fixture(scope="session")
async def setup_llm_test_data():
    """
    Set up test data for LLM agent tests:
    1. Create a test user
    2. Create test OpenAI credentials for the user
    3. Create a test graph with input -> LLM block -> output
    4. Create and approve a store listing

    Skipped at fixture time when OPENAI_API_KEY is not set.

    Returns:
        dict with keys "user", "graph", "credentials", and "store_submission".
    """
    key = getenv("OPENAI_API_KEY")
    if not key:
        return pytest.skip("OPENAI_API_KEY is not set")
    # 1. Create a test user
    user_data = {
        "sub": f"test-user-{uuid.uuid4()}",
        "email": f"test-{uuid.uuid4()}@example.com",
    }
    user = await get_or_create_user(user_data)
    # 1b. Create a profile with username for the user (required for store agent lookup)
    username = user.email.split("@")[0]
    await prisma.profile.create(
        data={
            "userId": user.id,
            "username": username,
            "name": f"Test User {username}",
            "description": "Test user profile for LLM tests",
            "links": [],  # Required field - empty array for test profiles
        }
    )
    # 2. Create test OpenAI credentials for the user
    # NOTE(review): the stored key is a dummy value, not the OPENAI_API_KEY
    # checked above — confirm whether tests expect a real key here.
    credentials = APIKeyCredentials(
        id=str(uuid.uuid4()),
        provider="openai",
        api_key=SecretStr("test-openai-api-key"),
        title="Test OpenAI API Key",
        expires_at=None,
    )
    # Store the credentials
    creds_store = IntegrationCredentialsStore()
    await creds_store.add_creds(user.id, credentials)
    # 3. Create a test graph with input -> LLM block -> output
    graph_id = str(uuid.uuid4())
    # Create input node for the prompt
    input_node_id = str(uuid.uuid4())
    input_block = AgentInputBlock()
    input_node = Node(
        id=input_node_id,
        block_id=input_block.id,
        input_default={
            "name": "user_prompt",
            "title": "User Prompt",
            "value": "",
            "advanced": False,
            "description": "Prompt for the LLM",
            "placeholder_values": [],
        },
        metadata={"position": {"x": 0, "y": 0}},
    )
    # Create LLM block node referencing the credentials created above
    llm_node_id = str(uuid.uuid4())
    llm_block = AITextGeneratorBlock()
    llm_node = Node(
        id=llm_node_id,
        block_id=llm_block.id,
        input_default={
            "model": "gpt-4o-mini",
            "sys_prompt": "You are a helpful assistant.",
            "retry": 3,
            "prompt_values": {},
            "credentials": {
                "provider": "openai",
                "id": credentials.id,
                "type": "api_key",
                "title": credentials.title,
            },
        },
        metadata={"position": {"x": 300, "y": 0}},
    )
    # Create output node
    output_node_id = str(uuid.uuid4())
    output_block = AgentOutputBlock()
    output_node = Node(
        id=output_node_id,
        block_id=output_block.id,
        input_default={
            "name": "llm_response",
            "title": "LLM Response",
            "value": "",
            "format": "",
            "advanced": False,
            "description": "Response from the LLM",
        },
        metadata={"position": {"x": 600, "y": 0}},
    )
    # Create links
    # Link input.result -> llm.prompt
    link1 = Link(
        source_id=input_node_id,
        sink_id=llm_node_id,
        source_name="result",
        sink_name="prompt",
        is_static=True,
    )
    # Link llm.response -> output.value
    link2 = Link(
        source_id=llm_node_id,
        sink_id=output_node_id,
        source_name="response",
        sink_name="value",
        is_static=False,
    )
    # Create the graph
    graph = Graph(
        id=graph_id,
        version=1,
        is_active=True,
        name="LLM Test Agent",
        description="An agent that uses an LLM to process text",
        nodes=[input_node, llm_node, output_node],
        links=[link1, link2],
    )
    created_graph = await create_graph(graph, user.id)
    # 4. Create and approve a store listing (unique slug avoids collisions)
    unique_slug = f"llm-test-agent-{str(uuid.uuid4())[:8]}"
    store_submission = await store_db.create_store_submission(
        user_id=user.id,
        agent_id=created_graph.id,
        agent_version=created_graph.version,
        slug=unique_slug,
        name="LLM Test Agent",
        description="An agent with LLM capabilities",
        sub_heading="Test agent with OpenAI integration",
        categories=["testing", "ai"],
        image_urls=["https://example.com/image.jpg"],
    )
    assert store_submission.store_listing_version_id is not None
    await store_db.review_store_submission(
        store_listing_version_id=store_submission.store_listing_version_id,
        is_approved=True,
        external_comments="Approved for testing",
        internal_comments="Test approval for LLM agent",
        reviewer_id=user.id,
    )
    return {
        "user": user,
        "graph": created_graph,
        "credentials": credentials,
        "store_submission": store_submission,
    }
@pytest.fixture(scope="session")
async def setup_firecrawl_test_data():
    """
    Set up test data for Firecrawl agent tests (missing credentials scenario):
    1. Create a test user (WITHOUT Firecrawl credentials)
    2. Create a test graph with input -> Firecrawl block -> output
    3. Create and approve a store listing

    Returns:
        dict with keys "user", "graph", and "store_submission".
    """
    # 1. Create a test user
    user_data = {
        "sub": f"test-user-{uuid.uuid4()}",
        "email": f"test-{uuid.uuid4()}@example.com",
    }
    user = await get_or_create_user(user_data)
    # 1b. Create a profile with username for the user (required for store agent lookup)
    username = user.email.split("@")[0]
    await prisma.profile.create(
        data={
            "userId": user.id,
            "username": username,
            "name": f"Test User {username}",
            "description": "Test user profile for Firecrawl tests",
            "links": [],  # Required field - empty array for test profiles
        }
    )
    # NOTE: We deliberately do NOT create Firecrawl credentials for this user
    # This tests the scenario where required credentials are missing
    # 2. Create a test graph with input -> Firecrawl block -> output
    graph_id = str(uuid.uuid4())
    # Create input node for the URL
    input_node_id = str(uuid.uuid4())
    input_block = AgentInputBlock()
    input_node = Node(
        id=input_node_id,
        block_id=input_block.id,
        input_default={
            "name": "url",
            "title": "URL to Scrape",
            "value": "",
            "advanced": False,
            "description": "URL for Firecrawl to scrape",
            "placeholder_values": [],
        },
        metadata={"position": {"x": 0, "y": 0}},
    )
    # Create Firecrawl block node; the referenced credential ID does not
    # exist in the store, which is the condition under test.
    firecrawl_node_id = str(uuid.uuid4())
    firecrawl_block = FirecrawlScrapeBlock()
    firecrawl_node = Node(
        id=firecrawl_node_id,
        block_id=firecrawl_block.id,
        input_default={
            "limit": 10,
            "only_main_content": True,
            "max_age": 3600000,
            "wait_for": 200,
            "formats": ["markdown"],
            "credentials": {
                "provider": "firecrawl",
                "id": "test-firecrawl-id",
                "type": "api_key",
                "title": "Firecrawl API Key",
            },
        },
        metadata={"position": {"x": 300, "y": 0}},
    )
    # Create output node
    output_node_id = str(uuid.uuid4())
    output_block = AgentOutputBlock()
    output_node = Node(
        id=output_node_id,
        block_id=output_block.id,
        input_default={
            "name": "scraped_data",
            "title": "Scraped Data",
            "value": "",
            "format": "",
            "advanced": False,
            "description": "Data scraped by Firecrawl",
        },
        metadata={"position": {"x": 600, "y": 0}},
    )
    # Create links
    # Link input.result -> firecrawl.url
    link1 = Link(
        source_id=input_node_id,
        sink_id=firecrawl_node_id,
        source_name="result",
        sink_name="url",
        is_static=True,
    )
    # Link firecrawl.markdown -> output.value
    link2 = Link(
        source_id=firecrawl_node_id,
        sink_id=output_node_id,
        source_name="markdown",
        sink_name="value",
        is_static=False,
    )
    # Create the graph
    graph = Graph(
        id=graph_id,
        version=1,
        is_active=True,
        name="Firecrawl Test Agent",
        description="An agent that uses Firecrawl to scrape websites",
        nodes=[input_node, firecrawl_node, output_node],
        links=[link1, link2],
    )
    created_graph = await create_graph(graph, user.id)
    # 3. Create and approve a store listing (unique slug avoids collisions)
    unique_slug = f"firecrawl-test-agent-{str(uuid.uuid4())[:8]}"
    store_submission = await store_db.create_store_submission(
        user_id=user.id,
        agent_id=created_graph.id,
        agent_version=created_graph.version,
        slug=unique_slug,
        name="Firecrawl Test Agent",
        description="An agent with Firecrawl integration (no credentials)",
        sub_heading="Test agent requiring Firecrawl credentials",
        categories=["testing", "scraping"],
        image_urls=["https://example.com/image.jpg"],
    )
    assert store_submission.store_listing_version_id is not None
    await store_db.review_store_submission(
        store_listing_version_id=store_submission.store_listing_version_id,
        is_approved=True,
        external_comments="Approved for testing",
        internal_comments="Test approval for Firecrawl agent",
        reviewer_id=user.id,
    )
    return {
        "user": user,
        "graph": created_graph,
        "store_submission": store_submission,
    }

View File

@@ -0,0 +1,118 @@
"""Base classes and shared utilities for chat tools."""
import logging
from typing import Any
from openai.types.chat import ChatCompletionToolParam
from backend.server.v2.chat.models import StreamToolExecutionResult
from .models import ErrorResponse, NeedLoginResponse, ToolResponseBase
logger = logging.getLogger(__name__)
class BaseTool:
    """Base class for all chat tools."""

    @property
    def name(self) -> str:
        """Tool name for OpenAI function calling."""
        raise NotImplementedError

    @property
    def description(self) -> str:
        """Human-readable tool description sent to OpenAI."""
        raise NotImplementedError

    @property
    def parameters(self) -> dict[str, Any]:
        """JSON-schema parameter definition sent to OpenAI."""
        raise NotImplementedError

    @property
    def requires_auth(self) -> bool:
        """Whether this tool may only be invoked by an authenticated user."""
        return False

    def as_openai_tool(self) -> ChatCompletionToolParam:
        """Render this tool in OpenAI function-calling format."""
        return ChatCompletionToolParam(
            type="function",
            function={
                "name": self.name,
                "description": self.description,
                "parameters": self.parameters,
            },
        )

    async def execute(
        self,
        user_id: str | None,
        session_id: str,
        tool_call_id: str,
        **kwargs,
    ) -> StreamToolExecutionResult:
        """Run the tool, enforcing authentication and wrapping failures.

        Args:
            user_id: User ID (may be anonymous like "anon_123")
            session_id: Chat session ID
            tool_call_id: ID of the originating model tool call
            **kwargs: Tool-specific parameters

        Returns:
            StreamToolExecutionResult carrying the tool's JSON response;
            `success` is False for auth failures and raised exceptions.
        """
        # Auth gate: a missing/empty user_id is rejected before any work runs.
        if self.requires_auth and not user_id:
            logger.error(
                f"Attempted tool call for {self.name} but user not authenticated"
            )
            login_payload = NeedLoginResponse(
                message=f"Please sign in to use {self.name}",
                session_id=session_id,
            )
            return StreamToolExecutionResult(
                tool_id=tool_call_id,
                tool_name=self.name,
                result=login_payload.model_dump_json(),
                success=False,
            )
        try:
            # Delegate to the subclass implementation; serialization stays
            # inside the try so its failures are also converted to errors.
            outcome = await self._execute(user_id, session_id, **kwargs)
            return StreamToolExecutionResult(
                tool_id=tool_call_id,
                tool_name=self.name,
                result=outcome.model_dump_json(),
            )
        except Exception as e:
            logger.error(f"Error in {self.name}: {e}", exc_info=True)
            failure = ErrorResponse(
                message=f"An error occurred while executing {self.name}",
                error=str(e),
                session_id=session_id,
            )
            return StreamToolExecutionResult(
                tool_id=tool_call_id,
                tool_name=self.name,
                result=failure.model_dump_json(),
                success=False,
            )

    async def _execute(
        self,
        user_id: str | None,
        session_id: str,
        **kwargs,
    ) -> ToolResponseBase:
        """Tool-specific logic implemented by subclasses.

        Args:
            user_id: User ID (authenticated or anonymous)
            session_id: Chat session ID
            **kwargs: Tool-specific parameters

        Returns:
            Pydantic response object
        """
        raise NotImplementedError

View File

@@ -0,0 +1,149 @@
"""Tool for discovering agents from marketplace and user library."""
import logging
from typing import Any
from backend.server.v2.chat.tools.base import BaseTool
from backend.server.v2.chat.tools.models import (
AgentCarouselResponse,
AgentInfo,
ErrorResponse,
NoResultsResponse,
ToolResponseBase,
)
from backend.server.v2.store import db as store_db
from backend.util.exceptions import DatabaseError, NotFoundError
logger = logging.getLogger(__name__)
class FindAgentTool(BaseTool):
    """Tool for discovering agents based on user needs.

    Performs a public marketplace search and returns a carousel of matching
    agents for the user to choose from.
    """

    @property
    def name(self) -> str:
        return "find_agent"

    @property
    def description(self) -> str:
        return (
            "Discover agents from the marketplace based on capabilities and user needs."
        )

    @property
    def parameters(self) -> dict[str, Any]:
        # JSON Schema describing the tool-call arguments shown to the LLM.
        return {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "Search query describing what the user wants to accomplish. Use single keywords for best results.",
                },
            },
            "required": ["query"],
        }

    async def _execute(
        self,
        user_id: str | None,
        session_id: str,
        **kwargs,
    ) -> ToolResponseBase:
        """Search for agents in the marketplace.

        Args:
            user_id: User ID (may be anonymous); unused here — search is public.
            session_id: Chat session ID, echoed back in every response.
            query: Search query (tool parameter, via **kwargs).

        Returns:
            AgentCarouselResponse: List of agents found in the marketplace
            NoResultsResponse: No agents found in the marketplace
            ErrorResponse: Error message
        """
        # Bug fix: `or ""` also guards against an explicit JSON null from the
        # model, which `kwargs.get("query", "")` alone would return as None
        # and then crash on `.strip()`.
        query = (kwargs.get("query") or "").strip()
        if not query:
            return ErrorResponse(
                message="Please provide a search query",
                session_id=session_id,
            )
        agents: list[AgentInfo] = []
        try:
            logger.info(f"Searching marketplace for: {query}")
            store_results = await store_db.get_store_agents(
                search_query=query,
                page_size=5,
            )
            logger.info(f"Find agents tool found {len(store_results.agents)} agents")
            for agent in store_results.agents:
                # Marketplace agents are addressed as 'creator/slug'.
                agent_id = f"{agent.creator}/{agent.slug}"
                logger.info(f"Building agent ID = {agent_id}")
                agents.append(
                    AgentInfo(
                        id=agent_id,
                        name=agent.agent_name,
                        description=agent.description or "",
                        source="marketplace",
                        in_library=False,
                        creator=agent.creator,
                        category="general",
                        rating=agent.rating,
                        runs=agent.runs,
                        is_featured=False,
                    ),
                )
        except NotFoundError:
            # No matches is not an error; fall through to NoResultsResponse.
            pass
        except DatabaseError as e:
            logger.error(f"Error searching agents: {e}", exc_info=True)
            return ErrorResponse(
                message="Failed to search for agents. Please try again.",
                error=str(e),
                session_id=session_id,
            )
        if not agents:
            return NoResultsResponse(
                message=f"No agents found matching '{query}'. Try different keywords or browse the marketplace. If you have 3 consecutive find_agent tool calls results and found no agents. Please stop trying and ask the user if there is anything else you can help with.",
                session_id=session_id,
                suggestions=[
                    "Try more general terms",
                    "Browse categories in the marketplace",
                    "Check spelling",
                ],
            )
        # Return formatted carousel
        title = (
            f"Found {len(agents)} agent{'s' if len(agents) != 1 else ''} for '{query}'"
        )
        return AgentCarouselResponse(
            message="Now you have found some options for the user to choose from. Please ask the user if they would like to use any of these agents. If they do, please call the get_agent_details tool for this agent.",
            title=title,
            agents=agents,
            count=len(agents),
            session_id=session_id,
        )
if __name__ == "__main__":
    # Ad-hoc manual smoke test for the tool (requires a reachable database).
    import asyncio

    import prisma

    find_agent_tool = FindAgentTool()
    print(find_agent_tool.as_openai_tool())

    async def main():
        # Bug fix: connect and disconnect must act on the SAME client
        # instance. The previous code called prisma.Prisma().connect() and
        # prisma.Prisma().disconnect() on two freshly-constructed clients,
        # so the connected one was never disconnected.
        client = prisma.Prisma()
        await client.connect()
        try:
            agents = await find_agent_tool.execute(
                tool_call_id="tool_call_id",
                query="Linkedin",
                user_id="user",
                session_id="session",
            )
            print(agents)
        finally:
            await client.disconnect()

    asyncio.run(main())

View File

@@ -0,0 +1,220 @@
"""Tool for getting detailed information about a specific agent."""
import logging
from typing import Any
from backend.data import graph as graph_db
from backend.data.model import CredentialsMetaInput
from backend.server.v2.chat.tools.base import BaseTool
from backend.server.v2.chat.tools.models import (
AgentDetails,
AgentDetailsResponse,
ErrorResponse,
ExecutionOptions,
ToolResponseBase,
)
from backend.server.v2.store import db as store_db
from backend.util.exceptions import DatabaseError, NotFoundError
logger = logging.getLogger(__name__)
class GetAgentDetailsTool(BaseTool):
    """Tool for getting detailed information about an agent.

    Resolves a marketplace slug ('creator/agent-name') to a store listing,
    loads the underlying graph, and reports the agent's inputs, credential
    requirements, and supported execution modes.
    """

    @property
    def name(self) -> str:
        return "get_agent_details"

    @property
    def description(self) -> str:
        return "Get detailed information about a specific agent including inputs, credentials required, and execution options."

    @property
    def parameters(self) -> dict[str, Any]:
        # JSON Schema describing the tool-call arguments shown to the LLM.
        return {
            "type": "object",
            "properties": {
                "username_agent_slug": {
                    "type": "string",
                    "description": "The marketplace agent slug (e.g., 'username/agent-name')",
                },
            },
            "required": ["username_agent_slug"],
        }

    async def _execute(
        self,
        user_id: str | None,
        session_id: str,
        **kwargs,
    ) -> ToolResponseBase:
        """Get detailed information about an agent.

        Args:
            user_id: User ID (may be anonymous/None); only used to report
                ``user_authenticated`` — the lookup itself is public.
            session_id: Chat session ID, echoed back in every response.
            username_agent_slug: Agent slug 'creator/agent-name' (via **kwargs).

        Returns:
            AgentDetailsResponse on success; ErrorResponse for a malformed
            slug, an unknown agent, or a database failure.
        """
        agent_id = kwargs.get("username_agent_slug", "").strip()
        # Marketplace slugs always contain a '/' between creator and agent name.
        if not agent_id or "/" not in agent_id:
            return ErrorResponse(
                message="Please provide an agent ID in format 'creator/agent-name'",
                session_id=session_id,
            )
        try:
            # Always try to get from marketplace first
            graph = None
            store_agent = None
            # Check if it's a slug format (username/agent_name)
            try:
                # Parse username/agent_name from slug; split on the FIRST '/'
                # so names containing '/' are preserved.
                username, agent_name = agent_id.split("/", 1)
                store_agent = await store_db.get_store_agent_details(
                    username, agent_name
                )
                logger.info(f"Found agent {agent_id} in marketplace")
            except NotFoundError as e:
                logger.debug(f"Failed to get from marketplace: {e}")
                return ErrorResponse(
                    message=f"Agent '{agent_id}' not found",
                    session_id=session_id,
                )
            except DatabaseError as e:
                logger.error(f"Failed to get from marketplace: {e}")
                return ErrorResponse(
                    message=f"Failed to get agent details: {e!s}",
                    session_id=session_id,
                )
            # If we found a store agent, get its graph
            if store_agent:
                try:
                    # Use get_available_graph to get the graph from store listing version
                    graph_meta = await store_db.get_available_graph(
                        store_agent.store_listing_version_id
                    )
                    # Now get the full graph with that ID
                    graph = await graph_db.get_graph(
                        graph_id=graph_meta.id,
                        version=graph_meta.version,
                        user_id=None,  # Public access
                        include_subgraphs=True,
                    )
                except NotFoundError as e:
                    logger.error(f"Failed to get graph for store agent: {e}")
                    return ErrorResponse(
                        message=f"Failed to get graph for store agent: {e!s}",
                        session_id=session_id,
                    )
                except DatabaseError as e:
                    logger.error(f"Failed to get graph for store agent: {e}")
                    return ErrorResponse(
                        message=f"Failed to get graph for store agent: {e!s}",
                        session_id=session_id,
                    )
            if not graph:
                return ErrorResponse(
                    message=f"Agent '{agent_id}' not found",
                    session_id=session_id,
                )
            credentials_input_schema = graph.credentials_input_schema
            # Extract credentials from the JSON schema properties
            credentials: list[CredentialsMetaInput] = []
            if (
                isinstance(credentials_input_schema, dict)
                and "properties" in credentials_input_schema
            ):
                for cred_name, cred_schema in credentials_input_schema[
                    "properties"
                ].items():
                    # Extract credential metadata from the schema
                    # The schema properties contain provider info and other metadata
                    # Get provider from credentials_provider array or properties.provider.const
                    provider = "unknown"
                    if (
                        "credentials_provider" in cred_schema
                        and cred_schema["credentials_provider"]
                    ):
                        provider = cred_schema["credentials_provider"][0]
                    elif (
                        "properties" in cred_schema
                        and "provider" in cred_schema["properties"]
                    ):
                        provider = cred_schema["properties"]["provider"].get(
                            "const", "unknown"
                        )
                    # Get type from credentials_types array or properties.type.const
                    cred_type = "api_key"  # Default
                    if (
                        "credentials_types" in cred_schema
                        and cred_schema["credentials_types"]
                    ):
                        cred_type = cred_schema["credentials_types"][0]
                    elif (
                        "properties" in cred_schema
                        and "type" in cred_schema["properties"]
                    ):
                        cred_type = cred_schema["properties"]["type"].get(
                            "const", "api_key"
                        )
                    # NOTE(review): the schema property name is used as the
                    # credential input id — confirm this matches what
                    # CredentialsMetaInput consumers expect.
                    credentials.append(
                        CredentialsMetaInput(
                            id=cred_name,
                            title=cred_schema.get("title", cred_name),
                            provider=provider,  # type: ignore
                            type=cred_type,
                        )
                    )
            trigger_info = (
                graph.trigger_setup_info.model_dump()
                if graph.trigger_setup_info
                else None
            )
            agent_details = AgentDetails(
                id=graph.id,
                name=graph.name,
                description=graph.description,
                inputs=graph.input_schema,
                credentials=credentials,
                execution_options=ExecutionOptions(
                    # Currently a graph with a webhook can only be triggered by a webhook
                    manual=trigger_info is None,
                    scheduled=trigger_info is None,
                    webhook=trigger_info is not None,
                ),
                trigger_info=trigger_info,
            )
            return AgentDetailsResponse(
                message=f"Found agent '{agent_details.name}'. You do not need to run this tool again for this agent.",
                session_id=session_id,
                agent=agent_details,
                user_authenticated=user_id is not None,
                graph_id=graph.id,
                graph_version=graph.version,
            )
        except Exception as e:
            # Catch-all so a malformed schema or other unexpected state still
            # yields a structured error to the model instead of an exception.
            logger.error(f"Error getting agent details: {e}", exc_info=True)
            return ErrorResponse(
                message=f"Failed to get agent details: {e!s}",
                error=str(e),
                session_id=session_id,
            )

View File

@@ -0,0 +1,309 @@
import uuid
import orjson
import pytest
from backend.server.v2.chat.tools._test_data import setup_llm_test_data, setup_test_data
from backend.server.v2.chat.tools.get_agent_details import GetAgentDetailsTool
# This is so the formatter doesn't remove the fixture imports
setup_llm_test_data = setup_llm_test_data
setup_test_data = setup_test_data
@pytest.mark.asyncio(scope="session")
async def test_get_agent_details_success(setup_test_data):
    """Test successfully getting agent details from marketplace"""
    user = setup_test_data["user"]
    graph = setup_test_data["graph"]
    submission = setup_test_data["store_submission"]

    # Marketplace IDs take the form "<username>/<slug>".
    slug = f"{user.email.split('@')[0]}/{submission.slug}"

    response = await GetAgentDetailsTool().execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=slug,
    )

    assert response is not None
    assert hasattr(response, "result")
    assert isinstance(response.result, str)
    payload = orjson.loads(response.result)

    # Top-level response structure.
    for key in ("agent", "message", "graph_id", "graph_version", "user_authenticated"):
        assert key in payload

    agent = payload["agent"]
    assert agent["id"] == graph.id
    assert agent["name"] == "Test Agent"
    assert (
        agent["description"] == "A simple test agent"
    )  # Description from store submission
    for key in ("inputs", "credentials", "execution_options"):
        assert key in agent

    # All three execution flags must be present.
    exec_options = agent["execution_options"]
    for key in ("manual", "scheduled", "webhook"):
        assert key in exec_options

    # Input schema should expose the test field when properties exist.
    assert isinstance(agent["inputs"], dict)
    if "properties" in agent["inputs"]:
        assert "test_input" in agent["inputs"]["properties"]
@pytest.mark.asyncio(scope="session")
async def test_get_agent_details_with_llm_credentials(setup_llm_test_data):
    """Test getting agent details for an agent that requires LLM credentials"""
    user = setup_llm_test_data["user"]
    submission = setup_llm_test_data["store_submission"]
    slug = f"{user.email.split('@')[0]}/{submission.slug}"

    response = await GetAgentDetailsTool().execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=slug,
    )

    assert response is not None and hasattr(response, "result")
    assert isinstance(response.result, str)
    payload = orjson.loads(response.result)

    assert "agent" in payload
    agent = payload["agent"]

    # Credential requirements are surfaced as a list; exact contents depend
    # on how the graph's credentials_input_schema is structured.
    assert "credentials" in agent
    assert isinstance(agent["credentials"], list)

    # The LLM agent exposes user_prompt as an input when properties exist.
    assert "inputs" in agent
    if "properties" in agent["inputs"]:
        assert "user_prompt" in agent["inputs"]["properties"]
@pytest.mark.asyncio(scope="session")
async def test_get_agent_details_invalid_format():
    """Test error handling when agent_id is not in correct format"""
    # A slug without a '/' separator must be rejected.
    response = await GetAgentDetailsTool().execute(
        user_id=str(uuid.uuid4()),
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug="invalid-format",
    )

    assert response is not None and hasattr(response, "result")
    assert isinstance(response.result, str)
    payload = orjson.loads(response.result)

    # The error message tells the model the expected slug format.
    assert "message" in payload
    assert "creator/agent-name" in payload["message"]
@pytest.mark.asyncio(scope="session")
async def test_get_agent_details_empty_slug():
    """Test error handling when agent_id is empty"""
    # An empty slug must produce the same format-error guidance.
    response = await GetAgentDetailsTool().execute(
        user_id=str(uuid.uuid4()),
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug="",
    )

    assert response is not None and hasattr(response, "result")
    assert isinstance(response.result, str)
    payload = orjson.loads(response.result)

    assert "message" in payload
    assert "creator/agent-name" in payload["message"]
@pytest.mark.asyncio(scope="session")
async def test_get_agent_details_not_found():
    """Test error handling when agent is not found in marketplace"""
    # Well-formed slug, but no such listing exists.
    response = await GetAgentDetailsTool().execute(
        user_id=str(uuid.uuid4()),
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug="nonexistent/agent",
    )

    assert response is not None and hasattr(response, "result")
    assert isinstance(response.result, str)
    payload = orjson.loads(response.result)

    assert "message" in payload
    assert "not found" in payload["message"].lower()
@pytest.mark.asyncio(scope="session")
async def test_get_agent_details_anonymous_user(setup_test_data):
    """Test getting agent details as an anonymous user (no user_id)"""
    user = setup_test_data["user"]
    submission = setup_test_data["store_submission"]
    slug = f"{user.email.split('@')[0]}/{submission.slug}"

    # user_id=None simulates an unauthenticated (anonymous) caller.
    response = await GetAgentDetailsTool().execute(
        user_id=None,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=slug,
    )

    assert response is not None and hasattr(response, "result")
    assert isinstance(response.result, str)
    payload = orjson.loads(response.result)

    # Details are public, but the response flags the caller as unauthenticated.
    assert "agent" in payload
    assert "user_authenticated" in payload
    assert payload["user_authenticated"] is False
@pytest.mark.asyncio(scope="session")
async def test_get_agent_details_authenticated_user(setup_test_data):
    """Test getting agent details as an authenticated user"""
    user = setup_test_data["user"]
    submission = setup_test_data["store_submission"]
    slug = f"{user.email.split('@')[0]}/{submission.slug}"

    # Passing a real user_id marks the caller as authenticated.
    response = await GetAgentDetailsTool().execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=slug,
    )

    assert response is not None and hasattr(response, "result")
    assert isinstance(response.result, str)
    payload = orjson.loads(response.result)

    assert "agent" in payload
    assert "user_authenticated" in payload
    assert payload["user_authenticated"] is True
@pytest.mark.asyncio(scope="session")
async def test_get_agent_details_includes_execution_options(setup_test_data):
    """Test that agent details include execution options"""
    user = setup_test_data["user"]
    submission = setup_test_data["store_submission"]
    slug = f"{user.email.split('@')[0]}/{submission.slug}"

    response = await GetAgentDetailsTool().execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=slug,
    )

    assert response is not None and hasattr(response, "result")
    assert isinstance(response.result, str)
    payload = orjson.loads(response.result)

    assert "agent" in payload
    agent = payload["agent"]
    assert "execution_options" in agent
    exec_options = agent["execution_options"]

    # All three flags must be booleans.
    for mode in ("manual", "scheduled", "webhook"):
        assert isinstance(exec_options[mode], bool)

    # A regular (non-webhook) agent supports manual and scheduled runs only.
    assert exec_options["manual"] is True
    assert exec_options["scheduled"] is True
    assert exec_options["webhook"] is False

View File

@@ -0,0 +1,183 @@
"""Tool for getting required setup information for an agent."""
import logging
from typing import Any
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.server.v2.chat.tools.base import BaseTool
from backend.server.v2.chat.tools.get_agent_details import GetAgentDetailsTool
from backend.server.v2.chat.tools.models import (
AgentDetailsResponse,
ErrorResponse,
SetupInfo,
SetupRequirementsResponse,
ToolResponseBase,
UserReadiness,
)
logger = logging.getLogger(__name__)
class GetRequiredSetupInfoTool(BaseTool):
    """Tool for getting required setup information including credentials and inputs."""

    @property
    def name(self) -> str:
        return "get_required_setup_info"

    @property
    def description(self) -> str:
        return """Check if an agent can be set up with the provided input data and credentials.
        Call this AFTER get_agent_details to validate that you have all required inputs.
        Pass the input dictionary you plan to use with run_agent or setup_agent to verify it's complete."""

    @property
    def parameters(self) -> dict[str, Any]:
        # JSON Schema describing the tool-call arguments shown to the LLM.
        return {
            "type": "object",
            "properties": {
                "username_agent_slug": {
                    "type": "string",
                    "description": "The marketplace agent slug (e.g., 'username/agent-name' or just 'agent-name' to search)",
                },
                "inputs": {
                    "type": "object",
                    "description": "The input dictionary you plan to provide. Should contain ALL required inputs from get_agent_details",
                    "additionalProperties": True,
                },
            },
            "required": ["username_agent_slug"],
        }

    @property
    def requires_auth(self) -> bool:
        """This tool requires authentication."""
        return True

    async def _execute(
        self,
        user_id: str | None,
        session_id: str,
        **kwargs,
    ) -> ToolResponseBase:
        """Retrieve and validate the required setup info for running an agent.

        Resolves the agent via GetAgentDetailsTool, determines which required
        credentials the user is missing and which required inputs were not
        provided, and reports overall readiness.

        Args:
            user_id: The authenticated user's ID (must not be None; authentication required).
            session_id: The chat session ID.
            username_agent_slug: The agent's marketplace slug 'username/agent-name' (via **kwargs).
            inputs: (Optional, via **kwargs) The input dict the caller plans to run the agent with.

        Returns:
            SetupRequirementsResponse containing:
                - agent and graph info,
                - credential and input requirements,
                - user readiness and missing credentials/fields,
                - setup instructions.
            ErrorResponse if the agent could not be resolved.
        """
        assert (
            user_id is not None
        ), "GetRequiredSetupInfoTool - This should never happen user_id is None when auth is required"
        # Call _execute directly since we're calling internally from another tool
        agent_details = await GetAgentDetailsTool()._execute(
            user_id, session_id, **kwargs
        )
        if isinstance(agent_details, ErrorResponse):
            return agent_details
        if not isinstance(agent_details, AgentDetailsResponse):
            return ErrorResponse(
                message="Failed to get agent details",
                session_id=session_id,
            )
        available_creds = await IntegrationCredentialsManager().store.get_all_creds(
            user_id
        )
        # Credentials the agent declares but the user has no matching
        # (provider, type) credential for.
        required_credentials = [
            c
            for c in agent_details.agent.credentials
            if not any(
                cred.provider == c.provider and cred.type == c.type
                for cred in available_creds
            )
        ]
        required_fields = set(agent_details.agent.inputs.get("required", []))
        # `or {}` guards against an explicit JSON null from the model.
        provided_inputs = kwargs.get("inputs") or {}
        missing_inputs = required_fields - set(provided_inputs.keys())
        missing_credentials = {c.id: c.model_dump() for c in required_credentials}
        user_readiness = UserReadiness(
            has_all_credentials=len(required_credentials) == 0,
            missing_credentials=missing_credentials,
            ready_to_run=len(missing_inputs) == 0 and len(required_credentials) == 0,
        )
        # Convert execution options to list of available modes
        exec_opts = agent_details.agent.execution_options
        execution_modes = []
        if exec_opts.manual:
            execution_modes.append("manual")
        if exec_opts.scheduled:
            execution_modes.append("scheduled")
        if exec_opts.webhook:
            execution_modes.append("webhook")
        # Convert input schema to list of input field info
        inputs_list = []
        if (
            isinstance(agent_details.agent.inputs, dict)
            and "properties" in agent_details.agent.inputs
        ):
            for field_name, field_schema in agent_details.agent.inputs[
                "properties"
            ].items():
                inputs_list.append(
                    {
                        "name": field_name,
                        "title": field_schema.get("title", field_name),
                        "type": field_schema.get("type", "string"),
                        "description": field_schema.get("description", ""),
                        "required": field_name
                        in agent_details.agent.inputs.get("required", []),
                    }
                )
        requirements = {
            "credentials": agent_details.agent.credentials,
            "inputs": inputs_list,
            "execution_modes": execution_modes,
        }
        # Bug fix: base the guidance message on what is actually MISSING
        # (required_credentials / missing_inputs), not on everything the agent
        # declares — previously a user who already had all credentials and had
        # supplied all inputs was still told to enter them, contradicting
        # user_readiness.ready_to_run.
        if required_credentials:
            message = "The user needs to enter credentials before proceeding. Please wait until you have a message informing you that the credentials have been entered."
        elif missing_inputs:
            message = (
                "The user needs to enter inputs before proceeding. Please wait until you have a message informing you that the inputs have been entered. The inputs are: "
                + ", ".join(sorted(missing_inputs))
            )
        else:
            message = "The agent is ready to run. Please call the run_agent tool with the agent ID."
        return SetupRequirementsResponse(
            message=message,
            session_id=session_id,
            setup_info=SetupInfo(
                agent_id=agent_details.agent.id,
                agent_name=agent_details.agent.name,
                user_readiness=user_readiness,
                requirements=requirements,
            ),
            graph_id=agent_details.graph_id,
            graph_version=agent_details.graph_version,
        )

View File

@@ -0,0 +1,394 @@
import uuid
import orjson
import pytest
from backend.server.v2.chat.tools._test_data import (
setup_firecrawl_test_data,
setup_llm_test_data,
setup_test_data,
)
from backend.server.v2.chat.tools.get_required_setup_info import (
GetRequiredSetupInfoTool,
)
# This is so the formatter doesn't remove the fixture imports
setup_llm_test_data = setup_llm_test_data
setup_test_data = setup_test_data
setup_firecrawl_test_data = setup_firecrawl_test_data
@pytest.mark.asyncio(scope="session")
async def test_get_required_setup_info_success(setup_test_data):
    """Test successfully getting setup info for a simple agent"""
    user = setup_test_data["user"]
    graph = setup_test_data["graph"]
    submission = setup_test_data["store_submission"]
    slug = f"{user.email.split('@')[0]}/{submission.slug}"

    response = await GetRequiredSetupInfoTool().execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=slug,
        inputs={"test_input": "Hello World"},
    )

    assert response is not None and hasattr(response, "result")
    assert isinstance(response.result, str)
    payload = orjson.loads(response.result)

    assert "setup_info" in payload
    setup_info = payload["setup_info"]

    # Agent identity should match the published graph.
    assert "agent_id" in setup_info and setup_info["agent_id"] == graph.id
    assert "agent_name" in setup_info and setup_info["agent_name"] == "Test Agent"

    assert "requirements" in setup_info
    requirements = setup_info["requirements"]
    for key in ("credentials", "inputs", "execution_modes"):
        assert key in requirements

    # A simple agent needs no credentials at all.
    assert isinstance(requirements["credentials"], list)
    assert len(requirements["credentials"]) == 0

    # Each input entry carries name/title/type metadata.
    assert isinstance(requirements["inputs"], list)
    if len(requirements["inputs"]) > 0:
        first = requirements["inputs"][0]
        for key in ("name", "title", "type"):
            assert key in first

    assert isinstance(requirements["execution_modes"], list)
    assert "manual" in requirements["execution_modes"]
    assert "scheduled" in requirements["execution_modes"]

    assert "user_readiness" in setup_info
    readiness = setup_info["user_readiness"]
    assert "has_all_credentials" in readiness
    assert "ready_to_run" in readiness
    # All inputs supplied and no credentials needed → runnable.
    assert readiness["ready_to_run"] is True
@pytest.mark.asyncio(scope="session")
async def test_get_required_setup_info_missing_credentials(setup_firecrawl_test_data):
    """Test getting setup info for an agent requiring missing credentials"""
    user = setup_firecrawl_test_data["user"]
    submission = setup_firecrawl_test_data["store_submission"]
    slug = f"{user.email.split('@')[0]}/{submission.slug}"

    response = await GetRequiredSetupInfoTool().execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=slug,
        inputs={"url": "https://example.com"},
    )

    assert response is not None and hasattr(response, "result")
    assert isinstance(response.result, str)
    payload = orjson.loads(response.result)

    assert "setup_info" in payload
    setup_info = payload["setup_info"]
    requirements = setup_info["requirements"]

    # The Firecrawl agent declares at least one required credential.
    assert "credentials" in requirements
    assert isinstance(requirements["credentials"], list)
    assert len(requirements["credentials"]) > 0

    cred = requirements["credentials"][0]
    assert "provider" in cred and cred["provider"] == "firecrawl"
    assert "type" in cred and cred["type"] == "api_key"

    # Missing credentials must block readiness.
    readiness = setup_info["user_readiness"]
    assert readiness["has_all_credentials"] is False
    assert readiness["ready_to_run"] is False
    assert "missing_credentials" in readiness
    assert isinstance(readiness["missing_credentials"], dict)
    assert len(readiness["missing_credentials"]) > 0
@pytest.mark.asyncio(scope="session")
async def test_get_required_setup_info_with_available_credentials(setup_llm_test_data):
    """Test getting setup info when user has required credentials"""
    user = setup_llm_test_data["user"]
    submission = setup_llm_test_data["store_submission"]
    slug = f"{user.email.split('@')[0]}/{submission.slug}"

    response = await GetRequiredSetupInfoTool().execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=slug,
        inputs={"user_prompt": "What is 2+2?"},
    )

    assert response is not None and hasattr(response, "result")
    assert isinstance(response.result, str)
    payload = orjson.loads(response.result)

    # The fixture seeds matching credentials, so the user is fully ready.
    readiness = payload["setup_info"]["user_readiness"]
    assert readiness["has_all_credentials"] is True
    assert readiness["ready_to_run"] is True
    assert "missing_credentials" in readiness
    assert len(readiness["missing_credentials"]) == 0
@pytest.mark.asyncio(scope="session")
async def test_get_required_setup_info_missing_inputs(setup_test_data):
    """Test getting setup info when required inputs are not provided"""
    user = setup_test_data["user"]
    submission = setup_test_data["store_submission"]
    slug = f"{user.email.split('@')[0]}/{submission.slug}"

    # Deliberately pass no inputs at all.
    response = await GetRequiredSetupInfoTool().execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=slug,
        inputs={},
    )

    assert response is not None and hasattr(response, "result")
    assert isinstance(response.result, str)
    payload = orjson.loads(response.result)

    setup_info = payload["setup_info"]
    requirements = setup_info["requirements"]
    assert "inputs" in requirements
    assert isinstance(requirements["inputs"], list)

    # Whether the agent is runnable depends on whether its inputs are optional,
    # so only assert the readiness flag is present.
    readiness = setup_info["user_readiness"]
    assert "ready_to_run" in readiness
@pytest.mark.asyncio(scope="session")
async def test_get_required_setup_info_invalid_agent():
    """Test getting setup info for a non-existent agent"""
    # Well-formed slug, but no such listing exists.
    response = await GetRequiredSetupInfoTool().execute(
        user_id=str(uuid.uuid4()),
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug="invalid/agent",
        inputs={},
    )

    assert response is not None and hasattr(response, "result")
    assert isinstance(response.result, str)
    payload = orjson.loads(response.result)

    assert "message" in payload
    # The message should signal failure in some form.
    lowered = payload["message"].lower()
    assert any(phrase in lowered for phrase in ["not found", "failed", "error"])
@pytest.mark.asyncio(scope="session")
async def test_get_required_setup_info_graph_metadata(setup_test_data):
    """Test that setup info includes graph metadata"""
    user = setup_test_data["user"]
    graph = setup_test_data["graph"]
    submission = setup_test_data["store_submission"]
    slug = f"{user.email.split('@')[0]}/{submission.slug}"

    response = await GetRequiredSetupInfoTool().execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=slug,
        inputs={"test_input": "test"},
    )

    assert response is not None and hasattr(response, "result")
    assert isinstance(response.result, str)
    payload = orjson.loads(response.result)

    # Graph identity is surfaced for later run_agent / setup_agent calls.
    assert "graph_id" in payload and payload["graph_id"] == graph.id
    assert "graph_version" in payload and payload["graph_version"] == graph.version
@pytest.mark.asyncio(scope="session")
async def test_get_required_setup_info_inputs_structure(setup_test_data):
    """Test that inputs are properly structured as a list"""
    user = setup_test_data["user"]
    submission = setup_test_data["store_submission"]
    slug = f"{user.email.split('@')[0]}/{submission.slug}"

    response = await GetRequiredSetupInfoTool().execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=slug,
        inputs={},
    )

    assert response is not None and hasattr(response, "result")
    assert isinstance(response.result, str)
    payload = orjson.loads(response.result)

    requirements = payload["setup_info"]["requirements"]
    assert isinstance(requirements["inputs"], list)

    # Every entry describes one input field with a fixed shape.
    for field in requirements["inputs"]:
        assert isinstance(field, dict)
        for key in ("name", "title", "type", "description", "required"):
            assert key in field
        assert isinstance(field["required"], bool)
@pytest.mark.asyncio(scope="session")
async def test_get_required_setup_info_execution_modes_structure(setup_test_data):
    """requirements["execution_modes"] must be a list of known mode names."""
    user = setup_test_data["user"]
    store_submission = setup_test_data["store_submission"]

    # Marketplace agents are addressed as "<username>/<slug>".
    slug = f"{user.email.split('@')[0]}/{store_submission.slug}"

    response = await GetRequiredSetupInfoTool().execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=slug,
        inputs={},
    )

    assert response is not None
    assert hasattr(response, "result")
    assert isinstance(response.result, str)

    payload = orjson.loads(response.result)
    requirements = payload["setup_info"]["requirements"]

    # Every advertised mode must be one of the supported strings.
    modes = requirements["execution_modes"]
    assert isinstance(modes, list)
    for mode in modes:
        assert isinstance(mode, str)
        assert mode in ["manual", "scheduled", "webhook"]

View File

@@ -0,0 +1,279 @@
"""Pydantic models for tool responses."""
from enum import Enum
from typing import Any
from pydantic import BaseModel, Field
from backend.data.model import CredentialsMetaInput
class ResponseType(str, Enum):
    """Types of tool responses.

    Subclasses ``str`` so members serialize directly as their string value.
    """

    # Agent discovery / details
    AGENT_CAROUSEL = "agent_carousel"
    AGENT_DETAILS = "agent_details"
    AGENT_DETAILS_NEED_LOGIN = "agent_details_need_login"
    AGENT_DETAILS_NEED_CREDENTIALS = "agent_details_need_credentials"
    # Setup / scheduling
    SETUP_REQUIREMENTS = "setup_requirements"
    SCHEDULE_CREATED = "schedule_created"
    WEBHOOK_CREATED = "webhook_created"
    PRESET_CREATED = "preset_created"
    EXECUTION_STARTED = "execution_started"
    # Auth / error / misc outcomes
    NEED_LOGIN = "need_login"
    NEED_CREDENTIALS = "need_credentials"
    INSUFFICIENT_CREDITS = "insufficient_credits"
    VALIDATION_ERROR = "validation_error"
    ERROR = "error"
    NO_RESULTS = "no_results"
    SUCCESS = "success"
# Base response model
class ToolResponseBase(BaseModel):
    """Base model for all tool responses."""

    # Discriminator; each subclass overrides with its own default.
    type: ResponseType
    # Human/LLM-readable summary of the outcome.
    message: str
    session_id: str | None = None
# Agent discovery models
class AgentInfo(BaseModel):
    """Information about an agent."""

    id: str
    name: str
    description: str
    source: str = Field(description="marketplace or library")
    # True when the agent is already in the requesting user's library.
    in_library: bool = False
    creator: str | None = None
    category: str | None = None
    rating: float | None = None
    runs: int | None = None
    is_featured: bool | None = None
    # Fields below are optional extras; presence depends on the source
    # (marketplace vs. library) — not all producers populate them.
    status: str | None = None
    can_access_graph: bool | None = None
    has_external_trigger: bool | None = None
    new_output: bool | None = None
    graph_id: str | None = None
class AgentCarouselResponse(ToolResponseBase):
    """Response for find_agent tool."""

    type: ResponseType = ResponseType.AGENT_CAROUSEL
    title: str = "Available Agents"
    agents: list[AgentInfo]
    # Number of agents returned; presumably len(agents) — set by the producer.
    count: int
class NoResultsResponse(ToolResponseBase):
    """Response when no agents found."""

    type: ResponseType = ResponseType.NO_RESULTS
    # Alternative search terms to offer the user. default_factory matches the
    # style already used elsewhere in this module (e.g. SetupInfo).
    suggestions: list[str] = Field(default_factory=list)
# Agent details models
class InputField(BaseModel):
    """Input field specification."""

    name: str
    # JSON-schema-style type name; defaults to plain string.
    type: str = "string"
    description: str = ""
    required: bool = False
    default: Any | None = None
    # Enumerated choices, when the field is restricted to a fixed set.
    options: list[Any] | None = None
    format: str | None = None
class ExecutionOptions(BaseModel):
    """Available execution options for an agent."""

    # Which launch modes the agent supports; webhook is opt-in.
    manual: bool = True
    scheduled: bool = True
    webhook: bool = False
class AgentDetails(BaseModel):
    """Detailed agent information."""

    id: str
    name: str
    description: str
    # True when the agent is already in the requesting user's library.
    in_library: bool = False
    # Input schema/values keyed by input name (shape defined by the graph).
    # default_factory matches the style used for execution_options below.
    inputs: dict[str, Any] = Field(default_factory=dict)
    # Credential requirements declared by the graph.
    credentials: list[CredentialsMetaInput] = Field(default_factory=list)
    execution_options: ExecutionOptions = Field(default_factory=ExecutionOptions)
    # Present only for externally-triggered agents; structure not fixed here.
    trigger_info: dict[str, Any] | None = None
class AgentDetailsResponse(ToolResponseBase):
    """Response for get_agent_details tool."""

    type: ResponseType = ResponseType.AGENT_DETAILS
    agent: AgentDetails
    user_authenticated: bool = False
    # Resolved graph reference, when known.
    graph_id: str | None = None
    graph_version: int | None = None
class AgentDetailsNeedLoginResponse(ToolResponseBase):
    """Response when agent details need login."""

    type: ResponseType = ResponseType.AGENT_DETAILS_NEED_LOGIN
    agent: AgentDetails
    # Loosely-typed extra agent context; schema defined by the producer.
    agent_info: dict[str, Any] | None = None
    graph_id: str | None = None
    graph_version: int | None = None
class AgentDetailsNeedCredentialsResponse(ToolResponseBase):
    """Response when agent needs credentials to be configured."""

    # NOTE(review): this defaults to NEED_CREDENTIALS even though the enum
    # also defines AGENT_DETAILS_NEED_CREDENTIALS — confirm which value the
    # frontend expects before changing either.
    type: ResponseType = ResponseType.NEED_CREDENTIALS
    agent: AgentDetails
    # JSON schema describing the credentials the user must provide.
    credentials_schema: dict[str, Any]
    agent_info: dict[str, Any] | None = None
    graph_id: str | None = None
    graph_version: int | None = None
# Setup info models
class SetupRequirementInfo(BaseModel):
    """Setup requirement information."""

    # Credential field key in the graph's credentials schema.
    key: str
    provider: str
    required: bool = True
    # True when the user already has a matching credential.
    user_has: bool = False
    credential_id: str | None = None
    type: str | None = None
    scopes: list[str] | None = None
    description: str | None = None
class ExecutionModeInfo(BaseModel):
    """Execution mode information."""

    type: str  # manual, scheduled, webhook
    description: str
    # Whether this agent supports the mode.
    supported: bool
    # Extra config fields needed to use the mode (name -> description).
    config_required: dict[str, str] | None = None
    trigger_info: dict[str, Any] | None = None
class UserReadiness(BaseModel):
    """User readiness status."""

    has_all_credentials: bool = False
    # Keyed by credential field name; values describe what is missing.
    # default_factory matches the style used elsewhere in this module.
    missing_credentials: dict[str, Any] = Field(default_factory=dict)
    ready_to_run: bool = False
class SetupInfo(BaseModel):
    """Complete setup information."""

    agent_id: str
    agent_name: str
    # Requirement buckets; each value is a list of requirement entries.
    requirements: dict[str, list[Any]] = Field(
        default_factory=lambda: {
            "credentials": [],
            "inputs": [],
            "execution_modes": [],
        },
    )
    user_readiness: UserReadiness = Field(default_factory=UserReadiness)
    # Ordered human-readable setup steps. default_factory for consistency
    # with the other defaults in this class.
    setup_instructions: list[str] = Field(default_factory=list)
class SetupRequirementsResponse(ToolResponseBase):
    """Response for get_required_setup_info tool."""

    type: ResponseType = ResponseType.SETUP_REQUIREMENTS
    setup_info: SetupInfo
    # Resolved graph reference, when known.
    graph_id: str | None = None
    graph_version: int | None = None
# Setup agent models
class ScheduleCreatedResponse(ToolResponseBase):
    """Response for scheduled agent setup."""

    type: ResponseType = ResponseType.SCHEDULE_CREATED
    schedule_id: str
    name: str
    # Five-field cron expression driving the schedule.
    cron: str
    timezone: str = "UTC"
    # Next fire time, if the scheduler reported one.
    next_run: str | None = None
    graph_id: str
    graph_name: str
class WebhookCreatedResponse(ToolResponseBase):
    """Response for webhook agent setup."""

    type: ResponseType = ResponseType.WEBHOOK_CREATED
    webhook_id: str
    # URL external systems call to trigger the agent.
    webhook_url: str
    preset_id: str | None = None
    name: str
    graph_id: str
    graph_name: str
class PresetCreatedResponse(ToolResponseBase):
    """Response for preset agent setup."""

    type: ResponseType = ResponseType.PRESET_CREATED
    preset_id: str
    name: str
    graph_id: str
    graph_name: str
# Run agent models
class ExecutionStartedResponse(ToolResponseBase):
    """Response for agent execution started."""

    type: ResponseType = ResponseType.EXECUTION_STARTED
    execution_id: str
    graph_id: str
    graph_name: str
    # Execution lifecycle state; newly-started runs default to QUEUED.
    status: str = "QUEUED"
    # Fields below are populated only once/if the run completes.
    ended_at: str | None = None
    outputs: dict[str, Any] | None = None
    error: str | None = None
    timeout_reached: bool | None = None
class InsufficientCreditsResponse(ToolResponseBase):
    """Response for insufficient credits."""

    type: ResponseType = ResponseType.INSUFFICIENT_CREDITS
    # The user's current credit balance.
    balance: float
class ValidationErrorResponse(ToolResponseBase):
    """Response for validation errors."""

    type: ResponseType = ResponseType.VALIDATION_ERROR
    error: str
    # Structured context about what failed validation, when available.
    details: dict[str, Any] | None = None
# Auth/error models
class NeedLoginResponse(ToolResponseBase):
    """Response when login is needed."""

    type: ResponseType = ResponseType.NEED_LOGIN
    # Context about the agent the user was trying to access, if any.
    agent_info: dict[str, Any] | None = None
class ErrorResponse(ToolResponseBase):
    """Response for errors."""

    type: ResponseType = ResponseType.ERROR
    # Short machine-oriented error identifier/description, when available.
    error: str | None = None
    details: dict[str, Any] | None = None

View File

@@ -0,0 +1,241 @@
"""Tool for running an agent manually (one-off execution)."""
import logging
from typing import Any
from backend.data.graph import get_graph
from backend.data.model import CredentialsMetaInput
from backend.executor import utils as execution_utils
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.server.v2.chat.tools.base import BaseTool
from backend.server.v2.chat.tools.get_required_setup_info import (
GetRequiredSetupInfoTool,
)
from backend.server.v2.chat.tools.models import (
ErrorResponse,
ExecutionStartedResponse,
SetupInfo,
SetupRequirementsResponse,
ToolResponseBase,
)
from backend.server.v2.library import db as library_db
from backend.server.v2.library import model as library_model
logger = logging.getLogger(__name__)
class RunAgentTool(BaseTool):
    """Tool for executing an agent manually with immediate results."""

    @property
    def name(self) -> str:
        # Identifier the LLM uses to invoke this tool.
        return "run_agent"

    @property
    def description(self) -> str:
        # Prompt-facing description; instructs the model to resolve required
        # inputs via get_agent_details before calling run_agent.
        return """Run an agent immediately (one-off manual execution).
IMPORTANT: Before calling this tool, you MUST first call get_agent_details to determine what inputs are required.
The 'inputs' parameter must be a dictionary containing ALL required input values identified by get_agent_details.
Example: If get_agent_details shows required inputs 'search_query' and 'max_results', you must pass:
inputs={"search_query": "user's query", "max_results": 10}"""

    @property
    def parameters(self) -> dict[str, Any]:
        # JSON Schema for the tool-call arguments.
        return {
            "type": "object",
            "properties": {
                "username_agent_slug": {
                    "type": "string",
                    "description": "The ID of the agent to run (graph ID or marketplace slug)",
                },
                "inputs": {
                    "type": "object",
                    "description": 'REQUIRED: Dictionary of input values. Must include ALL required inputs from get_agent_details. Format: {"input_name": value}',
                    "additionalProperties": True,
                },
            },
            "required": ["username_agent_slug"],
        }

    @property
    def requires_auth(self) -> bool:
        """This tool requires authentication."""
        return True

    async def _execute(
        self,
        user_id: str | None,
        session_id: str,
        **kwargs,
    ) -> ToolResponseBase:
        """Execute an agent manually.

        Flow: verify readiness via GetRequiredSetupInfoTool, resolve the
        graph, ensure a library agent exists, match the user's credentials
        to the graph's requirements, then enqueue the execution.

        Args:
            user_id: Authenticated user ID
            session_id: Chat session ID
            **kwargs: Execution parameters

        Returns:
            JSON formatted execution result
        """
        assert (
            user_id is not None
        ), "User ID is required to run an agent. Superclass enforces authentication."
        username_agent_slug = kwargs.get("username_agent_slug", "").strip()
        inputs = kwargs.get("inputs", {})
        # Call _execute directly since we're calling internally from another tool
        response = await GetRequiredSetupInfoTool()._execute(
            user_id, session_id, **kwargs
        )
        if not isinstance(response, SetupRequirementsResponse):
            return ErrorResponse(
                message="Failed to get required setup information",
                session_id=session_id,
            )
        setup_info = SetupInfo.model_validate(response.setup_info)
        if not setup_info.user_readiness.ready_to_run:
            # NOTE(review): "Requirments" below is a typo in a model-visible
            # message; left untouched because this pass is comment-only.
            return ErrorResponse(
                message=f"User is not ready to run the agent. User Readiness: {setup_info.user_readiness.model_dump_json()} Requirments: {setup_info.requirements}",
                session_id=session_id,
            )
        # Get the graph using the graph_id and graph_version from the setup response
        if not response.graph_id or not response.graph_version:
            return ErrorResponse(
                message=f"Graph information not available for {username_agent_slug}",
                session_id=session_id,
            )
        graph = await get_graph(
            graph_id=response.graph_id,
            version=response.graph_version,
            user_id=None,  # Public access for store graphs
            include_subgraphs=True,
        )
        if not graph:
            return ErrorResponse(
                message=f"Graph {username_agent_slug} ({response.graph_id}v{response.graph_version}) not found",
                session_id=session_id,
            )
        # Check if we already have a library agent for this graph
        existing_library_agent = await library_db.get_library_agent_by_graph_id(
            graph_id=graph.id, user_id=user_id
        )
        if not existing_library_agent:
            # Now we need to add the graph to the users library
            library_agents: list[library_model.LibraryAgent] = (
                await library_db.create_library_agent(
                    graph=graph,
                    user_id=user_id,
                    create_library_agents_for_sub_graphs=False,
                )
            )
            assert len(library_agents) == 1, "Expected 1 library agent to be created"
            library_agent = library_agents[0]
        else:
            library_agent = existing_library_agent
        # Build credentials mapping for the graph
        graph_credentials_inputs: dict[str, CredentialsMetaInput] = {}
        # Get aggregated credentials requirements from the graph
        aggregated_creds = graph.aggregate_credentials_inputs()
        logger.debug(
            f"Matching credentials for graph {graph.id}: {len(aggregated_creds)} required"
        )
        if aggregated_creds:
            # Get all available credentials for the user
            creds_manager = IntegrationCredentialsManager()
            available_creds = await creds_manager.store.get_all_creds(user_id)
            # Track unmatched credentials for error reporting
            missing_creds: list[str] = []
            # For each required credential field, find a matching user credential
            # field_info.provider is a frozenset because aggregate_credentials_inputs()
            # combines requirements from multiple nodes. A credential matches if its
            # provider is in the set of acceptable providers.
            for credential_field_name, (
                credential_requirements,
                _node_fields,
            ) in aggregated_creds.items():
                # Find first matching credential by provider and type
                matching_cred = next(
                    (
                        cred
                        for cred in available_creds
                        if cred.provider in credential_requirements.provider
                        and cred.type in credential_requirements.supported_types
                    ),
                    None,
                )
                if matching_cred:
                    # Use Pydantic validation to ensure type safety
                    try:
                        graph_credentials_inputs[credential_field_name] = (
                            CredentialsMetaInput(
                                id=matching_cred.id,
                                provider=matching_cred.provider,  # type: ignore
                                type=matching_cred.type,
                                title=matching_cred.title,
                            )
                        )
                    except Exception as e:
                        # A failed validation is reported like a missing
                        # credential so the user gets one consolidated error.
                        logger.error(
                            f"Failed to create CredentialsMetaInput for field '{credential_field_name}': "
                            f"provider={matching_cred.provider}, type={matching_cred.type}, "
                            f"credential_id={matching_cred.id}",
                            exc_info=True,
                        )
                        missing_creds.append(
                            f"{credential_field_name} (validation failed: {e})"
                        )
                else:
                    missing_creds.append(
                        f"{credential_field_name} "
                        f"(requires provider in {list(credential_requirements.provider)}, "
                        f"type in {list(credential_requirements.supported_types)})"
                    )
            # Fail fast if any required credentials are missing
            if missing_creds:
                logger.warning(
                    f"Cannot execute agent - missing credentials: {missing_creds}"
                )
                return ErrorResponse(
                    message=f"Cannot execute agent: missing {len(missing_creds)} required credential(s). You need to call the get_required_setup_info tool to setup the credentials."
                    f"Please set up the following credentials: {', '.join(missing_creds)}",
                    session_id=session_id,
                    details={"missing_credentials": missing_creds},
                )
            logger.info(
                f"Credential matching complete: {len(graph_credentials_inputs)}/{len(aggregated_creds)} matched"
            )
        # At this point we know the user is ready to run the agent
        # So we can execute the agent
        execution = await execution_utils.add_graph_execution(
            graph_id=library_agent.graph_id,
            user_id=user_id,
            inputs=inputs,
            graph_credentials_inputs=graph_credentials_inputs,
        )
        return ExecutionStartedResponse(
            message="Agent execution successfully started. Do not run this tool again unless specifically asked to run the agent again.",
            session_id=session_id,
            execution_id=execution.id,
            graph_id=library_agent.graph_id,
            graph_name=library_agent.name,
        )

View File

@@ -0,0 +1,151 @@
import uuid
import orjson
import pytest
from backend.server.v2.chat.tools._test_data import setup_llm_test_data, setup_test_data
from backend.server.v2.chat.tools.run_agent import RunAgentTool
# This is so the formatter doesn't remove the fixture imports
setup_llm_test_data = setup_llm_test_data
setup_test_data = setup_test_data
@pytest.mark.asyncio(scope="session")
async def test_run_agent(setup_test_data):
    """Running an approved agent with valid inputs should start an execution."""
    user = setup_test_data["user"]
    graph = setup_test_data["graph"]
    store_submission = setup_test_data["store_submission"]

    # Marketplace agents are addressed as "<username>/<slug>".
    slug = f"{user.email.split('@')[0]}/{store_submission.slug}"

    response = await RunAgentTool().execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=slug,
        inputs={"test_input": "Hello World"},
    )

    assert response is not None
    assert hasattr(response, "result")
    assert isinstance(response.result, str)

    payload = orjson.loads(response.result)
    # An execution record for the fixture graph must have been created.
    assert "execution_id" in payload
    assert "graph_id" in payload
    assert payload["graph_id"] == graph.id
    assert "graph_name" in payload
    assert payload["graph_name"] == "Test Agent"
@pytest.mark.asyncio(scope="session")
async def test_run_agent_missing_inputs(setup_test_data):
    """Omitting required inputs should produce an error-style payload."""
    user = setup_test_data["user"]
    store_submission = setup_test_data["store_submission"]

    # Marketplace agents are addressed as "<username>/<slug>".
    slug = f"{user.email.split('@')[0]}/{store_submission.slug}"

    # Deliberately leave out the required input value.
    response = await RunAgentTool().execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=slug,
        inputs={},
    )

    # The tool should respond with an ErrorResponse-style payload.
    assert response is not None
    assert hasattr(response, "result")
    assert isinstance(response.result, str)

    payload = orjson.loads(response.result)
    assert "message" in payload
@pytest.mark.asyncio(scope="session")
async def test_run_agent_invalid_agent_id(setup_test_data):
    """An unknown agent slug should yield a not-found/failed message."""
    user = setup_test_data["user"]

    response = await RunAgentTool().execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug="invalid/agent-id",
        inputs={"test_input": "Hello World"},
    )

    assert response is not None
    assert hasattr(response, "result")
    assert isinstance(response.result, str)

    payload = orjson.loads(response.result)
    assert "message" in payload
    # The message should signal that setup failed or the agent is missing.
    lowered = payload["message"].lower()
    assert "not found" in lowered or "failed" in lowered
@pytest.mark.asyncio(scope="session")
async def test_run_agent_with_llm_credentials(setup_llm_test_data):
    """An agent requiring LLM credentials should run when credentials exist."""
    user = setup_llm_test_data["user"]
    graph = setup_llm_test_data["graph"]
    store_submission = setup_llm_test_data["store_submission"]

    # Marketplace agents are addressed as "<username>/<slug>".
    slug = f"{user.email.split('@')[0]}/{store_submission.slug}"

    response = await RunAgentTool().execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=slug,
        inputs={"user_prompt": "What is 2+2?"},
    )

    assert response is not None
    assert hasattr(response, "result")
    assert isinstance(response.result, str)

    payload = orjson.loads(response.result)
    # Execution should start because the fixture provides the credentials.
    assert "execution_id" in payload
    assert "graph_id" in payload
    assert payload["graph_id"] == graph.id
    assert "graph_name" in payload
    assert payload["graph_name"] == "LLM Test Agent"

View File

@@ -0,0 +1,376 @@
"""Tool for setting up an agent with credentials and configuration."""
import logging
from typing import Any
from pydantic import BaseModel
from backend.data.graph import get_graph
from backend.data.model import CredentialsMetaInput
from backend.data.user import get_user_by_id
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.server.v2.chat.tools.get_required_setup_info import (
GetRequiredSetupInfoTool,
)
from backend.server.v2.chat.tools.models import (
ExecutionStartedResponse,
SetupInfo,
SetupRequirementsResponse,
)
from backend.server.v2.library import db as library_db
from backend.server.v2.library import model as library_model
from backend.util.clients import get_scheduler_client
from backend.util.timezone_utils import (
convert_utc_time_to_user_timezone,
get_user_timezone_or_utc,
)
from .base import BaseTool
from .models import ErrorResponse, ToolResponseBase
logger = logging.getLogger(__name__)
class AgentDetails(BaseModel):
    """Resolved agent metadata needed to schedule a run.

    NOTE(review): a different ``AgentDetails`` also exists in
    chat.tools.models — confirm the duplication is intentional.
    """

    graph_name: str
    graph_id: str
    graph_version: int
    # Cron expression suggested by the graph author, if any.
    recommended_schedule_cron: str | None
    # Credential schema field name -> credential metadata required by the graph.
    required_credentials: dict[str, CredentialsMetaInput]
class SetupAgentTool(BaseTool):
    """Tool for setting up an agent with scheduled execution or webhook triggers."""

    @property
    def name(self) -> str:
        # NOTE(review): class is SetupAgentTool but the exposed tool name is
        # "schedule_agent"; only schedule setup is implemented below.
        return "schedule_agent"

    @property
    def description(self) -> str:
        # Prompt-facing description shown to the LLM.
        return """Set up an agent with credentials and configure it for scheduled execution or webhook triggers.
IMPORTANT: Before calling this tool, you MUST first call get_agent_details to determine what inputs are required.
For SCHEDULED execution:
- Cron format: "minute hour day month weekday" (e.g., "0 9 * * 1-5" = 9am weekdays)
- Common patterns: "0 * * * *" (hourly), "0 0 * * *" (daily at midnight), "0 9 * * 1" (Mondays at 9am)
- Timezone: Use IANA timezone names like "America/New_York", "Europe/London", "Asia/Tokyo"
- The 'inputs' parameter must contain ALL required inputs from get_agent_details as a dictionary
For WEBHOOK triggers:
- The agent will be triggered by external events
- Still requires all input values from get_agent_details"""

    @property
    def parameters(self) -> dict[str, Any]:
        # JSON Schema for the tool-call arguments.
        return {
            "type": "object",
            "properties": {
                "username_agent_slug": {
                    "type": "string",
                    "description": "The marketplace agent slug (e.g., 'username/agent-name')",
                },
                "setup_type": {
                    "type": "string",
                    "enum": ["schedule", "webhook"],
                    "description": "Type of setup: 'schedule' for cron, 'webhook' for triggers.",
                },
                "name": {
                    "type": "string",
                    "description": "Name for this setup/schedule (e.g., 'Daily Report', 'Weekly Summary')",
                },
                "description": {
                    "type": "string",
                    "description": "Description of this setup",
                },
                "cron": {
                    "type": "string",
                    "description": "Cron expression (5 fields: minute hour day month weekday). Examples: '0 9 * * 1-5' (9am weekdays), '*/30 * * * *' (every 30 min)",
                },
                "timezone": {
                    "type": "string",
                    "description": "IANA timezone (e.g., 'America/New_York', 'Europe/London', 'UTC'). Defaults to UTC if not specified.",
                },
                "inputs": {
                    "type": "object",
                    "description": 'REQUIRED: Dictionary with ALL required inputs from get_agent_details. Format: {"input_name": value}',
                    "additionalProperties": True,
                },
                "webhook_config": {
                    "type": "object",
                    "description": "Webhook configuration (required if setup_type is 'webhook')",
                    "additionalProperties": True,
                },
            },
            "required": ["username_agent_slug", "setup_type"],
        }

    @property
    def requires_auth(self) -> bool:
        """This tool requires authentication."""
        return True

    async def _execute(
        self,
        user_id: str | None,
        session_id: str,
        **kwargs,
    ) -> ToolResponseBase:
        """Set up an agent with configuration.

        Flow: validate schedule arguments, ensure the agent is in the user's
        library (via _get_or_add_library_agent), then register the cron
        schedule (via _add_graph_execution_schedule).

        Args:
            user_id: Authenticated user ID
            session_id: Chat session ID
            **kwargs: Setup parameters

        Returns:
            JSON formatted setup result
        """
        assert (
            user_id is not None
        ), "User ID is required to run an agent. Superclass enforces authentication."
        setup_type = kwargs.get("setup_type", "schedule").strip()
        if setup_type != "schedule":
            # Webhook setup is advertised in the schema but not implemented yet.
            return ErrorResponse(
                message="Only schedule setup is supported at this time",
                session_id=session_id,
            )
        else:
            cron = kwargs.get("cron", "").strip()
            cron_name = kwargs.get("name", "").strip()
            if not cron or not cron_name:
                return ErrorResponse(
                    message="Cron and name are required for schedule setup",
                    session_id=session_id,
                )
        username_agent_slug = kwargs.get("username_agent_slug", "").strip()
        inputs = kwargs.get("inputs", {})
        # Verifies readiness and ensures a library agent exists; returns
        # AgentDetails on success, ErrorResponse on failure.
        library_agent = await self._get_or_add_library_agent(
            username_agent_slug, user_id, session_id, **kwargs
        )
        if not isinstance(library_agent, AgentDetails):
            # library agent is an ErrorResponse
            return library_agent
        # At this point we know the user is ready to run the agent
        # Create the schedule for the agent
        # NOTE(review): local re-import shadows the module-level library_db
        # import; appears redundant — confirm before removing.
        from backend.server.v2.library import db as library_db
        # Get the library agent model for scheduling
        lib_agent = await library_db.get_library_agent_by_graph_id(
            graph_id=library_agent.graph_id, user_id=user_id
        )
        if not lib_agent:
            return ErrorResponse(
                message=f"Library agent not found for graph {library_agent.graph_id}",
                session_id=session_id,
            )
        return await self._add_graph_execution_schedule(
            library_agent=lib_agent,
            user_id=user_id,
            cron=cron,
            name=cron_name,
            inputs=inputs,
            credentials=library_agent.required_credentials,
            session_id=session_id,
        )

    async def _add_graph_execution_schedule(
        self,
        library_agent: library_model.LibraryAgent,
        user_id: str,
        cron: str,
        name: str,
        inputs: dict[str, Any],
        credentials: dict[str, CredentialsMetaInput],
        session_id: str,
        **kwargs,
    ) -> ExecutionStartedResponse | ErrorResponse:
        """Resolve user credentials and register a cron schedule for the agent."""
        # Use timezone from request if provided, otherwise fetch from user profile
        user = await get_user_by_id(user_id)
        user_timezone = get_user_timezone_or_utc(user.timezone if user else None)
        # Map required credentials (schema field names) to actual user credential IDs
        # credentials param contains CredentialsMetaInput with schema field names as keys
        # We need to find the user's actual credentials that match the provider/type
        creds_manager = IntegrationCredentialsManager()
        user_credentials = await creds_manager.store.get_all_creds(user_id)
        # Build a mapping from schema field name -> actual credential ID
        resolved_credentials: dict[str, CredentialsMetaInput] = {}
        missing_credentials: list[str] = []
        for field_name, cred_meta in credentials.items():
            # Find a matching credential from the user's credentials
            matching_cred = next(
                (
                    c
                    for c in user_credentials
                    if c.provider == cred_meta.provider and c.type == cred_meta.type
                ),
                None,
            )
            if matching_cred:
                # Use the actual credential ID instead of the schema field name
                # Create a new CredentialsMetaInput with the actual credential ID
                # but keep the same provider/type from the original meta
                resolved_credentials[field_name] = CredentialsMetaInput(
                    id=matching_cred.id,
                    provider=cred_meta.provider,
                    type=cred_meta.type,
                    title=cred_meta.title,
                )
            else:
                missing_credentials.append(
                    f"{cred_meta.title} ({cred_meta.provider}/{cred_meta.type})"
                )
        # Fail before contacting the scheduler if anything is unresolved.
        if missing_credentials:
            return ErrorResponse(
                message=f"Cannot execute agent: missing {len(missing_credentials)} required credential(s). You need to call the get_required_setup_info tool to setup the credentials.",
                session_id=session_id,
            )
        result = await get_scheduler_client().add_execution_schedule(
            user_id=user_id,
            graph_id=library_agent.graph_id,
            graph_version=library_agent.graph_version,
            name=name,
            cron=cron,
            input_data=inputs,
            input_credentials=resolved_credentials,
            user_timezone=user_timezone,
        )
        # Convert the next_run_time back to user timezone for display
        if result.next_run_time:
            result.next_run_time = convert_utc_time_to_user_timezone(
                result.next_run_time, user_timezone
            )
        return ExecutionStartedResponse(
            message="Agent execution successfully scheduled. Do not run this tool again unless specifically asked to run the agent again.",
            session_id=session_id,
            execution_id=result.id,
            graph_id=library_agent.graph_id,
            graph_name=library_agent.name,
        )

    async def _get_or_add_library_agent(
        self, agent_id: str, user_id: str, session_id: str, **kwargs
    ) -> AgentDetails | ErrorResponse:
        """Check readiness, resolve the graph, and ensure a library agent exists."""
        # Call _execute directly since we're calling internally from another tool
        response = await GetRequiredSetupInfoTool()._execute(
            user_id, session_id, **kwargs
        )
        if not isinstance(response, SetupRequirementsResponse):
            return ErrorResponse(
                message="Failed to get required setup information",
                session_id=session_id,
            )
        setup_info = SetupInfo.model_validate(response.setup_info)
        if not setup_info.user_readiness.ready_to_run:
            # NOTE(review): "Requirments" below is a typo in a model-visible
            # message; left untouched because this pass is comment-only.
            return ErrorResponse(
                message=f"User is not ready to run the agent. User Readiness: {setup_info.user_readiness.model_dump_json()} Requirments: {setup_info.requirements}",
                session_id=session_id,
            )
        # Get the graph using the graph_id and graph_version from the setup response
        if not response.graph_id or not response.graph_version:
            return ErrorResponse(
                message=f"Graph information not available for {agent_id}",
                session_id=session_id,
            )
        graph = await get_graph(
            graph_id=response.graph_id,
            version=response.graph_version,
            user_id=None,  # Public access for store graphs
            include_subgraphs=True,
        )
        if not graph:
            return ErrorResponse(
                message=f"Graph {agent_id} ({response.graph_id}v{response.graph_version}) not found",
                session_id=session_id,
            )
        recommended_schedule_cron = graph.recommended_schedule_cron
        # Extract credentials from the JSON schema properties
        credentials_input_schema = graph.credentials_input_schema
        required_credentials: dict[str, CredentialsMetaInput] = {}
        if (
            isinstance(credentials_input_schema, dict)
            and "properties" in credentials_input_schema
        ):
            for cred_name, cred_schema in credentials_input_schema[
                "properties"
            ].items():
                # Get provider from credentials_provider array or properties.provider.const
                provider = "unknown"
                if (
                    "credentials_provider" in cred_schema
                    and cred_schema["credentials_provider"]
                ):
                    provider = cred_schema["credentials_provider"][0]
                elif (
                    "properties" in cred_schema
                    and "provider" in cred_schema["properties"]
                ):
                    provider = cred_schema["properties"]["provider"].get(
                        "const", "unknown"
                    )
                # Get type from credentials_types array or properties.type.const
                cred_type = "api_key"  # Default
                if (
                    "credentials_types" in cred_schema
                    and cred_schema["credentials_types"]
                ):
                    cred_type = cred_schema["credentials_types"][0]
                elif (
                    "properties" in cred_schema and "type" in cred_schema["properties"]
                ):
                    cred_type = cred_schema["properties"]["type"].get(
                        "const", "api_key"
                    )
                # id is the schema field name here; it is swapped for the real
                # credential id later in _add_graph_execution_schedule.
                required_credentials[cred_name] = CredentialsMetaInput(
                    id=cred_name,
                    title=cred_schema.get("title", cred_name),
                    provider=provider,  # type: ignore
                    type=cred_type,
                )
        # Check if we already have a library agent for this graph
        existing_library_agent = await library_db.get_library_agent_by_graph_id(
            graph_id=graph.id, user_id=user_id
        )
        if not existing_library_agent:
            # Now we need to add the graph to the users library
            library_agents: list[library_model.LibraryAgent] = (
                await library_db.create_library_agent(
                    graph=graph,
                    user_id=user_id,
                    create_library_agents_for_sub_graphs=False,
                )
            )
            assert len(library_agents) == 1, "Expected 1 library agent to be created"
            library_agent = library_agents[0]
        else:
            library_agent = existing_library_agent
        return AgentDetails(
            graph_name=graph.name,
            graph_id=library_agent.graph_id,
            graph_version=library_agent.graph_version,
            recommended_schedule_cron=recommended_schedule_cron,
            required_credentials=required_credentials,
        )

View File

@@ -0,0 +1,394 @@
import uuid
import orjson
import pytest
from backend.server.v2.chat.tools._test_data import setup_llm_test_data, setup_test_data
from backend.server.v2.chat.tools.setup_agent import SetupAgentTool
from backend.util.clients import get_scheduler_client
# This is so the formatter doesn't remove the fixture imports
setup_llm_test_data = setup_llm_test_data
setup_test_data = setup_test_data
@pytest.mark.asyncio(scope="session")
async def test_setup_agent_missing_cron(setup_test_data):
    """Schedule setup without a cron expression (and name) must return an error."""
    user = setup_test_data["user"]
    submission = setup_test_data["store_submission"]

    # Marketplace agents are addressed as "<username>/<slug>".
    slug_ref = f"{user.email.split('@')[0]}/{submission.slug}"

    response = await SetupAgentTool().execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=slug_ref,
        setup_type="schedule",
        inputs={"test_input": "Hello World"},
        # Deliberately omitted: cron and name
    )

    assert response is not None
    assert hasattr(response, "result")
    assert isinstance(response.result, str)

    payload = orjson.loads(response.result)
    assert "message" in payload
    lowered = payload["message"].lower()
    # The error should mention at least one of the missing fields.
    assert "cron" in lowered or "name" in lowered
@pytest.mark.asyncio(scope="session")
async def test_setup_agent_webhook_not_supported(setup_test_data):
    """Webhook setup is not implemented; the tool must report that only schedules are supported."""
    user = setup_test_data["user"]
    submission = setup_test_data["store_submission"]

    # Marketplace agents are addressed as "<username>/<slug>".
    slug_ref = f"{user.email.split('@')[0]}/{submission.slug}"

    response = await SetupAgentTool().execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=slug_ref,
        setup_type="webhook",
        inputs={"test_input": "Hello World"},
    )

    assert response is not None
    assert hasattr(response, "result")
    assert isinstance(response.result, str)

    payload = orjson.loads(response.result)
    assert "message" in payload
    lowered = payload["message"].lower()
    # The rejection should explain that only schedule setup is supported.
    assert "schedule" in lowered and "supported" in lowered
@pytest.mark.asyncio(scope="session")
@pytest.mark.skip(reason="Requires scheduler service to be running")
async def test_setup_agent_schedule_success(setup_test_data):
    """Test successfully setting up an agent with a schedule.

    Integration test: exercises the full schedule-setup path, so it is
    skipped unless the scheduler service is available.
    """
    # Use test data from fixture
    user = setup_test_data["user"]
    store_submission = setup_test_data["store_submission"]
    # Create the tool instance
    tool = SetupAgentTool()
    # Build the proper marketplace agent_id format ("<username>/<slug>")
    agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
    # Execute with schedule setup
    response = await tool.execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=agent_marketplace_id,
        setup_type="schedule",
        name="Test Schedule",
        description="Test schedule description",
        cron="0 9 * * *",  # Daily at 9am
        timezone="UTC",
        inputs={"test_input": "Hello World"},
    )
    # Verify the response
    assert response is not None
    assert hasattr(response, "result")
    # Parse the result JSON
    assert isinstance(response.result, str)
    result_data = orjson.loads(response.result)
    # Check for execution started
    assert "message" in result_data
    assert "execution_id" in result_data
    assert "graph_id" in result_data
    assert "graph_name" in result_data
@pytest.mark.asyncio(scope="session")
@pytest.mark.skip(reason="Requires scheduler service to be running")
async def test_setup_agent_with_credentials(setup_llm_test_data):
    """Test setting up an agent that requires credentials.

    Uses the LLM fixture, whose user already has OpenAI credentials stored,
    so the schedule setup should succeed. Skipped unless the scheduler
    service is running.
    """
    # Use test data from fixture (includes OpenAI credentials)
    user = setup_llm_test_data["user"]
    store_submission = setup_llm_test_data["store_submission"]
    # Create the tool instance
    tool = SetupAgentTool()
    # Build the proper marketplace agent_id format ("<username>/<slug>")
    agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
    # Execute with schedule setup
    response = await tool.execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=agent_marketplace_id,
        setup_type="schedule",
        name="LLM Schedule",
        description="LLM schedule with credentials",
        cron="*/30 * * * *",  # Every 30 minutes
        timezone="America/New_York",
        inputs={"user_prompt": "What is 2+2?"},
    )
    # Verify the response
    assert response is not None
    assert hasattr(response, "result")
    # Parse the result JSON
    assert isinstance(response.result, str)
    result_data = orjson.loads(response.result)
    # Should succeed since user has OpenAI credentials
    assert "execution_id" in result_data
    assert "graph_id" in result_data
@pytest.mark.asyncio(scope="session")
async def test_setup_agent_invalid_agent(setup_test_data):
    """Requesting a marketplace agent that does not exist must yield an error response."""
    user = setup_test_data["user"]

    response = await SetupAgentTool().execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug="nonexistent/agent",
        setup_type="schedule",
        name="Test Schedule",
        cron="0 9 * * *",
        inputs={},
    )

    assert response is not None
    assert hasattr(response, "result")
    assert isinstance(response.result, str)

    payload = orjson.loads(response.result)
    assert "message" in payload
    # The lookup should fail; accept any of the common failure phrasings.
    lowered = payload["message"].lower()
    assert any(phrase in lowered for phrase in ("not found", "failed", "error"))
@pytest.mark.asyncio(scope="session")
@pytest.mark.skip(reason="Requires scheduler service to be running")
async def test_setup_agent_schedule_created_in_scheduler(setup_test_data):
    """Test that the schedule is actually created in the scheduler service.

    End-to-end check: after the tool reports success, the schedule must be
    retrievable from the scheduler with the same cron and graph. The schedule
    is deleted at the end to keep the scheduler clean for other tests.
    """
    # Use test data from fixture
    user = setup_test_data["user"]
    graph = setup_test_data["graph"]
    store_submission = setup_test_data["store_submission"]
    # Create the tool instance
    tool = SetupAgentTool()
    # Build the proper marketplace agent_id format ("<username>/<slug>")
    agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
    # Create a unique schedule name to identify this test
    schedule_name = f"Test Schedule {uuid.uuid4()}"
    # Execute with schedule setup
    response = await tool.execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=agent_marketplace_id,
        setup_type="schedule",
        name=schedule_name,
        description="Test schedule to verify credentials",
        cron="0 0 * * *",  # Daily at midnight
        timezone="UTC",
        inputs={"test_input": "Scheduled execution"},
    )
    # Verify the response
    assert response is not None
    assert isinstance(response.result, str)
    result_data = orjson.loads(response.result)
    assert "execution_id" in result_data
    # Now verify the schedule was created in the scheduler service
    scheduler = get_scheduler_client()
    schedules = await scheduler.get_execution_schedules(graph.id, user.id)
    # Find our schedule (the uuid suffix makes the name unique per run)
    our_schedule = None
    for schedule in schedules:
        if schedule.name == schedule_name:
            our_schedule = schedule
            break
    assert (
        our_schedule is not None
    ), f"Schedule '{schedule_name}' not found in scheduler"
    assert our_schedule.cron == "0 0 * * *"
    assert our_schedule.graph_id == graph.id
    # Clean up: delete the schedule
    await scheduler.delete_schedule(our_schedule.id, user_id=user.id)
@pytest.mark.asyncio(scope="session")
@pytest.mark.skip(reason="Requires scheduler service to be running")
async def test_setup_agent_schedule_with_credentials_triggered(setup_llm_test_data):
    """Test that credentials are properly passed when a schedule is triggered.

    Verifies that the stored schedule carries both the execution inputs and
    the user's OpenAI credentials so a triggered run can authenticate.
    Skipped unless the scheduler service is running.
    """
    # Use test data from fixture (includes OpenAI credentials)
    user = setup_llm_test_data["user"]
    graph = setup_llm_test_data["graph"]
    store_submission = setup_llm_test_data["store_submission"]
    # Create the tool instance
    tool = SetupAgentTool()
    # Build the proper marketplace agent_id format ("<username>/<slug>")
    agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
    # Create a unique schedule name
    schedule_name = f"LLM Test Schedule {uuid.uuid4()}"
    # Execute with schedule setup
    response = await tool.execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=agent_marketplace_id,
        setup_type="schedule",
        name=schedule_name,
        description="Test LLM schedule with credentials",
        cron="* * * * *",  # Every minute (for testing)
        timezone="UTC",
        inputs={"user_prompt": "Test prompt for credentials"},
    )
    # Verify the response
    assert response is not None
    assert isinstance(response.result, str)
    result_data = orjson.loads(response.result)
    assert "execution_id" in result_data
    # Get the schedule from the scheduler
    scheduler = get_scheduler_client()
    schedules = await scheduler.get_execution_schedules(graph.id, user.id)
    # Find our schedule (unique name makes the first match the right one)
    our_schedule = None
    for schedule in schedules:
        if schedule.name == schedule_name:
            our_schedule = schedule
            break
    assert our_schedule is not None, f"Schedule '{schedule_name}' not found"
    # Verify the schedule has the correct input data
    assert our_schedule.input_data is not None
    assert "user_prompt" in our_schedule.input_data
    assert our_schedule.input_data["user_prompt"] == "Test prompt for credentials"
    # Verify credentials are stored in the schedule
    # The credentials should be stored as input_credentials
    assert our_schedule.input_credentials is not None
    # The credentials should contain the OpenAI provider credential
    # Note: The exact structure depends on how credentials are serialized
    # We're checking that credentials data exists and has the right provider
    # NOTE(review): this `if` is redundant after the assert above; kept as-is.
    if our_schedule.input_credentials:
        # Convert to dict if needed
        creds_dict = (
            our_schedule.input_credentials
            if isinstance(our_schedule.input_credentials, dict)
            else {}
        )
        # Check if any credential has openai provider
        has_openai_cred = False
        for cred_key, cred_value in creds_dict.items():
            if isinstance(cred_value, dict):
                if cred_value.get("provider") == "openai":
                    has_openai_cred = True
                    # Verify the credential has the expected structure
                    assert "id" in cred_value or "api_key" in cred_value
                    break
        # If we have LLM block, we should have stored credentials
        assert has_openai_cred, "OpenAI credentials not found in schedule"
    # Clean up: delete the schedule
    await scheduler.delete_schedule(our_schedule.id, user_id=user.id)
@pytest.mark.asyncio(scope="session")
@pytest.mark.skip(reason="Requires scheduler service to be running")
async def test_setup_agent_creates_library_agent(setup_test_data):
    """Test that setup creates a library agent for the user.

    After a successful setup, the graph must be registered in the user's
    library. Skipped unless the scheduler service is running.
    """
    # Use test data from fixture
    user = setup_test_data["user"]
    graph = setup_test_data["graph"]
    store_submission = setup_test_data["store_submission"]
    # Create the tool instance
    tool = SetupAgentTool()
    # Build the proper marketplace agent_id format ("<username>/<slug>")
    agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
    # Execute with schedule setup
    response = await tool.execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=agent_marketplace_id,
        setup_type="schedule",
        name="Library Test Schedule",
        cron="0 12 * * *",  # Daily at noon
        inputs={"test_input": "Library test"},
    )
    # Verify the response
    assert response is not None
    assert isinstance(response.result, str)
    result_data = orjson.loads(response.result)
    assert "graph_id" in result_data
    assert result_data["graph_id"] == graph.id
    # Verify library agent was created
    from backend.server.v2.library import db as library_db

    library_agent = await library_db.get_library_agent_by_graph_id(
        graph_id=graph.id, user_id=user.id
    )
    assert library_agent is not None
    assert library_agent.graph_id == graph.id
    # NOTE(review): "Test Agent" presumably matches the fixture's graph name —
    # confirm against setup_test_data if the fixture ever changes.
    assert library_agent.name == "Test Agent"

View File

@@ -20,6 +20,8 @@ from functools import wraps
from typing import Any, Callable, ParamSpec, Protocol, TypeVar, cast, runtime_checkable
from redis import ConnectionPool, Redis
from redis.asyncio import ConnectionPool as AsyncConnectionPool
from redis.asyncio import Redis as AsyncRedis
from backend.util.retry import conn_retry
from backend.util.settings import Settings
@@ -61,6 +63,80 @@ def _get_cache_pool() -> ConnectionPool:
redis = Redis(connection_pool=_get_cache_pool())
_async_cache_pools: dict[asyncio.AbstractEventLoop, AsyncConnectionPool] = {}
@conn_retry("Redis", "Acquiring async cache connection pool")
async def _get_async_cache_pool() -> AsyncConnectionPool:
    """Get or create an async connection pool for the current event loop.

    Pools are keyed by event loop because redis-py async pools are bound to
    the loop they were created on.

    Raises:
        RuntimeError: If called outside of a running event loop.
    """
    global _async_cache_pools
    # asyncio.get_running_loop() already raises RuntimeError when no loop is
    # running, so no extra try/except wrapper is needed.
    loop = asyncio.get_running_loop()
    if loop not in _async_cache_pools:
        # No await between the membership check and the insert, so this
        # check-then-set is safe within a single event loop.
        # NOTE(review): entries are never evicted when a loop is closed, so
        # processes that create many loops will retain stale pools.
        _async_cache_pools[loop] = AsyncConnectionPool(
            host=settings.config.redis_host,
            port=settings.config.redis_port,
            password=settings.config.redis_password or None,
            decode_responses=False,  # Binary mode for pickle
            max_connections=50,
            socket_keepalive=True,
            socket_connect_timeout=5,
            retry_on_timeout=True,
        )
    return _async_cache_pools[loop]
# Store async Redis clients per event loop to avoid event loop conflicts
_async_redis_clients: dict[asyncio.AbstractEventLoop, AsyncRedis] = {}
_async_redis_locks: dict[asyncio.AbstractEventLoop, asyncio.Lock] = {}
async def get_async_redis() -> AsyncRedis:
    """Get or create an async Redis client for the current event loop.

    Clients are cached per event loop. Creation is guarded by a per-loop
    lock with double-checked locking so concurrent awaiters build only one
    client.

    Raises:
        RuntimeError: If called outside of a running event loop.
    """
    global _async_redis_clients, _async_redis_locks
    # asyncio.get_running_loop() already raises RuntimeError when there is no
    # running loop, so no extra try/except wrapper is needed.
    loop = asyncio.get_running_loop()
    # setdefault is safe here: there is no await between lookup and insert.
    lock = _async_redis_locks.setdefault(loop, asyncio.Lock())
    if loop not in _async_redis_clients:
        async with lock:
            # Double-checked locking: another awaiter may have created the
            # client while we were waiting for the lock.
            if loop not in _async_redis_clients:
                pool = await _get_async_cache_pool()
                _async_redis_clients[loop] = AsyncRedis(connection_pool=pool)
    return _async_redis_clients[loop]
# For backward compatibility, create a proxy object that lazily initializes
class AsyncRedisProxy:
    """Proxy for async Redis that lazily initializes the connection.

    Attribute access returns an async wrapper that resolves the real client
    on first use, so module import never opens a connection.
    """

    def __getattr__(self, name):
        # Called for any attribute not found on the proxy itself.
        async def async_method(*args, **kwargs):
            import inspect  # local import keeps module import time unchanged

            client = await get_async_redis()
            result = getattr(client, name)(*args, **kwargs)
            # Some redis-py client methods (e.g. pipeline(), pubsub()) return
            # plain objects rather than awaitables; awaiting those would
            # raise a TypeError, so only await actual awaitables.
            if inspect.isawaitable(result):
                return await result
            return result

        return async_method


async_redis = AsyncRedisProxy()
@dataclass
class CachedValue:

View File

@@ -98,3 +98,9 @@ class DatabaseError(Exception):
"""Raised when there is an error interacting with the database"""
pass
class RedisError(Exception):
    """Raised when there is an error interacting with Redis."""

    pass

View File

@@ -5,7 +5,8 @@ from functools import wraps
from typing import Any, Awaitable, Callable, TypeVar
import ldclient
from fastapi import HTTPException
from fastapi import HTTPException, Security
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from ldclient import Context, LDClient
from ldclient.config import Config
from typing_extensions import ParamSpec
@@ -23,6 +24,37 @@ T = TypeVar("T")
_is_initialized = False
# Optional bearer token authentication for feature flags
_optional_bearer = HTTPBearer(auto_error=False)
def _get_optional_user_id_from_auth(
    credentials: HTTPAuthorizationCredentials | None = Security(_optional_bearer),
) -> str | None:
    """Best-effort extraction of the user ID from a bearer token.

    Feature-flag dependencies use this so LaunchDarkly can target the
    authenticated user while anonymous requests remain allowed.

    Args:
        credentials: Optional HTTP bearer credentials from the request.

    Returns:
        The JWT ``sub`` claim when a valid token is present, otherwise None.
    """
    if not credentials:
        return None

    try:
        from autogpt_libs.auth.jwt_utils import parse_jwt_token

        claims = parse_jwt_token(credentials.credentials)
        return claims.get("sub")
    except Exception as e:
        # Invalid/expired tokens are treated as anonymous, not rejected.
        logger.debug(f"Auth token validation failed (anonymous access): {e}")
        return None
class Flag(str, Enum):
"""
@@ -36,6 +68,7 @@ class Flag(str, Enum):
BETA_BLOCKS = "beta-blocks"
AGENT_ACTIVITY = "agent-activity"
ENABLE_PLATFORM_PAYMENT = "enable-platform-payment"
CHAT = "chat"
def is_configured() -> bool:
@@ -252,6 +285,75 @@ def feature_flag(
return decorator
def create_feature_flag_dependency(
    flag_key: Flag,
    default: bool = False,
) -> Callable[[str | None], Awaitable[None]]:
    """
    Create a FastAPI dependency that checks a feature flag.

    This dependency automatically extracts the user_id from the JWT token
    (if present) for proper LaunchDarkly user targeting, while still
    supporting anonymous access.

    Args:
        flag_key: The Flag enum value to check
        default: Default value if flag evaluation fails

    Returns:
        An async dependency function that raises HTTPException if flag is disabled

    Example:
        router = APIRouter(
            dependencies=[Depends(create_feature_flag_dependency(Flag.CHAT))]
        )
    """

    async def check_feature_flag(
        user_id: str | None = Security(_get_optional_user_id_from_auth),
    ) -> None:
        """Check if feature flag is enabled for the user.

        The user_id is automatically injected from JWT authentication if present,
        or None for anonymous access.
        """
        # For routes that don't require authentication, use anonymous context
        check_user_id = user_id or "anonymous"
        # Check if LaunchDarkly is configured before trying to use it
        if not is_configured():
            logger.debug(
                f"LaunchDarkly not configured, using default {flag_key.value}={default}"
            )
            if not default:
                raise HTTPException(status_code=404, detail="Feature not available")
            return
        try:
            client = get_client()
            if not client.is_initialized():
                logger.debug(
                    f"LaunchDarkly not initialized, using default {flag_key.value}={default}"
                )
                if not default:
                    raise HTTPException(
                        status_code=404, detail="Feature not available"
                    )
                return
            is_enabled = await is_feature_enabled(flag_key, check_user_id, default)
            if not is_enabled:
                raise HTTPException(status_code=404, detail="Feature not available")
        except HTTPException:
            # BUGFIX: deliberate 404s raised above must not be swallowed by the
            # generic fallback below — previously a disabled flag with
            # default=True would be logged as a LaunchDarkly error and access
            # would be granted anyway.
            raise
        except Exception as e:
            # If LaunchDarkly fails for any reason, use default
            logger.warning(
                f"LaunchDarkly error for flag {flag_key.value}: {e}, using default={default}"
            )
            if not default:
                raise HTTPException(status_code=404, detail="Feature not available")

    return check_feature_flag
@contextlib.contextmanager
def mock_flag_variation(flag_key: str, return_value: Any):
"""Context manager for testing feature flags."""

View File

@@ -10,6 +10,7 @@ const nextConfig = {
"upload.wikimedia.org",
"storage.googleapis.com",
"example.com",
"ideogram.ai", // for generated images
"picsum.photos", // for placeholder images
],

View File

@@ -0,0 +1,68 @@
import { cn } from "@/lib/utils";
import { ChatInput } from "@/components/atoms/ChatInput/ChatInput";
import { MessageList } from "@/components/molecules/MessageList/MessageList";
import { QuickActionsWelcome } from "@/components/molecules/QuickActionsWelcome/QuickActionsWelcome";
import { useChatContainer } from "./useChatContainer";
import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse";
export interface ChatContainerProps {
  /** Active chat session ID, or null while the session is still being created. */
  sessionId: string | null;
  /** Messages from the backend session detail used to seed the conversation. */
  initialMessages: SessionDetailResponse["messages"];
  /** Re-fetches the session from the backend; forwarded to useChatContainer. */
  onRefreshSession: () => Promise<void>;
  /** Optional extra class names applied to the root element. */
  className?: string;
}
// Hoisted to module scope: the suggestions never change, so this avoids
// re-allocating the array on every render.
const QUICK_ACTIONS = [
  "Find agents for data analysis",
  "Show me automation agents",
  "Help me build a workflow",
  "What can you help me with?",
];

/**
 * Top-level chat surface.
 *
 * Renders a quick-action welcome screen while the conversation is empty and
 * the message list afterwards; the input bar is always visible. Sending is
 * disabled while a response is streaming or before a session exists.
 */
export function ChatContainer({
  sessionId,
  initialMessages,
  onRefreshSession,
  className,
}: ChatContainerProps) {
  const { messages, streamingChunks, isStreaming, sendMessage } =
    useChatContainer({
      sessionId,
      initialMessages,
      onRefreshSession,
    });

  return (
    <div className={cn("flex h-full flex-col", className)}>
      {/* Messages or Welcome Screen */}
      {messages.length === 0 ? (
        <QuickActionsWelcome
          title="Welcome to AutoGPT Chat"
          description="Start a conversation to discover and run AI agents."
          actions={QUICK_ACTIONS}
          onActionClick={sendMessage}
          disabled={isStreaming || !sessionId}
        />
      ) : (
        <MessageList
          messages={messages}
          streamingChunks={streamingChunks}
          isStreaming={isStreaming}
          onSendMessage={sendMessage}
          className="flex-1"
        />
      )}

      {/* Input - Always visible */}
      <div className="border-t border-zinc-200 p-4 dark:border-zinc-800">
        <ChatInput
          onSend={sendMessage}
          disabled={isStreaming || !sessionId}
          placeholder={
            sessionId ? "Type your message..." : "Creating session..."
          }
        />
      </div>
    </div>
  );
}

View File

@@ -0,0 +1,83 @@
import { toast } from "sonner";
import type { StreamChunk } from "@/hooks/useChatStream";
import type { HandlerDependencies } from "./useChatContainer.handlers";
import {
handleTextChunk,
handleTextEnded,
handleToolCallStart,
handleToolResponse,
handleLoginNeeded,
handleStreamEnd,
handleError,
} from "./useChatContainer.handlers";
/**
 * Builds the function that routes incoming stream events to their handlers.
 *
 * Keeping the routing separate from the handler implementations keeps both
 * sides small and independently testable.
 *
 * @param deps - State setters and refs shared by all handlers
 * @returns A callback suitable for passing to the streaming hook
 *
 * @example
 * ```ts
 * const dispatcher = createStreamEventDispatcher({
 *   setMessages,
 *   setStreamingChunks,
 *   streamingChunksRef,
 *   setHasTextChunks,
 *   sessionId,
 * });
 * await sendStreamMessage(sessionId, content, dispatcher);
 * ```
 */
export function createStreamEventDispatcher(
  deps: HandlerDependencies,
): (chunk: StreamChunk) => void {
  return (chunk: StreamChunk): void => {
    if (chunk.type === "text_chunk") {
      handleTextChunk(chunk, deps);
    } else if (chunk.type === "text_ended") {
      handleTextEnded(chunk, deps);
    } else if (chunk.type === "tool_call_start") {
      handleToolCallStart(chunk, deps);
    } else if (chunk.type === "tool_response") {
      handleToolResponse(chunk, deps);
    } else if (chunk.type === "login_needed" || chunk.type === "need_login") {
      handleLoginNeeded(chunk, deps);
    } else if (chunk.type === "stream_end") {
      handleStreamEnd(chunk, deps);
    } else if (chunk.type === "error") {
      handleError(chunk, deps);
      // Toast here (not inside the handler) to avoid circular dependencies.
      toast.error("Chat Error", {
        description: chunk.message || chunk.content || "An error occurred",
      });
    } else if (chunk.type === "usage") {
      // TODO: Handle usage for display
    } else {
      console.warn("Unknown stream chunk type:", chunk);
    }
  };
}

View File

@@ -0,0 +1,404 @@
import type { ChatMessageData } from "@/components/molecules/ChatMessage/useChatMessage";
import type { ToolResult } from "@/types/chat";
/**
 * Builds a ChatMessageData entry for text typed by the user.
 *
 * @param content - Text the user submitted
 * @returns A "message"-type entry with role "user" and the current time
 */
export function createUserMessage(content: string): ChatMessageData {
  const timestamp = new Date();
  return { type: "message", role: "user", content, timestamp };
}
/**
 * Drops authentication prompts (credentials_needed / login_needed) from a
 * message list. Called before sending a new message so stale auth prompts
 * disappear.
 *
 * @param messages - Array of chat messages
 * @returns Filtered array without authentication prompt messages
 */
export function filterAuthMessages(
  messages: ChatMessageData[],
): ChatMessageData[] {
  const authTypes = ["credentials_needed", "login_needed"];
  return messages.filter((msg) => !authTypes.includes(msg.type));
}
/**
 * Type guard validating the minimal message shape the backend sends:
 * a string `role` and an optional string `content`.
 *
 * @param msg - The message to validate
 * @returns True if the message has valid structure
 */
export function isValidMessage(msg: unknown): msg is Record<string, unknown> {
  if (msg === null || typeof msg !== "object") {
    return false;
  }
  const candidate = msg as Record<string, unknown>;
  const roleOk = typeof candidate.role === "string";
  const contentOk =
    candidate.content === undefined || typeof candidate.content === "string";
  return roleOk && contentOk;
}
/**
 * Type guard for a tool_calls array: each entry needs string `id` and `type`
 * plus a `function` object carrying string `name` and `arguments`.
 *
 * @param value - The value to validate
 * @returns True if value is a valid tool_calls array
 */
export function isToolCallArray(value: unknown): value is Array<{
  id: string;
  type: string;
  function: { name: string; arguments: string };
}> {
  if (!Array.isArray(value)) {
    return false;
  }

  const isToolCall = (entry: unknown): boolean => {
    if (typeof entry !== "object" || entry === null) {
      return false;
    }
    const call = entry as Record<string, unknown>;
    if (typeof call.id !== "string" || typeof call.type !== "string") {
      return false;
    }
    const fn = call.function;
    if (typeof fn !== "object" || fn === null) {
      return false;
    }
    const f = fn as Record<string, unknown>;
    return typeof f.name === "string" && typeof f.arguments === "string";
  };

  return value.every(isToolCall);
}
/**
 * Type guard for agent summary data: each entry needs string `id`, `name`
 * and `description`; `version`, when present, must be a number.
 *
 * @param value - The value to validate
 * @returns True if value is a valid agents array
 */
export function isAgentArray(value: unknown): value is Array<{
  id: string;
  name: string;
  description: string;
  version?: number;
}> {
  if (!Array.isArray(value)) {
    return false;
  }
  return value.every((entry) => {
    if (typeof entry !== "object" || entry === null) {
      return false;
    }
    const agent = entry as Record<string, unknown>;
    if (
      typeof agent.id !== "string" ||
      typeof agent.name !== "string" ||
      typeof agent.description !== "string"
    ) {
      return false;
    }
    return !("version" in agent) || typeof agent.version === "number";
  });
}
/**
 * Extracts a JSON object embedded within an error message string.
 *
 * This handles the edge case where the backend returns error messages
 * containing JSON objects with credential requirements or other structured
 * data. Uses brace matching to extract the first balanced JSON object, and
 * tracks string literals so braces inside quoted values (e.g. `"a } b"`)
 * do not break the balance count.
 *
 * @param message - The error message that may contain embedded JSON
 * @returns The parsed JSON object, or null if no valid JSON found
 *
 * @example
 * ```ts
 * const msg = "Error: Missing credentials {\"missing_credentials\": {...}}";
 * const result = extractJsonFromErrorMessage(msg);
 * // Returns: { missing_credentials: {...} }
 * ```
 */
export function extractJsonFromErrorMessage(
  message: string,
): Record<string, unknown> | null {
  const start = message.indexOf("{");
  if (start === -1) {
    return null;
  }

  let depth = 0;
  let inString = false;
  let escaped = false;
  let end = -1;

  for (let i = start; i < message.length; i++) {
    const ch = message[i];
    if (inString) {
      // Inside a JSON string literal: only an unescaped quote ends it.
      if (escaped) {
        escaped = false;
      } else if (ch === "\\") {
        escaped = true;
      } else if (ch === '"') {
        inString = false;
      }
      continue;
    }
    if (ch === '"') {
      inString = true;
    } else if (ch === "{") {
      depth++;
    } else if (ch === "}") {
      depth--;
      if (depth === 0) {
        end = i;
        break;
      }
    }
  }

  if (end === -1) {
    return null;
  }

  try {
    return JSON.parse(message.slice(start, end + 1)) as Record<string, unknown>;
  } catch {
    return null;
  }
}
/**
 * Parses a tool result and converts it to the appropriate ChatMessageData type.
 *
 * Handles specialized tool response types like:
 * - no_results: Search returned no matches
 * - agent_carousel: List of agents to display
 * - execution_started: Agent execution began
 * - need_login: User must authenticate before continuing
 * - Generic tool responses: Raw tool output
 *
 * @param result - The tool result to parse (may be string or object)
 * @param toolId - The unique identifier for this tool call
 * @param toolName - The name of the tool that was called
 * @param timestamp - Optional timestamp for the response
 * @returns The appropriate ChatMessageData object, or null for
 *   setup_requirements (the caller handles that case specially)
 */
export function parseToolResponse(
  result: ToolResult,
  toolId: string,
  toolName: string,
  timestamp?: Date,
): ChatMessageData | null {
  // Try to parse as JSON if it's a string
  let parsedResult: Record<string, unknown> | null = null;
  try {
    parsedResult =
      typeof result === "string"
        ? JSON.parse(result)
        : (result as Record<string, unknown>);
  } catch {
    // If parsing fails, we'll use the generic tool response
    parsedResult = null;
  }
  // Handle structured response types; the typeof check also filters out JSON
  // primitives (numbers, strings, booleans) produced by JSON.parse.
  if (parsedResult && typeof parsedResult === "object") {
    const responseType = parsedResult.type as string | undefined;
    // Handle no_results response - treat as a successful tool response
    if (responseType === "no_results") {
      return {
        type: "tool_response",
        toolId,
        toolName,
        result: (parsedResult.message as string) || "No results found",
        success: true,
        timestamp: timestamp || new Date(),
      };
    }
    // Handle agent_carousel response
    if (responseType === "agent_carousel") {
      const agentsData = parsedResult.agents;
      // Validate agents array structure before using it
      if (isAgentArray(agentsData)) {
        return {
          type: "agent_carousel",
          agents: agentsData,
          totalCount: parsedResult.total_count as number | undefined,
          timestamp: timestamp || new Date(),
        };
      } else {
        // Malformed carousel falls through to the generic response below.
        console.warn("Invalid agents array in agent_carousel response");
      }
    }
    // Handle execution_started response
    if (responseType === "execution_started") {
      return {
        type: "execution_started",
        executionId: (parsedResult.execution_id as string) || "",
        agentName: parsedResult.agent_name as string | undefined,
        message: parsedResult.message as string | undefined,
        timestamp: timestamp || new Date(),
      };
    }
    // Handle need_login response
    if (responseType === "need_login") {
      return {
        type: "login_needed",
        message:
          (parsedResult.message as string) ||
          "Please sign in to use chat and agent features",
        sessionId: (parsedResult.session_id as string) || "",
        agentInfo: parsedResult.agent_info as
          | {
              graph_id: string;
              name: string;
              trigger_type: string;
            }
          | undefined,
        timestamp: timestamp || new Date(),
      };
    }
    // Handle setup_requirements - return null so caller can handle it specially
    if (responseType === "setup_requirements") {
      return null;
    }
  }
  // Generic tool response (unparseable or unrecognized structured result)
  return {
    type: "tool_response",
    toolId,
    toolName,
    result,
    success: true,
    timestamp: timestamp || new Date(),
  };
}
/**
 * Type guard to validate user readiness structure from backend.
 *
 * `missing_credentials`, when present, must be a non-null object — note that
 * `typeof null === "object"`, so null is rejected explicitly to match the
 * declared Record type.
 *
 * @param value - The value to validate
 * @returns True if the value matches the UserReadiness structure
 */
export function isUserReadiness(
  value: unknown,
): value is { missing_credentials?: Record<string, unknown> } {
  if (typeof value !== "object" || value === null) {
    return false;
  }
  const creds = (value as Record<string, unknown>).missing_credentials;
  return creds === undefined || (typeof creds === "object" && creds !== null);
}
/**
 * Type guard to validate missing credentials structure.
 *
 * Arrays are rejected at the top level: `typeof [] === "object"`, but an
 * array is not a `Record<string, Record<string, unknown>>`.
 *
 * @param value - The value to validate
 * @returns True if the value is a valid missing credentials record
 */
export function isMissingCredentials(
  value: unknown,
): value is Record<string, Record<string, unknown>> {
  if (typeof value !== "object" || value === null || Array.isArray(value)) {
    return false;
  }
  // Check that all values are objects
  return Object.values(value).every((v) => typeof v === "object" && v !== null);
}
/**
 * Type guard to validate setup info structure.
 *
 * `user_readiness`, when present, must be a non-null object (null would pass
 * a bare typeof check); `agent_name`, when present, must be a string.
 *
 * @param value - The value to validate
 * @returns True if the value contains valid setup info
 */
export function isSetupInfo(value: unknown): value is {
  user_readiness?: Record<string, unknown>;
  agent_name?: string;
} {
  if (typeof value !== "object" || value === null) {
    return false;
  }
  const candidate = value as Record<string, unknown>;
  const readiness = candidate.user_readiness;
  const readinessOk =
    readiness === undefined ||
    (typeof readiness === "object" && readiness !== null);
  const nameOk =
    candidate.agent_name === undefined ||
    typeof candidate.agent_name === "string";
  return readinessOk && nameOk;
}
/**
 * Extract credentials requirements from setup info result.
 *
 * Used when a tool response indicates missing credentials are needed
 * to execute an agent.
 *
 * @param parsedResult - The parsed tool response result
 * @returns ChatMessageData for credentials_needed, or null if no credentials needed
 */
export function extractCredentialsNeeded(
  parsedResult: Record<string, unknown>,
): ChatMessageData | null {
  try {
    const setupInfo = parsedResult?.setup_info as
      | Record<string, unknown>
      | undefined;
    const userReadiness = setupInfo?.user_readiness as
      | Record<string, unknown>
      | undefined;
    const missingCreds = userReadiness?.missing_credentials as
      | Record<string, Record<string, unknown>>
      | undefined;
    // If there are missing credentials, create the message with ALL credentials
    if (missingCreds && Object.keys(missingCreds).length > 0) {
      const agentName = (setupInfo?.agent_name as string) || "this agent";
      // Map all missing credentials to the array format
      const credentials = Object.values(missingCreds).map((credInfo) => {
        const providerName =
          (credInfo.provider_name as string) ||
          (credInfo.provider as string) ||
          "Unknown Provider";
        return {
          provider: (credInfo.provider as string) || "unknown",
          providerName,
          credentialType:
            (credInfo.type as
              | "api_key"
              | "oauth2"
              | "user_password"
              | "host_scoped") || "api_key",
          // BUGFIX: reuse the resolved providerName so the fallback title is
          // never "undefined credentials" when both provider fields are absent.
          title: (credInfo.title as string) || `${providerName} credentials`,
          scopes: credInfo.scopes as string[] | undefined,
        };
      });
      return {
        type: "credentials_needed",
        credentials,
        message: `To run ${agentName}, you need to add ${credentials.length === 1 ? "credentials" : `${credentials.length} credentials`}.`,
        agentName,
        timestamp: new Date(),
      };
    }
    return null;
  } catch (err) {
    console.error("Failed to extract credentials from setup info:", err);
    return null;
  }
}

View File

@@ -0,0 +1,267 @@
import type { Dispatch, SetStateAction, MutableRefObject } from "react";
import type { StreamChunk } from "@/hooks/useChatStream";
import type { ChatMessageData } from "@/components/molecules/ChatMessage/useChatMessage";
import { parseToolResponse, extractCredentialsNeeded } from "./helpers";
/**
 * Handler dependencies - all state setters and refs needed by handlers.
 *
 * Each stream-event handler receives this bundle instead of importing React
 * state directly, which keeps the handlers unit-testable.
 */
export interface HandlerDependencies {
  // Toggles the "assistant text is streaming" UI flag.
  setHasTextChunks: Dispatch<SetStateAction<boolean>>;
  // Accumulates streamed text fragments of the in-progress reply.
  setStreamingChunks: Dispatch<SetStateAction<string[]>>;
  // Mirror of the streaming chunks; read by handlers to avoid stale closures.
  streamingChunksRef: MutableRefObject<string[]>;
  // Appends/replaces chat messages in local state.
  setMessages: Dispatch<SetStateAction<ChatMessageData[]>>;
  // Fallback session id used when a stream event does not carry one.
  sessionId: string;
}
/**
 * Handles text_chunk events by accumulating streaming text.
 * Updates both the state and ref to prevent stale closures.
 */
export function handleTextChunk(
  chunk: StreamChunk,
  deps: HandlerDependencies,
): void {
  const text = chunk.content;
  if (!text) {
    return;
  }
  deps.setHasTextChunks(true);
  deps.setStreamingChunks(function appendChunk(previous) {
    const next = previous.concat(text);
    // Mirror into the ref so async handlers never read a stale closure value.
    deps.streamingChunksRef.current = next;
    return next;
  });
}
/**
 * Handles text_ended events by saving completed text as assistant message.
 * Clears streaming state after saving the message.
 */
export function handleTextEnded(
  _chunk: StreamChunk,
  deps: HandlerDependencies,
): void {
  console.log("[Text Ended] Saving streamed text as assistant message");
  // Read from the ref (not state) to avoid a stale closure.
  const completedText = deps.streamingChunksRef.current.join("");
  if (completedText.trim()) {
    const finished: ChatMessageData = {
      type: "message",
      role: "assistant",
      content: completedText,
      timestamp: new Date(),
    };
    deps.setMessages((previous) => [...previous, finished]);
  }
  // Reset the streaming buffer for the next text segment.
  deps.setStreamingChunks([]);
  deps.streamingChunksRef.current = [];
  deps.setHasTextChunks(false);
}
/**
 * Handles tool_call_start events by adding a ToolCallMessage to the UI.
 * Shows a loading state while the tool executes.
 */
export function handleToolCallStart(
  chunk: StreamChunk,
  deps: HandlerDependencies,
): void {
  // Synthesize a unique id when the stream did not provide one.
  const fallbackId = `tool-${Date.now()}-${chunk.idx || 0}`;
  const pending: ChatMessageData = {
    type: "tool_call",
    toolId: chunk.tool_id || fallbackId,
    toolName: chunk.tool_name || "Executing...",
    arguments: chunk.arguments || {},
    timestamp: new Date(),
  };
  deps.setMessages((previous) => previous.concat(pending));
  console.log("[Tool Call Start]", {
    toolId: pending.toolId,
    toolName: pending.toolName,
    timestamp: new Date().toISOString(),
  });
}
/**
 * Handles tool_response events by replacing the matching tool_call message.
 * Parses the response and handles special cases like credential requirements.
 *
 * All reads and writes of the message list happen inside a single functional
 * setMessages update. The previous implementation tried to read the latest
 * messages by calling setMessages with an updater that mutated a local
 * `toolName` variable and returned `prev` unchanged — React does not
 * guarantee that updater runs before the next statement, so the resolved
 * tool name could still be stale when the response was parsed.
 *
 * NOTE: the updater below has logging/parsing side effects; React StrictMode
 * may invoke it twice in development, which can duplicate log lines only.
 */
export function handleToolResponse(
  chunk: StreamChunk,
  deps: HandlerDependencies,
): void {
  console.log("[Tool Response] Received:", {
    toolId: chunk.tool_id,
    toolName: chunk.tool_name,
    timestamp: new Date().toISOString(),
  });
  deps.setMessages((prev) => {
    // Locate the pending tool_call this response belongs to.
    const toolCallIndex = prev.findIndex(
      (msg) => msg.type === "tool_call" && msg.toolId === chunk.tool_id,
    );
    // Resolve the tool name: prefer the chunk's, fall back to the matching
    // tool_call message when the stream omitted it.
    let toolName = chunk.tool_name || "unknown";
    if (!chunk.tool_name || chunk.tool_name === "unknown") {
      const match = toolCallIndex !== -1 ? prev[toolCallIndex] : undefined;
      if (match && match.type === "tool_call") {
        toolName = match.toolName;
      }
    }
    const responseMessage = parseToolResponse(
      chunk.result!,
      chunk.tool_id!,
      toolName,
      new Date(),
    );
    // parseToolResponse returns null for setup_requirements responses; the
    // only UI action then is surfacing missing credentials.
    if (!responseMessage) {
      let parsedResult: Record<string, unknown> | null = null;
      try {
        parsedResult =
          typeof chunk.result === "string"
            ? JSON.parse(chunk.result)
            : (chunk.result as Record<string, unknown>);
      } catch {
        parsedResult = null;
      }
      // Use the resolved toolName (not chunk.tool_name) so responses whose
      // name had to be recovered from history are still recognized.
      if (
        toolName === "get_required_setup_info" &&
        chunk.success &&
        parsedResult
      ) {
        const credentialsMessage = extractCredentialsNeeded(parsedResult);
        if (credentialsMessage) {
          return [...prev, credentialsMessage];
        }
      }
      return prev;
    }
    // Replace the pending tool_call in place, or append if none matched.
    if (toolCallIndex !== -1) {
      const next = [...prev];
      next[toolCallIndex] = responseMessage;
      console.log(
        "[Tool Response] Replaced tool_call with matching tool_id:",
        chunk.tool_id,
        "at index:",
        toolCallIndex,
      );
      return next;
    }
    console.warn(
      "[Tool Response] No tool_call found with tool_id:",
      chunk.tool_id,
      "appending instead",
    );
    return [...prev, responseMessage];
  });
}
/**
 * Handles login_needed events by adding a login prompt message.
 */
export function handleLoginNeeded(
  chunk: StreamChunk,
  deps: HandlerDependencies,
): void {
  // Prefer the session id carried by the event; fall back to our own.
  const prompt: ChatMessageData = {
    type: "login_needed",
    message:
      chunk.message || "Please sign in to use chat and agent features",
    sessionId: chunk.session_id || deps.sessionId,
    agentInfo: chunk.agent_info,
    timestamp: new Date(),
  };
  deps.setMessages((previous) => previous.concat(prompt));
}
/**
 * Handles stream_end events by finalizing the streaming session.
 * Converts any remaining streaming chunks into a completed assistant message.
 */
export function handleStreamEnd(
  _chunk: StreamChunk,
  deps: HandlerDependencies,
): void {
  // Produce the compact per-message shape used for the final-state log.
  function summarize(m: ChatMessageData) {
    return {
      type: m.type,
      ...(m.type === "message" && {
        role: m.role,
        contentLength: m.content.length,
      }),
      ...(m.type === "tool_call" && {
        toolId: m.toolId,
        toolName: m.toolName,
      }),
      ...(m.type === "tool_response" && {
        toolId: m.toolId,
        toolName: m.toolName,
        success: m.success,
      }),
    };
  }
  // Read the ref (not state) to avoid a stale closure value.
  const finalText = deps.streamingChunksRef.current.join("");
  if (finalText) {
    const finalMessage: ChatMessageData = {
      type: "message",
      role: "assistant",
      content: finalText,
      timestamp: new Date(),
    };
    deps.setMessages((previous) => {
      const next = [...previous, finalMessage];
      console.log("[Stream End] Final state:", {
        localMessages: next.map(summarize),
        streamingChunks: deps.streamingChunksRef.current,
        timestamp: new Date().toISOString(),
      });
      return next;
    });
  }
  // Clear streaming state immediately now that we have the message.
  deps.setStreamingChunks([]);
  deps.streamingChunksRef.current = [];
  deps.setHasTextChunks(false);
  console.log("[Stream End] Stream complete, messages in local state");
}
/**
 * Handles error events by logging and showing error toast.
 */
export function handleError(
  chunk: StreamChunk,
  _deps: HandlerDependencies,
): void {
  // `||` (not `??`) so empty strings also fall through to the default.
  const detail = chunk.message || chunk.content || "An error occurred";
  console.error("Stream error:", detail);
  // Note: Toast import removed to avoid circular dependencies
  // Error toasts should be shown at the hook level
}

View File

@@ -0,0 +1,195 @@
import { useState, useCallback, useRef, useMemo } from "react";
import { toast } from "sonner";
import { useChatStream } from "@/hooks/useChatStream";
import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse";
import type { ChatMessageData } from "@/components/molecules/ChatMessage/useChatMessage";
import {
parseToolResponse,
isValidMessage,
isToolCallArray,
createUserMessage,
filterAuthMessages,
} from "./helpers";
import { createStreamEventDispatcher } from "./createStreamEventDispatcher";
interface UseChatContainerArgs {
  sessionId: string | null;
  // Persisted messages fetched from the backend for this session.
  initialMessages: SessionDetailResponse["messages"];
  // Callback to re-fetch the session from the backend.
  // NOTE(review): currently not referenced inside useChatContainer — confirm
  // whether callers still need to supply it.
  onRefreshSession: () => Promise<void>;
}
interface UseChatContainerResult {
  // Persisted history merged with messages produced during this session.
  messages: ChatMessageData[];
  // Text fragments of the assistant reply currently being streamed.
  streamingChunks: string[];
  // True while streamed text chunks are pending display.
  isStreaming: boolean;
  error: Error | null;
  // Sends a message; pass isUserMessage=false for system notifications.
  sendMessage: (content: string, isUserMessage?: boolean) => Promise<void>;
}
/**
 * Chat container state hook: merges persisted session history with messages
 * produced locally during streaming, and exposes a sendMessage that wires
 * stream events to the handler dispatcher.
 */
export function useChatContainer({
  sessionId,
  initialMessages,
  onRefreshSession,
}: UseChatContainerArgs): UseChatContainerResult {
  const [messages, setMessages] = useState<ChatMessageData[]>([]);
  const [streamingChunks, setStreamingChunks] = useState<string[]>([]);
  const [hasTextChunks, setHasTextChunks] = useState(false);
  // Track streaming chunks in a ref so we can access the latest value in callbacks
  const streamingChunksRef = useRef<string[]>([]);
  const { error, sendMessage: sendStreamMessage } = useChatStream();
  // Show streaming UI when we have text chunks, independent of connection state
  // This keeps the StreamingMessage visible during the transition to persisted message
  const isStreaming = hasTextChunks;
  /**
   * Convert initial messages to our format, filtering out empty messages.
   * Memoized to prevent expensive re-computation on every render.
   */
  const allMessages = useMemo((): ChatMessageData[] => {
    const processedInitialMessages = initialMessages
      .filter((msg: Record<string, unknown>) => {
        // Validate message structure first
        if (!isValidMessage(msg)) {
          console.warn("Invalid message structure from backend:", msg);
          return false;
        }
        // Include messages with content OR tool_calls (tool_call messages have empty content)
        const content = String(msg.content || "").trim();
        const toolCalls = msg.tool_calls;
        return (
          content.length > 0 ||
          (toolCalls && Array.isArray(toolCalls) && toolCalls.length > 0)
        );
      })
      .map((msg: Record<string, unknown>): ChatMessageData | null => {
        const content = String(msg.content || "");
        const role = String(msg.role || "assistant").toLowerCase();
        // Check if this is a tool_call message (assistant message with tool_calls)
        const toolCalls = msg.tool_calls;
        // Validate tool_calls structure if present
        if (
          role === "assistant" &&
          toolCalls &&
          isToolCallArray(toolCalls) &&
          toolCalls.length > 0
        ) {
          // Skip tool_call messages from persisted history
          // We only show tool_calls during live streaming, not from history
          // The tool_response that follows it is what we want to display
          return null;
        }
        // Check if this is a tool response message (role="tool")
        if (role === "tool") {
          const timestamp = msg.timestamp
            ? new Date(msg.timestamp as string)
            : undefined;
          // Use helper function to parse tool response
          const toolResponse = parseToolResponse(
            content,
            (msg.tool_call_id as string) || "",
            "unknown",
            timestamp,
          );
          // parseToolResponse returns null for setup_requirements
          // In that case, skip this message (it should be handled during streaming)
          if (!toolResponse) {
            return null;
          }
          return toolResponse;
        }
        // Return as regular message
        return {
          type: "message",
          role: role as "user" | "assistant" | "system",
          content,
          timestamp: msg.timestamp
            ? new Date(msg.timestamp as string)
            : undefined,
        };
      })
      .filter((msg): msg is ChatMessageData => msg !== null); // Remove null entries
    return [...processedInitialMessages, ...messages];
  }, [initialMessages, messages]);
  /**
   * Send a message and handle the streaming response.
   *
   * Message Flow:
   * 1. User message added immediately to local state
   * 2. text_chunk events accumulate in streaming box
   * 3. text_ended closes streaming box
   * 4. tool_call_start shows ToolCallMessage (spinning gear)
   * 5. tool_response replaces ToolCallMessage with ToolResponseMessage (result)
   * 6. stream_end finalizes the stream; local state stays the source of
   *    truth for messages (no backend refresh is triggered here)
   *
   * State Management:
   * - Local `messages` state tracks only new messages during streaming
   * - `streamingChunks` accumulates text as it arrives
   * - `streamingChunksRef` prevents stale closures in async handlers
   *
   * NOTE(review): `onRefreshSession` is accepted by this hook but never
   * called here — confirm whether it can be dropped from the args.
   */
  const sendMessage = useCallback(
    async function sendMessage(content: string, isUserMessage: boolean = true) {
      if (!sessionId) {
        console.error("Cannot send message: no session ID");
        return;
      }
      // Update message state: add user message and remove stale auth prompts
      if (isUserMessage) {
        const userMessage = createUserMessage(content);
        setMessages((prev) => [...filterAuthMessages(prev), userMessage]);
      } else {
        // For system messages, just remove the login/credentials prompts
        setMessages((prev) => filterAuthMessages(prev));
      }
      // Clear streaming state
      setStreamingChunks([]);
      streamingChunksRef.current = [];
      setHasTextChunks(false);
      // Create event dispatcher with all handler dependencies
      const dispatcher = createStreamEventDispatcher({
        setHasTextChunks,
        setStreamingChunks,
        streamingChunksRef,
        setMessages,
        sessionId,
      });
      try {
        // Stream the response using the event dispatcher
        await sendStreamMessage(sessionId, content, dispatcher, isUserMessage);
      } catch (err) {
        console.error("Failed to send message:", err);
        const errorMessage =
          err instanceof Error ? err.message : "Failed to send message";
        toast.error("Failed to send message", {
          description: errorMessage,
        });
      }
    },
    [sessionId, sendStreamMessage],
  );
  return {
    messages: allMessages,
    streamingChunks,
    isStreaming,
    error,
    sendMessage,
  };
}

View File

@@ -0,0 +1,127 @@
import type { Meta, StoryObj } from "@storybook/react";
import { ChatCredentialsSetup } from "./ChatCredentialsSetup";
// Storybook configuration for the ChatCredentialsSetup component.
const meta: Meta<typeof ChatCredentialsSetup> = {
  title: "Chat/ChatCredentialsSetup",
  component: ChatCredentialsSetup,
  parameters: {
    layout: "centered",
  },
  argTypes: {
    onAllCredentialsComplete: { action: "all credentials complete" },
    onCancel: { action: "cancelled" },
  },
};
export default meta;
type Story = StoryObj<typeof ChatCredentialsSetup>;
// Simplest case: a single API-key credential.
export const SingleAPIKey: Story = {
  args: {
    credentials: [
      {
        provider: "openai",
        providerName: "OpenAI",
        credentialType: "api_key",
        title: "OpenAI API",
      },
    ],
    agentName: "GPT Assistant",
    message: "To run GPT Assistant, you need to add credentials.",
  },
};
// Single OAuth2 credential with explicit scopes.
export const SingleOAuth: Story = {
  args: {
    credentials: [
      {
        provider: "github",
        providerName: "GitHub",
        credentialType: "oauth2",
        title: "GitHub Integration",
        scopes: ["repo", "read:user"],
      },
    ],
    agentName: "GitHub Agent",
    message: "To run GitHub Agent, you need to add credentials.",
  },
};
// Several credentials that must all be configured before completion fires.
export const MultipleCredentials: Story = {
  args: {
    credentials: [
      {
        provider: "github",
        providerName: "GitHub",
        credentialType: "oauth2",
        title: "GitHub Integration",
        scopes: ["repo", "read:user"],
      },
      {
        provider: "openai",
        providerName: "OpenAI",
        credentialType: "api_key",
        title: "OpenAI API",
      },
      {
        provider: "notion",
        providerName: "Notion",
        credentialType: "oauth2",
        title: "Notion Integration",
      },
    ],
    agentName: "Multi-Service Agent",
    message: "To run Multi-Service Agent, you need to add 3 credentials.",
  },
};
// One of each supported credential type (api_key, oauth2, user_password, host_scoped).
export const MixedCredentialTypes: Story = {
  args: {
    credentials: [
      {
        provider: "openai",
        providerName: "OpenAI",
        credentialType: "api_key",
        title: "OpenAI API",
      },
      {
        provider: "github",
        providerName: "GitHub",
        credentialType: "oauth2",
        title: "GitHub Integration",
        scopes: ["repo"],
      },
      {
        provider: "database",
        providerName: "Database",
        credentialType: "user_password",
        title: "Database Connection",
      },
      {
        provider: "custom_api",
        providerName: "Custom API",
        credentialType: "host_scoped",
        title: "Custom API Headers",
      },
    ],
    agentName: "Full Stack Agent",
    message: "To run Full Stack Agent, you need to add 4 credentials.",
  },
};
// Layout stress test: very long agent name in the heading and message.
export const LongAgentName: Story = {
  args: {
    credentials: [
      {
        provider: "openai",
        providerName: "OpenAI",
        credentialType: "api_key",
        title: "OpenAI API",
      },
    ],
    agentName:
      "Super Complex Multi-Step Data Processing and Analysis Agent with Machine Learning",
    message:
      "To run Super Complex Multi-Step Data Processing and Analysis Agent with Machine Learning, you need to add credentials.",
  },
};

View File

@@ -0,0 +1,154 @@
import { useEffect, useRef } from "react";
import { Card } from "@/components/atoms/Card/Card";
import { Text } from "@/components/atoms/Text/Text";
import { Key, Check, Warning } from "@phosphor-icons/react";
import { cn } from "@/lib/utils";
import { useChatCredentialsSetup } from "./useChatCredentialsSetup";
import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/CredentialsInputs/CredentialsInputs";
import type { BlockIOCredentialsSubSchema } from "@/lib/autogpt-server-api";
/** One credential requirement surfaced by the backend's setup info. */
export interface CredentialInfo {
  /** Provider id as known by the backend (e.g. "openai"). */
  provider: string;
  /** Human-readable provider label shown in the card. */
  providerName: string;
  /** Which credential flow the provider uses. */
  credentialType: "api_key" | "oauth2" | "user_password" | "host_scoped";
  /** Display title for the credential. */
  title: string;
  /** OAuth scopes to request, when applicable. */
  scopes?: string[];
}
interface Props {
  /** All credentials the user must configure before the agent can run. */
  credentials: CredentialInfo[];
  agentName?: string;
  /** Explanatory text rendered above the credential inputs. */
  message: string;
  /** Fired once when every listed credential has a selection. */
  onAllCredentialsComplete: () => void;
  // NOTE(review): onCancel is declared but not used in the component body —
  // confirm whether a cancel affordance is still planned.
  onCancel: () => void;
  className?: string;
}
/**
 * Build a minimal synthetic credentials schema for CredentialsInput.
 * Only the provider/type/scopes routing fields are populated; all
 * discriminator fields are explicitly absent.
 */
function createSchemaFromCredentialInfo(
  credential: CredentialInfo,
): BlockIOCredentialsSubSchema {
  const { provider, credentialType, scopes } = credential;
  return {
    type: "object",
    properties: {},
    credentials_provider: [provider],
    credentials_types: [credentialType],
    credentials_scopes: scopes,
    discriminator: undefined,
    discriminator_mapping: undefined,
    discriminator_values: undefined,
  };
}
/**
 * Inline chat card prompting the user to configure one or more credentials
 * before an agent can run. Each required credential renders a
 * CredentialsInput; once every provider has a selection,
 * `onAllCredentialsComplete` fires exactly once per credentials list.
 */
export function ChatCredentialsSetup({
  credentials,
  agentName,
  message,
  onAllCredentialsComplete,
  // NOTE(review): onCancel is accepted but never referenced below — confirm intent.
  onCancel,
  className,
}: Props) {
  const { selectedCredentials, isAllComplete, handleCredentialSelect } =
    useChatCredentialsSetup(credentials);
  // Track if we've already called completion to prevent double calls
  const hasCalledCompleteRef = useRef(false);
  // Reset the completion flag when credentials change (new credential setup flow)
  useEffect(
    function resetCompletionFlag() {
      hasCalledCompleteRef.current = false;
    },
    [credentials],
  );
  // Auto-call completion when all credentials are configured
  useEffect(
    function autoCompleteWhenReady() {
      if (isAllComplete && !hasCalledCompleteRef.current) {
        hasCalledCompleteRef.current = true;
        onAllCredentialsComplete();
      }
    },
    [isAllComplete, onAllCredentialsComplete],
  );
  return (
    <Card
      className={cn(
        "mx-4 my-2 overflow-hidden border-orange-200 bg-orange-50 dark:border-orange-900 dark:bg-orange-950",
        className,
      )}
    >
      <div className="flex items-start gap-4 p-6">
        <div className="flex h-12 w-12 flex-shrink-0 items-center justify-center rounded-full bg-orange-500">
          <Key size={24} weight="bold" className="text-white" />
        </div>
        <div className="flex-1">
          <Text
            variant="h3"
            className="mb-2 text-orange-900 dark:text-orange-100"
          >
            Credentials Required
          </Text>
          <Text
            variant="body"
            className="mb-4 text-orange-700 dark:text-orange-300"
          >
            {message}
          </Text>
          <div className="space-y-3">
            {/* One input card per required credential; green styling marks configured ones */}
            {credentials.map((cred, index) => {
              const schema = createSchemaFromCredentialInfo(cred);
              const isSelected = !!selectedCredentials[cred.provider];
              return (
                <div
                  key={`${cred.provider}-${index}`}
                  className={cn(
                    "relative rounded-lg border border-orange-200 bg-white p-4 dark:border-orange-800 dark:bg-orange-900/20",
                    isSelected &&
                      "border-green-500 bg-green-50 dark:border-green-700 dark:bg-green-950/30",
                  )}
                >
                  <div className="mb-2 flex items-center justify-between">
                    <div className="flex items-center gap-2">
                      {isSelected ? (
                        <Check
                          size={20}
                          className="text-green-500"
                          weight="bold"
                        />
                      ) : (
                        <Warning
                          size={20}
                          className="text-orange-500"
                          weight="bold"
                        />
                      )}
                      <Text
                        variant="body"
                        className="font-semibold text-orange-900 dark:text-orange-100"
                      >
                        {cred.providerName}
                      </Text>
                    </div>
                  </div>
                  <CredentialsInput
                    schema={schema}
                    selectedCredentials={selectedCredentials[cred.provider]}
                    onSelectCredentials={(credMeta) =>
                      handleCredentialSelect(cred.provider, credMeta)
                    }
                    hideIfSingleCredentialAvailable={false}
                  />
                </div>
              );
            })}
          </div>
        </div>
      </div>
    </Card>
  );
}

View File

@@ -0,0 +1,36 @@
import { useState, useEffect, useMemo } from "react";
import type { CredentialInfo } from "./ChatCredentialsSetup";
import type { CredentialsMetaInput } from "@/lib/autogpt-server-api";
/**
 * Tracks which credential providers the user has configured.
 * Returns the current selections, a completion flag, and a select handler.
 */
export function useChatCredentialsSetup(credentials: CredentialInfo[]) {
  const [selectedCredentials, setSelectedCredentials] = useState<
    Record<string, CredentialsMetaInput>
  >({});
  // Complete when the list is non-empty and every provider has a selection.
  const isAllComplete = useMemo(
    () =>
      credentials.length > 0 &&
      credentials.every((cred) => Boolean(selectedCredentials[cred.provider])),
    [credentials, selectedCredentials],
  );
  // Record the credential chosen for a provider; selections are never removed.
  function handleCredentialSelect(
    provider: string,
    credential?: CredentialsMetaInput,
  ) {
    if (!credential) return;
    setSelectedCredentials((prev) => ({ ...prev, [provider]: credential }));
  }
  return {
    selectedCredentials,
    isAllComplete,
    handleCredentialSelect,
  };
}

View File

@@ -0,0 +1,30 @@
import React from "react";
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { cn } from "@/lib/utils";
/** Props for {@link ChatErrorState}. */
export interface ChatErrorStateProps {
  error: Error;
  /** Optional retry action surfaced by the ErrorCard. */
  onRetry?: () => void;
  className?: string;
}
/**
 * Full-pane error state for the chat view: centers an ErrorCard showing the
 * error message, with an optional retry button.
 */
export function ChatErrorState({
  error,
  onRetry,
  className,
}: ChatErrorStateProps) {
  return (
    <div
      className={cn("flex flex-1 items-center justify-center p-6", className)}
    >
      <ErrorCard
        responseError={{
          message: error.message,
        }}
        context="chat session"
        onRetry={onRetry}
        className="max-w-md"
      />
    </div>
  );
}

View File

@@ -0,0 +1,31 @@
import React from "react";
import { Text } from "@/components/atoms/Text/Text";
import { ArrowClockwise } from "@phosphor-icons/react";
import { cn } from "@/lib/utils";
/** Props for {@link ChatLoadingState}. */
export interface ChatLoadingStateProps {
  /** Status text shown under the spinner; defaults to "Loading...". */
  message?: string;
  className?: string;
}
/**
 * Full-pane loading state for the chat view: a centered spinner with a
 * configurable status message.
 */
export function ChatLoadingState({
  message = "Loading...",
  className,
}: ChatLoadingStateProps) {
  return (
    <div
      className={cn("flex flex-1 items-center justify-center p-6", className)}
    >
      <div className="flex flex-col items-center gap-4 text-center">
        <ArrowClockwise
          size={32}
          weight="bold"
          className="animate-spin text-purple-500"
        />
        <Text variant="body" className="text-zinc-600 dark:text-zinc-400">
          {message}
        </Text>
      </div>
    </div>
  );
}

View File

@@ -0,0 +1,70 @@
"use client";
import { useChatPage } from "./useChatPage";
import { ChatContainer } from "./components/ChatContainer/ChatContainer";
import { ChatErrorState } from "./components/ChatErrorState/ChatErrorState";
import { ChatLoadingState } from "./components/ChatLoadingState/ChatLoadingState";
/**
 * Top-level chat page: header with session controls, then exactly one of
 * three panes — loading, error, or the chat container for the session.
 */
function ChatPage() {
  const {
    messages,
    isLoading,
    isCreating,
    error,
    sessionId,
    createSession,
    clearSession,
    refreshSession,
  } = useChatPage();
  return (
    <div className="flex h-full flex-col">
      {/* Header */}
      <header className="border-b border-zinc-200 bg-white p-4 dark:border-zinc-800 dark:bg-zinc-900">
        <div className="container mx-auto flex items-center justify-between">
          <h1 className="text-xl font-semibold">Chat</h1>
          {sessionId && (
            <div className="flex items-center gap-4">
              <span className="text-sm text-zinc-600 dark:text-zinc-400">
                Session: {sessionId.slice(0, 8)}...
              </span>
              <button
                onClick={clearSession}
                className="text-sm text-zinc-600 hover:text-zinc-900 dark:text-zinc-400 dark:hover:text-zinc-100"
              >
                New Chat
              </button>
            </div>
          )}
        </div>
      </header>
      {/* Main Content */}
      <main className="container mx-auto flex flex-1 flex-col overflow-hidden">
        {/* Loading State */}
        {(isLoading || isCreating) && (
          <ChatLoadingState
            message={isCreating ? "Creating session..." : "Loading..."}
          />
        )}
        {/* Error State */}
        {error && !isLoading && (
          <ChatErrorState error={error} onRetry={createSession} />
        )}
        {/* Session Content */}
        {sessionId && !isLoading && !error && (
          <ChatContainer
            sessionId={sessionId}
            initialMessages={messages}
            onRefreshSession={refreshSession}
            className="flex-1"
          />
        )}
      </main>
    </div>
  );
}
export default ChatPage;

View File

@@ -0,0 +1,176 @@
import { useEffect, useState, useRef } from "react";
import { useSearchParams, useRouter } from "next/navigation";
import { toast } from "sonner";
import { useChatSession } from "@/hooks/useChatSession";
import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
import { useChatStream } from "@/hooks/useChatStream";
/** Shape returned by {@link useChatPage}. */
interface UseChatPageResult {
  session: ReturnType<typeof useChatSession>["session"];
  messages: ReturnType<typeof useChatSession>["messages"];
  isLoading: boolean;
  isCreating: boolean;
  error: Error | null;
  /** Creates a new chat session and resolves with its id. */
  createSession: () => Promise<string>;
  /** Re-fetches the current session from the backend. */
  refreshSession: () => Promise<void>;
  /** Clears local session state and navigates back to /chat. */
  clearSession: () => void;
  sessionId: string | null;
}
/**
 * Page-level chat orchestration: resolves the session id from the URL,
 * auto-creates a session when none exists, auto-claims anonymous sessions
 * once the user logs in, and surfaces online/offline toasts.
 */
export function useChatPage(): UseChatPageResult {
  const router = useRouter();
  const searchParams = useSearchParams();
  // Support both 'session' and 'session_id' query parameters
  const urlSessionId =
    searchParams.get("session_id") || searchParams.get("session");
  // NOTE(review): isOnline is set by the listeners below but its value is
  // never read — the toasts alone drive the UX. Consider removing the state.
  const [isOnline, setIsOnline] = useState(true);
  // Guards so the create/claim effects run at most once per mount/session.
  const hasCreatedSessionRef = useRef(false);
  const hasClaimedSessionRef = useRef(false);
  const { user } = useSupabase();
  const { sendMessage: sendStreamMessage } = useChatStream();
  const {
    session,
    sessionId: sessionIdFromHook,
    messages,
    isLoading,
    isCreating,
    error,
    createSession,
    refreshSession,
    claimSession,
    clearSession: clearSessionBase,
  } = useChatSession({
    urlSessionId,
    autoCreate: false, // We'll manually create when needed
  });
  // Auto-create session ONLY if there's no URL session
  // If URL session exists, GET query will fetch it automatically
  useEffect(
    function autoCreateSession() {
      // Only create if:
      // 1. No URL session (not loading someone else's session)
      // 2. Haven't already created one this mount
      // 3. Not currently creating
      // 4. We don't already have a sessionId
      if (
        !urlSessionId &&
        !hasCreatedSessionRef.current &&
        !isCreating &&
        !sessionIdFromHook
      ) {
        console.log("[autoCreateSession] Creating new session");
        hasCreatedSessionRef.current = true;
        createSession().catch((err) => {
          console.error("[autoCreateSession] Failed to create session:", err);
          hasCreatedSessionRef.current = false; // Reset on error to allow retry
        });
      } else if (sessionIdFromHook) {
        console.log(
          "[autoCreateSession] Skipping - already have sessionId:",
          sessionIdFromHook,
        );
      }
    },
    [urlSessionId, isCreating, sessionIdFromHook, createSession],
  );
  // Auto-claim session if user is logged in and session has no user_id
  useEffect(
    function autoClaimSession() {
      // Only claim if:
      // 1. We have a session loaded
      // 2. Session has no user_id (anonymous session)
      // 3. User is logged in
      // 4. Haven't already claimed this session
      // 5. Not currently loading
      if (
        session &&
        !session.user_id &&
        user &&
        !hasClaimedSessionRef.current &&
        !isLoading &&
        sessionIdFromHook
      ) {
        console.log("[autoClaimSession] Claiming anonymous session for user");
        hasClaimedSessionRef.current = true;
        claimSession(sessionIdFromHook)
          .then(() => {
            console.log(
              "[autoClaimSession] Session claimed successfully, sending login notification",
            );
            // Send login notification message to backend after successful claim
            // This notifies the agent that the user has logged in
            sendStreamMessage(
              sessionIdFromHook,
              "User has successfully logged in.",
              () => {
                // Empty chunk handler - we don't need to process responses for this system message
              },
              false, // isUserMessage = false
            ).catch((err) => {
              console.error(
                "[autoClaimSession] Failed to send login notification:",
                err,
              );
            });
          })
          .catch((err) => {
            console.error("[autoClaimSession] Failed to claim session:", err);
            hasClaimedSessionRef.current = false; // Reset on error to allow retry
          });
      }
    },
    [session, user, isLoading, sessionIdFromHook, claimSession, sendStreamMessage],
  );
  // Monitor online/offline status
  useEffect(function monitorNetworkStatus() {
    function handleOnline() {
      setIsOnline(true);
      toast.success("Connection restored", {
        description: "You're back online",
      });
    }
    function handleOffline() {
      setIsOnline(false);
      toast.error("You're offline", {
        description: "Check your internet connection",
      });
    }
    window.addEventListener("online", handleOnline);
    window.addEventListener("offline", handleOffline);
    // Check initial status
    setIsOnline(navigator.onLine);
    return () => {
      window.removeEventListener("online", handleOnline);
      window.removeEventListener("offline", handleOffline);
    };
  }, []);
  // Reset local state and navigate back to /chat so a fresh session is created.
  function clearSession() {
    clearSessionBase();
    // Reset the created session flag so a new session can be created
    hasCreatedSessionRef.current = false;
    hasClaimedSessionRef.current = false;
    // Remove session from URL and trigger new session creation
    router.push("/chat");
  }
  return {
    session,
    messages,
    isLoading,
    isCreating,
    error,
    createSession,
    refreshSession,
    clearSession,
    sessionId: sessionIdFromHook, // Use direct sessionId from hook, not derived from session.id
  };
}

View File

@@ -1,9 +1,5 @@
import { Button } from "@/components/atoms/Button/Button";
import {
IconKey,
IconKeyPlus,
IconUserPlus,
} from "@/components/__legacy__/ui/icons";
import { Key, Plus, UserPlus } from "@phosphor-icons/react";
import {
Select,
SelectContent,
@@ -13,7 +9,11 @@ import {
SelectValue,
} from "@/components/__legacy__/ui/select";
import useCredentials from "@/hooks/useCredentials";
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
import {
useGetV1InitiateOauthFlow,
usePostV1ExchangeOauthCodeForTokens,
} from "@/app/api/__generated__/endpoints/integrations/integrations";
import { LoginResponse } from "@/app/api/__generated__/models/loginResponse";
import {
BlockIOCredentialsSubSchema,
CredentialsMetaInput,
@@ -21,7 +21,7 @@ import {
import { cn } from "@/lib/utils";
import { getHostFromUrl } from "@/lib/utils/url";
import { NotionLogoIcon } from "@radix-ui/react-icons";
import { FC, useEffect, useMemo, useState } from "react";
import { FC, useEffect, useMemo, useState, useCallback, useRef } from "react";
import {
FaDiscord,
FaGithub,
@@ -128,9 +128,46 @@ export const CredentialsInput: FC<{
useState<AbortController | null>(null);
const [oAuthError, setOAuthError] = useState<string | null>(null);
const api = useBackendAPI();
const credentials = useCredentials(schema, siblingInputs);
// Use refs to track previous values and only recompute when they actually change
const providerRef = useRef("");
const scopesRef = useRef<string | undefined>(undefined);
// Compute current values
const currentProvider =
credentials && "provider" in credentials ? credentials.provider : "";
const currentScopes = schema.credentials_scopes?.join(",");
// Only update refs when values actually change
if (currentProvider !== providerRef.current) {
providerRef.current = currentProvider;
}
if (currentScopes !== scopesRef.current) {
scopesRef.current = currentScopes;
}
// Use stable ref values for hooks
const stableProvider = providerRef.current;
const stableScopes = scopesRef.current;
// Setup OAuth hooks with generated API endpoints (only when provider is stable)
const { refetch: initiateOauthFlow } = useGetV1InitiateOauthFlow(
stableProvider,
{
scopes: stableScopes,
},
{
query: {
enabled: false,
select: (res) => res.data as LoginResponse,
},
},
);
const { mutateAsync: oAuthCallbackMutation } =
usePostV1ExchangeOauthCodeForTokens();
// Report loaded state to parent
useEffect(() => {
if (onLoaded) {
@@ -198,12 +235,17 @@ export const CredentialsInput: FC<{
oAuthCallback,
} = credentials;
async function handleOAuthLogin() {
const handleOAuthLogin = useCallback(async () => {
setOAuthError(null);
const { login_url, state_token } = await api.oAuthLogin(
provider,
schema.credentials_scopes,
);
// Use the generated API hook to initiate OAuth flow
const { data } = await initiateOauthFlow();
if (!data || !data.login_url || !data.state_token) {
setOAuthError("Failed to initiate OAuth flow");
return;
}
const { login_url, state_token } = data;
setOAuth2FlowInProgress(true);
const popup = window.open(login_url, "_blank", "popup=true");
@@ -248,14 +290,28 @@ export const CredentialsInput: FC<{
try {
console.debug("Processing OAuth callback");
const credentials = await oAuthCallback(e.data.code, e.data.state);
console.debug("OAuth callback processed successfully");
onSelectCredentials({
id: credentials.id,
type: "oauth2",
title: credentials.title,
// Use the generated API hook for OAuth callback
const result = await oAuthCallbackMutation({
provider,
data: {
code: e.data.code,
state_token: e.data.state,
},
});
console.debug("OAuth callback processed successfully");
// Extract credential data from response
const credData = result.status === 200 ? result.data : null;
if (credData && "id" in credData) {
onSelectCredentials({
id: credData.id,
type: "oauth2",
title:
("title" in credData ? credData.title : undefined) ||
`${providerName} account`,
provider,
});
}
} catch (error) {
console.error("Error in OAuth callback:", error);
setOAuthError(
@@ -285,7 +341,13 @@ export const CredentialsInput: FC<{
},
5 * 60 * 1000,
);
}
}, [
initiateOauthFlow,
oAuthCallbackMutation,
stableProvider,
providerName,
onSelectCredentials,
]);
const ProviderIcon = providerIcons[provider] || fallbackIcon;
const modals = (
@@ -444,7 +506,7 @@ export const CredentialsInput: FC<{
.map((credentials, index) => (
<SelectItem key={index} value={credentials.id}>
<ProviderIcon className="mr-2 inline h-4 w-4" />
<IconKey className="mr-1.5 inline" />
<Key className="mr-1.5 inline" size={16} />
{credentials.title}
</SelectItem>
))}
@@ -453,7 +515,7 @@ export const CredentialsInput: FC<{
.map((credentials, index) => (
<SelectItem key={index} value={credentials.id}>
<ProviderIcon className="mr-2 inline h-4 w-4" />
<IconUserPlus className="mr-1.5 inline" />
<UserPlus className="mr-1.5 inline" size={16} />
{credentials.title}
</SelectItem>
))}
@@ -462,32 +524,32 @@ export const CredentialsInput: FC<{
.map((credentials, index) => (
<SelectItem key={index} value={credentials.id}>
<ProviderIcon className="mr-2 inline h-4 w-4" />
<IconKey className="mr-1.5 inline" />
<Key className="mr-1.5 inline" size={16} />
{credentials.title}
</SelectItem>
))}
<SelectSeparator />
{supportsOAuth2 && (
<SelectItem value="sign-in">
<IconUserPlus className="mr-1.5 inline" />
<UserPlus className="mr-1.5 inline" size={16} />
Sign in with {providerName}
</SelectItem>
)}
{supportsApiKey && (
<SelectItem value="add-api-key">
<IconKeyPlus className="mr-1.5 inline" />
<Plus className="mr-1.5 inline" size={16} weight="bold" />
Add new API key
</SelectItem>
)}
{supportsUserPassword && (
<SelectItem value="add-user-password">
<IconUserPlus className="mr-1.5 inline" />
<UserPlus className="mr-1.5 inline" size={16} />
Add new user password
</SelectItem>
)}
{supportsHostScoped && (
<SelectItem value="add-host-scoped">
<IconKey className="mr-1.5 inline" />
<Key className="mr-1.5 inline" size={16} />
Add host-scoped headers
</SelectItem>
)}

View File

@@ -4,7 +4,7 @@ import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
import { environment } from "@/services/environment";
import { loginFormSchema, LoginProvider } from "@/types/auth";
import { zodResolver } from "@hookform/resolvers/zod";
import { useRouter } from "next/navigation";
import { useRouter, useSearchParams } from "next/navigation";
import { useCallback, useEffect, useState } from "react";
import { useForm } from "react-hook-form";
import z from "zod";
@@ -14,6 +14,8 @@ export function useLoginPage() {
const [feedback, setFeedback] = useState<string | null>(null);
const [captchaKey, setCaptchaKey] = useState(0);
const router = useRouter();
const searchParams = useSearchParams();
const returnUrl = searchParams.get("returnUrl");
const { toast } = useToast();
const [isLoading, setIsLoading] = useState(false);
const [isGoogleLoading, setIsGoogleLoading] = useState(false);
@@ -140,8 +142,11 @@ export function useLoginPage() {
setIsLoading(false);
setFeedback(null);
const next =
(result?.next as string) || (result?.onboarding ? "/onboarding" : "/");
// Prioritize returnUrl from query params over backend's onboarding logic
const next = returnUrl
? returnUrl
: (result?.next as string) ||
(result?.onboarding ? "/onboarding" : "/");
if (next) router.push(next);
} catch (error) {
toast({

View File

@@ -0,0 +1,86 @@
import { environment } from "@/services/environment";
import { getServerAuthToken } from "@/lib/autogpt-server-api/helpers";
import { NextRequest } from "next/server";
/**
* SSE Proxy for chat streaming.
* EventSource doesn't support custom headers, so we need a server-side proxy
* that adds authentication and forwards the SSE stream to the client.
*/
export async function GET(
  request: NextRequest,
  { params }: { params: Promise<{ sessionId: string }> },
) {
  const { sessionId } = await params;
  const searchParams = request.nextUrl.searchParams;
  const message = searchParams.get("message");
  const isUserMessage = searchParams.get("is_user_message");

  if (!message) {
    return new Response("Missing message parameter", { status: 400 });
  }

  try {
    // Get the auth token from the server-side session. EventSource cannot
    // attach an Authorization header itself, which is why this proxy exists.
    const token = await getServerAuthToken();

    // Build the backend SSE URL. The session id is a client-supplied path
    // segment, so encode it to avoid path traversal/injection.
    const backendUrl = environment.getAGPTServerBaseUrl();
    const streamUrl = new URL(
      `/api/chat/sessions/${encodeURIComponent(sessionId)}/stream`,
      backendUrl,
    );
    streamUrl.searchParams.set("message", message);

    // Forward is_user_message only when the caller supplied it, so the
    // backend's own default still applies otherwise.
    if (isUserMessage !== null) {
      streamUrl.searchParams.set("is_user_message", isUserMessage);
    }

    // Forward the request to the backend with the auth header attached.
    const headers: Record<string, string> = {
      Accept: "text/event-stream",
      "Cache-Control": "no-cache",
      Connection: "keep-alive",
    };
    if (token) {
      headers["Authorization"] = `Bearer ${token}`;
    }

    const response = await fetch(streamUrl.toString(), {
      method: "GET",
      headers,
      // Abort the upstream request when the browser disconnects; without
      // this the backend keeps streaming to a dead client.
      signal: request.signal,
    });

    if (!response.ok) {
      const error = await response.text();
      return new Response(error, {
        status: response.status,
        // NOTE(review): assumes backend error bodies are JSON — confirm.
        headers: { "Content-Type": "application/json" },
      });
    }

    // Pipe the SSE body straight through, with headers that disable
    // buffering in intermediaries (X-Accel-Buffering targets nginx).
    return new Response(response.body, {
      headers: {
        "Content-Type": "text/event-stream",
        "Cache-Control": "no-cache, no-transform",
        Connection: "keep-alive",
        "X-Accel-Buffering": "no",
      },
    });
  } catch (error) {
    console.error("SSE proxy error:", error);
    return new Response(
      JSON.stringify({
        error: "Failed to connect to chat service",
        detail: error instanceof Error ? error.message : String(error),
      }),
      {
        status: 500,
        headers: { "Content-Type": "application/json" },
      },
    );
  }
}

View File

@@ -4795,6 +4795,181 @@
"security": [{ "APIKeyAuthenticator-X-Postmark-Webhook-Token": [] }]
}
},
"/api/chat/sessions": {
"post": {
"tags": ["v2", "chat", "chat"],
"summary": "Create Session",
"description": "Create a new chat session.\n\nInitiates a new chat session for either an authenticated or anonymous user.\n\nArgs:\n user_id: The optional authenticated user ID parsed from the JWT. If missing, creates an anonymous session.\n\nReturns:\n CreateSessionResponse: Details of the created session.",
"operationId": "postV2CreateSession",
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/CreateSessionResponse"
}
}
}
}
},
"security": [{ "HTTPBearer": [] }, { "HTTPBearer": [] }]
}
},
"/api/chat/sessions/{session_id}": {
"get": {
"tags": ["v2", "chat", "chat"],
"summary": "Get Session",
"description": "Retrieve the details of a specific chat session.\n\nLooks up a chat session by ID for the given user (if authenticated) and returns all session data including messages.\n\nArgs:\n session_id: The unique identifier for the desired chat session.\n user_id: The optional authenticated user ID, or None for anonymous access.\n\nReturns:\n SessionDetailResponse: Details for the requested session; raises NotFoundError if not found.",
"operationId": "getV2GetSession",
"security": [{ "HTTPBearer": [] }, { "HTTPBearer": [] }],
"parameters": [
{
"name": "session_id",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Session Id" }
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/SessionDetailResponse"
}
}
}
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
}
}
}
},
"/api/chat/sessions/{session_id}/stream": {
"get": {
"tags": ["v2", "chat", "chat"],
"summary": "Stream Chat",
"description": "Stream chat responses for a session.\n\nStreams the AI/completion responses in real time over Server-Sent Events (SSE), including:\n - Text fragments as they are generated\n - Tool call UI elements (if invoked)\n - Tool execution results\n\nArgs:\n session_id: The chat session identifier to associate with the streamed messages.\n message: The user's new message to process.\n user_id: Optional authenticated user ID.\n is_user_message: Whether the message is a user message.\nReturns:\n StreamingResponse: SSE-formatted response chunks.",
"operationId": "getV2StreamChat",
"security": [{ "HTTPBearer": [] }, { "HTTPBearer": [] }],
"parameters": [
{
"name": "session_id",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Session Id" }
},
{
"name": "message",
"in": "query",
"required": true,
"schema": {
"type": "string",
"minLength": 1,
"maxLength": 10000,
"title": "Message"
}
},
{
"name": "is_user_message",
"in": "query",
"required": false,
"schema": {
"type": "boolean",
"default": true,
"title": "Is User Message"
}
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
}
}
}
},
"/api/chat/sessions/{session_id}/assign-user": {
"patch": {
"tags": ["v2", "chat", "chat"],
"summary": "Session Assign User",
"description": "Assign an authenticated user to a chat session.\n\nUsed (typically post-login) to claim an existing anonymous session as the current authenticated user.\n\nArgs:\n session_id: The identifier for the (previously anonymous) session.\n user_id: The authenticated user's ID to associate with the session.\n\nReturns:\n dict: Status of the assignment.",
"operationId": "patchV2SessionAssignUser",
"security": [{ "HTTPBearer": [] }, { "HTTPBearerJWT": [] }],
"parameters": [
{
"name": "session_id",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Session Id" }
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"type": "object",
"additionalProperties": true,
"title": "Response Patchv2Sessionassignuser"
}
}
}
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
},
"401": {
"$ref": "#/components/responses/HTTP401NotAuthenticatedError"
}
}
}
},
"/api/chat/health": {
"get": {
"tags": ["v2", "chat", "chat"],
"summary": "Health Check",
"description": "Health check endpoint for the chat service.\n\nPerforms a full cycle test of session creation, assignment, and retrieval. Should always return healthy\nif the service and data layer are operational.\n\nReturns:\n dict: A status dictionary indicating health, service name, and API version.",
"operationId": "getV2HealthCheck",
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"additionalProperties": true,
"type": "object",
"title": "Response Getv2Healthcheck"
}
}
}
}
},
"security": [{ "HTTPBearer": [] }]
}
},
"/health": {
"get": {
"tags": ["health"],
@@ -5371,6 +5546,20 @@
"required": ["graph"],
"title": "CreateGraph"
},
"CreateSessionResponse": {
"properties": {
"id": { "type": "string", "title": "Id" },
"created_at": { "type": "string", "title": "Created At" },
"user_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "User Id"
}
},
"type": "object",
"required": ["id", "created_at", "user_id"],
"title": "CreateSessionResponse",
"description": "Response model containing information on a newly created chat session."
},
"Creator": {
"properties": {
"name": { "type": "string", "title": "Name" },
@@ -7500,6 +7689,26 @@
"required": ["items", "total_items", "page", "more_pages"],
"title": "SearchResponse"
},
"SessionDetailResponse": {
"properties": {
"id": { "type": "string", "title": "Id" },
"created_at": { "type": "string", "title": "Created At" },
"updated_at": { "type": "string", "title": "Updated At" },
"user_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "User Id"
},
"messages": {
"items": { "additionalProperties": true, "type": "object" },
"type": "array",
"title": "Messages"
}
},
"type": "object",
"required": ["id", "created_at", "updated_at", "user_id", "messages"],
"title": "SessionDetailResponse",
"description": "Response model providing complete details for a chat session, including messages."
},
"SetGraphActiveVersion": {
"properties": {
"active_graph_version": {
@@ -9654,7 +9863,8 @@
"type": "apiKey",
"in": "header",
"name": "X-Postmark-Webhook-Token"
}
},
"HTTPBearer": { "type": "http", "scheme": "bearer" }
},
"responses": {
"HTTP401NotAuthenticatedError": {

View File

@@ -0,0 +1,55 @@
import type { Meta, StoryObj } from "@storybook/nextjs";
import { ChatInput } from "./ChatInput";
// Storybook metadata for the ChatInput atom. All stories share a logging
// onSend handler so interactions are visible in the actions/console panel.
const meta = {
  title: "Atoms/ChatInput",
  component: ChatInput,
  parameters: {
    layout: "padded",
  },
  tags: ["autodocs"],
  args: {
    onSend: (message: string) => console.log("Message sent:", message),
  },
} satisfies Meta<typeof ChatInput>;
export default meta;
type Story = StoryObj<typeof meta>;
// Baseline enabled input.
export const Default: Story = {
  args: {
    placeholder: "Type your message...",
    disabled: false,
  },
};
// Disabled state: textarea and send button are both inert.
export const Disabled: Story = {
  args: {
    placeholder: "Type your message...",
    disabled: true,
  },
};
// Demonstrates overriding the placeholder text.
export const CustomPlaceholder: Story = {
  args: {
    placeholder: "Ask me anything about agents...",
    disabled: false,
  },
};
// Interactive story with a usage hint rendered next to the input.
export const WithText: Story = {
  render: (args) => {
    return (
      <div className="space-y-4">
        <ChatInput {...args} />
        <p className="text-sm text-neutral-600 dark:text-neutral-400">
          Try typing a message and pressing Enter to send, or Shift+Enter for a
          new line.
        </p>
      </div>
    );
  },
  args: {
    placeholder: "Type your message...",
  },
};

View File

@@ -0,0 +1,63 @@
import { cn } from "@/lib/utils";
import { PaperPlaneRight } from "@phosphor-icons/react";
import { Button } from "../Button/Button";
import { useChatInput } from "./useChatInput";
export interface ChatInputProps {
onSend: (message: string) => void;
disabled?: boolean;
placeholder?: string;
className?: string;
}
/**
 * Chat message input: auto-resizing textarea plus a send button.
 *
 * Sending is delegated to `useChatInput` (Enter sends, Shift+Enter inserts
 * a newline); the button is disabled while `disabled` is set or the input
 * is blank/whitespace-only.
 *
 * @param onSend - Receives the trimmed message text.
 * @param disabled - Disables both the textarea and the send button.
 * @param placeholder - Placeholder text for the empty textarea.
 * @param className - Extra classes merged onto the outer flex container.
 */
export function ChatInput({
  onSend,
  disabled = false,
  placeholder = "Type your message...",
  className,
}: ChatInputProps) {
  const { value, setValue, handleKeyDown, handleSend, textareaRef } =
    useChatInput({
      onSend,
      disabled,
      maxRows: 5, // grow up to 5 rows, then scroll
    });
  return (
    <div className={cn("flex gap-2", className)}>
      <textarea
        ref={textareaRef}
        value={value}
        onChange={(e) => setValue(e.target.value)}
        onKeyDown={handleKeyDown}
        placeholder={placeholder}
        disabled={disabled}
        rows={1}
        autoComplete="off"
        aria-label="Chat message input"
        aria-describedby="chat-input-hint"
        className={cn(
          "flex-1 resize-none rounded-lg border border-neutral-200 bg-white px-4 py-2 text-sm",
          "placeholder:text-neutral-400",
          "focus:border-violet-600 focus:outline-none focus:ring-2 focus:ring-violet-600/20",
          "dark:border-neutral-800 dark:bg-neutral-900 dark:text-neutral-100 dark:placeholder:text-neutral-500",
          "disabled:cursor-not-allowed disabled:opacity-50",
        )}
      />
      {/* Screen-reader-only hint wired up via aria-describedby above. */}
      <span id="chat-input-hint" className="sr-only">
        Press Enter to send, Shift+Enter for new line
      </span>
      <Button
        variant="primary"
        size="small"
        onClick={handleSend}
        disabled={disabled || !value.trim()}
        className="self-end"
        aria-label="Send message"
      >
        <PaperPlaneRight className="h-4 w-4" weight="fill" />
      </Button>
    </div>
  );
}

View File

@@ -0,0 +1,83 @@
import { KeyboardEvent, useCallback, useState, useRef, useEffect } from "react";
interface UseChatInputArgs {
onSend: (message: string) => void;
disabled?: boolean;
maxRows?: number;
}
interface UseChatInputResult {
value: string;
setValue: (value: string) => void;
handleKeyDown: (event: KeyboardEvent<HTMLTextAreaElement>) => void;
handleSend: () => void;
textareaRef: React.RefObject<HTMLTextAreaElement>;
}
/**
 * State + behavior for a chat input textarea: auto-resize up to `maxRows`,
 * Enter-to-send (Shift+Enter inserts a newline), and trimming before send.
 *
 * @param onSend - Called with the trimmed message when the user sends.
 * @param disabled - When true, sending is suppressed entirely.
 * @param maxRows - Maximum visible rows before the textarea scrolls.
 */
export function useChatInput({
  onSend,
  disabled = false,
  maxRows = 5,
}: UseChatInputArgs): UseChatInputResult {
  const [value, setValue] = useState("");
  const textareaRef = useRef<HTMLTextAreaElement>(null);

  // Auto-resize the textarea to fit content, capped at maxRows.
  useEffect(
    function autoResizeTextarea() {
      const textarea = textareaRef.current;
      if (!textarea) return;
      // Reset height so scrollHeight reflects the current content.
      textarea.style.height = "auto";
      // line-height can compute to the keyword "normal", which parseInt
      // turns into NaN (and NaN would propagate into `height: "NaNpx"`).
      // Fall back to a 1.2x font-size estimate, then a 20px floor.
      const style = window.getComputedStyle(textarea);
      let lineHeight = parseInt(style.lineHeight, 10);
      if (Number.isNaN(lineHeight)) {
        lineHeight = Math.ceil(parseInt(style.fontSize, 10) * 1.2) || 20;
      }
      const maxHeight = lineHeight * maxRows;
      const newHeight = Math.min(textarea.scrollHeight, maxHeight);
      textarea.style.height = `${newHeight}px`;
      // Only show a scrollbar once the cap is actually exceeded.
      textarea.style.overflowY =
        textarea.scrollHeight > maxHeight ? "auto" : "hidden";
    },
    [value, maxRows],
  );

  // Send the trimmed message and clear the input.
  const handleSend = useCallback(
    function handleSend() {
      if (disabled || !value.trim()) return;
      onSend(value.trim());
      setValue("");
      // Collapse the textarea immediately; the resize effect will follow
      // once the cleared value re-renders.
      if (textareaRef.current) {
        textareaRef.current.style.height = "auto";
      }
    },
    [value, onSend, disabled],
  );

  // Enter without Shift sends; Shift+Enter falls through to a newline.
  const handleKeyDown = useCallback(
    function handleKeyDown(event: KeyboardEvent<HTMLTextAreaElement>) {
      if (event.key === "Enter" && !event.shiftKey) {
        event.preventDefault();
        handleSend();
      }
    },
    [handleSend],
  );

  return {
    value,
    setValue,
    handleKeyDown,
    handleSend,
    textareaRef,
  };
}

View File

@@ -0,0 +1,323 @@
import type { Meta, StoryObj } from "@storybook/nextjs";
import { MarkdownContent } from "./MarkdownContent";
const meta = {
title: "Atoms/MarkdownContent",
component: MarkdownContent,
parameters: {
layout: "padded",
},
tags: ["autodocs"],
} satisfies Meta<typeof MarkdownContent>;
export default meta;
type Story = StoryObj<typeof meta>;
export const BasicText: Story = {
args: {
content: "This is a simple paragraph with **bold text** and *italic text*.",
},
};
export const InlineCode: Story = {
args: {
content:
"Use the `useState` hook to manage state in React components. You can also use `useEffect` for side effects.",
},
};
export const CodeBlock: Story = {
args: {
content: `Here's a code example:
\`\`\`typescript
function greet(name: string): string {
return \`Hello, \${name}!\`;
}
const message = greet("World");
console.log(message);
\`\`\`
This is a TypeScript function that returns a greeting.`,
},
};
export const Links: Story = {
args: {
content: `Check out these resources:
- [React Documentation](https://react.dev)
- [TypeScript Handbook](https://www.typescriptlang.org/docs/)
- [Tailwind CSS](https://tailwindcss.com)
All links open in new tabs for your convenience.`,
},
};
export const UnorderedList: Story = {
args: {
content: `Shopping list:
- Apples
- Bananas
- Oranges
- Grapes
- Strawberries`,
},
};
export const OrderedList: Story = {
args: {
content: `Steps to deploy:
1. Run tests locally
2. Create a pull request
3. Wait for CI to pass
4. Get code review approval
5. Merge to main
6. Deploy to production`,
},
};
export const TaskList: Story = {
args: {
content: `Project tasks:
- [x] Set up project structure
- [x] Implement authentication
- [ ] Add user dashboard
- [ ] Create admin panel
- [ ] Write documentation`,
},
};
export const Blockquote: Story = {
args: {
content: `As Einstein said:
> Imagination is more important than knowledge. Knowledge is limited. Imagination encircles the world.
This quote reminds us to think creatively.`,
},
};
export const Table: Story = {
args: {
content: `Here's a comparison table:
| Feature | Basic | Pro | Enterprise |
|---------|-------|-----|------------|
| Users | 5 | 50 | Unlimited |
| Storage | 10GB | 100GB | 1TB |
| Support | Email | Priority | 24/7 Phone |
| Price | $9/mo | $29/mo | Custom |`,
},
};
export const Headings: Story = {
args: {
content: `# Heading 1
This is the largest heading.
## Heading 2
A bit smaller.
### Heading 3
Even smaller.
#### Heading 4
Getting smaller still.
##### Heading 5
Almost the smallest.
###### Heading 6
The smallest heading.`,
},
};
export const StrikethroughAndFormatting: Story = {
args: {
content: `Text formatting options:
- **Bold text** is important
- *Italic text* is emphasized
- ~~Strikethrough~~ text is deleted
- ***Bold and italic*** is very important
- **Bold with *nested italic***`,
},
};
export const HorizontalRule: Story = {
args: {
content: `Section One
---
Section Two
---
Section Three`,
},
};
export const MixedContent: Story = {
args: {
content: `# Chat Message Example
I found **three solutions** to your problem:
## 1. Using the API
You can call the endpoint like this:
\`\`\`typescript
const response = await fetch('/api/users', {
method: 'GET',
headers: { 'Authorization': \`Bearer \${token}\` }
});
\`\`\`
## 2. Using the CLI
Alternatively, use the command line:
\`\`\`bash
cli users list --format json
\`\`\`
## 3. Manual approach
If you prefer, you can:
1. Open the dashboard
2. Navigate to *Users* section
3. Click **Export**
4. Choose JSON format
> **Note**: The API approach is recommended for automation.
For more information, check out the [documentation](https://docs.example.com).`,
},
};
export const XSSAttempt: Story = {
args: {
content: `# Security Test
This content attempts XSS attacks that should be escaped:
<script>alert('XSS')</script>
<img src="x" onerror="alert('XSS')">
<a href="javascript:alert('XSS')">Click me</a>
<style>body { background: red; }</style>
All of these should render as plain text, not execute.`,
},
};
export const MalformedMarkdown: Story = {
args: {
content: `# Unclosed Heading
**Bold without closing
\`\`\`
Code block without closing language tag
[Link with no URL]
![Image with no src]
**Nested *formatting without** proper closing*
| Table | with |
| mismatched | columns | extra |`,
},
};
export const UnicodeAndEmoji: Story = {
args: {
content: `# Unicode Support
## Emojis
🎉 🚀 💡 ✨ 🔥 👍 ❤️ 🎯 📊 🌟
## Special Characters
→ ← ↑ ↓ © ® ™ € £ ¥ § ¶ † ‡
## Other Languages
你好世界 (Chinese)
مرحبا بالعالم (Arabic)
Привет мир (Russian)
हैलो वर्ल्ड (Hindi)
All characters should render correctly.`,
},
};
export const LongCodeBlock: Story = {
args: {
content: `Here's a longer code example that tests overflow:
\`\`\`typescript
interface User {
id: string;
name: string;
email: string;
createdAt: Date;
updatedAt: Date;
roles: string[];
metadata: Record<string, unknown>;
}
function processUsers(users: User[]): Map<string, User> {
return users.reduce((acc, user) => {
acc.set(user.id, user);
return acc;
}, new Map<string, User>());
}
const users: User[] = [
{ id: '1', name: 'Alice', email: 'alice@example.com', createdAt: new Date(), updatedAt: new Date(), roles: ['admin'], metadata: {} },
{ id: '2', name: 'Bob', email: 'bob@example.com', createdAt: new Date(), updatedAt: new Date(), roles: ['user'], metadata: {} },
];
const userMap = processUsers(users);
console.log(userMap);
\`\`\`
The code block should scroll horizontally if needed.`,
},
};
export const NestedStructures: Story = {
args: {
content: `# Nested Structures
## Lists within Blockquotes
> Here's a quote with a list:
> - First item
> - Second item
> - Third item
## Blockquotes within Lists
- Regular list item
- List item with quote:
> This is a nested quote
- Another regular item
## Code in Lists
1. First step: Install dependencies
\`\`\`bash
npm install
\`\`\`
2. Second step: Run the server
\`\`\`bash
npm start
\`\`\`
3. Third step: Open browser`,
},
};

View File

@@ -0,0 +1,272 @@
"use client";
import React from "react";
import ReactMarkdown from "react-markdown";
import remarkGfm from "remark-gfm";
import { cn } from "@/lib/utils";
interface MarkdownContentProps {
content: string;
className?: string;
}
// Type definitions for ReactMarkdown component props
interface CodeProps extends React.HTMLAttributes<HTMLElement> {
children?: React.ReactNode;
className?: string;
}
interface ListProps extends React.HTMLAttributes<HTMLUListElement> {
children?: React.ReactNode;
className?: string;
}
interface ListItemProps extends React.HTMLAttributes<HTMLLIElement> {
children?: React.ReactNode;
className?: string;
}
interface InputProps extends React.InputHTMLAttributes<HTMLInputElement> {
type?: string;
}
/**
* Lightweight markdown renderer for chat messages.
*
* Security: Uses ReactMarkdown v9+ which automatically escapes HTML by default.
* HTML tags in markdown content will be rendered as text, not executed.
*
* Supports GitHub Flavored Markdown:
* - Tables
* - Task lists with checkboxes
* - Strikethrough
* - Autolinks
*
* @param content - Raw markdown string (user-generated content is safe)
* @param className - Additional Tailwind classes to apply to the container
*
* @remarks
* For full-featured markdown with math/syntax highlighting, see MarkdownRenderer
* in OutputRenderers.
*/
export function MarkdownContent({ content, className }: MarkdownContentProps) {
return (
<div className={cn("markdown-content", className)}>
<ReactMarkdown
// Security: skipHtml is true by default in react-markdown v9+
// This prevents XSS attacks by escaping any HTML in the markdown
skipHtml={true}
remarkPlugins={[remarkGfm]}
components={{
// Inline code
code: ({ children, className, ...props }: CodeProps) => {
const isInline = !className?.includes("language-");
if (isInline) {
return (
<code
className="rounded bg-zinc-100 px-1.5 py-0.5 font-mono text-sm text-zinc-800 dark:bg-zinc-800 dark:text-zinc-200"
{...props}
>
{children}
</code>
);
}
// Block code
return (
<code
className="font-mono text-sm text-zinc-100 dark:text-zinc-200"
{...props}
>
{children}
</code>
);
},
// Code blocks
pre: ({ children, ...props }) => (
<pre
className="my-2 overflow-x-auto rounded-md bg-zinc-900 p-3 dark:bg-zinc-950"
{...props}
>
{children}
</pre>
),
// Links
a: ({ children, href, ...props }) => (
<a
href={href}
target="_blank"
rel="noopener noreferrer"
className="text-purple-600 underline decoration-1 underline-offset-2 hover:text-purple-700 dark:text-purple-400 dark:hover:text-purple-300"
{...props}
>
{children}
</a>
),
// Bold
strong: ({ children, ...props }) => (
<strong className="font-semibold" {...props}>
{children}
</strong>
),
// Italic
em: ({ children, ...props }) => (
<em className="italic" {...props}>
{children}
</em>
),
// Strikethrough
del: ({ children, ...props }) => (
<del className="line-through opacity-70" {...props}>
{children}
</del>
),
// Lists
ul: ({ children, ...props }: ListProps) => (
<ul
className={cn(
"my-2 space-y-1 pl-6",
props.className?.includes("contains-task-list")
? "list-none pl-0"
: "list-disc",
)}
{...props}
>
{children}
</ul>
),
ol: ({ children, ...props }) => (
<ol className="my-2 list-decimal space-y-1 pl-6" {...props}>
{children}
</ol>
),
li: ({ children, ...props }: ListItemProps) => (
<li
className={cn(
props.className?.includes("task-list-item")
? "flex items-start"
: "",
)}
{...props}
>
{children}
</li>
),
// Task list checkboxes
input: ({ ...props }: InputProps) => {
if (props.type === "checkbox") {
return (
<input
type="checkbox"
className="mr-2 h-4 w-4 rounded border-zinc-300 text-purple-600 focus:ring-purple-500 disabled:cursor-not-allowed disabled:opacity-70 dark:border-zinc-600"
disabled
{...props}
/>
);
}
return <input {...props} />;
},
// Blockquotes
blockquote: ({ children, ...props }) => (
<blockquote
className="my-2 border-l-4 border-zinc-300 pl-3 italic text-zinc-700 dark:border-zinc-600 dark:text-zinc-300"
{...props}
>
{children}
</blockquote>
),
// Headings (smaller sizes for chat)
h1: ({ children, ...props }) => (
<h1
className="my-2 text-xl font-bold text-zinc-900 dark:text-zinc-100"
{...props}
>
{children}
</h1>
),
h2: ({ children, ...props }) => (
<h2
className="my-2 text-lg font-semibold text-zinc-800 dark:text-zinc-200"
{...props}
>
{children}
</h2>
),
h3: ({ children, ...props }) => (
<h3
className="my-1 text-base font-semibold text-zinc-800 dark:text-zinc-200"
{...props}
>
{children}
</h3>
),
h4: ({ children, ...props }) => (
<h4
className="my-1 text-sm font-medium text-zinc-700 dark:text-zinc-300"
{...props}
>
{children}
</h4>
),
h5: ({ children, ...props }) => (
<h5
className="my-1 text-sm font-medium text-zinc-700 dark:text-zinc-300"
{...props}
>
{children}
</h5>
),
h6: ({ children, ...props }) => (
<h6
className="my-1 text-xs font-medium text-zinc-600 dark:text-zinc-400"
{...props}
>
{children}
</h6>
),
// Paragraphs
p: ({ children, ...props }) => (
<p className="my-2 leading-relaxed" {...props}>
{children}
</p>
),
// Horizontal rule
hr: ({ ...props }) => (
<hr
className="my-3 border-zinc-300 dark:border-zinc-700"
{...props}
/>
),
// Tables
table: ({ children, ...props }) => (
<div className="my-2 overflow-x-auto">
<table
className="min-w-full divide-y divide-zinc-200 rounded border border-zinc-200 dark:divide-zinc-700 dark:border-zinc-700"
{...props}
>
{children}
</table>
</div>
),
th: ({ children, ...props }) => (
<th
className="bg-zinc-50 px-3 py-2 text-left text-xs font-semibold text-zinc-700 dark:bg-zinc-800 dark:text-zinc-300"
{...props}
>
{children}
</th>
),
td: ({ children, ...props }) => (
<td
className="border-t border-zinc-200 px-3 py-2 text-sm dark:border-zinc-700"
{...props}
>
{children}
</td>
),
}}
>
{content}
</ReactMarkdown>
</div>
);
}

View File

@@ -0,0 +1,51 @@
import type { Meta, StoryObj } from "@storybook/nextjs";
import { MessageBubble } from "./MessageBubble";
const meta = {
title: "Atoms/MessageBubble",
component: MessageBubble,
parameters: {
layout: "centered",
},
tags: ["autodocs"],
} satisfies Meta<typeof MessageBubble>;
export default meta;
type Story = StoryObj<typeof meta>;
export const User: Story = {
args: {
variant: "user",
children: "Hello! This is a message from the user.",
},
};
export const Assistant: Story = {
args: {
variant: "assistant",
children:
"Hi there! This is a response from the AI assistant. It can be longer and contain multiple sentences to show how the bubble handles different content lengths.",
},
};
export const UserLong: Story = {
args: {
variant: "user",
children:
"This is a much longer message from the user that demonstrates how the message bubble handles multi-line content. It should wrap nicely and maintain good readability even with lots of text. The styling should remain consistent regardless of the content length.",
},
};
export const AssistantWithCode: Story = {
args: {
variant: "assistant",
children: (
<div>
<p className="mb-2">Here&apos;s a code example:</p>
<code className="block rounded bg-neutral-100 p-2 dark:bg-neutral-800">
const greeting = &quot;Hello, world!&quot;;
</code>
</div>
),
},
};

View File

@@ -0,0 +1,28 @@
import { cn } from "@/lib/utils";
import { ReactNode } from "react";
export interface MessageBubbleProps {
children: ReactNode;
variant: "user" | "assistant";
className?: string;
}
export function MessageBubble({
children,
variant,
className,
}: MessageBubbleProps) {
return (
<div
className={cn(
"rounded-lg px-4 py-3 text-sm",
variant === "user" && "bg-violet-600 text-white dark:bg-violet-500",
variant === "assistant" &&
"border border-neutral-200 bg-white dark:border-neutral-700 dark:bg-neutral-900 dark:text-neutral-100",
className,
)}
>
{children}
</div>
);
}

View File

@@ -0,0 +1,120 @@
import type { Meta, StoryObj } from "@storybook/nextjs";
import { AgentCarouselMessage } from "./AgentCarouselMessage";
// Storybook stories for AgentCarouselMessage: a chat molecule that lists
// discovered agents as selectable cards.
const meta = {
  title: "Molecules/AgentCarouselMessage",
  component: AgentCarouselMessage,
  parameters: {
    layout: "padded",
  },
  tags: ["autodocs"],
} satisfies Meta<typeof AgentCarouselMessage>;
export default meta;
type Story = StoryObj<typeof meta>;
// Shared fixture data reused by most stories below.
const sampleAgents = [
  {
    id: "agent-1",
    name: "Data Analysis Agent",
    description:
      "Analyzes CSV and Excel files, generates insights and visualizations",
    version: 1,
  },
  {
    id: "agent-2",
    name: "Web Scraper",
    description:
      "Extracts data from websites and formats it into structured JSON",
    version: 2,
  },
  {
    id: "agent-3",
    name: "Email Automation",
    description:
      "Automates email responses based on custom rules and templates",
    version: 1,
  },
  {
    id: "agent-4",
    name: "Social Media Manager",
    description:
      "Schedules and publishes posts across multiple social media platforms",
    version: 3,
  },
];
// Single result: the header should use the singular "Agent" form.
export const SingleAgent: Story = {
  args: {
    agents: [sampleAgents[0]],
    onSelectAgent: (id) => console.log("Selected agent:", id),
  },
};
export const TwoAgents: Story = {
  args: {
    agents: sampleAgents.slice(0, 2),
    onSelectAgent: (id) => console.log("Selected agent:", id),
  },
};
export const FourAgents: Story = {
  args: {
    agents: sampleAgents,
    onSelectAgent: (id) => console.log("Selected agent:", id),
  },
};
// totalCount larger than agents.length triggers the
// "Showing X of Y results" footer.
export const ManyAgentsWithTotal: Story = {
  args: {
    agents: sampleAgents,
    totalCount: 15,
    onSelectAgent: (id) => console.log("Selected agent:", id),
  },
};
// Agents without a version: the "vN" label should be omitted.
export const WithoutVersion: Story = {
  args: {
    agents: [
      {
        id: "agent-1",
        name: "Basic Agent",
        description: "A simple agent without version information",
      },
      {
        id: "agent-2",
        name: "Another Agent",
        description: "Another agent without version",
      },
    ],
    onSelectAgent: (id) => console.log("Selected agent:", id),
  },
};
// Long descriptions should be clamped (line-clamp-2) by the card.
export const LongDescriptions: Story = {
  args: {
    agents: [
      {
        id: "agent-1",
        name: "Complex Agent",
        description:
          "This agent performs multiple complex tasks including data analysis, report generation, automated email responses, integration with third-party APIs, and much more. It's designed to handle large-scale operations efficiently.",
        version: 1,
      },
      {
        id: "agent-2",
        name: "Advanced Automation Agent",
        description:
          "An advanced automation solution that connects to various services, processes data in real-time, sends notifications, generates reports, and maintains comprehensive logs of all operations performed.",
        version: 2,
      },
    ],
    onSelectAgent: (id) => console.log("Selected agent:", id),
  },
};
// No onSelectAgent handler: cards render without the "View details" button.
export const WithoutSelectHandler: Story = {
  args: {
    agents: sampleAgents.slice(0, 2),
  },
};

View File

@@ -0,0 +1,115 @@
import React from "react";
import { Text } from "@/components/atoms/Text/Text";
import { Button } from "@/components/atoms/Button/Button";
import { Card } from "@/components/atoms/Card/Card";
import { List, Robot, ArrowRight } from "@phosphor-icons/react";
import { cn } from "@/lib/utils";
export interface Agent {
  id: string;
  name: string;
  description: string;
  version?: number;
}
export interface AgentCarouselMessageProps {
  agents: Agent[];
  totalCount?: number;
  onSelectAgent?: (agentId: string) => void;
  className?: string;
}
/**
 * Chat message that presents a list of discovered agents as cards.
 *
 * Shows a "Found N Agents" header, one card per agent (with an optional
 * "View details" action when `onSelectAgent` is provided), and a
 * "Showing X of Y results" footer when `totalCount` exceeds the number of
 * agents actually rendered.
 */
export function AgentCarouselMessage({
  agents,
  totalCount,
  onSelectAgent,
  className,
}: AgentCarouselMessageProps) {
  const displayCount = totalCount ?? agents.length;
  // Explicit undefined check: `totalCount && …` would render a literal "0"
  // in the JSX below when totalCount === 0 (falsy numbers are rendered).
  const hasMoreResults = totalCount !== undefined && totalCount > agents.length;
  return (
    <div
      className={cn(
        "mx-4 my-2 flex flex-col gap-4 rounded-lg border border-purple-200 bg-purple-50 p-6 dark:border-purple-900 dark:bg-purple-950",
        className,
      )}
    >
      {/* Header */}
      <div className="flex items-center gap-3">
        <div className="flex h-10 w-10 items-center justify-center rounded-full bg-purple-500">
          <List size={24} weight="bold" className="text-white" />
        </div>
        <div>
          <Text variant="h3" className="text-purple-900 dark:text-purple-100">
            Found {displayCount} {displayCount === 1 ? "Agent" : "Agents"}
          </Text>
          <Text
            variant="small"
            className="text-purple-700 dark:text-purple-300"
          >
            Select an agent to view details or run it
          </Text>
        </div>
      </div>
      {/* Agent Cards */}
      <div className="grid gap-3 sm:grid-cols-2">
        {agents.map((agent) => (
          <Card
            key={agent.id}
            className="border border-purple-200 bg-white p-4 dark:border-purple-800 dark:bg-purple-900"
          >
            <div className="flex gap-3">
              <div className="flex h-10 w-10 flex-shrink-0 items-center justify-center rounded-lg bg-purple-100 dark:bg-purple-800">
                <Robot size={20} weight="bold" className="text-purple-600" />
              </div>
              <div className="flex-1 space-y-2">
                <div>
                  <Text
                    variant="body"
                    className="font-semibold text-purple-900 dark:text-purple-100"
                  >
                    {agent.name}
                  </Text>
                  {/* != null (not `&&` on the bare number) so version 0 never
                      leaks a stray "0" into the markup */}
                  {agent.version != null && (
                    <Text
                      variant="small"
                      className="text-purple-600 dark:text-purple-400"
                    >
                      v{agent.version}
                    </Text>
                  )}
                </div>
                <Text
                  variant="small"
                  className="line-clamp-2 text-purple-700 dark:text-purple-300"
                >
                  {agent.description}
                </Text>
                {onSelectAgent && (
                  <Button
                    onClick={() => onSelectAgent(agent.id)}
                    variant="ghost"
                    className="mt-2 flex items-center gap-1 p-0 text-sm text-purple-600 hover:text-purple-800 dark:text-purple-400 dark:hover:text-purple-200"
                  >
                    View details
                    <ArrowRight size={16} weight="bold" />
                  </Button>
                )}
              </div>
            </div>
          </Card>
        ))}
      </div>
      {hasMoreResults && (
        <Text
          variant="small"
          className="text-center text-purple-600 dark:text-purple-400"
        >
          Showing {agents.length} of {totalCount} results
        </Text>
      )}
    </div>
  );
}

View File

@@ -0,0 +1,129 @@
"use client";
import React from "react";
import { useRouter } from "next/navigation";
import { Button } from "@/components/atoms/Button/Button";
import { SignIn, UserPlus, Shield } from "@phosphor-icons/react";
import { cn } from "@/lib/utils";
export interface AuthPromptWidgetProps {
  message: string;
  sessionId: string;
  agentInfo?: {
    graph_id: string;
    name: string;
    trigger_type: string;
  };
  returnUrl?: string;
  className?: string;
}
/**
 * Inline chat widget prompting an anonymous user to sign in or sign up.
 *
 * Before navigating to either auth route, the current chat session id (and
 * pending agent setup info, if any) is stored in localStorage so the session
 * can be restored once authentication completes.
 */
export function AuthPromptWidget({
  message,
  sessionId,
  agentInfo,
  returnUrl = "/chat",
  className,
}: AuthPromptWidgetProps) {
  const router = useRouter();
  // Shared by both buttons — sign-in and sign-up differ only in the route.
  function redirectToAuth(authPath: "/login" | "/signup") {
    // Store session info to return after auth
    if (typeof window !== "undefined") {
      localStorage.setItem("pending_chat_session", sessionId);
      if (agentInfo) {
        localStorage.setItem("pending_agent_setup", JSON.stringify(agentInfo));
      }
    }
    // Build return URL with session ID (using session_id to match chat page parameter)
    const returnUrlWithSession = `${returnUrl}?session_id=${sessionId}`;
    const encodedReturnUrl = encodeURIComponent(returnUrlWithSession);
    router.push(`${authPath}?returnUrl=${encodedReturnUrl}`);
  }
  function handleSignIn() {
    redirectToAuth("/login");
  }
  function handleSignUp() {
    redirectToAuth("/signup");
  }
  return (
    <div
      className={cn(
        "my-4 overflow-hidden rounded-lg border border-violet-200 dark:border-violet-800",
        "bg-gradient-to-br from-violet-50 to-purple-50 dark:from-violet-950/30 dark:to-purple-950/30",
        "duration-500 animate-in fade-in-50 slide-in-from-bottom-2",
        className,
      )}
    >
      <div className="px-6 py-5">
        <div className="mb-4 flex items-center gap-3">
          <div className="flex h-10 w-10 items-center justify-center rounded-full bg-violet-600">
            <Shield size={20} weight="fill" className="text-white" />
          </div>
          <div>
            <h3 className="text-lg font-semibold text-neutral-900 dark:text-neutral-100">
              Authentication Required
            </h3>
            <p className="text-sm text-neutral-600 dark:text-neutral-400">
              Sign in to set up and manage agents
            </p>
          </div>
        </div>
        <div className="mb-5 rounded-md bg-white/50 p-4 dark:bg-neutral-900/50">
          <p className="text-sm text-neutral-700 dark:text-neutral-300">
            {message}
          </p>
          {agentInfo && (
            <div className="mt-3 text-xs text-neutral-600 dark:text-neutral-400">
              <p>
                Ready to set up:{" "}
                <span className="font-medium">{agentInfo.name}</span>
              </p>
              <p>
                Type:{" "}
                <span className="font-medium">{agentInfo.trigger_type}</span>
              </p>
            </div>
          )}
        </div>
        <div className="flex gap-3">
          <Button
            onClick={handleSignIn}
            variant="primary"
            size="small"
            className="flex-1"
          >
            <SignIn size={16} weight="bold" className="mr-2" />
            Sign In
          </Button>
          <Button
            onClick={handleSignUp}
            variant="secondary"
            size="small"
            className="flex-1"
          >
            <UserPlus size={16} weight="bold" className="mr-2" />
            Create Account
          </Button>
        </div>
        <div className="mt-4 text-center text-xs text-neutral-500 dark:text-neutral-500">
          Your chat session will be preserved after signing in
        </div>
      </div>
    </div>
  );
}

View File

@@ -0,0 +1,2 @@
// Barrel file: re-export the AuthPromptWidget component and its props type.
export { AuthPromptWidget } from "./AuthPromptWidget";
export type { AuthPromptWidgetProps } from "./AuthPromptWidget";

View File

@@ -0,0 +1,119 @@
import type { Meta, StoryObj } from "@storybook/nextjs";
import { ChatMessage } from "./ChatMessage";
// Storybook stories for ChatMessage covering plain user/assistant messages,
// relative timestamps, and a composed multi-turn conversation.
const meta = {
  title: "Molecules/ChatMessage",
  component: ChatMessage,
  parameters: {
    layout: "padded",
  },
  tags: ["autodocs"],
} satisfies Meta<typeof ChatMessage>;
export default meta;
type Story = StoryObj<typeof meta>;
export const UserMessage: Story = {
  args: {
    message: {
      type: "message",
      role: "user",
      content: "Hello! How can you help me today?",
      timestamp: new Date(Date.now() - 2 * 60 * 1000), // 2 minutes ago
    },
  },
};
export const AssistantMessage: Story = {
  args: {
    message: {
      type: "message",
      role: "assistant",
      content:
        "I can help you discover and run AI agents! I can search for agents, explain what they do, help you set them up, and run them for you. What would you like to do?",
      timestamp: new Date(Date.now() - 1 * 60 * 1000), // 1 minute ago
    },
  },
};
export const UserMessageLong: Story = {
  args: {
    message: {
      type: "message",
      role: "user",
      content:
        "I'm looking for an agent that can help me analyze data from a CSV file. I have sales data for the last quarter and I want to understand trends, identify top-performing products, and get recommendations for inventory management. Can you help me find something suitable?",
      timestamp: new Date(Date.now() - 5 * 60 * 1000), // 5 minutes ago
    },
  },
};
export const AssistantMessageLong: Story = {
  args: {
    message: {
      type: "message",
      role: "assistant",
      content:
        "Great! I found several agents that can help with data analysis. The 'CSV Data Analyzer' agent is perfect for your needs. It can:\n\n1. Parse and validate CSV files\n2. Generate statistical summaries\n3. Identify trends and patterns\n4. Create visualizations\n5. Provide actionable insights\n\nWould you like me to set this up for you?",
      timestamp: new Date(Date.now() - 30 * 1000), // 30 seconds ago
    },
  },
};
// No timestamp: the component should fall back to the "Just now" label.
export const JustNow: Story = {
  args: {
    message: {
      type: "message",
      role: "user",
      content: "Yes, please set it up!",
    },
  },
};
// Custom render composing several ChatMessage instances into a thread;
// the args entry only satisfies the Story type and is not used.
export const Conversation: Story = {
  args: {
    message: {
      type: "message",
      role: "user",
      content: "",
    },
  },
  render: () => (
    <div className="w-full max-w-2xl space-y-0">
      <ChatMessage
        message={{
          type: "message",
          role: "user",
          content: "Find me automation agents",
          timestamp: new Date(Date.now() - 10 * 60 * 1000),
        }}
      />
      <ChatMessage
        message={{
          type: "message",
          role: "assistant",
          content:
            "I found 15 automation agents. Here are the top 3:\n\n1. Email Automation Agent\n2. Social Media Scheduler\n3. Workflow Automator\n\nWould you like details on any of these?",
          timestamp: new Date(Date.now() - 9 * 60 * 1000),
        }}
      />
      <ChatMessage
        message={{
          type: "message",
          role: "user",
          content: "Tell me about the Email Automation Agent",
          timestamp: new Date(Date.now() - 8 * 60 * 1000),
        }}
      />
      <ChatMessage
        message={{
          type: "message",
          role: "assistant",
          content:
            "The Email Automation Agent helps you automate email workflows. It can send scheduled emails, respond to incoming messages based on rules, and integrate with your calendar for meeting reminders.",
          timestamp: new Date(Date.now() - 7 * 60 * 1000),
        }}
      />
    </div>
  ),
};

View File

@@ -0,0 +1,249 @@
import { cn } from "@/lib/utils";
import { Robot, User, CheckCircle } from "@phosphor-icons/react";
import { useRouter } from "next/navigation";
import { useCallback } from "react";
import { MessageBubble } from "@/components/atoms/MessageBubble/MessageBubble";
import { MarkdownContent } from "@/components/atoms/MarkdownContent/MarkdownContent";
import { ToolCallMessage } from "@/components/molecules/ToolCallMessage/ToolCallMessage";
import { ToolResponseMessage } from "@/components/molecules/ToolResponseMessage/ToolResponseMessage";
import { AuthPromptWidget } from "@/components/molecules/AuthPromptWidget/AuthPromptWidget";
import { ChatCredentialsSetup } from "@/app/(platform)/chat/components/ChatCredentialsSetup/ChatCredentialsSetup";
import { NoResultsMessage } from "@/components/molecules/NoResultsMessage/NoResultsMessage";
import { AgentCarouselMessage } from "@/components/molecules/AgentCarouselMessage/AgentCarouselMessage";
import { ExecutionStartedMessage } from "@/components/molecules/ExecutionStartedMessage/ExecutionStartedMessage";
import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
import { useChatMessage, type ChatMessageData } from "./useChatMessage";
export interface ChatMessageProps {
message: ChatMessageData;
className?: string;
onDismissLogin?: () => void;
onDismissCredentials?: () => void;
onSendMessage?: (content: string, isUserMessage?: boolean) => void;
}
export function ChatMessage({
message,
className,
onDismissLogin,
onDismissCredentials,
onSendMessage,
}: ChatMessageProps) {
const router = useRouter();
const { user } = useSupabase();
const {
formattedTimestamp,
isUser,
isAssistant,
isToolCall,
isToolResponse,
isLoginNeeded,
isCredentialsNeeded,
isNoResults,
isAgentCarousel,
isExecutionStarted,
} = useChatMessage(message);
function handleLogin() {
// Save current path to return after login
const currentPath = window.location.pathname + window.location.search;
sessionStorage.setItem("post_login_redirect", currentPath);
router.push("/login");
}
function handleContinueAsGuest() {
// Dismiss the login prompt
if (onDismissLogin) {
onDismissLogin();
}
}
const handleAllCredentialsComplete = useCallback(
function handleAllCredentialsComplete() {
// Send a user message that explicitly asks to retry the setup
// This ensures the LLM calls get_required_setup_info again and proceeds with execution
if (onSendMessage) {
onSendMessage(
"I've configured the required credentials. Please check if everything is ready and proceed with setting up the agent.",
);
}
// Optionally dismiss the credentials prompt
if (onDismissCredentials) {
onDismissCredentials();
}
},
[onSendMessage, onDismissCredentials],
);
function handleCancelCredentials() {
// Dismiss the credentials prompt
if (onDismissCredentials) {
onDismissCredentials();
}
}
// Render credentials needed messages
if (isCredentialsNeeded && message.type === "credentials_needed") {
return (
<ChatCredentialsSetup
credentials={message.credentials}
agentName={message.agentName}
message={message.message}
onAllCredentialsComplete={handleAllCredentialsComplete}
onCancel={handleCancelCredentials}
className={className}
/>
);
}
// Render login needed messages
if (isLoginNeeded && message.type === "login_needed") {
// If user is already logged in, show success message instead of auth prompt
if (user) {
return (
<div className={cn("px-4 py-2", className)}>
<div className="my-4 overflow-hidden rounded-lg border border-green-200 bg-gradient-to-br from-green-50 to-emerald-50 dark:border-green-800 dark:from-green-950/30 dark:to-emerald-950/30">
<div className="px-6 py-4">
<div className="flex items-center gap-3">
<div className="flex h-10 w-10 items-center justify-center rounded-full bg-green-600">
<CheckCircle size={20} weight="fill" className="text-white" />
</div>
<div>
<h3 className="text-lg font-semibold text-neutral-900 dark:text-neutral-100">
Successfully Authenticated
</h3>
<p className="text-sm text-neutral-600 dark:text-neutral-400">
You're now signed in and ready to continue
</p>
</div>
</div>
</div>
</div>
</div>
);
}
// Show auth prompt if not logged in
return (
<div className={cn("px-4 py-2", className)}>
<AuthPromptWidget
message={message.message}
sessionId={message.sessionId}
agentInfo={message.agentInfo}
returnUrl="/chat"
/>
</div>
);
}
// Render tool call messages
if (isToolCall && message.type === "tool_call") {
return (
<div className={cn("px-4 py-2", className)}>
<ToolCallMessage
toolId={message.toolId}
toolName={message.toolName}
arguments={message.arguments}
/>
</div>
);
}
// Render tool response messages
if (isToolResponse && message.type === "tool_response") {
return (
<div className={cn("px-4 py-2", className)}>
<ToolResponseMessage
toolId={message.toolId}
toolName={message.toolName}
result={message.result}
success={message.success}
/>
</div>
);
}
// Render no results messages
if (isNoResults && message.type === "no_results") {
return (
<NoResultsMessage
message={message.message}
suggestions={message.suggestions}
className={className}
/>
);
}
// Render agent carousel messages
if (isAgentCarousel && message.type === "agent_carousel") {
return (
<AgentCarouselMessage
agents={message.agents}
totalCount={message.totalCount}
className={className}
/>
);
}
// Render execution started messages
if (isExecutionStarted && message.type === "execution_started") {
return (
<ExecutionStartedMessage
executionId={message.executionId}
agentName={message.agentName}
message={message.message}
className={className}
/>
);
}
// Render regular chat messages
if (message.type === "message") {
return (
<div
className={cn(
"flex gap-3 px-4 py-4",
isUser && "flex-row-reverse",
className,
)}
>
{/* Avatar */}
<div className="flex-shrink-0">
<div
className={cn(
"flex h-8 w-8 items-center justify-center rounded-full",
isUser && "bg-zinc-200 dark:bg-zinc-700",
isAssistant && "bg-purple-600 dark:bg-purple-500",
)}
>
{isUser ? (
<User className="h-5 w-5 text-zinc-700 dark:text-zinc-200" />
) : (
<Robot className="h-5 w-5 text-white" />
)}
</div>
</div>
{/* Message Content */}
<div className={cn("flex max-w-[70%] flex-col", isUser && "items-end")}>
<MessageBubble variant={isUser ? "user" : "assistant"}>
<MarkdownContent content={message.content} />
</MessageBubble>
{/* Timestamp */}
<span
className={cn(
"mt-1 text-xs text-zinc-500 dark:text-zinc-400",
isUser && "text-right",
)}
>
{formattedTimestamp}
</span>
</div>
</div>
);
}
// Fallback for unknown message types
return null;
}

View File

@@ -0,0 +1,108 @@
import { formatDistanceToNow } from "date-fns";
import type { ToolArguments, ToolResult } from "@/types/chat";
// Discriminated union of everything that can appear in the chat transcript.
// `type` is the discriminant; every variant carries an optional timestamp.
export type ChatMessageData =
  | {
      // Plain conversational text from the user, assistant, or system.
      type: "message";
      role: "user" | "assistant" | "system";
      content: string;
      timestamp?: string | Date;
    }
  | {
      // The assistant invoked a tool; arguments echo what was sent.
      type: "tool_call";
      toolId: string;
      toolName: string;
      arguments?: ToolArguments;
      timestamp?: string | Date;
    }
  | {
      // Result of a previous tool call, matched by toolId.
      type: "tool_response";
      toolId: string;
      toolName: string;
      result: ToolResult;
      success?: boolean;
      timestamp?: string | Date;
    }
  | {
      // Prompt shown when an anonymous user must sign in to continue.
      type: "login_needed";
      message: string;
      sessionId: string;
      agentInfo?: {
        graph_id: string;
        name: string;
        trigger_type: string;
      };
      timestamp?: string | Date;
    }
  | {
      // Prompt listing the provider credentials the user must configure.
      type: "credentials_needed";
      credentials: Array<{
        provider: string;
        providerName: string;
        credentialType: "api_key" | "oauth2" | "user_password" | "host_scoped";
        title: string;
        scopes?: string[];
      }>;
      message: string;
      agentName?: string;
      timestamp?: string | Date;
    }
  | {
      // An agent search returned nothing; suggestions offer next steps.
      type: "no_results";
      message: string;
      suggestions?: string[];
      sessionId?: string;
      timestamp?: string | Date;
    }
  | {
      // A set of agent search results rendered as a card carousel.
      type: "agent_carousel";
      agents: Array<{
        id: string;
        name: string;
        description: string;
        version?: number;
      }>;
      totalCount?: number;
      timestamp?: string | Date;
    }
  | {
      // Confirmation that an agent execution was kicked off.
      type: "execution_started";
      executionId: string;
      agentName?: string;
      message?: string;
      timestamp?: string | Date;
    };
// Flags derived from ChatMessageData for the render-time dispatch in
// ChatMessage. At most one `is*` flag is true for a given message.
interface UseChatMessageResult {
  // Human-readable relative time, or "Just now" when no timestamp exists.
  formattedTimestamp: string;
  isUser: boolean;
  isAssistant: boolean;
  isSystem: boolean;
  isToolCall: boolean;
  isToolResponse: boolean;
  isLoginNeeded: boolean;
  isCredentialsNeeded: boolean;
  isNoResults: boolean;
  isAgentCarousel: boolean;
  isExecutionStarted: boolean;
}
/**
 * Derives render-ready flags and a human-readable relative timestamp from a
 * chat message. Role flags are only true for plain "message" entries; every
 * other flag mirrors the union discriminant.
 */
export function useChatMessage(message: ChatMessageData): UseChatMessageResult {
  const { type } = message;
  // Role only exists on the plain-message variant.
  const role = type === "message" ? message.role : undefined;
  return {
    // Fall back to "Just now" when no timestamp was provided.
    formattedTimestamp: message.timestamp
      ? formatDistanceToNow(new Date(message.timestamp), { addSuffix: true })
      : "Just now",
    isUser: role === "user",
    isAssistant: role === "assistant",
    isSystem: role === "system",
    isToolCall: type === "tool_call",
    isToolResponse: type === "tool_response",
    isLoginNeeded: type === "login_needed",
    isCredentialsNeeded: type === "credentials_needed",
    isNoResults: type === "no_results",
    isAgentCarousel: type === "agent_carousel",
    isExecutionStarted: type === "execution_started",
  };
}

View File

@@ -0,0 +1,64 @@
import type { Meta, StoryObj } from "@storybook/nextjs";
import { ExecutionStartedMessage } from "./ExecutionStartedMessage";
// Storybook stories for ExecutionStartedMessage: the green confirmation card
// shown after an agent execution is kicked off.
const meta = {
  title: "Molecules/ExecutionStartedMessage",
  component: ExecutionStartedMessage,
  parameters: {
    layout: "padded",
  },
  tags: ["autodocs"],
} satisfies Meta<typeof ExecutionStartedMessage>;
export default meta;
type Story = StoryObj<typeof meta>;
export const Default: Story = {
  args: {
    executionId: "exec-123e4567-e89b-12d3-a456-426614174000",
    agentName: "Data Analysis Agent",
    onViewExecution: () => console.log("View execution clicked"),
  },
};
// Without agentName the "Agent:" detail row should be hidden.
export const WithoutAgentName: Story = {
  args: {
    executionId: "exec-987f6543-a21b-45c6-b789-123456789abc",
    onViewExecution: () => console.log("View execution clicked"),
  },
};
export const CustomMessage: Story = {
  args: {
    executionId: "exec-456a7890-b12c-34d5-e678-901234567def",
    agentName: "Email Automation Agent",
    message: "Your email automation agent is now processing emails",
    onViewExecution: () => console.log("View execution clicked"),
  },
};
// No onViewExecution: the action button should not render.
export const WithoutViewButton: Story = {
  args: {
    executionId: "exec-789b1234-c56d-78e9-f012-345678901abc",
    agentName: "Social Media Manager",
  },
};
export const LongAgentName: Story = {
  args: {
    executionId: "exec-321d5678-e90f-12a3-b456-789012345cde",
    agentName:
      "Advanced Multi-Platform Social Media Content Manager and Analytics Agent",
    message:
      "Your advanced automation agent has started processing multiple tasks in the background",
    onViewExecution: () => console.log("View execution clicked"),
  },
};
// Short id: should display without a trailing ellipsis (truncation guard).
export const ShortExecutionId: Story = {
  args: {
    executionId: "exec-123",
    agentName: "Quick Agent",
    onViewExecution: () => console.log("View execution clicked"),
  },
};

View File

@@ -0,0 +1,106 @@
import React from "react";
import { Text } from "@/components/atoms/Text/Text";
import { Button } from "@/components/atoms/Button/Button";
import { CheckCircle, Play, ArrowSquareOut } from "@phosphor-icons/react";
import { cn } from "@/lib/utils";
export interface ExecutionStartedMessageProps {
  executionId: string;
  agentName?: string;
  message?: string;
  onViewExecution?: () => void;
  className?: string;
}
/**
 * Green confirmation card shown in the chat after an agent execution starts.
 *
 * Displays the (optionally truncated) execution id, the agent name when
 * known, and a "View Execution" action when `onViewExecution` is provided.
 */
export function ExecutionStartedMessage({
  executionId,
  agentName,
  message = "Agent execution started successfully",
  onViewExecution,
  className,
}: ExecutionStartedMessageProps) {
  // Only append an ellipsis when the id is actually truncated; the previous
  // unconditional `slice(0, 16) + "..."` turned short ids like "exec-123"
  // into "exec-123...".
  const displayedId =
    executionId.length > 16 ? `${executionId.slice(0, 16)}...` : executionId;
  return (
    <div
      className={cn(
        "mx-4 my-2 flex flex-col gap-4 rounded-lg border border-green-200 bg-green-50 p-6 dark:border-green-900 dark:bg-green-950",
        className,
      )}
    >
      {/* Icon & Header */}
      <div className="flex items-start gap-4">
        <div className="flex h-12 w-12 flex-shrink-0 items-center justify-center rounded-full bg-green-500">
          <CheckCircle size={24} weight="bold" className="text-white" />
        </div>
        <div className="flex-1">
          <Text
            variant="h3"
            className="mb-1 text-green-900 dark:text-green-100"
          >
            Execution Started
          </Text>
          <Text variant="body" className="text-green-700 dark:text-green-300">
            {message}
          </Text>
        </div>
      </div>
      {/* Details */}
      <div className="rounded-md bg-green-100 p-4 dark:bg-green-900">
        <div className="space-y-2">
          {agentName && (
            <div className="flex items-center justify-between">
              <Text
                variant="small"
                className="font-semibold text-green-900 dark:text-green-100"
              >
                Agent:
              </Text>
              <Text
                variant="body"
                className="text-green-800 dark:text-green-200"
              >
                {agentName}
              </Text>
            </div>
          )}
          <div className="flex items-center justify-between">
            <Text
              variant="small"
              className="font-semibold text-green-900 dark:text-green-100"
            >
              Execution ID:
            </Text>
            <Text
              variant="small"
              className="font-mono text-green-800 dark:text-green-200"
            >
              {displayedId}
            </Text>
          </div>
        </div>
      </div>
      {/* Action Buttons */}
      {onViewExecution && (
        <div className="flex gap-3">
          <Button
            onClick={onViewExecution}
            variant="primary"
            className="flex flex-1 items-center justify-center gap-2"
          >
            <ArrowSquareOut size={20} weight="bold" />
            View Execution
          </Button>
        </div>
      )}
      <div className="flex items-center gap-2 text-green-600 dark:text-green-400">
        <Play size={16} weight="fill" />
        <Text variant="small">
          Your agent is now running. You can monitor its progress in the monitor
          page.
        </Text>
      </div>
    </div>
  );
}

View File

@@ -0,0 +1,434 @@
import type { Meta, StoryObj } from "@storybook/nextjs";
import { MessageList } from "./MessageList";
import { useEffect, useState } from "react";
// Storybook stories for MessageList: the scrollable transcript that renders
// ChatMessage entries plus an optional in-flight streaming message.
const meta = {
  title: "Molecules/MessageList",
  component: MessageList,
  parameters: {
    layout: "fullscreen",
  },
  tags: ["autodocs"],
  decorators: [
    (Story) => (
      <div className="h-screen p-4">
        <Story />
      </div>
    ),
  ],
} satisfies Meta<typeof MessageList>;
export default meta;
type Story = StoryObj<typeof meta>;
// Shared fixture conversation reused by several stories below.
const sampleMessages = [
  {
    type: "message" as const,
    role: "user" as const,
    content: "Hello! How can you help me today?",
    timestamp: new Date(Date.now() - 10 * 60 * 1000),
  },
  {
    type: "message" as const,
    role: "assistant" as const,
    content:
      "I can help you discover and run AI agents! What would you like to do?",
    timestamp: new Date(Date.now() - 9 * 60 * 1000),
  },
  {
    type: "message" as const,
    role: "user" as const,
    content: "Find me automation agents",
    timestamp: new Date(Date.now() - 8 * 60 * 1000),
  },
  {
    type: "message" as const,
    role: "assistant" as const,
    content: "I found 15 automation agents. Here are the top 3...",
    timestamp: new Date(Date.now() - 7 * 60 * 1000),
  },
];
export const Empty: Story = {
  args: {
    messages: [],
  },
};
export const FewMessages: Story = {
  args: {
    messages: sampleMessages.slice(0, 2),
  },
};
export const ManyMessages: Story = {
  args: {
    messages: sampleMessages,
  },
};
// Static streaming state: chunks are shown as an in-progress message.
export const WithStreaming: Story = {
  args: {
    messages: sampleMessages,
    isStreaming: true,
    streamingChunks: [
      "Let me ",
      "help you ",
      "with that. ",
      "I can show you ",
      "the details...",
    ],
  },
};
// Live demo: after a 1s delay, words are streamed one at a time, then the
// finished text is committed as a regular assistant message.
export const SimulatedConversation: Story = {
  args: {
    messages: [],
  },
  render: () => {
    const [messages, setMessages] = useState(sampleMessages);
    const [streamingChunks, setStreamingChunks] = useState<string[]>([]);
    const [isStreaming, setIsStreaming] = useState(false);
    useEffect(function simulateConversation() {
      // Track every pending timer so unmount cancels the whole simulation.
      // The previous version only cleared the outer timeout, leaking the
      // interval (and the trailing timeout) and causing setState calls after
      // the story unmounted.
      let interval: ReturnType<typeof setInterval> | undefined;
      let finishTimer: ReturnType<typeof setTimeout> | undefined;
      const timer = setTimeout(() => {
        setIsStreaming(true);
        const fullText =
          "This is a simulated streaming response that demonstrates the auto-scroll behavior and real-time message updates!";
        const words = fullText.split(" ");
        let index = 0;
        interval = setInterval(() => {
          if (index < words.length) {
            // Capture the word before scheduling the state update: the
            // updater runs asynchronously, after index has been incremented,
            // so reading words[index] inside it skipped the first word and
            // appended "undefined " at the end.
            const word = words[index];
            index++;
            setStreamingChunks((prev) => [...prev, word + " "]);
          } else {
            clearInterval(interval);
            finishTimer = setTimeout(() => {
              setIsStreaming(false);
              setMessages((prev) => [
                ...prev,
                {
                  type: "message",
                  role: "assistant",
                  content: fullText,
                  timestamp: new Date(),
                },
              ]);
              setStreamingChunks([]);
            }, 500);
          }
        }, 100);
      }, 1000);
      return () => {
        clearTimeout(timer);
        if (interval !== undefined) clearInterval(interval);
        if (finishTimer !== undefined) clearTimeout(finishTimer);
      };
    }, []);
    return (
      <MessageList
        messages={messages}
        streamingChunks={streamingChunks}
        isStreaming={isStreaming}
      />
    );
  },
};
// Triplicated sample conversation to exercise scrolling behavior.
export const LongConversation: Story = {
  args: {
    messages: [
      ...sampleMessages,
      ...sampleMessages.map((msg, i) => ({
        ...msg,
        timestamp: new Date(Date.now() - (6 - i) * 60 * 1000),
      })),
      ...sampleMessages.map((msg, i) => ({
        ...msg,
        timestamp: new Date(Date.now() - (2 - i) * 60 * 1000),
      })),
    ],
  },
};
// tool_call entries interleaved with plain messages.
export const WithToolCalls: Story = {
  args: {
    messages: [
      {
        type: "message" as const,
        role: "user" as const,
        content: "Find me data analysis agents",
        timestamp: new Date(Date.now() - 5 * 60 * 1000),
      },
      {
        type: "tool_call" as const,
        toolId: "tool-123e4567-e89b-12d3-a456-426614174000",
        toolName: "find_agent",
        arguments: { query: "data analysis" },
        timestamp: new Date(Date.now() - 4 * 60 * 1000),
      },
      {
        type: "message" as const,
        role: "assistant" as const,
        content: "I found several data analysis agents for you!",
        timestamp: new Date(Date.now() - 3 * 60 * 1000),
      },
    ],
  },
};
// A tool_call followed by its matching tool_response (same toolId).
export const WithToolResponses: Story = {
  args: {
    messages: [
      {
        type: "message" as const,
        role: "user" as const,
        content: "Get details about this agent",
        timestamp: new Date(Date.now() - 5 * 60 * 1000),
      },
      {
        type: "tool_call" as const,
        toolId: "tool-456a7890-b12c-34d5-e678-901234567def",
        toolName: "get_agent_details",
        arguments: { agent_id: "agent-123" },
        timestamp: new Date(Date.now() - 4 * 60 * 1000),
      },
      {
        type: "tool_response" as const,
        toolId: "tool-456a7890-b12c-34d5-e678-901234567def",
        toolName: "get_agent_details",
        result: {
          name: "Data Analysis Agent",
          description: "Analyzes CSV and Excel files",
          version: 1,
        },
        success: true,
        timestamp: new Date(Date.now() - 3 * 60 * 1000),
      },
    ],
  },
};
// credentials_needed entry rendering the credentials setup flow.
export const WithCredentialsPrompt: Story = {
  args: {
    messages: [
      {
        type: "message" as const,
        role: "user" as const,
        content: "Run the GitHub agent",
        timestamp: new Date(Date.now() - 3 * 60 * 1000),
      },
      {
        type: "credentials_needed" as const,
        credentials: [
          {
            provider: "github",
            providerName: "GitHub",
            credentialType: "oauth2" as const,
            title: "GitHub Integration",
          },
        ],
        agentName: "GitHub Integration Agent",
        message:
          "To run GitHub Integration Agent, you need to add credentials.",
        timestamp: new Date(Date.now() - 2 * 60 * 1000),
      },
    ],
  },
};
// no_results entry with follow-up suggestions.
export const WithNoResults: Story = {
  args: {
    messages: [
      {
        type: "message" as const,
        role: "user" as const,
        content: "Find crypto mining agents",
        timestamp: new Date(Date.now() - 3 * 60 * 1000),
      },
      {
        type: "tool_call" as const,
        toolId: "tool-789b1234-c56d-78e9-f012-345678901abc",
        toolName: "find_agent",
        arguments: { query: "crypto mining" },
        timestamp: new Date(Date.now() - 2 * 60 * 1000),
      },
      {
        type: "no_results" as const,
        message:
          "No agents found matching 'crypto mining'. Try different keywords or browse the marketplace.",
        suggestions: [
          "Try more general terms",
          "Browse categories in the marketplace",
          "Check spelling",
        ],
        timestamp: new Date(Date.now() - 1 * 60 * 1000),
      },
    ],
  },
};
// agent_carousel entry with a totalCount larger than the visible agents.
export const WithAgentCarousel: Story = {
  args: {
    messages: [
      {
        type: "message" as const,
        role: "user" as const,
        content: "Find automation agents",
        timestamp: new Date(Date.now() - 3 * 60 * 1000),
      },
      {
        type: "tool_call" as const,
        toolId: "tool-321d5678-e90f-12a3-b456-789012345cde",
        toolName: "find_agent",
        arguments: { query: "automation" },
        timestamp: new Date(Date.now() - 2 * 60 * 1000),
      },
      {
        type: "agent_carousel" as const,
        agents: [
          {
            id: "agent-1",
            name: "Email Automation",
            description:
              "Automates email responses based on custom rules and templates",
            version: 1,
          },
          {
            id: "agent-2",
            name: "Social Media Manager",
            description:
              "Schedules and publishes posts across multiple platforms",
            version: 2,
          },
          {
            id: "agent-3",
            name: "Data Sync Agent",
            description: "Syncs data between different services automatically",
            version: 1,
          },
        ],
        totalCount: 15,
        timestamp: new Date(Date.now() - 1 * 60 * 1000),
      },
    ],
  },
};
// execution_started confirmation entry after a run_agent tool call.
export const WithExecutionStarted: Story = {
  args: {
    messages: [
      {
        type: "message" as const,
        role: "user" as const,
        content: "Run the data analysis agent",
        timestamp: new Date(Date.now() - 3 * 60 * 1000),
      },
      {
        type: "tool_call" as const,
        toolId: "tool-654f9876-a54b-32c1-d765-432109876fed",
        toolName: "run_agent",
        arguments: { agent_id: "agent-123", input: { file: "data.csv" } },
        timestamp: new Date(Date.now() - 2 * 60 * 1000),
      },
      {
        type: "execution_started" as const,
        executionId: "exec-123e4567-e89b-12d3-a456-426614174000",
        agentName: "Data Analysis Agent",
        message: "Your agent execution has started successfully",
        timestamp: new Date(Date.now() - 1 * 60 * 1000),
      },
    ],
  },
};
export const MixedConversation: Story = {
args: {
messages: [
{
type: "message" as const,
role: "user" as const,
content: "Hello! I want to find and run an automation agent",
timestamp: new Date(Date.now() - 15 * 60 * 1000),
},
{
type: "message" as const,
role: "assistant" as const,
content:
"I can help you find and run automation agents! Let me search for you.",
timestamp: new Date(Date.now() - 14 * 60 * 1000),
},
{
type: "tool_call" as const,
toolId: "tool-111",
toolName: "find_agent",
arguments: { query: "automation" },
timestamp: new Date(Date.now() - 13 * 60 * 1000),
},
{
type: "agent_carousel" as const,
agents: [
{
id: "agent-1",
name: "Email Automation",
description: "Automates email responses",
version: 1,
},
{
id: "agent-2",
name: "Social Media Manager",
description: "Schedules social posts",
version: 2,
},
],
totalCount: 8,
timestamp: new Date(Date.now() - 12 * 60 * 1000),
},
{
type: "message" as const,
role: "user" as const,
content: "Run the Email Automation agent",
timestamp: new Date(Date.now() - 10 * 60 * 1000),
},
{
type: "tool_call" as const,
toolId: "tool-222",
toolName: "run_agent",
arguments: { agent_id: "agent-1" },
timestamp: new Date(Date.now() - 9 * 60 * 1000),
},
{
type: "credentials_needed" as const,
credentials: [
{
provider: "gmail",
providerName: "Gmail",
credentialType: "oauth2" as const,
title: "Gmail Integration",
},
],
agentName: "Email Automation",
message: "To run Email Automation, you need to add credentials.",
timestamp: new Date(Date.now() - 8 * 60 * 1000),
},
{
type: "message" as const,
role: "user" as const,
content: "Try finding crypto agents instead",
timestamp: new Date(Date.now() - 5 * 60 * 1000),
},
{
type: "tool_call" as const,
toolId: "tool-333",
toolName: "find_agent",
arguments: { query: "crypto" },
timestamp: new Date(Date.now() - 4 * 60 * 1000),
},
{
type: "no_results" as const,
message: "No agents found matching 'crypto'. Try different keywords.",
suggestions: ["Try more general terms", "Browse the marketplace"],
timestamp: new Date(Date.now() - 3 * 60 * 1000),
},
],
},
};

View File

@@ -0,0 +1,61 @@
import { cn } from "@/lib/utils";
import { ChatMessage } from "../ChatMessage/ChatMessage";
import type { ChatMessageData } from "../ChatMessage/useChatMessage";
import { StreamingMessage } from "../StreamingMessage/StreamingMessage";
import { useMessageList } from "./useMessageList";
export interface MessageListProps {
  /** Persisted conversation history, rendered top to bottom. */
  messages: ChatMessageData[];
  /** Text fragments of the in-flight assistant response, in arrival order. */
  streamingChunks?: string[];
  /** Whether a response is currently being streamed. */
  isStreaming?: boolean;
  className?: string;
  /** Forwarded to the streaming bubble; fired when it considers itself done. */
  onStreamComplete?: () => void;
  /** Forwarded to each message so interactive messages can send follow-ups. */
  onSendMessage?: (content: string) => void;
}

/**
 * Scrollable chat transcript: renders the persisted messages, an optional
 * live streaming bubble at the bottom, and a sentinel element that the
 * useMessageList hook scrolls into view as content grows.
 */
export function MessageList({
  messages,
  streamingChunks = [],
  isStreaming = false,
  className,
  onStreamComplete,
  onSendMessage,
}: MessageListProps) {
  const { messagesEndRef, messagesContainerRef } = useMessageList({
    messageCount: messages.length,
    isStreaming,
  });

  const showStreamingBubble = isStreaming && streamingChunks.length > 0;

  return (
    <div
      ref={messagesContainerRef}
      className={cn(
        "flex-1 overflow-y-auto",
        "scrollbar-thin scrollbar-track-transparent scrollbar-thumb-zinc-300 dark:scrollbar-thumb-zinc-700",
        className,
      )}
    >
      <div className="space-y-0">
        {/* Persisted conversation history */}
        {messages.map((message, index) => (
          <ChatMessage
            key={index}
            message={message}
            onSendMessage={onSendMessage}
          />
        ))}
        {/* Live assistant response while tokens are still arriving */}
        {showStreamingBubble && (
          <StreamingMessage
            chunks={streamingChunks}
            onComplete={onStreamComplete}
          />
        )}
        {/* Sentinel targeted by the auto-scroll effect */}
        <div ref={messagesEndRef} />
      </div>
    </div>
  );
}

View File

@@ -0,0 +1,38 @@
import { useEffect, useRef, useCallback } from "react";
interface UseMessageListArgs {
  /** Number of persisted messages; changes trigger an auto-scroll. */
  messageCount: number;
  /** Streaming flag; toggling it also triggers an auto-scroll. */
  isStreaming: boolean;
}

interface UseMessageListResult {
  /** Attach to an empty element at the bottom of the list (scroll target). */
  messagesEndRef: React.RefObject<HTMLDivElement>;
  /** Attach to the scrollable container element. */
  messagesContainerRef: React.RefObject<HTMLDivElement>;
  /** Smooth-scrolls the bottom sentinel into view on demand. */
  scrollToBottom: () => void;
}

/**
 * Owns the scroll behavior of a message list: exposes refs for the scroll
 * container and a bottom sentinel, and smooth-scrolls to the sentinel
 * whenever the message count changes or streaming toggles.
 */
export function useMessageList({
  messageCount,
  isStreaming,
}: UseMessageListArgs): UseMessageListResult {
  const messagesEndRef = useRef<HTMLDivElement>(null);
  const messagesContainerRef = useRef<HTMLDivElement>(null);

  const scrollToBottom = useCallback(() => {
    messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
  }, []);

  // Keep the newest content visible as messages arrive or streaming flips.
  useEffect(() => {
    scrollToBottom();
  }, [messageCount, isStreaming, scrollToBottom]);

  return { messagesEndRef, messagesContainerRef, scrollToBottom };
}

View File

@@ -0,0 +1,67 @@
import type { Meta, StoryObj } from "@storybook/nextjs";
import { NoResultsMessage } from "./NoResultsMessage";
// Storybook metadata for the NoResultsMessage empty-state card.
const meta = {
  title: "Molecules/NoResultsMessage",
  component: NoResultsMessage,
  parameters: {
    layout: "padded",
  },
  tags: ["autodocs"],
} satisfies Meta<typeof NoResultsMessage>;
export default meta;
type Story = StoryObj<typeof meta>;
// Typical case: a message plus a few actionable suggestions.
export const Default: Story = {
  args: {
    message:
      "No agents found matching 'crypto mining'. Try different keywords or browse the marketplace.",
    suggestions: [
      "Try more general terms",
      "Browse categories in the marketplace",
      "Check spelling",
    ],
  },
};
// Empty suggestions array — the suggestions section should be hidden.
export const NoSuggestions: Story = {
  args: {
    message: "No results found for your search. Please try a different query.",
    suggestions: [],
  },
};
// Multi-sentence message to check wrapping inside the card.
export const LongMessage: Story = {
  args: {
    message:
      "We couldn't find any agents matching your search criteria. This could be because the agent you're looking for doesn't exist yet, or you might need to adjust your search terms to be more specific or more general depending on what you're trying to find.",
    suggestions: [
      "Try using different keywords",
      "Browse all available agents in the marketplace",
      "Check your spelling and try again",
      "Consider creating your own agent for this use case",
    ],
  },
};
// Minimal content edge case.
export const ShortMessage: Story = {
  args: {
    message: "No results.",
    suggestions: ["Try again"],
  },
};
// Stress-tests the suggestions list with six entries.
export const ManySuggestions: Story = {
  args: {
    message: "No agents found matching your criteria.",
    suggestions: [
      "Use broader search terms",
      "Try searching by category",
      "Check the spelling of your keywords",
      "Browse the full marketplace",
      "Consider synonyms or related terms",
      "Filter by specific features",
    ],
  },
};

View File

@@ -0,0 +1,68 @@
import React from "react";
import { Text } from "@/components/atoms/Text/Text";
import { MagnifyingGlass, X } from "@phosphor-icons/react";
import { cn } from "@/lib/utils";
export interface NoResultsMessageProps {
message: string;
suggestions?: string[];
className?: string;
}
export function NoResultsMessage({
message,
suggestions = [],
className,
}: NoResultsMessageProps) {
return (
<div
className={cn(
"mx-4 my-2 flex flex-col items-center gap-4 rounded-lg border border-gray-200 bg-gray-50 p-6 dark:border-gray-800 dark:bg-gray-900",
className,
)}
>
{/* Icon */}
<div className="relative flex h-16 w-16 items-center justify-center">
<div className="flex h-16 w-16 items-center justify-center rounded-full bg-gray-200 dark:bg-gray-700">
<MagnifyingGlass size={32} weight="bold" className="text-gray-500" />
</div>
<div className="absolute -right-1 -top-1 flex h-8 w-8 items-center justify-center rounded-full bg-gray-400 dark:bg-gray-600">
<X size={20} weight="bold" className="text-white" />
</div>
</div>
{/* Content */}
<div className="text-center">
<Text variant="h3" className="mb-2 text-gray-900 dark:text-gray-100">
No Results Found
</Text>
<Text variant="body" className="text-gray-700 dark:text-gray-300">
{message}
</Text>
</div>
{/* Suggestions */}
{suggestions.length > 0 && (
<div className="w-full space-y-2">
<Text
variant="small"
className="font-semibold text-gray-900 dark:text-gray-100"
>
Try these suggestions:
</Text>
<ul className="space-y-1 rounded-md bg-gray-100 p-4 dark:bg-gray-800">
{suggestions.map((suggestion, index) => (
<li
key={index}
className="flex items-start gap-2 text-sm text-gray-700 dark:text-gray-300"
>
<span className="mt-1 text-gray-500"></span>
<span>{suggestion}</span>
</li>
))}
</ul>
</div>
)}
</div>
);
}

View File

@@ -0,0 +1,91 @@
import type { Meta, StoryObj } from "@storybook/nextjs";
import { QuickActionsWelcome } from "./QuickActionsWelcome";
// Storybook metadata for the QuickActionsWelcome empty-chat panel.
const meta = {
  title: "Molecules/QuickActionsWelcome",
  component: QuickActionsWelcome,
  parameters: {
    layout: "fullscreen",
  },
  tags: ["autodocs"],
  args: {
    // Shared stub handler; logs which starter prompt was clicked.
    onActionClick: (action: string) => console.log("Action clicked:", action),
  },
} satisfies Meta<typeof QuickActionsWelcome>;
export default meta;
type Story = StoryObj<typeof meta>;
// Typical four-action welcome screen.
export const Default: Story = {
  args: {
    title: "Welcome to Agent Chat",
    description:
      "I can help you discover and run AI agents. Try one of these quick actions to get started:",
    actions: [
      "Find agents for data analysis",
      "Search for automation tools",
      "Show me popular agents",
      "Help me build a workflow",
    ],
  },
};
// Minimal grid: two actions.
export const TwoActions: Story = {
  args: {
    title: "Get Started",
    description: "Choose an action to begin your journey with AI agents:",
    actions: ["Explore the marketplace", "Create a new agent"],
  },
};
// Larger grid: six actions across the two-column layout.
export const SixActions: Story = {
  args: {
    title: "What would you like to do?",
    description: "Select from these options to get started:",
    actions: [
      "Search for agents",
      "Browse categories",
      "View my agents",
      "Create new workflow",
      "Import from template",
      "Learn about agents",
    ],
  },
};
// All buttons disabled (e.g. while a session is being created).
export const Disabled: Story = {
  args: {
    title: "Welcome to Agent Chat",
    description:
      "I can help you discover and run AI agents. Try one of these quick actions to get started:",
    actions: [
      "Find agents for data analysis",
      "Search for automation tools",
      "Show me popular agents",
      "Help me build a workflow",
    ],
    disabled: true,
  },
};
// Verbose copy to check wrapping of title, description, and buttons.
export const LongTexts: Story = {
  args: {
    title: "Welcome to the AutoGPT Platform Agent Discovery System",
    description:
      "This interactive chat interface allows you to explore, discover, and execute AI-powered agents that can help you automate tasks, analyze data, and solve complex problems. Select one of the suggested actions below to begin your exploration journey:",
    actions: [
      "Find agents that can help me with advanced data analysis and visualization tasks",
      "Search for workflow automation agents that integrate with popular services",
      "Show me the most popular and highly-rated agents in the marketplace",
      "Help me understand how to build and deploy my own custom agent workflows",
    ],
  },
};
// Terse copy edge case.
export const ShortTitle: Story = {
  args: {
    title: "Chat",
    description: "What would you like to do today?",
    actions: ["Find agents", "Browse marketplace", "View history", "Get help"],
  },
};

View File

@@ -0,0 +1,51 @@
import React from "react";
import { Text } from "@/components/atoms/Text/Text";
import { cn } from "@/lib/utils";
export interface QuickActionsWelcomeProps {
  /** Heading shown at the top of the panel. */
  title: string;
  /** Supporting copy under the heading. */
  description: string;
  /** Starter prompts; each renders as a clickable button. */
  actions: string[];
  /** Receives the clicked prompt's text. */
  onActionClick: (action: string) => void;
  /** Greys out and disables every action button. */
  disabled?: boolean;
  className?: string;
}

/**
 * Empty-chat welcome panel: a centered title and description above a grid
 * of one-click starter prompts. Clicking a prompt forwards its text to
 * `onActionClick`.
 */
export function QuickActionsWelcome({
  title,
  description,
  actions,
  onActionClick,
  disabled = false,
  className,
}: QuickActionsWelcomeProps) {
  function renderAction(action: string) {
    return (
      <button
        key={action}
        onClick={() => onActionClick(action)}
        disabled={disabled}
        className="rounded-lg border border-zinc-200 bg-white p-4 text-left text-sm hover:bg-zinc-50 disabled:cursor-not-allowed disabled:opacity-50 dark:border-zinc-800 dark:bg-zinc-900 dark:hover:bg-zinc-800"
      >
        {action}
      </button>
    );
  }

  return (
    <div
      className={cn("flex flex-1 items-center justify-center p-4", className)}
    >
      <div className="max-w-2xl text-center">
        <Text
          variant="h2"
          className="mb-4 text-3xl font-bold text-zinc-900 dark:text-zinc-100"
        >
          {title}
        </Text>
        <Text variant="body" className="mb-8 text-zinc-600 dark:text-zinc-400">
          {description}
        </Text>
        {/* Starter prompts, two columns on small screens and up */}
        <div className="grid gap-2 sm:grid-cols-2">
          {actions.map(renderAction)}
        </div>
      </div>
    </div>
  );
}

View File

@@ -0,0 +1,100 @@
import type { Meta, StoryObj } from "@storybook/nextjs";
import { StreamingMessage } from "./StreamingMessage";
import { useEffect, useState } from "react";
// Storybook metadata for the StreamingMessage bubble.
const meta = {
  title: "Molecules/StreamingMessage",
  component: StreamingMessage,
  parameters: {
    layout: "padded",
  },
  tags: ["autodocs"],
} satisfies Meta<typeof StreamingMessage>;
export default meta;
type Story = StoryObj<typeof meta>;

// No chunks yet — bubble before the first token arrives.
export const Empty: Story = {
  args: {
    chunks: [],
  },
};

export const SingleChunk: Story = {
  args: {
    chunks: ["Hello! "],
  },
};

export const MultipleChunks: Story = {
  args: {
    chunks: [
      "I can ",
      "help you ",
      "discover ",
      "and run ",
      "AI agents. ",
      "What would ",
      "you like ",
      "to do?",
    ],
  },
};

/**
 * Appends one word of `fullText` (plus a trailing space) to the returned
 * chunk list every `intervalMs` milliseconds; the interval is cleared on
 * unmount or after the last word.
 *
 * Fix: the original stories read `words[currentIndex]` inside the
 * `setChunks` updater and incremented `currentIndex` afterwards. Because
 * the updater runs asynchronously, the index could already have advanced
 * past the array end when the updater executed, appending `"undefined "`
 * as the final chunk. The word is now captured before the state update is
 * scheduled. Shared by both simulated-streaming stories for consistency.
 */
function useSimulatedChunks(fullText: string, intervalMs: number): string[] {
  const [chunks, setChunks] = useState<string[]>([]);
  useEffect(
    function simulateStreaming() {
      const words = fullText.split(" ");
      let currentIndex = 0;
      const interval = setInterval(() => {
        if (currentIndex >= words.length) {
          clearInterval(interval);
          return;
        }
        const word = words[currentIndex];
        currentIndex++;
        setChunks((prev) => [...prev, word + " "]);
      }, intervalMs);
      return () => clearInterval(interval);
    },
    [fullText, intervalMs],
  );
  return chunks;
}

// Live demo: words appear one at a time, like a real token stream.
export const SimulatedStreaming: Story = {
  args: {
    chunks: [],
  },
  render: () => {
    const chunks = useSimulatedChunks(
      "I'm a streaming message that simulates real-time text generation. Watch as the text appears word by word, just like a real AI assistant typing out a response!",
      100, // add a word every 100ms
    );
    return <StreamingMessage chunks={chunks} />;
  },
};

// Longer text at a faster cadence to exercise wrapping during streaming.
export const LongStreaming: Story = {
  args: {
    chunks: [],
  },
  render: () => {
    const chunks = useSimulatedChunks(
      "This is a much longer streaming message that demonstrates how the component handles larger amounts of text. It includes multiple sentences and should wrap nicely within the message bubble. The blinking cursor at the end indicates that text is still being generated in real-time.",
      80,
    );
    return <StreamingMessage chunks={chunks} />;
  },
};

View File

@@ -0,0 +1,42 @@
import { cn } from "@/lib/utils";
import { Robot } from "@phosphor-icons/react";
import { MessageBubble } from "@/components/atoms/MessageBubble/MessageBubble";
import { MarkdownContent } from "@/components/atoms/MarkdownContent/MarkdownContent";
import { useStreamingMessage } from "./useStreamingMessage";
export interface StreamingMessageProps {
chunks: string[];
className?: string;
onComplete?: () => void;
}
export function StreamingMessage({
chunks,
className,
onComplete,
}: StreamingMessageProps) {
const { displayText } = useStreamingMessage({ chunks, onComplete });
return (
<div className={cn("flex gap-3 px-4 py-4", className)}>
{/* Avatar */}
<div className="flex-shrink-0">
<div className="flex h-8 w-8 items-center justify-center rounded-full bg-purple-600 dark:bg-purple-500">
<Robot className="h-5 w-5 text-white" />
</div>
</div>
{/* Message Content */}
<div className="flex max-w-[70%] flex-col">
<MessageBubble variant="assistant">
<MarkdownContent content={displayText} />
</MessageBubble>
{/* Timestamp */}
<span className="mt-1 text-xs text-neutral-500 dark:text-neutral-400">
Typing...
</span>
</div>
</div>
);
}

View File

@@ -0,0 +1,38 @@
import { useEffect, useState } from "react";
interface UseStreamingMessageArgs {
  /** Ordered text fragments received from the stream so far. */
  chunks: string[];
  /** Reserved completion callback — currently never invoked (see note). */
  onComplete?: () => void;
}

interface UseStreamingMessageResult {
  /** All chunks concatenated in arrival order. */
  displayText: string;
  /** Always false for now; completion is signalled by the parent. */
  isComplete: boolean;
}

/**
 * Derives the text to display for an in-flight streaming message.
 *
 * Completion detection is intentionally not implemented here — the parent
 * decides when the stream has ended (e.g. by flipping its `isStreaming`
 * flag and unmounting this message). The original version carried a
 * `useState(false)` whose setter was never called, plus an effect guarded
 * by that permanently-false flag: dead code with identical observable
 * behavior (`isComplete` was always false and `onComplete` never fired).
 * Both have been removed. `onComplete` stays in the argument shape for
 * backward compatibility and for when completion detection lands.
 */
export function useStreamingMessage({
  chunks,
  onComplete: _onComplete,
}: UseStreamingMessageArgs): UseStreamingMessageResult {
  return {
    displayText: chunks.join(""),
    isComplete: false,
  };
}

View File

@@ -0,0 +1,96 @@
import type { Meta, StoryObj } from "@storybook/nextjs";
import { ToolCallMessage } from "./ToolCallMessage";
// Storybook metadata for the in-progress ToolCallMessage card.
const meta = {
  title: "Molecules/ToolCallMessage",
  component: ToolCallMessage,
  parameters: {
    layout: "padded",
  },
  tags: ["autodocs"],
} satisfies Meta<typeof ToolCallMessage>;
export default meta;
type Story = StoryObj<typeof meta>;
// Flat arguments object — the common case.
export const Simple: Story = {
  args: {
    toolId: "tool_abc123def456ghi789jkl012mno345",
    toolName: "search_database",
    arguments: {
      query: "SELECT * FROM users WHERE active = true",
      limit: 10,
    },
  },
};
// No arguments at all — the Parameters section should be omitted.
export const NoArguments: Story = {
  args: {
    toolId: "tool_xyz987wvu654tsr321qpo098nml765",
    toolName: "get_current_time",
  },
};
// Two levels of nesting plus a URL value.
export const ComplexArguments: Story = {
  args: {
    toolId: "tool_def456ghi789jkl012mno345pqr678",
    toolName: "process_data",
    arguments: {
      data: {
        source: "api",
        format: "json",
        filters: ["active", "verified"],
      },
      options: {
        validate: true,
        timeout: 30000,
        retry: 3,
      },
      callback_url: "https://example.com/webhook",
    },
  },
};
// Arrays of objects and mixed value types in the JSON pretty-print.
export const NestedArguments: Story = {
  args: {
    toolId: "tool_ghi789jkl012mno345pqr678stu901",
    toolName: "send_email",
    arguments: {
      to: ["user@example.com", "admin@example.com"],
      subject: "Test Email",
      body: {
        text: "This is a test email",
        html: "<p>This is a <strong>test</strong> email</p>",
      },
      attachments: [
        {
          filename: "report.pdf",
          content_type: "application/pdf",
          size: 1024000,
        },
      ],
      metadata: {
        campaign_id: "camp_123",
        tags: ["automated", "test"],
      },
    },
  },
};
// Long string value to exercise horizontal scrolling in the <pre> block.
export const LargeArguments: Story = {
  args: {
    toolId: "tool_jkl012mno345pqr678stu901vwx234",
    toolName: "analyze_text",
    arguments: {
      text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.",
      options: {
        detect_language: true,
        extract_entities: true,
        sentiment_analysis: true,
        keyword_extraction: true,
        summarization: true,
      },
      max_results: 100,
    },
  },
};

View File

@@ -0,0 +1,108 @@
import React, { useState } from "react";
import { Text } from "@/components/atoms/Text/Text";
import { Wrench, Spinner, CaretDown, CaretUp } from "@phosphor-icons/react";
import { cn } from "@/lib/utils";
import { getToolDisplayName } from "@/lib/chat/tool-display-names";
import type { ToolArguments } from "@/types/chat";
export interface ToolCallMessageProps {
  /** Unique identifier of the invocation; only an 8-char prefix is shown. */
  toolId: string;
  /** Raw tool name, mapped to a friendly label via getToolDisplayName. */
  toolName: string;
  /** Arguments the tool was called with; pretty-printed when expanded. */
  arguments?: ToolArguments;
  className?: string;
}

/**
 * Card for a tool call that is still executing: a header with the tool's
 * display name and a spinner, plus an expandable section revealing the
 * call parameters and a prefix of the tool ID.
 */
export function ToolCallMessage({
  toolId,
  toolName,
  arguments: args,
  className,
}: ToolCallMessageProps) {
  const [expanded, setExpanded] = useState(false);

  // Pick the caret direction once instead of branching inside the JSX.
  const CaretIcon = expanded ? CaretUp : CaretDown;
  const hasArgs = Boolean(args && Object.keys(args).length > 0);

  function handleToggle() {
    setExpanded((prev) => !prev);
  }

  return (
    <div
      className={cn(
        "overflow-hidden rounded-lg border transition-all duration-200",
        "border-neutral-200 dark:border-neutral-700",
        "bg-white dark:bg-neutral-900",
        "animate-in fade-in-50 slide-in-from-top-1",
        className,
      )}
    >
      {/* Header: tool name + live "Executing..." indicator + toggle */}
      <div
        className={cn(
          "flex items-center justify-between px-3 py-2",
          "bg-gradient-to-r from-neutral-50 to-neutral-100 dark:from-neutral-800/20 dark:to-neutral-700/20",
        )}
      >
        <div className="flex items-center gap-2">
          <Wrench
            size={16}
            weight="bold"
            className="text-neutral-500 dark:text-neutral-400"
          />
          <span className="text-sm font-medium text-neutral-700 dark:text-neutral-300">
            {getToolDisplayName(toolName)}
          </span>
          <div className="ml-2 flex items-center gap-1.5">
            <Spinner
              size={16}
              weight="bold"
              className="animate-spin text-blue-500"
            />
            <span className="text-xs text-neutral-500 dark:text-neutral-400">
              Executing...
            </span>
          </div>
        </div>
        <button
          onClick={handleToggle}
          className="rounded p-1 hover:bg-neutral-200/50 dark:hover:bg-neutral-700/50"
          aria-label={expanded ? "Collapse details" : "Expand details"}
        >
          <CaretIcon
            size={16}
            weight="bold"
            className="text-neutral-600 dark:text-neutral-400"
          />
        </button>
      </div>
      {/* Details: pretty-printed parameters and truncated tool ID */}
      {expanded && (
        <div className="px-4 py-3">
          {hasArgs && (
            <div className="mb-3">
              <div className="mb-2 text-xs font-medium text-neutral-600 dark:text-neutral-400">
                Parameters:
              </div>
              <div className="rounded-md bg-neutral-50 p-3 dark:bg-neutral-800">
                <pre className="overflow-x-auto text-xs text-neutral-700 dark:text-neutral-300">
                  {JSON.stringify(args, null, 2)}
                </pre>
              </div>
            </div>
          )}
          <Text
            variant="small"
            className="text-neutral-500 dark:text-neutral-400"
          >
            Tool ID: {toolId.slice(0, 8)}...
          </Text>
        </div>
      )}
    </div>
  );
}

View File

@@ -0,0 +1,137 @@
import type { Meta, StoryObj } from "@storybook/nextjs";
import { ToolResponseMessage } from "./ToolResponseMessage";
// Storybook metadata for the ToolResponseMessage result card.
const meta = {
  title: "Molecules/ToolResponseMessage",
  component: ToolResponseMessage,
  parameters: {
    layout: "padded",
  },
  tags: ["autodocs"],
} satisfies Meta<typeof ToolResponseMessage>;
export default meta;
type Story = StoryObj<typeof meta>;
// Successful call with a short plain-string result.
export const SuccessString: Story = {
  args: {
    toolId: "tool_abc123def456ghi789jkl012mno345",
    toolName: "search_database",
    result: "Found 15 matching records",
    success: true,
  },
};
// Successful call with a flat object result (pretty-printed JSON).
export const SuccessObject: Story = {
  args: {
    toolId: "tool_xyz987wvu654tsr321qpo098nml765",
    toolName: "get_user_info",
    result: {
      id: "user_123",
      name: "John Doe",
      email: "john@example.com",
      status: "active",
      created_at: "2024-01-15T10:30:00Z",
    },
    success: true,
  },
};
// Failed call (success: false) with a string error — red styling.
export const FailedString: Story = {
  args: {
    toolId: "tool_def456ghi789jkl012mno345pqr678",
    toolName: "send_email",
    result: "Failed to send email: SMTP connection timeout",
    success: false,
  },
};
// Failed call with a structured error payload.
export const FailedObject: Story = {
  args: {
    toolId: "tool_ghi789jkl012mno345pqr678stu901",
    toolName: "api_request",
    result: {
      error: "Authentication failed",
      code: "AUTH_ERROR",
      status: 401,
      message: "Invalid API key provided",
    },
    success: false,
  },
};
// String result well over 200 characters.
export const LongStringResult: Story = {
  args: {
    toolId: "tool_jkl012mno345pqr678stu901vwx234",
    toolName: "analyze_text",
    result:
      "Analysis complete. The text contains 150 words, 8 sentences, and 5 paragraphs. Sentiment: Positive (0.85). Key topics: technology, innovation, future. Entities detected: 3 organizations, 5 people, 2 locations. The overall tone is optimistic and forward-looking. Primary language: English. Reading level: Grade 10. Estimated reading time: 45 seconds. The text demonstrates strong coherence with clear topic progression. Main themes include digital transformation, artificial intelligence, and sustainable development.",
    success: true,
  },
};
// Deeply nested object result with arrays of error objects.
export const ComplexNestedObject: Story = {
  args: {
    toolId: "tool_mno345pqr678stu901vwx234yza567",
    toolName: "process_data",
    result: {
      status: "completed",
      processed_items: 1250,
      duration_ms: 3450,
      results: {
        valid: 1200,
        invalid: 50,
        errors: [
          { line: 45, message: "Invalid format" },
          { line: 128, message: "Missing required field" },
        ],
      },
      summary: {
        categories: {
          type_a: 450,
          type_b: 600,
          type_c: 150,
        },
        average_score: 87.5,
        confidence: 0.94,
      },
    },
    success: true,
  },
};
// Large multi-section payload to stress the expanded JSON view.
export const VeryLongObjectResult: Story = {
  args: {
    toolId: "tool_pqr678stu901vwx234yza567bcd890",
    toolName: "generate_report",
    result: {
      report_id: "rep_20240130_1530",
      generated_at: "2024-01-30T15:30:00Z",
      data: {
        metrics: {
          total_users: 15234,
          active_users: 12890,
          new_users: 456,
          retention_rate: 84.6,
          engagement_score: 7.8,
        },
        performance: {
          response_time_avg: 245,
          error_rate: 0.02,
          uptime_percentage: 99.95,
        },
        revenue: {
          total: 125000.0,
          currency: "USD",
          breakdown: {
            subscriptions: 100000,
            one_time: 25000,
          },
        },
      },
      insights:
        "User engagement increased by 15% compared to last month. Response times improved by 8%. Revenue growth of 12% quarter over quarter. Retention rate remains stable.",
    },
    success: true,
  },
};

View File

@@ -0,0 +1,200 @@
import React, { useState } from "react";
import { Text } from "@/components/atoms/Text/Text";
import {
CheckCircle,
XCircle,
CaretDown,
CaretUp,
Wrench,
} from "@phosphor-icons/react";
import { cn } from "@/lib/utils";
import { getToolDisplayName } from "@/lib/chat/tool-display-names";
import type { ToolResult } from "@/types/chat";
export interface ToolResponseMessageProps {
  // Unique identifier of the invocation; only an 8-char prefix is displayed.
  toolId: string;
  // Raw tool name, mapped to a friendly label via getToolDisplayName.
  toolName: string;
  // Raw tool output: a string, or an object that is pretty-printed as JSON.
  result: ToolResult;
  // Drives green/red styling and the Completed/Error badge (defaults to true).
  success?: boolean;
  className?: string;
}
// Special structured responses are rendered by dedicated components, so the
// raw JSON payload is suppressed in this card for these types.
function shouldHideResult(result: ToolResult): boolean {
  try {
    const raw = typeof result === "string" ? result : JSON.stringify(result);
    const parsed = JSON.parse(raw);
    return [
      "agent_carousel",
      "execution_started",
      "setup_requirements",
      "no_results",
    ].includes(parsed.type);
  } catch {
    // Non-JSON strings (and null payloads) are ordinary results — show them.
    return false;
  }
}
// Builds a one-line human-readable summary for the special structured
// response types; returns null for ordinary results (or unparseable input).
function getResultSummary(result: ToolResult): string | null {
  try {
    const raw = typeof result === "string" ? result : JSON.stringify(result);
    const parsed = JSON.parse(raw);
    switch (parsed.type) {
      case "agent_carousel": {
        const count = parsed.agents?.length || parsed.count || 0;
        const querySuffix = parsed.query ? ` matching "${parsed.query}"` : "";
        return `Found ${count} agents${querySuffix}`;
      }
      case "execution_started": {
        const idSuffix = parsed.execution_id
          ? ` (ID: ${parsed.execution_id.slice(0, 8)}...)`
          : "";
        return `Started execution${idSuffix}`;
      }
      case "setup_requirements":
        return "Retrieved setup requirements";
      case "no_results":
        return parsed.message || "No results found";
      default:
        return null;
    }
  } catch {
    return null;
  }
}
/**
 * Collapsible card summarizing a tool invocation's outcome: a header with
 * the tool's display name and a Completed/Error badge, an expandable panel
 * showing the raw result and a prefix of the tool ID, and — for special
 * structured response types (carousel, execution, …) that are rendered by
 * dedicated components — a one-line friendly summary instead of raw JSON.
 */
export function ToolResponseMessage({
  toolId,
  toolName,
  result,
  success = true,
  className,
}: ToolResponseMessageProps) {
  const [isExpanded, setIsExpanded] = useState(false);
  const hideResult = shouldHideResult(result);
  const resultSummary = getResultSummary(result);
  const resultString =
    typeof result === "object"
      ? JSON.stringify(result, null, 2)
      : String(result);
  // Fix: the original computed `shouldTruncate = resultString.length > 200`
  // and rendered `shouldTruncate && !isExpanded ? truncated : full` — but
  // that ternary only ever rendered inside an `isExpanded && …` guard, so
  // `!isExpanded` was always false there and truncation was dead code. The
  // dead branch has been removed; the expanded panel shows the full result.
  return (
    <div
      className={cn(
        "overflow-hidden rounded-lg border transition-all duration-200",
        success
          ? "border-neutral-200 dark:border-neutral-700"
          : "border-red-200 dark:border-red-800",
        "bg-white dark:bg-neutral-900",
        "animate-in fade-in-50 slide-in-from-top-1",
        className,
      )}
    >
      {/* Header: tool name + success/error badge + optional toggle */}
      <div
        className={cn(
          "flex items-center justify-between px-3 py-2",
          "bg-gradient-to-r",
          success
            ? "from-neutral-50 to-neutral-100 dark:from-neutral-800/20 dark:to-neutral-700/20"
            : "from-red-50 to-red-100 dark:from-red-900/20 dark:to-red-800/20",
        )}
      >
        <div className="flex items-center gap-2">
          <Wrench
            size={16}
            weight="bold"
            className="text-neutral-500 dark:text-neutral-400"
          />
          <span className="text-sm font-medium text-neutral-700 dark:text-neutral-300">
            {getToolDisplayName(toolName)}
          </span>
          <div className="ml-2 flex items-center gap-1.5">
            {success ? (
              <CheckCircle size={16} weight="fill" className="text-green-500" />
            ) : (
              <XCircle size={16} weight="fill" className="text-red-500" />
            )}
            <span className="text-xs text-neutral-500 dark:text-neutral-400">
              {success ? "Completed" : "Error"}
            </span>
          </div>
        </div>
        {/* No toggle for special types — their raw JSON is never shown */}
        {!hideResult && (
          <button
            onClick={() => setIsExpanded(!isExpanded)}
            className="rounded p-1 hover:bg-neutral-200/50 dark:hover:bg-neutral-700/50"
            aria-label={isExpanded ? "Collapse details" : "Expand details"}
          >
            {isExpanded ? (
              <CaretUp
                size={16}
                weight="bold"
                className="text-neutral-600 dark:text-neutral-400"
              />
            ) : (
              <CaretDown
                size={16}
                weight="bold"
                className="text-neutral-600 dark:text-neutral-400"
              />
            )}
          </button>
        )}
      </div>
      {/* Expanded panel: full raw result + truncated tool ID */}
      {isExpanded && !hideResult && (
        <div className="px-4 py-3">
          <div className="mb-2 text-xs font-medium text-neutral-600 dark:text-neutral-400">
            Result:
          </div>
          <div
            className={cn(
              "rounded-md p-3",
              success
                ? "bg-green-50 dark:bg-green-900/20"
                : "bg-red-50 dark:bg-red-900/20",
            )}
          >
            <pre
              className={cn(
                "whitespace-pre-wrap text-xs",
                success
                  ? "text-green-800 dark:text-green-200"
                  : "text-red-800 dark:text-red-200",
              )}
            >
              {resultString}
            </pre>
          </div>
          <Text
            variant="small"
            className="mt-2 text-neutral-500 dark:text-neutral-400"
          >
            Tool ID: {toolId.slice(0, 8)}...
          </Text>
        </div>
      )}
      {/* One-line summary for special response types */}
      {hideResult && resultSummary && (
        <div className="px-4 py-2">
          <Text
            variant="small"
            className="text-neutral-600 dark:text-neutral-400"
          >
            {resultSummary}
          </Text>
        </div>
      )}
    </div>
  );
}

View File

@@ -0,0 +1,324 @@
import { useCallback, useEffect, useState, useRef, useMemo } from "react";
import { useQueryClient } from "@tanstack/react-query";
import { toast } from "sonner";
import {
usePostV2CreateSession,
postV2CreateSession,
useGetV2GetSession,
usePatchV2SessionAssignUser,
getGetV2GetSessionQueryKey,
} from "@/app/api/__generated__/endpoints/chat/chat";
import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse";
import { storage, Key } from "@/services/storage/local-storage";
import { isValidUUID } from "@/lib/utils";
interface UseChatSessionArgs {
  // Session ID taken from the URL; validated and takes precedence over the
  // ID stored in local storage.
  urlSessionId?: string | null;
  // When true and no session ID is available, a new session may be created.
  autoCreate?: boolean;
}
interface UseChatSessionResult {
  // Full session payload from the backend, or null while nothing is loaded.
  session: SessionDetailResponse | null;
  sessionId: string | null; // Direct access to session ID state
  // Messages of the current session (empty array when there is no session).
  messages: SessionDetailResponse["messages"];
  // True while creating or loading a session.
  isLoading: boolean;
  // True only while the create-session mutation is in flight.
  isCreating: boolean;
  // Latest create/load failure, or null when the last operation succeeded.
  error: Error | null;
  createSession: () => Promise<string>; // Return session ID
  loadSession: (sessionId: string) => Promise<void>;
  refreshSession: () => Promise<void>;
  // Assigns the current user to an anonymous session.
  claimSession: (sessionId: string) => Promise<void>;
  // Drops local session state (does not delete the session server-side —
  // TODO confirm against the implementation below).
  clearSession: () => void;
}
export function useChatSession({
urlSessionId,
autoCreate = false,
}: UseChatSessionArgs = {}): UseChatSessionResult {
const queryClient = useQueryClient();
const [sessionId, setSessionId] = useState<string | null>(null);
const [error, setError] = useState<Error | null>(null);
const justCreatedSessionIdRef = useRef<string | null>(null);
// Initialize session ID from URL or localStorage
useEffect(
function initializeSessionId() {
if (urlSessionId) {
// Validate UUID format
if (!isValidUUID(urlSessionId)) {
console.error("Invalid session ID format:", urlSessionId);
toast.error("Invalid session ID", {
description:
"The session ID in the URL is not valid. Starting a new session...",
});
setSessionId(null);
storage.clean(Key.CHAT_SESSION_ID);
return;
}
setSessionId(urlSessionId);
storage.set(Key.CHAT_SESSION_ID, urlSessionId);
} else {
const storedSessionId = storage.get(Key.CHAT_SESSION_ID);
if (storedSessionId) {
// Validate stored session ID as well
if (!isValidUUID(storedSessionId)) {
console.error("Invalid stored session ID:", storedSessionId);
storage.clean(Key.CHAT_SESSION_ID);
setSessionId(null);
} else {
setSessionId(storedSessionId);
}
} else if (autoCreate) {
// Auto-create will be handled by the mutation below
setSessionId(null);
}
}
},
[urlSessionId, autoCreate],
);
  // Create-session mutation.
  // NOTE(review): `createSessionMutation` and `isCreating` come from this
  // hook, but createSession() below calls the raw postV2CreateSession
  // function instead of the mutation, so `isCreating` likely never reflects
  // an in-flight create — verify and consider routing through the mutation.
  const {
    mutateAsync: createSessionMutation,
    isPending: isCreating,
    error: createError,
  } = usePostV2CreateSession();
  // Get session query - runs for any valid session (URL or locally created)
  const {
    data: sessionData,
    isLoading: isLoadingSession,
    error: loadError,
    refetch,
  } = useGetV2GetSession(sessionId || "", {
    query: {
      enabled: !!sessionId, // Fetch whenever we have a session ID
      staleTime: 30000, // Consider data fresh for 30 seconds
      retry: 1,
      // Error handling is done in useChatPage via the error state
    },
  });
  // Claim session mutation (assign user to anonymous session)
  const { mutateAsync: claimSessionMutation } = usePatchV2SessionAssignUser();
  // Extract session from response with type guard.
  // Once we have session data from the backend, use it.
  // While waiting for the first fetch, create a minimal object for
  // just-created sessions so the page isn't blank during the initial load.
  const session: SessionDetailResponse | null = useMemo(() => {
    // If we have real session data from GET query, use it
    if (sessionData?.status === 200) {
      return sessionData.data;
    }
    // If we just created a session and are waiting for the first fetch,
    // synthesize a placeholder. `justCreatedSessionIdRef` guards against
    // doing this for ids restored from the URL or local storage.
    if (sessionId && justCreatedSessionIdRef.current === sessionId) {
      return {
        id: sessionId,
        user_id: null, // Placeholder - actual value set by backend during creation
        messages: [],
        created_at: new Date().toISOString(),
        updated_at: new Date().toISOString(),
      } as SessionDetailResponse;
    }
    return null;
  }, [sessionData, sessionId]);
  // Messages of the current session (empty while loading / no session).
  const messages = session?.messages || [];
  // Combined loading state
  const isLoading = isCreating || isLoadingSession;
  // Combined error state: creation errors take precedence over load errors;
  // the error is cleared once neither source reports one.
  useEffect(
    function updateError() {
      if (createError) {
        setError(
          createError instanceof Error
            ? createError
            : new Error("Failed to create session"),
        );
      } else if (loadError) {
        setError(
          loadError instanceof Error
            ? loadError
            : new Error("Failed to load session"),
        );
      } else {
        setError(null);
      }
    },
    [createError, loadError],
  );
  // Creates a new chat session on the backend, persists its id locally, and
  // returns the id. Shows a toast and rethrows on failure.
  const createSession = useCallback(
    async function createSession(): Promise<string> {
      try {
        setError(null);
        // Call the API function directly with empty body to satisfy Content-Type: application/json
        // NOTE(review): this bypasses `createSessionMutation`, so the
        // mutation's isPending/error state never tracks this call.
        // NOTE(review): if postV2CreateSession serializes its body
        // internally, JSON.stringify({}) here would double-encode — verify
        // against the generated client.
        const response = await postV2CreateSession({
          body: JSON.stringify({}),
        });
        // Type guard to ensure we have a successful response
        if (response.status !== 200) {
          throw new Error("Failed to create session");
        }
        const newSessionId = response.data.id;
        setSessionId(newSessionId);
        storage.set(Key.CHAT_SESSION_ID, newSessionId);
        // Mark this session as "just created" so we can create a minimal object for it
        justCreatedSessionIdRef.current = newSessionId;
        // Clear the "just created" flag after 10 seconds
        // By then, the session should have been claimed or the user should have started using it
        setTimeout(() => {
          if (justCreatedSessionIdRef.current === newSessionId) {
            justCreatedSessionIdRef.current = null;
          }
        }, 10000);
        return newSessionId;
      } catch (err) {
        const error =
          err instanceof Error ? err : new Error("Failed to create session");
        setError(error);
        toast.error("Failed to create chat session", {
          description: error.message,
        });
        throw error;
      }
    },
    // NOTE(review): `createSessionMutation` is declared as a dependency but
    // is not used in the callback body above.
    [createSessionMutation],
  );
  // Switches to an existing session id and verifies it exists on the server.
  // Clears local state and throws if the session cannot be found.
  const loadSession = useCallback(
    async function loadSession(id: string) {
      try {
        setError(null);
        setSessionId(id);
        storage.set(Key.CHAT_SESSION_ID, id);
        // Attempt to fetch the session to verify it exists
        // NOTE(review): setSessionId is asynchronous, so this refetch() still
        // targets the query keyed by the *previous* sessionId value — the
        // existence check may run against the wrong (or disabled) query.
        const result = await refetch();
        // If session doesn't exist (404), clear it and throw error.
        // (react-query's refetch resolves with the error inside `result`
        // rather than rejecting, hence the explicit result inspection.)
        if (!result.data || result.isError) {
          console.warn("Session not found on server, clearing local state");
          storage.clean(Key.CHAT_SESSION_ID);
          setSessionId(null);
          throw new Error("Session not found");
        }
      } catch (err) {
        const error =
          err instanceof Error ? err : new Error("Failed to load session");
        setError(error);
        throw error;
      }
    },
    [refetch],
  );
  // Re-fetches the current session from the backend. No-op when there is no
  // active session id.
  const refreshSession = useCallback(
    async function refreshSession() {
      // Refresh session data from backend (works for all sessions now)
      if (!sessionId) {
        console.log("[refreshSession] Skipping - no session ID");
        return;
      }
      try {
        setError(null);
        // NOTE(review): react-query's refetch() resolves even when the query
        // errors, so this catch branch is unlikely to fire for fetch failures.
        await refetch();
      } catch (err) {
        const error =
          err instanceof Error ? err : new Error("Failed to refresh session");
        setError(error);
        throw error;
      }
    },
    [sessionId, refetch],
  );
const claimSession = useCallback(
async function claimSession(id: string) {
try {
setError(null);
await claimSessionMutation({ sessionId: id });
// Session was successfully claimed, so we know it exists on the server
// Clear the "just created" flag for this session
if (justCreatedSessionIdRef.current === id) {
justCreatedSessionIdRef.current = null;
}
// Invalidate and refetch the session query to get updated user_id
await queryClient.invalidateQueries({
queryKey: getGetV2GetSessionQueryKey(id),
});
// Force a refetch to sync the session data
await refetch();
toast.success("Session claimed successfully", {
description: "Your chat history has been saved to your account",
});
} catch (err: unknown) {
const error =
err instanceof Error ? err : new Error("Failed to claim session");
// Check if this is a 404 error (API errors may have status or response.status)
const is404 =
(typeof err === "object" &&
err !== null &&
"status" in err &&
err.status === 404) ||
(typeof err === "object" &&
err !== null &&
"response" in err &&
typeof err.response === "object" &&
err.response !== null &&
"status" in err.response &&
err.response.status === 404);
// Don't show toast for 404 - it will be handled by the caller
if (!is404) {
setError(error);
toast.error("Failed to claim session", {
description: error.message || "Unable to claim session",
});
}
throw error;
}
},
[claimSessionMutation, queryClient, refetch],
);
const clearSession = useCallback(function clearSession() {
setSessionId(null);
setError(null);
storage.clean(Key.CHAT_SESSION_ID);
justCreatedSessionIdRef.current = null;
}, []);
  // Public API of the hook.
  return {
    session,
    sessionId, // Return direct access to sessionId state
    messages,
    isLoading,
    isCreating,
    error,
    createSession,
    loadSession,
    refreshSession,
    claimSession,
    clearSession,
  };
}

View File

@@ -0,0 +1,249 @@
import { useState, useCallback, useRef, useEffect } from "react";
import { toast } from "sonner";
import type { ToolArguments, ToolResult } from "@/types/chat";
// Maximum number of automatic reconnect attempts per message.
const MAX_RETRIES = 3;
const INITIAL_RETRY_DELAY = 1000; // 1 second
/**
 * One server-sent event emitted by the chat stream endpoint.
 * Most fields are optional because each `type` populates a different subset.
 */
export interface StreamChunk {
  type:
    | "text_chunk"
    | "text_ended"
    | "tool_call"
    | "tool_call_start"
    | "tool_response"
    // NOTE(review): both "login_needed" and "need_login" are declared —
    // confirm which spelling(s) the backend actually emits.
    | "login_needed"
    | "need_login"
    | "credentials_needed"
    | "error"
    | "usage"
    | "stream_end";
  timestamp?: string;
  content?: string;
  message?: string;
  // Tool call/response fields
  tool_id?: string;
  tool_name?: string;
  arguments?: ToolArguments;
  result?: ToolResult;
  success?: boolean;
  idx?: number; // Index for tool_call_start
  // Login needed fields
  session_id?: string;
  agent_info?: {
    graph_id: string;
    name: string;
    trigger_type: string;
  };
  // Credentials needed fields
  provider?: string;
  provider_name?: string;
  credential_type?: string;
  scopes?: string[];
  title?: string;
  [key: string]: unknown; // Allow additional fields
}
/** Public API returned by useChatStream. */
interface UseChatStreamResult {
  isStreaming: boolean;
  error: Error | null;
  sendMessage: (
    sessionId: string,
    message: string,
    onChunk: (chunk: StreamChunk) => void,
    isUserMessage?: boolean,
  ) => Promise<void>;
  stopStreaming: () => void;
}
/**
 * React hook that streams a chat response over SSE via the Next.js proxy.
 *
 * Fixes over the previous revision:
 * - Retries no longer re-enter sendMessage (which reset the retry counter via
 *   stopStreaming, making MAX_RETRIES unreachable and the retry loop
 *   effectively infinite); reconnection is now an internal `connect()` step
 *   that preserves the counter.
 * - Each SSE event is parsed exactly once (previously both `onmessage` and a
 *   second "message" listener parsed every event).
 * - The returned promise settles only on a terminal outcome (stream_end,
 *   error chunk, abort, or retries exhausted) instead of rejecting on the
 *   first transient connection error while a retry was still scheduled.
 * - An "error" chunk now closes the connection and clears streaming state.
 */
export function useChatStream(): UseChatStreamResult {
  const [isStreaming, setIsStreaming] = useState(false);
  const [error, setError] = useState<Error | null>(null);
  const eventSourceRef = useRef<EventSource | null>(null);
  const retryCountRef = useRef<number>(0);
  const retryTimeoutRef = useRef<NodeJS.Timeout | null>(null);
  const abortControllerRef = useRef<AbortController | null>(null);

  // Tears down the current stream: aborts in-flight work, closes the
  // EventSource, cancels any pending retry, and resets state.
  const stopStreaming = useCallback(function stopStreaming() {
    if (abortControllerRef.current) {
      abortControllerRef.current.abort();
      abortControllerRef.current = null;
    }
    if (eventSourceRef.current) {
      eventSourceRef.current.close();
      eventSourceRef.current = null;
    }
    if (retryTimeoutRef.current) {
      clearTimeout(retryTimeoutRef.current);
      retryTimeoutRef.current = null;
    }
    retryCountRef.current = 0;
    setIsStreaming(false);
  }, []);

  // Cleanup on unmount
  useEffect(() => {
    return () => {
      stopStreaming();
    };
  }, [stopStreaming]);

  const sendMessage = useCallback(
    async function sendMessage(
      sessionId: string,
      message: string,
      onChunk: (chunk: StreamChunk) => void,
      isUserMessage: boolean = true,
    ) {
      // Stop any existing stream. This also resets the retry counter, which
      // is why retries below reconnect directly instead of re-entering here.
      stopStreaming();
      const abortController = new AbortController();
      abortControllerRef.current = abortController;
      setIsStreaming(true);
      setError(null);
      // EventSource doesn't support custom headers, so we use a Next.js API
      // route that acts as a proxy and adds authentication headers
      // server-side (same pattern as PR #10905).
      const url = `/api/chat/sessions/${sessionId}/stream?message=${encodeURIComponent(message)}&is_user_message=${isUserMessage}`;
      return new Promise<void>((resolve, reject) => {
        // Guard so the promise settles exactly once across retries.
        let settled = false;
        const settle = (action: () => void) => {
          if (!settled) {
            settled = true;
            action();
          }
        };
        const connect = () => {
          if (abortController.signal.aborted) {
            settle(() => reject(new Error("Request aborted")));
            return;
          }
          const eventSource = new EventSource(url);
          eventSourceRef.current = eventSource;
          abortController.signal.addEventListener("abort", () => {
            eventSource.close();
            eventSourceRef.current = null;
          });
          // Single message handler: parse once, forward to the caller, and
          // settle on terminal chunk types.
          eventSource.onmessage = function handleMessage(event) {
            try {
              const chunk = JSON.parse(event.data) as StreamChunk;
              // A successful message means the connection is healthy again.
              retryCountRef.current = 0;
              onChunk(chunk);
              if (chunk.type === "stream_end") {
                stopStreaming();
                settle(resolve);
              } else if (chunk.type === "error") {
                const streamError = new Error(
                  chunk.message || chunk.content || "Stream error",
                );
                setError(streamError);
                stopStreaming();
                settle(() => reject(streamError));
              }
            } catch (err) {
              console.error("Failed to parse SSE chunk:", err);
              const parseError =
                err instanceof Error
                  ? err
                  : new Error("Failed to parse stream chunk");
              setError(parseError);
            }
          };
          // Transport-level errors: retry with exponential backoff instead
          // of failing immediately; reject only once retries are exhausted.
          eventSource.onerror = function handleError(event) {
            console.error("SSE error:", event);
            eventSource.close();
            if (eventSourceRef.current === eventSource) {
              eventSourceRef.current = null;
            }
            if (settled || abortController.signal.aborted) {
              return;
            }
            if (retryCountRef.current < MAX_RETRIES) {
              retryCountRef.current += 1;
              const retryDelay =
                INITIAL_RETRY_DELAY * Math.pow(2, retryCountRef.current - 1);
              toast.info("Connection interrupted", {
                description: `Retrying in ${retryDelay / 1000} seconds...`,
              });
              // NOTE(review): reconnecting re-issues the same GET, which
              // re-sends `message`; the backend must tolerate/deduplicate
              // the repeated delivery — confirm.
              retryTimeoutRef.current = setTimeout(connect, retryDelay);
            } else {
              const streamError = new Error(
                "Stream connection failed after multiple retries",
              );
              setError(streamError);
              toast.error("Connection Failed", {
                description:
                  "Unable to connect to chat service. Please try again.",
              });
              stopStreaming();
              settle(() => reject(streamError));
            }
          };
        };
        connect();
      });
    },
    [stopStreaming],
  );

  return {
    isStreaming,
    error,
    sendMessage,
    stopStreaming,
  };
}

View File

@@ -0,0 +1,17 @@
/**
 * Lookup table of internal tool names to user-friendly labels.
 * Hoisted to module scope so the object is not rebuilt on every call.
 */
const TOOL_DISPLAY_NAMES: Record<string, string> = {
  find_agent: "🔍 Search Marketplace",
  get_agent_details: "📋 Get Agent Details",
  check_credentials: "🔑 Check Credentials",
  setup_agent: "⚙️ Setup Agent",
  run_agent: "▶️ Run Agent",
  get_required_setup_info: "📝 Get Setup Requirements",
};

/**
 * Maps internal tool names to user-friendly display names with emojis.
 *
 * @param toolName - The internal tool name from the backend
 * @returns A user-friendly display name with an emoji prefix, or the raw
 *   tool name unchanged when no mapping is known
 */
export function getToolDisplayName(toolName: string): string {
  return TOOL_DISPLAY_NAMES[toolName] ?? toolName;
}

View File

@@ -16,7 +16,9 @@ export const ADMIN_PAGES = ["/admin"] as const;
export function getCookieSettings(): Partial<CookieOptions> {
return {
secure: process.env.NODE_ENV === "production",
// Use secure cookies only when behaving as CLOUD (served over HTTPS)
// Local/dev and Playwright runs use http://localhost, so cookies must be non-secure
secure: environment.isCloud(),
sameSite: "lax",
httpOnly: true,
} as const;

View File

@@ -420,3 +420,10 @@ export function isEmpty(value: any): boolean {
/**
 * Type guard for plain (non-array, non-null) objects.
 * Narrows `unknown` to `Record<string, unknown>` when it returns true.
 */
export function isObject(value: unknown): value is Record<string, unknown> {
  if (value === null || Array.isArray(value)) {
    return false;
  }
  return typeof value === "object";
}
/**
 * Checks whether a string is a canonical hyphenated UUID
 * (8-4-4-4-12 hexadecimal digits, case-insensitive).
 * Note: this accepts any UUID version/variant, not only v4.
 */
export function isValidUUID(value: string): boolean {
  const canonicalUuid = /^[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}$/i;
  return canonicalUuid.test(value);
}

View File

@@ -8,6 +8,7 @@ export enum Key {
SHEPHERD_TOUR = "shepherd-tour",
WALLET_LAST_SEEN_CREDITS = "wallet-last-seen-credits",
LIBRARY_AGENTS_CACHE = "library-agents-cache",
CHAT_SESSION_ID = "chat_session_id",
}
function get(key: Key) {

View File

@@ -0,0 +1,30 @@
/**
 * Shared type definitions for chat-related data structures.
 * These types provide type-safe alternatives to Record<string, any>.
 */
/**
 * Represents a valid JSON value that can be used in tool arguments or results.
 * This is a recursive type that allows for nested objects and arrays.
 * `undefined` is deliberately excluded: it is not representable in JSON.
 */
export type JsonValue =
  | string
  | number
  | boolean
  | null
  | JsonValue[]
  | { [key: string]: JsonValue };
/**
 * Represents tool arguments passed to a tool call.
 * Can be a simple object with string keys and JSON values.
 */
export interface ToolArguments {
  [key: string]: JsonValue;
}
/**
 * Represents the result returned from a tool execution.
 * Can be either a string or a structured object with JSON values.
 */
export type ToolResult = string | { [key: string]: JsonValue };