Compare commits

..

2 Commits

Author SHA1 Message Date
Reinier van der Leer
a301ae4879 Merge branch 'dev' into pwuts/open-2923-v2-external-api 2026-01-16 14:48:02 +01:00
Reinier van der Leer
87a63ecdba first draft (missing migrations) 2026-01-05 15:30:01 +01:00
152 changed files with 5092 additions and 6892 deletions

View File

@@ -1,21 +1,57 @@
from fastapi import FastAPI
"""
External API Application
This module defines the main FastAPI application for the external API,
which mounts the v1 and v2 sub-applications.
"""
from fastapi import FastAPI
from fastapi.responses import RedirectResponse
from backend.api.middleware.security import SecurityHeadersMiddleware
from backend.monitoring.instrumentation import instrument_fastapi
from .v1.routes import v1_router
from .v1.app import v1_app
from .v2.app import v2_app
DESCRIPTION = """
The external API provides programmatic access to the AutoGPT Platform for building
integrations, automations, and custom applications.
### API Versions
| Version | End of Life | Path | Documentation |
|---------------------|-------------|------------------------|---------------|
| **v2** | | `/external-api/v2/...` | [v2 docs](v2/docs) |
| **v1** (deprecated) | 2025-05-01 | `/external-api/v1/...` | [v1 docs](v1/docs) |
**Recommendation**: New integrations should use v2.
For authentication details and usage examples, see the
[API Integration Guide](https://docs.agpt.co/platform/integrating/api-guide/).
"""
external_api = FastAPI(
title="AutoGPT External API",
description="External API for AutoGPT integrations",
title="AutoGPT Platform API",
summary="External API for AutoGPT Platform integrations",
description=DESCRIPTION,
version="2.0.0",
docs_url="/docs",
version="1.0",
redoc_url="/redoc",
)
external_api.add_middleware(SecurityHeadersMiddleware)
external_api.include_router(v1_router, prefix="/v1")
# Add Prometheus instrumentation
@external_api.get("/", include_in_schema=False)
async def root_redirect() -> RedirectResponse:
"""Redirect root to API documentation."""
return RedirectResponse(url="/docs")
# Mount versioned sub-applications
# Each sub-app has its own /docs page at /v1/docs and /v2/docs
external_api.mount("/v1", v1_app)
external_api.mount("/v2", v2_app)
# Add Prometheus instrumentation to the main app
instrument_fastapi(
external_api,
service_name="external-api",

View File

@@ -0,0 +1,39 @@
"""
V1 External API Application
This module defines the FastAPI application for the v1 external API.
"""
from fastapi import FastAPI
from backend.api.middleware.security import SecurityHeadersMiddleware
from .routes import v1_router
DESCRIPTION = """
The v1 API provides access to core AutoGPT functionality for external integrations.
For authentication details and usage examples, see the
[API Integration Guide](https://docs.agpt.co/platform/integrating/api-guide/).
"""
v1_app = FastAPI(
title="AutoGPT Platform API",
summary="External API for AutoGPT Platform integrations (v1)",
description=DESCRIPTION,
version="1.0.0",
docs_url="/docs",
redoc_url="/redoc",
openapi_url="/openapi.json",
openapi_tags=[
{"name": "user", "description": "User information"},
{"name": "blocks", "description": "Block operations"},
{"name": "graphs", "description": "Graph execution"},
{"name": "store", "description": "Marketplace agents and creators"},
{"name": "integrations", "description": "OAuth credential management"},
{"name": "tools", "description": "AI assistant tools"},
],
)
v1_app.add_middleware(SecurityHeadersMiddleware)
v1_app.include_router(v1_router)

View File

@@ -0,0 +1,9 @@
"""
V2 External API
This module provides the v2 external API for programmatic access to the AutoGPT Platform.
"""
from .routes import v2_router
__all__ = ["v2_router"]

View File

@@ -0,0 +1,82 @@
"""
V2 External API Application
This module defines the FastAPI application for the v2 external API.
"""
from fastapi import FastAPI
from backend.api.middleware.security import SecurityHeadersMiddleware
from .routes import v2_router
DESCRIPTION = """
The v2 API provides comprehensive access to the AutoGPT Platform for building
integrations, automations, and custom applications.
### Key Improvements over v1
- **Consistent naming**: Uses `graph_id`/`graph_version` consistently
- **Better pagination**: All list endpoints support pagination
- **Comprehensive coverage**: Access to library, runs, schedules, credits, and more
- **Human-in-the-loop**: Review and approve agent decisions via the API
For authentication details and usage examples, see the
[API Integration Guide](https://docs.agpt.co/platform/integrating/api-guide/).
### Pagination
List endpoints return paginated responses. Use `page` and `page_size` query
parameters to navigate results. Maximum page size is 100 items.
"""
v2_app = FastAPI(
title="AutoGPT Platform External API",
summary="External API for AutoGPT Platform integrations (v2)",
description=DESCRIPTION,
version="2.0.0",
docs_url="/docs",
redoc_url="/redoc",
openapi_url="/openapi.json",
openapi_tags=[
{
"name": "graphs",
"description": "Create, update, and manage agent graphs",
},
{
"name": "schedules",
"description": "Manage scheduled graph executions",
},
{
"name": "blocks",
"description": "Discover available building blocks",
},
{
"name": "marketplace",
"description": "Browse agents and creators, manage submissions",
},
{
"name": "library",
"description": "Access your agent library and execute agents",
},
{
"name": "runs",
"description": "Monitor execution runs and human-in-the-loop reviews",
},
{
"name": "credits",
"description": "Check balance and view transaction history",
},
{
"name": "integrations",
"description": "Manage OAuth credentials for external services",
},
{
"name": "files",
"description": "Upload files for agent input",
},
],
)
v2_app.add_middleware(SecurityHeadersMiddleware)
v2_app.include_router(v2_router)

View File

@@ -0,0 +1,140 @@
"""
V2 External API - Blocks Endpoints
Provides read-only access to available building blocks.
"""
import logging
from typing import Any
from fastapi import APIRouter, Response, Security
from fastapi.concurrency import run_in_threadpool
from prisma.enums import APIKeyPermission
from pydantic import BaseModel, Field
from backend.api.external.middleware import require_permission
from backend.data.auth.base import APIAuthorizationInfo
from backend.data.block import get_blocks
from backend.util.cache import cached
from backend.util.json import dumps
logger = logging.getLogger(__name__)
blocks_router = APIRouter()
# ============================================================================
# Models
# ============================================================================
class BlockCost(BaseModel):
    """Cost information for a block.

    Mirrors the internal block-cost entries returned by
    `backend.data.credit.get_block_cost` (dumped via `model_dump`).
    """
    # Billing model identifier, e.g. 'per_call' or 'per_token'.
    cost_type: str = Field(description="Type of cost (e.g., 'per_call', 'per_token')")
    # Input conditions under which this cost entry applies; empty = always.
    cost_filter: dict[str, Any] = Field(
        default_factory=dict, description="Conditions for this cost"
    )
    # Amount charged, denominated in platform credits.
    cost_amount: int = Field(description="Cost amount in credits")
class Block(BaseModel):
    """A building block that can be used in graphs.

    Shape matches the dicts produced by `_compute_blocks_sync` below:
    the block's `to_dict()` output plus a `costs` list.
    """
    id: str
    name: str
    description: str
    categories: list[str] = Field(default_factory=list)
    # JSON schemas describing the block's accepted inputs and emitted outputs.
    input_schema: dict[str, Any]
    output_schema: dict[str, Any]
    costs: list[BlockCost] = Field(default_factory=list)
    # Disabled blocks are filtered out by _compute_blocks_sync, so in
    # practice listed blocks carry disabled=False.
    disabled: bool = Field(default=False)
class BlocksListResponse(BaseModel):
    """Response for listing blocks.

    NOTE(review): not referenced by the endpoint visible in this module
    (`list_blocks` returns a raw pre-serialized `Response`); presumably kept
    for schema documentation — confirm before removing.
    """
    blocks: list[Block]
    total_count: int
# ============================================================================
# Internal Functions
# ============================================================================
def _compute_blocks_sync() -> str:
    """
    Build the JSON payload describing all enabled blocks.

    Heavy, synchronous work: instantiates every registered block (226+),
    resolves each block's cost entries, and serializes the whole list to a
    JSON string.
    """
    # Imported here rather than at module level (matches the module's
    # existing lazy-import placement).
    from backend.data.credit import get_block_cost

    serialized: list[dict] = []
    for block_cls in get_blocks().values():
        instance = block_cls()
        if instance.disabled:
            continue
        # Cost entries may be BaseModel objects; normalize them to dicts.
        cost_entries = [
            entry.model_dump() if isinstance(entry, BaseModel) else entry
            for entry in get_block_cost(instance)
        ]
        serialized.append({**instance.to_dict(), "costs": cost_entries})
    return dumps(serialized)
# Cache the serialized payload for one hour; block definitions only change
# on deploy, so staleness is bounded and acceptable.
@cached(ttl_seconds=3600)
async def _get_cached_blocks() -> str:
    """
    Async cached wrapper around `_compute_blocks_sync`.

    On cache miss: runs heavy work in thread pool (keeps the event loop free).
    On cache hit: returns cached string immediately.
    Per its own docstring, the `@cached` decorator provides thundering-herd
    protection so concurrent misses compute the payload only once.
    """
    return await run_in_threadpool(_compute_blocks_sync)
# ============================================================================
# Endpoints
# ============================================================================
@blocks_router.get(
    path="",
    # The response schema is declared manually because the endpoint returns
    # a raw Response rather than a response_model (see note in the body).
    summary="List available blocks",
    responses={
        200: {
            "description": "List of available building blocks",
            "content": {
                "application/json": {
                    "schema": {
                        "items": {"additionalProperties": True, "type": "object"},
                        "type": "array",
                    }
                }
            },
        }
    },
)
async def list_blocks(
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_BLOCK)
    ),
) -> Response:
    """
    List all available building blocks that can be used in graphs.

    Each block represents a specific capability (e.g., HTTP request, text processing,
    AI completion, etc.) that can be connected in a graph to create an agent.
    The response includes input/output schemas for each block, as well as
    cost information for blocks that consume credits.

    Requires an API key with the READ_BLOCK permission.
    """
    # Serve the cached pre-serialized JSON string directly as the body;
    # this avoids re-serializing the large payload on every request.
    content = await _get_cached_blocks()
    return Response(
        content=content,
        media_type="application/json",
    )

View File

@@ -0,0 +1,36 @@
"""
Common utilities for V2 External API
"""
from typing import Generic, TypeVar

from pydantic import BaseModel, Field
# Constants for pagination
MAX_PAGE_SIZE = 100
DEFAULT_PAGE_SIZE = 20
class PaginationParams(BaseModel):
    """Common pagination parameters.

    Shared request-side model for list endpoints; enforces the module-level
    DEFAULT_PAGE_SIZE / MAX_PAGE_SIZE bounds via field constraints.
    """
    page: int = Field(default=1, ge=1, description="Page number (1-indexed)")
    page_size: int = Field(
        default=DEFAULT_PAGE_SIZE,
        ge=1,
        le=MAX_PAGE_SIZE,
        description=f"Number of items per page (max {MAX_PAGE_SIZE})",
    )
T = TypeVar("T")
class PaginatedResponse(BaseModel):
"""Generic paginated response wrapper."""
items: list
total_count: int = Field(description="Total number of items across all pages")
page: int = Field(description="Current page number (1-indexed)")
page_size: int = Field(description="Number of items per page")
total_pages: int = Field(description="Total number of pages")

View File

@@ -0,0 +1,141 @@
"""
V2 External API - Credits Endpoints
Provides access to credit balance and transaction history.
"""
import logging
from datetime import datetime
from typing import Optional
from fastapi import APIRouter, Query, Security
from prisma.enums import APIKeyPermission
from pydantic import BaseModel, Field
from backend.api.external.middleware import require_permission
from backend.data.auth.base import APIAuthorizationInfo
from backend.data.credit import get_user_credit_model
from .common import DEFAULT_PAGE_SIZE, MAX_PAGE_SIZE
logger = logging.getLogger(__name__)
credits_router = APIRouter()
# ============================================================================
# Models
# ============================================================================
class CreditBalance(BaseModel):
    """User's credit balance."""
    # Balance is an integer number of platform credits.
    balance: int = Field(description="Current credit balance")
class CreditTransaction(BaseModel):
    """A credit transaction.

    Public projection of the internal transaction records returned by the
    credit model's `get_transaction_history`.
    """
    # Presumably a unique identifier for the transaction — confirm against
    # the credit module.
    transaction_key: str
    amount: int = Field(description="Transaction amount (positive or negative)")
    # Named `type` to match the public API field name (shadows the builtin
    # only within this class body, which is harmless).
    type: str = Field(description="One of: TOP_UP, USAGE, GRANT, REFUND")
    transaction_time: datetime
    running_balance: Optional[int] = Field(
        default=None, description="Balance after this transaction"
    )
    description: Optional[str] = None
class CreditTransactionsResponse(BaseModel):
    """Response for listing credit transactions.

    Carries the standard pagination envelope fields alongside the page of
    transactions (see `get_transactions` for pagination caveats).
    """
    transactions: list[CreditTransaction]
    total_count: int
    page: int
    page_size: int
    total_pages: int
# ============================================================================
# Endpoints
# ============================================================================
@credits_router.get(
    path="",
    summary="Get credit balance",
    response_model=CreditBalance,
)
async def get_balance(
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_CREDITS)
    ),
) -> CreditBalance:
    """
    Get the current credit balance for the authenticated user.

    Requires an API key with the READ_CREDITS permission.

    Returns:
        CreditBalance: the user's current balance in credits.
    """
    user_credit_model = await get_user_credit_model(auth.user_id)
    balance = await user_credit_model.get_credits(auth.user_id)
    return CreditBalance(balance=balance)
@credits_router.get(
    path="/transactions",
    summary="Get transaction history",
    response_model=CreditTransactionsResponse,
)
async def get_transactions(
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_CREDITS)
    ),
    page: int = Query(default=1, ge=1, description="Page number (1-indexed)"),
    page_size: int = Query(
        default=DEFAULT_PAGE_SIZE,
        ge=1,
        le=MAX_PAGE_SIZE,
        description=f"Items per page (max {MAX_PAGE_SIZE})",
    ),
    transaction_type: Optional[str] = Query(
        default=None,
        description="Filter by transaction type (TOP_UP, USAGE, GRANT, REFUND)",
    ),
) -> CreditTransactionsResponse:
    """
    Get credit transaction history for the authenticated user.

    Returns transactions sorted by most recent first.

    NOTE(review): `page` is accepted for forward compatibility but is NOT
    used when fetching — only `page_size` caps the result — so every page
    number returns the same (most recent) transactions, and `total_count`
    reflects only the returned slice, not all transactions. Revisit once
    the credit module supports offset/cursor pagination.
    """
    user_credit_model = await get_user_credit_model(auth.user_id)
    # `transaction_count_limit` caps how many records come back; there is no
    # offset parameter, hence the pagination caveat above.
    history = await user_credit_model.get_transaction_history(
        user_id=auth.user_id,
        transaction_count_limit=page_size,
        transaction_type=transaction_type,
    )
    transactions = [
        CreditTransaction(
            transaction_key=t.transaction_key,
            amount=t.amount,
            type=t.transaction_type.value,
            transaction_time=t.transaction_time,
            running_balance=t.running_balance,
            description=t.description,
        )
        for t in history.transactions
    ]
    # Note: The current credit module doesn't support true pagination,
    # so we're returning what we have
    total_count = len(transactions)
    total_pages = 1  # Without true pagination support
    return CreditTransactionsResponse(
        transactions=transactions,
        total_count=total_count,
        page=page,
        page_size=page_size,
        total_pages=total_pages,
    )

View File

@@ -0,0 +1,132 @@
"""
V2 External API - Files Endpoints
Provides file upload functionality for agent inputs.
"""
import base64
import logging
from fastapi import APIRouter, File, HTTPException, Query, Security, UploadFile
from prisma.enums import APIKeyPermission
from pydantic import BaseModel, Field
from backend.api.external.middleware import require_permission
from backend.data.auth.base import APIAuthorizationInfo
from backend.util.cloud_storage import get_cloud_storage_handler
from backend.util.settings import Settings
from backend.util.virus_scanner import scan_content_safe
logger = logging.getLogger(__name__)
settings = Settings()
files_router = APIRouter()
# ============================================================================
# Models
# ============================================================================
class UploadFileResponse(BaseModel):
    """Response after uploading a file.

    `file_uri` is either a cloud-storage path or, when GCS is not
    configured, a base64 `data:` URI (see `upload_file`).
    """
    file_uri: str = Field(description="URI to reference the uploaded file in agents")
    file_name: str
    size: int = Field(description="File size in bytes")
    content_type: str
    # Requested expiry window; for the data-URI fallback no server-side
    # expiry actually applies.
    expires_in_hours: int
# ============================================================================
# Endpoints
# ============================================================================
def _create_file_size_error(size_bytes: int, max_size_mb: int) -> HTTPException:
    """Create standardized file size error response.

    Returns (does not raise) a 400 HTTPException; callers raise it so that
    both the pre-read and post-read size checks produce identical errors.
    """
    return HTTPException(
        status_code=400,
        detail=f"File size ({size_bytes} bytes) exceeds the maximum allowed size of {max_size_mb}MB",
    )
@files_router.post(
    path="/upload",
    summary="Upload a file",
    response_model=UploadFileResponse,
)
async def upload_file(
    file: UploadFile = File(...),
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.UPLOAD_FILES)
    ),
    provider: str = Query(
        default="gcs", description="Storage provider (gcs, s3, azure)"
    ),
    expiration_hours: int = Query(
        default=24, ge=1, le=48, description="Hours until file expires (1-48)"
    ),
) -> UploadFileResponse:
    """
    Upload a file to cloud storage for use with agents.

    The returned `file_uri` can be used as input to agents that accept file inputs
    (e.g., FileStoreBlock, AgentFileInputBlock).
    Files are automatically scanned for viruses before storage.

    Processing order matters: cheap header-based size check, full read with a
    second size check, virus scan, then storage (or base64 data-URI fallback).

    Raises:
        HTTPException(400): if the file exceeds the configured size limit.
    """
    # Check file size limit
    max_size_mb = settings.config.upload_file_size_limit_mb
    max_size_bytes = max_size_mb * 1024 * 1024
    # Try to get file size from headers first — rejects oversized uploads
    # before buffering the whole body in memory.
    if hasattr(file, "size") and file.size is not None and file.size > max_size_bytes:
        raise _create_file_size_error(file.size, max_size_mb)
    # Read file content (entire file is held in memory from here on)
    content = await file.read()
    content_size = len(content)
    # Double-check file size after reading — the header value is
    # client-supplied and may be absent or wrong.
    if content_size > max_size_bytes:
        raise _create_file_size_error(content_size, max_size_mb)
    # Extract file info, with safe defaults when the client omits them
    file_name = file.filename or "uploaded_file"
    content_type = file.content_type or "application/octet-stream"
    # Virus scan the content before it is persisted anywhere
    await scan_content_safe(content, filename=file_name)
    # Check if cloud storage is configured
    cloud_storage = await get_cloud_storage_handler()
    if not cloud_storage.config.gcs_bucket_name:
        # Fallback to base64 data URI when GCS is not configured.
        # NOTE(review): the data URI embeds the whole file in the response
        # and never actually expires, despite expires_in_hours — confirm
        # this is acceptable for self-hosted deployments.
        base64_content = base64.b64encode(content).decode("utf-8")
        data_uri = f"data:{content_type};base64,{base64_content}"
        return UploadFileResponse(
            file_uri=data_uri,
            file_name=file_name,
            size=content_size,
            content_type=content_type,
            expires_in_hours=expiration_hours,
        )
    # Store in cloud storage, scoped to the authenticated user
    storage_path = await cloud_storage.store_file(
        content=content,
        filename=file_name,
        provider=provider,
        expiration_hours=expiration_hours,
        user_id=auth.user_id,
    )
    return UploadFileResponse(
        file_uri=storage_path,
        file_name=file_name,
        size=content_size,
        content_type=content_type,
        expires_in_hours=expiration_hours,
    )

View File

@@ -0,0 +1,445 @@
"""
V2 External API - Graphs Endpoints
Provides endpoints for managing agent graphs (CRUD operations).
"""
import logging
from fastapi import APIRouter, HTTPException, Query, Security
from prisma.enums import APIKeyPermission
from backend.api.external.middleware import require_permission
from backend.data import graph as graph_db
from backend.data.auth.base import APIAuthorizationInfo
from backend.integrations.webhooks.graph_lifecycle_hooks import (
on_graph_activate,
on_graph_deactivate,
)
from .common import DEFAULT_PAGE_SIZE, MAX_PAGE_SIZE
from .models import (
CreateGraphRequest,
DeleteGraphResponse,
GraphDetails,
GraphLink,
GraphMeta,
GraphNode,
GraphSettings,
GraphsListResponse,
SetActiveVersionRequest,
)
logger = logging.getLogger(__name__)
graphs_router = APIRouter()
def _convert_graph_meta(graph: graph_db.GraphMeta) -> GraphMeta:
    """Convert internal GraphMeta to v2 API model.

    Straight field-for-field mapping; excludes nodes/links (see
    `_convert_graph_details` for the full projection).
    """
    return GraphMeta(
        id=graph.id,
        version=graph.version,
        is_active=graph.is_active,
        name=graph.name,
        description=graph.description,
        created_at=graph.created_at,
        input_schema=graph.input_schema,
        output_schema=graph.output_schema,
    )
def _convert_graph_details(graph: graph_db.GraphModel) -> GraphDetails:
    """
    Project an internal GraphModel onto the v2 API GraphDetails model.

    Extends the metadata mapping with the graph's nodes, links, and
    credentials input schema.
    """
    node_models = [
        GraphNode(
            id=node.id,
            block_id=node.block_id,
            input_default=node.input_default,
            metadata=node.metadata,
        )
        for node in graph.nodes
    ]
    link_models = [
        GraphLink(
            id=link.id,
            source_id=link.source_id,
            sink_id=link.sink_id,
            source_name=link.source_name,
            sink_name=link.sink_name,
            is_static=link.is_static,
        )
        for link in graph.links
    ]
    return GraphDetails(
        id=graph.id,
        version=graph.version,
        is_active=graph.is_active,
        name=graph.name,
        description=graph.description,
        created_at=graph.created_at,
        input_schema=graph.input_schema,
        output_schema=graph.output_schema,
        nodes=node_models,
        links=link_models,
        credentials_input_schema=graph.credentials_input_schema,
    )
@graphs_router.get(
    path="",
    summary="List user's graphs",
    response_model=GraphsListResponse,
)
async def list_graphs(
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_GRAPH)
    ),
    page: int = Query(default=1, ge=1, description="Page number (1-indexed)"),
    page_size: int = Query(
        default=DEFAULT_PAGE_SIZE,
        ge=1,
        le=MAX_PAGE_SIZE,
        description=f"Items per page (max {MAX_PAGE_SIZE})",
    ),
) -> GraphsListResponse:
    """
    List all graphs owned by the authenticated user.

    Returns a paginated list of graph metadata (not full graph details).
    Only active graphs are included (filter_by="active" below).
    """
    graphs, pagination_info = await graph_db.list_graphs_paginated(
        user_id=auth.user_id,
        page=page,
        page_size=page_size,
        filter_by="active",
    )
    # Echo the DB layer's pagination info back in the response envelope.
    return GraphsListResponse(
        graphs=[_convert_graph_meta(g) for g in graphs],
        total_count=pagination_info.total_items,
        page=pagination_info.current_page,
        page_size=pagination_info.page_size,
        total_pages=pagination_info.total_pages,
    )
@graphs_router.post(
    path="",
    summary="Create a new graph",
    response_model=GraphDetails,
)
async def create_graph(
    create_graph_request: CreateGraphRequest,
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.WRITE_GRAPH)
    ),
) -> GraphDetails:
    """
    Create a new agent graph.

    The graph will be validated and assigned a new ID. It will automatically
    be added to the user's library.

    Raises:
        (validation errors from validate_graph propagate to the caller)
    """
    # Import here to avoid circular imports
    from backend.api.features.library import db as library_db
    # Convert v2 API Graph model to internal Graph model
    internal_graph = graph_db.Graph(
        # Client-supplied ID is only a placeholder; reassign_ids below
        # replaces it with a fresh one.
        id=create_graph_request.graph.id or "",
        version=create_graph_request.graph.version,
        is_active=create_graph_request.graph.is_active,
        name=create_graph_request.graph.name,
        description=create_graph_request.graph.description,
        nodes=[
            graph_db.Node(
                id=node.id,
                block_id=node.block_id,
                input_default=node.input_default,
                metadata=node.metadata,
            )
            for node in create_graph_request.graph.nodes
        ],
        links=[
            graph_db.Link(
                id=link.id,
                source_id=link.source_id,
                sink_id=link.sink_id,
                source_name=link.source_name,
                sink_name=link.sink_name,
                is_static=link.is_static,
            )
            for link in create_graph_request.graph.links
        ],
    )
    graph = graph_db.make_graph_model(internal_graph, auth.user_id)
    # New graph: both the graph ID and all node/link IDs are regenerated.
    graph.reassign_ids(user_id=auth.user_id, reassign_graph_id=True)
    graph.validate_graph(for_run=False)
    await graph_db.create_graph(graph, user_id=auth.user_id)
    # Make the new graph visible in the user's library.
    await library_db.create_library_agent(graph, user_id=auth.user_id)
    # Run activation hooks (e.g. webhook registration) before returning.
    activated_graph = await on_graph_activate(graph, user_id=auth.user_id)
    return _convert_graph_details(activated_graph)
@graphs_router.get(
    path="/{graph_id}",
    summary="Get graph details",
    response_model=GraphDetails,
)
async def get_graph(
    graph_id: str,
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_GRAPH)
    ),
    version: int | None = Query(
        default=None,
        description="Specific version to retrieve (default: active version)",
    ),
) -> GraphDetails:
    """
    Get detailed information about a specific graph.

    By default returns the active version. Use the `version` query parameter
    to retrieve a specific version.

    Raises:
        HTTPException(404): if no matching graph is owned by the user.
    """
    graph = await graph_db.get_graph(
        graph_id,
        version,
        user_id=auth.user_id,
        include_subgraphs=True,
    )
    if not graph:
        raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
    return _convert_graph_details(graph)
@graphs_router.put(
    path="/{graph_id}",
    summary="Update graph (creates new version)",
    response_model=GraphDetails,
)
async def update_graph(
    graph_id: str,
    graph_request: CreateGraphRequest,
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.WRITE_GRAPH)
    ),
) -> GraphDetails:
    """
    Update a graph by creating a new version.

    This does not modify existing versions - it creates a new version with
    the provided content. The new version becomes the active version.

    Raises:
        HTTPException(400): if the body's graph ID conflicts with the URL.
        HTTPException(404): if the graph does not exist for this user.
    """
    # Import here to avoid circular imports
    from backend.api.features.library import db as library_db
    graph_data = graph_request.graph
    if graph_data.id and graph_data.id != graph_id:
        raise HTTPException(400, detail="Graph ID does not match ID in URI")
    existing_versions = await graph_db.get_graph_all_versions(
        graph_id, user_id=auth.user_id
    )
    if not existing_versions:
        raise HTTPException(404, detail=f"Graph #{graph_id} not found")
    # New version number = highest existing version + 1.
    latest_version_number = max(g.version for g in existing_versions)
    # Convert v2 API Graph model to internal Graph model
    internal_graph = graph_db.Graph(
        id=graph_id,
        version=latest_version_number + 1,
        is_active=graph_data.is_active,
        name=graph_data.name,
        description=graph_data.description,
        nodes=[
            graph_db.Node(
                id=node.id,
                block_id=node.block_id,
                input_default=node.input_default,
                metadata=node.metadata,
            )
            for node in graph_data.nodes
        ],
        links=[
            graph_db.Link(
                id=link.id,
                source_id=link.source_id,
                sink_id=link.sink_id,
                source_name=link.source_name,
                sink_name=link.sink_name,
                is_static=link.is_static,
            )
            for link in graph_data.links
        ],
    )
    # Remember the previously-active version so its lifecycle hooks can be
    # torn down AFTER the new version is fully activated (ordering matters).
    current_active_version = next((v for v in existing_versions if v.is_active), None)
    graph = graph_db.make_graph_model(internal_graph, auth.user_id)
    # Keep the graph ID; regenerate only node/link IDs for the new version.
    graph.reassign_ids(user_id=auth.user_id, reassign_graph_id=False)
    graph.validate_graph(for_run=False)
    new_graph_version = await graph_db.create_graph(graph, user_id=auth.user_id)
    if new_graph_version.is_active:
        # Point the user's library entry at the new version, activate it,
        # persist the active-version switch, then deactivate the old one.
        await library_db.update_agent_version_in_library(
            auth.user_id, new_graph_version.id, new_graph_version.version
        )
        new_graph_version = await on_graph_activate(
            new_graph_version, user_id=auth.user_id
        )
        await graph_db.set_graph_active_version(
            graph_id=graph_id, version=new_graph_version.version, user_id=auth.user_id
        )
        if current_active_version:
            await on_graph_deactivate(current_active_version, user_id=auth.user_id)
    # Re-fetch with subgraphs so the response matches GET /{graph_id}.
    new_graph_version_with_subgraphs = await graph_db.get_graph(
        graph_id,
        new_graph_version.version,
        user_id=auth.user_id,
        include_subgraphs=True,
    )
    assert new_graph_version_with_subgraphs
    return _convert_graph_details(new_graph_version_with_subgraphs)
@graphs_router.delete(
    path="/{graph_id}",
    summary="Delete graph permanently",
    response_model=DeleteGraphResponse,
)
async def delete_graph(
    graph_id: str,
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.WRITE_GRAPH)
    ),
) -> DeleteGraphResponse:
    """
    Permanently delete a graph and all its versions.

    This action cannot be undone. All associated executions will remain
    but will reference a deleted graph.
    """
    # If there is an active version, tear down its lifecycle hooks
    # (e.g. webhooks) before the rows disappear.
    active_version = await graph_db.get_graph(
        graph_id=graph_id, version=None, user_id=auth.user_id
    )
    if active_version:
        await on_graph_deactivate(active_version, user_id=auth.user_id)
    deleted_versions = await graph_db.delete_graph(graph_id, user_id=auth.user_id)
    return DeleteGraphResponse(version_count=deleted_versions)
@graphs_router.get(
    path="/{graph_id}/versions",
    summary="List all graph versions",
    response_model=list[GraphDetails],
)
async def list_graph_versions(
    graph_id: str,
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_GRAPH)
    ),
) -> list[GraphDetails]:
    """
    Get all versions of a specific graph.

    Returns a list of all versions, with the active version marked
    (via each item's `is_active` field).

    Raises:
        HTTPException(404): if the graph does not exist for this user.
    """
    graphs = await graph_db.get_graph_all_versions(graph_id, user_id=auth.user_id)
    if not graphs:
        raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
    return [_convert_graph_details(g) for g in graphs]
@graphs_router.put(
    path="/{graph_id}/versions/active",
    summary="Set active graph version",
)
async def set_active_version(
    graph_id: str,
    request_body: SetActiveVersionRequest,
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.WRITE_GRAPH)
    ),
) -> None:
    """
    Set which version of a graph is the active version.

    The active version is used when executing the graph without specifying
    a version number.

    Raises:
        HTTPException(404): if the requested version does not exist.
    """
    # Import here to avoid circular imports
    from backend.api.features.library import db as library_db
    new_active_version = request_body.active_graph_version
    new_active_graph = await graph_db.get_graph(
        graph_id, new_active_version, user_id=auth.user_id
    )
    if not new_active_graph:
        raise HTTPException(404, f"Graph #{graph_id} v{new_active_version} not found")
    # Fetch the currently-active version (version=None) BEFORE switching,
    # so it can be deactivated afterwards.
    current_active_graph = await graph_db.get_graph(
        graph_id=graph_id,
        version=None,
        user_id=auth.user_id,
    )
    # Ordering: activate the new version, persist the switch, update the
    # library pointer, and only then deactivate the old version.
    await on_graph_activate(new_active_graph, user_id=auth.user_id)
    await graph_db.set_graph_active_version(
        graph_id=graph_id,
        version=new_active_version,
        user_id=auth.user_id,
    )
    await library_db.update_agent_version_in_library(
        auth.user_id, new_active_graph.id, new_active_graph.version
    )
    # Skip deactivation when re-activating the already-active version.
    if current_active_graph and current_active_graph.version != new_active_version:
        await on_graph_deactivate(current_active_graph, user_id=auth.user_id)
@graphs_router.patch(
    path="/{graph_id}/settings",
    summary="Update graph settings",
    response_model=GraphSettings,
)
async def update_graph_settings(
    graph_id: str,
    settings: GraphSettings,
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.WRITE_GRAPH)
    ),
) -> GraphSettings:
    """
    Update settings for a graph.

    Currently supports:
    - human_in_the_loop_safe_mode: Enable/disable safe mode for human-in-the-loop blocks

    Settings are stored on the user's library agent entry, not on the graph
    itself — hence the 404 when the graph is not in the user's library.

    Raises:
        HTTPException(404): if the graph is not in the user's library.
    """
    # Import here to avoid circular imports
    from backend.api.features.library import db as library_db
    from backend.data.graph import GraphSettings as InternalGraphSettings
    library_agent = await library_db.get_library_agent_by_graph_id(
        graph_id=graph_id, user_id=auth.user_id
    )
    if not library_agent:
        raise HTTPException(404, f"Graph #{graph_id} not found in user's library")
    # Convert to internal model
    internal_settings = InternalGraphSettings(
        human_in_the_loop_safe_mode=settings.human_in_the_loop_safe_mode
    )
    updated_agent = await library_db.update_library_agent_settings(
        user_id=auth.user_id,
        agent_id=library_agent.id,
        settings=internal_settings,
    )
    # Echo back the persisted value rather than the request value.
    return GraphSettings(
        human_in_the_loop_safe_mode=updated_agent.settings.human_in_the_loop_safe_mode
    )

View File

@@ -0,0 +1,271 @@
"""
V2 External API - Integrations Endpoints
Provides access to user's integration credentials.
"""
import logging
from typing import Optional
from fastapi import APIRouter, HTTPException, Path, Security
from prisma.enums import APIKeyPermission
from pydantic import BaseModel, Field
from backend.api.external.middleware import require_permission
from backend.api.features.library import db as library_db
from backend.data import graph as graph_db
from backend.data.auth.base import APIAuthorizationInfo
from backend.data.model import Credentials, OAuth2Credentials
from backend.integrations.creds_manager import IntegrationCredentialsManager
logger = logging.getLogger(__name__)
integrations_router = APIRouter()
creds_manager = IntegrationCredentialsManager()
# ============================================================================
# Models
# ============================================================================
class Credential(BaseModel):
    """A user's credential for an integration.

    Deliberately excludes secret material (tokens, keys); only metadata is
    exposed through the external API.
    """
    id: str
    provider: str = Field(description="Integration provider name")
    title: Optional[str] = Field(
        default=None, description="User-assigned title for this credential"
    )
    # Populated only for OAuth2 credentials (see _convert_credential).
    scopes: list[str] = Field(default_factory=list, description="Granted scopes")
class CredentialsListResponse(BaseModel):
    """Response for listing credentials."""
    credentials: list[Credential]
class CredentialRequirement(BaseModel):
    """A credential requirement for a graph or agent.

    Pairs a required provider (and scopes) with the user's own credentials
    that could satisfy it, so clients can prompt only for what is missing.
    """
    provider: str = Field(description="Required provider name")
    required_scopes: list[str] = Field(
        default_factory=list, description="Required scopes"
    )
    matching_credentials: list[Credential] = Field(
        default_factory=list,
        description="User's credentials that match this requirement",
    )
class CredentialRequirementsResponse(BaseModel):
    """Response for listing credential requirements."""
    requirements: list[CredentialRequirement]
# ============================================================================
# Conversion Functions
# ============================================================================
def _convert_credential(cred: Credentials) -> Credential:
    """
    Project an internal credential onto the public v2 model.

    Secret material is dropped; scopes are exposed only for OAuth2
    credentials (other credential kinds have none).
    """
    if isinstance(cred, OAuth2Credentials):
        granted_scopes: list[str] = cred.scopes or []
    else:
        granted_scopes = []
    return Credential(
        id=cred.id,
        provider=cred.provider,
        title=cred.title,
        scopes=granted_scopes,
    )
# ============================================================================
# Endpoints
# ============================================================================
@integrations_router.get(
    path="/credentials",
    summary="List all credentials",
    response_model=CredentialsListResponse,
)
async def list_credentials(
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_INTEGRATIONS)
    ),
) -> CredentialsListResponse:
    """
    List all integration credentials for the authenticated user.

    Returns every credential the user has connected, across all
    integration providers.
    """
    user_creds = await creds_manager.store.get_all_creds(auth.user_id)
    converted = [_convert_credential(c) for c in user_creds]
    return CredentialsListResponse(credentials=converted)
@integrations_router.get(
    path="/credentials/{provider}",
    summary="List credentials by provider",
    response_model=CredentialsListResponse,
)
async def list_credentials_by_provider(
    provider: str = Path(description="Provider name (e.g., 'github', 'google')"),
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_INTEGRATIONS)
    ),
) -> CredentialsListResponse:
    """
    List integration credentials for a specific provider.

    Provider names are compared case-insensitively.
    """
    wanted = provider.lower()  # hoisted: normalize the filter once
    user_creds = await creds_manager.store.get_all_creds(auth.user_id)
    matching = [
        _convert_credential(c) for c in user_creds if c.provider.lower() == wanted
    ]
    return CredentialsListResponse(credentials=matching)
@integrations_router.get(
    path="/graphs/{graph_id}/credentials",
    summary="List credentials matching graph requirements",
    response_model=CredentialRequirementsResponse,
)
async def list_graph_credential_requirements(
    graph_id: str = Path(description="Graph ID"),
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_INTEGRATIONS)
    ),
) -> CredentialRequirementsResponse:
    """
    List credential requirements for a graph and matching user credentials.

    This helps identify which credentials the user needs to provide
    when executing a graph.

    Raises:
        HTTPException: 404 if the graph does not exist or is not accessible.
    """
    graph = await graph_db.get_graph(
        graph_id=graph_id,
        version=None,  # None selects the active version
        user_id=auth.user_id,
        include_subgraphs=True,
    )
    if not graph:
        raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found")
    # The credentials input schema encodes the provider requirements.
    creds_schema = graph.credentials_input_schema
    all_credentials = await creds_manager.store.get_all_creds(auth.user_id)
    requirements: list[CredentialRequirement] = []
    # Only each field's schema is inspected; the field names are irrelevant here.
    for field_schema in creds_schema.get("properties", {}).values():
        # The schema structure varies: alternatives under "anyOf", or a
        # single top-level "provider" key.
        if "anyOf" in field_schema:
            providers = [
                option["provider"]
                for option in field_schema["anyOf"]
                if "provider" in option
            ]
        elif "provider" in field_schema:
            providers = [field_schema["provider"]]
        else:
            providers = []
        for provider in providers:
            wanted = provider.lower()  # case-insensitive provider match
            matching = [
                _convert_credential(c)
                for c in all_credentials
                if c.provider.lower() == wanted
            ]
            requirements.append(
                CredentialRequirement(
                    provider=provider,
                    required_scopes=[],  # TODO: extract required scopes from schema
                    matching_credentials=matching,
                )
            )
    return CredentialRequirementsResponse(requirements=requirements)
@integrations_router.get(
    path="/library/{agent_id}/credentials",
    summary="List credentials matching library agent requirements",
    response_model=CredentialRequirementsResponse,
)
async def list_library_agent_credential_requirements(
    agent_id: str = Path(description="Library agent ID"),
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_INTEGRATIONS)
    ),
) -> CredentialRequirementsResponse:
    """
    List credential requirements for a library agent and matching user credentials.

    This helps identify which credentials the user needs to provide
    when executing an agent from their library.

    Raises:
        HTTPException: 404 if the agent or its underlying graph is not found.
    """
    try:
        library_agent = await library_db.get_library_agent(
            id=agent_id,
            user_id=auth.user_id,
        )
    except Exception as err:
        # Chain the cause so the original lookup failure stays in the traceback.
        raise HTTPException(
            status_code=404, detail=f"Agent #{agent_id} not found"
        ) from err
    # Resolve the agent's pinned graph version (not necessarily the latest).
    graph = await graph_db.get_graph(
        graph_id=library_agent.graph_id,
        version=library_agent.graph_version,
        user_id=auth.user_id,
        include_subgraphs=True,
    )
    if not graph:
        raise HTTPException(
            status_code=404,
            detail=f"Graph for agent #{agent_id} not found",
        )
    creds_schema = graph.credentials_input_schema
    all_credentials = await creds_manager.store.get_all_creds(auth.user_id)
    requirements: list[CredentialRequirement] = []
    # Only each field's schema is inspected; the field names are irrelevant here.
    for field_schema in creds_schema.get("properties", {}).values():
        # The schema structure varies: alternatives under "anyOf", or a
        # single top-level "provider" key.
        if "anyOf" in field_schema:
            providers = [
                option["provider"]
                for option in field_schema["anyOf"]
                if "provider" in option
            ]
        elif "provider" in field_schema:
            providers = [field_schema["provider"]]
        else:
            providers = []
        for provider in providers:
            wanted = provider.lower()  # case-insensitive provider match
            matching = [
                _convert_credential(c)
                for c in all_credentials
                if c.provider.lower() == wanted
            ]
            requirements.append(
                CredentialRequirement(
                    provider=provider,
                    required_scopes=[],  # TODO: extract required scopes from schema
                    matching_credentials=matching,
                )
            )
    return CredentialRequirementsResponse(requirements=requirements)

View File

@@ -0,0 +1,247 @@
"""
V2 External API - Library Endpoints
Provides access to the user's agent library and agent execution.
"""
import logging
from fastapi import APIRouter, HTTPException, Path, Query, Security
from prisma.enums import APIKeyPermission
from backend.api.external.middleware import require_permission
from backend.api.features.library import db as library_db
from backend.api.features.library import model as library_model
from backend.data import execution as execution_db
from backend.data.auth.base import APIAuthorizationInfo
from backend.data.credit import get_user_credit_model
from backend.executor import utils as execution_utils
from .common import DEFAULT_PAGE_SIZE, MAX_PAGE_SIZE
from .models import (
ExecuteAgentRequest,
LibraryAgent,
LibraryAgentsResponse,
Run,
RunsListResponse,
)
# Module-level logger for this router's handlers.
logger = logging.getLogger(__name__)
# Router for the v2 library endpoints; mounted by the v2 app.
library_router = APIRouter()
# ============================================================================
# Conversion Functions
# ============================================================================
def _convert_library_agent(agent: library_model.LibraryAgent) -> LibraryAgent:
    """Map an internal LibraryAgent record onto the v2 API model, field for field."""
    payload = dict(
        id=agent.id,
        graph_id=agent.graph_id,
        graph_version=agent.graph_version,
        name=agent.name,
        description=agent.description,
        is_favorite=agent.is_favorite,
        can_access_graph=agent.can_access_graph,
        is_latest_version=agent.is_latest_version,
        image_url=agent.image_url,
        creator_name=agent.creator_name,
        input_schema=agent.input_schema,
        output_schema=agent.output_schema,
        created_at=agent.created_at,
        updated_at=agent.updated_at,
    )
    return LibraryAgent(**payload)
def _convert_execution_to_run(execution: execution_db.GraphExecutionMeta) -> Run:
    """Convert an internal execution record to the v2 API Run model.

    The parameter is named `execution` (not `exec`) to avoid shadowing the
    builtin `exec`.
    """
    stats = execution.stats  # may be absent; stat fields then default to 0
    return Run(
        id=execution.id,
        graph_id=execution.graph_id,
        graph_version=execution.graph_version,
        status=execution.status.value,
        started_at=execution.started_at,
        ended_at=execution.ended_at,
        inputs=execution.inputs,
        cost=stats.cost if stats else 0,
        duration=stats.duration if stats else 0,
        node_count=stats.node_exec_count if stats else 0,
    )
# ============================================================================
# Endpoints
# ============================================================================
@library_router.get(
    path="/agents",
    summary="List library agents",
    response_model=LibraryAgentsResponse,
)
async def list_library_agents(
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_LIBRARY)
    ),
    page: int = Query(default=1, ge=1, description="Page number (1-indexed)"),
    page_size: int = Query(
        default=DEFAULT_PAGE_SIZE,
        ge=1,
        le=MAX_PAGE_SIZE,
        description=f"Items per page (max {MAX_PAGE_SIZE})",
    ),
) -> LibraryAgentsResponse:
    """
    List agents in the user's library.

    The library contains agents the user has created or added from the
    marketplace.
    """
    listing = await library_db.list_library_agents(
        user_id=auth.user_id,
        page=page,
        page_size=page_size,
    )
    pagination = listing.pagination
    return LibraryAgentsResponse(
        agents=[_convert_library_agent(item) for item in listing.agents],
        total_count=pagination.total_items,
        page=pagination.current_page,
        page_size=pagination.page_size,
        total_pages=pagination.total_pages,
    )
@library_router.get(
    path="/agents/favorites",
    summary="List favorite agents",
    response_model=LibraryAgentsResponse,
)
async def list_favorite_agents(
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_LIBRARY)
    ),
    page: int = Query(default=1, ge=1, description="Page number (1-indexed)"),
    page_size: int = Query(
        default=DEFAULT_PAGE_SIZE,
        ge=1,
        le=MAX_PAGE_SIZE,
        description=f"Items per page (max {MAX_PAGE_SIZE})",
    ),
) -> LibraryAgentsResponse:
    """
    List the agents the user has marked as favorites in their library.
    """
    listing = await library_db.list_favorite_library_agents(
        user_id=auth.user_id,
        page=page,
        page_size=page_size,
    )
    pagination = listing.pagination
    return LibraryAgentsResponse(
        agents=[_convert_library_agent(item) for item in listing.agents],
        total_count=pagination.total_items,
        page=pagination.current_page,
        page_size=pagination.page_size,
        total_pages=pagination.total_pages,
    )
@library_router.post(
    path="/agents/{agent_id}/runs",
    summary="Execute an agent",
    response_model=Run,
)
async def execute_agent(
    request: ExecuteAgentRequest,
    agent_id: str = Path(description="Library agent ID"),
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.RUN_AGENT)
    ),
) -> Run:
    """
    Execute an agent from the library.

    This creates a new run with the provided inputs. The run executes
    asynchronously and you can poll the run status using GET /runs/{run_id}.

    Raises:
        HTTPException: 402 when the user's credit balance is exhausted,
            404 when the agent is not in the user's library, 400 when
            the execution could not be started.
    """
    # Reject up front when the user cannot pay for the run.
    user_credit_model = await get_user_credit_model(auth.user_id)
    current_balance = await user_credit_model.get_credits(auth.user_id)
    if current_balance <= 0:
        raise HTTPException(
            status_code=402,
            detail="Insufficient balance to execute the agent. Please top up your account.",
        )
    # Resolve the library agent to its graph ID and pinned version.
    try:
        library_agent = await library_db.get_library_agent(
            id=agent_id,
            user_id=auth.user_id,
        )
    except Exception as err:
        # Chain the cause so the original lookup failure stays in the traceback.
        raise HTTPException(
            status_code=404, detail=f"Agent #{agent_id} not found"
        ) from err
    try:
        result = await execution_utils.add_graph_execution(
            graph_id=library_agent.graph_id,
            user_id=auth.user_id,
            inputs=request.inputs,
            graph_version=library_agent.graph_version,
            graph_credentials_inputs=request.credentials_inputs,
        )
        return _convert_execution_to_run(result)
    except Exception as e:
        # Lazy %-formatting: the message is only built if the record is emitted.
        logger.error("Failed to execute agent: %s", e)
        # NOTE(review): detail=str(e) may expose internal error details to
        # external callers — confirm this is intended for the public API.
        raise HTTPException(status_code=400, detail=str(e)) from e
@library_router.get(
    path="/agents/{agent_id}/runs",
    summary="List runs for an agent",
    response_model=RunsListResponse,
)
async def list_agent_runs(
    agent_id: str = Path(description="Library agent ID"),
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_LIBRARY)
    ),
    page: int = Query(default=1, ge=1, description="Page number (1-indexed)"),
    page_size: int = Query(
        default=DEFAULT_PAGE_SIZE,
        ge=1,
        le=MAX_PAGE_SIZE,
        description=f"Items per page (max {MAX_PAGE_SIZE})",
    ),
) -> RunsListResponse:
    """
    List execution runs for a specific agent.

    Raises:
        HTTPException: 404 if the library agent does not exist.
    """
    # Resolve the library agent to its underlying graph ID.
    try:
        library_agent = await library_db.get_library_agent(
            id=agent_id,
            user_id=auth.user_id,
        )
    except Exception as err:
        # Chain the cause so the original lookup failure stays in the traceback.
        raise HTTPException(
            status_code=404, detail=f"Agent #{agent_id} not found"
        ) from err
    result = await execution_db.get_graph_executions_paginated(
        graph_id=library_agent.graph_id,
        user_id=auth.user_id,
        page=page,
        page_size=page_size,
    )
    return RunsListResponse(
        runs=[_convert_execution_to_run(ex) for ex in result.executions],
        total_count=result.pagination.total_items,
        page=result.pagination.current_page,
        page_size=result.pagination.page_size,
        total_pages=result.pagination.total_pages,
    )

View File

@@ -0,0 +1,510 @@
"""
V2 External API - Marketplace Endpoints
Provides access to the agent marketplace (store).
"""
import logging
import urllib.parse
from datetime import datetime
from typing import Literal, Optional
from fastapi import APIRouter, HTTPException, Path, Query, Security
from prisma.enums import APIKeyPermission
from pydantic import BaseModel, Field
from backend.api.external.middleware import require_permission
from backend.api.features.store import cache as store_cache
from backend.api.features.store import db as store_db
from backend.api.features.store import model as store_model
from backend.data.auth.base import APIAuthorizationInfo
from .common import DEFAULT_PAGE_SIZE, MAX_PAGE_SIZE
# Module-level logger for this router's handlers.
logger = logging.getLogger(__name__)
# Router for the v2 marketplace (store) endpoints; mounted by the v2 app.
marketplace_router = APIRouter()
# ============================================================================
# Models
# ============================================================================
# NOTE(review): these route-local models overlap with the MarketplaceAgent /
# MarketplaceSubmission models defined in the shared models module, but with
# diverging field names (e.g. `name` here vs `agent_name` there) — confirm
# which set is canonical for the v2 contract.
class MarketplaceAgent(BaseModel):
    """An agent available in the marketplace."""
    slug: str
    name: str
    description: str
    sub_heading: str
    creator: str
    creator_avatar: str
    runs: int = Field(default=0, description="Number of times this agent has been run")
    rating: float = Field(default=0.0, description="Average rating")
    image_url: str = Field(default="")
class MarketplaceAgentDetails(BaseModel):
    """Detailed information about a marketplace agent."""
    store_listing_version_id: str
    slug: str
    name: str
    description: str
    sub_heading: str
    instructions: Optional[str] = None
    creator: str
    creator_avatar: str
    categories: list[str] = Field(default_factory=list)
    runs: int = Field(default=0)
    rating: float = Field(default=0.0)
    image_urls: list[str] = Field(default_factory=list)
    video_url: str = Field(default="")
    versions: list[str] = Field(default_factory=list, description="Available versions")
    agent_graph_versions: list[str] = Field(default_factory=list)
    agent_graph_id: str
    last_updated: datetime
class MarketplaceAgentsResponse(BaseModel):
    """Response for listing marketplace agents."""
    agents: list[MarketplaceAgent]
    total_count: int
    page: int
    page_size: int
    total_pages: int
class MarketplaceCreator(BaseModel):
    """A creator on the marketplace."""
    name: str
    username: str
    description: str
    avatar_url: str
    num_agents: int
    agent_rating: float
    agent_runs: int
    is_featured: bool = False
class MarketplaceCreatorDetails(BaseModel):
    """Detailed information about a marketplace creator."""
    name: str
    username: str
    description: str
    avatar_url: str
    agent_rating: float
    agent_runs: int
    top_categories: list[str] = Field(default_factory=list)
    links: list[str] = Field(default_factory=list)
class MarketplaceCreatorsResponse(BaseModel):
    """Response for listing marketplace creators."""
    creators: list[MarketplaceCreator]
    total_count: int
    page: int
    page_size: int
    total_pages: int
class MarketplaceSubmission(BaseModel):
    """A marketplace submission."""
    graph_id: str
    graph_version: int
    name: str
    sub_heading: str
    slug: str
    description: str
    instructions: Optional[str] = None
    image_urls: list[str] = Field(default_factory=list)
    date_submitted: datetime
    status: str = Field(description="One of: DRAFT, PENDING, APPROVED, REJECTED")
    runs: int = Field(default=0)
    rating: float = Field(default=0.0)
    store_listing_version_id: Optional[str] = None
    version: Optional[int] = None
    review_comments: Optional[str] = None
    reviewed_at: Optional[datetime] = None
    video_url: Optional[str] = None
    categories: list[str] = Field(default_factory=list)
class SubmissionsListResponse(BaseModel):
    """Response for listing submissions."""
    submissions: list[MarketplaceSubmission]
    total_count: int
    page: int
    page_size: int
    total_pages: int
class CreateSubmissionRequest(BaseModel):
    """Request to create a marketplace submission."""
    graph_id: str = Field(description="ID of the graph to submit")
    graph_version: int = Field(description="Version of the graph to submit")
    name: str = Field(description="Display name for the agent")
    slug: str = Field(description="URL-friendly identifier")
    description: str = Field(description="Full description")
    sub_heading: str = Field(description="Short tagline")
    image_urls: list[str] = Field(default_factory=list)
    video_url: Optional[str] = None
    categories: list[str] = Field(default_factory=list)
# ============================================================================
# Conversion Functions
# ============================================================================
def _convert_store_agent(agent: store_model.StoreAgent) -> MarketplaceAgent:
    """Map an internal StoreAgent onto the v2 API MarketplaceAgent model."""
    # Internal names differ from the API contract:
    # agent_name -> name, agent_image -> image_url.
    payload = dict(
        slug=agent.slug,
        name=agent.agent_name,
        description=agent.description,
        sub_heading=agent.sub_heading,
        creator=agent.creator,
        creator_avatar=agent.creator_avatar,
        runs=agent.runs,
        rating=agent.rating,
        image_url=agent.agent_image,
    )
    return MarketplaceAgent(**payload)
def _convert_store_agent_details(
    agent: store_model.StoreAgentDetails,
) -> MarketplaceAgentDetails:
    """Map internal StoreAgentDetails onto the v2 API model."""
    # Internal attribute names mix snake_case and camelCase
    # (agentGraphVersions / agentGraphId); the API exposes snake_case only.
    payload = dict(
        store_listing_version_id=agent.store_listing_version_id,
        slug=agent.slug,
        name=agent.agent_name,
        description=agent.description,
        sub_heading=agent.sub_heading,
        instructions=agent.instructions,
        creator=agent.creator,
        creator_avatar=agent.creator_avatar,
        categories=agent.categories,
        runs=agent.runs,
        rating=agent.rating,
        image_urls=agent.agent_image,
        video_url=agent.agent_video,
        versions=agent.versions,
        agent_graph_versions=agent.agentGraphVersions,
        agent_graph_id=agent.agentGraphId,
        last_updated=agent.last_updated,
    )
    return MarketplaceAgentDetails(**payload)
def _convert_creator(creator: store_model.Creator) -> MarketplaceCreator:
    """Map an internal Creator record onto the v2 API model, field for field."""
    payload = dict(
        name=creator.name,
        username=creator.username,
        description=creator.description,
        avatar_url=creator.avatar_url,
        num_agents=creator.num_agents,
        agent_rating=creator.agent_rating,
        agent_runs=creator.agent_runs,
        is_featured=creator.is_featured,
    )
    return MarketplaceCreator(**payload)
def _convert_creator_details(
    creator: store_model.CreatorDetails,
) -> MarketplaceCreatorDetails:
    """Map internal CreatorDetails onto the v2 API model, field for field."""
    payload = dict(
        name=creator.name,
        username=creator.username,
        description=creator.description,
        avatar_url=creator.avatar_url,
        agent_rating=creator.agent_rating,
        agent_runs=creator.agent_runs,
        top_categories=creator.top_categories,
        links=creator.links,
    )
    return MarketplaceCreatorDetails(**payload)
def _convert_submission(sub: store_model.StoreSubmission) -> MarketplaceSubmission:
    """Map an internal StoreSubmission onto the v2 API model."""
    payload = dict(
        # The external contract uses graph_* where internals use agent_*.
        graph_id=sub.agent_id,
        graph_version=sub.agent_version,
        name=sub.name,
        sub_heading=sub.sub_heading,
        slug=sub.slug,
        description=sub.description,
        instructions=sub.instructions,
        image_urls=sub.image_urls,
        date_submitted=sub.date_submitted,
        status=sub.status.value,  # enum -> plain string
        runs=sub.runs,
        rating=sub.rating,
        store_listing_version_id=sub.store_listing_version_id,
        version=sub.version,
        review_comments=sub.review_comments,
        reviewed_at=sub.reviewed_at,
        video_url=sub.video_url,
        categories=sub.categories,
    )
    return MarketplaceSubmission(**payload)
# ============================================================================
# Endpoints - Read (authenticated)
# ============================================================================
@marketplace_router.get(
    path="/agents",
    summary="List marketplace agents",
    response_model=MarketplaceAgentsResponse,
)
async def list_agents(
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_STORE)
    ),
    featured: bool = Query(default=False, description="Filter to featured agents only"),
    creator: Optional[str] = Query(
        default=None, description="Filter by creator username"
    ),
    sorted_by: Optional[Literal["rating", "runs", "name", "updated_at"]] = Query(
        default=None, description="Sort field"
    ),
    search_query: Optional[str] = Query(default=None, description="Search query"),
    category: Optional[str] = Query(default=None, description="Filter by category"),
    page: int = Query(default=1, ge=1, description="Page number (1-indexed)"),
    page_size: int = Query(
        default=DEFAULT_PAGE_SIZE,
        ge=1,
        le=MAX_PAGE_SIZE,
        description=f"Items per page (max {MAX_PAGE_SIZE})",
    ),
) -> MarketplaceAgentsResponse:
    """
    List agents available in the marketplace.

    Supports filtering by featured status, creator, category, and search query.
    Results can be sorted by rating, runs, name, or update time.
    """
    # NOTE(review): this calls a private helper (leading underscore) of the
    # store cache module — confirm whether a public accessor should exist.
    listing = await store_cache._get_cached_store_agents(
        featured=featured,
        creator=creator,
        sorted_by=sorted_by,
        search_query=search_query,
        category=category,
        page=page,
        page_size=page_size,
    )
    pagination = listing.pagination
    return MarketplaceAgentsResponse(
        agents=[_convert_store_agent(item) for item in listing.agents],
        total_count=pagination.total_items,
        page=pagination.current_page,
        page_size=pagination.page_size,
        total_pages=pagination.total_pages,
    )
@marketplace_router.get(
    path="/agents/{username}/{agent_name}",
    summary="Get agent details",
    response_model=MarketplaceAgentDetails,
)
async def get_agent_details(
    username: str = Path(description="Creator username"),
    agent_name: str = Path(description="Agent slug/name"),
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_STORE)
    ),
) -> MarketplaceAgentDetails:
    """
    Get detailed information about a specific marketplace agent.
    """
    # Path segments may be URL-encoded; decode, then normalize to lowercase.
    creator_name = urllib.parse.unquote(username).lower()
    listing_name = urllib.parse.unquote(agent_name).lower()
    details = await store_cache._get_cached_agent_details(
        username=creator_name, agent_name=listing_name
    )
    return _convert_store_agent_details(details)
@marketplace_router.get(
    path="/creators",
    summary="List marketplace creators",
    response_model=MarketplaceCreatorsResponse,
)
async def list_creators(
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_STORE)
    ),
    featured: bool = Query(
        default=False, description="Filter to featured creators only"
    ),
    search_query: Optional[str] = Query(default=None, description="Search query"),
    sorted_by: Optional[Literal["agent_rating", "agent_runs", "num_agents"]] = Query(
        default=None, description="Sort field"
    ),
    page: int = Query(default=1, ge=1, description="Page number (1-indexed)"),
    page_size: int = Query(
        default=DEFAULT_PAGE_SIZE,
        ge=1,
        le=MAX_PAGE_SIZE,
        description=f"Items per page (max {MAX_PAGE_SIZE})",
    ),
) -> MarketplaceCreatorsResponse:
    """
    List creators on the marketplace.

    Supports filtering by featured status and search query.
    Results can be sorted by rating, runs, or number of agents.
    """
    listing = await store_cache._get_cached_store_creators(
        featured=featured,
        search_query=search_query,
        sorted_by=sorted_by,
        page=page,
        page_size=page_size,
    )
    pagination = listing.pagination
    return MarketplaceCreatorsResponse(
        creators=[_convert_creator(item) for item in listing.creators],
        total_count=pagination.total_items,
        page=pagination.current_page,
        page_size=pagination.page_size,
        total_pages=pagination.total_pages,
    )
@marketplace_router.get(
    path="/creators/{username}",
    summary="Get creator details",
    response_model=MarketplaceCreatorDetails,
)
async def get_creator_details(
    username: str = Path(description="Creator username"),
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_STORE)
    ),
) -> MarketplaceCreatorDetails:
    """
    Get detailed information about a specific marketplace creator.
    """
    # The path segment may be URL-encoded; decode, then normalize to lowercase.
    normalized = urllib.parse.unquote(username).lower()
    details = await store_cache._get_cached_creator_details(username=normalized)
    return _convert_creator_details(details)
# ============================================================================
# Endpoints - Submissions (CRUD)
# ============================================================================
@marketplace_router.get(
    path="/submissions",
    summary="List my submissions",
    response_model=SubmissionsListResponse,
)
async def list_submissions(
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_STORE)
    ),
    page: int = Query(default=1, ge=1, description="Page number (1-indexed)"),
    page_size: int = Query(
        default=DEFAULT_PAGE_SIZE,
        ge=1,
        le=MAX_PAGE_SIZE,
        description=f"Items per page (max {MAX_PAGE_SIZE})",
    ),
) -> SubmissionsListResponse:
    """
    List the authenticated user's marketplace submissions.

    Includes every submission the user has created: drafts as well as
    pending, approved, and rejected entries.
    """
    listing = await store_db.get_store_submissions(
        user_id=auth.user_id,
        page=page,
        page_size=page_size,
    )
    pagination = listing.pagination
    return SubmissionsListResponse(
        submissions=[_convert_submission(item) for item in listing.submissions],
        total_count=pagination.total_items,
        page=pagination.current_page,
        page_size=pagination.page_size,
        total_pages=pagination.total_pages,
    )
@marketplace_router.post(
    path="/submissions",
    summary="Create a submission",
    response_model=MarketplaceSubmission,
)
async def create_submission(
    request: CreateSubmissionRequest,
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.WRITE_STORE)
    ),
) -> MarketplaceSubmission:
    """
    Create a new marketplace submission.

    Submits an agent for review to be published in the marketplace; the
    submission stays in PENDING status until the team reviews it.
    """
    # The external contract uses graph_*; the store layer expects agent_*.
    created = await store_db.create_store_submission(
        user_id=auth.user_id,
        agent_id=request.graph_id,
        agent_version=request.graph_version,
        slug=request.slug,
        name=request.name,
        sub_heading=request.sub_heading,
        description=request.description,
        image_urls=request.image_urls,
        video_url=request.video_url,
        categories=request.categories,
    )
    return _convert_submission(created)
@marketplace_router.delete(
    path="/submissions/{submission_id}",
    summary="Delete a submission",
)
async def delete_submission(
    submission_id: str = Path(description="Submission ID"),
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.WRITE_STORE)
    ),
) -> None:
    """
    Delete a marketplace submission.

    Only submissions in DRAFT status can be deleted.
    """
    deleted = await store_db.delete_store_submission(
        user_id=auth.user_id,
        submission_id=submission_id,
    )
    if deleted:
        return
    raise HTTPException(
        status_code=404, detail=f"Submission #{submission_id} not found"
    )

View File

@@ -0,0 +1,552 @@
"""
V2 External API - Request and Response Models
This module defines all request and response models for the v2 external API.
All models are self-contained and specific to the external API contract.
"""
from datetime import datetime
from typing import Any, Optional
from pydantic import BaseModel, Field
# ============================================================================
# Common/Shared Models
# ============================================================================
class PaginatedResponse(BaseModel):
    """Base class for paginated responses."""
    total_count: int = Field(description="Total number of items across all pages")
    page: int = Field(description="Current page number (1-indexed)")
    page_size: int = Field(description="Number of items per page")
    total_pages: int = Field(description="Total number of pages")
# ============================================================================
# Graph Models
# ============================================================================
class GraphLink(BaseModel):
    """A link between two nodes in a graph."""
    id: str
    source_id: str = Field(description="ID of the source node")
    sink_id: str = Field(description="ID of the target node")
    source_name: str = Field(description="Output pin name on source node")
    sink_name: str = Field(description="Input pin name on target node")
    is_static: bool = Field(
        default=False, description="Whether this link provides static data"
    )
class GraphNode(BaseModel):
    """A node in an agent graph."""
    id: str
    block_id: str = Field(description="ID of the block type")
    input_default: dict[str, Any] = Field(
        default_factory=dict, description="Default input values"
    )
    metadata: dict[str, Any] = Field(
        default_factory=dict, description="Node metadata (e.g., position)"
    )
class Graph(BaseModel):
    """Graph definition for creating or updating an agent."""
    # id is None on creation; the server assigns one.
    id: Optional[str] = Field(default=None, description="Graph ID (assigned by server)")
    version: int = Field(default=1, description="Graph version")
    is_active: bool = Field(default=True, description="Whether this version is active")
    name: str = Field(description="Graph name")
    description: str = Field(default="", description="Graph description")
    nodes: list[GraphNode] = Field(default_factory=list, description="List of nodes")
    links: list[GraphLink] = Field(
        default_factory=list, description="Links between nodes"
    )
class GraphMeta(BaseModel):
    """Graph metadata (summary information)."""
    id: str
    version: int
    is_active: bool
    name: str
    description: str
    created_at: datetime
    input_schema: dict[str, Any] = Field(description="Input schema for the graph")
    output_schema: dict[str, Any] = Field(description="Output schema for the graph")
class GraphDetails(GraphMeta):
    """Full graph details including nodes and links."""
    nodes: list[GraphNode]
    links: list[GraphLink]
    credentials_input_schema: dict[str, Any] = Field(
        description="Schema for required credentials"
    )
class GraphSettings(BaseModel):
    """Settings for a graph."""
    human_in_the_loop_safe_mode: Optional[bool] = Field(
        default=None, description="Enable safe mode for human-in-the-loop blocks"
    )
class CreateGraphRequest(BaseModel):
    """Request to create a new graph."""
    graph: Graph = Field(description="The graph definition")
class SetActiveVersionRequest(BaseModel):
    """Request to set the active graph version."""
    active_graph_version: int = Field(description="Version number to set as active")
class GraphsListResponse(PaginatedResponse):
    """Response for listing graphs."""
    graphs: list[GraphMeta]
class DeleteGraphResponse(BaseModel):
    """Response for deleting a graph."""
    version_count: int = Field(description="Number of versions deleted")
# ============================================================================
# Schedule Models
# ============================================================================
class Schedule(BaseModel):
    """An execution schedule for a graph."""
    id: str
    name: str
    graph_id: str
    graph_version: int
    cron: str = Field(description="Cron expression for the schedule")
    input_data: dict[str, Any] = Field(
        default_factory=dict, description="Input data for scheduled executions"
    )
    is_enabled: bool = Field(default=True, description="Whether schedule is enabled")
    next_run_time: Optional[datetime] = Field(
        default=None, description="Next scheduled run time"
    )
class CreateScheduleRequest(BaseModel):
    """Request to create a schedule."""
    name: str = Field(description="Display name for the schedule")
    cron: str = Field(description="Cron expression (e.g., '0 9 * * *' for 9am daily)")
    input_data: dict[str, Any] = Field(
        default_factory=dict, description="Input data for scheduled executions"
    )
    credentials_inputs: dict[str, Any] = Field(
        default_factory=dict, description="Credentials for the schedule"
    )
    # None targets whichever graph version is active at schedule time.
    graph_version: Optional[int] = Field(
        default=None, description="Graph version (default: active version)"
    )
    timezone: Optional[str] = Field(
        default=None,
        description="Timezone for schedule (e.g., 'America/New_York')",
    )
class SchedulesListResponse(PaginatedResponse):
    """Response for listing schedules."""
    schedules: list[Schedule]
# ============================================================================
# Block Models
# ============================================================================
class BlockCost(BaseModel):
    """Cost information for a block."""
    cost_type: str = Field(description="Type of cost (e.g., 'per_call', 'per_token')")
    cost_filter: dict[str, Any] = Field(
        default_factory=dict, description="Conditions for this cost"
    )
    cost_amount: int = Field(description="Cost amount in credits")
class Block(BaseModel):
    """A building block that can be used in graphs."""
    id: str
    name: str
    description: str
    categories: list[str] = Field(default_factory=list)
    input_schema: dict[str, Any]
    output_schema: dict[str, Any]
    # A block may carry multiple cost entries, each with its own filter.
    costs: list[BlockCost] = Field(default_factory=list)
class BlocksListResponse(BaseModel):
    """Response for listing blocks."""
    blocks: list[Block]
# ============================================================================
# Marketplace Models
# ============================================================================


class MarketplaceAgent(BaseModel):
    """An agent available in the marketplace (listing summary)."""

    slug: str
    agent_name: str
    agent_image: str
    creator: str
    creator_avatar: str
    sub_heading: str
    description: str
    runs: int = Field(default=0, description="Number of times this agent has been run")
    rating: float = Field(default=0.0, description="Average rating")


class MarketplaceAgentDetails(BaseModel):
    """Detailed information about a marketplace agent."""

    # Identifies the exact store-listing version these details describe.
    store_listing_version_id: str
    slug: str
    agent_name: str
    agent_video: str
    agent_output_demo: str
    # Unlike MarketplaceAgent.agent_image, this is a list of image URLs.
    agent_image: list[str]
    creator: str
    creator_avatar: str
    sub_heading: str
    description: str
    instructions: Optional[str] = None
    categories: list[str]
    runs: int
    rating: float
    versions: list[str]
    agent_graph_versions: list[str]
    agent_graph_id: str
    last_updated: datetime
    # Suggested cron expression for scheduling this agent, if any.
    recommended_schedule_cron: Optional[str] = None


class MarketplaceCreator(BaseModel):
    """A creator on the marketplace."""

    name: str
    username: str
    description: str
    avatar_url: str
    num_agents: int
    agent_rating: float
    agent_runs: int
    is_featured: bool = False


class MarketplaceAgentsResponse(PaginatedResponse):
    """Paginated response for listing marketplace agents."""

    agents: list[MarketplaceAgent]


class MarketplaceCreatorsResponse(PaginatedResponse):
    """Paginated response for listing marketplace creators."""

    creators: list[MarketplaceCreator]
# Submission models


class MarketplaceSubmission(BaseModel):
    """A marketplace submission."""

    agent_id: str
    agent_version: int
    name: str
    sub_heading: str
    slug: str
    description: str
    instructions: Optional[str] = None
    image_urls: list[str] = Field(default_factory=list)
    date_submitted: datetime
    status: str = Field(description="One of: DRAFT, PENDING, APPROVED, REJECTED")
    runs: int
    rating: float
    # Present once the submission has an associated store-listing version.
    store_listing_version_id: Optional[str] = None
    version: Optional[int] = None
    # Review fields (populated once the submission has been reviewed)
    review_comments: Optional[str] = None
    reviewed_at: Optional[datetime] = None
    # Additional optional fields
    video_url: Optional[str] = None
    categories: list[str] = Field(default_factory=list)


class CreateSubmissionRequest(BaseModel):
    """Request to create a marketplace submission."""

    agent_id: str = Field(description="ID of the graph to submit")
    agent_version: int = Field(description="Version of the graph to submit")
    name: str = Field(description="Display name for the agent")
    slug: str = Field(description="URL-friendly identifier")
    description: str = Field(description="Full description")
    sub_heading: str = Field(description="Short tagline")
    image_urls: list[str] = Field(default_factory=list)
    video_url: Optional[str] = None
    categories: list[str] = Field(default_factory=list)


class UpdateSubmissionRequest(BaseModel):
    """Request to update a marketplace submission.

    All fields are optional; None presumably means "leave unchanged" —
    confirm against the route handler.
    """

    name: Optional[str] = None
    description: Optional[str] = None
    sub_heading: Optional[str] = None
    image_urls: Optional[list[str]] = None
    video_url: Optional[str] = None
    categories: Optional[list[str]] = None


class SubmissionsListResponse(PaginatedResponse):
    """Paginated response for listing submissions."""

    submissions: list[MarketplaceSubmission]
# ============================================================================
# Library Models
# ============================================================================


class LibraryAgent(BaseModel):
    """An agent in the user's library."""

    id: str
    graph_id: str
    graph_version: int
    name: str
    description: str
    is_favorite: bool = False
    # NOTE(review): presumably whether the user can open the underlying
    # graph (vs. run-only access) — confirm against the library service.
    can_access_graph: bool = False
    is_latest_version: bool = False
    image_url: Optional[str] = None
    creator_name: str
    input_schema: dict[str, Any] = Field(description="Input schema for the agent")
    output_schema: dict[str, Any] = Field(description="Output schema for the agent")
    created_at: datetime
    updated_at: datetime


class LibraryAgentsResponse(PaginatedResponse):
    """Paginated response for listing library agents."""

    agents: list[LibraryAgent]


class ExecuteAgentRequest(BaseModel):
    """Request to execute an agent."""

    inputs: dict[str, Any] = Field(
        default_factory=dict, description="Input values for the agent"
    )
    credentials_inputs: dict[str, Any] = Field(
        default_factory=dict, description="Credentials for the agent"
    )
# ============================================================================
# Run Models
# ============================================================================


class Run(BaseModel):
    """An execution run."""

    id: str
    graph_id: str
    graph_version: int
    status: str = Field(
        description="One of: INCOMPLETE, QUEUED, RUNNING, COMPLETED, TERMINATED, FAILED, REVIEW"
    )
    started_at: datetime
    # None while the run has not finished.
    ended_at: Optional[datetime] = None
    inputs: Optional[dict[str, Any]] = None
    cost: int = Field(default=0, description="Cost in credits")
    duration: float = Field(default=0, description="Duration in seconds")
    node_count: int = Field(default=0, description="Number of nodes executed")


class RunDetails(Run):
    """Detailed information about a run including node executions."""

    # Output name -> list of values produced under that name.
    outputs: Optional[dict[str, list[Any]]] = None
    node_executions: list[dict[str, Any]] = Field(
        default_factory=list, description="Individual node execution results"
    )


class RunsListResponse(PaginatedResponse):
    """Paginated response for listing runs."""

    runs: list[Run]
# ============================================================================
# Run Review Models (Human-in-the-loop)
# ============================================================================


class PendingReview(BaseModel):
    """A pending human-in-the-loop review."""

    # The node execution ID doubles as the review's identifier.
    id: str  # node_exec_id
    run_id: str
    graph_id: str
    graph_version: int
    payload: Any = Field(description="Data to be reviewed")
    instructions: Optional[str] = Field(
        default=None, description="Instructions for the reviewer"
    )
    editable: bool = Field(
        default=True, description="Whether the reviewer can edit the data"
    )
    status: str = Field(description="One of: WAITING, APPROVED, REJECTED")
    created_at: datetime


class PendingReviewsResponse(PaginatedResponse):
    """Paginated response for listing pending reviews."""

    reviews: list[PendingReview]


class ReviewDecision(BaseModel):
    """Decision for a single review item."""

    node_exec_id: str = Field(description="Node execution ID (review ID)")
    approved: bool = Field(description="Whether to approve the data")
    edited_payload: Optional[Any] = Field(
        default=None, description="Modified payload data (if editing)"
    )
    message: Optional[str] = Field(
        default=None, description="Optional message from reviewer", max_length=2000
    )


class SubmitReviewsRequest(BaseModel):
    """Request to submit review responses for all pending reviews of an execution."""

    reviews: list[ReviewDecision] = Field(
        description="All review decisions for the execution"
    )


class SubmitReviewsResponse(BaseModel):
    """Response after submitting reviews."""

    run_id: str
    approved_count: int = Field(description="Number of reviews approved")
    rejected_count: int = Field(description="Number of reviews rejected")
# ============================================================================
# Credit Models
# ============================================================================


class CreditBalance(BaseModel):
    """User's credit balance."""

    balance: int = Field(description="Current credit balance")


class CreditTransaction(BaseModel):
    """A credit transaction."""

    transaction_key: str
    # NOTE(review): sign convention (positive = top-up, negative = usage?)
    # is not established here — confirm against the credits service.
    amount: int
    transaction_type: str = Field(description="Transaction type")
    transaction_time: datetime
    # Balance after this transaction was applied.
    running_balance: int
    description: Optional[str] = None


class CreditTransactionsResponse(PaginatedResponse):
    """Paginated response for listing credit transactions."""

    transactions: list[CreditTransaction]
# ============================================================================
# Integration Models
# ============================================================================


class Credential(BaseModel):
    """A user's credential for an integration."""

    id: str
    provider: str = Field(description="Integration provider name")
    title: Optional[str] = Field(
        default=None, description="User-assigned title for this credential"
    )
    scopes: list[str] = Field(default_factory=list, description="Granted scopes")


class CredentialsListResponse(BaseModel):
    """Response for listing credentials."""

    credentials: list[Credential]


class CredentialRequirement(BaseModel):
    """A credential requirement for a graph or agent."""

    provider: str = Field(description="Required provider name")
    required_scopes: list[str] = Field(
        default_factory=list, description="Required scopes"
    )
    # The caller's existing credentials that could satisfy this requirement.
    matching_credentials: list[Credential] = Field(
        default_factory=list,
        description="User's credentials that match this requirement",
    )


class CredentialRequirementsResponse(BaseModel):
    """Response for listing credential requirements."""

    requirements: list[CredentialRequirement]
# ============================================================================
# File Models
# ============================================================================


class UploadFileResponse(BaseModel):
    """Response after uploading a file."""

    file_uri: str = Field(description="URI to reference the uploaded file")
    file_name: str
    size: int = Field(description="File size in bytes")
    content_type: str
    # The uploaded file expires; the URI stops working after this many hours.
    expires_in_hours: int

View File

@@ -0,0 +1,35 @@
"""
V2 External API Routes
This module defines the main v2 router that aggregates all v2 API endpoints.
"""
from fastapi import APIRouter
from .blocks import blocks_router
from .credits import credits_router
from .files import files_router
from .graphs import graphs_router
from .integrations import integrations_router
from .library import library_router
from .marketplace import marketplace_router
from .runs import runs_router
from .schedules import graph_schedules_router, schedules_router
v2_router = APIRouter()

# Include all sub-routers; each prefix becomes a path segment under the
# mounted v2 application.
v2_router.include_router(graphs_router, prefix="/graphs", tags=["graphs"])
# Graph-scoped schedule endpoints share the /graphs prefix
# (e.g. /graphs/{graph_id}/schedules) but are tagged as "schedules".
v2_router.include_router(graph_schedules_router, prefix="/graphs", tags=["schedules"])
v2_router.include_router(schedules_router, prefix="/schedules", tags=["schedules"])
v2_router.include_router(blocks_router, prefix="/blocks", tags=["blocks"])
v2_router.include_router(
    marketplace_router, prefix="/marketplace", tags=["marketplace"]
)
v2_router.include_router(library_router, prefix="/library", tags=["library"])
v2_router.include_router(runs_router, prefix="/runs", tags=["runs"])
v2_router.include_router(credits_router, prefix="/credits", tags=["credits"])
v2_router.include_router(
    integrations_router, prefix="/integrations", tags=["integrations"]
)
v2_router.include_router(files_router, prefix="/files", tags=["files"])

View File

@@ -0,0 +1,451 @@
"""
V2 External API - Runs Endpoints
Provides access to execution runs and human-in-the-loop reviews.
"""
import logging
from datetime import datetime
from typing import Any, Optional
from fastapi import APIRouter, HTTPException, Path, Query, Security
from prisma.enums import APIKeyPermission, ReviewStatus
from pydantic import BaseModel, Field
from backend.api.external.middleware import require_permission
from backend.api.features.executions.review.model import (
PendingHumanReviewModel,
SafeJsonData,
)
from backend.data import execution as execution_db
from backend.data import human_review as review_db
from backend.data.auth.base import APIAuthorizationInfo
from backend.executor import utils as execution_utils
from .common import DEFAULT_PAGE_SIZE, MAX_PAGE_SIZE
logger = logging.getLogger(__name__)
runs_router = APIRouter()


# ============================================================================
# Models
# ============================================================================


class Run(BaseModel):
    """An execution run."""

    id: str
    graph_id: str
    graph_version: int
    status: str = Field(
        description="One of: INCOMPLETE, QUEUED, RUNNING, COMPLETED, TERMINATED, FAILED, REVIEW"
    )
    started_at: datetime
    # None while the run has not finished.
    ended_at: Optional[datetime] = None
    inputs: Optional[dict[str, Any]] = None
    cost: int = Field(default=0, description="Cost in credits")
    duration: float = Field(default=0, description="Duration in seconds")
    node_count: int = Field(default=0, description="Number of nodes executed")


class RunDetails(Run):
    """Detailed information about a run including outputs and node executions."""

    # Output name -> list of values produced under that name.
    outputs: Optional[dict[str, list[Any]]] = None
    node_executions: list[dict[str, Any]] = Field(
        default_factory=list, description="Individual node execution results"
    )


class RunsListResponse(BaseModel):
    """Paginated response for listing runs."""

    runs: list[Run]
    total_count: int
    page: int
    page_size: int
    total_pages: int


class PendingReview(BaseModel):
    """A pending human-in-the-loop review."""

    # The node execution ID doubles as the review's identifier.
    id: str  # node_exec_id
    run_id: str
    graph_id: str
    graph_version: int
    payload: SafeJsonData = Field(description="Data to be reviewed")
    instructions: Optional[str] = Field(
        default=None, description="Instructions for the reviewer"
    )
    editable: bool = Field(
        default=True, description="Whether the reviewer can edit the data"
    )
    status: str = Field(description="One of: WAITING, APPROVED, REJECTED")
    created_at: datetime


class PendingReviewsResponse(BaseModel):
    """Paginated response for listing pending reviews."""

    reviews: list[PendingReview]
    total_count: int
    page: int
    page_size: int
    total_pages: int


class ReviewDecision(BaseModel):
    """Decision for a single review item."""

    node_exec_id: str = Field(description="Node execution ID (review ID)")
    approved: bool = Field(description="Whether to approve the data")
    edited_payload: Optional[SafeJsonData] = Field(
        default=None, description="Modified payload data (if editing)"
    )
    message: Optional[str] = Field(
        default=None, description="Optional message from reviewer", max_length=2000
    )


class SubmitReviewsRequest(BaseModel):
    """Request to submit review responses for all pending reviews of an execution."""

    reviews: list[ReviewDecision] = Field(
        description="All review decisions for the execution"
    )


class SubmitReviewsResponse(BaseModel):
    """Response after submitting reviews."""

    run_id: str
    approved_count: int = Field(description="Number of reviews approved")
    rejected_count: int = Field(description="Number of reviews rejected")
# ============================================================================
# Conversion Functions
# ============================================================================


def _convert_execution_to_run(exec: execution_db.GraphExecutionMeta) -> Run:
    """Map an internal execution record onto the public v2 Run model."""
    stats = exec.stats
    # Stats may be absent (e.g. run not finished); fall back to zeros.
    usage = dict(
        cost=stats.cost if stats else 0,
        duration=stats.duration if stats else 0,
        node_count=stats.node_exec_count if stats else 0,
    )
    return Run(
        id=exec.id,
        graph_id=exec.graph_id,
        graph_version=exec.graph_version,
        status=exec.status.value,
        started_at=exec.started_at,
        ended_at=exec.ended_at,
        inputs=exec.inputs,
        **usage,
    )
def _convert_execution_to_run_details(
    exec: execution_db.GraphExecutionWithNodes,
) -> RunDetails:
    """Map an internal execution (incl. node runs) onto the v2 RunDetails model."""

    def node_summary(node) -> dict[str, Any]:
        # Flatten a node execution into the plain-dict shape exposed by the API.
        return {
            "node_id": node.node_id,
            "status": node.status.value,
            "input_data": node.input_data,
            "output_data": node.output_data,
            "started_at": node.start_time,
            "ended_at": node.end_time,
        }

    stats = exec.stats
    return RunDetails(
        id=exec.id,
        graph_id=exec.graph_id,
        graph_version=exec.graph_version,
        status=exec.status.value,
        started_at=exec.started_at,
        ended_at=exec.ended_at,
        inputs=exec.inputs,
        outputs=exec.outputs,
        cost=stats.cost if stats else 0,
        duration=stats.duration if stats else 0,
        node_count=stats.node_exec_count if stats else 0,
        node_executions=[node_summary(n) for n in exec.node_executions],
    )
def _convert_pending_review(review: PendingHumanReviewModel) -> PendingReview:
    """Map an internal pending-review record onto the v2 PendingReview model."""
    fields = {
        "id": review.node_exec_id,
        "run_id": review.graph_exec_id,
        "graph_id": review.graph_id,
        "graph_version": review.graph_version,
        "payload": review.payload,
        "instructions": review.instructions,
        "editable": review.editable,
        "status": review.status.value,
        "created_at": review.created_at,
    }
    return PendingReview(**fields)
# ============================================================================
# Endpoints - Runs
# ============================================================================


@runs_router.get(
    path="",
    summary="List all runs",
    response_model=RunsListResponse,
)
async def list_runs(
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_RUN)
    ),
    page: int = Query(default=1, ge=1, description="Page number (1-indexed)"),
    page_size: int = Query(
        default=DEFAULT_PAGE_SIZE,
        ge=1,
        le=MAX_PAGE_SIZE,
        description=f"Items per page (max {MAX_PAGE_SIZE})",
    ),
) -> RunsListResponse:
    """
    List all execution runs for the authenticated user.

    Returns runs across all agents, sorted by most recent first.
    """
    # Pagination is delegated to the DB layer; the response echoes its
    # pagination metadata so clients can page without re-deriving it.
    result = await execution_db.get_graph_executions_paginated(
        user_id=auth.user_id,
        page=page,
        page_size=page_size,
    )
    return RunsListResponse(
        runs=[_convert_execution_to_run(e) for e in result.executions],
        total_count=result.pagination.total_items,
        page=result.pagination.current_page,
        page_size=result.pagination.page_size,
        total_pages=result.pagination.total_pages,
    )
# NOTE: The static "/reviews" route must be registered BEFORE the dynamic
# "/{run_id}" route. FastAPI matches routes in registration order, so if
# "/{run_id}" were registered first it would capture GET /runs/reviews with
# run_id="reviews" and always return 404.


@runs_router.get(
    path="/reviews",
    summary="List all pending reviews",
    response_model=PendingReviewsResponse,
)
async def list_pending_reviews(
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_RUN_REVIEW)
    ),
    page: int = Query(default=1, ge=1, description="Page number (1-indexed)"),
    page_size: int = Query(
        default=DEFAULT_PAGE_SIZE,
        ge=1,
        le=MAX_PAGE_SIZE,
        description=f"Items per page (max {MAX_PAGE_SIZE})",
    ),
) -> PendingReviewsResponse:
    """
    List all pending human-in-the-loop reviews.

    These are blocks that require human approval or input before the
    agent can continue execution.
    """
    reviews = await review_db.get_pending_reviews_for_user(
        user_id=auth.user_id,
        page=page,
        page_size=page_size,
    )
    # NOTE(review): get_pending_reviews_for_user returns only the requested
    # page, so total_count/total_pages below describe this page, not the
    # overall result set — confirm whether the DB layer can report the true
    # total and wire it through here.
    total_count = len(reviews)
    total_pages = max(1, (total_count + page_size - 1) // page_size)
    return PendingReviewsResponse(
        reviews=[_convert_pending_review(r) for r in reviews],
        total_count=total_count,
        page=page,
        page_size=page_size,
        total_pages=total_pages,
    )


@runs_router.get(
    path="/{run_id}",
    summary="Get run details",
    response_model=RunDetails,
)
async def get_run(
    run_id: str = Path(description="Run ID"),
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_RUN)
    ),
) -> RunDetails:
    """
    Get detailed information about a specific run.

    Includes outputs and individual node execution results.

    Raises:
        HTTPException: 404 if the run does not exist for this user.
    """
    result = await execution_db.get_graph_execution(
        user_id=auth.user_id,
        execution_id=run_id,
        include_node_executions=True,
    )
    if not result:
        raise HTTPException(status_code=404, detail=f"Run #{run_id} not found")
    return _convert_execution_to_run_details(result)


@runs_router.post(
    path="/{run_id}/stop",
    summary="Stop a run",
)
async def stop_run(
    run_id: str = Path(description="Run ID"),
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.WRITE_RUN)
    ),
) -> Run:
    """
    Stop a running execution.

    Only runs in QUEUED or RUNNING status can be stopped.

    Raises:
        HTTPException: 404 if the run does not exist for this user.
    """
    # Verify the run exists and belongs to the user before issuing the stop.
    exec = await execution_db.get_graph_execution(
        user_id=auth.user_id,
        execution_id=run_id,
    )
    if not exec:
        raise HTTPException(status_code=404, detail=f"Run #{run_id} not found")

    # Stop the execution
    await execution_utils.stop_graph_execution(
        graph_exec_id=run_id,
        user_id=auth.user_id,
    )

    # Re-fetch so the response reflects the post-stop status.
    updated_exec = await execution_db.get_graph_execution(
        user_id=auth.user_id,
        execution_id=run_id,
    )
    if not updated_exec:
        raise HTTPException(status_code=404, detail=f"Run #{run_id} not found")
    return _convert_execution_to_run(updated_exec)


@runs_router.delete(
    path="/{run_id}",
    summary="Delete a run",
)
async def delete_run(
    run_id: str = Path(description="Run ID"),
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.WRITE_RUN)
    ),
) -> None:
    """
    Delete an execution run.

    This marks the run as deleted. The data may still be retained for
    some time for recovery purposes.
    """
    await execution_db.delete_graph_execution(
        graph_exec_id=run_id,
        user_id=auth.user_id,
    )


# ============================================================================
# Endpoints - Reviews (Human-in-the-loop)
# ============================================================================
@runs_router.get(
    path="/{run_id}/reviews",
    summary="List reviews for a run",
    response_model=list[PendingReview],
)
async def list_run_reviews(
    run_id: str = Path(description="Run ID"),
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_RUN_REVIEW)
    ),
) -> list[PendingReview]:
    """Return every pending human-in-the-loop review attached to one run."""
    pending = await review_db.get_pending_reviews_for_execution(
        graph_exec_id=run_id,
        user_id=auth.user_id,
    )
    return list(map(_convert_pending_review, pending))
@runs_router.post(
    path="/{run_id}/reviews",
    summary="Submit review responses for a run",
    response_model=SubmitReviewsResponse,
)
async def submit_reviews(
    request: SubmitReviewsRequest,
    run_id: str = Path(description="Run ID"),
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.WRITE_RUN_REVIEW)
    ),
) -> SubmitReviewsResponse:
    """
    Submit responses to all pending human-in-the-loop reviews for a run.

    All pending reviews for the execution must be included. Approving
    a review will allow the agent to continue; rejecting will terminate
    execution at that point.

    Raises:
        HTTPException: 400 if the review decisions are rejected by the
            review service (surfaced from its ValueError).
    """
    # Map each decision onto the (status, edited_payload, message) tuple
    # shape expected by process_all_reviews_for_execution.
    review_decisions: dict[
        str, tuple[ReviewStatus, SafeJsonData | None, str | None]
    ] = {}
    for decision in request.reviews:
        status = ReviewStatus.APPROVED if decision.approved else ReviewStatus.REJECTED
        review_decisions[decision.node_exec_id] = (
            status,
            decision.edited_payload,
            decision.message,
        )

    # NOTE(review): run_id is only echoed back in the response here;
    # process_all_reviews_for_execution receives just the node exec IDs —
    # confirm decisions cannot silently span a different execution.
    try:
        results = await review_db.process_all_reviews_for_execution(
            user_id=auth.user_id,
            review_decisions=review_decisions,
        )
        approved_count = sum(
            1 for r in results.values() if r.status == ReviewStatus.APPROVED
        )
        rejected_count = sum(
            1 for r in results.values() if r.status == ReviewStatus.REJECTED
        )
        return SubmitReviewsResponse(
            run_id=run_id,
            approved_count=approved_count,
            rejected_count=rejected_count,
        )
    except ValueError as e:
        # Chain the cause so the original validation error stays in tracebacks.
        raise HTTPException(status_code=400, detail=str(e)) from e

View File

@@ -0,0 +1,250 @@
"""
V2 External API - Schedules Endpoints
Provides endpoints for managing execution schedules.
"""
import logging
from datetime import datetime
from typing import Any, Optional
from fastapi import APIRouter, HTTPException, Path, Query, Security
from prisma.enums import APIKeyPermission
from pydantic import BaseModel, Field
from backend.api.external.middleware import require_permission
from backend.data import graph as graph_db
from backend.data.auth.base import APIAuthorizationInfo
from backend.data.user import get_user_by_id
from backend.executor import scheduler
from backend.util.clients import get_scheduler_client
from backend.util.timezone_utils import get_user_timezone_or_utc
from .common import DEFAULT_PAGE_SIZE, MAX_PAGE_SIZE
logger = logging.getLogger(__name__)
schedules_router = APIRouter()


# ============================================================================
# Request/Response Models
# ============================================================================


class Schedule(BaseModel):
    """An execution schedule for a graph."""

    id: str
    name: str
    graph_id: str
    graph_version: int
    cron: str = Field(description="Cron expression for the schedule")
    input_data: dict[str, Any] = Field(
        default_factory=dict, description="Input data for scheduled executions"
    )
    next_run_time: Optional[datetime] = Field(
        default=None, description="Next scheduled run time"
    )
    is_enabled: bool = Field(default=True, description="Whether schedule is enabled")


class SchedulesListResponse(BaseModel):
    """Paginated response for listing schedules."""

    schedules: list[Schedule]
    total_count: int
    page: int
    page_size: int
    total_pages: int


class CreateScheduleRequest(BaseModel):
    """Request to create a schedule."""

    name: str = Field(description="Display name for the schedule")
    cron: str = Field(description="Cron expression (e.g., '0 9 * * *' for 9am daily)")
    input_data: dict[str, Any] = Field(
        default_factory=dict, description="Input data for scheduled executions"
    )
    credentials_inputs: dict[str, Any] = Field(
        default_factory=dict, description="Credentials for the schedule"
    )
    # None means "use the graph's active version" (resolved server-side).
    graph_version: Optional[int] = Field(
        default=None, description="Graph version (default: active version)"
    )
    timezone: Optional[str] = Field(
        default=None,
        description=(
            "Timezone for schedule (e.g., 'America/New_York'). "
            "Defaults to user's timezone."
        ),
    )
def _convert_schedule(job: scheduler.GraphExecutionJobInfo) -> Schedule:
    """Translate an internal scheduler job into the public v2 Schedule model."""
    # next_run_time arrives as an ISO-8601 string; expose it as a datetime.
    if job.next_run_time:
        next_run = datetime.fromisoformat(job.next_run_time)
    else:
        next_run = None
    return Schedule(
        id=job.id,
        name=job.name or "",
        graph_id=job.graph_id,
        graph_version=job.graph_version,
        cron=job.cron,
        input_data=job.input_data,
        next_run_time=next_run,
        is_enabled=True,  # the scheduler only returns enabled schedules
    )
# ============================================================================
# Endpoints
# ============================================================================


@schedules_router.get(
    path="",
    summary="List all user schedules",
    response_model=SchedulesListResponse,
)
async def list_all_schedules(
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_SCHEDULE)
    ),
    page: int = Query(default=1, ge=1, description="Page number (1-indexed)"),
    page_size: int = Query(
        default=DEFAULT_PAGE_SIZE,
        ge=1,
        le=MAX_PAGE_SIZE,
        description=f"Items per page (max {MAX_PAGE_SIZE})",
    ),
) -> SchedulesListResponse:
    """Return one page of the user's schedules across all graphs."""
    jobs = await get_scheduler_client().get_execution_schedules(
        user_id=auth.user_id
    )
    all_schedules = [_convert_schedule(job) for job in jobs]

    # The scheduler has no native pagination, so slice the full list here.
    total = len(all_schedules)
    pages = (total + page_size - 1) // page_size if total > 0 else 1
    offset = (page - 1) * page_size
    return SchedulesListResponse(
        schedules=all_schedules[offset : offset + page_size],
        total_count=total,
        page=page,
        page_size=page_size,
        total_pages=pages,
    )
@schedules_router.delete(
    path="/{schedule_id}",
    summary="Delete a schedule",
)
async def delete_schedule(
    schedule_id: str = Path(description="Schedule ID to delete"),
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.WRITE_SCHEDULE)
    ),
) -> None:
    """
    Delete an execution schedule.

    Raises:
        HTTPException: 404 if the scheduler reports the schedule as missing.
    """
    try:
        await get_scheduler_client().delete_schedule(
            schedule_id=schedule_id,
            user_id=auth.user_id,
        )
    except Exception as e:
        # The scheduler client does not raise a typed not-found error, so we
        # fall back to string matching; chain the cause for debuggability.
        if "not found" in str(e).lower():
            raise HTTPException(
                status_code=404, detail=f"Schedule #{schedule_id} not found"
            ) from e
        raise
# ============================================================================
# Graph-specific Schedule Endpoints (nested under /graphs)
# These are included in the graphs router via include_router
# ============================================================================

# Mounted with prefix="/graphs" by the v2 router, so paths below resolve to
# /graphs/{graph_id}/schedules.
graph_schedules_router = APIRouter()


@graph_schedules_router.get(
    path="/{graph_id}/schedules",
    summary="List schedules for a graph",
    response_model=list[Schedule],
)
async def list_graph_schedules(
    graph_id: str = Path(description="Graph ID"),
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_SCHEDULE)
    ),
) -> list[Schedule]:
    """
    List all schedules for a specific graph.
    """
    # Scheduler filters by both user and graph; no pagination on this path.
    schedules = await get_scheduler_client().get_execution_schedules(
        user_id=auth.user_id,
        graph_id=graph_id,
    )
    return [_convert_schedule(s) for s in schedules]
@graph_schedules_router.post(
    path="/{graph_id}/schedules",
    summary="Create a schedule for a graph",
    response_model=Schedule,
)
async def create_graph_schedule(
    request: CreateScheduleRequest,
    graph_id: str = Path(description="Graph ID"),
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.WRITE_SCHEDULE)
    ),
) -> Schedule:
    """
    Create a new execution schedule for a graph.

    The schedule will execute the graph at times matching the cron expression,
    using the provided input data.

    Raises:
        HTTPException: 404 if the graph (or the requested version) does not
            exist or is not accessible to the user.
    """
    graph = await graph_db.get_graph(
        graph_id=graph_id,
        version=request.graph_version,
        user_id=auth.user_id,
    )
    if not graph:
        # Avoid printing "vNone" when no explicit version was requested.
        version_label = (
            f" v{request.graph_version}" if request.graph_version is not None else ""
        )
        raise HTTPException(
            status_code=404,
            detail=f"Graph #{graph_id}{version_label} not found.",
        )

    # Prefer an explicitly requested timezone; otherwise fall back to the
    # user's stored timezone (or UTC when none is set).
    if request.timezone:
        user_timezone = request.timezone
    else:
        user = await get_user_by_id(auth.user_id)
        user_timezone = get_user_timezone_or_utc(user.timezone if user else None)

    result = await get_scheduler_client().add_execution_schedule(
        user_id=auth.user_id,
        graph_id=graph_id,
        graph_version=graph.version,
        name=request.name,
        cron=request.cron,
        input_data=request.input_data,
        input_credentials=request.credentials_inputs,
        user_timezone=user_timezone,
    )
    return _convert_schedule(result)

View File

@@ -299,6 +299,9 @@ async def stream_chat_completion(
f"new message_count={len(session.messages)}"
)
if len(session.messages) > config.max_context_messages:
raise ValueError(f"Max messages exceeded: {config.max_context_messages}")
logger.info(
f"Upserting session: {session.session_id} with user id {session.user_id}, "
f"message_count={len(session.messages)}"

View File

@@ -8,12 +8,8 @@ from .add_understanding import AddUnderstandingTool
from .agent_output import AgentOutputTool
from .base import BaseTool
from .find_agent import FindAgentTool
from .find_block import FindBlockTool
from .find_library_agent import FindLibraryAgentTool
from .get_doc_page import GetDocPageTool
from .run_agent import RunAgentTool
from .run_block import RunBlockTool
from .search_docs import SearchDocsTool
if TYPE_CHECKING:
from backend.api.features.chat.response_model import StreamToolOutputAvailable
@@ -22,13 +18,9 @@ if TYPE_CHECKING:
TOOL_REGISTRY: dict[str, BaseTool] = {
"add_understanding": AddUnderstandingTool(),
"find_agent": FindAgentTool(),
"find_block": FindBlockTool(),
"find_library_agent": FindLibraryAgentTool(),
"run_agent": RunAgentTool(),
"run_block": RunBlockTool(),
"agent_output": AgentOutputTool(),
"search_docs": SearchDocsTool(),
"get_doc_page": GetDocPageTool(),
}
# Export individual tool instances for backwards compatibility

View File

@@ -1,192 +0,0 @@
import logging
from typing import Any
from prisma.enums import ContentType
from backend.api.features.chat.model import ChatSession
from backend.api.features.chat.tools.base import BaseTool, ToolResponseBase
from backend.api.features.chat.tools.models import (
BlockInfoSummary,
BlockInputFieldInfo,
BlockListResponse,
ErrorResponse,
NoResultsResponse,
)
from backend.api.features.store.hybrid_search import unified_hybrid_search
from backend.data.block import get_block
logger = logging.getLogger(__name__)
class FindBlockTool(BaseTool):
    """Tool for searching available blocks."""

    @property
    def name(self) -> str:
        # Tool identifier used as the registry/dispatch key.
        return "find_block"

    @property
    def description(self) -> str:
        # Prompt-facing description; tells the model when to use this tool
        # and that it must precede run_block to obtain a block id.
        return (
            "Search for available blocks by name or description. "
            "Blocks are reusable components that perform specific tasks like "
            "sending emails, making API calls, processing text, etc. "
            "IMPORTANT: Use this tool FIRST to get the block's 'id' before calling run_block. "
            "The response includes each block's id, required_inputs, and input_schema."
        )

    @property
    def parameters(self) -> dict[str, Any]:
        # JSON Schema for the tool-call arguments: a single required "query".
        return {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": (
                        "Search query to find blocks by name or description. "
                        "Use keywords like 'email', 'http', 'text', 'ai', etc."
                    ),
                },
            },
            "required": ["query"],
        }

    @property
    def requires_auth(self) -> bool:
        # An authenticated user is required to execute this tool.
        return True
async def _execute(
self,
user_id: str | None,
session: ChatSession,
**kwargs,
) -> ToolResponseBase:
"""Search for blocks matching the query.
Args:
user_id: User ID (required)
session: Chat session
query: Search query
Returns:
BlockListResponse: List of matching blocks
NoResultsResponse: No blocks found
ErrorResponse: Error message
"""
query = kwargs.get("query", "").strip()
session_id = session.session_id
if not query:
return ErrorResponse(
message="Please provide a search query",
session_id=session_id,
)
try:
# Search for blocks using hybrid search
results, total = await unified_hybrid_search(
query=query,
content_types=[ContentType.BLOCK],
page=1,
page_size=10,
)
if not results:
return NoResultsResponse(
message=f"No blocks found for '{query}'",
suggestions=[
"Try broader keywords like 'email', 'http', 'text', 'ai'",
"Check spelling of technical terms",
],
session_id=session_id,
)
# Enrich results with full block information
blocks: list[BlockInfoSummary] = []
for result in results:
block_id = result["content_id"]
block = get_block(block_id)
if block:
# Get input/output schemas
input_schema = {}
output_schema = {}
try:
input_schema = block.input_schema.jsonschema()
except Exception:
pass
try:
output_schema = block.output_schema.jsonschema()
except Exception:
pass
# Get categories from block instance
categories = []
if hasattr(block, "categories") and block.categories:
categories = [cat.value for cat in block.categories]
# Extract required inputs for easier use
required_inputs: list[BlockInputFieldInfo] = []
if input_schema:
properties = input_schema.get("properties", {})
required_fields = set(input_schema.get("required", []))
# Get credential field names to exclude from required inputs
credentials_fields = set(
block.input_schema.get_credentials_fields().keys()
)
for field_name, field_schema in properties.items():
# Skip credential fields - they're handled separately
if field_name in credentials_fields:
continue
required_inputs.append(
BlockInputFieldInfo(
name=field_name,
type=field_schema.get("type", "string"),
description=field_schema.get("description", ""),
required=field_name in required_fields,
default=field_schema.get("default"),
)
)
blocks.append(
BlockInfoSummary(
id=block_id,
name=block.name,
description=block.description or "",
categories=categories,
input_schema=input_schema,
output_schema=output_schema,
required_inputs=required_inputs,
)
)
if not blocks:
return NoResultsResponse(
message=f"No blocks found for '{query}'",
suggestions=[
"Try broader keywords like 'email', 'http', 'text', 'ai'",
],
session_id=session_id,
)
return BlockListResponse(
message=(
f"Found {len(blocks)} block(s) matching '{query}'. "
"To execute a block, use run_block with the block's 'id' field "
"and provide 'input_data' matching the block's input_schema."
),
blocks=blocks,
count=len(blocks),
query=query,
session_id=session_id,
)
except Exception as e:
logger.error(f"Error searching blocks: {e}", exc_info=True)
return ErrorResponse(
message="Failed to search blocks",
error=str(e),
session_id=session_id,
)

View File

@@ -1,148 +0,0 @@
"""GetDocPageTool - Fetch full content of a documentation page."""
import logging
from pathlib import Path
from typing import Any
from backend.api.features.chat.model import ChatSession
from backend.api.features.chat.tools.base import BaseTool
from backend.api.features.chat.tools.models import (
DocPageResponse,
ErrorResponse,
ToolResponseBase,
)
logger = logging.getLogger(__name__)
# Base URL of the public documentation site; change this constant to point
# the tool's generated links elsewhere (it is not read from configuration).
DOCS_BASE_URL = "https://docs.agpt.co"
class GetDocPageTool(BaseTool):
    """Tool for fetching the full content of a documentation page.

    The requested path is validated against the docs root (no traversal, no
    absolute paths, resolved path must stay inside the root) before any read.
    """

    @property
    def name(self) -> str:
        return "get_doc_page"

    @property
    def description(self) -> str:
        return (
            "Get the full content of a documentation page by its path. "
            "Use this after search_docs to read the complete content of a relevant page."
        )

    @property
    def parameters(self) -> dict[str, Any]:
        # JSON schema for the tool-call arguments exposed to the LLM.
        return {
            "type": "object",
            "properties": {
                "path": {
                    "type": "string",
                    "description": (
                        "The path to the documentation file, as returned by search_docs. "
                        "Example: 'platform/block-sdk-guide.md'"
                    ),
                },
            },
            "required": ["path"],
        }

    @property
    def requires_auth(self) -> bool:
        return False  # Documentation is public

    def _get_docs_root(self) -> Path:
        """Get the documentation root directory.

        NOTE(review): assumes this module lives exactly 8 levels below the
        project root — confirm if the package layout ever changes.
        """
        this_file = Path(__file__)
        project_root = this_file.parent.parent.parent.parent.parent.parent.parent.parent
        return project_root / "docs"

    def _extract_title(self, content: str, fallback: str) -> str:
        """Return the first level-1 markdown heading, or *fallback* if none."""
        for line in content.split("\n"):
            if line.startswith("# "):
                return line[2:].strip()
        return fallback

    def _make_doc_url(self, path: str) -> str:
        """Create a public URL for a documentation page (file extension dropped)."""
        url_path = path.rsplit(".", 1)[0] if "." in path else path
        return f"{DOCS_BASE_URL}/{url_path}"

    async def _execute(
        self,
        user_id: str | None,
        session: ChatSession,
        **kwargs,
    ) -> ToolResponseBase:
        """Fetch full content of a documentation page.

        Args:
            user_id: User ID (not required for docs)
            session: Chat session
            path: Path to the documentation file

        Returns:
            DocPageResponse: Full document content
            ErrorResponse: Error message
        """
        path = kwargs.get("path", "").strip()
        session_id = session.session_id if session else None
        if not path:
            return ErrorResponse(
                message="Please provide a documentation path.",
                error="Missing path parameter",
                session_id=session_id,
            )
        # Reject obvious traversal attempts before touching the filesystem.
        if ".." in path or path.startswith("/"):
            return ErrorResponse(
                message="Invalid documentation path.",
                error="invalid_path",
                session_id=session_id,
            )
        docs_root = self._get_docs_root()
        full_path = docs_root / path
        # Fix: verify containment *before* probing existence, so a path that
        # resolves outside the docs root (e.g. via a symlink) is rejected
        # without first leaking whether its target exists.
        try:
            full_path.resolve().relative_to(docs_root.resolve())
        except ValueError:
            return ErrorResponse(
                message="Invalid documentation path.",
                error="invalid_path",
                session_id=session_id,
            )
        # Fix: is_file() instead of exists() — a directory path now yields a
        # clean not_found instead of failing later with a generic read error.
        if not full_path.is_file():
            return ErrorResponse(
                message=f"Documentation page not found: {path}",
                error="not_found",
                session_id=session_id,
            )
        try:
            content = full_path.read_text(encoding="utf-8")
            title = self._extract_title(content, path)
            return DocPageResponse(
                message=f"Retrieved documentation page: {title}",
                title=title,
                path=path,
                content=content,
                doc_url=self._make_doc_url(path),
                session_id=session_id,
            )
        except Exception as e:
            logger.error(f"Failed to read documentation page {path}: {e}")
            return ErrorResponse(
                message=f"Failed to read documentation page: {str(e)}",
                error="read_failed",
                session_id=session_id,
            )

View File

@@ -21,10 +21,6 @@ class ResponseType(str, Enum):
NO_RESULTS = "no_results"
AGENT_OUTPUT = "agent_output"
UNDERSTANDING_UPDATED = "understanding_updated"
BLOCK_LIST = "block_list"
BLOCK_OUTPUT = "block_output"
DOC_SEARCH_RESULTS = "doc_search_results"
DOC_PAGE = "doc_page"
# Base response model
@@ -213,83 +209,3 @@ class UnderstandingUpdatedResponse(ToolResponseBase):
type: ResponseType = ResponseType.UNDERSTANDING_UPDATED
updated_fields: list[str] = Field(default_factory=list)
current_understanding: dict[str, Any] = Field(default_factory=dict)
# Documentation search models
class DocSearchResult(BaseModel):
    """A single documentation search result (one matched section of one document)."""
    title: str  # Display title: doc title, falling back to section title or path
    path: str  # Repo-relative path of the source markdown file
    section: str  # Heading of the matched section (may be empty)
    snippet: str  # Short excerpt for UI display
    score: float  # Combined hybrid-search relevance score
    doc_url: str | None = None  # Public docs URL derived from the path
class DocSearchResultsResponse(ToolResponseBase):
    """Response for search_docs tool."""
    type: ResponseType = ResponseType.DOC_SEARCH_RESULTS
    results: list[DocSearchResult]  # Deduplicated: best-scoring section per document
    count: int  # Number of entries in `results`
    query: str  # The original search query, echoed back
class DocPageResponse(ToolResponseBase):
    """Response for get_doc_page tool."""
    type: ResponseType = ResponseType.DOC_PAGE
    title: str  # First level-1 heading of the page, or its path if none found
    path: str  # Repo-relative path that was requested
    content: str  # Full document content
    doc_url: str | None = None  # Public docs URL for this page
# Block models
class BlockInputFieldInfo(BaseModel):
    """Information about a block input field (non-credential fields only)."""
    name: str  # Field name as it appears in the block's input schema
    type: str  # JSON-schema type; "string" when the schema leaves it unspecified
    description: str = ""
    required: bool = False  # True if listed in the schema's "required" array
    default: Any | None = None  # Schema-declared default value, if any
class BlockInfoSummary(BaseModel):
    """Summary of a block for search results."""
    id: str  # Block ID — pass this verbatim to run_block
    name: str
    description: str
    categories: list[str]  # Category enum values of the block
    input_schema: dict[str, Any]  # Full JSON schema of the block's inputs (may be empty)
    output_schema: dict[str, Any]  # Full JSON schema of the block's outputs (may be empty)
    required_inputs: list[BlockInputFieldInfo] = Field(
        default_factory=list,
        description="List of required input fields for this block",
    )
class BlockListResponse(ToolResponseBase):
    """Response for find_block tool."""
    type: ResponseType = ResponseType.BLOCK_LIST
    blocks: list[BlockInfoSummary]
    count: int  # Number of entries in `blocks`
    query: str  # The search query that produced these results
    # Static guidance for the LLM on how to chain into run_block.
    usage_hint: str = Field(
        default="To execute a block, call run_block with block_id set to the block's "
        "'id' field and input_data containing the required fields from input_schema."
    )
class BlockOutputResponse(ToolResponseBase):
    """Response for run_block tool."""
    type: ResponseType = ResponseType.BLOCK_OUTPUT
    block_id: str
    block_name: str
    outputs: dict[str, list[Any]]  # Output name -> values emitted under it, in order
    success: bool = True

View File

@@ -1,297 +0,0 @@
"""Tool for executing blocks directly."""
import logging
from collections import defaultdict
from typing import Any
from backend.api.features.chat.model import ChatSession
from backend.data.block import get_block
from backend.data.execution import ExecutionContext
from backend.data.model import CredentialsMetaInput
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.util.exceptions import BlockError
from .base import BaseTool
from .models import (
BlockOutputResponse,
ErrorResponse,
SetupInfo,
SetupRequirementsResponse,
ToolResponseBase,
UserReadiness,
)
logger = logging.getLogger(__name__)
class RunBlockTool(BaseTool):
    """Tool for executing a block and returning its outputs.

    Flow: resolve the block by id, match the user's stored credentials against
    the block's credential fields, and either report missing credentials
    (SetupRequirementsResponse) or execute the block and collect its outputs.
    """
    @property
    def name(self) -> str:
        return "run_block"
    @property
    def description(self) -> str:
        return (
            "Execute a specific block with the provided input data. "
            "IMPORTANT: You MUST call find_block first to get the block's 'id' - "
            "do NOT guess or make up block IDs. "
            "Use the 'id' from find_block results and provide input_data "
            "matching the block's required_inputs."
        )
    @property
    def parameters(self) -> dict[str, Any]:
        # JSON schema for the tool-call arguments exposed to the LLM.
        return {
            "type": "object",
            "properties": {
                "block_id": {
                    "type": "string",
                    "description": (
                        "The block's 'id' field from find_block results. "
                        "NEVER guess this - always get it from find_block first."
                    ),
                },
                "input_data": {
                    "type": "object",
                    "description": (
                        "Input values for the block. Use the 'required_inputs' field "
                        "from find_block to see what fields are needed."
                    ),
                },
            },
            "required": ["block_id", "input_data"],
        }
    @property
    def requires_auth(self) -> bool:
        return True
    async def _check_block_credentials(
        self,
        user_id: str,
        block: Any,
    ) -> tuple[dict[str, CredentialsMetaInput], list[CredentialsMetaInput]]:
        """
        Check if user has required credentials for a block.

        For each credential field the *first* stored credential whose provider
        and type both satisfy the field's constraints wins; there is no
        ranking beyond store order.

        Returns:
            tuple[matched_credentials, missing_credentials]
        """
        matched_credentials: dict[str, CredentialsMetaInput] = {}
        missing_credentials: list[CredentialsMetaInput] = []
        # Get credential field info from block's input schema
        credentials_fields_info = block.input_schema.get_credentials_fields_info()
        if not credentials_fields_info:
            # Block needs no credentials at all.
            return matched_credentials, missing_credentials
        # Get user's available credentials
        creds_manager = IntegrationCredentialsManager()
        available_creds = await creds_manager.store.get_all_creds(user_id)
        for field_name, field_info in credentials_fields_info.items():
            # field_info.provider is a frozenset of acceptable providers
            # field_info.supported_types is a frozenset of acceptable types
            matching_cred = next(
                (
                    cred
                    for cred in available_creds
                    if cred.provider in field_info.provider
                    and cred.type in field_info.supported_types
                ),
                None,
            )
            if matching_cred:
                matched_credentials[field_name] = CredentialsMetaInput(
                    id=matching_cred.id,
                    provider=matching_cred.provider,  # type: ignore
                    type=matching_cred.type,
                    title=matching_cred.title,
                )
            else:
                # Create a placeholder for the missing credential.
                # NOTE(review): frozensets are unordered, so next(iter(...))
                # picks an arbitrary acceptable provider/type for display.
                provider = next(iter(field_info.provider), "unknown")
                cred_type = next(iter(field_info.supported_types), "api_key")
                missing_credentials.append(
                    CredentialsMetaInput(
                        id=field_name,
                        provider=provider,  # type: ignore
                        type=cred_type,  # type: ignore
                        title=field_name.replace("_", " ").title(),
                    )
                )
        return matched_credentials, missing_credentials
    async def _execute(
        self,
        user_id: str | None,
        session: ChatSession,
        **kwargs,
    ) -> ToolResponseBase:
        """Execute a block with the given input data.

        Args:
            user_id: User ID (required)
            session: Chat session
            block_id: Block UUID to execute
            input_data: Input values for the block

        Returns:
            BlockOutputResponse: Block execution outputs
            SetupRequirementsResponse: Missing credentials
            ErrorResponse: Error message
        """
        block_id = kwargs.get("block_id", "").strip()
        input_data = kwargs.get("input_data", {})
        session_id = session.session_id
        # Validate arguments before doing any work.
        if not block_id:
            return ErrorResponse(
                message="Please provide a block_id",
                session_id=session_id,
            )
        if not isinstance(input_data, dict):
            return ErrorResponse(
                message="input_data must be an object",
                session_id=session_id,
            )
        if not user_id:
            return ErrorResponse(
                message="Authentication required",
                session_id=session_id,
            )
        # Get the block
        block = get_block(block_id)
        if not block:
            return ErrorResponse(
                message=f"Block '{block_id}' not found",
                session_id=session_id,
            )
        logger.info(f"Executing block {block.name} ({block_id}) for user {user_id}")
        # Check credentials
        # NOTE(review): _check_block_credentials constructs its own
        # IntegrationCredentialsManager; this instance is only used below to
        # fetch the actual secrets for matched credentials.
        creds_manager = IntegrationCredentialsManager()
        matched_credentials, missing_credentials = await self._check_block_credentials(
            user_id, block
        )
        if missing_credentials:
            # Return setup requirements response with missing credentials,
            # reusing the agent-oriented SetupInfo shape with the block's
            # id/name standing in for agent_id/agent_name.
            missing_creds_dict = {c.id: c.model_dump() for c in missing_credentials}
            return SetupRequirementsResponse(
                message=(
                    f"Block '{block.name}' requires credentials that are not configured. "
                    "Please set up the required credentials before running this block."
                ),
                session_id=session_id,
                setup_info=SetupInfo(
                    agent_id=block_id,
                    agent_name=block.name,
                    user_readiness=UserReadiness(
                        has_all_credentials=False,
                        missing_credentials=missing_creds_dict,
                        ready_to_run=False,
                    ),
                    requirements={
                        "credentials": [c.model_dump() for c in missing_credentials],
                        "inputs": self._get_inputs_list(block),
                        "execution_modes": ["immediate"],
                    },
                ),
                graph_id=None,
                graph_version=None,
            )
        try:
            # Fetch actual credentials and prepare kwargs for block execution
            # Create execution context with defaults (blocks may require it)
            exec_kwargs: dict[str, Any] = {
                "user_id": user_id,
                "execution_context": ExecutionContext(),
            }
            for field_name, cred_meta in matched_credentials.items():
                # Inject metadata into input_data (for validation)
                # NOTE(review): this mutates the caller-supplied dict in place.
                if field_name not in input_data:
                    input_data[field_name] = cred_meta.model_dump()
                # Fetch actual credentials and pass as kwargs (for execution)
                actual_credentials = await creds_manager.get(
                    user_id, cred_meta.id, lock=False
                )
                if actual_credentials:
                    exec_kwargs[field_name] = actual_credentials
                else:
                    # Credential disappeared between matching and fetching.
                    return ErrorResponse(
                        message=f"Failed to retrieve credentials for {field_name}",
                        session_id=session_id,
                    )
            # Execute the block and collect outputs, grouped by output name
            # in emission order.
            outputs: dict[str, list[Any]] = defaultdict(list)
            async for output_name, output_data in block.execute(
                input_data,
                **exec_kwargs,
            ):
                outputs[output_name].append(output_data)
            return BlockOutputResponse(
                message=f"Block '{block.name}' executed successfully",
                block_id=block_id,
                block_name=block.name,
                outputs=dict(outputs),
                success=True,
                session_id=session_id,
            )
        except BlockError as e:
            # Expected, block-reported failure: log quietly.
            logger.warning(f"Block execution failed: {e}")
            return ErrorResponse(
                message=f"Block execution failed: {e}",
                error=str(e),
                session_id=session_id,
            )
        except Exception as e:
            # Unexpected failure: keep the traceback in the logs.
            logger.error(f"Unexpected error executing block: {e}", exc_info=True)
            return ErrorResponse(
                message=f"Failed to execute block: {str(e)}",
                error=str(e),
                session_id=session_id,
            )
    def _get_inputs_list(self, block: Any) -> list[dict[str, Any]]:
        """Extract non-credential inputs from block schema.

        Returns one dict per field with name/title/type/description/required,
        used in the SetupRequirementsResponse "inputs" listing.
        """
        inputs_list = []
        schema = block.input_schema.jsonschema()
        properties = schema.get("properties", {})
        required_fields = set(schema.get("required", []))
        # Get credential field names to exclude
        credentials_fields = set(block.input_schema.get_credentials_fields().keys())
        for field_name, field_schema in properties.items():
            # Skip credential fields
            if field_name in credentials_fields:
                continue
            inputs_list.append(
                {
                    "name": field_name,
                    "title": field_schema.get("title", field_name),
                    "type": field_schema.get("type", "string"),
                    "description": field_schema.get("description", ""),
                    "required": field_name in required_fields,
                }
            )
        return inputs_list

View File

@@ -1,208 +0,0 @@
"""SearchDocsTool - Search documentation using hybrid search."""
import logging
from typing import Any
from prisma.enums import ContentType
from backend.api.features.chat.model import ChatSession
from backend.api.features.chat.tools.base import BaseTool
from backend.api.features.chat.tools.models import (
DocSearchResult,
DocSearchResultsResponse,
ErrorResponse,
NoResultsResponse,
ToolResponseBase,
)
from backend.api.features.store.hybrid_search import unified_hybrid_search
logger = logging.getLogger(__name__)
# Base URL of the public documentation site; edit here to repoint links
# (it is not read from runtime configuration).
DOCS_BASE_URL = "https://docs.agpt.co"
# Maximum number of deduplicated results returned to the model
MAX_RESULTS = 5
# Snippet length for result previews, in characters
SNIPPET_LENGTH = 200
class SearchDocsTool(BaseTool):
    """Chat tool that searches the AutoGPT platform documentation.

    Uses the unified hybrid search index restricted to DOCUMENTATION content,
    keeps the best-scoring section per document, and returns short previews.
    """

    @property
    def name(self) -> str:
        return "search_docs"

    @property
    def description(self) -> str:
        return (
            "Search the AutoGPT platform documentation for information about "
            "how to use the platform, build agents, configure blocks, and more. "
            "Returns relevant documentation sections. Use get_doc_page to read full content."
        )

    @property
    def parameters(self) -> dict[str, Any]:
        # JSON schema for the tool-call arguments exposed to the LLM.
        return {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": (
                        "Search query to find relevant documentation. "
                        "Use natural language to describe what you're looking for."
                    ),
                },
            },
            "required": ["query"],
        }

    @property
    def requires_auth(self) -> bool:
        # Documentation is public, so no authentication is needed.
        return False

    def _create_snippet(self, content: str, max_length: int = SNIPPET_LENGTH) -> str:
        """Create a short plain-text preview of *content*."""
        # Drop common markdown tokens, then collapse all runs of whitespace.
        stripped = " ".join(
            content.replace("#", "").replace("*", "").replace("`", "").split()
        )
        if len(stripped) <= max_length:
            return stripped
        cut = stripped[:max_length]
        boundary = cut.rfind(" ")
        # End on a word boundary unless that would lose over half the snippet.
        if boundary > max_length // 2:
            cut = cut[:boundary]
        return cut + "..."

    def _make_doc_url(self, path: str) -> str:
        """Map a repo-relative doc path to its public URL (extension dropped)."""
        base, sep, _ext = path.rpartition(".")
        return f"{DOCS_BASE_URL}/{base if sep else path}"

    async def _execute(
        self,
        user_id: str | None,
        session: ChatSession,
        **kwargs,
    ) -> ToolResponseBase:
        """Search documentation and return relevant sections.

        Args:
            user_id: User ID (not required for docs)
            session: Chat session
            query: Search query

        Returns:
            DocSearchResultsResponse: List of matching documentation sections
            NoResultsResponse: No results found
            ErrorResponse: Error message
        """
        query = kwargs.get("query", "").strip()
        session_id = session.session_id if session else None
        if not query:
            return ErrorResponse(
                message="Please provide a search query.",
                error="Missing query parameter",
                session_id=session_id,
            )
        try:
            raw_hits, _total = await unified_hybrid_search(
                query=query,
                content_types=[ContentType.DOCUMENTATION],
                page=1,
                page_size=MAX_RESULTS * 2,  # over-fetch, then deduplicate
                min_score=0.1,  # docs tolerate a lower relevance floor
            )
            if not raw_hits:
                return NoResultsResponse(
                    message=f"No documentation found for '{query}'.",
                    suggestions=[
                        "Try different keywords",
                        "Use more general terms",
                        "Check for typos in your query",
                    ],
                    session_id=session_id,
                )
            # Keep only the best-scoring section per document path.
            # Ties keep the hit seen first (strict > comparison).
            best_per_doc: dict[str, dict[str, Any]] = {}
            for hit in raw_hits:
                doc_path = hit.get("metadata", {}).get("path", "")
                if not doc_path:
                    continue
                current = best_per_doc.get(doc_path)
                if current is None or hit.get("combined_score", 0) > current.get(
                    "combined_score", 0
                ):
                    best_per_doc[doc_path] = hit
            # Highest score first, capped at MAX_RESULTS.
            top_hits = sorted(
                best_per_doc.values(),
                key=lambda h: h.get("combined_score", 0),
                reverse=True,
            )[:MAX_RESULTS]
            if not top_hits:
                return NoResultsResponse(
                    message=f"No documentation found for '{query}'.",
                    suggestions=[
                        "Try different keywords",
                        "Use more general terms",
                    ],
                    session_id=session_id,
                )
            # Convert the surviving hits into the response model.
            doc_results: list[DocSearchResult] = []
            for hit in top_hits:
                meta = hit.get("metadata", {})
                doc_path = meta.get("path", "")
                section_title = meta.get("section_title", "")
                doc_results.append(
                    DocSearchResult(
                        title=meta.get("doc_title", "") or section_title or doc_path,
                        path=doc_path,
                        section=section_title,
                        snippet=self._create_snippet(hit.get("searchable_text", "")),
                        score=round(hit.get("combined_score", 0), 3),
                        doc_url=self._make_doc_url(doc_path),
                    )
                )
            return DocSearchResultsResponse(
                message=f"Found {len(doc_results)} relevant documentation sections.",
                results=doc_results,
                count=len(doc_results),
                query=query,
                session_id=session_id,
            )
        except Exception as e:
            logger.error(f"Documentation search failed: {e}")
            return ErrorResponse(
                message=f"Failed to search documentation: {str(e)}",
                error="search_failed",
                session_id=session_id,
            )

View File

@@ -275,22 +275,8 @@ class BlockHandler(ContentHandler):
}
@dataclass
class MarkdownSection:
    """One heading-delimited section of a markdown document, produced by chunking."""
    title: str  # Section heading text
    content: str  # Section content (including the heading line)
    level: int  # Heading level (1 for #, 2 for ##, etc.)
    index: int  # Zero-based section index within the document
class DocumentationHandler(ContentHandler):
"""Handler for documentation files (.md/.mdx).
Chunks documents by markdown headings to create multiple embeddings per file.
Each section (## heading) becomes a separate embedding for better retrieval.
"""
"""Handler for documentation files (.md/.mdx)."""
@property
def content_type(self) -> ContentType:
@@ -311,162 +297,35 @@ class DocumentationHandler(ContentHandler):
docs_root = project_root / "docs"
return docs_root
def _extract_doc_title(self, file_path: Path) -> str:
"""Extract the document title from a markdown file."""
def _extract_title_and_content(self, file_path: Path) -> tuple[str, str]:
"""Extract title and content from markdown file."""
try:
content = file_path.read_text(encoding="utf-8")
lines = content.split("\n")
# Try to extract title from first # heading
lines = content.split("\n")
title = ""
body_lines = []
for line in lines:
if line.startswith("# "):
return line[2:].strip()
if line.startswith("# ") and not title:
title = line[2:].strip()
else:
body_lines.append(line)
# If no title found, use filename
return file_path.stem.replace("-", " ").replace("_", " ").title()
except Exception as e:
logger.warning(f"Failed to read title from {file_path}: {e}")
return file_path.stem.replace("-", " ").replace("_", " ").title()
if not title:
title = file_path.stem.replace("-", " ").replace("_", " ").title()
def _chunk_markdown_by_headings(
self, file_path: Path, min_heading_level: int = 2
) -> list[MarkdownSection]:
"""
Split a markdown file into sections based on headings.
body = "\n".join(body_lines)
Args:
file_path: Path to the markdown file
min_heading_level: Minimum heading level to split on (default: 2 for ##)
Returns:
List of MarkdownSection objects, one per section.
If no headings found, returns a single section with all content.
"""
try:
content = file_path.read_text(encoding="utf-8")
return title, body
except Exception as e:
logger.warning(f"Failed to read {file_path}: {e}")
return []
lines = content.split("\n")
sections: list[MarkdownSection] = []
current_section_lines: list[str] = []
current_title = ""
current_level = 0
section_index = 0
doc_title = ""
for line in lines:
# Check if line is a heading
if line.startswith("#"):
# Count heading level
level = 0
for char in line:
if char == "#":
level += 1
else:
break
heading_text = line[level:].strip()
# Track document title (level 1 heading)
if level == 1 and not doc_title:
doc_title = heading_text
# Don't create a section for just the title - add it to first section
current_section_lines.append(line)
continue
# Check if this heading should start a new section
if level >= min_heading_level:
# Save previous section if it has content
if current_section_lines:
section_content = "\n".join(current_section_lines).strip()
if section_content:
# Use doc title for first section if no specific title
title = current_title if current_title else doc_title
if not title:
title = file_path.stem.replace("-", " ").replace(
"_", " "
)
sections.append(
MarkdownSection(
title=title,
content=section_content,
level=current_level if current_level else 1,
index=section_index,
)
)
section_index += 1
# Start new section
current_section_lines = [line]
current_title = heading_text
current_level = level
else:
# Lower level heading (e.g., # when splitting on ##)
current_section_lines.append(line)
else:
current_section_lines.append(line)
# Don't forget the last section
if current_section_lines:
section_content = "\n".join(current_section_lines).strip()
if section_content:
title = current_title if current_title else doc_title
if not title:
title = file_path.stem.replace("-", " ").replace("_", " ")
sections.append(
MarkdownSection(
title=title,
content=section_content,
level=current_level if current_level else 1,
index=section_index,
)
)
# If no sections were created (no headings found), create one section with all content
if not sections and content.strip():
title = (
doc_title
if doc_title
else file_path.stem.replace("-", " ").replace("_", " ")
)
sections.append(
MarkdownSection(
title=title,
content=content.strip(),
level=1,
index=0,
)
)
return sections
def _make_section_content_id(self, doc_path: str, section_index: int) -> str:
"""Create a unique content ID for a document section.
Format: doc_path::section_index
Example: 'platform/getting-started.md::0'
"""
return f"{doc_path}::{section_index}"
def _parse_section_content_id(self, content_id: str) -> tuple[str, int]:
"""Parse a section content ID back into doc_path and section_index.
Returns: (doc_path, section_index)
"""
if "::" in content_id:
parts = content_id.rsplit("::", 1)
return parts[0], int(parts[1])
# Legacy format (whole document)
return content_id, 0
return file_path.stem, ""
async def get_missing_items(self, batch_size: int) -> list[ContentItem]:
"""Fetch documentation sections without embeddings.
Chunks each document by markdown headings and creates embeddings for each section.
Content IDs use the format: 'path/to/doc.md::section_index'
"""
"""Fetch documentation files without embeddings."""
docs_root = self._get_docs_root()
if not docs_root.exists():
@@ -476,28 +335,14 @@ class DocumentationHandler(ContentHandler):
# Find all .md and .mdx files
all_docs = list(docs_root.rglob("*.md")) + list(docs_root.rglob("*.mdx"))
if not all_docs:
# Get relative paths for content IDs
doc_paths = [str(doc.relative_to(docs_root)) for doc in all_docs]
if not doc_paths:
return []
# Build list of all sections from all documents
all_sections: list[tuple[str, Path, MarkdownSection]] = []
for doc_file in all_docs:
doc_path = str(doc_file.relative_to(docs_root))
sections = self._chunk_markdown_by_headings(doc_file)
for section in sections:
all_sections.append((doc_path, doc_file, section))
if not all_sections:
return []
# Generate content IDs for all sections
section_content_ids = [
self._make_section_content_id(doc_path, section.index)
for doc_path, _, section in all_sections
]
# Check which ones have embeddings
placeholders = ",".join([f"${i+1}" for i in range(len(section_content_ids))])
placeholders = ",".join([f"${i+1}" for i in range(len(doc_paths))])
existing_result = await query_raw_with_schema(
f"""
SELECT "contentId"
@@ -505,100 +350,76 @@ class DocumentationHandler(ContentHandler):
WHERE "contentType" = 'DOCUMENTATION'::{{schema_prefix}}"ContentType"
AND "contentId" = ANY(ARRAY[{placeholders}])
""",
*section_content_ids,
*doc_paths,
)
existing_ids = {row["contentId"] for row in existing_result}
# Filter to missing sections
missing_sections = [
(doc_path, doc_file, section, content_id)
for (doc_path, doc_file, section), content_id in zip(
all_sections, section_content_ids
)
if content_id not in existing_ids
missing_docs = [
(doc_path, doc_file)
for doc_path, doc_file in zip(doc_paths, all_docs)
if doc_path not in existing_ids
]
# Convert to ContentItem (up to batch_size)
# Convert to ContentItem
items = []
for doc_path, doc_file, section, content_id in missing_sections[:batch_size]:
for doc_path, doc_file in missing_docs[:batch_size]:
try:
# Get document title for context
doc_title = self._extract_doc_title(doc_file)
title, content = self._extract_title_and_content(doc_file)
# Build searchable text with context
# Include doc title and section title for better search relevance
searchable_text = f"{doc_title} - {section.title}\n\n{section.content}"
# Build searchable text
searchable_text = f"{title} {content}"
items.append(
ContentItem(
content_id=content_id,
content_id=doc_path,
content_type=ContentType.DOCUMENTATION,
searchable_text=searchable_text,
metadata={
"doc_title": doc_title,
"section_title": section.title,
"section_index": section.index,
"heading_level": section.level,
"title": title,
"path": doc_path,
},
user_id=None, # Documentation is public
)
)
except Exception as e:
logger.warning(f"Failed to process section {content_id}: {e}")
logger.warning(f"Failed to process doc {doc_path}: {e}")
continue
return items
def _get_all_section_content_ids(self, docs_root: Path) -> set[str]:
"""Get all current section content IDs from the docs directory.
Used for stats and cleanup to know what sections should exist.
"""
all_docs = list(docs_root.rglob("*.md")) + list(docs_root.rglob("*.mdx"))
content_ids = set()
for doc_file in all_docs:
doc_path = str(doc_file.relative_to(docs_root))
sections = self._chunk_markdown_by_headings(doc_file)
for section in sections:
content_ids.add(self._make_section_content_id(doc_path, section.index))
return content_ids
async def get_stats(self) -> dict[str, int]:
"""Get statistics about documentation embedding coverage.
Counts sections (not documents) since each section gets its own embedding.
"""
"""Get statistics about documentation embedding coverage."""
docs_root = self._get_docs_root()
if not docs_root.exists():
return {"total": 0, "with_embeddings": 0, "without_embeddings": 0}
# Get all section content IDs
all_section_ids = self._get_all_section_content_ids(docs_root)
total_sections = len(all_section_ids)
# Count all .md and .mdx files
all_docs = list(docs_root.rglob("*.md")) + list(docs_root.rglob("*.mdx"))
total_docs = len(all_docs)
if total_sections == 0:
if total_docs == 0:
return {"total": 0, "with_embeddings": 0, "without_embeddings": 0}
# Count embeddings in database for DOCUMENTATION type
doc_paths = [str(doc.relative_to(docs_root)) for doc in all_docs]
placeholders = ",".join([f"${i+1}" for i in range(len(doc_paths))])
embedded_result = await query_raw_with_schema(
"""
f"""
SELECT COUNT(*) as count
FROM {schema_prefix}"UnifiedContentEmbedding"
WHERE "contentType" = 'DOCUMENTATION'::{schema_prefix}"ContentType"
"""
FROM {{schema_prefix}}"UnifiedContentEmbedding"
WHERE "contentType" = 'DOCUMENTATION'::{{schema_prefix}}"ContentType"
AND "contentId" = ANY(ARRAY[{placeholders}])
""",
*doc_paths,
)
with_embeddings = embedded_result[0]["count"] if embedded_result else 0
return {
"total": total_sections,
"total": total_docs,
"with_embeddings": with_embeddings,
"without_embeddings": total_sections - with_embeddings,
"without_embeddings": total_docs - with_embeddings,
}

View File

@@ -164,20 +164,20 @@ async def test_documentation_handler_get_missing_items(tmp_path, mocker):
assert len(items) == 2
# Check guide.md (content_id format: doc_path::section_index)
# Check guide.md
guide_item = next(
(item for item in items if item.content_id == "guide.md::0"), None
(item for item in items if item.content_id == "guide.md"), None
)
assert guide_item is not None
assert guide_item.content_type == ContentType.DOCUMENTATION
assert "Getting Started" in guide_item.searchable_text
assert "This is a guide" in guide_item.searchable_text
assert guide_item.metadata["doc_title"] == "Getting Started"
assert guide_item.metadata["title"] == "Getting Started"
assert guide_item.user_id is None
# Check api.mdx (content_id format: doc_path::section_index)
# Check api.mdx
api_item = next(
(item for item in items if item.content_id == "api.mdx::0"), None
(item for item in items if item.content_id == "api.mdx"), None
)
assert api_item is not None
assert "API Reference" in api_item.searchable_text
@@ -218,74 +218,17 @@ async def test_documentation_handler_title_extraction(tmp_path):
# Test with heading
doc_with_heading = tmp_path / "with_heading.md"
doc_with_heading.write_text("# My Title\n\nContent here")
title = handler._extract_doc_title(doc_with_heading)
title, content = handler._extract_title_and_content(doc_with_heading)
assert title == "My Title"
assert "# My Title" not in content
assert "Content here" in content
# Test without heading
doc_without_heading = tmp_path / "no-heading.md"
doc_without_heading.write_text("Just content, no heading")
title = handler._extract_doc_title(doc_without_heading)
title, content = handler._extract_title_and_content(doc_without_heading)
assert title == "No Heading" # Uses filename
@pytest.mark.asyncio(loop_scope="session")
async def test_documentation_handler_markdown_chunking(tmp_path):
"""Test DocumentationHandler chunks markdown by headings."""
handler = DocumentationHandler()
# Test document with multiple sections
doc_with_sections = tmp_path / "sections.md"
doc_with_sections.write_text(
"# Document Title\n\n"
"Intro paragraph.\n\n"
"## Section One\n\n"
"Content for section one.\n\n"
"## Section Two\n\n"
"Content for section two.\n"
)
sections = handler._chunk_markdown_by_headings(doc_with_sections)
# Should have 3 sections: intro (with doc title), section one, section two
assert len(sections) == 3
assert sections[0].title == "Document Title"
assert sections[0].index == 0
assert "Intro paragraph" in sections[0].content
assert sections[1].title == "Section One"
assert sections[1].index == 1
assert "Content for section one" in sections[1].content
assert sections[2].title == "Section Two"
assert sections[2].index == 2
assert "Content for section two" in sections[2].content
# Test document without headings
doc_no_sections = tmp_path / "no-sections.md"
doc_no_sections.write_text("Just plain content without any headings.")
sections = handler._chunk_markdown_by_headings(doc_no_sections)
assert len(sections) == 1
assert sections[0].index == 0
assert "Just plain content" in sections[0].content
@pytest.mark.asyncio(loop_scope="session")
async def test_documentation_handler_section_content_ids():
"""Test DocumentationHandler creates and parses section content IDs."""
handler = DocumentationHandler()
# Test making content ID
content_id = handler._make_section_content_id("docs/guide.md", 2)
assert content_id == "docs/guide.md::2"
# Test parsing content ID
doc_path, section_index = handler._parse_section_content_id("docs/guide.md::2")
assert doc_path == "docs/guide.md"
assert section_index == 2
# Test parsing legacy format (no section index)
doc_path, section_index = handler._parse_section_content_id("docs/old-format.md")
assert doc_path == "docs/old-format.md"
assert section_index == 0
assert "Just content" in content
@pytest.mark.asyncio(loop_scope="session")

View File

@@ -683,20 +683,20 @@ async def cleanup_orphaned_embeddings() -> dict[str, Any]:
current_ids = set(get_blocks().keys())
elif content_type == ContentType.DOCUMENTATION:
# Use DocumentationHandler to get section-based content IDs
from backend.api.features.store.content_handlers import (
DocumentationHandler,
)
from pathlib import Path
doc_handler = CONTENT_HANDLERS.get(ContentType.DOCUMENTATION)
if isinstance(doc_handler, DocumentationHandler):
docs_root = doc_handler._get_docs_root()
if docs_root.exists():
current_ids = doc_handler._get_all_section_content_ids(
docs_root
)
else:
current_ids = set()
# embeddings.py is at: backend/backend/api/features/store/embeddings.py
# Need to go up to project root then into docs/
this_file = Path(__file__)
project_root = (
this_file.parent.parent.parent.parent.parent.parent.parent
)
docs_root = project_root / "docs"
if docs_root.exists():
all_docs = list(docs_root.rglob("*.md")) + list(
docs_root.rglob("*.mdx")
)
current_ids = {str(doc.relative_to(docs_root)) for doc in all_docs}
else:
current_ids = set()
else:

View File

@@ -3,16 +3,13 @@ Unified Hybrid Search
Combines semantic (embedding) search with lexical (tsvector) search
for improved relevance across all content types (agents, blocks, docs).
Includes BM25 reranking for improved lexical relevance.
"""
import logging
import re
from dataclasses import dataclass
from typing import Any, Literal
from prisma.enums import ContentType
from rank_bm25 import BM25Okapi
from backend.api.features.store.embeddings import (
EMBEDDING_DIM,
@@ -24,84 +21,6 @@ from backend.data.db import query_raw_with_schema
logger = logging.getLogger(__name__)
# ============================================================================
# BM25 Reranking
# ============================================================================
def tokenize(text: str) -> list[str]:
"""Simple tokenizer for BM25 - lowercase and split on non-alphanumeric."""
if not text:
return []
# Lowercase and split on non-alphanumeric characters
tokens = re.findall(r"\b\w+\b", text.lower())
return tokens
def bm25_rerank(
query: str,
results: list[dict[str, Any]],
text_field: str = "searchable_text",
bm25_weight: float = 0.3,
original_score_field: str = "combined_score",
) -> list[dict[str, Any]]:
"""
Rerank search results using BM25.
Combines the original combined_score with BM25 score for improved
lexical relevance, especially for exact term matches.
Args:
query: The search query
results: List of result dicts with text_field and original_score_field
text_field: Field name containing the text to score
bm25_weight: Weight for BM25 score (0-1). Original score gets (1 - bm25_weight)
original_score_field: Field name containing the original score
Returns:
Results list sorted by combined score (BM25 + original)
"""
if not results or not query:
return results
# Extract texts and tokenize
corpus = [tokenize(r.get(text_field, "") or "") for r in results]
# Handle edge case where all documents are empty
if all(len(doc) == 0 for doc in corpus):
return results
# Build BM25 index
bm25 = BM25Okapi(corpus)
# Score query against corpus
query_tokens = tokenize(query)
if not query_tokens:
return results
bm25_scores = bm25.get_scores(query_tokens)
# Normalize BM25 scores to 0-1 range
max_bm25 = max(bm25_scores) if max(bm25_scores) > 0 else 1.0
normalized_bm25 = [s / max_bm25 for s in bm25_scores]
# Combine scores
original_weight = 1.0 - bm25_weight
for i, result in enumerate(results):
original_score = result.get(original_score_field, 0) or 0
result["bm25_score"] = normalized_bm25[i]
final_score = (
original_weight * original_score + bm25_weight * normalized_bm25[i]
)
result["final_score"] = final_score
result["relevance"] = final_score
# Sort by relevance descending
results.sort(key=lambda x: x.get("relevance", 0), reverse=True)
return results
@dataclass
class UnifiedSearchWeights:
"""Weights for unified search (no popularity signal)."""
@@ -354,7 +273,9 @@ async def unified_hybrid_search(
FROM normalized
),
filtered AS (
SELECT *, COUNT(*) OVER () as total_count
SELECT
*,
COUNT(*) OVER () as total_count
FROM scored
WHERE combined_score >= {min_score_param}
)
@@ -368,15 +289,6 @@ async def unified_hybrid_search(
)
total = results[0]["total_count"] if results else 0
# Apply BM25 reranking
if results:
results = bm25_rerank(
query=query,
results=results,
text_field="searchable_text",
bm25_weight=0.3,
original_score_field="combined_score",
)
# Clean up results
for result in results:
@@ -604,8 +516,6 @@ async def hybrid_search(
sa.featured,
sa.is_available,
sa.updated_at,
-- Searchable text for BM25 reranking
COALESCE(sa.agent_name, '') || ' ' || COALESCE(sa.sub_heading, '') || ' ' || COALESCE(sa.description, '') as searchable_text,
-- Semantic score
COALESCE(1 - (uce.embedding <=> {embedding_param}::vector), 0) as semantic_score,
-- Lexical score (raw, will normalize)
@@ -663,7 +573,6 @@ async def hybrid_search(
featured,
is_available,
updated_at,
searchable_text,
semantic_score,
lexical_score,
category_score,
@@ -694,19 +603,8 @@ async def hybrid_search(
total = results[0]["total_count"] if results else 0
# Apply BM25 reranking
if results:
results = bm25_rerank(
query=query,
results=results,
text_field="searchable_text",
bm25_weight=0.3,
original_score_field="combined_score",
)
for result in results:
result.pop("total_count", None)
result.pop("searchable_text", None)
logger.info(f"Hybrid search (store agents): {len(results)} results, {total} total")

View File

@@ -311,43 +311,11 @@ async def test_hybrid_search_min_score_filtering():
@pytest.mark.asyncio(loop_scope="session")
@pytest.mark.integration
async def test_hybrid_search_pagination():
"""Test hybrid search pagination.
Pagination happens in SQL (LIMIT/OFFSET), then BM25 reranking is applied
to the paginated results.
"""
# Create mock results that SQL would return for a page
mock_results = [
{
"slug": f"agent-{i}",
"agent_name": f"Agent {i}",
"agent_image": "test.png",
"creator_username": "test",
"creator_avatar": "avatar.png",
"sub_heading": "Test",
"description": "Test description",
"runs": 100 - i,
"rating": 4.5,
"categories": ["test"],
"featured": False,
"is_available": True,
"updated_at": "2024-01-01T00:00:00Z",
"searchable_text": f"Agent {i} test description",
"combined_score": 0.9 - (i * 0.01),
"semantic_score": 0.7,
"lexical_score": 0.6,
"category_score": 0.5,
"recency_score": 0.4,
"popularity_score": 0.3,
"total_count": 25,
}
for i in range(10) # SQL returns page_size results
]
"""Test hybrid search pagination."""
with patch(
"backend.api.features.store.hybrid_search.query_raw_with_schema"
) as mock_query:
mock_query.return_value = mock_results
mock_query.return_value = []
with patch(
"backend.api.features.store.hybrid_search.embed_query"
@@ -361,18 +329,16 @@ async def test_hybrid_search_pagination():
page_size=10,
)
# Verify results returned
assert len(results) == 10
assert total == 25 # Total from SQL COUNT(*) OVER()
# Verify the SQL query uses page_size and offset
# Verify pagination parameters
call_args = mock_query.call_args
params = call_args[0]
# Last two params are page_size and offset
page_size_param = params[-2]
offset_param = params[-1]
assert page_size_param == 10
assert offset_param == 10 # (page 2 - 1) * 10
# Last two params should be LIMIT and OFFSET
limit = params[-2]
offset = params[-1]
assert limit == 10 # page_size
assert offset == 10 # (page - 1) * page_size = (2 - 1) * 10
@pytest.mark.asyncio(loop_scope="session")
@@ -643,36 +609,14 @@ async def test_unified_hybrid_search_empty_query():
@pytest.mark.asyncio(loop_scope="session")
@pytest.mark.integration
async def test_unified_hybrid_search_pagination():
"""Test unified search pagination with BM25 reranking.
Pagination happens in SQL (LIMIT/OFFSET), then BM25 reranking is applied
to the paginated results.
"""
# Create mock results that SQL would return for a page
mock_results = [
{
"content_type": "STORE_AGENT",
"content_id": f"agent-{i}",
"searchable_text": f"Agent {i} description",
"metadata": {"name": f"Agent {i}"},
"updated_at": "2025-01-01T00:00:00Z",
"semantic_score": 0.7,
"lexical_score": 0.8 - (i * 0.01),
"category_score": 0.5,
"recency_score": 0.3,
"combined_score": 0.6 - (i * 0.01),
"total_count": 50,
}
for i in range(15) # SQL returns page_size results
]
"""Test unified search pagination."""
with patch(
"backend.api.features.store.hybrid_search.query_raw_with_schema"
) as mock_query:
with patch(
"backend.api.features.store.hybrid_search.embed_query"
) as mock_embed:
mock_query.return_value = mock_results
mock_query.return_value = []
mock_embed.return_value = [0.1] * embeddings.EMBEDDING_DIM
results, total = await unified_hybrid_search(
@@ -681,18 +625,15 @@ async def test_unified_hybrid_search_pagination():
page_size=15,
)
# Verify results returned
assert len(results) == 15
assert total == 50 # Total from SQL COUNT(*) OVER()
# Verify the SQL query uses page_size and offset
# Verify pagination parameters (last two params are LIMIT and OFFSET)
call_args = mock_query.call_args
params = call_args[0]
# Last two params are page_size and offset
page_size_param = params[-2]
offset_param = params[-1]
assert page_size_param == 15
assert offset_param == 30 # (page 3 - 1) * 15
limit = params[-2]
offset = params[-1]
assert limit == 15 # page_size
assert offset == 30 # (page - 1) * page_size = (3 - 1) * 15
@pytest.mark.asyncio(loop_scope="session")

View File

@@ -693,13 +693,13 @@ class DeleteGraphResponse(TypedDict):
async def list_graphs(
user_id: Annotated[str, Security(get_user_id)],
) -> Sequence[graph_db.GraphMeta]:
paginated_result = await graph_db.list_graphs_paginated(
graphs, _ = await graph_db.list_graphs_paginated(
user_id=user_id,
page=1,
page_size=250,
filter_by="active",
)
return paginated_result.graphs
return graphs
@v1_router.get(

View File

@@ -680,23 +680,12 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
return False, reviewed_data
async def _execute(self, input_data: BlockInput, **kwargs) -> BlockOutput:
# Check for review requirement only if running within a graph execution context
# Direct block execution (e.g., from chat) skips the review process
has_graph_context = all(
key in kwargs
for key in (
"node_exec_id",
"graph_exec_id",
"graph_id",
"execution_context",
)
# Check for review requirement and get potentially modified input data
should_pause, input_data = await self.is_block_exec_need_review(
input_data, **kwargs
)
if has_graph_context:
should_pause, input_data = await self.is_block_exec_need_review(
input_data, **kwargs
)
if should_pause:
return
if should_pause:
return
# Validate the input data (original or reviewer-modified) once
if error := self.input_schema.validate_data(input_data):

View File

@@ -804,9 +804,7 @@ class GraphModel(Graph):
)
class GraphMeta(Graph):
user_id: str
class GraphMeta(GraphModel):
# Easy work-around to prevent exposing nodes and links in the API response
nodes: list[NodeModel] = Field(default=[], exclude=True) # type: ignore
links: list[Link] = Field(default=[], exclude=True)
@@ -816,13 +814,6 @@ class GraphMeta(Graph):
return GraphMeta(**graph.model_dump())
class GraphsPaginated(BaseModel):
"""Response schema for paginated graphs."""
graphs: list[GraphMeta]
pagination: Pagination
# --------------------- CRUD functions --------------------- #
@@ -856,7 +847,7 @@ async def list_graphs_paginated(
page: int = 1,
page_size: int = 25,
filter_by: Literal["active"] | None = "active",
) -> GraphsPaginated:
) -> tuple[list[GraphMeta], Pagination]:
"""
Retrieves paginated graph metadata objects.
@@ -867,7 +858,8 @@ async def list_graphs_paginated(
filter_by: An optional filter to either select graphs.
Returns:
GraphsPaginated: Paginated list of graph metadata.
list[GraphMeta]: List of graph info objects.
Pagination: Pagination information.
"""
where_clause: AgentGraphWhereInput = {"userId": user_id}
@@ -900,14 +892,11 @@ async def list_graphs_paginated(
logger.error(f"Error processing graph {graph.id}: {e}")
continue
return GraphsPaginated(
graphs=graph_models,
pagination=Pagination(
total_items=total_count,
total_pages=total_pages,
current_page=page,
page_size=page_size,
),
return graph_models, Pagination(
total_items=total_count,
total_pages=total_pages,
current_page=page,
page_size=page_size,
)

View File

@@ -602,18 +602,6 @@ class Scheduler(AppService):
self.scheduler.add_listener(job_max_instances_listener, EVENT_JOB_MAX_INSTANCES)
self.scheduler.start()
# Run embedding backfill immediately on startup
# This ensures blocks/docs are searchable right away, not after 6 hours
# Safe to run on multiple pods - uses upserts and checks for existing embeddings
if self.register_system_tasks:
logger.info("Running embedding backfill on startup...")
try:
result = ensure_embeddings_coverage()
logger.info(f"Startup embedding backfill complete: {result}")
except Exception as e:
logger.error(f"Startup embedding backfill failed: {e}")
# Don't fail startup - the scheduled job will retry later
# Keep the service running since BackgroundScheduler doesn't block
super().run_service()

View File

@@ -16,7 +16,7 @@ import pickle
import threading
import time
from dataclasses import dataclass
from functools import cache, wraps
from functools import wraps
from typing import Any, Callable, ParamSpec, Protocol, TypeVar, cast, runtime_checkable
from redis import ConnectionPool, Redis
@@ -38,34 +38,29 @@ settings = Settings()
# maxmemory 2gb # Set memory limit (adjust based on your needs)
# save "" # Disable persistence if using Redis purely for caching
# Create a dedicated Redis connection pool for caching (binary mode for pickle)
_cache_pool: ConnectionPool | None = None
@cache
@conn_retry("Redis", "Acquiring cache connection pool")
def _get_cache_pool() -> ConnectionPool:
"""Get or create a connection pool for cache operations (lazy, thread-safe)."""
return ConnectionPool(
host=settings.config.redis_host,
port=settings.config.redis_port,
password=settings.config.redis_password or None,
decode_responses=False, # Binary mode for pickle
max_connections=50,
socket_keepalive=True,
socket_connect_timeout=5,
retry_on_timeout=True,
)
"""Get or create a connection pool for cache operations."""
global _cache_pool
if _cache_pool is None:
_cache_pool = ConnectionPool(
host=settings.config.redis_host,
port=settings.config.redis_port,
password=settings.config.redis_password or None,
decode_responses=False, # Binary mode for pickle
max_connections=50,
socket_keepalive=True,
socket_connect_timeout=5,
retry_on_timeout=True,
)
return _cache_pool
@cache
@conn_retry("Redis", "Acquiring cache connection")
def _get_redis() -> Redis:
"""
Get the lazily-initialized Redis client for shared cache operations.
Uses @cache for thread-safe singleton behavior - connection is only
established when first accessed, allowing services that only use
in-memory caching to work without Redis configuration.
"""
r = Redis(connection_pool=_get_cache_pool())
r.ping() # Verify connection
return r
redis = Redis(connection_pool=_get_cache_pool())
@dataclass
@@ -184,9 +179,9 @@ def cached(
try:
if refresh_ttl_on_get:
# Use GETEX to get value and refresh expiry atomically
cached_bytes = _get_redis().getex(redis_key, ex=ttl_seconds)
cached_bytes = redis.getex(redis_key, ex=ttl_seconds)
else:
cached_bytes = _get_redis().get(redis_key)
cached_bytes = redis.get(redis_key)
if cached_bytes and isinstance(cached_bytes, bytes):
return pickle.loads(cached_bytes)
@@ -200,7 +195,7 @@ def cached(
"""Set value in Redis with TTL."""
try:
pickled_value = pickle.dumps(value, protocol=pickle.HIGHEST_PROTOCOL)
_get_redis().setex(redis_key, ttl_seconds, pickled_value)
redis.setex(redis_key, ttl_seconds, pickled_value)
except Exception as e:
logger.error(
f"Redis error storing cache for {target_func.__name__}: {e}"
@@ -338,18 +333,14 @@ def cached(
if pattern:
# Clear entries matching pattern
keys = list(
_get_redis().scan_iter(
f"cache:{target_func.__name__}:{pattern}"
)
redis.scan_iter(f"cache:{target_func.__name__}:{pattern}")
)
else:
# Clear all cache keys
keys = list(
_get_redis().scan_iter(f"cache:{target_func.__name__}:*")
)
keys = list(redis.scan_iter(f"cache:{target_func.__name__}:*"))
if keys:
pipeline = _get_redis().pipeline()
pipeline = redis.pipeline()
for key in keys:
pipeline.delete(key)
pipeline.execute()
@@ -364,9 +355,7 @@ def cached(
def cache_info() -> dict[str, int | None]:
if shared_cache:
cache_keys = list(
_get_redis().scan_iter(f"cache:{target_func.__name__}:*")
)
cache_keys = list(redis.scan_iter(f"cache:{target_func.__name__}:*"))
return {
"size": len(cache_keys),
"maxsize": None, # Redis manages its own size
@@ -384,8 +373,10 @@ def cached(
key = _make_hashable_key(args, kwargs)
if shared_cache:
redis_key = _make_redis_key(key, target_func.__name__)
deleted_count = cast(int, _get_redis().delete(redis_key))
return deleted_count > 0
if redis.exists(redis_key):
redis.delete(redis_key)
return True
return False
else:
if key in cache_storage:
del cache_storage[key]

View File

@@ -5339,24 +5339,6 @@ urllib3 = ">=1.26.14,<3"
fastembed = ["fastembed (>=0.7,<0.8)"]
fastembed-gpu = ["fastembed-gpu (>=0.7,<0.8)"]
[[package]]
name = "rank-bm25"
version = "0.2.2"
description = "Various BM25 algorithms for document ranking"
optional = false
python-versions = "*"
groups = ["main"]
files = [
{file = "rank_bm25-0.2.2-py3-none-any.whl", hash = "sha256:7bd4a95571adadfc271746fa146a4bcfd89c0cf731e49c3d1ad863290adbe8ae"},
{file = "rank_bm25-0.2.2.tar.gz", hash = "sha256:096ccef76f8188563419aaf384a02f0ea459503fdf77901378d4fd9d87e5e51d"},
]
[package.dependencies]
numpy = "*"
[package.extras]
dev = ["pytest"]
[[package]]
name = "rapidfuzz"
version = "3.13.0"
@@ -7512,4 +7494,4 @@ cffi = ["cffi (>=1.11)"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.14"
content-hash = "18b92e09596298c82432e4d0a85cb6d80a40b4229bee0a0c15f0529fd6cb21a4"
content-hash = "86838b5ae40d606d6e01a14dad8a56c389d890d7a6a0c274a6602cca80f0df84"

View File

@@ -46,7 +46,6 @@ poetry = "2.1.1" # CHECK DEPENDABOT SUPPORT BEFORE UPGRADING
postmarker = "^1.0"
praw = "~7.8.1"
prisma = "^0.15.0"
rank-bm25 = "^0.2.2"
prometheus-client = "^0.22.1"
prometheus-fastapi-instrumentator = "^7.0.0"
psutil = "^7.0.0"

View File

@@ -1040,16 +1040,31 @@ enum SubmissionStatus {
}
enum APIKeyPermission {
// Legacy v1 permissions (kept for backward compatibility)
IDENTITY // Info about the authenticated user
EXECUTE_GRAPH // Can execute agent graphs
EXECUTE_GRAPH // Can execute agent graphs (v1 only)
READ_GRAPH // Can get graph versions and details
EXECUTE_BLOCK // Can execute individual blocks
EXECUTE_BLOCK // Can execute individual blocks (v1 only)
READ_BLOCK // Can get block information
READ_STORE // Can read store agents and creators
USE_TOOLS // Can use chat tools via external API
MANAGE_INTEGRATIONS // Can initiate OAuth flows and complete them
READ_STORE // Can read store/marketplace agents and creators
USE_TOOLS // Can use chat tools via external API (v1 only)
MANAGE_INTEGRATIONS // Can initiate OAuth flows and complete them (v1 only)
READ_INTEGRATIONS // Can list credentials and providers
DELETE_INTEGRATIONS // Can delete credentials
DELETE_INTEGRATIONS // Can delete credentials (v1 only)
// V2 permissions
WRITE_GRAPH // Can create, update, delete graphs
READ_SCHEDULE // Can list schedules
WRITE_SCHEDULE // Can create and delete schedules
WRITE_STORE // Can create, update, delete marketplace submissions
READ_LIBRARY // Can list library agents and runs
RUN_AGENT // Can run agents from library
READ_RUN // Can list and get run details
WRITE_RUN // Can stop and delete runs
READ_RUN_REVIEW // Can list pending human-in-the-loop reviews
WRITE_RUN_REVIEW // Can submit human-in-the-loop review responses
READ_CREDITS // Can get credit balance and transactions
UPLOAD_FILES // Can upload files for agent input
}
model APIKey {

View File

@@ -1,6 +1,6 @@
import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInput";
import { CredentialsMetaInput } from "@/app/api/__generated__/models/credentialsMetaInput";
import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
import { CredentialsInput } from "@/components/contextual/CredentialsInput/CredentialsInput";
import { useState } from "react";
import { getSchemaDefaultCredentials } from "../../helpers";
import { areAllCredentialsSet, getCredentialFields } from "./helpers";

View File

@@ -1,12 +1,12 @@
"use client";
import { RunAgentInputs } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs";
import {
Card,
CardContent,
CardHeader,
CardTitle,
} from "@/components/__legacy__/ui/card";
import { RunAgentInputs } from "@/components/contextual/RunAgentInputs/RunAgentInputs";
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { CircleNotchIcon } from "@phosphor-icons/react/dist/ssr";
import { Play } from "lucide-react";

View File

@@ -1,11 +1,11 @@
"use client";
import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInput";
import { useGetOauthGetOauthAppInfo } from "@/app/api/__generated__/endpoints/oauth/oauth";
import { okData } from "@/app/api/helpers";
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import { AuthCard } from "@/components/auth/AuthCard";
import { CredentialsInput } from "@/components/contextual/CredentialsInput/CredentialsInput";
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import type {
BlockIOCredentialsSubSchema,

View File

@@ -1,6 +1,11 @@
import { BlockUIType } from "@/app/(platform)/build/components/types";
import { useGraphStore } from "@/app/(platform)/build/stores/graphStore";
import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
import {
globalRegistry,
OutputActions,
OutputItem,
} from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
import { Label } from "@/components/__legacy__/ui/label";
import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
import {
@@ -18,11 +23,6 @@ import {
TooltipProvider,
TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import {
globalRegistry,
OutputActions,
OutputItem,
} from "@/components/contextual/OutputRenderers";
import { BookOpenIcon } from "@phosphor-icons/react";
import { useMemo } from "react";
import { useShallow } from "zustand/react/shallow";

View File

@@ -48,29 +48,17 @@ export const useRunInputDialog = ({
},
onError: (error) => {
if (error instanceof ApiError && error.isGraphValidationError()) {
const errorData = error.response?.detail || {
node_errors: {},
message: undefined,
};
const nodeErrors = errorData.node_errors || {};
if (Object.keys(nodeErrors).length > 0) {
Object.entries(nodeErrors).forEach(
([nodeId, nodeErrorsForNode]) => {
useNodeStore
.getState()
.updateNodeErrors(
nodeId,
nodeErrorsForNode as { [key: string]: string },
);
},
);
} else {
useNodeStore.getState().nodes.forEach((node) => {
useNodeStore.getState().updateNodeErrors(node.id, {});
});
}
const errorData = error.response?.detail;
Object.entries(errorData.node_errors).forEach(
([nodeId, nodeErrors]) => {
useNodeStore
.getState()
.updateNodeErrors(
nodeId,
nodeErrors as { [key: string]: string },
);
},
);
toast({
title: errorData?.message || "Graph validation failed",
description:
@@ -79,7 +67,7 @@ export const useRunInputDialog = ({
});
setIsOpen(false);
const firstBackendId = Object.keys(nodeErrors)[0];
const firstBackendId = Object.keys(errorData.node_errors)[0];
if (firstBackendId) {
const firstErrorNode = useNodeStore

View File

@@ -55,16 +55,14 @@ export const Flow = () => {
const edgeTypes = useMemo(() => ({ custom: CustomEdge }), []);
const onNodeDragStop = useCallback(() => {
const currentNodes = useNodeStore.getState().nodes;
setNodes(
resolveCollisions(currentNodes, {
resolveCollisions(nodes, {
maxIterations: Infinity,
overlapThreshold: 0.5,
margin: 15,
}),
);
}, [setNodes]);
}, [setNodes, nodes]);
const { edges, onConnect, onEdgesChange } = useCustomEdge();
// for loading purpose

View File

@@ -6,7 +6,6 @@ import {
import { useEdgeStore } from "@/app/(platform)/build/stores/edgeStore";
import { useCallback } from "react";
import { useNodeStore } from "../../../stores/nodeStore";
import { useHistoryStore } from "../../../stores/historyStore";
import { CustomEdge } from "./CustomEdge";
export const useCustomEdge = () => {
@@ -52,20 +51,7 @@ export const useCustomEdge = () => {
const onEdgesChange = useCallback(
(changes: EdgeChange<CustomEdge>[]) => {
const hasRemoval = changes.some((change) => change.type === "remove");
const prevState = hasRemoval
? {
nodes: useNodeStore.getState().nodes,
edges: edges,
}
: null;
setEdges(applyEdgeChanges(changes, edges));
if (prevState) {
useHistoryStore.getState().pushState(prevState);
}
},
[edges, setEdges],
);

View File

@@ -22,7 +22,7 @@ export const NodeHeader = ({ data, nodeId }: Props) => {
const updateNodeData = useNodeStore((state) => state.updateNodeData);
const title =
(data.metadata?.customized_name as string) ||
data.hardcodedValues?.agent_name ||
data.hardcodedValues.agent_name ||
data.title;
const [isEditingTitle, setIsEditingTitle] = useState(false);

View File

@@ -1,7 +1,7 @@
"use client";
import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
import { globalRegistry } from "@/components/contextual/OutputRenderers";
import type { OutputMetadata } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
import { globalRegistry } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
export const TextRenderer: React.FC<{
value: any;

View File

@@ -1,3 +1,7 @@
import {
OutputActions,
OutputItem,
} from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
@@ -7,10 +11,6 @@ import {
TooltipProvider,
TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import {
OutputActions,
OutputItem,
} from "@/components/contextual/OutputRenderers";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { beautifyString } from "@/lib/utils";
import {

View File

@@ -1,6 +1,6 @@
import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
import { globalRegistry } from "@/components/contextual/OutputRenderers";
import { downloadOutputs } from "@/components/contextual/OutputRenderers/utils/download";
import type { OutputMetadata } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
import { globalRegistry } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
import { downloadOutputs } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/utils/download";
import { useToast } from "@/components/molecules/Toast/use-toast";
import { beautifyString } from "@/lib/utils";
import React, { useMemo, useState } from "react";

View File

@@ -1,10 +1,10 @@
import { Alert, AlertDescription } from "@/components/molecules/Alert/Alert";
import { Text } from "@/components/atoms/Text/Text";
import Link from "next/link";
import { useGetV2GetLibraryAgentByGraphId } from "@/app/api/__generated__/endpoints/library/library";
import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import { Text } from "@/components/atoms/Text/Text";
import { Alert, AlertDescription } from "@/components/molecules/Alert/Alert";
import { isValidUUID } from "@/lib/utils";
import Link from "next/link";
import { parseAsString, useQueryStates } from "nuqs";
import { useQueryStates, parseAsString } from "nuqs";
import { isValidUUID } from "@/app/(platform)/chat/helpers";
export const WebhookDisclaimer = ({ nodeId }: { nodeId: string }) => {
const [{ flowID }] = useQueryStates({

View File

@@ -1,9 +1,9 @@
import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
import type { OutputMetadata } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
import {
globalRegistry,
OutputActions,
OutputItem,
} from "@/components/contextual/OutputRenderers";
} from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { beautifyString } from "@/lib/utils";
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";

View File

@@ -3,6 +3,7 @@ import {
CustomNodeData,
} from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
import { NodeTableInput } from "@/app/(platform)/build/components/legacy-builder/NodeTableInput";
import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInput";
import { Button } from "@/components/__legacy__/ui/button";
import { Calendar } from "@/components/__legacy__/ui/calendar";
import { LocalValuedInput } from "@/components/__legacy__/ui/input";
@@ -27,7 +28,6 @@ import {
SelectValue,
} from "@/components/__legacy__/ui/select";
import { Switch } from "@/components/atoms/Switch/Switch";
import { CredentialsInput } from "@/components/contextual/CredentialsInput/CredentialsInput";
import { GoogleDrivePickerInput } from "@/components/contextual/GoogleDrivePicker/GoogleDrivePickerInput";
import {
BlockIOArraySubSchema,

View File

@@ -5,8 +5,6 @@ import { customEdgeToLink, linkToCustomEdge } from "../components/helper";
import { MarkerType } from "@xyflow/react";
import { NodeExecutionResult } from "@/app/api/__generated__/models/nodeExecutionResult";
import { cleanUpHandleId } from "@/components/renderers/InputRenderer/helpers";
import { useHistoryStore } from "./historyStore";
import { useNodeStore } from "./nodeStore";
type EdgeStore = {
edges: CustomEdge[];
@@ -55,36 +53,25 @@ export const useEdgeStore = create<EdgeStore>((set, get) => ({
id,
};
const exists = get().edges.some(
(e) =>
e.source === newEdge.source &&
e.target === newEdge.target &&
e.sourceHandle === newEdge.sourceHandle &&
e.targetHandle === newEdge.targetHandle,
);
if (exists) return newEdge;
const prevState = {
nodes: useNodeStore.getState().nodes,
edges: get().edges,
};
set((state) => ({ edges: [...state.edges, newEdge] }));
useHistoryStore.getState().pushState(prevState);
set((state) => {
const exists = state.edges.some(
(e) =>
e.source === newEdge.source &&
e.target === newEdge.target &&
e.sourceHandle === newEdge.sourceHandle &&
e.targetHandle === newEdge.targetHandle,
);
if (exists) return state;
return { edges: [...state.edges, newEdge] };
});
return newEdge;
},
removeEdge: (edgeId) => {
const prevState = {
nodes: useNodeStore.getState().nodes,
edges: get().edges,
};
removeEdge: (edgeId) =>
set((state) => ({
edges: state.edges.filter((e) => e.id !== edgeId),
}));
useHistoryStore.getState().pushState(prevState);
},
})),
upsertMany: (edges) =>
set((state) => {

View File

@@ -37,15 +37,6 @@ export const useHistoryStore = create<HistoryStore>((set, get) => ({
return;
}
const actualCurrentState = {
nodes: useNodeStore.getState().nodes,
edges: useEdgeStore.getState().edges,
};
if (isEqual(state, actualCurrentState)) {
return;
}
set((prev) => ({
past: [...prev.past.slice(-MAX_HISTORY + 1), state],
future: [],
@@ -64,25 +55,18 @@ export const useHistoryStore = create<HistoryStore>((set, get) => ({
undo: () => {
const { past, future } = get();
if (past.length === 0) return;
if (past.length <= 1) return;
const actualCurrentState = {
nodes: useNodeStore.getState().nodes,
edges: useEdgeStore.getState().edges,
};
const currentState = past[past.length - 1];
const previousState = past[past.length - 1];
if (isEqual(actualCurrentState, previousState)) {
return;
}
const previousState = past[past.length - 2];
useNodeStore.getState().setNodes(previousState.nodes);
useEdgeStore.getState().setEdges(previousState.edges);
set({
past: past.length > 1 ? past.slice(0, -1) : past,
future: [actualCurrentState, ...future],
past: past.slice(0, -1),
future: [currentState, ...future],
});
},
@@ -90,36 +74,18 @@ export const useHistoryStore = create<HistoryStore>((set, get) => ({
const { past, future } = get();
if (future.length === 0) return;
const actualCurrentState = {
nodes: useNodeStore.getState().nodes,
edges: useEdgeStore.getState().edges,
};
const nextState = future[0];
useNodeStore.getState().setNodes(nextState.nodes);
useEdgeStore.getState().setEdges(nextState.edges);
const lastPast = past[past.length - 1];
const shouldPushToPast =
!lastPast || !isEqual(actualCurrentState, lastPast);
set({
past: shouldPushToPast ? [...past, actualCurrentState] : past,
past: [...past, nextState],
future: future.slice(1),
});
},
canUndo: () => {
const { past } = get();
if (past.length === 0) return false;
const actualCurrentState = {
nodes: useNodeStore.getState().nodes,
edges: useEdgeStore.getState().edges,
};
return !isEqual(actualCurrentState, past[past.length - 1]);
},
canUndo: () => get().past.length > 1,
canRedo: () => get().future.length > 0,
clear: () => set({ past: [{ nodes: [], edges: [] }], future: [] }),

View File

@@ -1,7 +1,6 @@
import { create } from "zustand";
import { NodeChange, XYPosition, applyNodeChanges } from "@xyflow/react";
import { CustomNode } from "../components/FlowEditor/nodes/CustomNode/CustomNode";
import { CustomEdge } from "../components/FlowEditor/edges/CustomEdge";
import { BlockInfo } from "@/app/api/__generated__/models/blockInfo";
import {
convertBlockInfoIntoCustomNodeData,
@@ -45,8 +44,6 @@ const MINIMUM_MOVE_BEFORE_LOG = 50;
// Track initial positions when drag starts (outside store to avoid re-renders)
const dragStartPositions: Record<string, XYPosition> = {};
let dragStartState: { nodes: CustomNode[]; edges: CustomEdge[] } | null = null;
type NodeStore = {
nodes: CustomNode[];
nodeCounter: number;
@@ -127,20 +124,14 @@ export const useNodeStore = create<NodeStore>((set, get) => ({
nodeCounter: state.nodeCounter + 1,
})),
onNodesChange: (changes) => {
const prevState = {
nodes: get().nodes,
edges: useEdgeStore.getState().edges,
};
// Track initial positions when drag starts
changes.forEach((change) => {
if (change.type === "position" && change.dragging === true) {
if (!dragStartState) {
const currentNodes = get().nodes;
const currentEdges = useEdgeStore.getState().edges;
dragStartState = {
nodes: currentNodes.map((n) => ({
...n,
position: { ...n.position },
data: { ...n.data },
})),
edges: currentEdges.map((e) => ({ ...e })),
};
}
if (!dragStartPositions[change.id]) {
const node = get().nodes.find((n) => n.id === change.id);
if (node) {
@@ -150,17 +141,12 @@ export const useNodeStore = create<NodeStore>((set, get) => ({
}
});
let shouldTrack = changes.some((change) => change.type === "remove");
let stateToTrack: { nodes: CustomNode[]; edges: CustomEdge[] } | null =
null;
if (shouldTrack) {
stateToTrack = {
nodes: get().nodes,
edges: useEdgeStore.getState().edges,
};
}
// Check if we should track this change in history
let shouldTrack = changes.some(
(change) => change.type === "remove" || change.type === "add",
);
// For position changes, only track if movement exceeds threshold
if (!shouldTrack) {
changes.forEach((change) => {
if (change.type === "position" && change.dragging === false) {
@@ -172,23 +158,20 @@ export const useNodeStore = create<NodeStore>((set, get) => ({
);
if (distanceMoved > MINIMUM_MOVE_BEFORE_LOG) {
shouldTrack = true;
stateToTrack = dragStartState;
}
}
// Clean up tracked position after drag ends
delete dragStartPositions[change.id];
}
});
if (Object.keys(dragStartPositions).length === 0) {
dragStartState = null;
}
}
set((state) => ({
nodes: applyNodeChanges(changes, state.nodes),
}));
if (shouldTrack && stateToTrack) {
useHistoryStore.getState().pushState(stateToTrack);
if (shouldTrack) {
useHistoryStore.getState().pushState(prevState);
}
},
@@ -202,11 +185,6 @@ export const useNodeStore = create<NodeStore>((set, get) => ({
hardcodedValues?: Record<string, any>,
position?: XYPosition,
) => {
const prevState = {
nodes: get().nodes,
edges: useEdgeStore.getState().edges,
};
const customNodeData = convertBlockInfoIntoCustomNodeData(
block,
hardcodedValues,
@@ -240,24 +218,21 @@ export const useNodeStore = create<NodeStore>((set, get) => ({
set((state) => ({
nodes: [...state.nodes, customNode],
}));
useHistoryStore.getState().pushState(prevState);
return customNode;
},
updateNodeData: (nodeId, data) => {
const prevState = {
nodes: get().nodes,
edges: useEdgeStore.getState().edges,
};
set((state) => ({
nodes: state.nodes.map((n) =>
n.id === nodeId ? { ...n, data: { ...n.data, ...data } } : n,
),
}));
useHistoryStore.getState().pushState(prevState);
const newState = {
nodes: get().nodes,
edges: useEdgeStore.getState().edges,
};
useHistoryStore.getState().pushState(newState);
},
toggleAdvanced: (nodeId: string) =>
set((state) => ({
@@ -416,11 +391,6 @@ export const useNodeStore = create<NodeStore>((set, get) => ({
},
setCredentialsOptional: (nodeId: string, optional: boolean) => {
const prevState = {
nodes: get().nodes,
edges: useEdgeStore.getState().edges,
};
set((state) => ({
nodes: state.nodes.map((n) =>
n.id === nodeId
@@ -438,7 +408,12 @@ export const useNodeStore = create<NodeStore>((set, get) => ({
),
}));
useHistoryStore.getState().pushState(prevState);
const newState = {
nodes: get().nodes,
edges: useEdgeStore.getState().edges,
};
useHistoryStore.getState().pushState(newState);
},
// Sub-agent resolution mode state

View File

@@ -1,16 +1,15 @@
import React from "react";
import { Text } from "@/components/atoms/Text/Text";
import { Button } from "@/components/atoms/Button/Button";
import { Card } from "@/components/atoms/Card/Card";
import { Text } from "@/components/atoms/Text/Text";
import { List, Robot, ArrowRight } from "@phosphor-icons/react";
import { cn } from "@/lib/utils";
import { ArrowRight, List, Robot } from "@phosphor-icons/react";
import Image from "next/image";
export interface Agent {
id: string;
name: string;
description: string;
version?: number;
image_url?: string;
}
export interface AgentCarouselMessageProps {
@@ -31,7 +30,7 @@ export function AgentCarouselMessage({
return (
<div
className={cn(
"mx-4 my-2 flex flex-col gap-4 rounded-lg border border-purple-200 bg-purple-50 p-6",
"mx-4 my-2 flex flex-col gap-4 rounded-lg border border-purple-200 bg-purple-50 p-6 dark:border-purple-900 dark:bg-purple-950",
className,
)}
>
@@ -41,10 +40,13 @@ export function AgentCarouselMessage({
<List size={24} weight="bold" className="text-white" />
</div>
<div>
<Text variant="h3" className="text-purple-900">
<Text variant="h3" className="text-purple-900 dark:text-purple-100">
Found {displayCount} {displayCount === 1 ? "Agent" : "Agents"}
</Text>
<Text variant="small" className="text-purple-700">
<Text
variant="small"
className="text-purple-700 dark:text-purple-300"
>
Select an agent to view details or run it
</Text>
</div>
@@ -55,49 +57,40 @@ export function AgentCarouselMessage({
{agents.map((agent) => (
<Card
key={agent.id}
className="border border-purple-200 bg-white p-4"
className="border border-purple-200 bg-white p-4 dark:border-purple-800 dark:bg-purple-900"
>
<div className="flex gap-3">
<div className="relative h-10 w-10 flex-shrink-0 overflow-hidden rounded-lg bg-purple-100">
{agent.image_url ? (
<Image
src={agent.image_url}
alt={`${agent.name} preview image`}
fill
className="object-cover"
/>
) : (
<div className="flex h-full w-full items-center justify-center">
<Robot
size={20}
weight="bold"
className="text-purple-600"
/>
</div>
)}
<div className="flex h-10 w-10 flex-shrink-0 items-center justify-center rounded-lg bg-purple-100 dark:bg-purple-800">
<Robot size={20} weight="bold" className="text-purple-600" />
</div>
<div className="flex-1 space-y-2">
<div>
<Text
variant="body"
className="font-semibold text-purple-900"
className="font-semibold text-purple-900 dark:text-purple-100"
>
{agent.name}
</Text>
{agent.version && (
<Text variant="small" className="text-purple-600">
<Text
variant="small"
className="text-purple-600 dark:text-purple-400"
>
v{agent.version}
</Text>
)}
</div>
<Text variant="small" className="line-clamp-2 text-purple-700">
<Text
variant="small"
className="line-clamp-2 text-purple-700 dark:text-purple-300"
>
{agent.description}
</Text>
{onSelectAgent && (
<Button
onClick={() => onSelectAgent(agent.id)}
variant="ghost"
className="mt-2 flex items-center gap-1 p-0 text-sm text-purple-600 hover:text-purple-800"
className="mt-2 flex items-center gap-1 p-0 text-sm text-purple-600 hover:text-purple-800 dark:text-purple-400 dark:hover:text-purple-200"
>
View details
<ArrowRight size={16} weight="bold" />
@@ -110,7 +103,10 @@ export function AgentCarouselMessage({
</div>
{totalCount && totalCount > agents.length && (
<Text variant="small" className="text-center text-purple-600">
<Text
variant="small"
className="text-center text-purple-600 dark:text-purple-400"
>
Showing {agents.length} of {totalCount} results
</Text>
)}

View File

@@ -1,9 +1,10 @@
"use client";
import { Button } from "@/components/atoms/Button/Button";
import { cn } from "@/lib/utils";
import { ShieldIcon, SignInIcon, UserPlusIcon } from "@phosphor-icons/react";
import React from "react";
import { useRouter } from "next/navigation";
import { Button } from "@/components/atoms/Button/Button";
import { SignInIcon, UserPlusIcon, ShieldIcon } from "@phosphor-icons/react";
import { cn } from "@/lib/utils";
export interface AuthPromptWidgetProps {
message: string;
@@ -53,8 +54,8 @@ export function AuthPromptWidget({
return (
<div
className={cn(
"my-4 overflow-hidden rounded-lg border border-violet-200",
"bg-gradient-to-br from-violet-50 to-purple-50",
"my-4 overflow-hidden rounded-lg border border-violet-200 dark:border-violet-800",
"bg-gradient-to-br from-violet-50 to-purple-50 dark:from-violet-950/30 dark:to-purple-950/30",
"duration-500 animate-in fade-in-50 slide-in-from-bottom-2",
className,
)}
@@ -65,19 +66,21 @@ export function AuthPromptWidget({
<ShieldIcon size={20} weight="fill" className="text-white" />
</div>
<div>
<h3 className="text-lg font-semibold text-neutral-900">
<h3 className="text-lg font-semibold text-neutral-900 dark:text-neutral-100">
Authentication Required
</h3>
<p className="text-sm text-neutral-600">
<p className="text-sm text-neutral-600 dark:text-neutral-400">
Sign in to set up and manage agents
</p>
</div>
</div>
<div className="mb-5 rounded-md bg-white/50 p-4">
<p className="text-sm text-neutral-700">{message}</p>
<div className="mb-5 rounded-md bg-white/50 p-4 dark:bg-neutral-900/50">
<p className="text-sm text-neutral-700 dark:text-neutral-300">
{message}
</p>
{agentInfo && (
<div className="mt-3 text-xs text-neutral-600">
<div className="mt-3 text-xs text-neutral-600 dark:text-neutral-400">
<p>
Ready to set up:{" "}
<span className="font-medium">{agentInfo.name}</span>
@@ -111,7 +114,7 @@ export function AuthPromptWidget({
</Button>
</div>
<div className="mt-4 text-center text-xs text-neutral-500">
<div className="mt-4 text-center text-xs text-neutral-500 dark:text-neutral-500">
Your chat session will be preserved after signing in
</div>
</div>

View File

@@ -1,134 +0,0 @@
"use client";
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import { cn } from "@/lib/utils";
import { List } from "@phosphor-icons/react";
import React, { useState } from "react";
import { ChatContainer } from "./components/ChatContainer/ChatContainer";
import { ChatErrorState } from "./components/ChatErrorState/ChatErrorState";
import { ChatLoadingState } from "./components/ChatLoadingState/ChatLoadingState";
import { SessionsDrawer } from "./components/SessionsDrawer/SessionsDrawer";
import { useChat } from "./useChat";
/** Props for the top-level {@link Chat} widget. */
export interface ChatProps {
  className?: string;
  /** Header title; a plain string is wrapped in a styled heading. */
  headerTitle?: React.ReactNode;
  showHeader?: boolean;
  showSessionInfo?: boolean;
  showNewChatButton?: boolean;
  /** Invoked after the current session has been cleared via "New Chat". */
  onNewChat?: () => void;
  /** Extra controls rendered on the right-hand side of the header. */
  headerActions?: React.ReactNode;
}

/**
 * Chat shell component.
 *
 * Renders the optional header (sessions-drawer toggle, title, "New Chat"
 * button, extra actions), then exactly one of three main states — loading,
 * error, or the active session's `ChatContainer` — all driven by the
 * `useChat` hook. A `SessionsDrawer` is always mounted for switching
 * between past sessions.
 */
export function Chat({
  className,
  headerTitle = "AutoGPT Copilot",
  showHeader = true,
  showSessionInfo = true,
  showNewChatButton = true,
  onNewChat,
  headerActions,
}: ChatProps) {
  const {
    messages,
    isLoading,
    isCreating,
    error,
    sessionId,
    createSession,
    clearSession,
    loadSession,
  } = useChat();
  const [isSessionsDrawerOpen, setIsSessionsDrawerOpen] = useState(false);

  // Clear the active session, then notify the embedding page (if it cares).
  const handleNewChat = () => {
    clearSession();
    onNewChat?.();
  };

  // Best-effort session switch; failures are logged, not surfaced — the
  // current session stays active if loading fails.
  const handleSelectSession = async (sessionId: string) => {
    try {
      await loadSession(sessionId);
    } catch (err) {
      console.error("Failed to load session:", err);
    }
  };

  return (
    <div className={cn("flex h-full flex-col", className)}>
      {/* Header */}
      {showHeader && (
        <header className="shrink-0 border-t border-zinc-200 bg-white p-3">
          <div className="flex items-center justify-between">
            <div className="flex items-center gap-3">
              <button
                aria-label="View sessions"
                onClick={() => setIsSessionsDrawerOpen(true)}
                className="flex size-8 items-center justify-center rounded hover:bg-zinc-100"
              >
                <List width="1.25rem" height="1.25rem" />
              </button>
              {/* Strings get the default heading style; nodes render as-is. */}
              {typeof headerTitle === "string" ? (
                <Text variant="h2" className="text-lg font-semibold">
                  {headerTitle}
                </Text>
              ) : (
                headerTitle
              )}
            </div>
            <div className="flex items-center gap-3">
              {/* "New Chat" only appears once a session exists and both
                  showSessionInfo and showNewChatButton are enabled. */}
              {showSessionInfo && sessionId && (
                <>
                  {showNewChatButton && (
                    <Button
                      variant="outline"
                      size="small"
                      onClick={handleNewChat}
                    >
                      New Chat
                    </Button>
                  )}
                </>
              )}
              {headerActions}
            </div>
          </div>
        </header>
      )}
      {/* Main Content */}
      <main className="flex min-h-0 flex-1 flex-col overflow-hidden">
        {/* Loading State - show when explicitly loading/creating OR when we don't have a session yet and no error */}
        {(isLoading || isCreating || (!sessionId && !error)) && (
          <ChatLoadingState
            message={isCreating ? "Creating session..." : "Loading..."}
          />
        )}
        {/* Error State */}
        {error && !isLoading && (
          <ChatErrorState error={error} onRetry={createSession} />
        )}
        {/* Session Content */}
        {sessionId && !isLoading && !error && (
          <ChatContainer
            sessionId={sessionId}
            initialMessages={messages}
            className="flex-1"
          />
        )}
      </main>
      {/* Sessions Drawer */}
      <SessionsDrawer
        isOpen={isSessionsDrawerOpen}
        onClose={() => setIsSessionsDrawerOpen(false)}
        onSelectSession={handleSelectSession}
        currentSessionId={sessionId}
      />
    </div>
  );
}

View File

@@ -1,246 +0,0 @@
"use client";
import { Button } from "@/components/atoms/Button/Button";
import { Card } from "@/components/atoms/Card/Card";
import { Text } from "@/components/atoms/Text/Text";
import { CredentialsInput } from "@/components/contextual/CredentialsInput/CredentialsInput";
import { RunAgentInputs } from "@/components/contextual/RunAgentInputs/RunAgentInputs";
import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import {
BlockIOCredentialsSubSchema,
BlockIOSubSchema,
} from "@/lib/autogpt-server-api/types";
import { cn, isEmpty } from "@/lib/utils";
import { PlayIcon, WarningIcon } from "@phosphor-icons/react";
import { useMemo } from "react";
import { useAgentInputsSetup } from "./useAgentInputsSetup";
// Extract the `properties` map from the generated LibraryAgent input schema,
// falling back to a generic record when the generated type doesn't narrow it.
type LibraryAgentInputSchemaProperties = LibraryAgent["input_schema"] extends {
  properties: infer P;
}
  ? P extends Record<string, BlockIOSubSchema>
    ? P
    : Record<string, BlockIOSubSchema>
  : Record<string, BlockIOSubSchema>;

// Same extraction for the credentials input schema.
type LibraryAgentCredentialsInputSchemaProperties =
  LibraryAgent["credentials_input_schema"] extends {
    properties: infer P;
  }
    ? P extends Record<string, BlockIOCredentialsSubSchema>
      ? P
      : Record<string, BlockIOCredentialsSubSchema>
    : Record<string, BlockIOCredentialsSubSchema>;

interface Props {
  agentName?: string;
  /** Either a full JSON schema object or a bare `properties` map. */
  inputSchema: LibraryAgentInputSchemaProperties | Record<string, any>;
  /** Either a full JSON schema object or a bare `properties` map. */
  credentialsSchema?:
    | LibraryAgentCredentialsInputSchemaProperties
    | Record<string, any>;
  /** Explanatory text shown above the form. */
  message: string;
  /** Overrides the schema's own `required` list for input fields when set. */
  requiredFields?: string[];
  /** Called with the merged input values and credential selections. */
  onRun: (
    inputs: Record<string, any>,
    credentials: Record<string, any>,
  ) => void;
  onCancel?: () => void;
  className?: string;
}

/**
 * Inline setup card: collects agent input values and credential selections,
 * and enables "Run Agent" only once every required field is filled.
 * Defaults declared in the schemas are merged under the user's entries.
 */
export function AgentInputsSetup({
  agentName,
  inputSchema,
  credentialsSchema,
  message,
  requiredFields,
  onRun,
  onCancel,
  className,
}: Props) {
  const { inputValues, setInputValue, credentialsValues, setCredentialsValue } =
    useAgentInputsSetup();

  // Normalize: accept either a full schema ({type, properties, required})
  // or a bare properties map. NOTE(review): detection requires both
  // "properties" AND "type" keys — a schema without "type" is treated as a
  // bare properties map; confirm that matches what callers pass.
  const inputSchemaObj = useMemo(() => {
    if (!inputSchema) return { properties: {}, required: [] };
    if ("properties" in inputSchema && "type" in inputSchema) {
      return inputSchema as {
        properties: Record<string, any>;
        required?: string[];
      };
    }
    return { properties: inputSchema as Record<string, any>, required: [] };
  }, [inputSchema]);

  // Same normalization for the credentials schema.
  const credentialsSchemaObj = useMemo(() => {
    if (!credentialsSchema) return { properties: {}, required: [] };
    if ("properties" in credentialsSchema && "type" in credentialsSchema) {
      return credentialsSchema as {
        properties: Record<string, any>;
        required?: string[];
      };
    }
    return {
      properties: credentialsSchema as Record<string, any>,
      required: [],
    };
  }, [credentialsSchema]);

  // Hide fields flagged `hidden` in their sub-schema.
  const agentInputFields = useMemo(() => {
    const properties = inputSchemaObj.properties || {};
    return Object.fromEntries(
      Object.entries(properties).filter(
        ([_, subSchema]: [string, any]) => !subSchema.hidden,
      ),
    );
  }, [inputSchemaObj]);

  const agentCredentialsInputFields = useMemo(() => {
    return credentialsSchemaObj.properties || {};
  }, [credentialsSchemaObj]);

  const inputFields = Object.entries(agentInputFields);
  const credentialFields = Object.entries(agentCredentialsInputFields);

  // Schema-declared defaults, merged under the user's entries below so an
  // untouched field still counts as "set" for the required-fields check.
  const defaultsFromSchema = useMemo(() => {
    const defaults: Record<string, any> = {};
    Object.entries(agentInputFields).forEach(([key, schema]) => {
      if ("default" in schema && schema.default !== undefined) {
        defaults[key] = schema.default;
      }
    });
    return defaults;
  }, [agentInputFields]);

  const defaultsFromCredentialsSchema = useMemo(() => {
    const defaults: Record<string, any> = {};
    Object.entries(agentCredentialsInputFields).forEach(([key, schema]) => {
      if ("default" in schema && schema.default !== undefined) {
        defaults[key] = schema.default;
      }
    });
    return defaults;
  }, [agentCredentialsInputFields]);

  const mergedInputValues = useMemo(() => {
    return { ...defaultsFromSchema, ...inputValues };
  }, [defaultsFromSchema, inputValues]);

  const mergedCredentialsValues = useMemo(() => {
    return { ...defaultsFromCredentialsSchema, ...credentialsValues };
  }, [defaultsFromCredentialsSchema, credentialsValues]);

  // All required inputs non-empty? `requiredFields` (if given) takes
  // precedence over the schema's own `required` list.
  const allRequiredInputsAreSet = useMemo(() => {
    const requiredInputs = new Set(
      requiredFields || (inputSchemaObj.required as string[]) || [],
    );
    const nonEmptyInputs = new Set(
      Object.keys(mergedInputValues).filter(
        (k) => !isEmpty(mergedInputValues[k]),
      ),
    );
    const missing = [...requiredInputs].filter(
      (input) => !nonEmptyInputs.has(input),
    );
    return missing.length === 0;
  }, [inputSchemaObj.required, mergedInputValues, requiredFields]);

  // A credential counts as set only when it carries an `id`.
  const allCredentialsAreSet = useMemo(() => {
    const requiredCredentials = new Set(
      (credentialsSchemaObj.required as string[]) || [],
    );
    if (requiredCredentials.size === 0) {
      return true;
    }
    const missing = [...requiredCredentials].filter((key) => {
      const cred = mergedCredentialsValues[key];
      return !cred || !cred.id;
    });
    return missing.length === 0;
  }, [credentialsSchemaObj.required, mergedCredentialsValues]);

  const canRun = allRequiredInputsAreSet && allCredentialsAreSet;

  // Guarded here as well as via the button's `disabled` prop.
  function handleRun() {
    if (canRun) {
      onRun(mergedInputValues, mergedCredentialsValues);
    }
  }

  return (
    <Card
      className={cn(
        "mx-4 my-2 overflow-hidden border-blue-200 bg-blue-50",
        className,
      )}
    >
      <div className="flex items-start gap-4 p-6">
        <div className="flex h-12 w-12 flex-shrink-0 items-center justify-center rounded-full bg-blue-500">
          <WarningIcon size={24} weight="bold" className="text-white" />
        </div>
        <div className="flex-1">
          <Text variant="h3" className="mb-2 text-blue-900">
            {agentName ? `Configure ${agentName}` : "Agent Configuration"}
          </Text>
          <Text variant="body" className="mb-4 text-blue-700">
            {message}
          </Text>
          {inputFields.length > 0 && (
            <div className="mb-4 space-y-4">
              {inputFields.map(([key, inputSubSchema]) => (
                <RunAgentInputs
                  key={key}
                  schema={inputSubSchema}
                  value={inputValues[key] ?? inputSubSchema.default}
                  placeholder={inputSubSchema.description}
                  onChange={(value) => setInputValue(key, value)}
                />
              ))}
            </div>
          )}
          {credentialFields.length > 0 && (
            <div className="mb-4 space-y-4">
              {credentialFields.map(([key, schema]) => {
                const requiredCredentials = new Set(
                  (credentialsSchemaObj.required as string[]) || [],
                );
                return (
                  <CredentialsInput
                    key={key}
                    schema={schema}
                    selectedCredentials={credentialsValues[key]}
                    onSelectCredentials={(value) =>
                      setCredentialsValue(key, value)
                    }
                    siblingInputs={mergedInputValues}
                    isOptional={!requiredCredentials.has(key)}
                  />
                );
              })}
            </div>
          )}
          <div className="flex gap-2">
            <Button
              variant="primary"
              size="small"
              onClick={handleRun}
              disabled={!canRun}
            >
              <PlayIcon className="mr-2 h-4 w-4" weight="bold" />
              Run Agent
            </Button>
            {onCancel && (
              <Button variant="outline" size="small" onClick={onCancel}>
                Cancel
              </Button>
            )}
          </div>
        </div>
      </div>
    </Card>
  );
}

View File

@@ -1,38 +0,0 @@
import type { CredentialsMetaInput } from "@/lib/autogpt-server-api/types";
import { useState } from "react";
/**
 * Local form state for the agent-setup card: plain input values plus
 * selected credentials, each keyed by schema field name.
 *
 * `setCredentialsValue(key, undefined)` removes the entry for `key`.
 */
export function useAgentInputsSetup() {
  const [inputValues, setInputValues] = useState<Record<string, any>>({});
  const [credentialsValues, setCredentialsValues] = useState<
    Record<string, CredentialsMetaInput>
  >({});

  // Set (or overwrite) a single input field.
  const setInputValue = (key: string, value: any) => {
    setInputValues((current) => ({ ...current, [key]: value }));
  };

  // Store a credential selection; a missing value clears the field.
  const setCredentialsValue = (key: string, value?: CredentialsMetaInput) => {
    setCredentialsValues((current) => {
      if (!value) {
        const { [key]: _removed, ...rest } = current;
        return rest;
      }
      return { ...current, [key]: value };
    });
  };

  return {
    inputValues,
    setInputValue,
    credentialsValues,
    setCredentialsValue,
  };
}

View File

@@ -1,88 +0,0 @@
import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse";
import { cn } from "@/lib/utils";
import { useCallback } from "react";
import { usePageContext } from "../../usePageContext";
import { ChatInput } from "../ChatInput/ChatInput";
import { MessageList } from "../MessageList/MessageList";
import { QuickActionsWelcome } from "../QuickActionsWelcome/QuickActionsWelcome";
import { useChatContainer } from "./useChatContainer";
export interface ChatContainerProps {
  /** Null while the session is still being created; input stays disabled. */
  sessionId: string | null;
  initialMessages: SessionDetailResponse["messages"];
  className?: string;
}

/**
 * Message area for one chat session: shows either a quick-actions welcome
 * screen (no messages yet) or the message list, with a fixed input bar at
 * the bottom. Every outgoing message is augmented with the current page
 * context captured via `usePageContext`.
 */
export function ChatContainer({
  sessionId,
  initialMessages,
  className,
}: ChatContainerProps) {
  const { messages, streamingChunks, isStreaming, sendMessage } =
    useChatContainer({
      sessionId,
      initialMessages,
    });
  const { capturePageContext } = usePageContext();

  // Wrap sendMessage to automatically capture page context
  const sendMessageWithContext = useCallback(
    async (content: string, isUserMessage: boolean = true) => {
      const context = capturePageContext();
      await sendMessage(content, isUserMessage, context);
    },
    [sendMessage, capturePageContext],
  );

  // Suggested prompts for the empty-session welcome screen.
  const quickActions = [
    "Find agents for social media management",
    "Show me agents for content creation",
    "Help me automate my business",
    "What can you help me with?",
  ];

  return (
    <div
      className={cn("flex h-full min-h-0 flex-col", className)}
      // Subtle dotted background pattern behind the message list.
      style={{
        backgroundColor: "#ffffff",
        backgroundImage:
          "radial-gradient(#e5e5e5 0.5px, transparent 0.5px), radial-gradient(#e5e5e5 0.5px, #ffffff 0.5px)",
        backgroundSize: "20px 20px",
        backgroundPosition: "0 0, 10px 10px",
      }}
    >
      {/* Messages or Welcome Screen */}
      <div className="flex min-h-0 flex-1 flex-col overflow-hidden pb-24">
        {messages.length === 0 ? (
          <QuickActionsWelcome
            title="Welcome to AutoGPT Copilot"
            description="Start a conversation to discover and run AI agents."
            actions={quickActions}
            onActionClick={sendMessageWithContext}
            disabled={isStreaming || !sessionId}
          />
        ) : (
          <MessageList
            messages={messages}
            streamingChunks={streamingChunks}
            isStreaming={isStreaming}
            onSendMessage={sendMessageWithContext}
            className="flex-1"
          />
        )}
      </div>
      {/* Input - Always visible */}
      <div className="fixed bottom-0 left-0 right-0 z-50 border-t border-zinc-200 bg-white p-4">
        <ChatInput
          onSend={sendMessageWithContext}
          disabled={isStreaming || !sessionId}
          placeholder={
            sessionId ? "Type your message..." : "Creating session..."
          }
        />
      </div>
    </div>
  );
}

View File

@@ -1,206 +0,0 @@
import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse";
import { useCallback, useMemo, useRef, useState } from "react";
import { toast } from "sonner";
import { useChatStream } from "../../useChatStream";
import type { ChatMessageData } from "../ChatMessage/useChatMessage";
import { createStreamEventDispatcher } from "./createStreamEventDispatcher";
import {
createUserMessage,
filterAuthMessages,
isToolCallArray,
isValidMessage,
parseToolResponse,
removePageContext,
} from "./helpers";
interface Args {
sessionId: string | null;
initialMessages: SessionDetailResponse["messages"];
}
export function useChatContainer({ sessionId, initialMessages }: Args) {
const [messages, setMessages] = useState<ChatMessageData[]>([]);
const [streamingChunks, setStreamingChunks] = useState<string[]>([]);
const [hasTextChunks, setHasTextChunks] = useState(false);
const [isStreamingInitiated, setIsStreamingInitiated] = useState(false);
const streamingChunksRef = useRef<string[]>([]);
const { error, sendMessage: sendStreamMessage } = useChatStream();
const isStreaming = isStreamingInitiated || hasTextChunks;
const allMessages = useMemo(() => {
const processedInitialMessages: ChatMessageData[] = [];
// Map to track tool calls by their ID so we can look up tool names for tool responses
const toolCallMap = new Map<string, string>();
for (const msg of initialMessages) {
if (!isValidMessage(msg)) {
console.warn("Invalid message structure from backend:", msg);
continue;
}
let content = String(msg.content || "");
const role = String(msg.role || "assistant").toLowerCase();
const toolCalls = msg.tool_calls;
const timestamp = msg.timestamp
? new Date(msg.timestamp as string)
: undefined;
// Remove page context from user messages when loading existing sessions
if (role === "user") {
content = removePageContext(content);
// Skip user messages that become empty after removing page context
if (!content.trim()) {
continue;
}
processedInitialMessages.push({
type: "message",
role: "user",
content,
timestamp,
});
continue;
}
// Handle assistant messages first (before tool messages) to build tool call map
if (role === "assistant") {
// Strip <thinking> tags from content
content = content
.replace(/<thinking>[\s\S]*?<\/thinking>/gi, "")
.trim();
// If assistant has tool calls, create tool_call messages for each
if (toolCalls && isToolCallArray(toolCalls) && toolCalls.length > 0) {
for (const toolCall of toolCalls) {
const toolName = toolCall.function.name;
const toolId = toolCall.id;
// Store tool name for later lookup
toolCallMap.set(toolId, toolName);
try {
const args = JSON.parse(toolCall.function.arguments || "{}");
processedInitialMessages.push({
type: "tool_call",
toolId,
toolName,
arguments: args,
timestamp,
});
} catch (err) {
console.warn("Failed to parse tool call arguments:", err);
processedInitialMessages.push({
type: "tool_call",
toolId,
toolName,
arguments: {},
timestamp,
});
}
}
// Only add assistant message if there's content after stripping thinking tags
if (content.trim()) {
processedInitialMessages.push({
type: "message",
role: "assistant",
content,
timestamp,
});
}
} else if (content.trim()) {
// Assistant message without tool calls, but with content
processedInitialMessages.push({
type: "message",
role: "assistant",
content,
timestamp,
});
}
continue;
}
// Handle tool messages - look up tool name from tool call map
if (role === "tool") {
const toolCallId = (msg.tool_call_id as string) || "";
const toolName = toolCallMap.get(toolCallId) || "unknown";
const toolResponse = parseToolResponse(
content,
toolCallId,
toolName,
timestamp,
);
if (toolResponse) {
processedInitialMessages.push(toolResponse);
}
continue;
}
// Handle other message types (system, etc.)
if (content.trim()) {
processedInitialMessages.push({
type: "message",
role: role as "user" | "assistant" | "system",
content,
timestamp,
});
}
}
return [...processedInitialMessages, ...messages];
}, [initialMessages, messages]);
const sendMessage = useCallback(
  async function sendMessage(
    content: string,
    isUserMessage: boolean = true,
    context?: { url: string; content: string },
  ) {
    // A message can only be delivered within an active chat session.
    if (!sessionId) {
      console.error("Cannot send message: no session ID");
      return;
    }

    // Drop any stale auth prompts; append the outgoing bubble only when the
    // content should show up as a user message.
    if (isUserMessage) {
      const outgoing = createUserMessage(content);
      setMessages((prev) => [...filterAuthMessages(prev), outgoing]);
    } else {
      setMessages(filterAuthMessages);
    }

    // Reset all streaming state ahead of the new response.
    streamingChunksRef.current = [];
    setStreamingChunks([]);
    setHasTextChunks(false);
    setIsStreamingInitiated(true);

    // The dispatcher routes each stream event into the pieces of state above.
    const dispatcher = createStreamEventDispatcher({
      setHasTextChunks,
      setStreamingChunks,
      streamingChunksRef,
      setMessages,
      sessionId,
      setIsStreamingInitiated,
    });

    try {
      await sendStreamMessage(
        sessionId,
        content,
        dispatcher,
        isUserMessage,
        context,
      );
    } catch (err) {
      console.error("Failed to send message:", err);
      // Streaming never started (or died); clear the flag and surface the error.
      setIsStreamingInitiated(false);
      toast.error("Failed to send message", {
        description:
          err instanceof Error ? err.message : "Failed to send message",
      });
    }
  },
  [sessionId, sendStreamMessage],
);
return {
messages: allMessages,
streamingChunks,
isStreaming,
error,
sendMessage,
};
}

View File

@@ -1,149 +0,0 @@
import { Text } from "@/components/atoms/Text/Text";
import { CredentialsInput } from "@/components/contextual/CredentialsInput/CredentialsInput";
import type { BlockIOCredentialsSubSchema } from "@/lib/autogpt-server-api";
import { cn } from "@/lib/utils";
import { CheckIcon, RobotIcon, WarningIcon } from "@phosphor-icons/react";
import { useEffect, useRef } from "react";
import { useChatCredentialsSetup } from "./useChatCredentialsSetup";
/** Describes one credential the user must configure before an agent can run. */
export interface CredentialInfo {
  /** Provider key (e.g. "github") — used to index the selected credentials. */
  provider: string;
  /** Human-readable provider name shown in the setup card header. */
  providerName: string;
  /** Kind of secret this provider expects. */
  credentialType: "api_key" | "oauth2" | "user_password" | "host_scoped";
  /** Display title for this credential requirement. */
  title: string;
  /** OAuth scopes to request, when applicable. */
  scopes?: string[];
}
/** Props for ChatCredentialsSetup. */
interface Props {
  /** All credentials that must be configured before the agent can proceed. */
  credentials: CredentialInfo[];
  /** Name of the agent requesting credentials (currently unused by the component). */
  agentName?: string;
  /** Explanatory message shown beneath the "Credentials Required" heading. */
  message: string;
  /** Invoked once when every listed credential has been configured. */
  onAllCredentialsComplete: () => void;
  /** Invoked when the user cancels setup (currently unused by the component). */
  onCancel: () => void;
  className?: string;
}
/**
 * Build a minimal credentials sub-schema for a single ad-hoc credential
 * requirement so the shared CredentialsInput component can be reused here.
 */
function createSchemaFromCredentialInfo(
  credential: CredentialInfo,
): BlockIOCredentialsSubSchema {
  const { provider, credentialType, scopes } = credential;
  return {
    type: "object",
    properties: {},
    credentials_provider: [provider],
    credentials_types: [credentialType],
    credentials_scopes: scopes,
    // No discriminated-union handling is needed for a single requirement.
    discriminator: undefined,
    discriminator_mapping: undefined,
    discriminator_values: undefined,
  };
}
/**
 * Inline chat card that walks the user through supplying every credential an
 * agent needs before it can run.
 *
 * Renders one CredentialsInput per required credential and automatically
 * calls `onAllCredentialsComplete` — exactly once per credentials list —
 * as soon as the setup hook reports that all of them are configured.
 *
 * NOTE(review): the `agentName`, `onCancel`, and `className` props are
 * accepted but never used in the body — confirm whether they should be
 * wired up or removed from Props.
 */
export function ChatCredentialsSetup({
  credentials,
  agentName: _agentName,
  message,
  onAllCredentialsComplete,
  onCancel: _onCancel,
}: Props) {
  const { selectedCredentials, isAllComplete, handleCredentialSelect } =
    useChatCredentialsSetup(credentials);

  // Track if we've already called completion to prevent double calls
  const hasCalledCompleteRef = useRef(false);

  // Reset the completion flag when credentials change (new credential setup flow)
  useEffect(
    function resetCompletionFlag() {
      hasCalledCompleteRef.current = false;
    },
    [credentials],
  );

  // Auto-call completion when all credentials are configured
  useEffect(
    function autoCompleteWhenReady() {
      if (isAllComplete && !hasCalledCompleteRef.current) {
        hasCalledCompleteRef.current = true;
        onAllCredentialsComplete();
      }
    },
    [isAllComplete, onAllCredentialsComplete],
  );

  return (
    <div className="group relative flex w-full justify-start gap-3 px-4 py-3">
      <div className="flex w-full max-w-3xl gap-3">
        {/* Assistant avatar, matching the regular chat-message layout. */}
        <div className="flex-shrink-0">
          <div className="flex h-7 w-7 items-center justify-center rounded-lg bg-indigo-500">
            <RobotIcon className="h-4 w-4 text-indigo-50" />
          </div>
        </div>
        <div className="flex min-w-0 flex-1 flex-col">
          <div className="group relative min-w-20 overflow-hidden rounded-xl border border-slate-100 bg-slate-50/20 px-6 py-2.5 text-sm leading-relaxed backdrop-blur-xl">
            <div className="absolute inset-0 bg-gradient-to-br from-slate-200/20 via-slate-300/10 to-transparent" />
            <div className="relative z-10 space-y-3 text-slate-900">
              <div>
                <Text variant="h4" className="mb-1 text-slate-900">
                  Credentials Required
                </Text>
                <Text variant="small" className="text-slate-600">
                  {message}
                </Text>
              </div>
              {/* One setup card per required credential; green styling marks completion. */}
              <div className="space-y-3">
                {credentials.map((cred, index) => {
                  const schema = createSchemaFromCredentialInfo(cred);
                  const isSelected = !!selectedCredentials[cred.provider];
                  return (
                    <div
                      key={`${cred.provider}-${index}`}
                      className={cn(
                        "relative rounded-lg border p-3",
                        isSelected
                          ? "border-green-500 bg-green-50/50"
                          : "border-slate-200 bg-white/50",
                      )}
                    >
                      <div className="mb-2 flex items-center gap-2">
                        {isSelected ? (
                          <CheckIcon
                            size={16}
                            className="text-green-500"
                            weight="bold"
                          />
                        ) : (
                          <WarningIcon
                            size={16}
                            className="text-slate-500"
                            weight="bold"
                          />
                        )}
                        <Text
                          variant="small"
                          className="font-semibold text-slate-900"
                        >
                          {cred.providerName}
                        </Text>
                      </div>
                      <CredentialsInput
                        schema={schema}
                        selectedCredentials={selectedCredentials[cred.provider]}
                        onSelectCredentials={(credMeta) =>
                          handleCredentialSelect(cred.provider, credMeta)
                        }
                      />
                    </div>
                  );
                })}
              </div>
            </div>
          </div>
        </div>
      </div>
    </div>
  );
}

View File

@@ -1,64 +0,0 @@
import { Input } from "@/components/atoms/Input/Input";
import { cn } from "@/lib/utils";
import { ArrowUpIcon } from "@phosphor-icons/react";
import { useChatInput } from "./useChatInput";
/** Props for ChatInput. */
export interface ChatInputProps {
  /** Called with the trimmed message when the user sends. */
  onSend: (message: string) => void;
  /** Disables both the textarea and the send button. */
  disabled?: boolean;
  /** Placeholder text for the input; defaults to "Type your message...". */
  placeholder?: string;
  className?: string;
}
/**
 * Auto-growing chat composer: a textarea (max 5 rows) with an overlaid
 * round send button. Enter sends, Shift+Enter inserts a newline (handled
 * by useChatInput via handleKeyDown); the send button is disabled while
 * `disabled` is set or the input is blank.
 */
export function ChatInput({
  onSend,
  disabled = false,
  placeholder = "Type your message...",
  className,
}: ChatInputProps) {
  const inputId = "chat-input";
  const { value, setValue, handleKeyDown, handleSend } = useChatInput({
    onSend,
    disabled,
    maxRows: 5,
    inputId,
  });

  return (
    <div className={cn("relative flex-1", className)}>
      <Input
        id={inputId}
        label="Chat message input"
        hideLabel
        type="textarea"
        value={value}
        onChange={(e) => setValue(e.target.value)}
        onKeyDown={handleKeyDown}
        placeholder={placeholder}
        disabled={disabled}
        rows={1}
        wrapperClassName="mb-0 relative"
        className="pr-12"
      />
      {/* Screen-reader-only usage hint for the keyboard shortcuts. */}
      <span id="chat-input-hint" className="sr-only">
        Press Enter to send, Shift+Enter for new line
      </span>
      {/* Send button floats over the right edge of the textarea. */}
      <button
        onClick={handleSend}
        disabled={disabled || !value.trim()}
        className={cn(
          "absolute right-3 top-1/2 flex h-8 w-8 -translate-y-1/2 items-center justify-center rounded-full",
          "border border-zinc-800 bg-zinc-800 text-white",
          "hover:border-zinc-900 hover:bg-zinc-900",
          "disabled:border-zinc-200 disabled:bg-zinc-200 disabled:text-white disabled:opacity-50",
          "transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-neutral-950",
          "disabled:pointer-events-none",
        )}
        aria-label="Send message"
      >
        <ArrowUpIcon className="h-3 w-3" weight="bold" />
      </button>
    </div>
  );
}

View File

@@ -1,19 +0,0 @@
import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
import { cn } from "@/lib/utils";
/** Props for ChatLoadingState. */
export interface ChatLoadingStateProps {
  /** Optional status text to accompany the spinner. */
  message?: string;
  className?: string;
}
export function ChatLoadingState({ className }: ChatLoadingStateProps) {
return (
<div
className={cn("flex flex-1 items-center justify-center p-6", className)}
>
<div className="flex flex-col items-center gap-4 text-center">
<LoadingSpinner />
</div>
</div>
);
}

View File

@@ -1,341 +0,0 @@
"use client";
import { useGetV2GetUserProfile } from "@/app/api/__generated__/endpoints/store/store";
import Avatar, {
AvatarFallback,
AvatarImage,
} from "@/components/atoms/Avatar/Avatar";
import { Button } from "@/components/atoms/Button/Button";
import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
import { cn } from "@/lib/utils";
import {
ArrowClockwise,
CheckCircleIcon,
CheckIcon,
CopyIcon,
RobotIcon,
} from "@phosphor-icons/react";
import { useRouter } from "next/navigation";
import { useCallback, useState } from "react";
import { getToolActionPhrase } from "../../helpers";
import { AgentCarouselMessage } from "../AgentCarouselMessage/AgentCarouselMessage";
import { AuthPromptWidget } from "../AuthPromptWidget/AuthPromptWidget";
import { ChatCredentialsSetup } from "../ChatCredentialsSetup/ChatCredentialsSetup";
import { ExecutionStartedMessage } from "../ExecutionStartedMessage/ExecutionStartedMessage";
import { MarkdownContent } from "../MarkdownContent/MarkdownContent";
import { MessageBubble } from "../MessageBubble/MessageBubble";
import { NoResultsMessage } from "../NoResultsMessage/NoResultsMessage";
import { ToolCallMessage } from "../ToolCallMessage/ToolCallMessage";
import { ToolResponseMessage } from "../ToolResponseMessage/ToolResponseMessage";
import { useChatMessage, type ChatMessageData } from "./useChatMessage";
/** Props for ChatMessage. */
export interface ChatMessageProps {
  /** The discriminated-union message to render. */
  message: ChatMessageData;
  className?: string;
  /** Dismisses a login prompt (currently unused by the component body). */
  onDismissLogin?: () => void;
  /** Dismisses the credentials-setup prompt once handled. */
  onDismissCredentials?: () => void;
  /** Sends a follow-up message; second arg marks it as a user message. */
  onSendMessage?: (content: string, isUserMessage?: boolean) => void;
  /** An agent_output tool response to fold into this assistant bubble. */
  agentOutput?: ChatMessageData;
}
/**
 * Renders a single chat message, dispatching on its discriminated `type`
 * (plus the role flags computed by useChatMessage) to the matching widget:
 * credential setup, auth prompt, tool call/response, carousel, execution
 * notice, or a regular user/assistant bubble.
 *
 * Tool responses of shape `{ type: "agent_output" }` are deliberately NOT
 * rendered standalone here — MessageList folds them into the preceding
 * assistant bubble via the `agentOutput` prop instead.
 */
export function ChatMessage({
  message,
  className,
  onDismissCredentials,
  onSendMessage,
  agentOutput,
}: ChatMessageProps) {
  const { user } = useSupabase();
  const router = useRouter();
  // Drives the transient checkmark state of the copy button (reverts after 2s).
  const [copied, setCopied] = useState(false);
  const {
    isUser,
    isToolCall,
    isToolResponse,
    isLoginNeeded,
    isCredentialsNeeded,
  } = useChatMessage(message);
  // Profile is fetched only for the user's own messages, to show their avatar.
  const { data: profile } = useGetV2GetUserProfile({
    query: {
      select: (res) => (res.status === 200 ? res.data : null),
      enabled: isUser && !!user,
      queryKey: ["/api/store/profile", user?.id],
    },
  });
  const handleAllCredentialsComplete = useCallback(
    function handleAllCredentialsComplete() {
      // Send a user message that explicitly asks to retry the setup
      // This ensures the LLM calls get_required_setup_info again and proceeds with execution
      if (onSendMessage) {
        onSendMessage(
          "I've configured the required credentials. Please check if everything is ready and proceed with setting up the agent.",
        );
      }
      // Optionally dismiss the credentials prompt
      if (onDismissCredentials) {
        onDismissCredentials();
      }
    },
    [onSendMessage, onDismissCredentials],
  );
  function handleCancelCredentials() {
    // Dismiss the credentials prompt
    if (onDismissCredentials) {
      onDismissCredentials();
    }
  }
  // Copy the message's raw text to the clipboard; only valid for plain messages.
  const handleCopy = useCallback(async () => {
    if (message.type !== "message") return;
    try {
      await navigator.clipboard.writeText(message.content);
      setCopied(true);
      setTimeout(() => setCopied(false), 2000);
    } catch (error) {
      console.error("Failed to copy:", error);
    }
  }, [message]);
  // Re-send this message verbatim (shown as a retry button on user messages).
  const handleTryAgain = useCallback(() => {
    if (message.type !== "message" || !onSendMessage) return;
    onSendMessage(message.content, message.role === "user");
  }, [message, onSendMessage]);
  // Navigate to the library agent page for a started execution, if linked.
  const handleViewExecution = useCallback(() => {
    if (message.type === "execution_started" && message.libraryAgentLink) {
      router.push(message.libraryAgentLink);
    }
  }, [message, router]);
  // Render credentials needed messages
  if (isCredentialsNeeded && message.type === "credentials_needed") {
    return (
      <ChatCredentialsSetup
        credentials={message.credentials}
        agentName={message.agentName}
        message={message.message}
        onAllCredentialsComplete={handleAllCredentialsComplete}
        onCancel={handleCancelCredentials}
        className={className}
      />
    );
  }
  // Render login needed messages
  if (isLoginNeeded && message.type === "login_needed") {
    // If user is already logged in, show success message instead of auth prompt
    if (user) {
      return (
        <div className={cn("px-4 py-2", className)}>
          <div className="my-4 overflow-hidden rounded-lg border border-green-200 bg-gradient-to-br from-green-50 to-emerald-50">
            <div className="px-6 py-4">
              <div className="flex items-center gap-3">
                <div className="flex h-10 w-10 items-center justify-center rounded-full bg-green-600">
                  <CheckCircleIcon
                    size={20}
                    weight="fill"
                    className="text-white"
                  />
                </div>
                <div>
                  <h3 className="text-lg font-semibold text-neutral-900">
                    Successfully Authenticated
                  </h3>
                  <p className="text-sm text-neutral-600">
                    You&apos;re now signed in and ready to continue
                  </p>
                </div>
              </div>
            </div>
          </div>
        </div>
      );
    }
    // Show auth prompt if not logged in
    return (
      <div className={cn("px-4 py-2", className)}>
        <AuthPromptWidget
          message={message.message}
          sessionId={message.sessionId}
          agentInfo={message.agentInfo}
        />
      </div>
    );
  }
  // Render tool call messages
  if (isToolCall && message.type === "tool_call") {
    return (
      <div className={cn("px-4 py-2", className)}>
        <ToolCallMessage toolName={message.toolName} />
      </div>
    );
  }
  // Render no_results messages - use dedicated component, not ToolResponseMessage
  if (message.type === "no_results") {
    return (
      <div className={cn("px-4 py-2", className)}>
        <NoResultsMessage
          message={message.message}
          suggestions={message.suggestions}
        />
      </div>
    );
  }
  // Render agent_carousel messages - use dedicated component, not ToolResponseMessage
  if (message.type === "agent_carousel") {
    return (
      <div className={cn("px-4 py-2", className)}>
        <AgentCarouselMessage
          agents={message.agents}
          totalCount={message.totalCount}
        />
      </div>
    );
  }
  // Render execution_started messages - use dedicated component, not ToolResponseMessage
  if (message.type === "execution_started") {
    return (
      <div className={cn("px-4 py-2", className)}>
        <ExecutionStartedMessage
          executionId={message.executionId}
          agentName={message.agentName}
          message={message.message}
          onViewExecution={
            message.libraryAgentLink ? handleViewExecution : undefined
          }
        />
      </div>
    );
  }
  // Render tool response messages (but skip agent_output if it's being rendered inside assistant message)
  if (isToolResponse && message.type === "tool_response") {
    // Check if this is an agent_output that should be rendered inside assistant message
    if (message.result) {
      // Result may arrive as a JSON string or an already-parsed object.
      let parsedResult: Record<string, unknown> | null = null;
      try {
        parsedResult =
          typeof message.result === "string"
            ? JSON.parse(message.result)
            : (message.result as Record<string, unknown>);
      } catch {
        parsedResult = null;
      }
      if (parsedResult?.type === "agent_output") {
        // Skip rendering - this will be rendered inside the assistant message
        return null;
      }
    }
    return (
      <div className={cn("px-4 py-2", className)}>
        <ToolResponseMessage
          toolName={getToolActionPhrase(message.toolName)}
          result={message.result}
        />
      </div>
    );
  }
  // Render regular chat messages
  if (message.type === "message") {
    return (
      <div
        className={cn(
          "group relative flex w-full gap-3 px-4 py-3",
          isUser ? "justify-end" : "justify-start",
          className,
        )}
      >
        <div className="flex w-full max-w-3xl gap-3">
          {/* Assistant avatar on the left for non-user messages. */}
          {!isUser && (
            <div className="flex-shrink-0">
              <div className="flex h-7 w-7 items-center justify-center rounded-lg bg-indigo-500">
                <RobotIcon className="h-4 w-4 text-indigo-50" />
              </div>
            </div>
          )}
          <div
            className={cn(
              "flex min-w-0 flex-1 flex-col",
              isUser && "items-end",
            )}
          >
            <MessageBubble variant={isUser ? "user" : "assistant"}>
              <MarkdownContent content={message.content} />
              {/* Fold the paired agent_output tool response into this bubble. */}
              {agentOutput &&
                agentOutput.type === "tool_response" &&
                !isUser && (
                  <div className="mt-4">
                    <ToolResponseMessage
                      toolName={
                        agentOutput.toolName
                          ? getToolActionPhrase(agentOutput.toolName)
                          : "Agent Output"
                      }
                      result={agentOutput.result}
                    />
                  </div>
                )}
            </MessageBubble>
            {/* Hover actions: retry (user messages only) and copy. */}
            <div
              className={cn(
                "mt-1 flex gap-1",
                isUser ? "justify-end" : "justify-start",
              )}
            >
              {isUser && onSendMessage && (
                <Button
                  variant="ghost"
                  size="icon"
                  onClick={handleTryAgain}
                  aria-label="Try again"
                >
                  <ArrowClockwise className="size-3 text-neutral-500" />
                </Button>
              )}
              <Button
                variant="ghost"
                size="icon"
                onClick={handleCopy}
                aria-label="Copy message"
              >
                {copied ? (
                  <CheckIcon className="size-3 text-green-600" />
                ) : (
                  <CopyIcon className="size-3 text-neutral-500" />
                )}
              </Button>
            </div>
          </div>
          {/* User avatar on the right, with username initial as fallback. */}
          {isUser && (
            <div className="flex-shrink-0">
              <Avatar className="h-7 w-7">
                <AvatarImage
                  src={profile?.avatar_url ?? ""}
                  alt={profile?.username ?? "User"}
                />
                <AvatarFallback className="rounded-lg bg-neutral-200 text-neutral-600">
                  {profile?.username?.charAt(0)?.toUpperCase() || "U"}
                </AvatarFallback>
              </Avatar>
            </div>
          )}
        </div>
      </div>
    );
  }
  // Fallback for unknown message types
  return null;
}

View File

@@ -1,56 +0,0 @@
import { cn } from "@/lib/utils";
import { ReactNode } from "react";
/** Props for MessageBubble. */
export interface MessageBubbleProps {
  /** Bubble content, rendered above the gradient flare layer. */
  children: ReactNode;
  /** Selects the dark (user) or light (assistant) colour theme. */
  variant: "user" | "assistant";
  className?: string;
}
/**
 * Frosted-glass bubble wrapper shared by user and assistant chat messages.
 * Applies a per-variant colour theme and layers a gradient flare behind the
 * content, which sits above it on a z-10 layer.
 */
export function MessageBubble({
  children,
  variant,
  className,
}: MessageBubbleProps) {
  // Colour themes keyed by variant: dark bubble for the user, light for the assistant.
  const themes = {
    user: {
      bg: "bg-slate-900",
      border: "border-slate-800",
      gradient: "from-slate-900/30 via-slate-800/20 to-transparent",
      text: "text-slate-50",
    },
    assistant: {
      bg: "bg-slate-50/20",
      border: "border-slate-100",
      gradient: "from-slate-200/20 via-slate-300/10 to-transparent",
      text: "text-slate-900",
    },
  } as const;
  const theme = themes[variant];

  return (
    <div
      className={cn(
        "group relative min-w-20 overflow-hidden rounded-xl border px-6 py-2.5 text-sm leading-relaxed backdrop-blur-xl transition-all duration-500 ease-in-out",
        theme.bg,
        theme.border,
        variant === "user" && "text-right",
        variant === "assistant" && "text-left",
        className,
      )}
    >
      {/* Gradient flare background */}
      <div
        className={cn("absolute inset-0 bg-gradient-to-br", theme.gradient)}
      />
      <div
        className={cn(
          "relative z-10 transition-all duration-500 ease-in-out",
          theme.text,
        )}
      >
        {children}
      </div>
    </div>
  );
}

View File

@@ -1,121 +0,0 @@
"use client";
import { cn } from "@/lib/utils";
import { ChatMessage } from "../ChatMessage/ChatMessage";
import type { ChatMessageData } from "../ChatMessage/useChatMessage";
import { StreamingMessage } from "../StreamingMessage/StreamingMessage";
import { ThinkingMessage } from "../ThinkingMessage/ThinkingMessage";
import { useMessageList } from "./useMessageList";
/** Props for MessageList. */
export interface MessageListProps {
  /** Persisted messages to render, in chronological order. */
  messages: ChatMessageData[];
  /** Text chunks of the in-flight assistant response, if any. */
  streamingChunks?: string[];
  /** Whether a response is currently streaming. */
  isStreaming?: boolean;
  className?: string;
  /** Invoked when the streaming message finishes animating. */
  onStreamComplete?: () => void;
  /** Forwarded to each ChatMessage for retry/follow-up actions. */
  onSendMessage?: (content: string) => void;
}
/**
 * Scrollable list of chat messages with auto-scroll, plus thinking and
 * streaming placeholders while a response is in flight.
 *
 * An "agent_output" tool response that directly follows an assistant message
 * is not rendered standalone; it is passed to that assistant's ChatMessage
 * via `agentOutput` and rendered inside the assistant bubble.
 *
 * Refactor: the try/JSON.parse agent_output detection was duplicated twice
 * in the render loop; it is now factored into local helpers (behavior
 * unchanged).
 */
export function MessageList({
  messages,
  streamingChunks = [],
  isStreaming = false,
  className,
  onStreamComplete,
  onSendMessage,
}: MessageListProps) {
  const { messagesEndRef, messagesContainerRef } = useMessageList({
    messageCount: messages.length,
    isStreaming,
  });

  // Parse a tool result into a plain record, tolerating both JSON strings
  // and already-parsed objects; returns null for invalid JSON.
  function tryParseRecord(result: unknown): Record<string, unknown> | null {
    try {
      return typeof result === "string"
        ? JSON.parse(result)
        : (result as Record<string, unknown>);
    } catch {
      return null;
    }
  }

  // True when the message is a tool_response whose payload is an agent_output.
  function isAgentOutput(message: ChatMessageData): boolean {
    return (
      message.type === "tool_response" &&
      !!message.result &&
      tryParseRecord(message.result)?.type === "agent_output"
    );
  }

  return (
    <div
      ref={messagesContainerRef}
      className={cn(
        "flex-1 overflow-y-auto",
        "scrollbar-thin scrollbar-track-transparent scrollbar-thumb-zinc-300",
        className,
      )}
    >
      <div className="mx-auto flex max-w-3xl flex-col py-4">
        {/* Render all persisted messages */}
        {messages.map((message, index) => {
          // Skip an agent_output that follows an assistant message — it is
          // rendered inside that assistant's bubble instead (see below).
          if (isAgentOutput(message)) {
            const prevMessage = messages[index - 1];
            if (
              prevMessage &&
              prevMessage.type === "message" &&
              prevMessage.role === "assistant"
            ) {
              return null;
            }
          }
          // Attach the next message's agent output to this assistant bubble.
          let agentOutput: ChatMessageData | undefined;
          if (message.type === "message" && message.role === "assistant") {
            const nextMessage = messages[index + 1];
            if (nextMessage && isAgentOutput(nextMessage)) {
              agentOutput = nextMessage;
            }
          }
          return (
            <ChatMessage
              key={index}
              message={message}
              onSendMessage={onSendMessage}
              agentOutput={agentOutput}
            />
          );
        })}
        {/* Render thinking message when streaming but no chunks yet */}
        {isStreaming && streamingChunks.length === 0 && <ThinkingMessage />}
        {/* Render streaming message if active */}
        {isStreaming && streamingChunks.length > 0 && (
          <StreamingMessage
            chunks={streamingChunks}
            onComplete={onStreamComplete}
          />
        )}
        {/* Invisible div to scroll to */}
        <div ref={messagesEndRef} />
      </div>
    </div>
  );
}

View File

@@ -1,94 +0,0 @@
"use client";
import { Text } from "@/components/atoms/Text/Text";
import { cn } from "@/lib/utils";
/** Props for QuickActionsWelcome. */
export interface QuickActionsWelcomeProps {
  /** Heading shown above the action grid. */
  title: string;
  /** Subheading shown under the title. */
  description: string;
  /** Prompt suggestions, one card each. */
  actions: string[];
  /** Invoked with the clicked action's text. */
  onActionClick: (action: string) => void;
  /** Disables every action card. */
  disabled?: boolean;
  className?: string;
}
/**
 * Empty-state welcome panel: a title, a description, and a grid of one-click
 * prompt suggestion cards. Clicking a card forwards its text to
 * `onActionClick`.
 *
 * Improvement: the slate card theme is identical for every action, so it is
 * hoisted out of the `.map` loop instead of being rebuilt per card
 * (loop-invariant; rendered output unchanged).
 */
export function QuickActionsWelcome({
  title,
  description,
  actions,
  onActionClick,
  disabled = false,
  className,
}: QuickActionsWelcomeProps) {
  // Use slate theme for all cards
  const theme = {
    bg: "bg-slate-50/10",
    border: "border-slate-100",
    hoverBg: "hover:bg-slate-50/20",
    hoverBorder: "hover:border-slate-200",
    gradient: "from-slate-200/20 via-slate-300/10 to-transparent",
    text: "text-slate-900",
    hoverText: "group-hover:text-slate-900",
  };

  return (
    <div
      className={cn("flex flex-1 items-center justify-center p-8", className)}
    >
      <div className="w-full max-w-3xl">
        <div className="mb-12 text-center">
          <Text
            variant="h2"
            className="mb-3 text-2xl font-semibold text-zinc-900"
          >
            {title}
          </Text>
          <Text variant="body" className="text-zinc-500">
            {description}
          </Text>
        </div>
        <div className="grid gap-3 sm:grid-cols-2">
          {actions.map((action) => (
            <button
              key={action}
              onClick={() => onActionClick(action)}
              disabled={disabled}
              className={cn(
                "group relative overflow-hidden rounded-xl border p-5 text-left backdrop-blur-xl",
                "transition-all duration-200",
                theme.bg,
                theme.border,
                theme.hoverBg,
                theme.hoverBorder,
                "hover:shadow-sm",
                "focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-white/50 focus-visible:ring-offset-2",
                "disabled:cursor-not-allowed disabled:opacity-50 disabled:hover:shadow-none",
              )}
            >
              {/* Gradient flare background */}
              <div
                className={cn(
                  "absolute inset-0 bg-gradient-to-br",
                  theme.gradient,
                )}
              />
              <Text
                variant="body"
                className={cn(
                  "relative z-10 font-medium",
                  theme.text,
                  theme.hoverText,
                )}
              >
                {action}
              </Text>
            </button>
          ))}
        </div>
      </div>
    </div>
  );
}

View File

@@ -1,136 +0,0 @@
"use client";
import { useGetV2ListSessions } from "@/app/api/__generated__/endpoints/chat/chat";
import { Text } from "@/components/atoms/Text/Text";
import { scrollbarStyles } from "@/components/styles/scrollbars";
import { cn } from "@/lib/utils";
import { X } from "@phosphor-icons/react";
import { formatDistanceToNow } from "date-fns";
import { Drawer } from "vaul";
/** Props for SessionsDrawer. */
interface SessionsDrawerProps {
  /** Whether the drawer is open; the session list is only fetched while open. */
  isOpen: boolean;
  /** Invoked when the drawer should close (overlay click or X button). */
  onClose: () => void;
  /** Invoked with the chosen session's id; the drawer then closes. */
  onSelectSession: (sessionId: string) => void;
  /** Highlights the currently active session in the list. */
  currentSessionId?: string | null;
}
/**
 * Right-hand slide-in drawer listing the user's chat sessions (up to 100).
 * Fetching is deferred until the drawer opens; selecting a session invokes
 * `onSelectSession` and closes the drawer.
 */
export function SessionsDrawer({
  isOpen,
  onClose,
  onSelectSession,
  currentSessionId,
}: SessionsDrawerProps) {
  // Only hit the API while the drawer is actually open.
  const { data, isLoading } = useGetV2ListSessions(
    { limit: 100 },
    {
      query: {
        enabled: isOpen,
      },
    },
  );

  const sessions =
    data?.status === 200
      ? data.data.sessions.filter((session) => {
          // Filter out sessions without messages (sessions that were never updated)
          // If updated_at equals created_at, the session was created but never had messages
          return session.updated_at !== session.created_at;
        })
      : [];

  function handleSelectSession(sessionId: string) {
    onSelectSession(sessionId);
    onClose();
  }

  return (
    <Drawer.Root
      open={isOpen}
      onOpenChange={(open) => !open && onClose()}
      direction="right"
    >
      <Drawer.Portal>
        <Drawer.Overlay className="fixed inset-0 z-[60] bg-black/10 backdrop-blur-sm" />
        <Drawer.Content
          className={cn(
            "fixed right-0 top-0 z-[70] flex h-full w-96 flex-col border-l border-zinc-200 bg-white",
            scrollbarStyles,
          )}
        >
          <div className="shrink-0 p-4">
            <div className="flex items-center justify-between">
              <Drawer.Title className="text-lg font-semibold">
                Chat Sessions
              </Drawer.Title>
              <button
                aria-label="Close"
                onClick={onClose}
                className="flex size-8 items-center justify-center rounded hover:bg-zinc-100"
              >
                <X width="1.25rem" height="1.25rem" />
              </button>
            </div>
          </div>
          <div className="flex-1 overflow-y-auto p-4">
            {isLoading ? (
              <div className="flex items-center justify-center py-8">
                <Text variant="body" className="text-zinc-500">
                  Loading sessions...
                </Text>
              </div>
            ) : sessions.length === 0 ? (
              <div className="flex items-center justify-center py-8">
                <Text variant="body" className="text-zinc-500">
                  No sessions found
                </Text>
              </div>
            ) : (
              <div className="space-y-2">
                {sessions.map((session) => {
                  const isActive = session.id === currentSessionId;
                  // Human-friendly relative age, e.g. "3 hours ago".
                  const updatedAt = session.updated_at
                    ? formatDistanceToNow(new Date(session.updated_at), {
                        addSuffix: true,
                      })
                    : "";
                  return (
                    <button
                      key={session.id}
                      onClick={() => handleSelectSession(session.id)}
                      className={cn(
                        "w-full rounded-lg border p-3 text-left transition-colors",
                        isActive
                          ? "border-indigo-500 bg-zinc-50"
                          : "border-zinc-200 bg-zinc-100/50 hover:border-zinc-300 hover:bg-zinc-50",
                      )}
                    >
                      <div className="flex flex-col gap-1">
                        <Text
                          variant="body"
                          className={cn(
                            "font-medium",
                            isActive ? "text-indigo-900" : "text-zinc-900",
                          )}
                        >
                          {session.title || "Untitled Chat"}
                        </Text>
                        <div className="flex items-center gap-2 text-xs text-zinc-500">
                          <span>{session.id.slice(0, 8)}...</span>
                          {/* NOTE(review): this separator span is empty — it looks
                              like a "•" glyph was lost in an encoding pass; confirm
                              and restore the intended separator character. */}
                          {updatedAt && <span></span>}
                          <span>{updatedAt}</span>
                        </div>
                      </div>
                    </button>
                  );
                })}
              </div>
            )}
          </div>
        </Drawer.Content>
      </Drawer.Portal>
    </Drawer.Root>
  );
}

View File

@@ -1,42 +0,0 @@
import { cn } from "@/lib/utils";
import { RobotIcon } from "@phosphor-icons/react";
import { MarkdownContent } from "../MarkdownContent/MarkdownContent";
import { MessageBubble } from "../MessageBubble/MessageBubble";
import { useStreamingMessage } from "./useStreamingMessage";
/** Props for StreamingMessage. */
export interface StreamingMessageProps {
  /** Accumulated text chunks of the in-flight assistant response. */
  chunks: string[];
  className?: string;
  /** Invoked by useStreamingMessage when the stream animation completes. */
  onComplete?: () => void;
}
/**
 * Assistant bubble for an in-flight response: joins the streamed chunks
 * (via useStreamingMessage) and renders the running text as markdown in
 * the standard assistant layout with the robot avatar.
 */
export function StreamingMessage({
  chunks,
  className,
  onComplete,
}: StreamingMessageProps) {
  const { displayText } = useStreamingMessage({ chunks, onComplete });

  return (
    <div
      className={cn(
        "group relative flex w-full justify-start gap-3 px-4 py-3",
        className,
      )}
    >
      <div className="flex w-full max-w-3xl gap-3">
        {/* Assistant avatar, matching the layout of persisted messages. */}
        <div className="flex-shrink-0">
          <div className="flex h-7 w-7 items-center justify-center rounded-lg bg-indigo-600">
            <RobotIcon className="h-4 w-4 text-indigo-50" />
          </div>
        </div>
        <div className="flex min-w-0 flex-1 flex-col">
          <MessageBubble variant="assistant">
            <MarkdownContent content={displayText} />
          </MessageBubble>
        </div>
      </div>
    </div>
  );
}

View File

@@ -1,70 +0,0 @@
import { cn } from "@/lib/utils";
import { RobotIcon } from "@phosphor-icons/react";
import { useEffect, useRef, useState } from "react";
import { MessageBubble } from "../MessageBubble/MessageBubble";
/** Props for ThinkingMessage. */
export interface ThinkingMessageProps {
  className?: string;
}
/**
 * Placeholder assistant bubble shown while waiting for the first streamed
 * chunk. Displays a shimmering "Thinking..." label, and after 8 seconds
 * swaps to a spinner with a "taking longer" note.
 */
export function ThinkingMessage({ className }: ThinkingMessageProps) {
  const [showSlowLoader, setShowSlowLoader] = useState(false);
  // Keep the timeout handle so it can be cleared on unmount.
  const timerRef = useRef<NodeJS.Timeout | null>(null);

  useEffect(() => {
    // Arm the slow-loader timer once; the guard prevents re-arming if the
    // effect runs again before cleanup cleared it.
    if (timerRef.current === null) {
      timerRef.current = setTimeout(() => {
        setShowSlowLoader(true);
      }, 8000);
    }
    return () => {
      if (timerRef.current) {
        clearTimeout(timerRef.current);
        timerRef.current = null;
      }
    };
  }, []);

  return (
    <div
      className={cn(
        "group relative flex w-full justify-start gap-3 px-4 py-3",
        className,
      )}
    >
      <div className="flex w-full max-w-3xl gap-3">
        <div className="flex-shrink-0">
          <div className="flex h-7 w-7 items-center justify-center rounded-lg bg-indigo-500">
            <RobotIcon className="h-4 w-4 text-indigo-50" />
          </div>
        </div>
        <div className="flex min-w-0 flex-1 flex-col">
          <MessageBubble variant="assistant">
            <div className="transition-all duration-500 ease-in-out">
              {showSlowLoader ? (
                <div className="flex flex-col items-center gap-3 py-2">
                  <div className="loader" style={{ flexShrink: 0 }} />
                  <p className="text-sm text-slate-700">
                    Taking a bit longer to think, wait a moment please
                  </p>
                </div>
              ) : (
                <span
                  className="inline-block bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-clip-text text-transparent"
                  style={{
                    backgroundSize: "200% 100%",
                    animation: "shimmer 2s ease-in-out infinite",
                  }}
                >
                  Thinking...
                </span>
              )}
            </div>
          </MessageBubble>
        </div>
      </div>
    </div>
  );
}

View File

@@ -1,24 +0,0 @@
import { Text } from "@/components/atoms/Text/Text";
import { cn } from "@/lib/utils";
import { WrenchIcon } from "@phosphor-icons/react";
import { getToolActionPhrase } from "../../helpers";
/** Props for ToolCallMessage. */
export interface ToolCallMessageProps {
  /** Raw tool name; rendered via getToolActionPhrase. */
  toolName: string;
  className?: string;
}
/**
 * Compact centered status line shown while a tool call is in progress:
 * a wrench icon plus the tool's action phrase followed by an ellipsis.
 */
export function ToolCallMessage({ toolName, className }: ToolCallMessageProps) {
  return (
    <div className={cn("flex items-center justify-center gap-2", className)}>
      <WrenchIcon
        size={14}
        weight="bold"
        className="flex-shrink-0 text-neutral-500"
      />
      <Text variant="small" className="text-neutral-500">
        {getToolActionPhrase(toolName)}...
      </Text>
    </div>
  );
}

View File

@@ -1,260 +0,0 @@
import { Text } from "@/components/atoms/Text/Text";
import "@/components/contextual/OutputRenderers";
import {
globalRegistry,
OutputItem,
} from "@/components/contextual/OutputRenderers";
import { cn } from "@/lib/utils";
import type { ToolResult } from "@/types/chat";
import { WrenchIcon } from "@phosphor-icons/react";
import { getToolActionPhrase } from "../../helpers";
/** Props for ToolResponseMessage. */
export interface ToolResponseMessageProps {
  /** Display name/phrase for the tool that produced this result. */
  toolName: string;
  /** Tool result; may be a JSON string or an already-parsed object. */
  result?: ToolResult;
  /** Currently unused by the component body. */
  success?: boolean;
  className?: string;
}
export function ToolResponseMessage({
toolName,
result,
success: _success = true,
className,
}: ToolResponseMessageProps) {
if (!result) {
return (
<div className={cn("flex items-center justify-center gap-2", className)}>
<WrenchIcon
size={14}
weight="bold"
className="flex-shrink-0 text-neutral-500"
/>
<Text variant="small" className="text-neutral-500">
{getToolActionPhrase(toolName)}...
</Text>
</div>
);
}
let parsedResult: Record<string, unknown> | null = null;
try {
parsedResult =
typeof result === "string"
? JSON.parse(result)
: (result as Record<string, unknown>);
} catch {
parsedResult = null;
}
if (parsedResult && typeof parsedResult === "object") {
const responseType = parsedResult.type as string | undefined;
if (responseType === "agent_output") {
const execution = parsedResult.execution as
| {
outputs?: Record<string, unknown[]>;
}
| null
| undefined;
const outputs = execution?.outputs || {};
const message = parsedResult.message as string | undefined;
return (
<div className={cn("space-y-4 px-4 py-2", className)}>
<div className="flex items-center gap-2">
<WrenchIcon
size={14}
weight="bold"
className="flex-shrink-0 text-neutral-500"
/>
<Text variant="small" className="text-neutral-500">
{getToolActionPhrase(toolName)}
</Text>
</div>
{message && (
<div className="rounded border p-4">
<Text variant="small" className="text-neutral-600">
{message}
</Text>
</div>
)}
{Object.keys(outputs).length > 0 && (
<div className="space-y-4">
{Object.entries(outputs).map(([outputName, values]) =>
values.map((value, index) => {
const renderer = globalRegistry.getRenderer(value);
if (renderer) {
return (
<OutputItem
key={`${outputName}-${index}`}
value={value}
renderer={renderer}
label={outputName}
/>
);
}
return (
<div
key={`${outputName}-${index}`}
className="rounded border p-4"
>
<Text variant="large-medium" className="mb-2 capitalize">
{outputName}
</Text>
<pre className="overflow-auto text-sm">
{JSON.stringify(value, null, 2)}
</pre>
</div>
);
}),
)}
</div>
)}
</div>
);
}
if (responseType === "block_output" && parsedResult.outputs) {
const outputs = parsedResult.outputs as Record<string, unknown[]>;
return (
<div className={cn("space-y-4 px-4 py-2", className)}>
<div className="flex items-center gap-2">
<WrenchIcon
size={14}
weight="bold"
className="flex-shrink-0 text-neutral-500"
/>
<Text variant="small" className="text-neutral-500">
{getToolActionPhrase(toolName)}
</Text>
</div>
<div className="space-y-4">
{Object.entries(outputs).map(([outputName, values]) =>
values.map((value, index) => {
const renderer = globalRegistry.getRenderer(value);
if (renderer) {
return (
<OutputItem
key={`${outputName}-${index}`}
value={value}
renderer={renderer}
label={outputName}
/>
);
}
return (
<div
key={`${outputName}-${index}`}
className="rounded border p-4"
>
<Text variant="large-medium" className="mb-2 capitalize">
{outputName}
</Text>
<pre className="overflow-auto text-sm">
{JSON.stringify(value, null, 2)}
</pre>
</div>
);
}),
)}
</div>
</div>
);
}
// Handle other response types with a message field (e.g., understanding_updated)
if (parsedResult.message && typeof parsedResult.message === "string") {
// Format tool name from snake_case to Title Case
const formattedToolName = toolName
.split("_")
.map((word) => word.charAt(0).toUpperCase() + word.slice(1))
.join(" ");
// Clean up message - remove incomplete user_name references
let cleanedMessage = parsedResult.message;
// Remove "Updated understanding with: user_name" pattern if user_name is just a placeholder
cleanedMessage = cleanedMessage.replace(
/Updated understanding with:\s*user_name\.?\s*/gi,
"",
);
// Remove standalone user_name references
cleanedMessage = cleanedMessage.replace(/\buser_name\b\.?\s*/gi, "");
cleanedMessage = cleanedMessage.trim();
// Only show message if it has content after cleaning
if (!cleanedMessage) {
return (
<div
className={cn(
"flex items-center justify-center gap-2 px-4 py-2",
className,
)}
>
<WrenchIcon
size={14}
weight="bold"
className="flex-shrink-0 text-neutral-500"
/>
<Text variant="small" className="text-neutral-500">
{formattedToolName}
</Text>
</div>
);
}
return (
<div className={cn("space-y-2 px-4 py-2", className)}>
<div className="flex items-center justify-center gap-2">
<WrenchIcon
size={14}
weight="bold"
className="flex-shrink-0 text-neutral-500"
/>
<Text variant="small" className="text-neutral-500">
{formattedToolName}
</Text>
</div>
<div className="rounded border p-4">
<Text variant="small" className="text-neutral-600">
{cleanedMessage}
</Text>
</div>
</div>
);
}
}
const renderer = globalRegistry.getRenderer(result);
if (renderer) {
return (
<div className={cn("px-4 py-2", className)}>
<div className="mb-2 flex items-center gap-2">
<WrenchIcon
size={14}
weight="bold"
className="flex-shrink-0 text-neutral-500"
/>
<Text variant="small" className="text-neutral-500">
{getToolActionPhrase(toolName)}
</Text>
</div>
<OutputItem value={result} renderer={renderer} />
</div>
);
}
return (
<div className={cn("flex items-center justify-center gap-2", className)}>
<WrenchIcon
size={14}
weight="bold"
className="flex-shrink-0 text-neutral-500"
/>
<Text variant="small" className="text-neutral-500">
{getToolActionPhrase(toolName)}...
</Text>
</div>
);
}

View File

@@ -1,17 +0,0 @@
"use client";
import { create } from "zustand";
interface ChatDrawerState {
  isOpen: boolean;
  open: () => void;
  close: () => void;
  toggle: () => void;
}

/**
 * Global open/closed state for the chat drawer, shared across components
 * via a zustand store. `toggle` flips the current state; `open`/`close`
 * set it explicitly.
 */
export const useChatDrawer = create<ChatDrawerState>((set) => {
  // Small helper so open/close share one code path.
  const setOpen = (isOpen: boolean) => set({ isOpen });
  return {
    isOpen: false,
    open: () => setOpen(true),
    close: () => setOpen(false),
    toggle: () => set((state) => ({ isOpen: !state.isOpen })),
  };
});

View File

@@ -1,371 +0,0 @@
import type { ToolArguments, ToolResult } from "@/types/chat";
import { useCallback, useEffect, useRef, useState } from "react";
import { toast } from "sonner";
// Reconnection policy for the SSE stream: up to MAX_RETRIES attempts with
// exponential backoff starting at INITIAL_RETRY_DELAY milliseconds
// (delay doubles on each successive retry).
const MAX_RETRIES = 3;
const INITIAL_RETRY_DELAY = 1000;
/**
 * A single event in the legacy chat SSE protocol. All payload fields are
 * optional because each event type populates only the fields it needs;
 * the index signature tolerates extra server-sent fields.
 */
export interface StreamChunk {
  // Discriminator for the event kind.
  type:
    | "text_chunk"
    | "text_ended"
    | "tool_call"
    | "tool_call_start"
    | "tool_response"
    | "login_needed"
    | "need_login"
    | "credentials_needed"
    | "error"
    | "usage"
    | "stream_end";
  timestamp?: string;
  // Text payload (text_chunk) or human-readable message (error, etc.).
  content?: string;
  message?: string;
  // Tool-call events: identity, arguments, and outcome.
  tool_id?: string;
  tool_name?: string;
  arguments?: ToolArguments;
  result?: ToolResult;
  success?: boolean;
  idx?: number;
  session_id?: string;
  // Metadata about an agent involved in the event, when applicable.
  agent_info?: {
    graph_id: string;
    name: string;
    trigger_type: string;
  };
  // Credential-request events (credentials_needed and friends).
  provider?: string;
  provider_name?: string;
  credential_type?: string;
  scopes?: string[];
  title?: string;
  // Forward-compatibility: keep unknown fields the server may add.
  [key: string]: unknown;
}
/**
 * Events in the Vercel AI SDK data-stream protocol. These are normalized
 * into legacy StreamChunk events by normalizeStreamChunk before being
 * handed to consumers.
 */
type VercelStreamChunk =
  // Stream lifecycle markers.
  | { type: "start"; messageId: string }
  | { type: "finish" }
  // Text deltas, bracketed by text-start / text-end.
  | { type: "text-start"; id: string }
  | { type: "text-delta"; id: string; delta: string }
  | { type: "text-end"; id: string }
  // Tool invocation: announced, then arguments available, then output.
  | { type: "tool-input-start"; toolCallId: string; toolName: string }
  | {
      type: "tool-input-available";
      toolCallId: string;
      toolName: string;
      input: ToolArguments;
    }
  | {
      type: "tool-output-available";
      toolCallId: string;
      toolName?: string;
      output: ToolResult;
      success?: boolean;
    }
  // Token accounting for the completed exchange.
  | {
      type: "usage";
      promptTokens: number;
      completionTokens: number;
      totalTokens: number;
    }
  | {
      type: "error";
      errorText: string;
      code?: string;
      details?: Record<string, unknown>;
    };
// Every event-type name used by the legacy (non-Vercel) SSE protocol.
const LEGACY_STREAM_TYPES = new Set<StreamChunk["type"]>([
  "text_chunk",
  "text_ended",
  "tool_call",
  "tool_call_start",
  "tool_response",
  "login_needed",
  "need_login",
  "credentials_needed",
  "error",
  "usage",
  "stream_end",
]);

/**
 * Type guard: true when the chunk uses one of the legacy event-type names
 * and can therefore be passed through without normalization.
 */
function isLegacyStreamChunk(
  chunk: StreamChunk | VercelStreamChunk,
): chunk is StreamChunk {
  const candidateType = chunk.type as StreamChunk["type"];
  return LEGACY_STREAM_TYPES.has(candidateType);
}
/**
 * Convert any incoming event into a legacy StreamChunk.
 *
 * Legacy chunks pass through untouched. Vercel data-stream events are
 * mapped onto their legacy equivalents; bookkeeping events with no
 * renderable payload ("start", "text-start", "tool-input-start") are
 * dropped by returning null.
 */
function normalizeStreamChunk(
  chunk: StreamChunk | VercelStreamChunk,
): StreamChunk | null {
  if (isLegacyStreamChunk(chunk)) {
    return chunk;
  }
  if (chunk.type === "text-delta") {
    return { type: "text_chunk", content: chunk.delta };
  }
  if (chunk.type === "text-end") {
    return { type: "text_ended" };
  }
  if (chunk.type === "tool-input-available") {
    return {
      type: "tool_call_start",
      tool_id: chunk.toolCallId,
      tool_name: chunk.toolName,
      arguments: chunk.input,
    };
  }
  if (chunk.type === "tool-output-available") {
    return {
      type: "tool_response",
      tool_id: chunk.toolCallId,
      tool_name: chunk.toolName,
      result: chunk.output,
      // Absent success flag is treated as success.
      success: chunk.success ?? true,
    };
  }
  if (chunk.type === "usage") {
    return {
      type: "usage",
      promptTokens: chunk.promptTokens,
      completionTokens: chunk.completionTokens,
      totalTokens: chunk.totalTokens,
    };
  }
  if (chunk.type === "error") {
    return {
      type: "error",
      message: chunk.errorText,
      code: chunk.code,
      details: chunk.details,
    };
  }
  if (chunk.type === "finish") {
    return { type: "stream_end" };
  }
  // "start", "text-start", "tool-input-start": nothing to surface.
  return null;
}
/**
 * React hook that streams chat responses over SSE.
 *
 * Exposes:
 * - `sendMessage(sessionId, message, onChunk, isUserMessage?, context?, isRetry?)`:
 *   POSTs to the session stream endpoint and invokes `onChunk` for every
 *   normalized StreamChunk. Resolves when the stream ends, rejects on a
 *   server-sent error or after retries are exhausted.
 * - `stopStreaming()`: aborts the in-flight request and cancels any
 *   pending retry timer.
 * - `isStreaming` / `error`: observable stream state.
 *
 * On network/read failures it retries up to MAX_RETRIES times with
 * exponential backoff before surfacing the error.
 */
export function useChatStream() {
  const [isStreaming, setIsStreaming] = useState(false);
  const [error, setError] = useState<Error | null>(null);
  // Refs (not state) so retry bookkeeping never triggers re-renders and
  // is readable synchronously inside the async read loop.
  const retryCountRef = useRef<number>(0);
  const retryTimeoutRef = useRef<NodeJS.Timeout | null>(null);
  const abortControllerRef = useRef<AbortController | null>(null);
  // Abort the active fetch and cancel a scheduled retry, then clear the flag.
  const stopStreaming = useCallback(() => {
    if (abortControllerRef.current) {
      abortControllerRef.current.abort();
      abortControllerRef.current = null;
    }
    if (retryTimeoutRef.current) {
      clearTimeout(retryTimeoutRef.current);
      retryTimeoutRef.current = null;
    }
    setIsStreaming(false);
  }, []);
  // Tear down any in-flight stream when the component unmounts.
  useEffect(() => {
    return () => {
      stopStreaming();
    };
  }, [stopStreaming]);
  const sendMessage = useCallback(
    async (
      sessionId: string,
      message: string,
      onChunk: (chunk: StreamChunk) => void,
      isUserMessage: boolean = true,
      context?: { url: string; content: string },
      isRetry: boolean = false,
    ) => {
      // Only one stream at a time: cancel whatever was running first.
      stopStreaming();
      const abortController = new AbortController();
      abortControllerRef.current = abortController;
      if (abortController.signal.aborted) {
        return Promise.reject(new Error("Request aborted"));
      }
      // A fresh user-initiated send resets the retry budget.
      if (!isRetry) {
        retryCountRef.current = 0;
      }
      setIsStreaming(true);
      setError(null);
      try {
        const url = `/api/chat/sessions/${sessionId}/stream`;
        const body = JSON.stringify({
          message,
          is_user_message: isUserMessage,
          context: context || null,
        });
        const response = await fetch(url, {
          method: "POST",
          headers: {
            "Content-Type": "application/json",
            Accept: "text/event-stream",
          },
          body,
          signal: abortController.signal,
        });
        if (!response.ok) {
          const errorText = await response.text();
          throw new Error(errorText || `HTTP ${response.status}`);
        }
        if (!response.body) {
          throw new Error("Response body is null");
        }
        const reader = response.body.getReader();
        const decoder = new TextDecoder();
        // Holds the trailing partial line between reads.
        let buffer = "";
        return new Promise<void>((resolve, reject) => {
          // Guarantee consumers see exactly one stream_end per stream,
          // whichever termination path fires.
          let didDispatchStreamEnd = false;
          function dispatchStreamEnd() {
            if (didDispatchStreamEnd) return;
            didDispatchStreamEnd = true;
            onChunk({ type: "stream_end" });
          }
          const cleanup = () => {
            reader.cancel().catch(() => {
              // Ignore cancel errors
            });
          };
          async function readStream() {
            try {
              while (true) {
                const { done, value } = await reader.read();
                if (done) {
                  cleanup();
                  dispatchStreamEnd();
                  retryCountRef.current = 0;
                  stopStreaming();
                  resolve();
                  return;
                }
                // Decode incrementally and split on newlines; the last,
                // possibly incomplete line is kept in `buffer`.
                buffer += decoder.decode(value, { stream: true });
                const lines = buffer.split("\n");
                buffer = lines.pop() || "";
                for (const line of lines) {
                  if (line.startsWith("data: ")) {
                    const data = line.slice(6);
                    // OpenAI-style sentinel marking end of stream.
                    if (data === "[DONE]") {
                      cleanup();
                      dispatchStreamEnd();
                      retryCountRef.current = 0;
                      stopStreaming();
                      resolve();
                      return;
                    }
                    try {
                      const rawChunk = JSON.parse(data) as
                        | StreamChunk
                        | VercelStreamChunk;
                      const chunk = normalizeStreamChunk(rawChunk);
                      if (!chunk) {
                        continue;
                      }
                      // Call the chunk handler
                      onChunk(chunk);
                      // Handle stream lifecycle
                      if (chunk.type === "stream_end") {
                        // The handler above already delivered it; just
                        // suppress the duplicate from dispatchStreamEnd.
                        didDispatchStreamEnd = true;
                        cleanup();
                        retryCountRef.current = 0;
                        stopStreaming();
                        resolve();
                        return;
                      } else if (chunk.type === "error") {
                        cleanup();
                        reject(
                          new Error(
                            chunk.message || chunk.content || "Stream error",
                          ),
                        );
                        return;
                      }
                    } catch (err) {
                      // Skip invalid JSON lines
                      console.warn("Failed to parse SSE chunk:", err, data);
                    }
                  }
                }
              }
            } catch (err) {
              // Deliberate aborts (stopStreaming/unmount) are not errors.
              if (err instanceof Error && err.name === "AbortError") {
                cleanup();
                return;
              }
              const streamError =
                err instanceof Error ? err : new Error("Failed to read stream");
              if (retryCountRef.current < MAX_RETRIES) {
                retryCountRef.current += 1;
                // Exponential backoff: 1s, 2s, 4s, ...
                const retryDelay =
                  INITIAL_RETRY_DELAY * Math.pow(2, retryCountRef.current - 1);
                toast.info("Connection interrupted", {
                  description: `Retrying in ${retryDelay / 1000} seconds...`,
                });
                retryTimeoutRef.current = setTimeout(() => {
                  // Re-send the same message with isRetry=true so the
                  // retry counter is preserved across attempts.
                  sendMessage(
                    sessionId,
                    message,
                    onChunk,
                    isUserMessage,
                    context,
                    true,
                  ).catch((_err) => {
                    // Retry failed
                  });
                }, retryDelay);
              } else {
                // Retry budget exhausted: surface the failure.
                setError(streamError);
                toast.error("Connection Failed", {
                  description:
                    "Unable to connect to chat service. Please try again.",
                });
                cleanup();
                dispatchStreamEnd();
                retryCountRef.current = 0;
                stopStreaming();
                reject(streamError);
              }
            }
          }
          readStream();
        });
      } catch (err) {
        // Failures before the read loop starts (fetch/HTTP errors) are
        // thrown synchronously to the caller; no retry is attempted here.
        const streamError =
          err instanceof Error ? err : new Error("Failed to start stream");
        setError(streamError);
        setIsStreaming(false);
        throw streamError;
      }
    },
    [stopStreaming],
  );
  return {
    isStreaming,
    error,
    sendMessage,
    stopStreaming,
  };
}

View File

@@ -1,98 +0,0 @@
import { useCallback } from "react";
// Snapshot of the page the user is viewing, attached to chat messages.
export interface PageContext {
  url: string; // current window.location.href
  content: string; // sanitized, whitespace-collapsed page text
}
// Hard cap on captured page text; longer content is truncated with a marker.
const MAX_CONTENT_CHARS = 10000;
/**
* Hook to capture the current page context (URL + full page content)
* Privacy-hardened: removes sensitive inputs and enforces content size limits
*/
export function usePageContext() {
  /**
   * Capture the current page's URL and sanitized text content.
   *
   * Works on a deep clone of the document so the live DOM is untouched.
   * Sanitization order matters: input/textarea values are cleared first,
   * whole forms and other sensitive elements are removed, then a final
   * sweep clears any inputs that survived. Returns empty strings when
   * running outside a browser (SSR).
   */
  const capturePageContext = useCallback((): PageContext => {
    if (typeof window === "undefined" || typeof document === "undefined") {
      return { url: "", content: "" };
    }
    const url = window.location.href;
    // Clone document to avoid modifying the original
    const clone = document.cloneNode(true) as Document;
    // Remove script, style, and noscript elements
    const scripts = clone.querySelectorAll("script, style, noscript");
    scripts.forEach((el) => el.remove());
    // Remove sensitive elements and their content
    // NOTE(review): the bare "input"/"textarea" selectors already match the
    // typed variants below; the extra selectors are redundant but harmless.
    const sensitiveSelectors = [
      "input",
      "textarea",
      "[contenteditable]",
      'input[type="password"]',
      'input[type="email"]',
      'input[type="tel"]',
      'input[type="search"]',
      'input[type="hidden"]',
      "form",
      "[data-sensitive]",
      "[data-sensitive='true']",
    ];
    sensitiveSelectors.forEach((selector) => {
      const elements = clone.querySelectorAll(selector);
      elements.forEach((el) => {
        // For form elements, remove the entire element
        if (el.tagName === "FORM") {
          el.remove();
        } else {
          // For inputs and textareas, clear their values but keep the element structure
          if (
            el instanceof HTMLInputElement ||
            el instanceof HTMLTextAreaElement
          ) {
            el.value = "";
            el.textContent = "";
          } else {
            // For other sensitive elements, remove them entirely
            el.remove();
          }
        }
      });
    });
    // Strip any remaining input values that might have been missed
    const allInputs = clone.querySelectorAll("input, textarea");
    allInputs.forEach((el) => {
      if (el instanceof HTMLInputElement || el instanceof HTMLTextAreaElement) {
        el.value = "";
        el.textContent = "";
      }
    });
    // Get text content from body
    const body = clone.body;
    const content = body?.textContent || body?.innerText || "";
    // Clean up whitespace
    let cleanedContent = content
      .replace(/\s+/g, " ")
      .replace(/\n\s*\n/g, "\n")
      .trim();
    // Enforce maximum content size
    if (cleanedContent.length > MAX_CONTENT_CHARS) {
      cleanedContent =
        cleanedContent.substring(0, MAX_CONTENT_CHARS) + "... [truncated]";
    }
    return {
      url,
      content: cleanedContent,
    };
  }, []);
  return { capturePageContext };
}

View File

@@ -0,0 +1,68 @@
import { cn } from "@/lib/utils";
import { ChatInput } from "@/app/(platform)/chat/components/ChatInput/ChatInput";
import { MessageList } from "@/app/(platform)/chat/components/MessageList/MessageList";
import { QuickActionsWelcome } from "@/app/(platform)/chat/components/QuickActionsWelcome/QuickActionsWelcome";
import { useChatContainer } from "./useChatContainer";
import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse";
// Props for the top-level chat view.
export interface ChatContainerProps {
  sessionId: string | null; // null until a session has been created
  initialMessages: SessionDetailResponse["messages"]; // history loaded from the backend
  onRefreshSession: () => Promise<void>; // re-fetch session data from the server
  className?: string;
}
/**
 * Top-level chat view: shows a quick-actions welcome screen while the
 * conversation is empty, the message list otherwise, and an always-visible
 * input pinned below.
 */
export function ChatContainer({
  sessionId,
  initialMessages,
  onRefreshSession,
  className,
}: ChatContainerProps) {
  const { messages, streamingChunks, isStreaming, sendMessage } =
    useChatContainer({
      sessionId,
      initialMessages,
      onRefreshSession,
    });

  // Suggested prompts offered on the empty-conversation welcome screen.
  const quickActions = [
    "Find agents for social media management",
    "Show me agents for content creation",
    "Help me automate my business",
    "What can you help me with?",
  ];

  // Sending is blocked while streaming or before the session exists.
  const inputDisabled = isStreaming || !sessionId;
  const hasMessages = messages.length > 0;

  return (
    <div className={cn("flex h-full flex-col", className)}>
      {hasMessages ? (
        <MessageList
          messages={messages}
          streamingChunks={streamingChunks}
          isStreaming={isStreaming}
          onSendMessage={sendMessage}
          className="flex-1"
        />
      ) : (
        <QuickActionsWelcome
          title="Welcome to AutoGPT Chat"
          description="Start a conversation to discover and run AI agents."
          actions={quickActions}
          onActionClick={sendMessage}
          disabled={inputDisabled}
        />
      )}
      {/* Input - Always visible */}
      <div className="border-t border-zinc-200 p-4 dark:border-zinc-800">
        <ChatInput
          onSend={sendMessage}
          disabled={inputDisabled}
          placeholder={
            sessionId ? "Type your message..." : "Creating session..."
          }
        />
      </div>
    </div>
  );
}

View File

@@ -1,14 +1,14 @@
import { toast } from "sonner";
import { StreamChunk } from "../../useChatStream";
import type { StreamChunk } from "@/app/(platform)/chat/useChatStream";
import type { HandlerDependencies } from "./useChatContainer.handlers";
import {
handleError,
handleLoginNeeded,
handleStreamEnd,
handleTextChunk,
handleTextEnded,
handleToolCallStart,
handleToolResponse,
handleLoginNeeded,
handleStreamEnd,
handleError,
} from "./useChatContainer.handlers";
export function createStreamEventDispatcher(

View File

@@ -1,24 +1,5 @@
import type { ChatMessageData } from "@/app/(platform)/chat/components/ChatMessage/useChatMessage";
import type { ToolResult } from "@/types/chat";
import type { ChatMessageData } from "../ChatMessage/useChatMessage";
/**
 * Strip automatically-injected page context from a stored user message.
 *
 * Drops any "Page URL: ..." line, then either keeps only the text after a
 * "User Message:" marker (when present) or removes everything from
 * "Page Content:" onward. Finally collapses excess blank lines and trims.
 */
export function removePageContext(content: string): string {
  const withoutUrl = content.replace(/^\s*Page URL:\s*[^\n\r]*/gim, "");
  const userMessageMatch = withoutUrl.match(/^\s*User Message:\s*([\s\S]*)$/im);
  const withoutPageContext = userMessageMatch
    ? // Keep only the actual user message that follows the marker.
      userMessageMatch[1]
    : // No marker: cut the page-content block (and anything after it).
      withoutUrl.replace(/^\s*Page Content:[\s\S]*$/gim, "");
  return withoutPageContext.replace(/\n\s*\n\s*\n+/g, "\n\n").trim();
}
export function createUserMessage(content: string): ChatMessageData {
return {
@@ -82,7 +63,6 @@ export function isAgentArray(value: unknown): value is Array<{
name: string;
description: string;
version?: number;
image_url?: string;
}> {
if (!Array.isArray(value)) {
return false;
@@ -97,8 +77,7 @@ export function isAgentArray(value: unknown): value is Array<{
typeof item.name === "string" &&
"description" in item &&
typeof item.description === "string" &&
(!("version" in item) || typeof item.version === "number") &&
(!("image_url" in item) || typeof item.image_url === "string"),
(!("version" in item) || typeof item.version === "number"),
);
}
@@ -253,7 +232,6 @@ export function isSetupInfo(value: unknown): value is {
export function extractCredentialsNeeded(
parsedResult: Record<string, unknown>,
toolName: string = "run_agent",
): ChatMessageData | null {
try {
const setupInfo = parsedResult?.setup_info as
@@ -266,7 +244,7 @@ export function extractCredentialsNeeded(
| Record<string, Record<string, unknown>>
| undefined;
if (missingCreds && Object.keys(missingCreds).length > 0) {
const agentName = (setupInfo?.agent_name as string) || "this block";
const agentName = (setupInfo?.agent_name as string) || "this agent";
const credentials = Object.values(missingCreds).map((credInfo) => ({
provider: (credInfo.provider as string) || "unknown",
providerName:
@@ -286,7 +264,7 @@ export function extractCredentialsNeeded(
}));
return {
type: "credentials_needed",
toolName,
toolName: "run_agent",
credentials,
message: `To run ${agentName}, you need to add ${credentials.length === 1 ? "credentials" : `${credentials.length} credentials`}.`,
agentName,
@@ -299,92 +277,3 @@ export function extractCredentialsNeeded(
return null;
}
}
/**
 * Build an "inputs_needed" chat message from a tool's setup_requirements
 * payload, or return null when the payload declares no inputs.
 *
 * Converts the flat `requirements.inputs` list into a JSON-schema-like
 * object and, when credential requirements are present, a parallel
 * credentials schema keyed by credential id. Never throws: extraction
 * failures are logged and reported as null.
 */
export function extractInputsNeeded(
  parsedResult: Record<string, unknown>,
  toolName: string = "run_agent",
): ChatMessageData | null {
  try {
    const setupInfo = parsedResult?.setup_info as
      | Record<string, unknown>
      | undefined;
    const requirements = setupInfo?.requirements as
      | Record<string, unknown>
      | undefined;
    const inputs = requirements?.inputs as
      | Array<Record<string, unknown>>
      | undefined;
    const credentials = requirements?.credentials as
      | Array<Record<string, unknown>>
      | undefined;
    if (!inputs || inputs.length === 0) {
      return null;
    }
    const agentName = (setupInfo?.agent_name as string) || "this agent";
    const agentId = parsedResult?.graph_id as string | undefined;
    const graphVersion = parsedResult?.graph_version as number | undefined;
    // Translate each declared input into a schema property entry.
    const properties: Record<string, any> = {};
    const requiredProps: string[] = [];
    for (const input of inputs) {
      const propName = input.name as string;
      if (!propName) continue;
      properties[propName] = {
        title: input.name as string,
        description: (input.description as string) || "",
        type: (input.type as string) || "string",
        default: input.default,
        enum: input.options,
        format: input.format,
      };
      if ((input.required as boolean) === true) {
        requiredProps.push(propName);
      }
    }
    const inputSchema: Record<string, any> = {
      type: "object",
      properties,
    };
    if (requiredProps.length > 0) {
      inputSchema.required = requiredProps;
    }
    // Credential requirements become a schema keyed by credential id.
    const credentialsSchema: Record<string, any> = {};
    for (const cred of credentials ?? []) {
      const credId = cred.id as string;
      if (!credId) continue;
      credentialsSchema[credId] = {
        type: "object",
        properties: {},
        credentials_provider: [cred.provider as string],
        credentials_types: [(cred.type as string) || "api_key"],
        credentials_scopes: cred.scopes as string[] | undefined,
      };
    }
    const hasCredentials = Object.keys(credentialsSchema).length > 0;
    return {
      type: "inputs_needed",
      toolName,
      agentName,
      agentId,
      graphVersion,
      inputSchema,
      credentialsSchema: hasCredentials ? credentialsSchema : undefined,
      message: `Please provide the required inputs to run ${agentName}.`,
      timestamp: new Date(),
    };
  } catch (err) {
    console.error("Failed to extract inputs from setup info:", err);
    return null;
  }
}

View File

@@ -1,18 +1,13 @@
import type { Dispatch, MutableRefObject, SetStateAction } from "react";
import { StreamChunk } from "../../useChatStream";
import type { ChatMessageData } from "../ChatMessage/useChatMessage";
import {
extractCredentialsNeeded,
extractInputsNeeded,
parseToolResponse,
} from "./helpers";
import type { Dispatch, SetStateAction, MutableRefObject } from "react";
import type { StreamChunk } from "@/app/(platform)/chat/useChatStream";
import type { ChatMessageData } from "@/app/(platform)/chat/components/ChatMessage/useChatMessage";
import { parseToolResponse, extractCredentialsNeeded } from "./helpers";
export interface HandlerDependencies {
setHasTextChunks: Dispatch<SetStateAction<boolean>>;
setStreamingChunks: Dispatch<SetStateAction<string[]>>;
streamingChunksRef: MutableRefObject<string[]>;
setMessages: Dispatch<SetStateAction<ChatMessageData[]>>;
setIsStreamingInitiated: Dispatch<SetStateAction<boolean>>;
sessionId: string;
}
@@ -105,18 +100,11 @@ export function handleToolResponse(
parsedResult = null;
}
if (
(chunk.tool_name === "run_agent" || chunk.tool_name === "run_block") &&
chunk.tool_name === "run_agent" &&
chunk.success &&
parsedResult?.type === "setup_requirements"
) {
const inputsMessage = extractInputsNeeded(parsedResult, chunk.tool_name);
if (inputsMessage) {
deps.setMessages((prev) => [...prev, inputsMessage]);
}
const credentialsMessage = extractCredentialsNeeded(
parsedResult,
chunk.tool_name,
);
const credentialsMessage = extractCredentialsNeeded(parsedResult);
if (credentialsMessage) {
deps.setMessages((prev) => [...prev, credentialsMessage]);
}
@@ -209,15 +197,10 @@ export function handleStreamEnd(
deps.setStreamingChunks([]);
deps.streamingChunksRef.current = [];
deps.setHasTextChunks(false);
deps.setIsStreamingInitiated(false);
console.log("[Stream End] Stream complete, messages in local state");
}
export function handleError(chunk: StreamChunk, deps: HandlerDependencies) {
export function handleError(chunk: StreamChunk, _deps: HandlerDependencies) {
const errorMessage = chunk.message || chunk.content || "An error occurred";
console.error("Stream error:", errorMessage);
deps.setIsStreamingInitiated(false);
deps.setHasTextChunks(false);
deps.setStreamingChunks([]);
deps.streamingChunksRef.current = [];
}

View File

@@ -0,0 +1,130 @@
import { useState, useCallback, useRef, useMemo } from "react";
import { toast } from "sonner";
import { useChatStream } from "@/app/(platform)/chat/useChatStream";
import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse";
import type { ChatMessageData } from "@/app/(platform)/chat/components/ChatMessage/useChatMessage";
import {
parseToolResponse,
isValidMessage,
isToolCallArray,
createUserMessage,
filterAuthMessages,
} from "./helpers";
import { createStreamEventDispatcher } from "./createStreamEventDispatcher";
// Arguments for the useChatContainer hook.
interface UseChatContainerArgs {
  sessionId: string | null; // null until the chat session exists
  initialMessages: SessionDetailResponse["messages"]; // history from the backend
  onRefreshSession: () => Promise<void>; // re-fetch session data on demand
}
/**
 * State/logic hook backing the chat view.
 *
 * Merges the backend-provided message history (normalized and filtered)
 * with messages accumulated locally during streaming, and exposes
 * `sendMessage` which streams the assistant response via useChatStream.
 *
 * NOTE(review): `onRefreshSession` is declared in UseChatContainerArgs but
 * not destructured or used here — confirm whether it is still needed.
 */
export function useChatContainer({
  sessionId,
  initialMessages,
}: UseChatContainerArgs) {
  // Messages added during this client session (user sends + stream results).
  const [messages, setMessages] = useState<ChatMessageData[]>([]);
  // Text deltas of the in-progress assistant reply, in arrival order.
  const [streamingChunks, setStreamingChunks] = useState<string[]>([]);
  const [hasTextChunks, setHasTextChunks] = useState(false);
  // Ref mirror of streamingChunks for synchronous access in stream handlers.
  const streamingChunksRef = useRef<string[]>([]);
  const { error, sendMessage: sendStreamMessage } = useChatStream();
  const isStreaming = hasTextChunks;
  // Backend history, validated and normalized, followed by local messages.
  const allMessages = useMemo(() => {
    const processedInitialMessages = initialMessages
      .filter((msg: Record<string, unknown>) => {
        if (!isValidMessage(msg)) {
          console.warn("Invalid message structure from backend:", msg);
          return false;
        }
        // Keep only messages that carry text or at least one tool call.
        const content = String(msg.content || "").trim();
        const toolCalls = msg.tool_calls;
        return (
          content.length > 0 ||
          (toolCalls && Array.isArray(toolCalls) && toolCalls.length > 0)
        );
      })
      .map((msg: Record<string, unknown>) => {
        const content = String(msg.content || "");
        const role = String(msg.role || "assistant").toLowerCase();
        const toolCalls = msg.tool_calls;
        // Assistant messages that only announce tool calls are dropped;
        // the corresponding "tool" role messages carry the results.
        if (
          role === "assistant" &&
          toolCalls &&
          isToolCallArray(toolCalls) &&
          toolCalls.length > 0
        ) {
          return null;
        }
        if (role === "tool") {
          const timestamp = msg.timestamp
            ? new Date(msg.timestamp as string)
            : undefined;
          const toolResponse = parseToolResponse(
            content,
            (msg.tool_call_id as string) || "",
            "unknown",
            timestamp,
          );
          if (!toolResponse) {
            return null;
          }
          return toolResponse;
        }
        return {
          type: "message",
          role: role as "user" | "assistant" | "system",
          content,
          timestamp: msg.timestamp
            ? new Date(msg.timestamp as string)
            : undefined,
        };
      })
      .filter((msg): msg is ChatMessageData => msg !== null);
    return [...processedInitialMessages, ...messages];
  }, [initialMessages, messages]);
  /**
   * Send a message on the current session and stream the response.
   * When isUserMessage is false the text is sent without appending a
   * user bubble (e.g. system-triggered continuations). Stale auth-related
   * messages are pruned before each send.
   */
  const sendMessage = useCallback(
    async function sendMessage(content: string, isUserMessage: boolean = true) {
      if (!sessionId) {
        console.error("Cannot send message: no session ID");
        return;
      }
      if (isUserMessage) {
        const userMessage = createUserMessage(content);
        setMessages((prev) => [...filterAuthMessages(prev), userMessage]);
      } else {
        setMessages((prev) => filterAuthMessages(prev));
      }
      // Reset streaming state for the new exchange.
      setStreamingChunks([]);
      streamingChunksRef.current = [];
      setHasTextChunks(false);
      const dispatcher = createStreamEventDispatcher({
        setHasTextChunks,
        setStreamingChunks,
        streamingChunksRef,
        setMessages,
        sessionId,
      });
      try {
        await sendStreamMessage(sessionId, content, dispatcher, isUserMessage);
      } catch (err) {
        console.error("Failed to send message:", err);
        const errorMessage =
          err instanceof Error ? err.message : "Failed to send message";
        toast.error("Failed to send message", {
          description: errorMessage,
        });
      }
    },
    [sessionId, sendStreamMessage],
  );
  return {
    messages: allMessages,
    streamingChunks,
    isStreaming,
    error,
    sendMessage,
  };
}

View File

@@ -0,0 +1,153 @@
import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInput";
import { Card } from "@/components/atoms/Card/Card";
import { Text } from "@/components/atoms/Text/Text";
import type { BlockIOCredentialsSubSchema } from "@/lib/autogpt-server-api";
import { cn } from "@/lib/utils";
import { CheckIcon, KeyIcon, WarningIcon } from "@phosphor-icons/react";
import { useEffect, useRef } from "react";
import { useChatCredentialsSetup } from "./useChatCredentialsSetup";
// One credential the user must configure before an agent can run.
export interface CredentialInfo {
  provider: string; // provider key used to group/select credentials
  providerName: string; // human-readable provider label
  credentialType: "api_key" | "oauth2" | "user_password" | "host_scoped";
  title: string;
  scopes?: string[]; // OAuth scopes, when applicable
}
// Props for the inline credentials-setup card shown in the chat.
interface Props {
  credentials: CredentialInfo[];
  agentName?: string;
  message: string; // explanatory text rendered above the credential list
  onAllCredentialsComplete: () => void; // fired once when every credential is set
  onCancel: () => void;
  className?: string;
}
/**
 * Wrap a single CredentialInfo in the credentials sub-schema shape that
 * CredentialsInput expects. Discriminator fields are explicitly undefined
 * since a standalone credential has no discriminated variants.
 */
function createSchemaFromCredentialInfo(
  credential: CredentialInfo,
): BlockIOCredentialsSubSchema {
  const { provider, credentialType, scopes } = credential;
  return {
    type: "object",
    properties: {},
    credentials_provider: [provider],
    credentials_types: [credentialType],
    credentials_scopes: scopes,
    discriminator: undefined,
    discriminator_mapping: undefined,
    discriminator_values: undefined,
  };
}
/**
 * Inline card prompting the user to configure all credentials an agent
 * needs. Each credential renders a CredentialsInput; once every provider
 * has a selection, onAllCredentialsComplete fires exactly once (guarded
 * by a ref so re-renders don't repeat it).
 */
export function ChatCredentialsSetup({
  credentials,
  agentName: _agentName,
  message,
  onAllCredentialsComplete,
  onCancel: _onCancel,
  className,
}: Props) {
  const { selectedCredentials, isAllComplete, handleCredentialSelect } =
    useChatCredentialsSetup(credentials);
  // Track if we've already called completion to prevent double calls
  const hasCalledCompleteRef = useRef(false);
  // Reset the completion flag when credentials change (new credential setup flow)
  useEffect(
    function resetCompletionFlag() {
      hasCalledCompleteRef.current = false;
    },
    [credentials],
  );
  // Auto-call completion when all credentials are configured
  useEffect(
    function autoCompleteWhenReady() {
      if (isAllComplete && !hasCalledCompleteRef.current) {
        hasCalledCompleteRef.current = true;
        onAllCredentialsComplete();
      }
    },
    [isAllComplete, onAllCredentialsComplete],
  );
  return (
    <Card
      className={cn(
        "mx-4 my-2 overflow-hidden border-orange-200 bg-orange-50 dark:border-orange-900 dark:bg-orange-950",
        className,
      )}
    >
      <div className="flex items-start gap-4 p-6">
        <div className="flex h-12 w-12 flex-shrink-0 items-center justify-center rounded-full bg-orange-500">
          <KeyIcon size={24} weight="bold" className="text-white" />
        </div>
        <div className="flex-1">
          <Text
            variant="h3"
            className="mb-2 text-orange-900 dark:text-orange-100"
          >
            Credentials Required
          </Text>
          <Text
            variant="body"
            className="mb-4 text-orange-700 dark:text-orange-300"
          >
            {message}
          </Text>
          <div className="space-y-3">
            {credentials.map((cred, index) => {
              const schema = createSchemaFromCredentialInfo(cred);
              // A credential counts as configured once a selection exists
              // for its provider.
              const isSelected = !!selectedCredentials[cred.provider];
              return (
                <div
                  key={`${cred.provider}-${index}`}
                  className={cn(
                    "relative rounded-lg border border-orange-200 bg-white p-4 dark:border-orange-800 dark:bg-orange-900/20",
                    isSelected &&
                      "border-green-500 bg-green-50 dark:border-green-700 dark:bg-green-950/30",
                  )}
                >
                  <div className="mb-2 flex items-center justify-between">
                    <div className="flex items-center gap-2">
                      {/* Check mark when configured, warning otherwise. */}
                      {isSelected ? (
                        <CheckIcon
                          size={20}
                          className="text-green-500"
                          weight="bold"
                        />
                      ) : (
                        <WarningIcon
                          size={20}
                          className="text-orange-500"
                          weight="bold"
                        />
                      )}
                      <Text
                        variant="body"
                        className="font-semibold text-orange-900 dark:text-orange-100"
                      >
                        {cred.providerName}
                      </Text>
                    </div>
                  </div>
                  <CredentialsInput
                    schema={schema}
                    selectedCredentials={selectedCredentials[cred.provider]}
                    onSelectCredentials={(credMeta) =>
                      handleCredentialSelect(cred.provider, credMeta)
                    }
                  />
                </div>
              );
            })}
          </div>
        </div>
      </div>
    </Card>
  );
}

View File

@@ -0,0 +1,63 @@
import { cn } from "@/lib/utils";
import { PaperPlaneRightIcon } from "@phosphor-icons/react";
import { Button } from "@/components/atoms/Button/Button";
import { useChatInput } from "./useChatInput";
// Props for the chat message input row.
export interface ChatInputProps {
  onSend: (message: string) => void; // called with the trimmed message text
  disabled?: boolean;
  placeholder?: string;
  className?: string;
}
/**
 * Auto-growing chat input with a send button. Enter sends, Shift+Enter
 * inserts a newline (handled inside useChatInput); the send button is
 * disabled while the field is empty or the input is disabled.
 */
export function ChatInput({
  onSend,
  disabled = false,
  placeholder = "Type your message...",
  className,
}: ChatInputProps) {
  const { value, setValue, handleKeyDown, handleSend, textareaRef } =
    useChatInput({
      onSend,
      disabled,
      maxRows: 5,
    });

  // Visual style for the auto-growing textarea.
  const textareaClassName = cn(
    "flex-1 resize-none rounded-lg border border-neutral-200 bg-white px-4 py-2 text-sm",
    "placeholder:text-neutral-400",
    "focus:border-violet-600 focus:outline-none focus:ring-2 focus:ring-violet-600/20",
    "dark:border-neutral-800 dark:bg-neutral-900 dark:text-neutral-100 dark:placeholder:text-neutral-500",
    "disabled:cursor-not-allowed disabled:opacity-50",
  );
  const sendDisabled = disabled || !value.trim();

  return (
    <div className={cn("flex gap-2", className)}>
      <textarea
        ref={textareaRef}
        value={value}
        onChange={(event) => setValue(event.target.value)}
        onKeyDown={handleKeyDown}
        placeholder={placeholder}
        disabled={disabled}
        rows={1}
        autoComplete="off"
        aria-label="Chat message input"
        aria-describedby="chat-input-hint"
        className={textareaClassName}
      />
      <span id="chat-input-hint" className="sr-only">
        Press Enter to send, Shift+Enter for new line
      </span>
      <Button
        variant="primary"
        size="small"
        onClick={handleSend}
        disabled={sendDisabled}
        className="self-end"
        aria-label="Send message"
      >
        <PaperPlaneRightIcon className="h-4 w-4" weight="fill" />
      </Button>
    </div>
  );
}

View File

@@ -1,22 +1,21 @@
import { KeyboardEvent, useCallback, useEffect, useState } from "react";
import { KeyboardEvent, useCallback, useState, useRef, useEffect } from "react";
interface UseChatInputArgs {
onSend: (message: string) => void;
disabled?: boolean;
maxRows?: number;
inputId?: string;
}
export function useChatInput({
onSend,
disabled = false,
maxRows = 5,
inputId = "chat-input",
}: UseChatInputArgs) {
const [value, setValue] = useState("");
const textareaRef = useRef<HTMLTextAreaElement>(null);
useEffect(() => {
const textarea = document.getElementById(inputId) as HTMLTextAreaElement;
const textarea = textareaRef.current;
if (!textarea) return;
textarea.style.height = "auto";
const lineHeight = parseInt(
@@ -28,25 +27,23 @@ export function useChatInput({
textarea.style.height = `${newHeight}px`;
textarea.style.overflowY =
textarea.scrollHeight > maxHeight ? "auto" : "hidden";
}, [value, maxRows, inputId]);
}, [value, maxRows]);
const handleSend = useCallback(() => {
if (disabled || !value.trim()) return;
onSend(value.trim());
setValue("");
const textarea = document.getElementById(inputId) as HTMLTextAreaElement;
if (textarea) {
textarea.style.height = "auto";
if (textareaRef.current) {
textareaRef.current.style.height = "auto";
}
}, [value, onSend, disabled, inputId]);
}, [value, onSend, disabled]);
const handleKeyDown = useCallback(
(event: KeyboardEvent<HTMLInputElement | HTMLTextAreaElement>) => {
(event: KeyboardEvent<HTMLTextAreaElement>) => {
if (event.key === "Enter" && !event.shiftKey) {
event.preventDefault();
handleSend();
}
// Shift+Enter allows default behavior (new line) - no need to handle explicitly
},
[handleSend],
);
@@ -56,5 +53,6 @@ export function useChatInput({
setValue,
handleKeyDown,
handleSend,
textareaRef,
};
}

View File

@@ -0,0 +1,31 @@
import React from "react";
import { Text } from "@/components/atoms/Text/Text";
import { ArrowClockwiseIcon } from "@phosphor-icons/react";
import { cn } from "@/lib/utils";
export interface ChatLoadingStateProps {
message?: string;
className?: string;
}
export function ChatLoadingState({
message = "Loading...",
className,
}: ChatLoadingStateProps) {
return (
<div
className={cn("flex flex-1 items-center justify-center p-6", className)}
>
<div className="flex flex-col items-center gap-4 text-center">
<ArrowClockwiseIcon
size={32}
weight="bold"
className="animate-spin text-purple-500"
/>
<Text variant="body" className="text-zinc-600 dark:text-zinc-400">
{message}
</Text>
</div>
</div>
);
}

View File

@@ -0,0 +1,194 @@
"use client";
import { cn } from "@/lib/utils";
import { RobotIcon, UserIcon, CheckCircleIcon } from "@phosphor-icons/react";
import { useCallback } from "react";
import { MessageBubble } from "@/app/(platform)/chat/components/MessageBubble/MessageBubble";
import { MarkdownContent } from "@/app/(platform)/chat/components/MarkdownContent/MarkdownContent";
import { ToolCallMessage } from "@/app/(platform)/chat/components/ToolCallMessage/ToolCallMessage";
import { ToolResponseMessage } from "@/app/(platform)/chat/components/ToolResponseMessage/ToolResponseMessage";
import { AuthPromptWidget } from "@/app/(platform)/chat/components/AuthPromptWidget/AuthPromptWidget";
import { ChatCredentialsSetup } from "@/app/(platform)/chat/components/ChatCredentialsSetup/ChatCredentialsSetup";
import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
import { useChatMessage, type ChatMessageData } from "./useChatMessage";
import { getToolActionPhrase } from "@/app/(platform)/chat/helpers";
export interface ChatMessageProps {
message: ChatMessageData;
className?: string;
onDismissLogin?: () => void;
onDismissCredentials?: () => void;
onSendMessage?: (content: string, isUserMessage?: boolean) => void;
}
/**
 * Renders one chat message, dispatching on `message.type`:
 * credentials setup, login prompt (or a success card when already signed
 * in), tool call/response status cards, or a regular user/assistant
 * bubble. Returns null for unknown message types.
 */
export function ChatMessage({
  message,
  className,
  onDismissCredentials,
  onSendMessage,
}: ChatMessageProps) {
  const { user } = useSupabase();
  // Derived flags and formatted timestamp for the current message.
  const {
    formattedTimestamp,
    isUser,
    isAssistant,
    isToolCall,
    isToolResponse,
    isLoginNeeded,
    isCredentialsNeeded,
  } = useChatMessage(message);
  const handleAllCredentialsComplete = useCallback(
    function handleAllCredentialsComplete() {
      // Send a user message that explicitly asks to retry the setup
      // This ensures the LLM calls get_required_setup_info again and proceeds with execution
      if (onSendMessage) {
        onSendMessage(
          "I've configured the required credentials. Please check if everything is ready and proceed with setting up the agent.",
        );
      }
      // Optionally dismiss the credentials prompt
      if (onDismissCredentials) {
        onDismissCredentials();
      }
    },
    [onSendMessage, onDismissCredentials],
  );
  function handleCancelCredentials() {
    // Dismiss the credentials prompt
    if (onDismissCredentials) {
      onDismissCredentials();
    }
  }
  // Render credentials needed messages
  if (isCredentialsNeeded && message.type === "credentials_needed") {
    return (
      <ChatCredentialsSetup
        credentials={message.credentials}
        agentName={message.agentName}
        message={message.message}
        onAllCredentialsComplete={handleAllCredentialsComplete}
        onCancel={handleCancelCredentials}
        className={className}
      />
    );
  }
  // Render login needed messages
  if (isLoginNeeded && message.type === "login_needed") {
    // If user is already logged in, show success message instead of auth prompt
    if (user) {
      return (
        <div className={cn("px-4 py-2", className)}>
          <div className="my-4 overflow-hidden rounded-lg border border-green-200 bg-gradient-to-br from-green-50 to-emerald-50 dark:border-green-800 dark:from-green-950/30 dark:to-emerald-950/30">
            <div className="px-6 py-4">
              <div className="flex items-center gap-3">
                <div className="flex h-10 w-10 items-center justify-center rounded-full bg-green-600">
                  <CheckCircleIcon
                    size={20}
                    weight="fill"
                    className="text-white"
                  />
                </div>
                <div>
                  <h3 className="text-lg font-semibold text-neutral-900 dark:text-neutral-100">
                    Successfully Authenticated
                  </h3>
                  <p className="text-sm text-neutral-600 dark:text-neutral-400">
                    You&apos;re now signed in and ready to continue
                  </p>
                </div>
              </div>
            </div>
          </div>
        </div>
      );
    }
    // Show auth prompt if not logged in
    return (
      <div className={cn("px-4 py-2", className)}>
        <AuthPromptWidget
          message={message.message}
          sessionId={message.sessionId}
          agentInfo={message.agentInfo}
          returnUrl="/chat"
        />
      </div>
    );
  }
  // Render tool call messages
  if (isToolCall && message.type === "tool_call") {
    return (
      <div className={cn("px-4 py-2", className)}>
        <ToolCallMessage toolName={message.toolName} />
      </div>
    );
  }
  // Render tool response messages
  // NOTE(review): the no_results/agent_carousel/execution_started branches
  // assume `message.toolName` exists on those variants — confirm against
  // ChatMessageData before relying on the rendered phrase.
  if (
    (isToolResponse && message.type === "tool_response") ||
    message.type === "no_results" ||
    message.type === "agent_carousel" ||
    message.type === "execution_started"
  ) {
    return (
      <div className={cn("px-4 py-2", className)}>
        <ToolResponseMessage toolName={getToolActionPhrase(message.toolName)} />
      </div>
    );
  }
  // Render regular chat messages
  if (message.type === "message") {
    return (
      <div
        className={cn(
          "flex gap-3 px-4 py-4",
          isUser && "flex-row-reverse",
          className,
        )}
      >
        {/* Avatar */}
        <div className="flex-shrink-0">
          <div
            className={cn(
              "flex h-8 w-8 items-center justify-center rounded-full",
              isUser && "bg-zinc-200 dark:bg-zinc-700",
              isAssistant && "bg-purple-600 dark:bg-purple-500",
            )}
          >
            {isUser ? (
              <UserIcon className="h-5 w-5 text-zinc-700 dark:text-zinc-200" />
            ) : (
              <RobotIcon className="h-5 w-5 text-white" />
            )}
          </div>
        </div>
        {/* Message Content */}
        <div className={cn("flex max-w-[70%] flex-col", isUser && "items-end")}>
          <MessageBubble variant={isUser ? "user" : "assistant"}>
            <MarkdownContent content={message.content} />
          </MessageBubble>
          {/* Timestamp */}
          <span
            className={cn(
              "mt-1 text-xs text-zinc-500 dark:text-zinc-400",
              isUser && "text-right",
            )}
          >
            {formattedTimestamp}
          </span>
        </div>
      </div>
    );
  }
  // Fallback for unknown message types
  return null;
}

View File

@@ -1,5 +1,5 @@
import type { ToolArguments, ToolResult } from "@/types/chat";
import { formatDistanceToNow } from "date-fns";
import type { ToolArguments, ToolResult } from "@/types/chat";
export type ChatMessageData =
| {
@@ -65,7 +65,6 @@ export type ChatMessageData =
name: string;
description: string;
version?: number;
image_url?: string;
}>;
totalCount?: number;
timestamp?: string | Date;
@@ -78,17 +77,6 @@ export type ChatMessageData =
message?: string;
libraryAgentLink?: string;
timestamp?: string | Date;
}
| {
type: "inputs_needed";
toolName: string;
agentName?: string;
agentId?: string;
graphVersion?: number;
inputSchema: Record<string, any>;
credentialsSchema?: Record<string, any>;
message: string;
timestamp?: string | Date;
};
export function useChatMessage(message: ChatMessageData) {
@@ -108,6 +96,5 @@ export function useChatMessage(message: ChatMessageData) {
isNoResults: message.type === "no_results",
isAgentCarousel: message.type === "agent_carousel",
isExecutionStarted: message.type === "execution_started",
isInputsNeeded: message.type === "inputs_needed",
};
}

View File

@@ -1,7 +1,8 @@
import { Button } from "@/components/atoms/Button/Button";
import React from "react";
import { Text } from "@/components/atoms/Text/Text";
import { Button } from "@/components/atoms/Button/Button";
import { CheckCircle, Play, ArrowSquareOut } from "@phosphor-icons/react";
import { cn } from "@/lib/utils";
import { ArrowSquareOut, CheckCircle, Play } from "@phosphor-icons/react";
export interface ExecutionStartedMessageProps {
executionId: string;
@@ -21,7 +22,7 @@ export function ExecutionStartedMessage({
return (
<div
className={cn(
"mx-4 my-2 flex flex-col gap-4 rounded-lg border border-green-200 bg-green-50 p-6",
"mx-4 my-2 flex flex-col gap-4 rounded-lg border border-green-200 bg-green-50 p-6 dark:border-green-900 dark:bg-green-950",
className,
)}
>
@@ -31,33 +32,48 @@ export function ExecutionStartedMessage({
<CheckCircle size={24} weight="bold" className="text-white" />
</div>
<div className="flex-1">
<Text variant="h3" className="mb-1 text-green-900">
<Text
variant="h3"
className="mb-1 text-green-900 dark:text-green-100"
>
Execution Started
</Text>
<Text variant="body" className="text-green-700">
<Text variant="body" className="text-green-700 dark:text-green-300">
{message}
</Text>
</div>
</div>
{/* Details */}
<div className="rounded-md bg-green-100 p-4">
<div className="rounded-md bg-green-100 p-4 dark:bg-green-900">
<div className="space-y-2">
{agentName && (
<div className="flex items-center justify-between">
<Text variant="small" className="font-semibold text-green-900">
<Text
variant="small"
className="font-semibold text-green-900 dark:text-green-100"
>
Agent:
</Text>
<Text variant="body" className="text-green-800">
<Text
variant="body"
className="text-green-800 dark:text-green-200"
>
{agentName}
</Text>
</div>
)}
<div className="flex items-center justify-between">
<Text variant="small" className="font-semibold text-green-900">
<Text
variant="small"
className="font-semibold text-green-900 dark:text-green-100"
>
Execution ID:
</Text>
<Text variant="small" className="font-mono text-green-800">
<Text
variant="small"
className="font-mono text-green-800 dark:text-green-200"
>
{executionId.slice(0, 16)}...
</Text>
</div>
@@ -78,7 +94,7 @@ export function ExecutionStartedMessage({
</div>
)}
<div className="flex items-center gap-2 text-green-600">
<div className="flex items-center gap-2 text-green-600 dark:text-green-400">
<Play size={16} weight="fill" />
<Text variant="small">
Your agent is now running. You can monitor its progress in the monitor

View File

@@ -1,9 +1,9 @@
"use client";
import { cn } from "@/lib/utils";
import React from "react";
import ReactMarkdown from "react-markdown";
import remarkGfm from "remark-gfm";
import { cn } from "@/lib/utils";
interface MarkdownContentProps {
content: string;
@@ -41,7 +41,7 @@ export function MarkdownContent({ content, className }: MarkdownContentProps) {
if (isInline) {
return (
<code
className="rounded bg-zinc-100 px-1.5 py-0.5 font-mono text-sm text-zinc-800"
className="rounded bg-zinc-100 px-1.5 py-0.5 font-mono text-sm text-zinc-800 dark:bg-zinc-800 dark:text-zinc-200"
{...props}
>
{children}
@@ -49,14 +49,17 @@ export function MarkdownContent({ content, className }: MarkdownContentProps) {
);
}
return (
<code className="font-mono text-sm text-zinc-100" {...props}>
<code
className="font-mono text-sm text-zinc-100 dark:text-zinc-200"
{...props}
>
{children}
</code>
);
},
pre: ({ children, ...props }) => (
<pre
className="my-2 overflow-x-auto rounded-md bg-zinc-900 p-3"
className="my-2 overflow-x-auto rounded-md bg-zinc-900 p-3 dark:bg-zinc-950"
{...props}
>
{children}
@@ -67,7 +70,7 @@ export function MarkdownContent({ content, className }: MarkdownContentProps) {
href={href}
target="_blank"
rel="noopener noreferrer"
className="text-purple-600 underline decoration-1 underline-offset-2 hover:text-purple-700"
className="text-purple-600 underline decoration-1 underline-offset-2 hover:text-purple-700 dark:text-purple-400 dark:hover:text-purple-300"
{...props}
>
{children}
@@ -123,7 +126,7 @@ export function MarkdownContent({ content, className }: MarkdownContentProps) {
return (
<input
type="checkbox"
className="mr-2 h-4 w-4 rounded border-zinc-300 text-purple-600 focus:ring-purple-500 disabled:cursor-not-allowed disabled:opacity-70"
className="mr-2 h-4 w-4 rounded border-zinc-300 text-purple-600 focus:ring-purple-500 disabled:cursor-not-allowed disabled:opacity-70 dark:border-zinc-600"
disabled
{...props}
/>
@@ -133,42 +136,57 @@ export function MarkdownContent({ content, className }: MarkdownContentProps) {
},
blockquote: ({ children, ...props }) => (
<blockquote
className="my-2 border-l-4 border-zinc-300 pl-3 italic text-zinc-700"
className="my-2 border-l-4 border-zinc-300 pl-3 italic text-zinc-700 dark:border-zinc-600 dark:text-zinc-300"
{...props}
>
{children}
</blockquote>
),
h1: ({ children, ...props }) => (
<h1 className="my-2 text-xl font-bold text-zinc-900" {...props}>
<h1
className="my-2 text-xl font-bold text-zinc-900 dark:text-zinc-100"
{...props}
>
{children}
</h1>
),
h2: ({ children, ...props }) => (
<h2 className="my-2 text-lg font-semibold text-zinc-800" {...props}>
<h2
className="my-2 text-lg font-semibold text-zinc-800 dark:text-zinc-200"
{...props}
>
{children}
</h2>
),
h3: ({ children, ...props }) => (
<h3
className="my-1 text-base font-semibold text-zinc-800"
className="my-1 text-base font-semibold text-zinc-800 dark:text-zinc-200"
{...props}
>
{children}
</h3>
),
h4: ({ children, ...props }) => (
<h4 className="my-1 text-sm font-medium text-zinc-700" {...props}>
<h4
className="my-1 text-sm font-medium text-zinc-700 dark:text-zinc-300"
{...props}
>
{children}
</h4>
),
h5: ({ children, ...props }) => (
<h5 className="my-1 text-sm font-medium text-zinc-700" {...props}>
<h5
className="my-1 text-sm font-medium text-zinc-700 dark:text-zinc-300"
{...props}
>
{children}
</h5>
),
h6: ({ children, ...props }) => (
<h6 className="my-1 text-xs font-medium text-zinc-600" {...props}>
<h6
className="my-1 text-xs font-medium text-zinc-600 dark:text-zinc-400"
{...props}
>
{children}
</h6>
),
@@ -178,12 +196,15 @@ export function MarkdownContent({ content, className }: MarkdownContentProps) {
</p>
),
hr: ({ ...props }) => (
<hr className="my-3 border-zinc-300" {...props} />
<hr
className="my-3 border-zinc-300 dark:border-zinc-700"
{...props}
/>
),
table: ({ children, ...props }) => (
<div className="my-2 overflow-x-auto">
<table
className="min-w-full divide-y divide-zinc-200 rounded border border-zinc-200"
className="min-w-full divide-y divide-zinc-200 rounded border border-zinc-200 dark:divide-zinc-700 dark:border-zinc-700"
{...props}
>
{children}
@@ -192,7 +213,7 @@ export function MarkdownContent({ content, className }: MarkdownContentProps) {
),
th: ({ children, ...props }) => (
<th
className="bg-zinc-50 px-3 py-2 text-left text-xs font-semibold text-zinc-700"
className="bg-zinc-50 px-3 py-2 text-left text-xs font-semibold text-zinc-700 dark:bg-zinc-800 dark:text-zinc-300"
{...props}
>
{children}
@@ -200,7 +221,7 @@ export function MarkdownContent({ content, className }: MarkdownContentProps) {
),
td: ({ children, ...props }) => (
<td
className="border-t border-zinc-200 px-3 py-2 text-sm"
className="border-t border-zinc-200 px-3 py-2 text-sm dark:border-zinc-700"
{...props}
>
{children}

View File

@@ -0,0 +1,28 @@
import { cn } from "@/lib/utils";
import { ReactNode } from "react";
export interface MessageBubbleProps {
children: ReactNode;
variant: "user" | "assistant";
className?: string;
}
/** Rounded container for a single chat message, styled per sender variant. */
export function MessageBubble({
  children,
  variant,
  className,
}: MessageBubbleProps) {
  // Variant-specific styling; exactly one entry applies per render.
  const variantClasses = {
    user: "bg-violet-600 text-white dark:bg-violet-500",
    assistant:
      "border border-neutral-200 bg-white dark:border-neutral-700 dark:bg-neutral-900 dark:text-neutral-100",
  } as const;

  return (
    <div
      className={cn(
        "rounded-lg px-4 py-3 text-sm",
        variantClasses[variant],
        className,
      )}
    >
      {children}
    </div>
  );
}

View File

@@ -0,0 +1,61 @@
import { cn } from "@/lib/utils";
import { ChatMessage } from "../ChatMessage/ChatMessage";
import type { ChatMessageData } from "../ChatMessage/useChatMessage";
import { StreamingMessage } from "../StreamingMessage/StreamingMessage";
import { useMessageList } from "./useMessageList";
export interface MessageListProps {
messages: ChatMessageData[];
streamingChunks?: string[];
isStreaming?: boolean;
className?: string;
onStreamComplete?: () => void;
onSendMessage?: (content: string) => void;
}
/**
 * Scrollable list of chat messages: persisted messages first, then an
 * in-flight streaming message when one is active. Scroll position is
 * managed via the refs returned by useMessageList.
 */
export function MessageList({
  messages,
  streamingChunks = [],
  isStreaming = false,
  className,
  onStreamComplete,
  onSendMessage,
}: MessageListProps) {
  const { messagesEndRef, messagesContainerRef } = useMessageList({
    messageCount: messages.length,
    isStreaming,
  });

  const showStreamingMessage = isStreaming && streamingChunks.length > 0;

  return (
    <div
      ref={messagesContainerRef}
      className={cn(
        "flex-1 overflow-y-auto",
        "scrollbar-thin scrollbar-track-transparent scrollbar-thumb-zinc-300 dark:scrollbar-thumb-zinc-700",
        className,
      )}
    >
      <div className="space-y-0">
        {/* NOTE(review): index keys are only safe if the list is append-only. */}
        {messages.map((message, index) => (
          <ChatMessage
            key={index}
            message={message}
            onSendMessage={onSendMessage}
          />
        ))}
        {showStreamingMessage && (
          <StreamingMessage
            chunks={streamingChunks}
            onComplete={onStreamComplete}
          />
        )}
        {/* Sentinel element targeted by auto-scroll. */}
        <div ref={messagesEndRef} />
      </div>
    </div>
  );
}

View File

@@ -1,6 +1,7 @@
import React from "react";
import { Text } from "@/components/atoms/Text/Text";
import { cn } from "@/lib/utils";
import { MagnifyingGlass, X } from "@phosphor-icons/react";
import { cn } from "@/lib/utils";
export interface NoResultsMessageProps {
message: string;
@@ -16,26 +17,26 @@ export function NoResultsMessage({
return (
<div
className={cn(
"mx-4 my-2 flex flex-col items-center gap-4 rounded-lg border border-gray-200 bg-gray-50 p-6",
"mx-4 my-2 flex flex-col items-center gap-4 rounded-lg border border-gray-200 bg-gray-50 p-6 dark:border-gray-800 dark:bg-gray-900",
className,
)}
>
{/* Icon */}
<div className="relative flex h-16 w-16 items-center justify-center">
<div className="flex h-16 w-16 items-center justify-center rounded-full bg-gray-200">
<div className="flex h-16 w-16 items-center justify-center rounded-full bg-gray-200 dark:bg-gray-700">
<MagnifyingGlass size={32} weight="bold" className="text-gray-500" />
</div>
<div className="absolute -right-1 -top-1 flex h-8 w-8 items-center justify-center rounded-full bg-gray-400">
<div className="absolute -right-1 -top-1 flex h-8 w-8 items-center justify-center rounded-full bg-gray-400 dark:bg-gray-600">
<X size={20} weight="bold" className="text-white" />
</div>
</div>
{/* Content */}
<div className="text-center">
<Text variant="h3" className="mb-2 text-gray-900">
<Text variant="h3" className="mb-2 text-gray-900 dark:text-gray-100">
No Results Found
</Text>
<Text variant="body" className="text-gray-700">
<Text variant="body" className="text-gray-700 dark:text-gray-300">
{message}
</Text>
</div>
@@ -43,14 +44,17 @@ export function NoResultsMessage({
{/* Suggestions */}
{suggestions.length > 0 && (
<div className="w-full space-y-2">
<Text variant="small" className="font-semibold text-gray-900">
<Text
variant="small"
className="font-semibold text-gray-900 dark:text-gray-100"
>
Try these suggestions:
</Text>
<ul className="space-y-1 rounded-md bg-gray-100 p-4">
<ul className="space-y-1 rounded-md bg-gray-100 p-4 dark:bg-gray-800">
{suggestions.map((suggestion, index) => (
<li
key={index}
className="flex items-start gap-2 text-sm text-gray-700"
className="flex items-start gap-2 text-sm text-gray-700 dark:text-gray-300"
>
<span className="mt-1 text-gray-500"></span>
<span>{suggestion}</span>

View File

@@ -0,0 +1,51 @@
import React from "react";
import { Text } from "@/components/atoms/Text/Text";
import { cn } from "@/lib/utils";
export interface QuickActionsWelcomeProps {
title: string;
description: string;
actions: string[];
onActionClick: (action: string) => void;
disabled?: boolean;
className?: string;
}
/**
 * Empty-state welcome panel: title, description, and a grid of one-click
 * suggested prompts that forward to onActionClick.
 */
export function QuickActionsWelcome({
  title,
  description,
  actions,
  onActionClick,
  disabled = false,
  className,
}: QuickActionsWelcomeProps) {
  const actionButtonClass =
    "rounded-lg border border-zinc-200 bg-white p-4 text-left text-sm hover:bg-zinc-50 disabled:cursor-not-allowed disabled:opacity-50 dark:border-zinc-800 dark:bg-zinc-900 dark:hover:bg-zinc-800";

  function renderAction(action: string) {
    return (
      <button
        key={action}
        onClick={() => onActionClick(action)}
        disabled={disabled}
        className={actionButtonClass}
      >
        {action}
      </button>
    );
  }

  return (
    <div
      className={cn("flex flex-1 items-center justify-center p-4", className)}
    >
      <div className="max-w-2xl text-center">
        <Text
          variant="h2"
          className="mb-4 text-3xl font-bold text-zinc-900 dark:text-zinc-100"
        >
          {title}
        </Text>
        <Text variant="body" className="mb-8 text-zinc-600 dark:text-zinc-400">
          {description}
        </Text>
        <div className="grid gap-2 sm:grid-cols-2">
          {actions.map(renderAction)}
        </div>
      </div>
    </div>
  );
}

View File

@@ -0,0 +1,42 @@
import { cn } from "@/lib/utils";
import { Robot } from "@phosphor-icons/react";
import { MessageBubble } from "@/app/(platform)/chat/components/MessageBubble/MessageBubble";
import { MarkdownContent } from "@/app/(platform)/chat/components/MarkdownContent/MarkdownContent";
import { useStreamingMessage } from "./useStreamingMessage";
export interface StreamingMessageProps {
chunks: string[];
className?: string;
onComplete?: () => void;
}
export function StreamingMessage({
chunks,
className,
onComplete,
}: StreamingMessageProps) {
const { displayText } = useStreamingMessage({ chunks, onComplete });
return (
<div className={cn("flex gap-3 px-4 py-4", className)}>
{/* Avatar */}
<div className="flex-shrink-0">
<div className="flex h-8 w-8 items-center justify-center rounded-full bg-purple-600 dark:bg-purple-500">
<Robot className="h-5 w-5 text-white" />
</div>
</div>
{/* Message Content */}
<div className="flex max-w-[70%] flex-col">
<MessageBubble variant="assistant">
<MarkdownContent content={displayText} />
</MessageBubble>
{/* Timestamp */}
<span className="mt-1 text-xs text-neutral-500 dark:text-neutral-400">
Typing...
</span>
</div>
</div>
);
}

View File

@@ -0,0 +1,49 @@
import React from "react";
import { WrenchIcon } from "@phosphor-icons/react";
import { cn } from "@/lib/utils";
import { getToolActionPhrase } from "@/app/(platform)/chat/helpers";
export interface ToolCallMessageProps {
toolName: string;
className?: string;
}
/**
 * Inline status card shown while a tool invocation is in flight; the label
 * carries an animated shimmer overlay.
 */
export function ToolCallMessage({ toolName, className }: ToolCallMessageProps) {
  const cardClass = cn(
    "mx-10 max-w-[70%] overflow-hidden rounded-lg border transition-all duration-200",
    "border-neutral-200 dark:border-neutral-700",
    "bg-white dark:bg-neutral-900",
    "animate-in fade-in-50 slide-in-from-top-1",
    className,
  );
  const headerClass = cn(
    "flex items-center justify-between px-3 py-2",
    "bg-gradient-to-r from-neutral-50 to-neutral-100 dark:from-neutral-800/20 dark:to-neutral-700/20",
  );
  const shimmerClass = cn(
    "absolute inset-0 bg-gradient-to-r from-transparent via-white/50 to-transparent",
    "dark:via-white/20",
    "animate-shimmer",
  );

  return (
    <div className={cardClass}>
      {/* Header: tool icon + action phrase with shimmer overlay */}
      <div className={headerClass}>
        <div className="flex items-center gap-2 overflow-hidden">
          <WrenchIcon
            size={16}
            weight="bold"
            className="flex-shrink-0 text-neutral-500 dark:text-neutral-400"
          />
          <span className="relative inline-block overflow-hidden text-sm font-medium text-neutral-700 dark:text-neutral-300">
            {getToolActionPhrase(toolName)}...
            <span className={shimmerClass} />
          </span>
        </div>
      </div>
    </div>
  );
}

View File

@@ -0,0 +1,52 @@
import React from "react";
import { WrenchIcon } from "@phosphor-icons/react";
import { cn } from "@/lib/utils";
import { getToolActionPhrase } from "@/app/(platform)/chat/helpers";
export interface ToolResponseMessageProps {
toolName: string;
success?: boolean;
className?: string;
}
/**
 * Inline status card rendered after a tool finishes; red accents replace
 * the neutral palette when the call failed.
 */
export function ToolResponseMessage({
  toolName,
  success = true,
  className,
}: ToolResponseMessageProps) {
  const borderClass = success
    ? "border-neutral-200 dark:border-neutral-700"
    : "border-red-200 dark:border-red-800";
  const gradientClass = success
    ? "from-neutral-50 to-neutral-100 dark:from-neutral-800/20 dark:to-neutral-700/20"
    : "from-red-50 to-red-100 dark:from-red-900/20 dark:to-red-800/20";

  return (
    <div
      className={cn(
        "mx-10 max-w-[70%] overflow-hidden rounded-lg border transition-all duration-200",
        borderClass,
        "bg-white dark:bg-neutral-900",
        "animate-in fade-in-50 slide-in-from-top-1",
      )}
    >
      {/* Header row: tool icon + action phrase */}
      <div
        className={cn(
          "flex items-center justify-between px-3 py-2",
          "bg-gradient-to-r",
          gradientClass,
        )}
      >
        <div className="flex items-center gap-2">
          <WrenchIcon
            size={16}
            weight="bold"
            className="text-neutral-500 dark:text-neutral-400"
          />
          <span className="text-sm font-medium text-neutral-700 dark:text-neutral-300">
            {getToolActionPhrase(toolName)}...
          </span>
        </div>
      </div>
    </div>
  );
}

View File

@@ -64,3 +64,10 @@ export function getToolCompletionPhrase(toolName: string): string {
`Finished ${toolName.replace(/_/g, " ").replace("...", "")}`
);
}
/**
 * Check whether `value` is a canonical 8-4-4-4-12 hexadecimal UUID string
 * (case-insensitive).
 *
 * NOTE: despite the original "v4" label, this accepts any UUID version and
 * variant — the version/variant nibbles are not constrained by the regex.
 */
export function isValidUUID(value: string): boolean {
  const uuidRegex =
    /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i;
  return uuidRegex.test(value);
}

Some files were not shown because too many files have changed in this diff Show More