feat(copilot): HITL review for sensitive block execution (#12356)
## Summary
- Integrates existing Human-In-The-Loop (HITL) review infrastructure
into CoPilot's direct block execution (`run_block`) for blocks marked
with `is_sensitive_action=True`
- Removes the `PendingHumanReview → AgentGraphExecution` FK constraint
to support synthetic CoPilot session IDs (migration included)
- Adds `ReviewRequiredResponse` model + frontend `ReviewRequiredCard`
component to surface review status in the chat UI
- Auto-approval works within a CoPilot session: once a block is
approved, subsequent executions of the same block in the same session
are auto-approved (using `copilot-session-{session_id}` as
`graph_exec_id` and `copilot-node-{block_id}` as `node_id`)
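
A minimal sketch of the synthetic-ID scheme described above. The prefixes and the
`rsplit` parsing mirror `backend.copilot.constants` in this diff; the `session_id`,
`block_id`, and hex-suffix values are made up for illustration:

```python
# Sketch of the CoPilot synthetic-ID scheme (mirrors backend.copilot.constants).
# Example values below are hypothetical.
COPILOT_SESSION_PREFIX = "copilot-session-"
COPILOT_NODE_PREFIX = "copilot-node-"

session_id = "abc123"          # hypothetical CoPilot chat session ID
block_id = "delete-branch-id"  # hypothetical sensitive block ID

graph_exec_id = f"{COPILOT_SESSION_PREFIX}{session_id}"  # "copilot-session-abc123"
node_id = f"{COPILOT_NODE_PREFIX}{block_id}"             # "copilot-node-delete-branch-id"

# node_exec_id encodes node_id so it can be recovered later without a
# NodeExecution row (needed for auto-approval records):
node_exec_id = f"{node_id}:1a2b3c4d"                     # "{node_id}:{random_hex}"
assert node_exec_id.rsplit(":", 1)[0] == node_id         # parse_node_id_from_exec_id
```

Because `graph_exec_id` and `node_id` are stable within a session, an approval recorded
for one execution of a block matches every later execution of that block in the same
session, which is what makes the session-scoped auto-approval work.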
## Test plan
- [x] All 11 `run_block_test.py` tests pass (3 new sensitive action
tests)
- [ ] Manual: Execute a block with `is_sensitive_action=True` in CoPilot
→ verify ReviewRequiredResponse is returned and rendered
- [ ] Manual: Approve in review panel → re-execute the same block →
verify auto-approval kicks in
- [ ] Manual: Verify non-sensitive blocks still execute without review
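
For context, a hedged sketch of the round trip this PR enables. The tool names,
response type, `review_id` field, and one-time-use deletion are taken from the diff
below; `call_tool` is a hypothetical stand-in for the CoPilot tool dispatcher and the
input values are illustrative only:

```python
# Illustrative only: the run_block -> review -> continue_run_block loop.
resp = await call_tool(
    "run_block",
    block_id="delete-branch-id",  # hypothetical sensitive block
    input_data={"repo_url": "https://github.com/test/repo", "branch": "main"},
)
assert resp.type == "review_required"  # block paused; review row created in DB

# The user approves in the ReviewRequiredCard rendered in the chat UI,
# then the LLM resumes with the review_id from the response:
result = await call_tool("continue_run_block", review_id=resp.review_id)
# Executes with the original pre-approved input, then deletes the
# one-time review record so it cannot be replayed.
```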
@@ -638,7 +638,7 @@ async def test_process_review_action_auto_approve_creates_auto_approval_records(
 
     # Mock get_node_executions to return node_id mapping
     mock_get_node_executions = mocker.patch(
-        "backend.data.execution.get_node_executions"
+        "backend.api.features.executions.review.routes.get_node_executions"
     )
     mock_node_exec = mocker.Mock(spec=NodeExecutionResult)
     mock_node_exec.node_exec_id = "test_node_123"

@@ -936,7 +936,7 @@ async def test_process_review_action_auto_approve_only_applies_to_approved_revie
 
     # Mock get_node_executions to return node_id mapping
     mock_get_node_executions = mocker.patch(
-        "backend.data.execution.get_node_executions"
+        "backend.api.features.executions.review.routes.get_node_executions"
     )
     mock_node_exec = mocker.Mock(spec=NodeExecutionResult)
     mock_node_exec.node_exec_id = "node_exec_approved"

@@ -1148,7 +1148,7 @@ async def test_process_review_action_per_review_auto_approve_granularity(
 
     # Mock get_node_executions to return batch node data
     mock_get_node_executions = mocker.patch(
-        "backend.data.execution.get_node_executions"
+        "backend.api.features.executions.review.routes.get_node_executions"
     )
     # Create mock node executions for each review
     mock_node_execs = []
 
@@ -6,10 +6,15 @@ import autogpt_libs.auth as autogpt_auth_lib
 from fastapi import APIRouter, HTTPException, Query, Security, status
 from prisma.enums import ReviewStatus
 
+from backend.copilot.constants import (
+    is_copilot_synthetic_id,
+    parse_node_id_from_exec_id,
+)
 from backend.data.execution import (
     ExecutionContext,
     ExecutionStatus,
     get_graph_execution_meta,
+    get_node_executions,
 )
 from backend.data.graph import get_graph_settings
 from backend.data.human_review import (
@@ -36,6 +41,38 @@ router = APIRouter(
 )
 
 
+async def _resolve_node_ids(
+    node_exec_ids: list[str],
+    graph_exec_id: str,
+    is_copilot: bool,
+) -> dict[str, str]:
+    """Resolve node_exec_id -> node_id for auto-approval records.
+
+    CoPilot synthetic IDs encode node_id in the format "{node_id}:{random}".
+    Graph executions look up node_id from NodeExecution records.
+    """
+    if not node_exec_ids:
+        return {}
+
+    if is_copilot:
+        return {neid: parse_node_id_from_exec_id(neid) for neid in node_exec_ids}
+
+    node_execs = await get_node_executions(
+        graph_exec_id=graph_exec_id, include_exec_data=False
+    )
+    node_exec_map = {ne.node_exec_id: ne.node_id for ne in node_execs}
+
+    result = {}
+    for neid in node_exec_ids:
+        if neid in node_exec_map:
+            result[neid] = node_exec_map[neid]
+        else:
+            logger.error(
+                f"Failed to resolve node_id for {neid}: Node execution not found."
+            )
+    return result
+
+
 @router.get(
     "/pending",
     summary="Get Pending Reviews",
@@ -110,14 +147,16 @@ async def list_pending_reviews_for_execution(
     """
 
     # Verify user owns the graph execution before returning reviews
-    graph_exec = await get_graph_execution_meta(
-        user_id=user_id, execution_id=graph_exec_id
-    )
-    if not graph_exec:
-        raise HTTPException(
-            status_code=status.HTTP_404_NOT_FOUND,
-            detail=f"Graph execution #{graph_exec_id} not found",
+    # (CoPilot synthetic IDs don't have graph execution records)
+    if not is_copilot_synthetic_id(graph_exec_id):
+        graph_exec = await get_graph_execution_meta(
+            user_id=user_id, execution_id=graph_exec_id
+        )
+        if not graph_exec:
+            raise HTTPException(
+                status_code=status.HTTP_404_NOT_FOUND,
+                detail=f"Graph execution #{graph_exec_id} not found",
             )
 
     return await get_pending_reviews_for_execution(graph_exec_id, user_id)
@@ -160,30 +199,26 @@ async def process_review_action(
         )
 
     graph_exec_id = next(iter(graph_exec_ids))
+    is_copilot = is_copilot_synthetic_id(graph_exec_id)
 
-    # Validate execution status before processing reviews
-    graph_exec_meta = await get_graph_execution_meta(
-        user_id=user_id, execution_id=graph_exec_id
-    )
-
-    if not graph_exec_meta:
-        raise HTTPException(
-            status_code=status.HTTP_404_NOT_FOUND,
-            detail=f"Graph execution #{graph_exec_id} not found",
-        )
-
-    # Only allow processing reviews if execution is paused for review
-    # or incomplete (partial execution with some reviews already processed)
-    if graph_exec_meta.status not in (
-        ExecutionStatus.REVIEW,
-        ExecutionStatus.INCOMPLETE,
-    ):
-        raise HTTPException(
-            status_code=status.HTTP_409_CONFLICT,
-            detail=f"Cannot process reviews while execution status is {graph_exec_meta.status}. "
-            f"Reviews can only be processed when execution is paused (REVIEW status). "
-            f"Current status: {graph_exec_meta.status}",
+    # Validate execution status for graph executions (skip for CoPilot synthetic IDs)
+    if not is_copilot:
+        graph_exec_meta = await get_graph_execution_meta(
+            user_id=user_id, execution_id=graph_exec_id
+        )
+        if not graph_exec_meta:
+            raise HTTPException(
+                status_code=status.HTTP_404_NOT_FOUND,
+                detail=f"Graph execution #{graph_exec_id} not found",
+            )
+        if graph_exec_meta.status not in (
+            ExecutionStatus.REVIEW,
+            ExecutionStatus.INCOMPLETE,
+        ):
+            raise HTTPException(
+                status_code=status.HTTP_409_CONFLICT,
+                detail=f"Cannot process reviews while execution status is {graph_exec_meta.status}",
             )
 
     # Build review decisions map and track which reviews requested auto-approval
     # Auto-approved reviews use original data (no modifications allowed)
@@ -236,7 +271,7 @@ async def process_review_action(
             )
             return (node_id, False)
 
-    # Collect node_exec_ids that need auto-approval
+    # Collect node_exec_ids that need auto-approval and resolve their node_ids
     node_exec_ids_needing_auto_approval = [
         node_exec_id
         for node_exec_id, review_result in updated_reviews.items()
@@ -244,29 +279,16 @@ async def process_review_action(
         and auto_approve_requests.get(node_exec_id, False)
     ]
 
+    # Batch-fetch node executions to get node_ids
+    node_id_map = await _resolve_node_ids(
+        node_exec_ids_needing_auto_approval, graph_exec_id, is_copilot
+    )
+
+    # Deduplicate by node_id — one auto-approval per node
     nodes_needing_auto_approval: dict[str, Any] = {}
-    if node_exec_ids_needing_auto_approval:
-        from backend.data.execution import get_node_executions
-
-        node_execs = await get_node_executions(
-            graph_exec_id=graph_exec_id, include_exec_data=False
-        )
-        node_exec_map = {node_exec.node_exec_id: node_exec for node_exec in node_execs}
-
-        for node_exec_id in node_exec_ids_needing_auto_approval:
-            node_exec = node_exec_map.get(node_exec_id)
-            if node_exec:
-                review_result = updated_reviews[node_exec_id]
-                # Use the first approved review for this node (deduplicate by node_id)
-                if node_exec.node_id not in nodes_needing_auto_approval:
-                    nodes_needing_auto_approval[node_exec.node_id] = review_result
-            else:
-                logger.error(
-                    f"Failed to create auto-approval record for {node_exec_id}: "
-                    f"Node execution not found. This may indicate a race condition "
-                    f"or data inconsistency."
-                )
+    for node_exec_id in node_exec_ids_needing_auto_approval:
+        node_id = node_id_map.get(node_exec_id)
+        if node_id and node_id not in nodes_needing_auto_approval:
+            nodes_needing_auto_approval[node_id] = updated_reviews[node_exec_id]
 
     # Execute all auto-approval creations in parallel (deduplicated by node_id)
     auto_approval_results = await asyncio.gather(
@@ -281,13 +303,11 @@ async def process_review_action(
     auto_approval_failed_count = 0
     for result in auto_approval_results:
         if isinstance(result, Exception):
-            # Unexpected exception during auto-approval creation
            auto_approval_failed_count += 1
             logger.error(
                 f"Unexpected exception during auto-approval creation: {result}"
             )
         elif isinstance(result, tuple) and len(result) == 2 and not result[1]:
-            # Auto-approval creation failed (returned False)
             auto_approval_failed_count += 1
 
     # Count results
@@ -302,22 +322,20 @@ async def process_review_action(
         if review.status == ReviewStatus.REJECTED
     )
 
     # Resume execution only if ALL pending reviews for this execution have been processed
-    if updated_reviews:
+    # Resume graph execution only for real graph executions (not CoPilot)
+    # CoPilot sessions are resumed by the LLM retrying run_block with review_id
+    if not is_copilot and updated_reviews:
         still_has_pending = await has_pending_reviews_for_graph_exec(graph_exec_id)
 
         if not still_has_pending:
             # Get the graph_id from any processed review
             first_review = next(iter(updated_reviews.values()))
 
             try:
                 # Fetch user and settings to build complete execution context
                 user = await get_user_by_id(user_id)
                 settings = await get_graph_settings(
                     user_id=user_id, graph_id=first_review.graph_id
                 )
 
                 # Preserve user's timezone preference when resuming execution
                 user_timezone = (
                     user.timezone if user.timezone != USER_TIMEZONE_NOT_SET else "UTC"
                 )
@@ -624,6 +624,7 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
         graph_id: str,
         graph_version: int,
         execution_context: "ExecutionContext",
+        is_graph_execution: bool = True,
         **kwargs,
     ) -> tuple[bool, BlockInput]:
         """

@@ -652,6 +653,7 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
             graph_version=graph_version,
             block_name=self.name,
             editable=True,
+            is_graph_execution=is_graph_execution,
         )
 
         if decision is None:
@@ -126,7 +126,7 @@ class PrintToConsoleBlock(Block):
             output_schema=PrintToConsoleBlock.Output,
             test_input={"text": "Hello, World!"},
             is_sensitive_action=True,
-            disabled=True,  # Disabled per Nick Tindle's request (OPEN-3000)
+            disabled=True,
             test_output=[
                 ("output", "Hello, World!"),
                 ("status", "printed"),
 
@@ -67,6 +67,7 @@ class HITLReviewHelper:
         graph_version: int,
         block_name: str = "Block",
         editable: bool = False,
+        is_graph_execution: bool = True,
     ) -> Optional[ReviewResult]:
         """
         Handle a review request for a block that requires human review.

@@ -143,10 +144,11 @@ class HITLReviewHelper:
            logger.info(
                 f"Block {block_name} pausing execution for node {node_exec_id} - awaiting human review"
             )
-            await HITLReviewHelper.update_node_execution_status(
-                exec_id=node_exec_id,
-                status=ExecutionStatus.REVIEW,
-            )
+            if is_graph_execution:
+                await HITLReviewHelper.update_node_execution_status(
+                    exec_id=node_exec_id,
+                    status=ExecutionStatus.REVIEW,
+                )
             return None  # Signal that execution should pause
 
         # Mark review as processed if not already done

@@ -168,6 +170,7 @@ class HITLReviewHelper:
         graph_version: int,
         block_name: str = "Block",
         editable: bool = False,
+        is_graph_execution: bool = True,
     ) -> Optional[ReviewDecision]:
         """
         Handle a review request and return the decision in a single call.

@@ -197,6 +200,7 @@ class HITLReviewHelper:
             graph_version=graph_version,
             block_name=block_name,
             editable=editable,
+            is_graph_execution=is_graph_execution,
         )
 
         if review_result is None:
@@ -6,6 +6,32 @@
 COPILOT_ERROR_PREFIX = "[__COPILOT_ERROR_f7a1__]"  # Renders as ErrorCard
 COPILOT_SYSTEM_PREFIX = "[__COPILOT_SYSTEM_e3b0__]"  # Renders as system info message
 
+# Prefix for all synthetic IDs generated by CoPilot block execution.
+# Used to distinguish CoPilot-generated records from real graph execution records
+# in PendingHumanReview and other tables.
+COPILOT_SYNTHETIC_ID_PREFIX = "copilot-"
+
+# Sub-prefixes for session-scoped and node-scoped synthetic IDs.
+COPILOT_SESSION_PREFIX = f"{COPILOT_SYNTHETIC_ID_PREFIX}session-"
+COPILOT_NODE_PREFIX = f"{COPILOT_SYNTHETIC_ID_PREFIX}node-"
+
+# Separator used in synthetic node_exec_id to encode node_id.
+# Format: "{node_id}:{random_hex}" — extract node_id via rsplit(":", 1)[0]
+COPILOT_NODE_EXEC_ID_SEPARATOR = ":"
+
 # Compaction notice messages shown to users.
 COMPACTION_DONE_MSG = "Earlier messages were summarized to fit within context limits."
 COMPACTION_TOOL_NAME = "context_compaction"
+
+
+def is_copilot_synthetic_id(id_value: str) -> bool:
+    """Check if an ID is a CoPilot synthetic ID (not from a real graph execution)."""
+    return id_value.startswith(COPILOT_SYNTHETIC_ID_PREFIX)
+
+
+def parse_node_id_from_exec_id(node_exec_id: str) -> str:
+    """Extract node_id from a synthetic node_exec_id.
+
+    Format: "{node_id}:{random_hex}" → returns "{node_id}".
+    """
+    return node_exec_id.rsplit(COPILOT_NODE_EXEC_ID_SEPARATOR, 1)[0]
@@ -12,6 +12,7 @@ from .agent_browser import BrowserActTool, BrowserNavigateTool, BrowserScreensho
 from .agent_output import AgentOutputTool
 from .base import BaseTool
 from .bash_exec import BashExecTool
+from .continue_run_block import ContinueRunBlockTool
 from .create_agent import CreateAgentTool
 from .customize_agent import CustomizeAgentTool
 from .edit_agent import EditAgentTool

@@ -68,6 +69,7 @@ TOOL_REGISTRY: dict[str, BaseTool] = {
     "move_agents_to_folder": MoveAgentsToFolderTool(),
     "run_agent": RunAgentTool(),
     "run_block": RunBlockTool(),
+    "continue_run_block": ContinueRunBlockTool(),
     "run_mcp_tool": RunMCPToolTool(),
     "get_mcp_guide": GetMCPGuideTool(),
     "view_agent_output": AgentOutputTool(),
@@ -0,0 +1,157 @@
+"""Tool for continuing block execution after human review approval."""
+
+import logging
+from typing import Any
+
+from prisma.enums import ReviewStatus
+
+from backend.blocks import get_block
+from backend.copilot.constants import (
+    COPILOT_NODE_PREFIX,
+    COPILOT_SESSION_PREFIX,
+    parse_node_id_from_exec_id,
+)
+from backend.copilot.model import ChatSession
+from backend.data.db_accessors import review_db
+
+from .base import BaseTool
+from .helpers import execute_block, resolve_block_credentials
+from .models import ErrorResponse, ToolResponseBase
+
+logger = logging.getLogger(__name__)
+
+
+class ContinueRunBlockTool(BaseTool):
+    """Tool for continuing a block execution after human review approval."""
+
+    @property
+    def name(self) -> str:
+        return "continue_run_block"
+
+    @property
+    def description(self) -> str:
+        return (
+            "Continue executing a block after human review approval. "
+            "Use this after a run_block call returned review_required. "
+            "Pass the review_id from the review_required response. "
+            "The block will execute with the original pre-approved input data."
+        )
+
+    @property
+    def parameters(self) -> dict[str, Any]:
+        return {
+            "type": "object",
+            "properties": {
+                "review_id": {
+                    "type": "string",
+                    "description": (
+                        "The review_id from a previous review_required response. "
+                        "This resumes execution with the pre-approved input data."
+                    ),
+                },
+            },
+            "required": ["review_id"],
+        }
+
+    @property
+    def requires_auth(self) -> bool:
+        return True
+
+    async def _execute(
+        self,
+        user_id: str | None,
+        session: ChatSession,
+        **kwargs,
+    ) -> ToolResponseBase:
+        review_id = (
+            kwargs.get("review_id", "").strip() if kwargs.get("review_id") else ""
+        )
+        session_id = session.session_id
+
+        if not review_id:
+            return ErrorResponse(
+                message="Please provide a review_id", session_id=session_id
+            )
+
+        if not user_id:
+            return ErrorResponse(
+                message="Authentication required", session_id=session_id
+            )
+
+        # Look up and validate the review record via adapter
+        reviews = await review_db().get_reviews_by_node_exec_ids([review_id], user_id)
+        review = reviews.get(review_id)
+
+        if not review:
+            return ErrorResponse(
+                message=(
+                    f"Review '{review_id}' not found or already executed. "
+                    "It may have been consumed by a previous continue_run_block call."
+                ),
+                session_id=session_id,
+            )
+
+        # Validate the review belongs to this session
+        expected_graph_exec_id = f"{COPILOT_SESSION_PREFIX}{session_id}"
+        if review.graph_exec_id != expected_graph_exec_id:
+            return ErrorResponse(
+                message="Review does not belong to this session.",
+                session_id=session_id,
+            )
+
+        if review.status == ReviewStatus.WAITING:
+            return ErrorResponse(
+                message="Review has not been approved yet. "
+                "Please wait for the user to approve the review first.",
+                session_id=session_id,
+            )
+
+        if review.status == ReviewStatus.REJECTED:
+            return ErrorResponse(
+                message="Review was rejected. The block will not execute.",
+                session_id=session_id,
+            )
+
+        # Extract block_id from review_id: copilot-node-{block_id}:{random_hex}
+        block_id = parse_node_id_from_exec_id(review_id).removeprefix(
+            COPILOT_NODE_PREFIX
+        )
+        block = get_block(block_id)
+        if not block:
+            return ErrorResponse(
+                message=f"Block '{block_id}' not found", session_id=session_id
+            )
+
+        input_data: dict[str, Any] = (
+            review.payload if isinstance(review.payload, dict) else {}
+        )
+
+        logger.info(
+            f"Continuing block {block.name} ({block_id}) for user {user_id} "
+            f"with review_id={review_id}"
+        )
+
+        matched_creds, missing_creds = await resolve_block_credentials(
+            user_id, block, input_data
+        )
+        if missing_creds:
+            return ErrorResponse(
+                message=f"Block '{block.name}' requires credentials that are not configured.",
+                session_id=session_id,
+            )
+
+        result = await execute_block(
+            block=block,
+            block_id=block_id,
+            input_data=input_data,
+            user_id=user_id,
+            session_id=session_id,
+            node_exec_id=review_id,
+            matched_credentials=matched_creds,
+        )
+
+        # Delete review record after successful execution (one-time use)
+        if result.type != "error":
+            await review_db().delete_review_by_node_exec_id(review_id, user_id)
+
+        return result
@@ -0,0 +1,186 @@
+"""Tests for ContinueRunBlockTool."""
+
+from unittest.mock import AsyncMock, MagicMock, patch
+
+import pytest
+from prisma.enums import ReviewStatus
+
+from ._test_data import make_session
+from .continue_run_block import ContinueRunBlockTool
+from .models import BlockOutputResponse, ErrorResponse
+
+_TEST_USER_ID = "test-user-continue"
+
+
+def _make_review_model(
+    node_exec_id: str,
+    status: ReviewStatus = ReviewStatus.APPROVED,
+    payload: dict | None = None,
+    graph_exec_id: str = "",
+):
+    """Create a mock PendingHumanReviewModel."""
+    mock = MagicMock()
+    mock.node_exec_id = node_exec_id
+    mock.status = status
+    mock.payload = payload or {"text": "hello"}
+    mock.graph_exec_id = graph_exec_id
+    return mock
+
+
+class TestContinueRunBlock:
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_missing_review_id_returns_error(self):
+        tool = ContinueRunBlockTool()
+        session = make_session(user_id=_TEST_USER_ID)
+
+        response = await tool._execute(
+            user_id=_TEST_USER_ID,
+            session=session,
+            review_id="",
+        )
+
+        assert isinstance(response, ErrorResponse)
+        assert "review_id" in response.message
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_review_not_found_returns_error(self):
+        tool = ContinueRunBlockTool()
+        session = make_session(user_id=_TEST_USER_ID)
+
+        mock_db = MagicMock()
+        mock_db.get_reviews_by_node_exec_ids = AsyncMock(return_value={})
+
+        with patch(
+            "backend.copilot.tools.continue_run_block.review_db",
+            return_value=mock_db,
+        ):
+            response = await tool._execute(
+                user_id=_TEST_USER_ID,
+                session=session,
+                review_id="copilot-node-some-block:abc12345",
+            )
+
+        assert isinstance(response, ErrorResponse)
+        assert "not found" in response.message
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_waiting_review_returns_error(self):
+        tool = ContinueRunBlockTool()
+        session = make_session(user_id=_TEST_USER_ID)
+        review_id = "copilot-node-some-block:abc12345"
+        graph_exec_id = f"copilot-session-{session.session_id}"
+        review = _make_review_model(
+            review_id, status=ReviewStatus.WAITING, graph_exec_id=graph_exec_id
+        )
+
+        mock_db = MagicMock()
+        mock_db.get_reviews_by_node_exec_ids = AsyncMock(
+            return_value={review_id: review}
+        )
+
+        with patch(
+            "backend.copilot.tools.continue_run_block.review_db",
+            return_value=mock_db,
+        ):
+            response = await tool._execute(
+                user_id=_TEST_USER_ID,
+                session=session,
+                review_id=review_id,
+            )
+
+        assert isinstance(response, ErrorResponse)
+        assert "not been approved" in response.message
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_rejected_review_returns_error(self):
+        tool = ContinueRunBlockTool()
+        session = make_session(user_id=_TEST_USER_ID)
+        review_id = "copilot-node-some-block:abc12345"
+        graph_exec_id = f"copilot-session-{session.session_id}"
+        review = _make_review_model(
+            review_id, status=ReviewStatus.REJECTED, graph_exec_id=graph_exec_id
+        )
+
+        mock_db = MagicMock()
+        mock_db.get_reviews_by_node_exec_ids = AsyncMock(
+            return_value={review_id: review}
+        )
+
+        with patch(
+            "backend.copilot.tools.continue_run_block.review_db",
+            return_value=mock_db,
+        ):
+            response = await tool._execute(
+                user_id=_TEST_USER_ID,
+                session=session,
+                review_id=review_id,
+            )
+
+        assert isinstance(response, ErrorResponse)
+        assert "rejected" in response.message.lower()
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_approved_review_executes_block(self):
+        tool = ContinueRunBlockTool()
+        session = make_session(user_id=_TEST_USER_ID)
+        review_id = "copilot-node-delete-branch-id:abc12345"
+        graph_exec_id = f"copilot-session-{session.session_id}"
+        input_data = {"repo_url": "https://github.com/test/repo", "branch": "main"}
+        review = _make_review_model(
+            review_id,
+            status=ReviewStatus.APPROVED,
+            payload=input_data,
+            graph_exec_id=graph_exec_id,
+        )
+
+        mock_block = MagicMock()
+        mock_block.name = "Delete Branch"
+
+        async def mock_execute(data, **kwargs):
+            yield "result", "Branch deleted"
+
+        mock_block.execute = mock_execute
+        mock_block.input_schema.get_credentials_fields_info.return_value = []
+
+        mock_workspace_db = MagicMock()
+        mock_workspace_db.get_or_create_workspace = AsyncMock(
+            return_value=MagicMock(id="test-workspace-id")
+        )
+
+        mock_db = MagicMock()
+        mock_db.get_reviews_by_node_exec_ids = AsyncMock(
+            return_value={review_id: review}
+        )
+        mock_db.delete_review_by_node_exec_id = AsyncMock(return_value=1)
+
+        with (
+            patch(
+                "backend.copilot.tools.continue_run_block.review_db",
+                return_value=mock_db,
+            ),
+            patch(
+                "backend.copilot.tools.continue_run_block.get_block",
+                return_value=mock_block,
+            ),
+            patch(
+                "backend.copilot.tools.helpers.workspace_db",
+                return_value=mock_workspace_db,
+            ),
+            patch(
+                "backend.copilot.tools.helpers.match_credentials_to_requirements",
+                return_value=({}, []),
+            ),
+        ):
+            response = await tool._execute(
+                user_id=_TEST_USER_ID,
+                session=session,
+                review_id=review_id,
+            )
+
+        assert isinstance(response, BlockOutputResponse)
+        assert response.success is True
+        assert response.block_name == "Delete Branch"
+        # Verify review was deleted (one-time use)
+        mock_db.delete_review_by_node_exec_id.assert_called_once_with(
+            review_id, _TEST_USER_ID
+        )
@@ -1,7 +1,24 @@
 """Shared helpers for chat tools."""
 
+import logging
+from collections import defaultdict
 from typing import Any
 
+from pydantic_core import PydanticUndefined
+
 from backend.blocks._base import AnyBlockSchema
+from backend.copilot.constants import COPILOT_NODE_PREFIX, COPILOT_SESSION_PREFIX
+from backend.data.db_accessors import workspace_db
+from backend.data.execution import ExecutionContext
+from backend.data.model import CredentialsFieldInfo, CredentialsMetaInput
+from backend.integrations.creds_manager import IntegrationCredentialsManager
+from backend.util.exceptions import BlockError
+
+from .models import BlockOutputResponse, ErrorResponse, ToolResponseBase
+from .utils import match_credentials_to_requirements
+
+logger = logging.getLogger(__name__)
 
 
 def get_inputs_from_schema(
     input_schema: dict[str, Any],

@@ -27,3 +44,159 @@ def get_inputs_from_schema(
         for name, schema in properties.items()
         if name not in exclude
     ]
+
+
+async def execute_block(
+    *,
+    block: AnyBlockSchema,
+    block_id: str,
+    input_data: dict[str, Any],
+    user_id: str,
+    session_id: str,
+    node_exec_id: str,
+    matched_credentials: dict[str, CredentialsMetaInput],
+    sensitive_action_safe_mode: bool = False,
+) -> ToolResponseBase:
+    """Execute a block with full context setup, credential injection, and error handling.
+
+    This is the shared execution path used by both ``run_block`` (after review
+    check) and ``continue_run_block`` (after approval).
+
+    Returns:
+        BlockOutputResponse on success, ErrorResponse on failure.
+    """
+    try:
+        workspace = await workspace_db().get_or_create_workspace(user_id)
+
+        synthetic_graph_id = f"{COPILOT_SESSION_PREFIX}{session_id}"
+        synthetic_node_id = f"{COPILOT_NODE_PREFIX}{block_id}"
+
+        execution_context = ExecutionContext(
+            user_id=user_id,
+            graph_id=synthetic_graph_id,
+            graph_exec_id=synthetic_graph_id,
+            graph_version=1,
+            node_id=synthetic_node_id,
+            node_exec_id=node_exec_id,
+            workspace_id=workspace.id,
+            session_id=session_id,
+            sensitive_action_safe_mode=sensitive_action_safe_mode,
+        )
+
+        exec_kwargs: dict[str, Any] = {
+            "user_id": user_id,
+            "execution_context": execution_context,
+            "workspace_id": workspace.id,
+            "graph_exec_id": synthetic_graph_id,
+            "node_exec_id": node_exec_id,
+            "node_id": synthetic_node_id,
+            "graph_version": 1,
+            "graph_id": synthetic_graph_id,
+        }
+
+        # Inject credentials
+        creds_manager = IntegrationCredentialsManager()
+        for field_name, cred_meta in matched_credentials.items():
+            if field_name not in input_data:
+                input_data[field_name] = cred_meta.model_dump()
+
+            actual_credentials = await creds_manager.get(
+                user_id, cred_meta.id, lock=False
+            )
+            if actual_credentials:
+                exec_kwargs[field_name] = actual_credentials
+            else:
+                return ErrorResponse(
+                    message=f"Failed to retrieve credentials for {field_name}",
+                    session_id=session_id,
+                )
+
+        # Execute the block and collect outputs
+        outputs: dict[str, list[Any]] = defaultdict(list)
+        async for output_name, output_data in block.execute(
+            input_data,
+            **exec_kwargs,
+        ):
+            outputs[output_name].append(output_data)
+
+        return BlockOutputResponse(
+            message=f"Block '{block.name}' executed successfully",
+            block_id=block_id,
+            block_name=block.name,
+            outputs=dict(outputs),
+            success=True,
+            session_id=session_id,
+        )
+
+    except BlockError as e:
+        logger.warning(f"Block execution failed: {e}")
+        return ErrorResponse(
+            message=f"Block execution failed: {e}",
+            error=str(e),
+            session_id=session_id,
+        )
+    except Exception as e:
+        logger.error(f"Unexpected error executing block: {e}", exc_info=True)
+        return ErrorResponse(
+            message=f"Failed to execute block: {str(e)}",
+            error=str(e),
+            session_id=session_id,
+        )
+
+
+async def resolve_block_credentials(
+    user_id: str,
+    block: AnyBlockSchema,
+    input_data: dict[str, Any] | None = None,
+) -> tuple[dict[str, CredentialsMetaInput], list[CredentialsMetaInput]]:
+    """Resolve credentials for a block by matching user's available credentials.
+
+    Handles discriminated credentials (e.g. provider selection based on model).
+
+    Returns:
+        (matched_credentials, missing_credentials)
+    """
+    input_data = input_data or {}
+    requirements = _resolve_discriminated_credentials(block, input_data)
+
+    if not requirements:
+        return {}, []
+
+    return await match_credentials_to_requirements(user_id, requirements)
+
+
+def _resolve_discriminated_credentials(
+    block: AnyBlockSchema,
+    input_data: dict[str, Any],
+) -> dict[str, CredentialsFieldInfo]:
+    """Resolve credential requirements, applying discriminator logic where needed."""
+    credentials_fields_info = block.input_schema.get_credentials_fields_info()
+    if not credentials_fields_info:
+        return {}
+
+    resolved: dict[str, CredentialsFieldInfo] = {}
+
+    for field_name, field_info in credentials_fields_info.items():
+        effective_field_info = field_info
+
+        if field_info.discriminator and field_info.discriminator_mapping:
+            discriminator_value = input_data.get(field_info.discriminator)
+            if discriminator_value is None:
+                field = block.input_schema.model_fields.get(field_info.discriminator)
+                if field and field.default is not PydanticUndefined:
+                    discriminator_value = field.default
+
+            if (
+                discriminator_value
+                and discriminator_value in field_info.discriminator_mapping
+            ):
+                effective_field_info = field_info.discriminate(discriminator_value)
+                effective_field_info.discriminator_values.add(discriminator_value)
+                logger.debug(
+                    f"Discriminated provider for {field_name}: "
+                    f"{discriminator_value} -> {effective_field_info.provider}"
+                )
+
+        resolved[field_name] = effective_field_info
+
+    return resolved
@@ -39,6 +39,7 @@ class ResponseType(str, Enum):
     BLOCK_LIST = "block_list"
     BLOCK_DETAILS = "block_details"
     BLOCK_OUTPUT = "block_output"
+    REVIEW_REQUIRED = "review_required"
 
     # MCP
     MCP_GUIDE = "mcp_guide"

@@ -458,6 +459,21 @@ class BlockOutputResponse(ToolResponseBase):
     success: bool = True
 
 
+class ReviewRequiredResponse(ToolResponseBase):
+    """Response when a block requires human review before execution."""
+
+    type: ResponseType = ResponseType.REVIEW_REQUIRED
+    block_id: str
+    block_name: str
+    review_id: str = Field(description="The review ID for tracking approval status")
+    graph_exec_id: str = Field(
+        description="The graph execution ID for fetching review status"
+    )
+    input_data: dict[str, Any] = Field(
+        description="The input data that requires review"
+    )
+
+
 class WebFetchResponse(ToolResponseBase):
     """Response for web_fetch tool."""
@@ -534,7 +534,9 @@ class RunAgentTool(BaseTool):
             return ExecutionStartedResponse(
                 message=(
                     f"Agent '{library_agent.name}' is awaiting human review. "
-                    f"Check at {library_agent_link}."
+                    f"The user can approve or reject inline. After approval, "
+                    f"the execution resumes automatically. Use view_agent_output "
+                    f"with execution_id='{execution.id}' to check the result."
                 ),
                 session_id=session_id,
                 execution_id=execution.id,
@@ -2,38 +2,34 @@
 
 import logging
 import uuid
-from collections import defaultdict
 from typing import Any
 
 from pydantic_core import PydanticUndefined
 
 from backend.blocks import BlockType, get_block
 from backend.blocks._base import AnyBlockSchema
+from backend.copilot.constants import (
+    COPILOT_NODE_EXEC_ID_SEPARATOR,
+    COPILOT_NODE_PREFIX,
+    COPILOT_SESSION_PREFIX,
+)
 from backend.copilot.model import ChatSession
-from backend.data.db_accessors import workspace_db
+from backend.data.db_accessors import review_db
 from backend.data.execution import ExecutionContext
 from backend.data.model import CredentialsFieldInfo, CredentialsMetaInput
-from backend.integrations.creds_manager import IntegrationCredentialsManager
-from backend.util.exceptions import BlockError
 
 from .base import BaseTool
 from .find_block import COPILOT_EXCLUDED_BLOCK_IDS, COPILOT_EXCLUDED_BLOCK_TYPES
-from .helpers import get_inputs_from_schema
+from .helpers import execute_block, get_inputs_from_schema, resolve_block_credentials
 from .models import (
     BlockDetails,
     BlockDetailsResponse,
     BlockOutputResponse,
     ErrorResponse,
     InputValidationErrorResponse,
+    ReviewRequiredResponse,
     SetupInfo,
     SetupRequirementsResponse,
     ToolResponseBase,
     UserReadiness,
 )
-from .utils import (
-    build_missing_credentials_from_field_info,
-    match_credentials_to_requirements,
-)
+from .utils import build_missing_credentials_from_field_info
 
 logger = logging.getLogger(__name__)
@@ -52,7 +48,9 @@ class RunBlockTool(BaseTool):
             "IMPORTANT: You MUST call find_block first to get the block's 'id' - "
             "do NOT guess or make up block IDs. "
             "On first attempt (without input_data), returns detailed schema showing "
-            "required inputs and outputs. Then call again with proper input_data to execute."
+            "required inputs and outputs. Then call again with proper input_data to execute. "
+            "If a block requires human review, use continue_run_block with the "
+            "review_id after the user approves."
         )
 
     @property
@@ -166,11 +164,10 @@ class RunBlockTool(BaseTool):
 
         logger.info(f"Executing block {block.name} ({block_id}) for user {user_id}")
 
-        creds_manager = IntegrationCredentialsManager()
         (
             matched_credentials,
             missing_credentials,
-        ) = await self._resolve_block_credentials(user_id, block, input_data)
+        ) = await resolve_block_credentials(user_id, block, input_data)
 
         # Get block schemas for details/validation
         try:
@@ -279,169 +276,97 @@ class RunBlockTool(BaseTool):
                 user_authenticated=True,
             )
 
-        try:
-            # Get or create user's workspace for CoPilot file operations
-            workspace = await workspace_db().get_or_create_workspace(user_id)
+        # Generate synthetic IDs for CoPilot context.
+        # Encode node_id in node_exec_id so it can be extracted later
+        # (e.g. for auto-approve, where we need node_id but have no NodeExecution row).
+        synthetic_graph_id = f"{COPILOT_SESSION_PREFIX}{session.session_id}"
+        synthetic_node_id = f"{COPILOT_NODE_PREFIX}{block_id}"
 
-            # Generate synthetic IDs for CoPilot context
-            # Each chat session is treated as its own agent with one continuous run
-            # This means:
-            # - graph_id (agent) = session (memories scoped to session when limit_to_agent=True)
-            # - graph_exec_id (run) = session (memories scoped to session when limit_to_run=True)
-            # - node_exec_id = unique per block execution
-            synthetic_graph_id = f"copilot-session-{session.session_id}"
-            synthetic_graph_exec_id = f"copilot-session-{session.session_id}"
-            synthetic_node_id = f"copilot-node-{block_id}"
-            synthetic_node_exec_id = (
-                f"copilot-{session.session_id}-{uuid.uuid4().hex[:8]}"
-            )
-
-            # Create unified execution context with all required fields
-            execution_context = ExecutionContext(
-                # Execution identity
-                user_id=user_id,
-                graph_id=synthetic_graph_id,
-                graph_exec_id=synthetic_graph_exec_id,
-                graph_version=1,  # Versions are 1-indexed
-                node_id=synthetic_node_id,
-                node_exec_id=synthetic_node_exec_id,
-                # Workspace with session scoping
-                workspace_id=workspace.id,
-                session_id=session.session_id,
-            )
-
-            # Prepare kwargs for block execution
-            # Keep individual kwargs for backwards compatibility with existing blocks
-            exec_kwargs: dict[str, Any] = {
-                "user_id": user_id,
-                "execution_context": execution_context,
-                # Legacy: individual kwargs for blocks not yet using execution_context
-                "workspace_id": workspace.id,
-                "graph_exec_id": synthetic_graph_exec_id,
-                "node_exec_id": synthetic_node_exec_id,
-                "node_id": synthetic_node_id,
-                "graph_version": 1,  # Versions are 1-indexed
-                "graph_id": synthetic_graph_id,
-            }
-
-            for field_name, cred_meta in matched_credentials.items():
-                # Inject metadata into input_data (for validation)
-                if field_name not in input_data:
-                    input_data[field_name] = cred_meta.model_dump()
-
-                # Fetch actual credentials and pass as kwargs (for execution)
-                actual_credentials = await creds_manager.get(
-                    user_id, cred_meta.id, lock=False
-                )
-                if actual_credentials:
-                    exec_kwargs[field_name] = actual_credentials
-                else:
-                    return ErrorResponse(
-                        message=f"Failed to retrieve credentials for {field_name}",
-                        session_id=session_id,
-                    )
-
-            # Execute the block and collect outputs
-            outputs: dict[str, list[Any]] = defaultdict(list)
-            async for output_name, output_data in block.execute(
-                input_data,
-                **exec_kwargs,
-            ):
-                outputs[output_name].append(output_data)
-
-            return BlockOutputResponse(
-                message=f"Block '{block.name}' executed successfully",
+        # Check for an existing WAITING review for this block with the same input.
+        # If the LLM retries run_block with identical input, we reuse the existing
+        # review instead of creating duplicates. Different inputs = new execution.
+        existing_reviews = await review_db().get_pending_reviews_for_execution(
+            synthetic_graph_id, user_id
+        )
+        existing_review = next(
+            (
+                r
+                for r in existing_reviews
+                if r.node_id == synthetic_node_id
+                and r.status.value == "WAITING"
+                and r.payload == input_data
+            ),
+            None,
+        )
+        if existing_review:
+            return ReviewRequiredResponse(
+                message=(
+                    f"Block '{block.name}' requires human review. "
+                    f"After the user approves, call continue_run_block with "
+                    f"review_id='{existing_review.node_exec_id}' to execute."
+                ),
+                session_id=session_id,
                 block_id=block_id,
                 block_name=block.name,
-                outputs=dict(outputs),
-                success=True,
-                session_id=session_id,
+                review_id=existing_review.node_exec_id,
+                graph_exec_id=synthetic_graph_id,
+                input_data=input_data,
             )
 
-        except BlockError as e:
-            logger.warning(f"Block execution failed: {e}")
-            return ErrorResponse(
-                message=f"Block execution failed: {e}",
-                error=str(e),
-                session_id=session_id,
-            )
-        except Exception as e:
-            logger.error(f"Unexpected error executing block: {e}", exc_info=True)
-            return ErrorResponse(
-                message=f"Failed to execute block: {str(e)}",
-                error=str(e),
-                session_id=session_id,
-            )
+        synthetic_node_exec_id = (
+            f"{synthetic_node_id}{COPILOT_NODE_EXEC_ID_SEPARATOR}"
+            f"{uuid.uuid4().hex[:8]}"
+        )
+
+        # Check for HITL review before execution.
+        # This creates the review record in the DB for CoPilot flows.
+        review_context = ExecutionContext(
+            user_id=user_id,
+            graph_id=synthetic_graph_id,
+            graph_exec_id=synthetic_graph_id,
+            graph_version=1,
+            node_id=synthetic_node_id,
+            node_exec_id=synthetic_node_exec_id,
+            sensitive_action_safe_mode=True,
+        )
+        should_pause, input_data = await block.is_block_exec_need_review(
+            input_data,
+            user_id=user_id,
+            node_id=synthetic_node_id,
+            node_exec_id=synthetic_node_exec_id,
+            graph_exec_id=synthetic_graph_id,
+            graph_id=synthetic_graph_id,
+            graph_version=1,
+            execution_context=review_context,
+            is_graph_execution=False,
+        )
+        if should_pause:
+            return ReviewRequiredResponse(
+                message=(
+                    f"Block '{block.name}' requires human review. "
+                    f"After the user approves, call continue_run_block with "
+                    f"review_id='{synthetic_node_exec_id}' to execute."
+                ),
+                session_id=session_id,
+                block_id=block_id,
+                block_name=block.name,
+                review_id=synthetic_node_exec_id,
+                graph_exec_id=synthetic_graph_id,
+                input_data=input_data,
+            )
 
-    async def _resolve_block_credentials(
-        self,
-        user_id: str,
-        block: AnyBlockSchema,
-        input_data: dict[str, Any] | None = None,
-    ) -> tuple[dict[str, CredentialsMetaInput], list[CredentialsMetaInput]]:
-        """
-        Resolve credentials for a block by matching user's available credentials.
-
-        Args:
-            user_id: User ID
-            block: Block to resolve credentials for
-            input_data: Input data for the block (used to determine provider via discriminator)
-
-        Returns:
-            tuple of (matched_credentials, missing_credentials) - matched credentials
-            are used for block execution, missing ones indicate setup requirements.
-        """
-        input_data = input_data or {}
-        requirements = self._resolve_discriminated_credentials(block, input_data)
-
-        if not requirements:
-            return {}, []
-
-        return await match_credentials_to_requirements(user_id, requirements)
+        return await execute_block(
+            block=block,
+            block_id=block_id,
+            input_data=input_data,
+            user_id=user_id,
+            session_id=session_id,
+            node_exec_id=synthetic_node_exec_id,
+            matched_credentials=matched_credentials,
+        )
 
     def _get_inputs_list(self, block: AnyBlockSchema) -> list[dict[str, Any]]:
         """Extract non-credential inputs from block schema."""
         schema = block.input_schema.jsonschema()
         credentials_fields = set(block.input_schema.get_credentials_fields().keys())
         return get_inputs_from_schema(schema, exclude_fields=credentials_fields)
 
-    def _resolve_discriminated_credentials(
-        self,
-        block: AnyBlockSchema,
-        input_data: dict[str, Any],
-    ) -> dict[str, CredentialsFieldInfo]:
-        """Resolve credential requirements, applying discriminator logic where needed."""
-        credentials_fields_info = block.input_schema.get_credentials_fields_info()
-        if not credentials_fields_info:
-            return {}
-
-        resolved: dict[str, CredentialsFieldInfo] = {}
-
-        for field_name, field_info in credentials_fields_info.items():
-            effective_field_info = field_info
-
-            if field_info.discriminator and field_info.discriminator_mapping:
-                discriminator_value = input_data.get(field_info.discriminator)
-                if discriminator_value is None:
-                    field = block.input_schema.model_fields.get(
-                        field_info.discriminator
-                    )
-                    if field and field.default is not PydanticUndefined:
-                        discriminator_value = field.default
-
-                if (
-                    discriminator_value
-                    and discriminator_value in field_info.discriminator_mapping
-                ):
-                    effective_field_info = field_info.discriminate(discriminator_value)
-                    # For host-scoped credentials, add the discriminator value
-                    # (e.g., URL) so _credential_is_for_host can match it
-                    effective_field_info.discriminator_values.add(discriminator_value)
-                    logger.debug(
-                        f"Discriminated provider for {field_name}: "
-                        f"{discriminator_value} -> {effective_field_info.provider}"
-                    )
-
-            resolved[field_name] = effective_field_info
-
-        return resolved
@@ -12,6 +12,7 @@ from .models import (
     BlockOutputResponse,
     ErrorResponse,
     InputValidationErrorResponse,
+    ReviewRequiredResponse,
 )
 from .run_block import RunBlockTool
 
@@ -27,9 +28,16 @@ def make_mock_block(
     mock.name = name
     mock.block_type = block_type
     mock.disabled = disabled
+    mock.is_sensitive_action = False
     mock.input_schema = MagicMock()
     mock.input_schema.jsonschema.return_value = {"properties": {}, "required": []}
-    mock.input_schema.get_credentials_fields_info.return_value = []
+    mock.input_schema.get_credentials_fields_info.return_value = {}
     mock.input_schema.get_credentials_fields.return_value = {}
 
+    async def _no_review(input_data, **kwargs):
+        return False, input_data
+
+    mock.is_block_exec_need_review = _no_review
     return mock
 

@@ -46,6 +54,7 @@ def make_mock_block_with_schema(
     mock.name = name
     mock.block_type = BlockType.STANDARD
     mock.disabled = False
+    mock.is_sensitive_action = False
     mock.description = f"Test block: {name}"
 
     input_schema = {

@@ -63,6 +72,12 @@ def make_mock_block_with_schema(
     mock.output_schema = MagicMock()
     mock.output_schema.jsonschema.return_value = output_schema
 
+    # Default: no review needed, pass through input_data unchanged
+    async def _no_review(input_data, **kwargs):
+        return False, input_data
+
+    mock.is_block_exec_need_review = _no_review
+
     return mock
 
@@ -126,9 +141,15 @@ class TestRunBlockFiltering:
             "standard-id", "HTTP Request", BlockType.STANDARD
         )
 
-        with patch(
-            "backend.copilot.tools.run_block.get_block",
-            return_value=standard_block,
+        with (
+            patch(
+                "backend.copilot.tools.run_block.get_block",
+                return_value=standard_block,
+            ),
+            patch(
+                "backend.copilot.tools.helpers.match_credentials_to_requirements",
+                return_value=({}, []),
+            ),
         ):
             tool = RunBlockTool()
             response = await tool._execute(
@@ -154,12 +175,7 @@ class TestRunBlockInputValidation:
 
     @pytest.mark.asyncio(loop_scope="session")
     async def test_unknown_input_fields_are_rejected(self):
-        """run_block rejects unknown input fields instead of silently ignoring them.
-
-        Scenario: The AI Text Generator block has a field called 'model' (for LLM model
-        selection), but the LLM calling the tool guesses wrong and sends 'LLM_Model'
-        instead. The block should reject the request and return the valid schema.
-        """
+        """run_block rejects unknown input fields instead of silently ignoring them."""
         session = make_session(user_id=_TEST_USER_ID)
 
         mock_block = make_mock_block_with_schema(
@@ -182,27 +198,31 @@ class TestRunBlockInputValidation:
             output_properties={"response": {"type": "string"}},
         )
 
-        with patch(
-            "backend.copilot.tools.run_block.get_block",
-            return_value=mock_block,
+        with (
+            patch(
+                "backend.copilot.tools.run_block.get_block",
+                return_value=mock_block,
+            ),
+            patch(
+                "backend.copilot.tools.helpers.match_credentials_to_requirements",
+                return_value=({}, []),
+            ),
         ):
             tool = RunBlockTool()
 
             # Provide 'prompt' (correct) but 'LLM_Model' instead of 'model' (wrong key)
             response = await tool._execute(
                 user_id=_TEST_USER_ID,
                 session=session,
                 block_id="ai-text-gen-id",
                 input_data={
                     "prompt": "Write a haiku about coding",
-                    "LLM_Model": "claude-opus-4-6",  # WRONG KEY - should be 'model'
+                    "LLM_Model": "claude-opus-4-6",
                 },
             )
 
         assert isinstance(response, InputValidationErrorResponse)
         assert "LLM_Model" in response.unrecognized_fields
         assert "Block was not executed" in response.message
-        assert "inputs" in response.model_dump()  # valid schema included
+        assert "inputs" in response.model_dump()
 
     @pytest.mark.asyncio(loop_scope="session")
     async def test_multiple_wrong_keys_are_all_reported(self):
@@ -221,21 +241,26 @@ class TestRunBlockInputValidation:
             required_fields=["prompt"],
         )
 
-        with patch(
-            "backend.copilot.tools.run_block.get_block",
-            return_value=mock_block,
+        with (
+            patch(
+                "backend.copilot.tools.run_block.get_block",
+                return_value=mock_block,
+            ),
+            patch(
+                "backend.copilot.tools.helpers.match_credentials_to_requirements",
+                return_value=({}, []),
+            ),
         ):
             tool = RunBlockTool()
 
             response = await tool._execute(
                 user_id=_TEST_USER_ID,
                 session=session,
                 block_id="ai-text-gen-id",
                 input_data={
-                    "prompt": "Hello",  # correct
-                    "llm_model": "claude-opus-4-6",  # WRONG - should be 'model'
-                    "system_prompt": "Be helpful",  # WRONG - should be 'sys_prompt'
-                    "retries": 5,  # WRONG - should be 'retry'
+                    "prompt": "Hello",
+                    "llm_model": "claude-opus-4-6",
+                    "system_prompt": "Be helpful",
+                    "retries": 5,
                 },
             )
 
@@ -262,23 +287,26 @@ class TestRunBlockInputValidation:
             required_fields=["prompt"],
         )
 
-        with patch(
-            "backend.copilot.tools.run_block.get_block",
-            return_value=mock_block,
+        with (
+            patch(
+                "backend.copilot.tools.run_block.get_block",
+                return_value=mock_block,
+            ),
+            patch(
+                "backend.copilot.tools.helpers.match_credentials_to_requirements",
+                return_value=({}, []),
+            ),
         ):
             tool = RunBlockTool()
 
             # 'prompt' is missing AND 'LLM_Model' is an unknown field
             response = await tool._execute(
                 user_id=_TEST_USER_ID,
                 session=session,
                 block_id="ai-text-gen-id",
                 input_data={
-                    "LLM_Model": "claude-opus-4-6",  # wrong key, and 'prompt' is missing
+                    "LLM_Model": "claude-opus-4-6",
                 },
             )
 
         # Unknown fields are caught first
         assert isinstance(response, InputValidationErrorResponse)
         assert "LLM_Model" in response.unrecognized_fields
 
@@ -313,7 +341,11 @@ class TestRunBlockInputValidation:
                 return_value=mock_block,
             ),
             patch(
-                "backend.copilot.tools.run_block.workspace_db",
+                "backend.copilot.tools.helpers.match_credentials_to_requirements",
+                return_value=({}, []),
+            ),
+            patch(
+                "backend.copilot.tools.helpers.workspace_db",
                 return_value=mock_workspace_db,
             ),
         ):

@@ -325,7 +357,7 @@ class TestRunBlockInputValidation:
                 block_id="ai-text-gen-id",
                 input_data={
                     "prompt": "Write a haiku",
-                    "model": "gpt-4o-mini",  # correct field name
+                    "model": "gpt-4o-mini",
                 },
             )
 
@@ -347,20 +379,191 @@ class TestRunBlockInputValidation:
|
||||
required_fields=["prompt"],
|
||||
)
|
||||
|
||||
with patch(
|
||||
"backend.copilot.tools.run_block.get_block",
|
||||
return_value=mock_block,
|
||||
with (
|
||||
patch(
|
||||
"backend.copilot.tools.run_block.get_block",
|
||||
return_value=mock_block,
|
||||
),
|
||||
patch(
|
||||
"backend.copilot.tools.helpers.match_credentials_to_requirements",
|
||||
return_value=({}, []),
|
||||
),
|
||||
):
|
||||
tool = RunBlockTool()
|
||||
|
||||
# Only provide valid optional field, missing required 'prompt'
|
||||
response = await tool._execute(
|
||||
user_id=_TEST_USER_ID,
|
||||
session=session,
|
||||
block_id="ai-text-gen-id",
|
||||
input_data={
|
||||
"model": "gpt-4o-mini", # valid but optional
|
||||
"model": "gpt-4o-mini",
|
||||
},
|
||||
)
|
||||
|
||||
assert isinstance(response, BlockDetailsResponse)
|
||||
|
||||
|
||||
class TestRunBlockSensitiveAction:
    """Tests for sensitive action HITL review in RunBlockTool.

    run_block calls is_block_exec_need_review() explicitly before execution.
    When review is needed (should_pause=True), ReviewRequiredResponse is returned.
    """

    @pytest.mark.asyncio(loop_scope="session")
    async def test_sensitive_block_paused_returns_review_required(self):
        """When is_block_exec_need_review returns should_pause=True, ReviewRequiredResponse is returned."""
        session = make_session(user_id=_TEST_USER_ID)

        input_data = {
            "repo_url": "https://github.com/test/repo",
            "branch": "feature-branch",
        }
        mock_block = make_mock_block_with_schema(
            block_id="delete-branch-id",
            name="Delete Branch",
            input_properties={
                "repo_url": {"type": "string"},
                "branch": {"type": "string"},
            },
            required_fields=["repo_url", "branch"],
        )
        mock_block.is_sensitive_action = True
        mock_block.is_block_exec_need_review = AsyncMock(
            return_value=(True, input_data)
        )

        with (
            patch(
                "backend.copilot.tools.run_block.get_block",
                return_value=mock_block,
            ),
            patch(
                "backend.copilot.tools.helpers.match_credentials_to_requirements",
                return_value=({}, []),
            ),
        ):
            tool = RunBlockTool()
            response = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                block_id="delete-branch-id",
                input_data=input_data,
            )

        assert isinstance(response, ReviewRequiredResponse)
        assert "requires human review" in response.message
        assert "continue_run_block" in response.message
        assert response.block_name == "Delete Branch"

    @pytest.mark.asyncio(loop_scope="session")
    async def test_sensitive_block_executes_after_approval(self):
        """After approval (should_pause=False), sensitive blocks execute and return outputs."""
        session = make_session(user_id=_TEST_USER_ID)

        input_data = {
            "repo_url": "https://github.com/test/repo",
            "branch": "feature-branch",
        }
        mock_block = make_mock_block_with_schema(
            block_id="delete-branch-id",
            name="Delete Branch",
            input_properties={
                "repo_url": {"type": "string"},
                "branch": {"type": "string"},
            },
            required_fields=["repo_url", "branch"],
        )
        mock_block.is_sensitive_action = True
        mock_block.is_block_exec_need_review = AsyncMock(
            return_value=(False, input_data)
        )

        async def mock_execute(input_data, **kwargs):
            yield "result", "Branch deleted successfully"

        mock_block.execute = mock_execute

        mock_workspace_db = MagicMock()
        mock_workspace_db.get_or_create_workspace = AsyncMock(
            return_value=MagicMock(id="test-workspace-id")
        )

        with (
            patch(
                "backend.copilot.tools.run_block.get_block",
                return_value=mock_block,
            ),
            patch(
                "backend.copilot.tools.helpers.match_credentials_to_requirements",
                return_value=({}, []),
            ),
            patch(
                "backend.copilot.tools.helpers.workspace_db",
                return_value=mock_workspace_db,
            ),
        ):
            tool = RunBlockTool()
            response = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                block_id="delete-branch-id",
                input_data=input_data,
            )

        assert isinstance(response, BlockOutputResponse)
        assert response.success is True

    @pytest.mark.asyncio(loop_scope="session")
    async def test_non_sensitive_block_executes_normally(self):
        """Non-sensitive blocks skip review and execute directly."""
        session = make_session(user_id=_TEST_USER_ID)

        input_data = {"url": "https://example.com"}
        mock_block = make_mock_block_with_schema(
            block_id="http-request-id",
            name="HTTP Request",
            input_properties={
                "url": {"type": "string"},
            },
            required_fields=["url"],
        )
        mock_block.is_sensitive_action = False
        mock_block.is_block_exec_need_review = AsyncMock(
            return_value=(False, input_data)
        )

        async def mock_execute(input_data, **kwargs):
            yield "response", {"status": 200}

        mock_block.execute = mock_execute

        mock_workspace_db = MagicMock()
        mock_workspace_db.get_or_create_workspace = AsyncMock(
            return_value=MagicMock(id="test-workspace-id")
        )

        with (
            patch(
                "backend.copilot.tools.run_block.get_block",
                return_value=mock_block,
            ),
            patch(
                "backend.copilot.tools.helpers.match_credentials_to_requirements",
                return_value=({}, []),
            ),
            patch(
                "backend.copilot.tools.helpers.workspace_db",
                return_value=mock_workspace_db,
            ),
        ):
            tool = RunBlockTool()
            response = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                block_id="http-request-id",
                input_data=input_data,
            )

        assert isinstance(response, BlockOutputResponse)
        assert response.success is True
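
The three tests above pin down the review gate that `run_block` now applies. Reduced to its essentials, the flow they exercise looks roughly like the sketch below. This is an illustration, not the actual `RunBlockTool._execute` body: the argument list of `is_block_exec_need_review` and the simplified dict returns are assumptions inferred from the mocks.

```python
# Minimal sketch of the HITL gate exercised by the tests above (assumed
# wiring; the real RunBlockTool._execute also resolves credentials, etc.).
async def execute_with_review_gate(block, input_data: dict) -> dict:
    if block.is_sensitive_action:
        # Returns (should_pause, possibly-updated input_data); the exact
        # call signature is an assumption based on the AsyncMock setup.
        should_pause, input_data = await block.is_block_exec_need_review(
            input_data
        )
        if should_pause:
            # Surfaced to the chat UI; once the user approves, the agent
            # calls continue_run_block to actually execute the block.
            return {"type": "review_required", "block_name": block.name}

    outputs = [pair async for pair in block.execute(input_data)]
    return {"type": "block_output", "outputs": outputs, "success": True}
```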

@@ -65,9 +65,8 @@ async def test_run_block_returns_details_when_no_input_provided():
        return_value=http_block,
    ):
        # Mock credentials check to return no missing credentials
        with patch.object(
            RunBlockTool,
            "_resolve_block_credentials",
        with patch(
            "backend.copilot.tools.run_block.resolve_block_credentials",
            new_callable=AsyncMock,
            return_value=({}, []),  # (matched_credentials, missing_credentials)
        ):
@@ -123,9 +122,8 @@ async def test_run_block_returns_details_when_only_credentials_provided():
        "backend.copilot.tools.run_block.get_block",
        return_value=mock,
    ):
        with patch.object(
            RunBlockTool,
            "_resolve_block_credentials",
        with patch(
            "backend.copilot.tools.run_block.resolve_block_credentials",
            new_callable=AsyncMock,
            return_value=(
                {

@@ -116,3 +116,16 @@ def workspace_db():
        workspace_db = get_database_manager_async_client()

    return workspace_db


def review_db():
    if db.is_connected():
        from backend.data import human_review as _review_db

        review_db = _review_db
    else:
        from backend.util.clients import get_database_manager_async_client

        review_db = get_database_manager_async_client()

    return review_db
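
`review_db()` follows the same dual-path pattern as `workspace_db()`: the in-process module when the database is connected, otherwise the DatabaseManager service client. A hypothetical call site (the wrapper name below is illustrative; the delete helper's signature comes from this diff):

```python
# Hypothetical caller: both paths expose the same coroutine, so the call
# site stays agnostic about in-process vs. service-client access.
async def cleanup_consumed_review(node_exec_id: str, user_id: str) -> int:
    return await review_db().delete_review_by_node_exec_id(node_exec_id, user_id)
```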

@@ -79,7 +79,10 @@ from backend.data.graph import (
from backend.data.human_review import (
    cancel_pending_reviews_for_execution,
    check_approval,
    delete_review_by_node_exec_id,
    get_or_create_human_review,
    get_pending_reviews_for_execution,
    get_reviews_by_node_exec_ids,
    has_pending_reviews_for_graph_exec,
    update_review_processed_status,
)
@@ -246,7 +249,10 @@ class DatabaseManager(AppService):
    # ============ Human In The Loop ============ #
    cancel_pending_reviews_for_execution = _(cancel_pending_reviews_for_execution)
    check_approval = _(check_approval)
    delete_review_by_node_exec_id = _(delete_review_by_node_exec_id)
    get_or_create_human_review = _(get_or_create_human_review)
    get_pending_reviews_for_execution = _(get_pending_reviews_for_execution)
    get_reviews_by_node_exec_ids = _(get_reviews_by_node_exec_ids)
    has_pending_reviews_for_graph_exec = _(has_pending_reviews_for_graph_exec)
    update_review_processed_status = _(update_review_processed_status)

@@ -433,7 +439,10 @@ class DatabaseManagerAsyncClient(AppServiceClient):
    # ============ Human In The Loop ============ #
    cancel_pending_reviews_for_execution = d.cancel_pending_reviews_for_execution
    check_approval = d.check_approval
    delete_review_by_node_exec_id = d.delete_review_by_node_exec_id
    get_or_create_human_review = d.get_or_create_human_review
    get_pending_reviews_for_execution = d.get_pending_reviews_for_execution
    get_reviews_by_node_exec_ids = d.get_reviews_by_node_exec_ids
    update_review_processed_status = d.update_review_processed_status

    # ============ User Comms ============ #

@@ -17,6 +17,10 @@ from backend.api.features.executions.review.model import (
    PendingHumanReviewModel,
    SafeJsonData,
)
from backend.copilot.constants import (
    is_copilot_synthetic_id,
    parse_node_id_from_exec_id,
)
from backend.data.execution import get_graph_execution_meta
from backend.util.json import SafeJson

@@ -123,11 +127,13 @@ async def create_auto_approval_record(
    Raises:
        ValueError: If the graph execution doesn't belong to the user
    """
    # Validate that the graph execution belongs to this user (defense in depth)
    graph_exec = await get_graph_execution_meta(
    # Validate ownership: if a graph execution record exists, it must belong
    # to this user. Non-graph executions (e.g. CoPilot) won't have a record.
    if not is_copilot_synthetic_id(
        graph_exec_id
    ) and not await get_graph_execution_meta(
        user_id=user_id, execution_id=graph_exec_id
    )
    if not graph_exec:
    ):
        raise ValueError(
            f"Graph execution {graph_exec_id} not found or doesn't belong to user {user_id}"
        )
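
`backend.copilot.constants` itself is not part of this diff. Based on the ID formats documented in this PR (`copilot-session-{session_id}`, `copilot-node-{block_id}`, and node_exec_ids of the form `{node_id}:{random}`), a plausible minimal shape for the two helpers would be:

```python
# Sketch only: the real module may differ; the prefix constant is assumed.
_COPILOT_PREFIX = "copilot-"


def is_copilot_synthetic_id(some_id: str) -> bool:
    # Matches "copilot-session-{session_id}" and "copilot-node-{block_id}".
    return some_id.startswith(_COPILOT_PREFIX)


def parse_node_id_from_exec_id(node_exec_id: str) -> str:
    # "copilot-node-block-id:abc12345" -> "copilot-node-block-id"
    return node_exec_id.split(":", 1)[0]
```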
@@ -265,7 +271,7 @@ async def get_pending_review_by_node_exec_id(

async def get_reviews_by_node_exec_ids(
    node_exec_ids: list[str], user_id: str
) -> dict[str, "PendingHumanReviewModel"]:
) -> dict[str, PendingHumanReviewModel]:
    """
    Get multiple reviews by their node execution IDs regardless of status.

@@ -292,21 +298,26 @@ async def get_reviews_by_node_exec_ids(
    if not reviews:
        return {}

    # Batch fetch all node executions to avoid N+1 queries
    node_exec_ids_to_fetch = [review.nodeExecId for review in reviews]
    node_execs = await AgentNodeExecution.prisma().find_many(
        where={"id": {"in": node_exec_ids_to_fetch}},
        include={"Node": True},
    )

    # Create mapping from node_exec_id to node_id
    node_exec_id_to_node_id = {
        node_exec.id: node_exec.agentNodeId for node_exec in node_execs
    # Split into synthetic (CoPilot) and real IDs for different resolution paths
    synthetic_ids = {
        r.nodeExecId for r in reviews if is_copilot_synthetic_id(r.nodeExecId)
    }
    real_ids = [r.nodeExecId for r in reviews if r.nodeExecId not in synthetic_ids]

    # Batch fetch real node executions to avoid N+1 queries
    node_exec_id_to_node_id: dict[str, str] = {}
    if real_ids:
        node_execs = await AgentNodeExecution.prisma().find_many(
            where={"id": {"in": real_ids}},
        )
        node_exec_id_to_node_id = {ne.id: ne.agentNodeId for ne in node_execs}

    result = {}
    for review in reviews:
        node_id = node_exec_id_to_node_id.get(review.nodeExecId, review.nodeExecId)
        if review.nodeExecId in synthetic_ids:
            node_id = parse_node_id_from_exec_id(review.nodeExecId)
        else:
            node_id = node_exec_id_to_node_id.get(review.nodeExecId, review.nodeExecId)
        result[review.nodeExecId] = PendingHumanReviewModel.from_db(
            review, node_id=node_id
        )
@@ -331,6 +342,19 @@ async def has_pending_reviews_for_graph_exec(graph_exec_id: str) -> bool:
    return count > 0


async def _resolve_node_id(node_exec_id: str, get_node_execution) -> str:
    """Resolve node_id from a node_exec_id.

    For CoPilot synthetic IDs (e.g. copilot-node-block-id:abc12345),
    extract the node_id portion (copilot-node-block-id).
    For real graph executions, look up the NodeExecution record.
    """
    if is_copilot_synthetic_id(node_exec_id):
        return parse_node_id_from_exec_id(node_exec_id)
    node_exec = await get_node_execution(node_exec_id)
    return node_exec.node_id if node_exec else node_exec_id
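
A worked example of the helper's two paths; the stub lookup stands in for the real NodeExecution query:

```python
async def _demo_resolve() -> None:
    async def no_record(node_exec_id: str):
        return None  # stand-in: pretend no NodeExecution row exists

    # Synthetic CoPilot ID: resolved by string parsing alone.
    assert (
        await _resolve_node_id("copilot-node-block-id:abc12345", no_record)
        == "copilot-node-block-id"
    )
    # Real ID with no record: falls back to the node_exec_id itself.
    assert await _resolve_node_id("real-exec-id", no_record) == "real-exec-id"
```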


async def get_pending_reviews_for_user(
    user_id: str, page: int = 1, page_size: int = 25
) -> list["PendingHumanReviewModel"]:
@@ -361,8 +385,7 @@ async def get_pending_reviews_for_user(
    # Fetch node_id for each review from NodeExecution
    result = []
    for review in reviews:
        node_exec = await get_node_execution(review.nodeExecId)
        node_id = node_exec.node_id if node_exec else review.nodeExecId
        node_id = await _resolve_node_id(review.nodeExecId, get_node_execution)
        result.append(PendingHumanReviewModel.from_db(review, node_id=node_id))

    return result
@@ -370,7 +393,7 @@ async def get_pending_reviews_for_user(

async def get_pending_reviews_for_execution(
    graph_exec_id: str, user_id: str
) -> list["PendingHumanReviewModel"]:
) -> list[PendingHumanReviewModel]:
    """
    Get all pending reviews for a specific graph execution.

@@ -396,8 +419,7 @@ async def get_pending_reviews_for_execution(
    # Fetch node_id for each review from NodeExecution
    result = []
    for review in reviews:
        node_exec = await get_node_execution(review.nodeExecId)
        node_id = node_exec.node_id if node_exec else review.nodeExecId
        node_id = await _resolve_node_id(review.nodeExecId, get_node_execution)
        result.append(PendingHumanReviewModel.from_db(review, node_id=node_id))

    return result
@@ -509,8 +531,12 @@ async def process_all_reviews_for_execution(

    result = {}
    for review in all_result_reviews:
        node_exec = await get_node_execution(review.nodeExecId)
        node_id = node_exec.node_id if node_exec else review.nodeExecId
        if is_copilot_synthetic_id(review.nodeExecId):
            # CoPilot synthetic node_exec_ids encode node_id as "{node_id}:{random}"
            node_id = parse_node_id_from_exec_id(review.nodeExecId)
        else:
            node_exec = await get_node_execution(review.nodeExecId)
            node_id = node_exec.node_id if node_exec else review.nodeExecId
        result[review.nodeExecId] = PendingHumanReviewModel.from_db(
            review, node_id=node_id
        )
@@ -564,3 +590,21 @@ async def cancel_pending_reviews_for_execution(graph_exec_id: str, user_id: str)
        },
    )
    return result


async def delete_review_by_node_exec_id(node_exec_id: str, user_id: str) -> int:
    """Delete a review record by node execution ID after it has been consumed.

    Used by CoPilot's continue_run_block to clean up one-time-use review records
    after successful execution.

    Args:
        node_exec_id: The node execution ID of the review to delete
        user_id: User ID for authorization

    Returns:
        Number of records deleted
    """
    return await PendingHumanReview.prisma().delete_many(
        where={"nodeExecId": node_exec_id, "userId": user_id}
    )
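
How `continue_run_block` presumably consumes the record, sketched from the docstring above; only the delete helper's signature is taken from this diff, the surrounding flow is an assumption:

```python
# Assumed flow: execute the approved block, then burn the one-time record.
async def finish_approved_block(block, node_exec_id: str, user_id: str, input_data):
    outputs = [pair async for pair in block.execute(input_data)]
    await delete_review_by_node_exec_id(node_exec_id, user_id)  # cleanup
    return outputs
```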

@@ -0,0 +1,7 @@
-- Remove GraphExecution foreign key from PendingHumanReview
-- The graphExecId column remains for querying, but we remove the FK constraint
-- to AgentGraphExecution since PendingHumanReview records can now be created
-- with synthetic graph_exec_ids (e.g., CoPilot direct block execution uses
-- "copilot-session-{session_id}" as graph_exec_id).

ALTER TABLE "PendingHumanReview" DROP CONSTRAINT IF EXISTS "PendingHumanReview_graphExecId_fkey";
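
For illustration, why the constraint had to go: a synthetic graph_exec_id has no AgentGraphExecution row, so an FK-checked insert would be rejected. The column names below come from this diff; the `status` value, the random suffix, and the surrounding function are assumptions:

```python
# Sketch: before this migration the create below would raise a foreign-key
# violation, since "copilot-session-..." never exists in AgentGraphExecution.
async def create_copilot_review_row(user_id: str, session_id: str, block_id: str):
    return await PendingHumanReview.prisma().create(
        data={
            "userId": user_id,
            "graphExecId": f"copilot-session-{session_id}",
            "nodeExecId": f"copilot-node-{block_id}:abc12345",  # suffix assumed
            "status": "WAITING",  # assumed enum value
        }
    )
```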
@@ -566,8 +566,6 @@ model AgentGraphExecution {
  shareToken String? @unique
  sharedAt DateTime?

  PendingHumanReviews PendingHumanReview[]

  @@index([agentGraphId, agentGraphVersion])
  @@index([userId, isDeleted, createdAt])
  @@index([createdAt])
@@ -664,8 +662,7 @@ model PendingHumanReview {
  updatedAt DateTime? @updatedAt
  reviewedAt DateTime?

  User           User                @relation(fields: [userId], references: [id], onDelete: Cascade)
  GraphExecution AgentGraphExecution @relation(fields: [graphExecId], references: [id], onDelete: Cascade)
  User User @relation(fields: [userId], references: [id], onDelete: Cascade)

  @@unique([nodeExecId]) // One pending review per node execution
  @@index([userId, status])

@@ -1,3 +1,4 @@
import { useMemo } from "react";
import {
  Conversation,
  ConversationContent,
@@ -8,6 +9,7 @@ import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner
import { FileUIPart, UIDataTypes, UIMessage, UITools } from "ai";
import { TOOL_PART_PREFIX } from "../JobStatsBar/constants";
import { TurnStatsBar } from "../JobStatsBar/TurnStatsBar";
import { CopilotPendingReviews } from "../CopilotPendingReviews/CopilotPendingReviews";
import {
  buildRenderSegments,
  getTurnMessages,
@@ -51,6 +53,50 @@ function renderSegments(
  });
}

/**
 * Extract graph_exec_id from tool outputs that need review.
 * Handles both:
 * - run_block ReviewRequiredResponse (has graph_exec_id directly)
 * - run_agent ExecutionStartedResponse with status "REVIEW" (has execution_id)
 */
function extractGraphExecId(
  messages: UIMessage<unknown, UIDataTypes, UITools>[],
): string | null {
  // Scan backwards — the most recent review output has the ID
  for (let i = messages.length - 1; i >= 0; i--) {
    const msg = messages[i];
    for (const part of msg.parts) {
      if ("output" in part && part.output) {
        const out =
          typeof part.output === "string"
            ? (() => {
                try {
                  return JSON.parse(part.output);
                } catch {
                  return null;
                }
              })()
            : part.output;
        if (out && typeof out === "object") {
          // run_block: ReviewRequiredResponse has graph_exec_id
          if ("graph_exec_id" in out) {
            return (out as { graph_exec_id: string }).graph_exec_id;
          }
          // run_agent: ExecutionStartedResponse with status "REVIEW"
          if (
            "execution_id" in out &&
            "status" in out &&
            (out as { status: string }).status === "REVIEW"
          ) {
            return (out as { execution_id: string }).execution_id;
          }
        }
      }
    }
  }
  return null;
}
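
For reference, the two payload shapes this function discriminates, shown as Python dicts with field names taken from the response models in this diff (values are illustrative):

```python
# run_block pause: ReviewRequiredResponse carries graph_exec_id directly.
review_required = {
    "type": "review_required",
    "block_id": "delete-branch-id",
    "block_name": "Delete Branch",
    "review_id": "rev_123",  # illustrative value
    "graph_exec_id": "copilot-session-0f1e2d3c",
    "input_data": {"branch": "feature-branch"},
}

# run_agent pause: ExecutionStartedResponse with status "REVIEW".
execution_started = {
    "type": "execution_started",
    "execution_id": "real-graph-exec-id",
    "status": "REVIEW",
}
```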

export function ChatMessagesContainer({
  messages,
  status,
@@ -60,6 +106,7 @@ export function ChatMessagesContainer({
  sessionID,
}: Props) {
  const lastMessage = messages[messages.length - 1];
  const graphExecId = useMemo(() => extractGraphExecId(messages), [messages]);

  const hasInflight = (() => {
    if (lastMessage?.role !== "assistant") return false;
@@ -205,6 +252,7 @@ export function ChatMessagesContainer({
          </MessageContent>
        </Message>
      )}
      {graphExecId && <CopilotPendingReviews graphExecId={graphExecId} />}
      {error && (
        <details className="rounded-lg bg-red-50 p-4 text-sm text-red-700">
          <summary className="cursor-pointer font-medium">

@@ -130,6 +130,7 @@ export function MessagePartRenderer({ part, messageID, partIndex }: Props) {
    case "tool-get_doc_page":
      return <SearchDocsTool key={key} part={part as ToolUIPart} />;
    case "tool-run_block":
    case "tool-continue_run_block":
      return <RunBlockTool key={key} part={part as ToolUIPart} />;
    case "tool-run_mcp_tool":
      return <RunMCPToolComponent key={key} part={part as ToolUIPart} />;

@@ -19,6 +19,7 @@ const CUSTOM_TOOL_TYPES = new Set([
  "tool-search_docs",
  "tool-get_doc_page",
  "tool-run_block",
  "tool-continue_run_block",
  "tool-run_mcp_tool",
  "tool-run_agent",
  "tool-schedule_agent",
@@ -33,6 +34,7 @@ const INTERACTIVE_RESPONSE_TYPES: ReadonlySet<string> = new Set([
  ResponseType.setup_requirements,
  ResponseType.agent_details,
  ResponseType.block_details,
  ResponseType.review_required,
  ResponseType.need_login,
  ResponseType.input_validation_error,
  ResponseType.agent_builder_clarification_needed,

@@ -0,0 +1,61 @@
"use client";

import { useCallback } from "react";
import { PendingReviewsList } from "@/components/organisms/PendingReviewsList/PendingReviewsList";
import { useCopilotChatActions } from "../CopilotChatActionsProvider/useCopilotChatActions";
import { usePendingReviewsForExecution } from "@/hooks/usePendingReviews";
import { okData } from "@/app/api/helpers";

interface Props {
  graphExecId: string;
}

/**
 * Renders a single consolidated PendingReviewsList for all pending copilot
 * reviews in a session — mirrors the non-copilot review page behavior.
 * Works for both run_block (synthetic copilot-session-*) and run_agent (real graph exec) reviews.
 */
export function CopilotPendingReviews({ graphExecId }: Props) {
  const { onSend } = useCopilotChatActions();
  const { pendingReviews, refetch } = usePendingReviewsForExecution(
    graphExecId,
    { enabled: !!graphExecId, refetchInterval: 2000 },
  );

  // Graph executions auto-resume after approval; block reviews need continue_run_block.
  const isGraphExecution = !graphExecId.startsWith("copilot-session-");

  const handleReviewComplete = useCallback(async () => {
    // Brief delay for the server to propagate the approval
    await new Promise((resolve) => setTimeout(resolve, 500));
    const result = await refetch();
    const remaining = okData(result.data) || [];

    if (remaining.length > 0) return;

    if (isGraphExecution) {
      onSend(
        `All pending reviews have been processed. ` +
          `The agent execution will resume automatically for approved reviews. ` +
          `Use view_agent_output with execution_id="${graphExecId}" to check the result.`,
      );
    } else {
      onSend(
        `All pending reviews have been processed. ` +
          `For any approved reviews, call continue_run_block with the corresponding review_id to execute them. ` +
          `For rejected reviews, no further action is needed.`,
      );
    }
  }, [refetch, onSend, isGraphExecution, graphExecId]);

  if (pendingReviews.length === 0) return null;

  return (
    <div className="py-2">
      <PendingReviewsList
        reviews={pendingReviews}
        onReviewComplete={handleReviewComplete}
      />
    </div>
  );
}
@@ -15,6 +15,7 @@ import {
  isRunBlockBlockOutput,
  isRunBlockDetailsOutput,
  isRunBlockErrorOutput,
  isRunBlockReviewRequiredOutput,
  isRunBlockSetupRequirementsOutput,
  ToolIcon,
} from "./helpers";
@@ -54,10 +55,15 @@ export function RunBlockTool({ part }: Props) {
    part.state === "output-available" &&
    !!output &&
    !setupRequirementsOutput &&
    !isRunBlockReviewRequiredOutput(output) &&
    (isRunBlockBlockOutput(output) ||
      isRunBlockDetailsOutput(output) ||
      isRunBlockErrorOutput(output));

  // Review UI is rendered at the chat level by CopilotPendingReviews,
  // not inside each tool card. This matches the non-copilot flow where
  // a single PendingReviewsList shows all reviews grouped together.

  return (
    <div className="py-2">
      <div className="flex items-center gap-2 text-sm text-muted-foreground">

@@ -26,6 +26,18 @@ export interface BlockDetailsResponse {
  user_authenticated: boolean;
}

/** Response when a block requires human review before execution. */
export interface ReviewRequiredResponse {
  type: typeof ResponseType.review_required;
  message: string;
  session_id?: string | null;
  block_id: string;
  block_name: string;
  review_id: string;
  graph_exec_id: string;
  input_data: Record<string, unknown>;
}

export interface RunBlockInput {
  block_id?: string;
  block_name?: string;
@@ -36,12 +48,14 @@ export type RunBlockToolOutput =
  | SetupRequirementsResponse
  | BlockDetailsResponse
  | BlockOutputResponse
  | ReviewRequiredResponse
  | ErrorResponse;

const RUN_BLOCK_OUTPUT_TYPES = new Set<string>([
  ResponseType.setup_requirements,
  ResponseType.block_details,
  ResponseType.block_output,
  ResponseType.review_required,
  ResponseType.error,
]);

@@ -66,7 +80,19 @@ export function isRunBlockDetailsOutput(
export function isRunBlockBlockOutput(
  output: RunBlockToolOutput,
): output is BlockOutputResponse {
  return output.type === ResponseType.block_output || "block_id" in output;
  return (
    output.type === ResponseType.block_output ||
    ("block_id" in output && !("review_id" in output))
  );
}

export function isRunBlockReviewRequiredOutput(
  output: RunBlockToolOutput,
): output is ReviewRequiredResponse {
  return (
    output.type === ResponseType.review_required ||
    ("review_id" in output && "block_name" in output && "input_data" in output)
  );
}

export function isRunBlockErrorOutput(
@@ -91,6 +117,7 @@ function parseOutput(output: unknown): RunBlockToolOutput | null {
  if (typeof type === "string" && RUN_BLOCK_OUTPUT_TYPES.has(type)) {
    return output as RunBlockToolOutput;
  }
  if ("review_id" in output) return output as ReviewRequiredResponse;
  if ("block_id" in output) return output as BlockOutputResponse;
  if ("block" in output) return output as BlockDetailsResponse;
  if ("setup_info" in output) return output as SetupRequirementsResponse;
@@ -135,6 +162,9 @@ export function getAnimationText(part: {
      if (isRunBlockSetupRequirementsOutput(output)) {
        return `Setup needed for "${output.setup_info.agent_name}"`;
      }
      if (isRunBlockReviewRequiredOutput(output)) {
        return `Review needed for "${output.block_name}"`;
      }
      return "Error running block";
    }
    case "output-error":
@@ -227,6 +257,14 @@ export function getAccordionMeta(output: RunBlockToolOutput): {
    };
  }

  if (isRunBlockReviewRequiredOutput(output)) {
    return {
      icon,
      title: output.block_name,
      description: "Sensitive action — awaiting review",
    };
  }

  return {
    icon: (
      <WarningDiamondIcon size={32} weight="light" className="text-red-500" />

@@ -9,52 +9,52 @@
/**
 * Types of tool responses.
 */
export type ResponseType = typeof ResponseType[keyof typeof ResponseType];

export type ResponseType = (typeof ResponseType)[keyof typeof ResponseType];

// eslint-disable-next-line @typescript-eslint/no-redeclare
export const ResponseType = {
  error: 'error',
  no_results: 'no_results',
  need_login: 'need_login',
  agents_found: 'agents_found',
  agent_details: 'agent_details',
  setup_requirements: 'setup_requirements',
  input_validation_error: 'input_validation_error',
  execution_started: 'execution_started',
  agent_output: 'agent_output',
  understanding_updated: 'understanding_updated',
  suggested_goal: 'suggested_goal',
  agent_builder_guide: 'agent_builder_guide',
  agent_builder_preview: 'agent_builder_preview',
  agent_builder_saved: 'agent_builder_saved',
  agent_builder_clarification_needed: 'agent_builder_clarification_needed',
  agent_builder_validation_result: 'agent_builder_validation_result',
  agent_builder_fix_result: 'agent_builder_fix_result',
  block_list: 'block_list',
  block_details: 'block_details',
  block_output: 'block_output',
  mcp_guide: 'mcp_guide',
  mcp_tools_discovered: 'mcp_tools_discovered',
  mcp_tool_output: 'mcp_tool_output',
  doc_search_results: 'doc_search_results',
  doc_page: 'doc_page',
  workspace_file_list: 'workspace_file_list',
  workspace_file_content: 'workspace_file_content',
  workspace_file_metadata: 'workspace_file_metadata',
  workspace_file_written: 'workspace_file_written',
  workspace_file_deleted: 'workspace_file_deleted',
  folder_created: 'folder_created',
  folder_list: 'folder_list',
  folder_updated: 'folder_updated',
  folder_moved: 'folder_moved',
  folder_deleted: 'folder_deleted',
  agents_moved_to_folder: 'agents_moved_to_folder',
  browser_navigate: 'browser_navigate',
  browser_act: 'browser_act',
  browser_screenshot: 'browser_screenshot',
  bash_exec: 'bash_exec',
  web_fetch: 'web_fetch',
  feature_request_search: 'feature_request_search',
  feature_request_created: 'feature_request_created',
  error: "error",
  no_results: "no_results",
  need_login: "need_login",
  agents_found: "agents_found",
  agent_details: "agent_details",
  setup_requirements: "setup_requirements",
  input_validation_error: "input_validation_error",
  execution_started: "execution_started",
  agent_output: "agent_output",
  understanding_updated: "understanding_updated",
  suggested_goal: "suggested_goal",
  agent_builder_guide: "agent_builder_guide",
  agent_builder_preview: "agent_builder_preview",
  agent_builder_saved: "agent_builder_saved",
  agent_builder_clarification_needed: "agent_builder_clarification_needed",
  agent_builder_validation_result: "agent_builder_validation_result",
  agent_builder_fix_result: "agent_builder_fix_result",
  block_list: "block_list",
  block_details: "block_details",
  block_output: "block_output",
  review_required: "review_required",
  mcp_guide: "mcp_guide",
  mcp_tools_discovered: "mcp_tools_discovered",
  mcp_tool_output: "mcp_tool_output",
  doc_search_results: "doc_search_results",
  doc_page: "doc_page",
  workspace_file_list: "workspace_file_list",
  workspace_file_content: "workspace_file_content",
  workspace_file_metadata: "workspace_file_metadata",
  workspace_file_written: "workspace_file_written",
  workspace_file_deleted: "workspace_file_deleted",
  folder_created: "folder_created",
  folder_list: "folder_list",
  folder_updated: "folder_updated",
  folder_moved: "folder_moved",
  folder_deleted: "folder_deleted",
  agents_moved_to_folder: "agents_moved_to_folder",
  browser_navigate: "browser_navigate",
  browser_act: "browser_act",
  browser_screenshot: "browser_screenshot",
  bash_exec: "bash_exec",
  web_fetch: "web_fetch",
  feature_request_search: "feature_request_search",
  feature_request_created: "feature_request_created",
} as const;

@@ -11557,6 +11557,7 @@
        "block_list",
        "block_details",
        "block_output",
        "review_required",
        "mcp_guide",
        "mcp_tools_discovered",
        "mcp_tool_output",