Compare commits

...

13 Commits

Author SHA1 Message Date
Zamil Majdy
dfd7c64068 feat(backend): Implement node-specific auto-approval using key pattern
- Add auto-approval via special nodeExecId key pattern (auto_approve_{graph_exec_id}_{node_id})
- Create auto-approval records in PendingHumanReview when user approves with auto-approve flag
- Check for existing auto-approval before requiring human review
- Remove node_id parameter from get_or_create_human_review
- Load graph settings properly when resuming execution after review
2026-01-21 22:21:00 -05:00
Zamil Majdy
02089bc047 fix(frontend): Add polling for pending reviews badge to update in real-time
- Add refetchInterval to execution details query to poll while running/review
- Add polling support to usePendingReviewsForExecution hook
- Poll pending reviews every 2 seconds when execution is in REVIEW status
- This ensures the "X Reviews Pending" badge updates without page refresh
2026-01-21 21:08:10 -05:00
Zamil Majdy
bed7b356bb fix(frontend): Reset card data when auto-approve toggle changes
Include autoApproveFuture in the key prop to force PendingReviewCard
to remount when the toggle changes, which resets its internal state
to the original payload data.
2026-01-21 21:04:56 -05:00
Zamil Majdy
4efc0ff502 fix(migration): Correct migration to only drop FK constraint, not non-existent column
The nodeId column was never added to PendingHumanReview. The migration
should only drop the foreign key constraint linking nodeExecId to
AgentNodeExecution, not try to drop a column that doesn't exist.
2026-01-21 20:13:41 -05:00
Zamil Majdy
4ad0528257 feat(hitl): Simplify auto-approval with toggle UX and remove node_id storage
- Remove nodeId column from PendingHumanReview schema (use in-memory tracking)
- Remove foreign key relation from PendingHumanReview to AgentNodeExecution
- Use ExecutionContext.auto_approved_node_ids for auto-approval tracking
- Add auto-approve toggle in frontend (default off)
- When toggle enabled: disable editing and use original data
- Backend looks up agentNodeId from AgentNodeExecution when auto-approving
- Update tests to reflect schema changes
2026-01-21 19:57:11 -05:00
Zamil Majdy
2f440ee80a Merge branch 'dev' into feat/sensitive-action-features 2026-01-21 19:08:32 -05:00
Zamil Majdy
2a55923ec0 Merge dev to get GraphSettings fix 2026-01-21 09:31:17 -05:00
Zamil Majdy
ad50f57a2b chore: add migration for nodeId field in PendingHumanReview
Adds a database migration to add the nodeId column, which tracks
the node ID in the graph definition for auto-approval tracking.
2026-01-20 23:03:03 -05:00
Zamil Majdy
aebd961ef5 fix: implement node-specific auto-approval for human reviews
Instead of disabling all safe modes when approving all future actions,
now tracks specific node IDs that should be auto-approved. This means
clicking "Approve all future actions" will only auto-approve future
reviews from the same blocks, not all reviews.

Changes:
- Add nodeId field to PendingHumanReview schema
- Add auto_approved_node_ids set to ExecutionContext
- Update review helper to check auto_approved_node_ids
- Change API from disable_future_reviews to auto_approve_node_ids
- Update frontend to pass node_ids when bulk approving
- Address PR feedback: remove barrel file, JSDoc comments, and cleanup
2026-01-20 22:15:51 -05:00
Zamil Majdy
bcccaa16cc fix: remove unused props from AIAgentSafetyPopup component
Removes the hasSensitiveAction and hasHumanInTheLoop props, which were only
used by the hook, not the component itself, fixing the ESLint unused-vars error.
2026-01-20 21:05:39 -05:00
Zamil Majdy
d5ddc41b18 feat: add bulk approval option for human reviews
Add "Approve all future actions" button to the review UI that:
- Approves all current pending reviews
- Disables safe mode for the remainder of the execution run
- Shows helper text about turning auto-approval on/off in settings

Backend changes:
- Add disable_future_reviews flag to ReviewRequest model
- Pass ExecutionContext with disabled safe modes when resuming

Frontend changes:
- Add "Approve all future actions" button to PendingReviewsList
- Include helper text per PRD requirements

Implements SECRT-1795
2026-01-20 20:45:50 -05:00
Zamil Majdy
95eab5b7eb feat: add one-time safety popup for AI-generated agent runs
Show a one-time safety popup the first time a user runs an agent with
sensitive actions or human-in-the-loop blocks. The popup explains that
agents may take real-world actions and that safety checks are enabled.

- Add AI_AGENT_SAFETY_POPUP_SHOWN localStorage key
- Create AIAgentSafetyPopup component with hook
- Integrate popup into RunAgentModal before first run

Implements SECRT-1798
2026-01-20 20:40:18 -05:00
Zamil Majdy
832d6e1696 fix: correct safe mode checks for sensitive action blocks
- Add skip_safe_mode_check parameter to HITLReviewHelper to avoid
  checking the wrong safe mode flag for sensitive action blocks
- Simplify SafeModeToggle and FloatingSafeModeToggle by removing
  unnecessary intermediate variables and isHITLStateUndetermined checks
2026-01-20 20:33:55 -05:00
20 changed files with 761 additions and 67 deletions

View File

@@ -179,6 +179,14 @@ class ReviewRequest(BaseModel):
reviews: List[ReviewItem] = Field( reviews: List[ReviewItem] = Field(
description="All reviews with their approval status, data, and messages" description="All reviews with their approval status, data, and messages"
) )
auto_approve_future_actions: bool = Field(
default=False,
description=(
"If true, future reviews from the same blocks (nodes) being approved "
"will be automatically approved for the remainder of this execution. "
"This only affects the current execution run."
),
)
@model_validator(mode="after") @model_validator(mode="after")
def validate_review_completeness(self): def validate_review_completeness(self):

View File

@@ -490,3 +490,321 @@ def test_process_review_action_invalid_node_exec_id(
# Should be a 400 Bad Request, not 500 Internal Server Error # Should be a 400 Bad Request, not 500 Internal Server Error
assert response.status_code == 400 assert response.status_code == 400
assert "Invalid node execution ID format" in response.json()["detail"] assert "Invalid node execution ID format" in response.json()["detail"]
def test_process_review_action_auto_approve_creates_auto_approval_records(
mocker: pytest_mock.MockerFixture,
sample_pending_review: PendingHumanReviewModel,
test_user_id: str,
) -> None:
"""Test that auto_approve_future_actions flag creates auto-approval records"""
from backend.data.execution import ExecutionContext, NodeExecutionResult
from backend.data.graph import GraphSettings
# Mock process_all_reviews
mock_process_all_reviews = mocker.patch(
"backend.api.features.executions.review.routes.process_all_reviews_for_execution"
)
approved_review = PendingHumanReviewModel(
node_exec_id="test_node_123",
user_id=test_user_id,
graph_exec_id="test_graph_exec_456",
graph_id="test_graph_789",
graph_version=1,
payload={"data": "test payload"},
instructions="Please review",
editable=True,
status=ReviewStatus.APPROVED,
review_message="Approved",
was_edited=False,
processed=False,
created_at=FIXED_NOW,
updated_at=FIXED_NOW,
reviewed_at=FIXED_NOW,
)
mock_process_all_reviews.return_value = {"test_node_123": approved_review}
# Mock get_node_execution to return node_id
mock_get_node_execution = mocker.patch(
"backend.api.features.executions.review.routes.get_node_execution"
)
mock_node_exec = mocker.Mock(spec=NodeExecutionResult)
mock_node_exec.node_id = "test_node_def_456"
mock_get_node_execution.return_value = mock_node_exec
# Mock create_auto_approval_record
mock_create_auto_approval = mocker.patch(
"backend.api.features.executions.review.routes.create_auto_approval_record"
)
# Mock has_pending_reviews_for_graph_exec
mock_has_pending = mocker.patch(
"backend.api.features.executions.review.routes.has_pending_reviews_for_graph_exec"
)
mock_has_pending.return_value = False
# Mock get_graph_settings to return custom settings
mock_get_settings = mocker.patch(
"backend.api.features.executions.review.routes.get_graph_settings"
)
mock_get_settings.return_value = GraphSettings(
human_in_the_loop_safe_mode=True,
sensitive_action_safe_mode=True,
)
# Mock add_graph_execution
mock_add_execution = mocker.patch(
"backend.api.features.executions.review.routes.add_graph_execution"
)
request_data = {
"reviews": [
{
"node_exec_id": "test_node_123",
"approved": True,
"message": "Approved",
}
],
"auto_approve_future_actions": True,
}
response = client.post("/api/review/action", json=request_data)
assert response.status_code == 200
# Verify process_all_reviews_for_execution was called (without auto_approve param)
mock_process_all_reviews.assert_called_once()
# Verify create_auto_approval_record was called for the approved review
mock_create_auto_approval.assert_called_once_with(
user_id=test_user_id,
graph_exec_id="test_graph_exec_456",
graph_id="test_graph_789",
graph_version=1,
node_id="test_node_def_456",
payload={"data": "test payload"},
)
# Verify get_graph_settings was called with correct parameters
mock_get_settings.assert_called_once_with(
user_id=test_user_id, graph_id="test_graph_789"
)
# Verify add_graph_execution was called with proper ExecutionContext
mock_add_execution.assert_called_once()
call_kwargs = mock_add_execution.call_args.kwargs
execution_context = call_kwargs["execution_context"]
assert isinstance(execution_context, ExecutionContext)
assert execution_context.human_in_the_loop_safe_mode is True
assert execution_context.sensitive_action_safe_mode is True
def test_process_review_action_without_auto_approve_still_loads_settings(
mocker: pytest_mock.MockerFixture,
sample_pending_review: PendingHumanReviewModel,
test_user_id: str,
) -> None:
"""Test that execution context is created with settings even without auto-approve"""
from backend.data.execution import ExecutionContext
from backend.data.graph import GraphSettings
# Mock process_all_reviews
mock_process_all_reviews = mocker.patch(
"backend.api.features.executions.review.routes.process_all_reviews_for_execution"
)
approved_review = PendingHumanReviewModel(
node_exec_id="test_node_123",
user_id=test_user_id,
graph_exec_id="test_graph_exec_456",
graph_id="test_graph_789",
graph_version=1,
payload={"data": "test payload"},
instructions="Please review",
editable=True,
status=ReviewStatus.APPROVED,
review_message="Approved",
was_edited=False,
processed=False,
created_at=FIXED_NOW,
updated_at=FIXED_NOW,
reviewed_at=FIXED_NOW,
)
mock_process_all_reviews.return_value = {"test_node_123": approved_review}
# Mock create_auto_approval_record - should NOT be called when auto_approve is False
mock_create_auto_approval = mocker.patch(
"backend.api.features.executions.review.routes.create_auto_approval_record"
)
# Mock has_pending_reviews_for_graph_exec
mock_has_pending = mocker.patch(
"backend.api.features.executions.review.routes.has_pending_reviews_for_graph_exec"
)
mock_has_pending.return_value = False
# Mock get_graph_settings with sensitive_action_safe_mode enabled
mock_get_settings = mocker.patch(
"backend.api.features.executions.review.routes.get_graph_settings"
)
mock_get_settings.return_value = GraphSettings(
human_in_the_loop_safe_mode=False,
sensitive_action_safe_mode=True,
)
# Mock add_graph_execution
mock_add_execution = mocker.patch(
"backend.api.features.executions.review.routes.add_graph_execution"
)
# Request WITHOUT auto_approve_future_actions
request_data = {
"reviews": [
{
"node_exec_id": "test_node_123",
"approved": True,
"message": "Approved",
}
],
"auto_approve_future_actions": False,
}
response = client.post("/api/review/action", json=request_data)
assert response.status_code == 200
# Verify process_all_reviews_for_execution was called
mock_process_all_reviews.assert_called_once()
# Verify create_auto_approval_record was NOT called (auto_approve_future_actions=False)
mock_create_auto_approval.assert_not_called()
# Verify settings were loaded
mock_get_settings.assert_called_once()
# Verify ExecutionContext has proper settings
mock_add_execution.assert_called_once()
call_kwargs = mock_add_execution.call_args.kwargs
execution_context = call_kwargs["execution_context"]
assert isinstance(execution_context, ExecutionContext)
assert execution_context.human_in_the_loop_safe_mode is False
assert execution_context.sensitive_action_safe_mode is True
def test_process_review_action_auto_approve_only_applies_to_approved_reviews(
mocker: pytest_mock.MockerFixture,
test_user_id: str,
) -> None:
"""Test that auto_approve record is created only for approved reviews"""
from backend.data.execution import ExecutionContext, NodeExecutionResult
from backend.data.graph import GraphSettings
# Create two reviews - one approved, one rejected
approved_review = PendingHumanReviewModel(
node_exec_id="node_exec_approved",
user_id=test_user_id,
graph_exec_id="test_graph_exec_456",
graph_id="test_graph_789",
graph_version=1,
payload={"data": "approved"},
instructions="Review",
editable=True,
status=ReviewStatus.APPROVED,
review_message=None,
was_edited=False,
processed=False,
created_at=FIXED_NOW,
updated_at=FIXED_NOW,
reviewed_at=FIXED_NOW,
)
rejected_review = PendingHumanReviewModel(
node_exec_id="node_exec_rejected",
user_id=test_user_id,
graph_exec_id="test_graph_exec_456",
graph_id="test_graph_789",
graph_version=1,
payload={"data": "rejected"},
instructions="Review",
editable=True,
status=ReviewStatus.REJECTED,
review_message="Rejected",
was_edited=False,
processed=False,
created_at=FIXED_NOW,
updated_at=FIXED_NOW,
reviewed_at=FIXED_NOW,
)
# Mock process_all_reviews
mock_process_all_reviews = mocker.patch(
"backend.api.features.executions.review.routes.process_all_reviews_for_execution"
)
mock_process_all_reviews.return_value = {
"node_exec_approved": approved_review,
"node_exec_rejected": rejected_review,
}
# Mock get_node_execution to return node_id (only called for approved review)
mock_get_node_execution = mocker.patch(
"backend.api.features.executions.review.routes.get_node_execution"
)
mock_node_exec = mocker.Mock(spec=NodeExecutionResult)
mock_node_exec.node_id = "test_node_def_approved"
mock_get_node_execution.return_value = mock_node_exec
# Mock create_auto_approval_record
mock_create_auto_approval = mocker.patch(
"backend.api.features.executions.review.routes.create_auto_approval_record"
)
# Mock has_pending_reviews_for_graph_exec
mock_has_pending = mocker.patch(
"backend.api.features.executions.review.routes.has_pending_reviews_for_graph_exec"
)
mock_has_pending.return_value = False
# Mock get_graph_settings
mock_get_settings = mocker.patch(
"backend.api.features.executions.review.routes.get_graph_settings"
)
mock_get_settings.return_value = GraphSettings()
# Mock add_graph_execution
mock_add_execution = mocker.patch(
"backend.api.features.executions.review.routes.add_graph_execution"
)
request_data = {
"reviews": [
{"node_exec_id": "node_exec_approved", "approved": True},
{"node_exec_id": "node_exec_rejected", "approved": False},
],
"auto_approve_future_actions": True,
}
response = client.post("/api/review/action", json=request_data)
assert response.status_code == 200
# Verify process_all_reviews_for_execution was called
mock_process_all_reviews.assert_called_once()
# Verify create_auto_approval_record was called ONLY for the approved review
# (not for the rejected one)
mock_create_auto_approval.assert_called_once_with(
user_id=test_user_id,
graph_exec_id="test_graph_exec_456",
graph_id="test_graph_789",
graph_version=1,
node_id="test_node_def_approved",
payload={"data": "approved"},
)
# Verify get_node_execution was called only for approved review
mock_get_node_execution.assert_called_once_with("node_exec_approved")
# Verify ExecutionContext was created (auto-approval is now DB-based)
call_kwargs = mock_add_execution.call_args.kwargs
execution_context = call_kwargs["execution_context"]
assert isinstance(execution_context, ExecutionContext)

View File

@@ -5,8 +5,14 @@ import autogpt_libs.auth as autogpt_auth_lib
from fastapi import APIRouter, HTTPException, Query, Security, status from fastapi import APIRouter, HTTPException, Query, Security, status
from prisma.enums import ReviewStatus from prisma.enums import ReviewStatus
from backend.data.execution import get_graph_execution_meta from backend.data.execution import (
ExecutionContext,
get_graph_execution_meta,
get_node_execution,
)
from backend.data.graph import get_graph_settings
from backend.data.human_review import ( from backend.data.human_review import (
create_auto_approval_record,
get_pending_reviews_for_execution, get_pending_reviews_for_execution,
get_pending_reviews_for_user, get_pending_reviews_for_user,
has_pending_reviews_for_graph_exec, has_pending_reviews_for_graph_exec,
@@ -128,14 +134,20 @@ async def process_review_action(
) )
# Build review decisions map # Build review decisions map
# When auto_approve_future_actions is true, ignore any edited data
# (auto-approved reviews should use original data for consistency)
review_decisions = {} review_decisions = {}
for review in request.reviews: for review in request.reviews:
review_status = ( review_status = (
ReviewStatus.APPROVED if review.approved else ReviewStatus.REJECTED ReviewStatus.APPROVED if review.approved else ReviewStatus.REJECTED
) )
# If auto-approving future actions, don't allow data modifications
reviewed_data = (
None if request.auto_approve_future_actions else review.reviewed_data
)
review_decisions[review.node_exec_id] = ( review_decisions[review.node_exec_id] = (
review_status, review_status,
review.reviewed_data, reviewed_data,
review.message, review.message,
) )
@@ -145,6 +157,22 @@ async def process_review_action(
review_decisions=review_decisions, review_decisions=review_decisions,
) )
# Create auto-approval records for approved reviews if requested
if request.auto_approve_future_actions:
for node_exec_id, review in updated_reviews.items():
if review.status == ReviewStatus.APPROVED:
# Look up the node_id from the node execution
node_exec = await get_node_execution(node_exec_id)
if node_exec:
await create_auto_approval_record(
user_id=user_id,
graph_exec_id=review.graph_exec_id,
graph_id=review.graph_id,
graph_version=review.graph_version,
node_id=node_exec.node_id,
payload=review.payload,
)
# Count results # Count results
approved_count = sum( approved_count = sum(
1 1
@@ -169,10 +197,24 @@ async def process_review_action(
if not still_has_pending: if not still_has_pending:
# Resume execution # Resume execution
try: try:
# Load graph settings to create proper execution context
settings = await get_graph_settings(
user_id=user_id, graph_id=first_review.graph_id
)
# Create execution context with settings
# Note: auto-approval is now handled via database lookup in
# check_auto_approval(), no need to pass auto_approved_node_ids
execution_context = ExecutionContext(
human_in_the_loop_safe_mode=settings.human_in_the_loop_safe_mode,
sensitive_action_safe_mode=settings.sensitive_action_safe_mode,
)
await add_graph_execution( await add_graph_execution(
graph_id=first_review.graph_id, graph_id=first_review.graph_id,
user_id=user_id, user_id=user_id,
graph_exec_id=graph_exec_id, graph_exec_id=graph_exec_id,
execution_context=execution_context,
) )
logger.info(f"Resumed execution {graph_exec_id}") logger.info(f"Resumed execution {graph_exec_id}")
except Exception as e: except Exception as e:

View File

@@ -116,6 +116,7 @@ class PrintToConsoleBlock(Block):
input_schema=PrintToConsoleBlock.Input, input_schema=PrintToConsoleBlock.Input,
output_schema=PrintToConsoleBlock.Output, output_schema=PrintToConsoleBlock.Output,
test_input={"text": "Hello, World!"}, test_input={"text": "Hello, World!"},
is_sensitive_action=True,
test_output=[ test_output=[
("output", "Hello, World!"), ("output", "Hello, World!"),
("status", "printed"), ("status", "printed"),

View File

@@ -10,7 +10,7 @@ from prisma.enums import ReviewStatus
from pydantic import BaseModel from pydantic import BaseModel
from backend.data.execution import ExecutionContext, ExecutionStatus from backend.data.execution import ExecutionContext, ExecutionStatus
from backend.data.human_review import ReviewResult from backend.data.human_review import ReviewResult, check_auto_approval
from backend.executor.manager import async_update_node_execution_status from backend.executor.manager import async_update_node_execution_status
from backend.util.clients import get_database_manager_async_client from backend.util.clients import get_database_manager_async_client
@@ -55,6 +55,7 @@ class HITLReviewHelper:
async def _handle_review_request( async def _handle_review_request(
input_data: Any, input_data: Any,
user_id: str, user_id: str,
node_id: str,
node_exec_id: str, node_exec_id: str,
graph_exec_id: str, graph_exec_id: str,
graph_id: str, graph_id: str,
@@ -69,6 +70,7 @@ class HITLReviewHelper:
Args: Args:
input_data: The input data to be reviewed input_data: The input data to be reviewed
user_id: ID of the user requesting the review user_id: ID of the user requesting the review
node_id: ID of the node in the graph definition
node_exec_id: ID of the node execution node_exec_id: ID of the node execution
graph_exec_id: ID of the graph execution graph_exec_id: ID of the graph execution
graph_id: ID of the graph graph_id: ID of the graph
@@ -83,15 +85,27 @@ class HITLReviewHelper:
Raises: Raises:
Exception: If review creation or status update fails Exception: If review creation or status update fails
""" """
# Skip review if safe mode is disabled - return auto-approved result # Note: Safe mode checks (human_in_the_loop_safe_mode, sensitive_action_safe_mode)
if not execution_context.human_in_the_loop_safe_mode: # are handled by the caller:
# - HITL blocks check human_in_the_loop_safe_mode in their run() method
# - Sensitive action blocks check sensitive_action_safe_mode in is_block_exec_need_review()
# This function only handles auto-approval for specific nodes.
# Check if this node has been auto-approved in a previous review
auto_approval = await check_auto_approval(
graph_exec_id=graph_exec_id,
node_id=node_id,
)
if auto_approval:
logger.info( logger.info(
f"Block {block_name} skipping review for node {node_exec_id} - safe mode disabled" f"Block {block_name} skipping review for node {node_exec_id} - "
f"node {node_id} has auto-approval from previous review"
) )
# Return a new ReviewResult with the current node_exec_id but approved status
return ReviewResult( return ReviewResult(
data=input_data, data=input_data,
status=ReviewStatus.APPROVED, status=ReviewStatus.APPROVED,
message="Auto-approved (safe mode disabled)", message="Auto-approved (user approved all future actions for this block)",
processed=True, processed=True,
node_exec_id=node_exec_id, node_exec_id=node_exec_id,
) )
@@ -129,6 +143,7 @@ class HITLReviewHelper:
async def handle_review_decision( async def handle_review_decision(
input_data: Any, input_data: Any,
user_id: str, user_id: str,
node_id: str,
node_exec_id: str, node_exec_id: str,
graph_exec_id: str, graph_exec_id: str,
graph_id: str, graph_id: str,
@@ -143,6 +158,7 @@ class HITLReviewHelper:
Args: Args:
input_data: The input data to be reviewed input_data: The input data to be reviewed
user_id: ID of the user requesting the review user_id: ID of the user requesting the review
node_id: ID of the node in the graph definition
node_exec_id: ID of the node execution node_exec_id: ID of the node execution
graph_exec_id: ID of the graph execution graph_exec_id: ID of the graph execution
graph_id: ID of the graph graph_id: ID of the graph
@@ -158,6 +174,7 @@ class HITLReviewHelper:
review_result = await HITLReviewHelper._handle_review_request( review_result = await HITLReviewHelper._handle_review_request(
input_data=input_data, input_data=input_data,
user_id=user_id, user_id=user_id,
node_id=node_id,
node_exec_id=node_exec_id, node_exec_id=node_exec_id,
graph_exec_id=graph_exec_id, graph_exec_id=graph_exec_id,
graph_id=graph_id, graph_id=graph_id,

View File

@@ -97,6 +97,7 @@ class HumanInTheLoopBlock(Block):
input_data: Input, input_data: Input,
*, *,
user_id: str, user_id: str,
node_id: str,
node_exec_id: str, node_exec_id: str,
graph_exec_id: str, graph_exec_id: str,
graph_id: str, graph_id: str,
@@ -115,6 +116,7 @@ class HumanInTheLoopBlock(Block):
decision = await self.handle_review_decision( decision = await self.handle_review_decision(
input_data=input_data.data, input_data=input_data.data,
user_id=user_id, user_id=user_id,
node_id=node_id,
node_exec_id=node_exec_id, node_exec_id=node_exec_id,
graph_exec_id=graph_exec_id, graph_exec_id=graph_exec_id,
graph_id=graph_id, graph_id=graph_id,

View File

@@ -441,6 +441,7 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
static_output: bool = False, static_output: bool = False,
block_type: BlockType = BlockType.STANDARD, block_type: BlockType = BlockType.STANDARD,
webhook_config: Optional[BlockWebhookConfig | BlockManualWebhookConfig] = None, webhook_config: Optional[BlockWebhookConfig | BlockManualWebhookConfig] = None,
is_sensitive_action: bool = False,
): ):
""" """
Initialize the block with the given schema. Initialize the block with the given schema.
@@ -473,8 +474,8 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
self.static_output = static_output self.static_output = static_output
self.block_type = block_type self.block_type = block_type
self.webhook_config = webhook_config self.webhook_config = webhook_config
self.is_sensitive_action = is_sensitive_action
self.execution_stats: NodeExecutionStats = NodeExecutionStats() self.execution_stats: NodeExecutionStats = NodeExecutionStats()
self.is_sensitive_action: bool = False
if self.webhook_config: if self.webhook_config:
if isinstance(self.webhook_config, BlockWebhookConfig): if isinstance(self.webhook_config, BlockWebhookConfig):
@@ -622,6 +623,7 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
input_data: BlockInput, input_data: BlockInput,
*, *,
user_id: str, user_id: str,
node_id: str,
node_exec_id: str, node_exec_id: str,
graph_exec_id: str, graph_exec_id: str,
graph_id: str, graph_id: str,
@@ -648,6 +650,7 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
decision = await HITLReviewHelper.handle_review_decision( decision = await HITLReviewHelper.handle_review_decision(
input_data=input_data, input_data=input_data,
user_id=user_id, user_id=user_id,
node_id=node_id,
node_exec_id=node_exec_id, node_exec_id=node_exec_id,
graph_exec_id=graph_exec_id, graph_exec_id=graph_exec_id,
graph_id=graph_id, graph_id=graph_id,

View File

@@ -32,6 +32,87 @@ class ReviewResult(BaseModel):
node_exec_id: str node_exec_id: str
def get_auto_approve_key(graph_exec_id: str, node_id: str) -> str:
"""Generate the special nodeExecId key for auto-approval records."""
return f"auto_approve_{graph_exec_id}_{node_id}"
async def check_auto_approval(
graph_exec_id: str,
node_id: str,
) -> Optional[ReviewResult]:
"""
Check if there's an existing auto-approval record for this node in this execution.
Auto-approval records are stored as PendingHumanReview entries with a special
nodeExecId pattern: "auto_approve_{graph_exec_id}_{node_id}"
Args:
graph_exec_id: ID of the graph execution
node_id: ID of the node definition (not execution)
Returns:
ReviewResult if auto-approval found, None otherwise
"""
auto_approve_key = get_auto_approve_key(graph_exec_id, node_id)
# Look for the auto-approval record by its special key
auto_approved_review = await PendingHumanReview.prisma().find_unique(
where={"nodeExecId": auto_approve_key},
)
if auto_approved_review and auto_approved_review.status == ReviewStatus.APPROVED:
logger.info(
f"Found auto-approval for node {node_id} in execution {graph_exec_id}"
)
return ReviewResult(
data=auto_approved_review.payload,
status=ReviewStatus.APPROVED,
message="Auto-approved (user approved all future actions for this block)",
processed=True,
node_exec_id=auto_approve_key,
)
return None
async def create_auto_approval_record(
user_id: str,
graph_exec_id: str,
graph_id: str,
graph_version: int,
node_id: str,
payload: SafeJsonData,
) -> None:
"""
Create an auto-approval record for a node in this execution.
This is stored as a PendingHumanReview with a special nodeExecId pattern
and status=APPROVED, so future executions of the same node can skip review.
"""
auto_approve_key = get_auto_approve_key(graph_exec_id, node_id)
await PendingHumanReview.prisma().upsert(
where={"nodeExecId": auto_approve_key},
data={
"create": {
"nodeExecId": auto_approve_key,
"userId": user_id,
"graphExecId": graph_exec_id,
"graphId": graph_id,
"graphVersion": graph_version,
"payload": SafeJson(payload),
"instructions": "Auto-approval record",
"editable": False,
"status": ReviewStatus.APPROVED,
"processed": True,
"reviewedAt": datetime.now(timezone.utc),
},
"update": {}, # Already exists, no update needed
},
)
async def get_or_create_human_review( async def get_or_create_human_review(
user_id: str, user_id: str,
node_exec_id: str, node_exec_id: str,

View File

@@ -46,8 +46,8 @@ async def test_get_or_create_human_review_new(
sample_db_review.status = ReviewStatus.WAITING sample_db_review.status = ReviewStatus.WAITING
sample_db_review.processed = False sample_db_review.processed = False
mock_upsert = mocker.patch("backend.data.human_review.PendingHumanReview.prisma") mock_prisma = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
mock_upsert.return_value.upsert = AsyncMock(return_value=sample_db_review) mock_prisma.return_value.upsert = AsyncMock(return_value=sample_db_review)
result = await get_or_create_human_review( result = await get_or_create_human_review(
user_id="test-user-123", user_id="test-user-123",
@@ -75,8 +75,8 @@ async def test_get_or_create_human_review_approved(
sample_db_review.processed = False sample_db_review.processed = False
sample_db_review.reviewMessage = "Looks good" sample_db_review.reviewMessage = "Looks good"
mock_upsert = mocker.patch("backend.data.human_review.PendingHumanReview.prisma") mock_prisma = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
mock_upsert.return_value.upsert = AsyncMock(return_value=sample_db_review) mock_prisma.return_value.upsert = AsyncMock(return_value=sample_db_review)
result = await get_or_create_human_review( result = await get_or_create_human_review(
user_id="test-user-123", user_id="test-user-123",

View File

@@ -0,0 +1,7 @@
-- Remove NodeExecution foreign key from PendingHumanReview
-- The nodeExecId column remains as the primary key, but we remove the FK constraint
-- to AgentNodeExecution since PendingHumanReview records can persist after node
-- execution records are deleted.
-- Drop foreign key constraint that linked PendingHumanReview.nodeExecId to AgentNodeExecution.id
ALTER TABLE "platform"."PendingHumanReview" DROP CONSTRAINT IF EXISTS "PendingHumanReview_nodeExecId_fkey";

View File

@@ -517,8 +517,6 @@ model AgentNodeExecution {
stats Json? stats Json?
PendingHumanReview PendingHumanReview?
@@index([agentGraphExecutionId, agentNodeId, executionStatus]) @@index([agentGraphExecutionId, agentNodeId, executionStatus])
@@index([agentNodeId, executionStatus]) @@index([agentNodeId, executionStatus])
@@index([addedTime, queuedTime]) @@index([addedTime, queuedTime])
@@ -567,6 +565,7 @@ enum ReviewStatus {
} }
// Pending human reviews for Human-in-the-loop blocks // Pending human reviews for Human-in-the-loop blocks
// Also stores auto-approval records with special nodeExecId patterns (e.g., "auto_approve_{graph_exec_id}_{node_id}")
model PendingHumanReview { model PendingHumanReview {
nodeExecId String @id nodeExecId String @id
userId String userId String
@@ -585,7 +584,6 @@ model PendingHumanReview {
reviewedAt DateTime? reviewedAt DateTime?
User User @relation(fields: [userId], references: [id], onDelete: Cascade) User User @relation(fields: [userId], references: [id], onDelete: Cascade)
NodeExecution AgentNodeExecution @relation(fields: [nodeExecId], references: [id], onDelete: Cascade)
GraphExecution AgentGraphExecution @relation(fields: [graphExecId], references: [id], onDelete: Cascade) GraphExecution AgentGraphExecution @relation(fields: [graphExecId], references: [id], onDelete: Cascade)
@@unique([nodeExecId]) // One pending review per node execution @@unique([nodeExecId]) // One pending review per node execution

View File

@@ -86,7 +86,6 @@ export function FloatingSafeModeToggle({
const { const {
currentHITLSafeMode, currentHITLSafeMode,
showHITLToggle, showHITLToggle,
isHITLStateUndetermined,
handleHITLToggle, handleHITLToggle,
currentSensitiveActionSafeMode, currentSensitiveActionSafeMode,
showSensitiveActionToggle, showSensitiveActionToggle,
@@ -99,16 +98,9 @@ export function FloatingSafeModeToggle({
return null; return null;
} }
const showHITL = showHITLToggle && !isHITLStateUndetermined;
const showSensitive = showSensitiveActionToggle;
if (!showHITL && !showSensitive) {
return null;
}
return ( return (
<div className={cn("fixed z-50 flex flex-col gap-2", className)}> <div className={cn("fixed z-50 flex flex-col gap-2", className)}>
{showHITL && ( {showHITLToggle && (
<SafeModeButton <SafeModeButton
isEnabled={currentHITLSafeMode} isEnabled={currentHITLSafeMode}
label="Human in the loop block approval" label="Human in the loop block approval"
@@ -119,7 +111,7 @@ export function FloatingSafeModeToggle({
fullWidth={fullWidth} fullWidth={fullWidth}
/> />
)} )}
{showSensitive && ( {showSensitiveActionToggle && (
<SafeModeButton <SafeModeButton
isEnabled={currentSensitiveActionSafeMode} isEnabled={currentSensitiveActionSafeMode}
label="Sensitive actions blocks approval" label="Sensitive actions blocks approval"

View File

@@ -14,6 +14,10 @@ import {
import { Dialog } from "@/components/molecules/Dialog/Dialog"; import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { useEffect, useRef, useState } from "react"; import { useEffect, useRef, useState } from "react";
import { ScheduleAgentModal } from "../ScheduleAgentModal/ScheduleAgentModal"; import { ScheduleAgentModal } from "../ScheduleAgentModal/ScheduleAgentModal";
import {
AIAgentSafetyPopup,
useAIAgentSafetyPopup,
} from "./components/AIAgentSafetyPopup/AIAgentSafetyPopup";
import { ModalHeader } from "./components/ModalHeader/ModalHeader"; import { ModalHeader } from "./components/ModalHeader/ModalHeader";
import { ModalRunSection } from "./components/ModalRunSection/ModalRunSection"; import { ModalRunSection } from "./components/ModalRunSection/ModalRunSection";
import { RunActions } from "./components/RunActions/RunActions"; import { RunActions } from "./components/RunActions/RunActions";
@@ -83,8 +87,17 @@ export function RunAgentModal({
const [isScheduleModalOpen, setIsScheduleModalOpen] = useState(false); const [isScheduleModalOpen, setIsScheduleModalOpen] = useState(false);
const [hasOverflow, setHasOverflow] = useState(false); const [hasOverflow, setHasOverflow] = useState(false);
const [isSafetyPopupOpen, setIsSafetyPopupOpen] = useState(false);
const [pendingRunAction, setPendingRunAction] = useState<(() => void) | null>(
null,
);
const contentRef = useRef<HTMLDivElement>(null); const contentRef = useRef<HTMLDivElement>(null);
const { shouldShowPopup, dismissPopup } = useAIAgentSafetyPopup(
agent.has_sensitive_action,
agent.has_human_in_the_loop,
);
const hasAnySetupFields = const hasAnySetupFields =
Object.keys(agentInputFields || {}).length > 0 || Object.keys(agentInputFields || {}).length > 0 ||
Object.keys(agentCredentialsInputFields || {}).length > 0; Object.keys(agentCredentialsInputFields || {}).length > 0;
@@ -165,6 +178,24 @@ export function RunAgentModal({
onScheduleCreated?.(schedule); onScheduleCreated?.(schedule);
} }
function handleRunWithSafetyCheck() {
if (shouldShowPopup) {
setPendingRunAction(() => handleRun);
setIsSafetyPopupOpen(true);
} else {
handleRun();
}
}
function handleSafetyPopupAcknowledge() {
setIsSafetyPopupOpen(false);
dismissPopup();
if (pendingRunAction) {
pendingRunAction();
setPendingRunAction(null);
}
}
return ( return (
<> <>
<Dialog <Dialog
@@ -248,7 +279,7 @@ export function RunAgentModal({
)} )}
<RunActions <RunActions
defaultRunType={defaultRunType} defaultRunType={defaultRunType}
onRun={handleRun} onRun={handleRunWithSafetyCheck}
isExecuting={isExecuting} isExecuting={isExecuting}
isSettingUpTrigger={isSettingUpTrigger} isSettingUpTrigger={isSettingUpTrigger}
isRunReady={allRequiredInputsAreSet} isRunReady={allRequiredInputsAreSet}
@@ -266,6 +297,11 @@ export function RunAgentModal({
</div> </div>
</Dialog.Content> </Dialog.Content>
</Dialog> </Dialog>
<AIAgentSafetyPopup
isOpen={isSafetyPopupOpen}
onAcknowledge={handleSafetyPopupAcknowledge}
/>
</> </>
); );
} }

View File

@@ -0,0 +1,95 @@
"use client";
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Key, storage } from "@/services/storage/local-storage";
import { ShieldCheckIcon } from "@phosphor-icons/react";
import { useCallback, useEffect, useState } from "react";
interface Props {
onAcknowledge: () => void;
isOpen: boolean;
}
export function AIAgentSafetyPopup({ onAcknowledge, isOpen }: Props) {
function handleAcknowledge() {
// Mark popup as shown so it won't appear again
storage.set(Key.AI_AGENT_SAFETY_POPUP_SHOWN, "true");
onAcknowledge();
}
if (!isOpen) return null;
return (
<Dialog
controlled={{ isOpen, set: () => {} }}
styling={{ maxWidth: "480px" }}
>
<Dialog.Content>
<div className="flex flex-col items-center p-6 text-center">
<div className="mb-6 flex h-16 w-16 items-center justify-center rounded-full bg-blue-50">
<ShieldCheckIcon
weight="fill"
size={32}
className="text-blue-600"
/>
</div>
<Text variant="h3" className="mb-4">
Safety Checks Enabled
</Text>
<Text variant="body" className="mb-2 text-zinc-700">
AI-generated agents may take actions that affect your data or
external systems.
</Text>
<Text variant="body" className="mb-8 text-zinc-700">
AutoGPT includes safety checks so you&apos;ll always have the
opportunity to review and approve sensitive actions before they
happen.
</Text>
<Button
variant="primary"
size="large"
className="w-full"
onClick={handleAcknowledge}
>
Got it
</Button>
</div>
</Dialog.Content>
</Dialog>
);
}
export function useAIAgentSafetyPopup(
hasSensitiveAction: boolean,
hasHumanInTheLoop: boolean,
) {
const [shouldShowPopup, setShouldShowPopup] = useState(false);
const [hasChecked, setHasChecked] = useState(false);
useEffect(() => {
// Only check once after mount (to avoid SSR issues)
if (hasChecked) return;
const hasSeenPopup =
storage.get(Key.AI_AGENT_SAFETY_POPUP_SHOWN) === "true";
const isRelevantAgent = hasSensitiveAction || hasHumanInTheLoop;
setShouldShowPopup(!hasSeenPopup && isRelevantAgent);
setHasChecked(true);
}, [hasSensitiveAction, hasHumanInTheLoop, hasChecked]);
const dismissPopup = useCallback(() => {
setShouldShowPopup(false);
}, []);
return {
shouldShowPopup,
dismissPopup,
};
}

View File

@@ -69,7 +69,6 @@ export function SafeModeToggle({ graph, className }: Props) {
const { const {
currentHITLSafeMode, currentHITLSafeMode,
showHITLToggle, showHITLToggle,
isHITLStateUndetermined,
handleHITLToggle, handleHITLToggle,
currentSensitiveActionSafeMode, currentSensitiveActionSafeMode,
showSensitiveActionToggle, showSensitiveActionToggle,
@@ -78,20 +77,13 @@ export function SafeModeToggle({ graph, className }: Props) {
shouldShowToggle, shouldShowToggle,
} = useAgentSafeMode(graph); } = useAgentSafeMode(graph);
if (!shouldShowToggle || isHITLStateUndetermined) { if (!shouldShowToggle) {
return null;
}
const showHITL = showHITLToggle && !isHITLStateUndetermined;
const showSensitive = showSensitiveActionToggle;
if (!showHITL && !showSensitive) {
return null; return null;
} }
return ( return (
<div className={cn("flex gap-1", className)}> <div className={cn("flex gap-1", className)}>
{showHITL && ( {showHITLToggle && (
<SafeModeIconButton <SafeModeIconButton
isEnabled={currentHITLSafeMode} isEnabled={currentHITLSafeMode}
label="Human-in-the-loop" label="Human-in-the-loop"
@@ -101,7 +93,7 @@ export function SafeModeToggle({ graph, className }: Props) {
isPending={isPending} isPending={isPending}
/> />
)} )}
{showSensitive && ( {showSensitiveActionToggle && (
<SafeModeIconButton <SafeModeIconButton
isEnabled={currentSensitiveActionSafeMode} isEnabled={currentSensitiveActionSafeMode}
label="Sensitive actions" label="Sensitive actions"

View File

@@ -9425,6 +9425,12 @@
"type": "array", "type": "array",
"title": "Reviews", "title": "Reviews",
"description": "All reviews with their approval status, data, and messages" "description": "All reviews with their approval status, data, and messages"
},
"auto_approve_future_actions": {
"type": "boolean",
"title": "Auto Approve Future Actions",
"description": "If true, future reviews from the same blocks (nodes) being approved will be automatically approved for the remainder of this execution. This only affects the current execution run.",
"default": false
} }
}, },
"type": "object", "type": "object",

View File

@@ -31,6 +31,29 @@ export function FloatingReviewsPanel({
query: { query: {
enabled: !!(graphId && executionId), enabled: !!(graphId && executionId),
select: okData, select: okData,
// Poll while execution is in progress to detect status changes
refetchInterval: (q) => {
// Note: refetchInterval callback receives raw data before select transform
const rawData = q.state.data as
| { status: number; data?: { status?: string } }
| undefined;
if (rawData?.status !== 200) return false;
const status = rawData?.data?.status;
if (!status) return false;
// Poll every 2 seconds while running or in review
if (
status === AgentExecutionStatus.RUNNING ||
status === AgentExecutionStatus.QUEUED ||
status === AgentExecutionStatus.INCOMPLETE ||
status === AgentExecutionStatus.REVIEW
) {
return 2000;
}
return false;
},
refetchIntervalInBackground: true,
}, },
}, },
); );
@@ -40,23 +63,27 @@ export function FloatingReviewsPanel({
useShallow((state) => state.graphExecutionStatus), useShallow((state) => state.graphExecutionStatus),
); );
// Determine if we should poll for pending reviews
const isInReviewStatus =
executionDetails?.status === AgentExecutionStatus.REVIEW ||
graphExecutionStatus === AgentExecutionStatus.REVIEW;
const { pendingReviews, isLoading, refetch } = usePendingReviewsForExecution( const { pendingReviews, isLoading, refetch } = usePendingReviewsForExecution(
executionId || "", executionId || "",
{
enabled: !!executionId,
// Poll every 2 seconds when in REVIEW status to catch new reviews
refetchInterval: isInReviewStatus ? 2000 : false,
},
); );
// Refetch pending reviews when execution status changes
useEffect(() => { useEffect(() => {
if (executionId) { if (executionId && executionDetails?.status) {
refetch(); refetch();
} }
}, [executionDetails?.status, executionId, refetch]); }, [executionDetails?.status, executionId, refetch]);
// Refetch when graph execution status changes to REVIEW
useEffect(() => {
if (graphExecutionStatus === AgentExecutionStatus.REVIEW && executionId) {
refetch();
}
}, [graphExecutionStatus, executionId, refetch]);
if ( if (
!executionId || !executionId ||
(!isLoading && (!isLoading &&

View File

@@ -1,8 +1,9 @@
import { useState } from "react"; import { useState, useCallback } from "react";
import { PendingHumanReviewModel } from "@/app/api/__generated__/models/pendingHumanReviewModel"; import { PendingHumanReviewModel } from "@/app/api/__generated__/models/pendingHumanReviewModel";
import { PendingReviewCard } from "@/components/organisms/PendingReviewCard/PendingReviewCard"; import { PendingReviewCard } from "@/components/organisms/PendingReviewCard/PendingReviewCard";
import { Text } from "@/components/atoms/Text/Text"; import { Text } from "@/components/atoms/Text/Text";
import { Button } from "@/components/atoms/Button/Button"; import { Button } from "@/components/atoms/Button/Button";
import { Switch } from "@/components/atoms/Switch/Switch";
import { useToast } from "@/components/molecules/Toast/use-toast"; import { useToast } from "@/components/molecules/Toast/use-toast";
import { ClockIcon, WarningIcon } from "@phosphor-icons/react"; import { ClockIcon, WarningIcon } from "@phosphor-icons/react";
import { usePostV2ProcessReviewAction } from "@/app/api/__generated__/endpoints/executions/executions"; import { usePostV2ProcessReviewAction } from "@/app/api/__generated__/endpoints/executions/executions";
@@ -40,6 +41,8 @@ export function PendingReviewsList({
"approve" | "reject" | null "approve" | "reject" | null
>(null); >(null);
const [autoApproveFuture, setAutoApproveFuture] = useState(false);
const { toast } = useToast(); const { toast } = useToast();
const reviewActionMutation = usePostV2ProcessReviewAction({ const reviewActionMutation = usePostV2ProcessReviewAction({
@@ -92,6 +95,26 @@ export function PendingReviewsList({
setReviewMessageMap((prev) => ({ ...prev, [nodeExecId]: message })); setReviewMessageMap((prev) => ({ ...prev, [nodeExecId]: message }));
} }
// Reset data to original values when toggling auto-approve
const handleAutoApproveFutureToggle = useCallback(
(enabled: boolean) => {
setAutoApproveFuture(enabled);
if (enabled) {
// Reset all data to original values
const originalData: Record<string, string> = {};
reviews.forEach((review) => {
originalData[review.node_exec_id] = JSON.stringify(
review.payload,
null,
2,
);
});
setReviewDataMap(originalData);
}
},
[reviews],
);
function processReviews(approved: boolean) { function processReviews(approved: boolean) {
if (reviews.length === 0) { if (reviews.length === 0) {
toast({ toast({
@@ -109,22 +132,31 @@ export function PendingReviewsList({
const reviewData = reviewDataMap[review.node_exec_id]; const reviewData = reviewDataMap[review.node_exec_id];
const reviewMessage = reviewMessageMap[review.node_exec_id]; const reviewMessage = reviewMessageMap[review.node_exec_id];
let parsedData: any = review.payload; // Default to original payload // When auto-approving future actions, send undefined (use original data)
// Otherwise, parse and send the edited data if available
let parsedData: any = undefined;
// Parse edited data if available and editable if (!autoApproveFuture) {
if (review.editable && reviewData) { // For regular approve/reject, use edited data if available
try { if (review.editable && reviewData) {
parsedData = JSON.parse(reviewData); try {
} catch (error) { parsedData = JSON.parse(reviewData);
toast({ } catch (error) {
title: "Invalid JSON", toast({
description: `Please fix the JSON format in review for node ${review.node_exec_id}: ${error instanceof Error ? error.message : "Invalid syntax"}`, title: "Invalid JSON",
variant: "destructive", description: `Please fix the JSON format in review for node ${review.node_exec_id}: ${error instanceof Error ? error.message : "Invalid syntax"}`,
}); variant: "destructive",
setPendingAction(null); });
return; setPendingAction(null);
return;
}
} else {
// No edits, use original payload
parsedData = review.payload;
} }
} }
// When autoApproveFuture is true, parsedData stays undefined
// Backend will use the original payload stored in the database
reviewItems.push({ reviewItems.push({
node_exec_id: review.node_exec_id, node_exec_id: review.node_exec_id,
@@ -137,6 +169,7 @@ export function PendingReviewsList({
reviewActionMutation.mutate({ reviewActionMutation.mutate({
data: { data: {
reviews: reviewItems, reviews: reviewItems,
auto_approve_future_actions: autoApproveFuture && approved,
}, },
}); });
} }
@@ -182,21 +215,37 @@ export function PendingReviewsList({
<div className="space-y-7"> <div className="space-y-7">
{reviews.map((review) => ( {reviews.map((review) => (
<PendingReviewCard <PendingReviewCard
key={review.node_exec_id} key={`${review.node_exec_id}-${autoApproveFuture}`}
review={review} review={review}
onReviewDataChange={handleReviewDataChange} onReviewDataChange={handleReviewDataChange}
onReviewMessageChange={handleReviewMessageChange} onReviewMessageChange={handleReviewMessageChange}
reviewMessage={reviewMessageMap[review.node_exec_id] || ""} reviewMessage={reviewMessageMap[review.node_exec_id] || ""}
isDisabled={autoApproveFuture}
/> />
))} ))}
</div> </div>
<div className="space-y-7"> <div className="space-y-4">
<Text variant="body" className="text-textGrey"> {/* Auto-approve toggle */}
Note: Changes you make here apply only to this task <div className="flex items-center gap-3">
</Text> <Switch
checked={autoApproveFuture}
onCheckedChange={handleAutoApproveFutureToggle}
disabled={reviewActionMutation.isPending}
/>
<Text variant="body" className="text-textBlack">
Auto-approve all future actions from these blocks
</Text>
</div>
<div className="flex gap-2"> {autoApproveFuture && (
<Text variant="small" className="text-amber-600">
Editing is disabled. Original data will be used for this and all
future reviews from these blocks.
</Text>
)}
<div className="flex flex-wrap gap-2">
<Button <Button
onClick={() => processReviews(true)} onClick={() => processReviews(true)}
disabled={reviewActionMutation.isPending || reviews.length === 0} disabled={reviewActionMutation.isPending || reviews.length === 0}
@@ -220,6 +269,11 @@ export function PendingReviewsList({
Reject Reject
</Button> </Button>
</div> </div>
<Text variant="small" className="text-textGrey">
You can turn auto-approval on or off anytime in this agent&apos;s
settings.
</Text>
</div> </div>
</div> </div>
); );

View File

@@ -15,8 +15,22 @@ export function usePendingReviews() {
}; };
} }
export function usePendingReviewsForExecution(graphExecId: string) { interface UsePendingReviewsForExecutionOptions {
const query = useGetV2GetPendingReviewsForExecution(graphExecId); enabled?: boolean;
refetchInterval?: number | false;
}
export function usePendingReviewsForExecution(
graphExecId: string,
options?: UsePendingReviewsForExecutionOptions,
) {
const query = useGetV2GetPendingReviewsForExecution(graphExecId, {
query: {
enabled: options?.enabled ?? !!graphExecId,
refetchInterval: options?.refetchInterval,
refetchIntervalInBackground: !!options?.refetchInterval,
},
});
return { return {
pendingReviews: okData(query.data) || [], pendingReviews: okData(query.data) || [],

View File

@@ -10,6 +10,7 @@ export enum Key {
LIBRARY_AGENTS_CACHE = "library-agents-cache", LIBRARY_AGENTS_CACHE = "library-agents-cache",
CHAT_SESSION_ID = "chat_session_id", CHAT_SESSION_ID = "chat_session_id",
COOKIE_CONSENT = "autogpt_cookie_consent", COOKIE_CONSENT = "autogpt_cookie_consent",
AI_AGENT_SAFETY_POPUP_SHOWN = "ai-agent-safety-popup-shown",
} }
function get(key: Key) { function get(key: Key) {