feat: add per-review auto-approval toggle for granular control

- Change from global auto_approve_future_actions to per-review auto_approve_future flag
- Each review item can now individually specify auto-approval
- Better UX: users can auto-approve some actions but not others
- Example: auto-approve file reads but not file writes
- Backward compatible: auto_approve_future defaults to False
- Add test for per-review granularity
- Update all existing tests to use new structure
This commit is contained in:
Zamil Majdy
2026-01-22 21:01:35 -05:00
parent 0111820f61
commit dbb56fa7aa
4 changed files with 260 additions and 45 deletions

View File

@@ -107,6 +107,13 @@ class ReviewItem(BaseModel):
reviewed_data: SafeJsonData | None = Field(
None, description="Optional edited data (ignored if approved=False)"
)
auto_approve_future: bool = Field(
default=False,
description=(
"If true and this review is approved, future executions of this same "
"block (node) will be automatically approved. This only affects approved reviews."
),
)
@field_validator("reviewed_data")
@classmethod
@@ -174,19 +181,14 @@ class ReviewRequest(BaseModel):
This request must include ALL pending reviews for a graph execution.
Each review will be either approved (with optional data modifications)
or rejected (data ignored). The execution will resume only after ALL reviews are processed.
Each review item can individually specify whether to auto-approve future executions
of the same block via the `auto_approve_future` field on ReviewItem.
"""
reviews: List[ReviewItem] = Field(
description="All reviews with their approval status, data, and messages"
)
auto_approve_future_actions: bool = Field(
default=False,
description=(
"If true, future reviews from the same blocks (nodes) being approved "
"will be automatically approved for the remainder of this execution. "
"This only affects the current execution run."
),
)
@model_validator(mode="after")
def validate_review_completeness(self):

View File

@@ -719,9 +719,9 @@ def test_process_review_action_auto_approve_creates_auto_approval_records(
"node_exec_id": "test_node_123",
"approved": True,
"message": "Approved",
"auto_approve_future": True,
}
],
"auto_approve_future_actions": True,
}
response = client.post("/api/review/action", json=request_data)
@@ -825,16 +825,16 @@ def test_process_review_action_without_auto_approve_still_loads_settings(
"backend.api.features.executions.review.routes.add_graph_execution"
)
# Request WITHOUT auto_approve_future_actions
# Request WITHOUT auto_approve_future (defaults to False)
request_data = {
"reviews": [
{
"node_exec_id": "test_node_123",
"approved": True,
"message": "Approved",
# auto_approve_future defaults to False
}
],
"auto_approve_future_actions": False,
}
response = client.post("/api/review/action", json=request_data)
@@ -844,7 +844,7 @@ def test_process_review_action_without_auto_approve_still_loads_settings(
# Verify process_all_reviews_for_execution was called
mock_process_all_reviews.assert_called_once()
# Verify create_auto_approval_record was NOT called (auto_approve_future_actions=False)
# Verify create_auto_approval_record was NOT called (auto_approve_future=False)
mock_create_auto_approval.assert_not_called()
# Verify settings were loaded
@@ -957,10 +957,17 @@ def test_process_review_action_auto_approve_only_applies_to_approved_reviews(
request_data = {
"reviews": [
{"node_exec_id": "node_exec_approved", "approved": True},
{"node_exec_id": "node_exec_rejected", "approved": False},
{
"node_exec_id": "node_exec_approved",
"approved": True,
"auto_approve_future": True,
},
{
"node_exec_id": "node_exec_rejected",
"approved": False,
"auto_approve_future": True, # Should be ignored since rejected
},
],
"auto_approve_future_actions": True,
}
response = client.post("/api/review/action", json=request_data)
@@ -988,3 +995,202 @@ def test_process_review_action_auto_approve_only_applies_to_approved_reviews(
call_kwargs = mock_add_execution.call_args.kwargs
execution_context = call_kwargs["execution_context"]
assert isinstance(execution_context, ExecutionContext)
def test_process_review_action_per_review_auto_approve_granularity(
    client: fastapi.testclient.TestClient,
    mocker: pytest_mock.MockerFixture,
    sample_pending_review: PendingHumanReviewModel,
    test_user_id: str,
) -> None:
    """Test that auto-approval can be set per-review (granular control).

    Submits three approved reviews in a single request: two opt in with
    ``auto_approve_future=True`` and one explicitly opts out with
    ``auto_approve_future=False``. Verifies that ``create_auto_approval_record``
    is called exactly twice — only for the opted-in reviews — proving the flag
    is honored per review item rather than globally for the whole request.
    """
    # Mock get_pending_reviews_for_user - return all 3 reviews
    # All three share the same graph execution so they are processed together.
    mock_get_reviews_for_user = mocker.patch(
        "backend.api.features.executions.review.routes.get_pending_reviews_for_user"
    )
    mock_get_reviews_for_user.return_value = [
        PendingHumanReviewModel(
            node_exec_id="node_1_auto",
            user_id=test_user_id,
            graph_exec_id="test_graph_exec",
            graph_id="test_graph",
            graph_version=1,
            payload={"data": "node1"},
            instructions="Review 1",
            editable=True,
            status=ReviewStatus.WAITING,
            review_message=None,
            was_edited=False,
            processed=False,
            created_at=FIXED_NOW,
        ),
        PendingHumanReviewModel(
            node_exec_id="node_2_manual",
            user_id=test_user_id,
            graph_exec_id="test_graph_exec",
            graph_id="test_graph",
            graph_version=1,
            payload={"data": "node2"},
            instructions="Review 2",
            editable=True,
            status=ReviewStatus.WAITING,
            review_message=None,
            was_edited=False,
            processed=False,
            created_at=FIXED_NOW,
        ),
        PendingHumanReviewModel(
            node_exec_id="node_3_auto",
            user_id=test_user_id,
            graph_exec_id="test_graph_exec",
            graph_id="test_graph",
            graph_version=1,
            payload={"data": "node3"},
            instructions="Review 3",
            editable=True,
            status=ReviewStatus.WAITING,
            review_message=None,
            was_edited=False,
            processed=False,
            created_at=FIXED_NOW,
        ),
    ]
    # Mock process_all_reviews - return 3 approved reviews
    # All come back APPROVED; whether an auto-approval record is created must
    # therefore depend solely on each review's auto_approve_future flag.
    mock_process_all_reviews = mocker.patch(
        "backend.api.features.executions.review.routes.process_all_reviews_for_execution"
    )
    mock_process_all_reviews.return_value = {
        "node_1_auto": PendingHumanReviewModel(
            node_exec_id="node_1_auto",
            user_id=test_user_id,
            graph_exec_id="test_graph_exec",
            graph_id="test_graph",
            graph_version=1,
            payload={"data": "node1"},
            instructions="Review 1",
            editable=True,
            status=ReviewStatus.APPROVED,
            review_message=None,
            was_edited=False,
            processed=False,
            created_at=FIXED_NOW,
            updated_at=FIXED_NOW,
            reviewed_at=FIXED_NOW,
        ),
        "node_2_manual": PendingHumanReviewModel(
            node_exec_id="node_2_manual",
            user_id=test_user_id,
            graph_exec_id="test_graph_exec",
            graph_id="test_graph",
            graph_version=1,
            payload={"data": "node2"},
            instructions="Review 2",
            editable=True,
            status=ReviewStatus.APPROVED,
            review_message=None,
            was_edited=False,
            processed=False,
            created_at=FIXED_NOW,
            updated_at=FIXED_NOW,
            reviewed_at=FIXED_NOW,
        ),
        "node_3_auto": PendingHumanReviewModel(
            node_exec_id="node_3_auto",
            user_id=test_user_id,
            graph_exec_id="test_graph_exec",
            graph_id="test_graph",
            graph_version=1,
            payload={"data": "node3"},
            instructions="Review 3",
            editable=True,
            status=ReviewStatus.APPROVED,
            review_message=None,
            was_edited=False,
            processed=False,
            created_at=FIXED_NOW,
            updated_at=FIXED_NOW,
            reviewed_at=FIXED_NOW,
        ),
    }
    # Mock get_node_execution
    mock_get_node_execution = mocker.patch(
        "backend.api.features.executions.review.routes.get_node_execution"
    )
    # Map each node_exec_id to a deterministic node_id ("node_def_<exec_id>")
    # so the assertions below can tell which reviews triggered a record.
    def mock_get_node(node_exec_id: str):
        mock_node = mocker.Mock(spec=NodeExecutionResult)
        mock_node.node_id = f"node_def_{node_exec_id}"
        return mock_node
    mock_get_node_execution.side_effect = mock_get_node
    # Mock create_auto_approval_record
    mock_create_auto_approval = mocker.patch(
        "backend.api.features.executions.review.routes.create_auto_approval_record"
    )
    # Mock get_graph_execution_meta
    # Execution must be in REVIEW status for the route to accept the request.
    mock_get_graph_exec = mocker.patch(
        "backend.api.features.executions.review.routes.get_graph_execution_meta"
    )
    mock_graph_exec_meta = mocker.Mock()
    mock_graph_exec_meta.status = ExecutionStatus.REVIEW
    mock_get_graph_exec.return_value = mock_graph_exec_meta
    # Mock has_pending_reviews_for_graph_exec
    # No remaining pending reviews -> the route will resume the execution.
    mock_has_pending = mocker.patch(
        "backend.api.features.executions.review.routes.has_pending_reviews_for_graph_exec"
    )
    mock_has_pending.return_value = False
    # Mock settings and execution
    mock_get_settings = mocker.patch(
        "backend.api.features.executions.review.routes.get_graph_settings"
    )
    mock_get_settings.return_value = GraphSettings(
        human_in_the_loop_safe_mode=False, sensitive_action_safe_mode=False
    )
    mocker.patch("backend.api.features.executions.review.routes.add_graph_execution")
    mocker.patch("backend.api.features.executions.review.routes.get_user_by_id")
    # Request with granular auto-approval:
    # - node_1_auto: auto_approve_future=True
    # - node_2_manual: auto_approve_future=False (explicit)
    # - node_3_auto: auto_approve_future=True
    request_data = {
        "reviews": [
            {
                "node_exec_id": "node_1_auto",
                "approved": True,
                "auto_approve_future": True,
            },
            {
                "node_exec_id": "node_2_manual",
                "approved": True,
                "auto_approve_future": False,  # Don't auto-approve this one
            },
            {
                "node_exec_id": "node_3_auto",
                "approved": True,
                "auto_approve_future": True,
            },
        ],
    }
    response = client.post("/api/review/action", json=request_data)
    assert response.status_code == 200
    # Verify create_auto_approval_record was called ONLY for reviews with auto_approve_future=True
    assert mock_create_auto_approval.call_count == 2
    # Check that it was called for node_1 and node_3, but NOT node_2
    # (node_ids follow the "node_def_<exec_id>" convention set up above)
    call_args_list = [call.kwargs for call in mock_create_auto_approval.call_args_list]
    node_ids_with_auto_approval = [args["node_id"] for args in call_args_list]
    assert "node_def_node_1_auto" in node_ids_with_auto_approval
    assert "node_def_node_3_auto" in node_ids_with_auto_approval
    assert "node_def_node_2_manual" not in node_ids_with_auto_approval

View File

@@ -175,21 +175,23 @@ async def process_review_action(
f"Current status: {graph_exec_meta.status}",
)
# Build review decisions map
# Build review decisions map and track which reviews requested auto-approval
# Auto-approved reviews use original data (no modifications allowed)
review_decisions = {}
auto_approve_requests = {} # Map node_exec_id -> auto_approve_future flag
for review in request.reviews:
review_status = (
ReviewStatus.APPROVED if review.approved else ReviewStatus.REJECTED
)
reviewed_data = (
None if request.auto_approve_future_actions else review.reviewed_data
)
# If this review requested auto-approval, don't allow data modifications
reviewed_data = None if review.auto_approve_future else review.reviewed_data
review_decisions[review.node_exec_id] = (
review_status,
reviewed_data,
review.message,
)
auto_approve_requests[review.node_exec_id] = review.auto_approve_future
# Process all reviews
updated_reviews = await process_all_reviews_for_execution(
@@ -197,27 +199,32 @@ async def process_review_action(
review_decisions=review_decisions,
)
# Create auto-approval records for approved reviews
# Create auto-approval records for approved reviews that requested it
# Note: Processing sequentially to avoid event loop issues in tests
if request.auto_approve_future_actions:
for node_exec_id, review in updated_reviews.items():
if review.status == ReviewStatus.APPROVED:
try:
node_exec = await get_node_execution(node_exec_id)
if node_exec:
await create_auto_approval_record(
user_id=user_id,
graph_exec_id=review.graph_exec_id,
graph_id=review.graph_id,
graph_version=review.graph_version,
node_id=node_exec.node_id,
payload=review.payload,
)
except Exception as e:
logger.error(
f"Failed to create auto-approval record for {node_exec_id}",
exc_info=e,
for node_exec_id, review_result in updated_reviews.items():
# Only create auto-approval if:
# 1. This review was approved
# 2. The review requested auto-approval
if (
review_result.status == ReviewStatus.APPROVED
and auto_approve_requests.get(node_exec_id, False)
):
try:
node_exec = await get_node_execution(node_exec_id)
if node_exec:
await create_auto_approval_record(
user_id=user_id,
graph_exec_id=review_result.graph_exec_id,
graph_id=review_result.graph_id,
graph_version=review_result.graph_version,
node_id=node_exec.node_id,
payload=review_result.payload,
)
except Exception as e:
logger.error(
f"Failed to create auto-approval record for {node_exec_id}",
exc_info=e,
)
# Count results
approved_count = sum(

View File

@@ -9411,6 +9411,12 @@
],
"title": "Reviewed Data",
"description": "Optional edited data (ignored if approved=False)"
},
"auto_approve_future": {
"type": "boolean",
"title": "Auto Approve Future",
"description": "If true and this review is approved, future executions of this same block (node) will be automatically approved. This only affects approved reviews.",
"default": false
}
},
"type": "object",
@@ -9425,18 +9431,12 @@
"type": "array",
"title": "Reviews",
"description": "All reviews with their approval status, data, and messages"
},
"auto_approve_future_actions": {
"type": "boolean",
"title": "Auto Approve Future Actions",
"description": "If true, future reviews from the same blocks (nodes) being approved will be automatically approved for the remainder of this execution. This only affects the current execution run.",
"default": false
}
},
"type": "object",
"required": ["reviews"],
"title": "ReviewRequest",
"description": "Request model for processing ALL pending reviews for an execution.\n\nThis request must include ALL pending reviews for a graph execution.\nEach review will be either approved (with optional data modifications)\nor rejected (data ignored). The execution will resume only after ALL reviews are processed."
"description": "Request model for processing ALL pending reviews for an execution.\n\nThis request must include ALL pending reviews for a graph execution.\nEach review will be either approved (with optional data modifications)\nor rejected (data ignored). The execution will resume only after ALL reviews are processed.\n\nEach review item can individually specify whether to auto-approve future executions\nof the same block via the `auto_approve_future` field on ReviewItem."
},
"ReviewResponse": {
"properties": {