mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-01-22 21:48:12 -05:00
Compare commits
38 Commits
testing-cl
...
feat/sensi
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a80f452ffe | ||
|
|
fd970c800c | ||
|
|
5fc1ec0ece | ||
|
|
9be3ec58ae | ||
|
|
e6ca904326 | ||
|
|
dbb56fa7aa | ||
|
|
0111820f61 | ||
|
|
1a1b1aa26d | ||
|
|
614ed8cf82 | ||
|
|
edd4c96aa6 | ||
|
|
cd231e2d69 | ||
|
|
399c472623 | ||
|
|
554e2beddf | ||
|
|
29fdda3fa8 | ||
|
|
67e6a8841c | ||
|
|
aea97db485 | ||
|
|
71a6969bbd | ||
|
|
e4c3f9995b | ||
|
|
3b58684abc | ||
|
|
e8d44a62fd | ||
|
|
be024da2a8 | ||
|
|
0df917e243 | ||
|
|
8688805a8c | ||
|
|
9bdda7dab0 | ||
|
|
7d377aabaa | ||
|
|
dfd7c64068 | ||
|
|
02089bc047 | ||
|
|
bed7b356bb | ||
|
|
4efc0ff502 | ||
|
|
4ad0528257 | ||
|
|
2f440ee80a | ||
|
|
2a55923ec0 | ||
|
|
ad50f57a2b | ||
|
|
aebd961ef5 | ||
|
|
bcccaa16cc | ||
|
|
d5ddc41b18 | ||
|
|
95eab5b7eb | ||
|
|
832d6e1696 |
@@ -107,6 +107,13 @@ class ReviewItem(BaseModel):
|
|||||||
reviewed_data: SafeJsonData | None = Field(
|
reviewed_data: SafeJsonData | None = Field(
|
||||||
None, description="Optional edited data (ignored if approved=False)"
|
None, description="Optional edited data (ignored if approved=False)"
|
||||||
)
|
)
|
||||||
|
auto_approve_future: bool = Field(
|
||||||
|
default=False,
|
||||||
|
description=(
|
||||||
|
"If true and this review is approved, future executions of this same "
|
||||||
|
"block (node) will be automatically approved. This only affects approved reviews."
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
@field_validator("reviewed_data")
|
@field_validator("reviewed_data")
|
||||||
@classmethod
|
@classmethod
|
||||||
@@ -174,6 +181,9 @@ class ReviewRequest(BaseModel):
|
|||||||
This request must include ALL pending reviews for a graph execution.
|
This request must include ALL pending reviews for a graph execution.
|
||||||
Each review will be either approved (with optional data modifications)
|
Each review will be either approved (with optional data modifications)
|
||||||
or rejected (data ignored). The execution will resume only after ALL reviews are processed.
|
or rejected (data ignored). The execution will resume only after ALL reviews are processed.
|
||||||
|
|
||||||
|
Each review item can individually specify whether to auto-approve future executions
|
||||||
|
of the same block via the `auto_approve_future` field on ReviewItem.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
reviews: List[ReviewItem] = Field(
|
reviews: List[ReviewItem] = Field(
|
||||||
|
|||||||
@@ -8,6 +8,12 @@ from prisma.enums import ReviewStatus
|
|||||||
from pytest_snapshot.plugin import Snapshot
|
from pytest_snapshot.plugin import Snapshot
|
||||||
|
|
||||||
from backend.api.rest_api import handle_internal_http_error
|
from backend.api.rest_api import handle_internal_http_error
|
||||||
|
from backend.data.execution import (
|
||||||
|
ExecutionContext,
|
||||||
|
ExecutionStatus,
|
||||||
|
NodeExecutionResult,
|
||||||
|
)
|
||||||
|
from backend.data.graph import GraphSettings
|
||||||
|
|
||||||
from .model import PendingHumanReviewModel
|
from .model import PendingHumanReviewModel
|
||||||
from .routes import router
|
from .routes import router
|
||||||
@@ -15,20 +21,24 @@ from .routes import router
|
|||||||
# Using a fixed timestamp for reproducible tests
|
# Using a fixed timestamp for reproducible tests
|
||||||
FIXED_NOW = datetime.datetime(2023, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)
|
FIXED_NOW = datetime.datetime(2023, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)
|
||||||
|
|
||||||
app = fastapi.FastAPI()
|
|
||||||
app.include_router(router, prefix="/api/review")
|
|
||||||
app.add_exception_handler(ValueError, handle_internal_http_error(400))
|
|
||||||
|
|
||||||
client = fastapi.testclient.TestClient(app)
|
@pytest.fixture
|
||||||
|
def app():
|
||||||
|
"""Create FastAPI app for testing"""
|
||||||
|
test_app = fastapi.FastAPI()
|
||||||
|
test_app.include_router(router, prefix="/api/review")
|
||||||
|
test_app.add_exception_handler(ValueError, handle_internal_http_error(400))
|
||||||
|
return test_app
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(autouse=True)
|
@pytest.fixture
|
||||||
def setup_app_auth(mock_jwt_user):
|
def client(app, mock_jwt_user):
|
||||||
"""Setup auth overrides for all tests in this module"""
|
"""Create test client with auth overrides"""
|
||||||
from autogpt_libs.auth.jwt_utils import get_jwt_payload
|
from autogpt_libs.auth.jwt_utils import get_jwt_payload
|
||||||
|
|
||||||
app.dependency_overrides[get_jwt_payload] = mock_jwt_user["get_jwt_payload"]
|
app.dependency_overrides[get_jwt_payload] = mock_jwt_user["get_jwt_payload"]
|
||||||
yield
|
with fastapi.testclient.TestClient(app) as test_client:
|
||||||
|
yield test_client
|
||||||
app.dependency_overrides.clear()
|
app.dependency_overrides.clear()
|
||||||
|
|
||||||
|
|
||||||
@@ -55,6 +65,7 @@ def sample_pending_review(test_user_id: str) -> PendingHumanReviewModel:
|
|||||||
|
|
||||||
|
|
||||||
def test_get_pending_reviews_empty(
|
def test_get_pending_reviews_empty(
|
||||||
|
client: fastapi.testclient.TestClient,
|
||||||
mocker: pytest_mock.MockerFixture,
|
mocker: pytest_mock.MockerFixture,
|
||||||
snapshot: Snapshot,
|
snapshot: Snapshot,
|
||||||
test_user_id: str,
|
test_user_id: str,
|
||||||
@@ -73,6 +84,7 @@ def test_get_pending_reviews_empty(
|
|||||||
|
|
||||||
|
|
||||||
def test_get_pending_reviews_with_data(
|
def test_get_pending_reviews_with_data(
|
||||||
|
client: fastapi.testclient.TestClient,
|
||||||
mocker: pytest_mock.MockerFixture,
|
mocker: pytest_mock.MockerFixture,
|
||||||
sample_pending_review: PendingHumanReviewModel,
|
sample_pending_review: PendingHumanReviewModel,
|
||||||
snapshot: Snapshot,
|
snapshot: Snapshot,
|
||||||
@@ -95,6 +107,7 @@ def test_get_pending_reviews_with_data(
|
|||||||
|
|
||||||
|
|
||||||
def test_get_pending_reviews_for_execution_success(
|
def test_get_pending_reviews_for_execution_success(
|
||||||
|
client: fastapi.testclient.TestClient,
|
||||||
mocker: pytest_mock.MockerFixture,
|
mocker: pytest_mock.MockerFixture,
|
||||||
sample_pending_review: PendingHumanReviewModel,
|
sample_pending_review: PendingHumanReviewModel,
|
||||||
snapshot: Snapshot,
|
snapshot: Snapshot,
|
||||||
@@ -123,6 +136,7 @@ def test_get_pending_reviews_for_execution_success(
|
|||||||
|
|
||||||
|
|
||||||
def test_get_pending_reviews_for_execution_not_available(
|
def test_get_pending_reviews_for_execution_not_available(
|
||||||
|
client: fastapi.testclient.TestClient,
|
||||||
mocker: pytest_mock.MockerFixture,
|
mocker: pytest_mock.MockerFixture,
|
||||||
) -> None:
|
) -> None:
|
||||||
"""Test access denied when user doesn't own the execution"""
|
"""Test access denied when user doesn't own the execution"""
|
||||||
@@ -138,6 +152,7 @@ def test_get_pending_reviews_for_execution_not_available(
|
|||||||
|
|
||||||
|
|
||||||
def test_process_review_action_approve_success(
|
def test_process_review_action_approve_success(
|
||||||
|
client: fastapi.testclient.TestClient,
|
||||||
mocker: pytest_mock.MockerFixture,
|
mocker: pytest_mock.MockerFixture,
|
||||||
sample_pending_review: PendingHumanReviewModel,
|
sample_pending_review: PendingHumanReviewModel,
|
||||||
test_user_id: str,
|
test_user_id: str,
|
||||||
@@ -145,6 +160,12 @@ def test_process_review_action_approve_success(
|
|||||||
"""Test successful review approval"""
|
"""Test successful review approval"""
|
||||||
# Mock the route functions
|
# Mock the route functions
|
||||||
|
|
||||||
|
# Mock get_pending_review_by_node_exec_id (called to find the graph_exec_id)
|
||||||
|
mock_get_reviews_for_user = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.get_pending_review_by_node_exec_id"
|
||||||
|
)
|
||||||
|
mock_get_reviews_for_user.return_value = sample_pending_review
|
||||||
|
|
||||||
mock_get_reviews_for_execution = mocker.patch(
|
mock_get_reviews_for_execution = mocker.patch(
|
||||||
"backend.api.features.executions.review.routes.get_pending_reviews_for_execution"
|
"backend.api.features.executions.review.routes.get_pending_reviews_for_execution"
|
||||||
)
|
)
|
||||||
@@ -173,6 +194,14 @@ def test_process_review_action_approve_success(
|
|||||||
)
|
)
|
||||||
mock_process_all_reviews.return_value = {"test_node_123": approved_review}
|
mock_process_all_reviews.return_value = {"test_node_123": approved_review}
|
||||||
|
|
||||||
|
# Mock get_graph_execution_meta to return execution in REVIEW status
|
||||||
|
mock_get_graph_exec = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.get_graph_execution_meta"
|
||||||
|
)
|
||||||
|
mock_graph_exec_meta = mocker.Mock()
|
||||||
|
mock_graph_exec_meta.status = ExecutionStatus.REVIEW
|
||||||
|
mock_get_graph_exec.return_value = mock_graph_exec_meta
|
||||||
|
|
||||||
mock_has_pending = mocker.patch(
|
mock_has_pending = mocker.patch(
|
||||||
"backend.api.features.executions.review.routes.has_pending_reviews_for_graph_exec"
|
"backend.api.features.executions.review.routes.has_pending_reviews_for_graph_exec"
|
||||||
)
|
)
|
||||||
@@ -202,6 +231,7 @@ def test_process_review_action_approve_success(
|
|||||||
|
|
||||||
|
|
||||||
def test_process_review_action_reject_success(
|
def test_process_review_action_reject_success(
|
||||||
|
client: fastapi.testclient.TestClient,
|
||||||
mocker: pytest_mock.MockerFixture,
|
mocker: pytest_mock.MockerFixture,
|
||||||
sample_pending_review: PendingHumanReviewModel,
|
sample_pending_review: PendingHumanReviewModel,
|
||||||
test_user_id: str,
|
test_user_id: str,
|
||||||
@@ -209,6 +239,20 @@ def test_process_review_action_reject_success(
|
|||||||
"""Test successful review rejection"""
|
"""Test successful review rejection"""
|
||||||
# Mock the route functions
|
# Mock the route functions
|
||||||
|
|
||||||
|
# Mock get_pending_review_by_node_exec_id (called to find the graph_exec_id)
|
||||||
|
mock_get_reviews_for_user = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.get_pending_review_by_node_exec_id"
|
||||||
|
)
|
||||||
|
mock_get_reviews_for_user.return_value = sample_pending_review
|
||||||
|
|
||||||
|
# Mock get_graph_execution_meta to return execution in REVIEW status
|
||||||
|
mock_get_graph_exec = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.get_graph_execution_meta"
|
||||||
|
)
|
||||||
|
mock_graph_exec_meta = mocker.Mock()
|
||||||
|
mock_graph_exec_meta.status = ExecutionStatus.REVIEW
|
||||||
|
mock_get_graph_exec.return_value = mock_graph_exec_meta
|
||||||
|
|
||||||
mock_get_reviews_for_execution = mocker.patch(
|
mock_get_reviews_for_execution = mocker.patch(
|
||||||
"backend.api.features.executions.review.routes.get_pending_reviews_for_execution"
|
"backend.api.features.executions.review.routes.get_pending_reviews_for_execution"
|
||||||
)
|
)
|
||||||
@@ -262,6 +306,7 @@ def test_process_review_action_reject_success(
|
|||||||
|
|
||||||
|
|
||||||
def test_process_review_action_mixed_success(
|
def test_process_review_action_mixed_success(
|
||||||
|
client: fastapi.testclient.TestClient,
|
||||||
mocker: pytest_mock.MockerFixture,
|
mocker: pytest_mock.MockerFixture,
|
||||||
sample_pending_review: PendingHumanReviewModel,
|
sample_pending_review: PendingHumanReviewModel,
|
||||||
test_user_id: str,
|
test_user_id: str,
|
||||||
@@ -288,6 +333,12 @@ def test_process_review_action_mixed_success(
|
|||||||
|
|
||||||
# Mock the route functions
|
# Mock the route functions
|
||||||
|
|
||||||
|
# Mock get_pending_review_by_node_exec_id (called to find the graph_exec_id)
|
||||||
|
mock_get_reviews_for_user = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.get_pending_review_by_node_exec_id"
|
||||||
|
)
|
||||||
|
mock_get_reviews_for_user.return_value = sample_pending_review
|
||||||
|
|
||||||
mock_get_reviews_for_execution = mocker.patch(
|
mock_get_reviews_for_execution = mocker.patch(
|
||||||
"backend.api.features.executions.review.routes.get_pending_reviews_for_execution"
|
"backend.api.features.executions.review.routes.get_pending_reviews_for_execution"
|
||||||
)
|
)
|
||||||
@@ -337,6 +388,14 @@ def test_process_review_action_mixed_success(
|
|||||||
"test_node_456": rejected_review,
|
"test_node_456": rejected_review,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Mock get_graph_execution_meta to return execution in REVIEW status
|
||||||
|
mock_get_graph_exec = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.get_graph_execution_meta"
|
||||||
|
)
|
||||||
|
mock_graph_exec_meta = mocker.Mock()
|
||||||
|
mock_graph_exec_meta.status = ExecutionStatus.REVIEW
|
||||||
|
mock_get_graph_exec.return_value = mock_graph_exec_meta
|
||||||
|
|
||||||
mock_has_pending = mocker.patch(
|
mock_has_pending = mocker.patch(
|
||||||
"backend.api.features.executions.review.routes.has_pending_reviews_for_graph_exec"
|
"backend.api.features.executions.review.routes.has_pending_reviews_for_graph_exec"
|
||||||
)
|
)
|
||||||
@@ -369,6 +428,7 @@ def test_process_review_action_mixed_success(
|
|||||||
|
|
||||||
|
|
||||||
def test_process_review_action_empty_request(
|
def test_process_review_action_empty_request(
|
||||||
|
client: fastapi.testclient.TestClient,
|
||||||
mocker: pytest_mock.MockerFixture,
|
mocker: pytest_mock.MockerFixture,
|
||||||
test_user_id: str,
|
test_user_id: str,
|
||||||
) -> None:
|
) -> None:
|
||||||
@@ -386,10 +446,45 @@ def test_process_review_action_empty_request(
|
|||||||
|
|
||||||
|
|
||||||
def test_process_review_action_review_not_found(
|
def test_process_review_action_review_not_found(
|
||||||
|
client: fastapi.testclient.TestClient,
|
||||||
mocker: pytest_mock.MockerFixture,
|
mocker: pytest_mock.MockerFixture,
|
||||||
|
sample_pending_review: PendingHumanReviewModel,
|
||||||
test_user_id: str,
|
test_user_id: str,
|
||||||
) -> None:
|
) -> None:
|
||||||
"""Test error when review is not found"""
|
"""Test error when review is not found"""
|
||||||
|
# Create a review with the nonexistent_node ID so the route can find the graph_exec_id
|
||||||
|
nonexistent_review = PendingHumanReviewModel(
|
||||||
|
node_exec_id="nonexistent_node",
|
||||||
|
user_id=test_user_id,
|
||||||
|
graph_exec_id="test_graph_exec_456",
|
||||||
|
graph_id="test_graph_789",
|
||||||
|
graph_version=1,
|
||||||
|
payload={"data": "test"},
|
||||||
|
instructions="Review",
|
||||||
|
editable=True,
|
||||||
|
status=ReviewStatus.WAITING,
|
||||||
|
review_message=None,
|
||||||
|
was_edited=None,
|
||||||
|
processed=False,
|
||||||
|
created_at=FIXED_NOW,
|
||||||
|
updated_at=None,
|
||||||
|
reviewed_at=None,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Mock get_pending_review_by_node_exec_id (called to find the graph_exec_id)
|
||||||
|
mock_get_reviews_for_user = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.get_pending_review_by_node_exec_id"
|
||||||
|
)
|
||||||
|
mock_get_reviews_for_user.return_value = nonexistent_review
|
||||||
|
|
||||||
|
# Mock get_graph_execution_meta to return execution in REVIEW status
|
||||||
|
mock_get_graph_exec = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.get_graph_execution_meta"
|
||||||
|
)
|
||||||
|
mock_graph_exec_meta = mocker.Mock()
|
||||||
|
mock_graph_exec_meta.status = ExecutionStatus.REVIEW
|
||||||
|
mock_get_graph_exec.return_value = mock_graph_exec_meta
|
||||||
|
|
||||||
# Mock the functions that extract graph execution ID from the request
|
# Mock the functions that extract graph execution ID from the request
|
||||||
mock_get_reviews_for_execution = mocker.patch(
|
mock_get_reviews_for_execution = mocker.patch(
|
||||||
"backend.api.features.executions.review.routes.get_pending_reviews_for_execution"
|
"backend.api.features.executions.review.routes.get_pending_reviews_for_execution"
|
||||||
@@ -422,11 +517,26 @@ def test_process_review_action_review_not_found(
|
|||||||
|
|
||||||
|
|
||||||
def test_process_review_action_partial_failure(
|
def test_process_review_action_partial_failure(
|
||||||
|
client: fastapi.testclient.TestClient,
|
||||||
mocker: pytest_mock.MockerFixture,
|
mocker: pytest_mock.MockerFixture,
|
||||||
sample_pending_review: PendingHumanReviewModel,
|
sample_pending_review: PendingHumanReviewModel,
|
||||||
test_user_id: str,
|
test_user_id: str,
|
||||||
) -> None:
|
) -> None:
|
||||||
"""Test handling of partial failures in review processing"""
|
"""Test handling of partial failures in review processing"""
|
||||||
|
# Mock get_pending_review_by_node_exec_id (called to find the graph_exec_id)
|
||||||
|
mock_get_reviews_for_user = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.get_pending_review_by_node_exec_id"
|
||||||
|
)
|
||||||
|
mock_get_reviews_for_user.return_value = sample_pending_review
|
||||||
|
|
||||||
|
# Mock get_graph_execution_meta to return execution in REVIEW status
|
||||||
|
mock_get_graph_exec = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.get_graph_execution_meta"
|
||||||
|
)
|
||||||
|
mock_graph_exec_meta = mocker.Mock()
|
||||||
|
mock_graph_exec_meta.status = ExecutionStatus.REVIEW
|
||||||
|
mock_get_graph_exec.return_value = mock_graph_exec_meta
|
||||||
|
|
||||||
# Mock the route functions
|
# Mock the route functions
|
||||||
mock_get_reviews_for_execution = mocker.patch(
|
mock_get_reviews_for_execution = mocker.patch(
|
||||||
"backend.api.features.executions.review.routes.get_pending_reviews_for_execution"
|
"backend.api.features.executions.review.routes.get_pending_reviews_for_execution"
|
||||||
@@ -456,16 +566,50 @@ def test_process_review_action_partial_failure(
|
|||||||
|
|
||||||
|
|
||||||
def test_process_review_action_invalid_node_exec_id(
|
def test_process_review_action_invalid_node_exec_id(
|
||||||
|
client: fastapi.testclient.TestClient,
|
||||||
mocker: pytest_mock.MockerFixture,
|
mocker: pytest_mock.MockerFixture,
|
||||||
sample_pending_review: PendingHumanReviewModel,
|
sample_pending_review: PendingHumanReviewModel,
|
||||||
test_user_id: str,
|
test_user_id: str,
|
||||||
) -> None:
|
) -> None:
|
||||||
"""Test failure when trying to process review with invalid node execution ID"""
|
"""Test failure when trying to process review with invalid node execution ID"""
|
||||||
|
# Create a review with the invalid-node-format ID so the route can find the graph_exec_id
|
||||||
|
invalid_review = PendingHumanReviewModel(
|
||||||
|
node_exec_id="invalid-node-format",
|
||||||
|
user_id=test_user_id,
|
||||||
|
graph_exec_id="test_graph_exec_456",
|
||||||
|
graph_id="test_graph_789",
|
||||||
|
graph_version=1,
|
||||||
|
payload={"data": "test"},
|
||||||
|
instructions="Review",
|
||||||
|
editable=True,
|
||||||
|
status=ReviewStatus.WAITING,
|
||||||
|
review_message=None,
|
||||||
|
was_edited=None,
|
||||||
|
processed=False,
|
||||||
|
created_at=FIXED_NOW,
|
||||||
|
updated_at=None,
|
||||||
|
reviewed_at=None,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Mock get_pending_review_by_node_exec_id (called to find the graph_exec_id)
|
||||||
|
mock_get_reviews_for_user = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.get_pending_review_by_node_exec_id"
|
||||||
|
)
|
||||||
|
mock_get_reviews_for_user.return_value = invalid_review
|
||||||
|
|
||||||
|
# Mock get_graph_execution_meta to return execution in REVIEW status
|
||||||
|
mock_get_graph_exec = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.get_graph_execution_meta"
|
||||||
|
)
|
||||||
|
mock_graph_exec_meta = mocker.Mock()
|
||||||
|
mock_graph_exec_meta.status = ExecutionStatus.REVIEW
|
||||||
|
mock_get_graph_exec.return_value = mock_graph_exec_meta
|
||||||
|
|
||||||
# Mock the route functions
|
# Mock the route functions
|
||||||
mock_get_reviews_for_execution = mocker.patch(
|
mock_get_reviews_for_execution = mocker.patch(
|
||||||
"backend.api.features.executions.review.routes.get_pending_reviews_for_execution"
|
"backend.api.features.executions.review.routes.get_pending_reviews_for_execution"
|
||||||
)
|
)
|
||||||
mock_get_reviews_for_execution.return_value = [sample_pending_review]
|
mock_get_reviews_for_execution.return_value = [invalid_review]
|
||||||
|
|
||||||
# Mock validation failure - this should return 400, not 500
|
# Mock validation failure - this should return 400, not 500
|
||||||
mock_process_all_reviews = mocker.patch(
|
mock_process_all_reviews = mocker.patch(
|
||||||
@@ -490,3 +634,571 @@ def test_process_review_action_invalid_node_exec_id(
|
|||||||
# Should be a 400 Bad Request, not 500 Internal Server Error
|
# Should be a 400 Bad Request, not 500 Internal Server Error
|
||||||
assert response.status_code == 400
|
assert response.status_code == 400
|
||||||
assert "Invalid node execution ID format" in response.json()["detail"]
|
assert "Invalid node execution ID format" in response.json()["detail"]
|
||||||
|
|
||||||
|
|
||||||
|
def test_process_review_action_auto_approve_creates_auto_approval_records(
|
||||||
|
client: fastapi.testclient.TestClient,
|
||||||
|
mocker: pytest_mock.MockerFixture,
|
||||||
|
sample_pending_review: PendingHumanReviewModel,
|
||||||
|
test_user_id: str,
|
||||||
|
) -> None:
|
||||||
|
"""Test that auto_approve_future_actions flag creates auto-approval records"""
|
||||||
|
# Mock get_pending_review_by_node_exec_id (called to find the graph_exec_id)
|
||||||
|
mock_get_reviews_for_user = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.get_pending_review_by_node_exec_id"
|
||||||
|
)
|
||||||
|
mock_get_reviews_for_user.return_value = sample_pending_review
|
||||||
|
|
||||||
|
# Mock process_all_reviews
|
||||||
|
mock_process_all_reviews = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.process_all_reviews_for_execution"
|
||||||
|
)
|
||||||
|
approved_review = PendingHumanReviewModel(
|
||||||
|
node_exec_id="test_node_123",
|
||||||
|
user_id=test_user_id,
|
||||||
|
graph_exec_id="test_graph_exec_456",
|
||||||
|
graph_id="test_graph_789",
|
||||||
|
graph_version=1,
|
||||||
|
payload={"data": "test payload"},
|
||||||
|
instructions="Please review",
|
||||||
|
editable=True,
|
||||||
|
status=ReviewStatus.APPROVED,
|
||||||
|
review_message="Approved",
|
||||||
|
was_edited=False,
|
||||||
|
processed=False,
|
||||||
|
created_at=FIXED_NOW,
|
||||||
|
updated_at=FIXED_NOW,
|
||||||
|
reviewed_at=FIXED_NOW,
|
||||||
|
)
|
||||||
|
mock_process_all_reviews.return_value = {"test_node_123": approved_review}
|
||||||
|
|
||||||
|
# Mock get_node_execution to return node_id
|
||||||
|
mock_get_node_execution = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.get_node_execution"
|
||||||
|
)
|
||||||
|
mock_node_exec = mocker.Mock(spec=NodeExecutionResult)
|
||||||
|
mock_node_exec.node_id = "test_node_def_456"
|
||||||
|
mock_get_node_execution.return_value = mock_node_exec
|
||||||
|
|
||||||
|
# Mock create_auto_approval_record
|
||||||
|
mock_create_auto_approval = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.create_auto_approval_record"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Mock get_graph_execution_meta to return execution in REVIEW status
|
||||||
|
mock_get_graph_exec = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.get_graph_execution_meta"
|
||||||
|
)
|
||||||
|
mock_graph_exec_meta = mocker.Mock()
|
||||||
|
mock_graph_exec_meta.status = ExecutionStatus.REVIEW
|
||||||
|
mock_get_graph_exec.return_value = mock_graph_exec_meta
|
||||||
|
|
||||||
|
# Mock has_pending_reviews_for_graph_exec
|
||||||
|
mock_has_pending = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.has_pending_reviews_for_graph_exec"
|
||||||
|
)
|
||||||
|
mock_has_pending.return_value = False
|
||||||
|
|
||||||
|
# Mock get_graph_settings to return custom settings
|
||||||
|
mock_get_settings = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.get_graph_settings"
|
||||||
|
)
|
||||||
|
mock_get_settings.return_value = GraphSettings(
|
||||||
|
human_in_the_loop_safe_mode=True,
|
||||||
|
sensitive_action_safe_mode=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Mock add_graph_execution
|
||||||
|
mock_add_execution = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.add_graph_execution"
|
||||||
|
)
|
||||||
|
|
||||||
|
request_data = {
|
||||||
|
"reviews": [
|
||||||
|
{
|
||||||
|
"node_exec_id": "test_node_123",
|
||||||
|
"approved": True,
|
||||||
|
"message": "Approved",
|
||||||
|
"auto_approve_future": True,
|
||||||
|
}
|
||||||
|
],
|
||||||
|
}
|
||||||
|
|
||||||
|
response = client.post("/api/review/action", json=request_data)
|
||||||
|
|
||||||
|
assert response.status_code == 200
|
||||||
|
|
||||||
|
# Verify process_all_reviews_for_execution was called (without auto_approve param)
|
||||||
|
mock_process_all_reviews.assert_called_once()
|
||||||
|
|
||||||
|
# Verify create_auto_approval_record was called for the approved review
|
||||||
|
mock_create_auto_approval.assert_called_once_with(
|
||||||
|
user_id=test_user_id,
|
||||||
|
graph_exec_id="test_graph_exec_456",
|
||||||
|
graph_id="test_graph_789",
|
||||||
|
graph_version=1,
|
||||||
|
node_id="test_node_def_456",
|
||||||
|
payload={"data": "test payload"},
|
||||||
|
)
|
||||||
|
|
||||||
|
# Verify get_graph_settings was called with correct parameters
|
||||||
|
mock_get_settings.assert_called_once_with(
|
||||||
|
user_id=test_user_id, graph_id="test_graph_789"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Verify add_graph_execution was called with proper ExecutionContext
|
||||||
|
mock_add_execution.assert_called_once()
|
||||||
|
call_kwargs = mock_add_execution.call_args.kwargs
|
||||||
|
execution_context = call_kwargs["execution_context"]
|
||||||
|
|
||||||
|
assert isinstance(execution_context, ExecutionContext)
|
||||||
|
assert execution_context.human_in_the_loop_safe_mode is True
|
||||||
|
assert execution_context.sensitive_action_safe_mode is True
|
||||||
|
|
||||||
|
|
||||||
|
def test_process_review_action_without_auto_approve_still_loads_settings(
|
||||||
|
client: fastapi.testclient.TestClient,
|
||||||
|
mocker: pytest_mock.MockerFixture,
|
||||||
|
sample_pending_review: PendingHumanReviewModel,
|
||||||
|
test_user_id: str,
|
||||||
|
) -> None:
|
||||||
|
"""Test that execution context is created with settings even without auto-approve"""
|
||||||
|
# Mock get_pending_review_by_node_exec_id (called to find the graph_exec_id)
|
||||||
|
mock_get_reviews_for_user = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.get_pending_review_by_node_exec_id"
|
||||||
|
)
|
||||||
|
mock_get_reviews_for_user.return_value = sample_pending_review
|
||||||
|
|
||||||
|
# Mock process_all_reviews
|
||||||
|
mock_process_all_reviews = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.process_all_reviews_for_execution"
|
||||||
|
)
|
||||||
|
approved_review = PendingHumanReviewModel(
|
||||||
|
node_exec_id="test_node_123",
|
||||||
|
user_id=test_user_id,
|
||||||
|
graph_exec_id="test_graph_exec_456",
|
||||||
|
graph_id="test_graph_789",
|
||||||
|
graph_version=1,
|
||||||
|
payload={"data": "test payload"},
|
||||||
|
instructions="Please review",
|
||||||
|
editable=True,
|
||||||
|
status=ReviewStatus.APPROVED,
|
||||||
|
review_message="Approved",
|
||||||
|
was_edited=False,
|
||||||
|
processed=False,
|
||||||
|
created_at=FIXED_NOW,
|
||||||
|
updated_at=FIXED_NOW,
|
||||||
|
reviewed_at=FIXED_NOW,
|
||||||
|
)
|
||||||
|
mock_process_all_reviews.return_value = {"test_node_123": approved_review}
|
||||||
|
|
||||||
|
# Mock create_auto_approval_record - should NOT be called when auto_approve is False
|
||||||
|
mock_create_auto_approval = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.create_auto_approval_record"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Mock get_graph_execution_meta to return execution in REVIEW status
|
||||||
|
mock_get_graph_exec = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.get_graph_execution_meta"
|
||||||
|
)
|
||||||
|
mock_graph_exec_meta = mocker.Mock()
|
||||||
|
mock_graph_exec_meta.status = ExecutionStatus.REVIEW
|
||||||
|
mock_get_graph_exec.return_value = mock_graph_exec_meta
|
||||||
|
|
||||||
|
# Mock has_pending_reviews_for_graph_exec
|
||||||
|
mock_has_pending = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.has_pending_reviews_for_graph_exec"
|
||||||
|
)
|
||||||
|
mock_has_pending.return_value = False
|
||||||
|
|
||||||
|
# Mock get_graph_settings with sensitive_action_safe_mode enabled
|
||||||
|
mock_get_settings = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.get_graph_settings"
|
||||||
|
)
|
||||||
|
mock_get_settings.return_value = GraphSettings(
|
||||||
|
human_in_the_loop_safe_mode=False,
|
||||||
|
sensitive_action_safe_mode=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Mock add_graph_execution
|
||||||
|
mock_add_execution = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.add_graph_execution"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Request WITHOUT auto_approve_future (defaults to False)
|
||||||
|
request_data = {
|
||||||
|
"reviews": [
|
||||||
|
{
|
||||||
|
"node_exec_id": "test_node_123",
|
||||||
|
"approved": True,
|
||||||
|
"message": "Approved",
|
||||||
|
# auto_approve_future defaults to False
|
||||||
|
}
|
||||||
|
],
|
||||||
|
}
|
||||||
|
|
||||||
|
response = client.post("/api/review/action", json=request_data)
|
||||||
|
|
||||||
|
assert response.status_code == 200
|
||||||
|
|
||||||
|
# Verify process_all_reviews_for_execution was called
|
||||||
|
mock_process_all_reviews.assert_called_once()
|
||||||
|
|
||||||
|
# Verify create_auto_approval_record was NOT called (auto_approve_future=False)
|
||||||
|
mock_create_auto_approval.assert_not_called()
|
||||||
|
|
||||||
|
# Verify settings were loaded
|
||||||
|
mock_get_settings.assert_called_once()
|
||||||
|
|
||||||
|
# Verify ExecutionContext has proper settings
|
||||||
|
mock_add_execution.assert_called_once()
|
||||||
|
call_kwargs = mock_add_execution.call_args.kwargs
|
||||||
|
execution_context = call_kwargs["execution_context"]
|
||||||
|
|
||||||
|
assert isinstance(execution_context, ExecutionContext)
|
||||||
|
assert execution_context.human_in_the_loop_safe_mode is False
|
||||||
|
assert execution_context.sensitive_action_safe_mode is True
|
||||||
|
|
||||||
|
|
||||||
|
def test_process_review_action_auto_approve_only_applies_to_approved_reviews(
    client: fastapi.testclient.TestClient,
    mocker: pytest_mock.MockerFixture,
    test_user_id: str,
) -> None:
    """Test that auto_approve record is created only for approved reviews.

    Submits one approved and one rejected review, both flagged with
    auto_approve_future=True, and verifies that an auto-approval record is
    created only for the approved one — the flag must be ignored on
    rejected reviews.
    """
    # Create two reviews - one approved, one rejected
    approved_review = PendingHumanReviewModel(
        node_exec_id="node_exec_approved",
        user_id=test_user_id,
        graph_exec_id="test_graph_exec_456",
        graph_id="test_graph_789",
        graph_version=1,
        payload={"data": "approved"},
        instructions="Review",
        editable=True,
        status=ReviewStatus.APPROVED,
        review_message=None,
        was_edited=False,
        processed=False,
        created_at=FIXED_NOW,
        updated_at=FIXED_NOW,
        reviewed_at=FIXED_NOW,
    )
    rejected_review = PendingHumanReviewModel(
        node_exec_id="node_exec_rejected",
        user_id=test_user_id,
        graph_exec_id="test_graph_exec_456",
        graph_id="test_graph_789",
        graph_version=1,
        payload={"data": "rejected"},
        instructions="Review",
        editable=True,
        status=ReviewStatus.REJECTED,
        review_message="Rejected",
        was_edited=False,
        processed=False,
        created_at=FIXED_NOW,
        updated_at=FIXED_NOW,
        reviewed_at=FIXED_NOW,
    )

    # Mock get_pending_review_by_node_exec_id (called to find the graph_exec_id)
    mock_get_reviews_for_user = mocker.patch(
        "backend.api.features.executions.review.routes.get_pending_review_by_node_exec_id"
    )
    mock_get_reviews_for_user.return_value = approved_review

    # Mock process_all_reviews
    mock_process_all_reviews = mocker.patch(
        "backend.api.features.executions.review.routes.process_all_reviews_for_execution"
    )
    mock_process_all_reviews.return_value = {
        "node_exec_approved": approved_review,
        "node_exec_rejected": rejected_review,
    }

    # Mock get_node_execution to return node_id (only called for approved review)
    mock_get_node_execution = mocker.patch(
        "backend.api.features.executions.review.routes.get_node_execution"
    )
    mock_node_exec = mocker.Mock(spec=NodeExecutionResult)
    mock_node_exec.node_id = "test_node_def_approved"
    mock_get_node_execution.return_value = mock_node_exec

    # Mock create_auto_approval_record
    mock_create_auto_approval = mocker.patch(
        "backend.api.features.executions.review.routes.create_auto_approval_record"
    )

    # Mock get_graph_execution_meta to return execution in REVIEW status
    mock_get_graph_exec = mocker.patch(
        "backend.api.features.executions.review.routes.get_graph_execution_meta"
    )
    mock_graph_exec_meta = mocker.Mock()
    mock_graph_exec_meta.status = ExecutionStatus.REVIEW
    mock_get_graph_exec.return_value = mock_graph_exec_meta

    # Mock has_pending_reviews_for_graph_exec
    mock_has_pending = mocker.patch(
        "backend.api.features.executions.review.routes.has_pending_reviews_for_graph_exec"
    )
    mock_has_pending.return_value = False

    # Mock get_graph_settings
    mock_get_settings = mocker.patch(
        "backend.api.features.executions.review.routes.get_graph_settings"
    )
    mock_get_settings.return_value = GraphSettings()

    # Mock add_graph_execution
    mock_add_execution = mocker.patch(
        "backend.api.features.executions.review.routes.add_graph_execution"
    )

    # Both reviews request auto-approval; only the approved one may create a record
    request_data = {
        "reviews": [
            {
                "node_exec_id": "node_exec_approved",
                "approved": True,
                "auto_approve_future": True,
            },
            {
                "node_exec_id": "node_exec_rejected",
                "approved": False,
                "auto_approve_future": True,  # Should be ignored since rejected
            },
        ],
    }

    response = client.post("/api/review/action", json=request_data)

    assert response.status_code == 200

    # Verify process_all_reviews_for_execution was called
    mock_process_all_reviews.assert_called_once()

    # Verify create_auto_approval_record was called ONLY for the approved review
    # (not for the rejected one)
    mock_create_auto_approval.assert_called_once_with(
        user_id=test_user_id,
        graph_exec_id="test_graph_exec_456",
        graph_id="test_graph_789",
        graph_version=1,
        node_id="test_node_def_approved",
        payload={"data": "approved"},
    )

    # Verify get_node_execution was called only for approved review
    mock_get_node_execution.assert_called_once_with("node_exec_approved")

    # Verify ExecutionContext was created (auto-approval is now DB-based)
    call_kwargs = mock_add_execution.call_args.kwargs
    execution_context = call_kwargs["execution_context"]
    assert isinstance(execution_context, ExecutionContext)
|
||||||
|
|
||||||
|
|
||||||
|
def test_process_review_action_per_review_auto_approve_granularity(
    client: fastapi.testclient.TestClient,
    mocker: pytest_mock.MockerFixture,
    sample_pending_review: PendingHumanReviewModel,
    test_user_id: str,
) -> None:
    """Test that auto-approval can be set per-review (granular control).

    Approves three reviews in one request with auto_approve_future set to
    True, False, True respectively, and verifies an auto-approval record is
    created for exactly the two reviews that opted in.
    """
    # Mock get_pending_review_by_node_exec_id - return different reviews based on node_exec_id
    mock_get_reviews_for_user = mocker.patch(
        "backend.api.features.executions.review.routes.get_pending_review_by_node_exec_id"
    )

    # Create a mapping of node_exec_id to review
    review_map = {
        "node_1_auto": PendingHumanReviewModel(
            node_exec_id="node_1_auto",
            user_id=test_user_id,
            graph_exec_id="test_graph_exec",
            graph_id="test_graph",
            graph_version=1,
            payload={"data": "node1"},
            instructions="Review 1",
            editable=True,
            status=ReviewStatus.WAITING,
            review_message=None,
            was_edited=False,
            processed=False,
            created_at=FIXED_NOW,
        ),
        "node_2_manual": PendingHumanReviewModel(
            node_exec_id="node_2_manual",
            user_id=test_user_id,
            graph_exec_id="test_graph_exec",
            graph_id="test_graph",
            graph_version=1,
            payload={"data": "node2"},
            instructions="Review 2",
            editable=True,
            status=ReviewStatus.WAITING,
            review_message=None,
            was_edited=False,
            processed=False,
            created_at=FIXED_NOW,
        ),
        "node_3_auto": PendingHumanReviewModel(
            node_exec_id="node_3_auto",
            user_id=test_user_id,
            graph_exec_id="test_graph_exec",
            graph_id="test_graph",
            graph_version=1,
            payload={"data": "node3"},
            instructions="Review 3",
            editable=True,
            status=ReviewStatus.WAITING,
            review_message=None,
            was_edited=False,
            processed=False,
            created_at=FIXED_NOW,
        ),
    }

    # Use side_effect to return different reviews based on node_exec_id parameter
    def mock_get_review_by_id(node_exec_id: str, _user_id: str):
        return review_map.get(node_exec_id)

    mock_get_reviews_for_user.side_effect = mock_get_review_by_id

    # Mock process_all_reviews - return 3 approved reviews
    mock_process_all_reviews = mocker.patch(
        "backend.api.features.executions.review.routes.process_all_reviews_for_execution"
    )
    mock_process_all_reviews.return_value = {
        "node_1_auto": PendingHumanReviewModel(
            node_exec_id="node_1_auto",
            user_id=test_user_id,
            graph_exec_id="test_graph_exec",
            graph_id="test_graph",
            graph_version=1,
            payload={"data": "node1"},
            instructions="Review 1",
            editable=True,
            status=ReviewStatus.APPROVED,
            review_message=None,
            was_edited=False,
            processed=False,
            created_at=FIXED_NOW,
            updated_at=FIXED_NOW,
            reviewed_at=FIXED_NOW,
        ),
        "node_2_manual": PendingHumanReviewModel(
            node_exec_id="node_2_manual",
            user_id=test_user_id,
            graph_exec_id="test_graph_exec",
            graph_id="test_graph",
            graph_version=1,
            payload={"data": "node2"},
            instructions="Review 2",
            editable=True,
            status=ReviewStatus.APPROVED,
            review_message=None,
            was_edited=False,
            processed=False,
            created_at=FIXED_NOW,
            updated_at=FIXED_NOW,
            reviewed_at=FIXED_NOW,
        ),
        "node_3_auto": PendingHumanReviewModel(
            node_exec_id="node_3_auto",
            user_id=test_user_id,
            graph_exec_id="test_graph_exec",
            graph_id="test_graph",
            graph_version=1,
            payload={"data": "node3"},
            instructions="Review 3",
            editable=True,
            status=ReviewStatus.APPROVED,
            review_message=None,
            was_edited=False,
            processed=False,
            created_at=FIXED_NOW,
            updated_at=FIXED_NOW,
            reviewed_at=FIXED_NOW,
        ),
    }

    # Mock get_node_execution
    mock_get_node_execution = mocker.patch(
        "backend.api.features.executions.review.routes.get_node_execution"
    )

    # Derive a distinct node definition id from each node_exec_id so the
    # assertions below can tell the auto-approval calls apart.
    def mock_get_node(node_exec_id: str):
        mock_node = mocker.Mock(spec=NodeExecutionResult)
        mock_node.node_id = f"node_def_{node_exec_id}"
        return mock_node

    mock_get_node_execution.side_effect = mock_get_node

    # Mock create_auto_approval_record
    mock_create_auto_approval = mocker.patch(
        "backend.api.features.executions.review.routes.create_auto_approval_record"
    )

    # Mock get_graph_execution_meta
    mock_get_graph_exec = mocker.patch(
        "backend.api.features.executions.review.routes.get_graph_execution_meta"
    )
    mock_graph_exec_meta = mocker.Mock()
    mock_graph_exec_meta.status = ExecutionStatus.REVIEW
    mock_get_graph_exec.return_value = mock_graph_exec_meta

    # Mock has_pending_reviews_for_graph_exec
    mock_has_pending = mocker.patch(
        "backend.api.features.executions.review.routes.has_pending_reviews_for_graph_exec"
    )
    mock_has_pending.return_value = False

    # Mock settings and execution
    mock_get_settings = mocker.patch(
        "backend.api.features.executions.review.routes.get_graph_settings"
    )
    mock_get_settings.return_value = GraphSettings(
        human_in_the_loop_safe_mode=False, sensitive_action_safe_mode=False
    )

    mocker.patch("backend.api.features.executions.review.routes.add_graph_execution")
    mocker.patch("backend.api.features.executions.review.routes.get_user_by_id")

    # Request with granular auto-approval:
    # - node_1_auto: auto_approve_future=True
    # - node_2_manual: auto_approve_future=False (explicit)
    # - node_3_auto: auto_approve_future=True
    request_data = {
        "reviews": [
            {
                "node_exec_id": "node_1_auto",
                "approved": True,
                "auto_approve_future": True,
            },
            {
                "node_exec_id": "node_2_manual",
                "approved": True,
                "auto_approve_future": False,  # Don't auto-approve this one
            },
            {
                "node_exec_id": "node_3_auto",
                "approved": True,
                "auto_approve_future": True,
            },
        ],
    }

    response = client.post("/api/review/action", json=request_data)

    assert response.status_code == 200

    # Verify create_auto_approval_record was called ONLY for reviews with auto_approve_future=True
    assert mock_create_auto_approval.call_count == 2

    # Check that it was called for node_1 and node_3, but NOT node_2
    call_args_list = [call.kwargs for call in mock_create_auto_approval.call_args_list]
    node_ids_with_auto_approval = [args["node_id"] for args in call_args_list]

    assert "node_def_node_1_auto" in node_ids_with_auto_approval
    assert "node_def_node_3_auto" in node_ids_with_auto_approval
    assert "node_def_node_2_manual" not in node_ids_with_auto_approval
|
||||||
|
|||||||
@@ -5,13 +5,23 @@ import autogpt_libs.auth as autogpt_auth_lib
|
|||||||
from fastapi import APIRouter, HTTPException, Query, Security, status
|
from fastapi import APIRouter, HTTPException, Query, Security, status
|
||||||
from prisma.enums import ReviewStatus
|
from prisma.enums import ReviewStatus
|
||||||
|
|
||||||
from backend.data.execution import get_graph_execution_meta
|
from backend.data.execution import (
|
||||||
|
ExecutionContext,
|
||||||
|
ExecutionStatus,
|
||||||
|
get_graph_execution_meta,
|
||||||
|
get_node_execution,
|
||||||
|
)
|
||||||
|
from backend.data.graph import get_graph_settings
|
||||||
from backend.data.human_review import (
|
from backend.data.human_review import (
|
||||||
|
create_auto_approval_record,
|
||||||
|
get_pending_review_by_node_exec_id,
|
||||||
get_pending_reviews_for_execution,
|
get_pending_reviews_for_execution,
|
||||||
get_pending_reviews_for_user,
|
get_pending_reviews_for_user,
|
||||||
has_pending_reviews_for_graph_exec,
|
has_pending_reviews_for_graph_exec,
|
||||||
process_all_reviews_for_execution,
|
process_all_reviews_for_execution,
|
||||||
)
|
)
|
||||||
|
from backend.data.model import USER_TIMEZONE_NOT_SET
|
||||||
|
from backend.data.user import get_user_by_id
|
||||||
from backend.executor.utils import add_graph_execution
|
from backend.executor.utils import add_graph_execution
|
||||||
|
|
||||||
from .model import PendingHumanReviewModel, ReviewRequest, ReviewResponse
|
from .model import PendingHumanReviewModel, ReviewRequest, ReviewResponse
|
||||||
@@ -127,17 +137,64 @@ async def process_review_action(
|
|||||||
detail="At least one review must be provided",
|
detail="At least one review must be provided",
|
||||||
)
|
)
|
||||||
|
|
||||||
# Build review decisions map
|
# Get graph execution ID by directly looking up one of the requested reviews
|
||||||
|
# Use direct lookup to avoid pagination issues (can't miss reviews beyond first page)
|
||||||
|
matching_review = None
|
||||||
|
for node_exec_id in all_request_node_ids:
|
||||||
|
review = await get_pending_review_by_node_exec_id(node_exec_id, user_id)
|
||||||
|
if review:
|
||||||
|
matching_review = review
|
||||||
|
break
|
||||||
|
|
||||||
|
if not matching_review:
|
||||||
|
raise HTTPException(
|
||||||
|
status_code=status.HTTP_404_NOT_FOUND,
|
||||||
|
detail="No pending reviews found for the requested node executions",
|
||||||
|
)
|
||||||
|
|
||||||
|
graph_exec_id = matching_review.graph_exec_id
|
||||||
|
|
||||||
|
# Validate execution status before processing reviews
|
||||||
|
graph_exec_meta = await get_graph_execution_meta(
|
||||||
|
user_id=user_id, execution_id=graph_exec_id
|
||||||
|
)
|
||||||
|
|
||||||
|
if not graph_exec_meta:
|
||||||
|
raise HTTPException(
|
||||||
|
status_code=status.HTTP_404_NOT_FOUND,
|
||||||
|
detail=f"Graph execution #{graph_exec_id} not found",
|
||||||
|
)
|
||||||
|
|
||||||
|
# Only allow processing reviews if execution is paused for review
|
||||||
|
# or incomplete (partial execution with some reviews already processed)
|
||||||
|
if graph_exec_meta.status not in (
|
||||||
|
ExecutionStatus.REVIEW,
|
||||||
|
ExecutionStatus.INCOMPLETE,
|
||||||
|
):
|
||||||
|
raise HTTPException(
|
||||||
|
status_code=status.HTTP_409_CONFLICT,
|
||||||
|
detail=f"Cannot process reviews while execution status is {graph_exec_meta.status}. "
|
||||||
|
f"Reviews can only be processed when execution is paused (REVIEW status). "
|
||||||
|
f"Current status: {graph_exec_meta.status}",
|
||||||
|
)
|
||||||
|
|
||||||
|
# Build review decisions map and track which reviews requested auto-approval
|
||||||
|
# Auto-approved reviews use original data (no modifications allowed)
|
||||||
review_decisions = {}
|
review_decisions = {}
|
||||||
|
auto_approve_requests = {} # Map node_exec_id -> auto_approve_future flag
|
||||||
|
|
||||||
for review in request.reviews:
|
for review in request.reviews:
|
||||||
review_status = (
|
review_status = (
|
||||||
ReviewStatus.APPROVED if review.approved else ReviewStatus.REJECTED
|
ReviewStatus.APPROVED if review.approved else ReviewStatus.REJECTED
|
||||||
)
|
)
|
||||||
|
# If this review requested auto-approval, don't allow data modifications
|
||||||
|
reviewed_data = None if review.auto_approve_future else review.reviewed_data
|
||||||
review_decisions[review.node_exec_id] = (
|
review_decisions[review.node_exec_id] = (
|
||||||
review_status,
|
review_status,
|
||||||
review.reviewed_data,
|
reviewed_data,
|
||||||
review.message,
|
review.message,
|
||||||
)
|
)
|
||||||
|
auto_approve_requests[review.node_exec_id] = review.auto_approve_future
|
||||||
|
|
||||||
# Process all reviews
|
# Process all reviews
|
||||||
updated_reviews = await process_all_reviews_for_execution(
|
updated_reviews = await process_all_reviews_for_execution(
|
||||||
@@ -145,6 +202,32 @@ async def process_review_action(
|
|||||||
review_decisions=review_decisions,
|
review_decisions=review_decisions,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# Create auto-approval records for approved reviews that requested it
|
||||||
|
# Note: Processing sequentially to avoid event loop issues in tests
|
||||||
|
for node_exec_id, review_result in updated_reviews.items():
|
||||||
|
# Only create auto-approval if:
|
||||||
|
# 1. This review was approved
|
||||||
|
# 2. The review requested auto-approval
|
||||||
|
if review_result.status == ReviewStatus.APPROVED and auto_approve_requests.get(
|
||||||
|
node_exec_id, False
|
||||||
|
):
|
||||||
|
try:
|
||||||
|
node_exec = await get_node_execution(node_exec_id)
|
||||||
|
if node_exec:
|
||||||
|
await create_auto_approval_record(
|
||||||
|
user_id=user_id,
|
||||||
|
graph_exec_id=review_result.graph_exec_id,
|
||||||
|
graph_id=review_result.graph_id,
|
||||||
|
graph_version=review_result.graph_version,
|
||||||
|
node_id=node_exec.node_id,
|
||||||
|
payload=review_result.payload,
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(
|
||||||
|
f"Failed to create auto-approval record for {node_exec_id}",
|
||||||
|
exc_info=e,
|
||||||
|
)
|
||||||
|
|
||||||
# Count results
|
# Count results
|
||||||
approved_count = sum(
|
approved_count = sum(
|
||||||
1
|
1
|
||||||
@@ -157,22 +240,37 @@ async def process_review_action(
|
|||||||
if review.status == ReviewStatus.REJECTED
|
if review.status == ReviewStatus.REJECTED
|
||||||
)
|
)
|
||||||
|
|
||||||
# Resume execution if we processed some reviews
|
# Resume execution only if ALL pending reviews for this execution have been processed
|
||||||
if updated_reviews:
|
if updated_reviews:
|
||||||
# Get graph execution ID from any processed review
|
|
||||||
first_review = next(iter(updated_reviews.values()))
|
|
||||||
graph_exec_id = first_review.graph_exec_id
|
|
||||||
|
|
||||||
# Check if any pending reviews remain for this execution
|
|
||||||
still_has_pending = await has_pending_reviews_for_graph_exec(graph_exec_id)
|
still_has_pending = await has_pending_reviews_for_graph_exec(graph_exec_id)
|
||||||
|
|
||||||
if not still_has_pending:
|
if not still_has_pending:
|
||||||
# Resume execution
|
# Get the graph_id from any processed review
|
||||||
|
first_review = next(iter(updated_reviews.values()))
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
# Fetch user and settings to build complete execution context
|
||||||
|
user = await get_user_by_id(user_id)
|
||||||
|
settings = await get_graph_settings(
|
||||||
|
user_id=user_id, graph_id=first_review.graph_id
|
||||||
|
)
|
||||||
|
|
||||||
|
# Preserve user's timezone preference when resuming execution
|
||||||
|
user_timezone = (
|
||||||
|
user.timezone if user.timezone != USER_TIMEZONE_NOT_SET else "UTC"
|
||||||
|
)
|
||||||
|
|
||||||
|
execution_context = ExecutionContext(
|
||||||
|
human_in_the_loop_safe_mode=settings.human_in_the_loop_safe_mode,
|
||||||
|
sensitive_action_safe_mode=settings.sensitive_action_safe_mode,
|
||||||
|
user_timezone=user_timezone,
|
||||||
|
)
|
||||||
|
|
||||||
await add_graph_execution(
|
await add_graph_execution(
|
||||||
graph_id=first_review.graph_id,
|
graph_id=first_review.graph_id,
|
||||||
user_id=user_id,
|
user_id=user_id,
|
||||||
graph_exec_id=graph_exec_id,
|
graph_exec_id=graph_exec_id,
|
||||||
|
execution_context=execution_context,
|
||||||
)
|
)
|
||||||
logger.info(f"Resumed execution {graph_exec_id}")
|
logger.info(f"Resumed execution {graph_exec_id}")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ Handles generation and storage of OpenAI embeddings for all content types
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
import asyncio
|
import asyncio
|
||||||
|
import contextvars
|
||||||
import logging
|
import logging
|
||||||
import time
|
import time
|
||||||
from typing import Any
|
from typing import Any
|
||||||
@@ -21,6 +22,11 @@ from backend.util.json import dumps
|
|||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# Context variable to track errors logged in the current task/operation
|
||||||
|
# This prevents spamming the same error multiple times when processing batches
|
||||||
|
_logged_errors: contextvars.ContextVar[set[str]] = contextvars.ContextVar(
|
||||||
|
"_logged_errors"
|
||||||
|
)
|
||||||
|
|
||||||
# OpenAI embedding model configuration
|
# OpenAI embedding model configuration
|
||||||
EMBEDDING_MODEL = "text-embedding-3-small"
|
EMBEDDING_MODEL = "text-embedding-3-small"
|
||||||
@@ -31,6 +37,42 @@ EMBEDDING_DIM = 1536
|
|||||||
EMBEDDING_MAX_TOKENS = 8191
|
EMBEDDING_MAX_TOKENS = 8191
|
||||||
|
|
||||||
|
|
||||||
|
def log_once_per_task(error_key: str, log_fn, message: str, **kwargs) -> bool:
    """
    Emit a log message at most once per task/operation to avoid log spam.

    Deduplication state is kept in a context variable, so each async
    context tracks independently which error keys it has already reported.
    Useful when processing batches where the same error might occur for
    many items.

    Args:
        error_key: Unique identifier for this error type
        log_fn: Logger function to call (e.g., logger.error, logger.warning)
        message: Message to log
        **kwargs: Additional arguments forwarded to log_fn

    Returns:
        True if the message was logged, False if it was suppressed
        (already logged in this context)

    Example:
        log_once_per_task("missing_api_key", logger.error, "API key not set")
    """
    # Lazily initialise the per-context tracking set on first use.
    seen = _logged_errors.get(None)
    if seen is None:
        seen = set()
        _logged_errors.set(seen)

    # Already reported in this context: suppress.
    if error_key in seen:
        return False

    # Log first, then mark as seen, so a failing log_fn does not suppress
    # a later retry.
    log_fn(f"{message} (This message will only be shown once per task.)", **kwargs)
    seen.add(error_key)
    return True
|
||||||
|
|
||||||
|
|
||||||
def build_searchable_text(
|
def build_searchable_text(
|
||||||
name: str,
|
name: str,
|
||||||
description: str,
|
description: str,
|
||||||
@@ -73,7 +115,11 @@ async def generate_embedding(text: str) -> list[float] | None:
|
|||||||
try:
|
try:
|
||||||
client = get_openai_client()
|
client = get_openai_client()
|
||||||
if not client:
|
if not client:
|
||||||
logger.error("openai_internal_api_key not set, cannot generate embedding")
|
log_once_per_task(
|
||||||
|
"openai_api_key_missing",
|
||||||
|
logger.error,
|
||||||
|
"openai_internal_api_key not set, cannot generate embeddings",
|
||||||
|
)
|
||||||
return None
|
return None
|
||||||
|
|
||||||
# Truncate text to token limit using tiktoken
|
# Truncate text to token limit using tiktoken
|
||||||
@@ -290,7 +336,12 @@ async def ensure_embedding(
|
|||||||
# Generate new embedding
|
# Generate new embedding
|
||||||
embedding = await generate_embedding(searchable_text)
|
embedding = await generate_embedding(searchable_text)
|
||||||
if embedding is None:
|
if embedding is None:
|
||||||
logger.warning(f"Could not generate embedding for version {version_id}")
|
log_once_per_task(
|
||||||
|
"embedding_generation_failed",
|
||||||
|
logger.warning,
|
||||||
|
"Could not generate embeddings (missing API key or service unavailable). "
|
||||||
|
"Embedding generation is disabled for this task.",
|
||||||
|
)
|
||||||
return False
|
return False
|
||||||
|
|
||||||
# Store the embedding with metadata using new function
|
# Store the embedding with metadata using new function
|
||||||
@@ -609,8 +660,11 @@ async def ensure_content_embedding(
|
|||||||
# Generate new embedding
|
# Generate new embedding
|
||||||
embedding = await generate_embedding(searchable_text)
|
embedding = await generate_embedding(searchable_text)
|
||||||
if embedding is None:
|
if embedding is None:
|
||||||
logger.warning(
|
log_once_per_task(
|
||||||
f"Could not generate embedding for {content_type}:{content_id}"
|
"embedding_generation_failed",
|
||||||
|
logger.warning,
|
||||||
|
"Could not generate embeddings (missing API key or service unavailable). "
|
||||||
|
"Embedding generation is disabled for this task.",
|
||||||
)
|
)
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|||||||
@@ -116,6 +116,7 @@ class PrintToConsoleBlock(Block):
|
|||||||
input_schema=PrintToConsoleBlock.Input,
|
input_schema=PrintToConsoleBlock.Input,
|
||||||
output_schema=PrintToConsoleBlock.Output,
|
output_schema=PrintToConsoleBlock.Output,
|
||||||
test_input={"text": "Hello, World!"},
|
test_input={"text": "Hello, World!"},
|
||||||
|
is_sensitive_action=True,
|
||||||
test_output=[
|
test_output=[
|
||||||
("output", "Hello, World!"),
|
("output", "Hello, World!"),
|
||||||
("status", "printed"),
|
("status", "printed"),
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ from typing import Any, Optional
|
|||||||
from prisma.enums import ReviewStatus
|
from prisma.enums import ReviewStatus
|
||||||
from pydantic import BaseModel
|
from pydantic import BaseModel
|
||||||
|
|
||||||
from backend.data.execution import ExecutionContext, ExecutionStatus
|
from backend.data.execution import ExecutionStatus
|
||||||
from backend.data.human_review import ReviewResult
|
from backend.data.human_review import ReviewResult
|
||||||
from backend.executor.manager import async_update_node_execution_status
|
from backend.executor.manager import async_update_node_execution_status
|
||||||
from backend.util.clients import get_database_manager_async_client
|
from backend.util.clients import get_database_manager_async_client
|
||||||
@@ -28,6 +28,11 @@ class ReviewDecision(BaseModel):
|
|||||||
class HITLReviewHelper:
|
class HITLReviewHelper:
|
||||||
"""Helper class for Human-In-The-Loop review operations."""
|
"""Helper class for Human-In-The-Loop review operations."""
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
async def check_approval(**kwargs) -> Optional[ReviewResult]:
|
||||||
|
"""Check if there's an existing approval for this node execution."""
|
||||||
|
return await get_database_manager_async_client().check_approval(**kwargs)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
async def get_or_create_human_review(**kwargs) -> Optional[ReviewResult]:
|
async def get_or_create_human_review(**kwargs) -> Optional[ReviewResult]:
|
||||||
"""Create or retrieve a human review from the database."""
|
"""Create or retrieve a human review from the database."""
|
||||||
@@ -55,11 +60,11 @@ class HITLReviewHelper:
|
|||||||
async def _handle_review_request(
|
async def _handle_review_request(
|
||||||
input_data: Any,
|
input_data: Any,
|
||||||
user_id: str,
|
user_id: str,
|
||||||
|
node_id: str,
|
||||||
node_exec_id: str,
|
node_exec_id: str,
|
||||||
graph_exec_id: str,
|
graph_exec_id: str,
|
||||||
graph_id: str,
|
graph_id: str,
|
||||||
graph_version: int,
|
graph_version: int,
|
||||||
execution_context: ExecutionContext,
|
|
||||||
block_name: str = "Block",
|
block_name: str = "Block",
|
||||||
editable: bool = False,
|
editable: bool = False,
|
||||||
) -> Optional[ReviewResult]:
|
) -> Optional[ReviewResult]:
|
||||||
@@ -69,11 +74,11 @@ class HITLReviewHelper:
|
|||||||
Args:
|
Args:
|
||||||
input_data: The input data to be reviewed
|
input_data: The input data to be reviewed
|
||||||
user_id: ID of the user requesting the review
|
user_id: ID of the user requesting the review
|
||||||
|
node_id: ID of the node in the graph definition
|
||||||
node_exec_id: ID of the node execution
|
node_exec_id: ID of the node execution
|
||||||
graph_exec_id: ID of the graph execution
|
graph_exec_id: ID of the graph execution
|
||||||
graph_id: ID of the graph
|
graph_id: ID of the graph
|
||||||
graph_version: Version of the graph
|
graph_version: Version of the graph
|
||||||
execution_context: Current execution context
|
|
||||||
block_name: Name of the block requesting review
|
block_name: Name of the block requesting review
|
||||||
editable: Whether the reviewer can edit the data
|
editable: Whether the reviewer can edit the data
|
||||||
|
|
||||||
@@ -83,15 +88,40 @@ class HITLReviewHelper:
|
|||||||
Raises:
|
Raises:
|
||||||
Exception: If review creation or status update fails
|
Exception: If review creation or status update fails
|
||||||
"""
|
"""
|
||||||
# Skip review if safe mode is disabled - return auto-approved result
|
# Note: Safe mode checks (human_in_the_loop_safe_mode, sensitive_action_safe_mode)
|
||||||
if not execution_context.human_in_the_loop_safe_mode:
|
# are handled by the caller:
|
||||||
|
# - HITL blocks check human_in_the_loop_safe_mode in their run() method
|
||||||
|
# - Sensitive action blocks check sensitive_action_safe_mode in is_block_exec_need_review()
|
||||||
|
# This function only handles checking for existing approvals.
|
||||||
|
|
||||||
|
# Check if this node has already been approved (normal or auto-approval)
|
||||||
|
if approval_result := await HITLReviewHelper.check_approval(
|
||||||
|
node_exec_id=node_exec_id,
|
||||||
|
graph_exec_id=graph_exec_id,
|
||||||
|
node_id=node_id,
|
||||||
|
user_id=user_id,
|
||||||
|
):
|
||||||
logger.info(
|
logger.info(
|
||||||
f"Block {block_name} skipping review for node {node_exec_id} - safe mode disabled"
|
f"Block {block_name} skipping review for node {node_exec_id} - "
|
||||||
|
f"found existing approval"
|
||||||
|
)
|
||||||
|
# Return a new ReviewResult with the current node_exec_id but approved status
|
||||||
|
# For auto-approvals, always use current input_data
|
||||||
|
# For normal approvals, use approval_result.data unless it's None
|
||||||
|
is_auto_approval = approval_result.node_exec_id != node_exec_id
|
||||||
|
approved_data = (
|
||||||
|
input_data
|
||||||
|
if is_auto_approval
|
||||||
|
else (
|
||||||
|
approval_result.data
|
||||||
|
if approval_result.data is not None
|
||||||
|
else input_data
|
||||||
|
)
|
||||||
)
|
)
|
||||||
return ReviewResult(
|
return ReviewResult(
|
||||||
data=input_data,
|
data=approved_data,
|
||||||
status=ReviewStatus.APPROVED,
|
status=ReviewStatus.APPROVED,
|
||||||
message="Auto-approved (safe mode disabled)",
|
message=approval_result.message,
|
||||||
processed=True,
|
processed=True,
|
||||||
node_exec_id=node_exec_id,
|
node_exec_id=node_exec_id,
|
||||||
)
|
)
|
||||||
@@ -129,11 +159,11 @@ class HITLReviewHelper:
|
|||||||
async def handle_review_decision(
|
async def handle_review_decision(
|
||||||
input_data: Any,
|
input_data: Any,
|
||||||
user_id: str,
|
user_id: str,
|
||||||
|
node_id: str,
|
||||||
node_exec_id: str,
|
node_exec_id: str,
|
||||||
graph_exec_id: str,
|
graph_exec_id: str,
|
||||||
graph_id: str,
|
graph_id: str,
|
||||||
graph_version: int,
|
graph_version: int,
|
||||||
execution_context: ExecutionContext,
|
|
||||||
block_name: str = "Block",
|
block_name: str = "Block",
|
||||||
editable: bool = False,
|
editable: bool = False,
|
||||||
) -> Optional[ReviewDecision]:
|
) -> Optional[ReviewDecision]:
|
||||||
@@ -143,11 +173,11 @@ class HITLReviewHelper:
|
|||||||
Args:
|
Args:
|
||||||
input_data: The input data to be reviewed
|
input_data: The input data to be reviewed
|
||||||
user_id: ID of the user requesting the review
|
user_id: ID of the user requesting the review
|
||||||
|
node_id: ID of the node in the graph definition
|
||||||
node_exec_id: ID of the node execution
|
node_exec_id: ID of the node execution
|
||||||
graph_exec_id: ID of the graph execution
|
graph_exec_id: ID of the graph execution
|
||||||
graph_id: ID of the graph
|
graph_id: ID of the graph
|
||||||
graph_version: Version of the graph
|
graph_version: Version of the graph
|
||||||
execution_context: Current execution context
|
|
||||||
block_name: Name of the block requesting review
|
block_name: Name of the block requesting review
|
||||||
editable: Whether the reviewer can edit the data
|
editable: Whether the reviewer can edit the data
|
||||||
|
|
||||||
@@ -158,11 +188,11 @@ class HITLReviewHelper:
|
|||||||
review_result = await HITLReviewHelper._handle_review_request(
|
review_result = await HITLReviewHelper._handle_review_request(
|
||||||
input_data=input_data,
|
input_data=input_data,
|
||||||
user_id=user_id,
|
user_id=user_id,
|
||||||
|
node_id=node_id,
|
||||||
node_exec_id=node_exec_id,
|
node_exec_id=node_exec_id,
|
||||||
graph_exec_id=graph_exec_id,
|
graph_exec_id=graph_exec_id,
|
||||||
graph_id=graph_id,
|
graph_id=graph_id,
|
||||||
graph_version=graph_version,
|
graph_version=graph_version,
|
||||||
execution_context=execution_context,
|
|
||||||
block_name=block_name,
|
block_name=block_name,
|
||||||
editable=editable,
|
editable=editable,
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -97,6 +97,7 @@ class HumanInTheLoopBlock(Block):
|
|||||||
input_data: Input,
|
input_data: Input,
|
||||||
*,
|
*,
|
||||||
user_id: str,
|
user_id: str,
|
||||||
|
node_id: str,
|
||||||
node_exec_id: str,
|
node_exec_id: str,
|
||||||
graph_exec_id: str,
|
graph_exec_id: str,
|
||||||
graph_id: str,
|
graph_id: str,
|
||||||
@@ -115,11 +116,11 @@ class HumanInTheLoopBlock(Block):
|
|||||||
decision = await self.handle_review_decision(
|
decision = await self.handle_review_decision(
|
||||||
input_data=input_data.data,
|
input_data=input_data.data,
|
||||||
user_id=user_id,
|
user_id=user_id,
|
||||||
|
node_id=node_id,
|
||||||
node_exec_id=node_exec_id,
|
node_exec_id=node_exec_id,
|
||||||
graph_exec_id=graph_exec_id,
|
graph_exec_id=graph_exec_id,
|
||||||
graph_id=graph_id,
|
graph_id=graph_id,
|
||||||
graph_version=graph_version,
|
graph_version=graph_version,
|
||||||
execution_context=execution_context,
|
|
||||||
block_name=self.name,
|
block_name=self.name,
|
||||||
editable=input_data.editable,
|
editable=input_data.editable,
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -441,6 +441,7 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
|
|||||||
static_output: bool = False,
|
static_output: bool = False,
|
||||||
block_type: BlockType = BlockType.STANDARD,
|
block_type: BlockType = BlockType.STANDARD,
|
||||||
webhook_config: Optional[BlockWebhookConfig | BlockManualWebhookConfig] = None,
|
webhook_config: Optional[BlockWebhookConfig | BlockManualWebhookConfig] = None,
|
||||||
|
is_sensitive_action: bool = False,
|
||||||
):
|
):
|
||||||
"""
|
"""
|
||||||
Initialize the block with the given schema.
|
Initialize the block with the given schema.
|
||||||
@@ -473,8 +474,8 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
|
|||||||
self.static_output = static_output
|
self.static_output = static_output
|
||||||
self.block_type = block_type
|
self.block_type = block_type
|
||||||
self.webhook_config = webhook_config
|
self.webhook_config = webhook_config
|
||||||
|
self.is_sensitive_action = is_sensitive_action
|
||||||
self.execution_stats: NodeExecutionStats = NodeExecutionStats()
|
self.execution_stats: NodeExecutionStats = NodeExecutionStats()
|
||||||
self.is_sensitive_action: bool = False
|
|
||||||
|
|
||||||
if self.webhook_config:
|
if self.webhook_config:
|
||||||
if isinstance(self.webhook_config, BlockWebhookConfig):
|
if isinstance(self.webhook_config, BlockWebhookConfig):
|
||||||
@@ -622,6 +623,7 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
|
|||||||
input_data: BlockInput,
|
input_data: BlockInput,
|
||||||
*,
|
*,
|
||||||
user_id: str,
|
user_id: str,
|
||||||
|
node_id: str,
|
||||||
node_exec_id: str,
|
node_exec_id: str,
|
||||||
graph_exec_id: str,
|
graph_exec_id: str,
|
||||||
graph_id: str,
|
graph_id: str,
|
||||||
@@ -648,11 +650,11 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
|
|||||||
decision = await HITLReviewHelper.handle_review_decision(
|
decision = await HITLReviewHelper.handle_review_decision(
|
||||||
input_data=input_data,
|
input_data=input_data,
|
||||||
user_id=user_id,
|
user_id=user_id,
|
||||||
|
node_id=node_id,
|
||||||
node_exec_id=node_exec_id,
|
node_exec_id=node_exec_id,
|
||||||
graph_exec_id=graph_exec_id,
|
graph_exec_id=graph_exec_id,
|
||||||
graph_id=graph_id,
|
graph_id=graph_id,
|
||||||
graph_version=graph_version,
|
graph_version=graph_version,
|
||||||
execution_context=execution_context,
|
|
||||||
block_name=self.name,
|
block_name=self.name,
|
||||||
editable=True,
|
editable=True,
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -17,6 +17,7 @@ from backend.api.features.executions.review.model import (
|
|||||||
PendingHumanReviewModel,
|
PendingHumanReviewModel,
|
||||||
SafeJsonData,
|
SafeJsonData,
|
||||||
)
|
)
|
||||||
|
from backend.data.execution import get_graph_execution_meta
|
||||||
from backend.util.json import SafeJson
|
from backend.util.json import SafeJson
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
@@ -32,6 +33,117 @@ class ReviewResult(BaseModel):
|
|||||||
node_exec_id: str
|
node_exec_id: str
|
||||||
|
|
||||||
|
|
||||||
|
def get_auto_approve_key(graph_exec_id: str, node_id: str) -> str:
|
||||||
|
"""Generate the special nodeExecId key for auto-approval records."""
|
||||||
|
return f"auto_approve_{graph_exec_id}_{node_id}"
|
||||||
|
|
||||||
|
|
||||||
|
async def check_approval(
|
||||||
|
node_exec_id: str,
|
||||||
|
graph_exec_id: str,
|
||||||
|
node_id: str,
|
||||||
|
user_id: str,
|
||||||
|
) -> Optional[ReviewResult]:
|
||||||
|
"""
|
||||||
|
Check if there's an existing approval for this node execution.
|
||||||
|
|
||||||
|
Checks both:
|
||||||
|
1. Normal approval by node_exec_id (previous run of the same node execution)
|
||||||
|
2. Auto-approval by special key pattern "auto_approve_{graph_exec_id}_{node_id}"
|
||||||
|
|
||||||
|
Args:
|
||||||
|
node_exec_id: ID of the node execution
|
||||||
|
graph_exec_id: ID of the graph execution
|
||||||
|
node_id: ID of the node definition (not execution)
|
||||||
|
user_id: ID of the user (for data isolation)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
ReviewResult if approval found (either normal or auto), None otherwise
|
||||||
|
"""
|
||||||
|
auto_approve_key = get_auto_approve_key(graph_exec_id, node_id)
|
||||||
|
|
||||||
|
# Check for either normal approval or auto-approval in a single query
|
||||||
|
existing_review = await PendingHumanReview.prisma().find_first(
|
||||||
|
where={
|
||||||
|
"OR": [
|
||||||
|
{"nodeExecId": node_exec_id},
|
||||||
|
{"nodeExecId": auto_approve_key},
|
||||||
|
],
|
||||||
|
"status": ReviewStatus.APPROVED,
|
||||||
|
"userId": user_id,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
if existing_review:
|
||||||
|
is_auto_approval = existing_review.nodeExecId == auto_approve_key
|
||||||
|
logger.info(
|
||||||
|
f"Found {'auto-' if is_auto_approval else ''}approval for node {node_id} "
|
||||||
|
f"(exec: {node_exec_id}) in execution {graph_exec_id}"
|
||||||
|
)
|
||||||
|
return ReviewResult(
|
||||||
|
data=existing_review.payload,
|
||||||
|
status=ReviewStatus.APPROVED,
|
||||||
|
message=(
|
||||||
|
"Auto-approved (user approved all future actions for this node)"
|
||||||
|
if is_auto_approval
|
||||||
|
else existing_review.reviewMessage or ""
|
||||||
|
),
|
||||||
|
processed=True,
|
||||||
|
node_exec_id=existing_review.nodeExecId,
|
||||||
|
)
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
async def create_auto_approval_record(
|
||||||
|
user_id: str,
|
||||||
|
graph_exec_id: str,
|
||||||
|
graph_id: str,
|
||||||
|
graph_version: int,
|
||||||
|
node_id: str,
|
||||||
|
payload: SafeJsonData,
|
||||||
|
) -> None:
|
||||||
|
"""
|
||||||
|
Create an auto-approval record for a node in this execution.
|
||||||
|
|
||||||
|
This is stored as a PendingHumanReview with a special nodeExecId pattern
|
||||||
|
and status=APPROVED, so future executions of the same node can skip review.
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ValueError: If the graph execution doesn't belong to the user
|
||||||
|
"""
|
||||||
|
# Validate that the graph execution belongs to this user (defense in depth)
|
||||||
|
graph_exec = await get_graph_execution_meta(
|
||||||
|
user_id=user_id, execution_id=graph_exec_id
|
||||||
|
)
|
||||||
|
if not graph_exec:
|
||||||
|
raise ValueError(
|
||||||
|
f"Graph execution {graph_exec_id} not found or doesn't belong to user {user_id}"
|
||||||
|
)
|
||||||
|
|
||||||
|
auto_approve_key = get_auto_approve_key(graph_exec_id, node_id)
|
||||||
|
|
||||||
|
await PendingHumanReview.prisma().upsert(
|
||||||
|
where={"nodeExecId": auto_approve_key},
|
||||||
|
data={
|
||||||
|
"create": {
|
||||||
|
"nodeExecId": auto_approve_key,
|
||||||
|
"userId": user_id,
|
||||||
|
"graphExecId": graph_exec_id,
|
||||||
|
"graphId": graph_id,
|
||||||
|
"graphVersion": graph_version,
|
||||||
|
"payload": SafeJson(payload),
|
||||||
|
"instructions": "Auto-approval record",
|
||||||
|
"editable": False,
|
||||||
|
"status": ReviewStatus.APPROVED,
|
||||||
|
"processed": True,
|
||||||
|
"reviewedAt": datetime.now(timezone.utc),
|
||||||
|
},
|
||||||
|
"update": {}, # Already exists, no update needed
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
async def get_or_create_human_review(
|
async def get_or_create_human_review(
|
||||||
user_id: str,
|
user_id: str,
|
||||||
node_exec_id: str,
|
node_exec_id: str,
|
||||||
@@ -108,6 +220,29 @@ async def get_or_create_human_review(
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
async def get_pending_review_by_node_exec_id(
|
||||||
|
node_exec_id: str, user_id: str
|
||||||
|
) -> Optional["PendingHumanReviewModel"]:
|
||||||
|
"""
|
||||||
|
Get a pending review by its node execution ID.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
node_exec_id: The node execution ID to look up
|
||||||
|
user_id: User ID for authorization (only returns if review belongs to this user)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
The pending review if found and belongs to user, None otherwise
|
||||||
|
"""
|
||||||
|
review = await PendingHumanReview.prisma().find_unique(
|
||||||
|
where={"nodeExecId": node_exec_id}
|
||||||
|
)
|
||||||
|
|
||||||
|
if not review or review.userId != user_id or review.status != ReviewStatus.WAITING:
|
||||||
|
return None
|
||||||
|
|
||||||
|
return PendingHumanReviewModel.from_db(review)
|
||||||
|
|
||||||
|
|
||||||
async def has_pending_reviews_for_graph_exec(graph_exec_id: str) -> bool:
|
async def has_pending_reviews_for_graph_exec(graph_exec_id: str) -> bool:
|
||||||
"""
|
"""
|
||||||
Check if a graph execution has any pending reviews.
|
Check if a graph execution has any pending reviews.
|
||||||
@@ -256,3 +391,44 @@ async def update_review_processed_status(node_exec_id: str, processed: bool) ->
|
|||||||
await PendingHumanReview.prisma().update(
|
await PendingHumanReview.prisma().update(
|
||||||
where={"nodeExecId": node_exec_id}, data={"processed": processed}
|
where={"nodeExecId": node_exec_id}, data={"processed": processed}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
async def cancel_pending_reviews_for_execution(graph_exec_id: str, user_id: str) -> int:
|
||||||
|
"""
|
||||||
|
Cancel all pending reviews for a graph execution (e.g., when execution is stopped).
|
||||||
|
|
||||||
|
Marks all WAITING reviews as REJECTED with a message indicating the execution was stopped.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
graph_exec_id: The graph execution ID
|
||||||
|
user_id: User ID who owns the execution (for security validation)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Number of reviews cancelled
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ValueError: If the graph execution doesn't belong to the user
|
||||||
|
"""
|
||||||
|
# Validate user ownership before cancelling reviews
|
||||||
|
graph_exec = await get_graph_execution_meta(
|
||||||
|
user_id=user_id, execution_id=graph_exec_id
|
||||||
|
)
|
||||||
|
if not graph_exec:
|
||||||
|
raise ValueError(
|
||||||
|
f"Graph execution {graph_exec_id} not found or doesn't belong to user {user_id}"
|
||||||
|
)
|
||||||
|
|
||||||
|
result = await PendingHumanReview.prisma().update_many(
|
||||||
|
where={
|
||||||
|
"graphExecId": graph_exec_id,
|
||||||
|
"userId": user_id,
|
||||||
|
"status": ReviewStatus.WAITING,
|
||||||
|
},
|
||||||
|
data={
|
||||||
|
"status": ReviewStatus.REJECTED,
|
||||||
|
"reviewMessage": "Execution was stopped by user",
|
||||||
|
"processed": True,
|
||||||
|
"reviewedAt": datetime.now(timezone.utc),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
return result
|
||||||
|
|||||||
@@ -46,8 +46,8 @@ async def test_get_or_create_human_review_new(
|
|||||||
sample_db_review.status = ReviewStatus.WAITING
|
sample_db_review.status = ReviewStatus.WAITING
|
||||||
sample_db_review.processed = False
|
sample_db_review.processed = False
|
||||||
|
|
||||||
mock_upsert = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
|
mock_prisma = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
|
||||||
mock_upsert.return_value.upsert = AsyncMock(return_value=sample_db_review)
|
mock_prisma.return_value.upsert = AsyncMock(return_value=sample_db_review)
|
||||||
|
|
||||||
result = await get_or_create_human_review(
|
result = await get_or_create_human_review(
|
||||||
user_id="test-user-123",
|
user_id="test-user-123",
|
||||||
@@ -75,8 +75,8 @@ async def test_get_or_create_human_review_approved(
|
|||||||
sample_db_review.processed = False
|
sample_db_review.processed = False
|
||||||
sample_db_review.reviewMessage = "Looks good"
|
sample_db_review.reviewMessage = "Looks good"
|
||||||
|
|
||||||
mock_upsert = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
|
mock_prisma = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
|
||||||
mock_upsert.return_value.upsert = AsyncMock(return_value=sample_db_review)
|
mock_prisma.return_value.upsert = AsyncMock(return_value=sample_db_review)
|
||||||
|
|
||||||
result = await get_or_create_human_review(
|
result = await get_or_create_human_review(
|
||||||
user_id="test-user-123",
|
user_id="test-user-123",
|
||||||
|
|||||||
@@ -50,6 +50,8 @@ from backend.data.graph import (
|
|||||||
validate_graph_execution_permissions,
|
validate_graph_execution_permissions,
|
||||||
)
|
)
|
||||||
from backend.data.human_review import (
|
from backend.data.human_review import (
|
||||||
|
cancel_pending_reviews_for_execution,
|
||||||
|
check_approval,
|
||||||
get_or_create_human_review,
|
get_or_create_human_review,
|
||||||
has_pending_reviews_for_graph_exec,
|
has_pending_reviews_for_graph_exec,
|
||||||
update_review_processed_status,
|
update_review_processed_status,
|
||||||
@@ -190,6 +192,8 @@ class DatabaseManager(AppService):
|
|||||||
get_user_notification_preference = _(get_user_notification_preference)
|
get_user_notification_preference = _(get_user_notification_preference)
|
||||||
|
|
||||||
# Human In The Loop
|
# Human In The Loop
|
||||||
|
cancel_pending_reviews_for_execution = _(cancel_pending_reviews_for_execution)
|
||||||
|
check_approval = _(check_approval)
|
||||||
get_or_create_human_review = _(get_or_create_human_review)
|
get_or_create_human_review = _(get_or_create_human_review)
|
||||||
has_pending_reviews_for_graph_exec = _(has_pending_reviews_for_graph_exec)
|
has_pending_reviews_for_graph_exec = _(has_pending_reviews_for_graph_exec)
|
||||||
update_review_processed_status = _(update_review_processed_status)
|
update_review_processed_status = _(update_review_processed_status)
|
||||||
@@ -313,6 +317,8 @@ class DatabaseManagerAsyncClient(AppServiceClient):
|
|||||||
set_execution_kv_data = d.set_execution_kv_data
|
set_execution_kv_data = d.set_execution_kv_data
|
||||||
|
|
||||||
# Human In The Loop
|
# Human In The Loop
|
||||||
|
cancel_pending_reviews_for_execution = d.cancel_pending_reviews_for_execution
|
||||||
|
check_approval = d.check_approval
|
||||||
get_or_create_human_review = d.get_or_create_human_review
|
get_or_create_human_review = d.get_or_create_human_review
|
||||||
update_review_processed_status = d.update_review_processed_status
|
update_review_processed_status = d.update_review_processed_status
|
||||||
|
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ from pydantic import BaseModel, JsonValue, ValidationError
|
|||||||
|
|
||||||
from backend.data import execution as execution_db
|
from backend.data import execution as execution_db
|
||||||
from backend.data import graph as graph_db
|
from backend.data import graph as graph_db
|
||||||
|
from backend.data import human_review as human_review_db
|
||||||
from backend.data import onboarding as onboarding_db
|
from backend.data import onboarding as onboarding_db
|
||||||
from backend.data import user as user_db
|
from backend.data import user as user_db
|
||||||
from backend.data.block import (
|
from backend.data.block import (
|
||||||
@@ -749,9 +750,27 @@ async def stop_graph_execution(
|
|||||||
if graph_exec.status in [
|
if graph_exec.status in [
|
||||||
ExecutionStatus.QUEUED,
|
ExecutionStatus.QUEUED,
|
||||||
ExecutionStatus.INCOMPLETE,
|
ExecutionStatus.INCOMPLETE,
|
||||||
|
ExecutionStatus.REVIEW,
|
||||||
]:
|
]:
|
||||||
# If the graph is still on the queue, we can prevent them from being executed
|
# If the graph is queued/incomplete/paused for review, terminate immediately
|
||||||
# by setting the status to TERMINATED.
|
# No need to wait for executor since it's not actively running
|
||||||
|
|
||||||
|
# If graph is in REVIEW status, clean up pending reviews before terminating
|
||||||
|
if graph_exec.status == ExecutionStatus.REVIEW:
|
||||||
|
# Use human_review_db if Prisma connected, else database manager
|
||||||
|
review_db = (
|
||||||
|
human_review_db
|
||||||
|
if prisma.is_connected()
|
||||||
|
else get_database_manager_async_client()
|
||||||
|
)
|
||||||
|
# Mark all pending reviews as rejected/cancelled
|
||||||
|
cancelled_count = await review_db.cancel_pending_reviews_for_execution(
|
||||||
|
graph_exec_id, user_id
|
||||||
|
)
|
||||||
|
logger.info(
|
||||||
|
f"Cancelled {cancelled_count} pending review(s) for stopped execution {graph_exec_id}"
|
||||||
|
)
|
||||||
|
|
||||||
graph_exec.status = ExecutionStatus.TERMINATED
|
graph_exec.status = ExecutionStatus.TERMINATED
|
||||||
|
|
||||||
await asyncio.gather(
|
await asyncio.gather(
|
||||||
|
|||||||
@@ -670,3 +670,232 @@ async def test_add_graph_execution_with_nodes_to_skip(mocker: MockerFixture):
|
|||||||
# Verify nodes_to_skip was passed to to_graph_execution_entry
|
# Verify nodes_to_skip was passed to to_graph_execution_entry
|
||||||
assert "nodes_to_skip" in captured_kwargs
|
assert "nodes_to_skip" in captured_kwargs
|
||||||
assert captured_kwargs["nodes_to_skip"] == nodes_to_skip
|
assert captured_kwargs["nodes_to_skip"] == nodes_to_skip
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_stop_graph_execution_in_review_status_cancels_pending_reviews(
|
||||||
|
mocker: MockerFixture,
|
||||||
|
):
|
||||||
|
"""Test that stopping an execution in REVIEW status cancels pending reviews."""
|
||||||
|
from backend.data.execution import ExecutionStatus, GraphExecutionMeta
|
||||||
|
from backend.executor.utils import stop_graph_execution
|
||||||
|
|
||||||
|
user_id = "test-user"
|
||||||
|
graph_exec_id = "test-exec-123"
|
||||||
|
|
||||||
|
# Mock graph execution in REVIEW status
|
||||||
|
mock_graph_exec = mocker.MagicMock(spec=GraphExecutionMeta)
|
||||||
|
mock_graph_exec.id = graph_exec_id
|
||||||
|
mock_graph_exec.status = ExecutionStatus.REVIEW
|
||||||
|
|
||||||
|
# Mock dependencies
|
||||||
|
mock_get_queue = mocker.patch("backend.executor.utils.get_async_execution_queue")
|
||||||
|
mock_queue_client = mocker.AsyncMock()
|
||||||
|
mock_get_queue.return_value = mock_queue_client
|
||||||
|
|
||||||
|
mock_prisma = mocker.patch("backend.executor.utils.prisma")
|
||||||
|
mock_prisma.is_connected.return_value = True
|
||||||
|
|
||||||
|
mock_human_review_db = mocker.patch("backend.executor.utils.human_review_db")
|
||||||
|
mock_human_review_db.cancel_pending_reviews_for_execution = mocker.AsyncMock(
|
||||||
|
return_value=2 # 2 reviews cancelled
|
||||||
|
)
|
||||||
|
|
||||||
|
mock_execution_db = mocker.patch("backend.executor.utils.execution_db")
|
||||||
|
mock_execution_db.get_graph_execution_meta = mocker.AsyncMock(
|
||||||
|
return_value=mock_graph_exec
|
||||||
|
)
|
||||||
|
mock_execution_db.update_graph_execution_stats = mocker.AsyncMock()
|
||||||
|
|
||||||
|
mock_get_event_bus = mocker.patch(
|
||||||
|
"backend.executor.utils.get_async_execution_event_bus"
|
||||||
|
)
|
||||||
|
mock_event_bus = mocker.MagicMock()
|
||||||
|
mock_event_bus.publish = mocker.AsyncMock()
|
||||||
|
mock_get_event_bus.return_value = mock_event_bus
|
||||||
|
|
||||||
|
mock_get_child_executions = mocker.patch(
|
||||||
|
"backend.executor.utils._get_child_executions"
|
||||||
|
)
|
||||||
|
mock_get_child_executions.return_value = [] # No children
|
||||||
|
|
||||||
|
# Call stop_graph_execution with timeout to allow status check
|
||||||
|
await stop_graph_execution(
|
||||||
|
user_id=user_id,
|
||||||
|
graph_exec_id=graph_exec_id,
|
||||||
|
wait_timeout=1.0, # Wait to allow status check
|
||||||
|
cascade=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Verify pending reviews were cancelled
|
||||||
|
mock_human_review_db.cancel_pending_reviews_for_execution.assert_called_once_with(
|
||||||
|
graph_exec_id, user_id
|
||||||
|
)
|
||||||
|
|
||||||
|
# Verify execution status was updated to TERMINATED
|
||||||
|
mock_execution_db.update_graph_execution_stats.assert_called_once()
|
||||||
|
call_kwargs = mock_execution_db.update_graph_execution_stats.call_args[1]
|
||||||
|
assert call_kwargs["graph_exec_id"] == graph_exec_id
|
||||||
|
assert call_kwargs["status"] == ExecutionStatus.TERMINATED
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_stop_graph_execution_with_database_manager_when_prisma_disconnected(
|
||||||
|
mocker: MockerFixture,
|
||||||
|
):
|
||||||
|
"""Test that stop uses database manager when Prisma is not connected."""
|
||||||
|
from backend.data.execution import ExecutionStatus, GraphExecutionMeta
|
||||||
|
from backend.executor.utils import stop_graph_execution
|
||||||
|
|
||||||
|
user_id = "test-user"
|
||||||
|
graph_exec_id = "test-exec-456"
|
||||||
|
|
||||||
|
# Mock graph execution in REVIEW status
|
||||||
|
mock_graph_exec = mocker.MagicMock(spec=GraphExecutionMeta)
|
||||||
|
mock_graph_exec.id = graph_exec_id
|
||||||
|
mock_graph_exec.status = ExecutionStatus.REVIEW
|
||||||
|
|
||||||
|
# Mock dependencies
|
||||||
|
mock_get_queue = mocker.patch("backend.executor.utils.get_async_execution_queue")
|
||||||
|
mock_queue_client = mocker.AsyncMock()
|
||||||
|
mock_get_queue.return_value = mock_queue_client
|
||||||
|
|
||||||
|
# Prisma is NOT connected
|
||||||
|
mock_prisma = mocker.patch("backend.executor.utils.prisma")
|
||||||
|
mock_prisma.is_connected.return_value = False
|
||||||
|
|
||||||
|
# Mock database manager client
|
||||||
|
mock_get_db_manager = mocker.patch(
|
||||||
|
"backend.executor.utils.get_database_manager_async_client"
|
||||||
|
)
|
||||||
|
mock_db_manager = mocker.AsyncMock()
|
||||||
|
mock_db_manager.get_graph_execution_meta = mocker.AsyncMock(
|
||||||
|
return_value=mock_graph_exec
|
||||||
|
)
|
||||||
|
mock_db_manager.cancel_pending_reviews_for_execution = mocker.AsyncMock(
|
||||||
|
return_value=3 # 3 reviews cancelled
|
||||||
|
)
|
||||||
|
mock_db_manager.update_graph_execution_stats = mocker.AsyncMock()
|
||||||
|
mock_get_db_manager.return_value = mock_db_manager
|
||||||
|
|
||||||
|
mock_get_event_bus = mocker.patch(
|
||||||
|
"backend.executor.utils.get_async_execution_event_bus"
|
||||||
|
)
|
||||||
|
mock_event_bus = mocker.MagicMock()
|
||||||
|
mock_event_bus.publish = mocker.AsyncMock()
|
||||||
|
mock_get_event_bus.return_value = mock_event_bus
|
||||||
|
|
||||||
|
mock_get_child_executions = mocker.patch(
|
||||||
|
"backend.executor.utils._get_child_executions"
|
||||||
|
)
|
||||||
|
mock_get_child_executions.return_value = [] # No children
|
||||||
|
|
||||||
|
# Call stop_graph_execution with timeout
|
||||||
|
await stop_graph_execution(
|
||||||
|
user_id=user_id,
|
||||||
|
graph_exec_id=graph_exec_id,
|
||||||
|
wait_timeout=1.0,
|
||||||
|
cascade=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Verify database manager was used for cancel_pending_reviews
|
||||||
|
mock_db_manager.cancel_pending_reviews_for_execution.assert_called_once_with(
|
||||||
|
graph_exec_id, user_id
|
||||||
|
)
|
||||||
|
|
||||||
|
# Verify execution status was updated via database manager
|
||||||
|
mock_db_manager.update_graph_execution_stats.assert_called_once()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_stop_graph_execution_cascades_to_child_with_reviews(
|
||||||
|
mocker: MockerFixture,
|
||||||
|
):
|
||||||
|
"""Test that stopping parent execution cascades to children and cancels their reviews."""
|
||||||
|
from backend.data.execution import ExecutionStatus, GraphExecutionMeta
|
||||||
|
from backend.executor.utils import stop_graph_execution
|
||||||
|
|
||||||
|
user_id = "test-user"
|
||||||
|
parent_exec_id = "parent-exec"
|
||||||
|
child_exec_id = "child-exec"
|
||||||
|
|
||||||
|
# Mock parent execution in RUNNING status
|
||||||
|
mock_parent_exec = mocker.MagicMock(spec=GraphExecutionMeta)
|
||||||
|
mock_parent_exec.id = parent_exec_id
|
||||||
|
mock_parent_exec.status = ExecutionStatus.RUNNING
|
||||||
|
|
||||||
|
# Mock child execution in REVIEW status
|
||||||
|
mock_child_exec = mocker.MagicMock(spec=GraphExecutionMeta)
|
||||||
|
mock_child_exec.id = child_exec_id
|
||||||
|
mock_child_exec.status = ExecutionStatus.REVIEW
|
||||||
|
|
||||||
|
# Mock dependencies
|
||||||
|
mock_get_queue = mocker.patch("backend.executor.utils.get_async_execution_queue")
|
||||||
|
mock_queue_client = mocker.AsyncMock()
|
||||||
|
mock_get_queue.return_value = mock_queue_client
|
||||||
|
|
||||||
|
mock_prisma = mocker.patch("backend.executor.utils.prisma")
|
||||||
|
mock_prisma.is_connected.return_value = True
|
||||||
|
|
||||||
|
mock_human_review_db = mocker.patch("backend.executor.utils.human_review_db")
|
||||||
|
mock_human_review_db.cancel_pending_reviews_for_execution = mocker.AsyncMock(
|
||||||
|
return_value=1 # 1 child review cancelled
|
||||||
|
)
|
||||||
|
|
||||||
|
# Mock execution_db to return different status based on which execution is queried
|
||||||
|
mock_execution_db = mocker.patch("backend.executor.utils.execution_db")
|
||||||
|
|
||||||
|
# Track call count to simulate status transition
|
||||||
|
call_count = {"count": 0}
|
||||||
|
|
||||||
|
async def get_exec_meta_side_effect(execution_id, user_id):
|
||||||
|
call_count["count"] += 1
|
||||||
|
if execution_id == parent_exec_id:
|
||||||
|
# After a few calls (child processing happens), transition parent to TERMINATED
|
||||||
|
# This simulates the executor service processing the stop request
|
||||||
|
if call_count["count"] > 3:
|
||||||
|
mock_parent_exec.status = ExecutionStatus.TERMINATED
|
||||||
|
return mock_parent_exec
|
||||||
|
elif execution_id == child_exec_id:
|
||||||
|
return mock_child_exec
|
||||||
|
return None
|
||||||
|
|
||||||
|
mock_execution_db.get_graph_execution_meta = mocker.AsyncMock(
|
||||||
|
side_effect=get_exec_meta_side_effect
|
||||||
|
)
|
||||||
|
mock_execution_db.update_graph_execution_stats = mocker.AsyncMock()
|
||||||
|
|
||||||
|
mock_get_event_bus = mocker.patch(
|
||||||
|
"backend.executor.utils.get_async_execution_event_bus"
|
||||||
|
)
|
||||||
|
mock_event_bus = mocker.MagicMock()
|
||||||
|
mock_event_bus.publish = mocker.AsyncMock()
|
||||||
|
mock_get_event_bus.return_value = mock_event_bus
|
||||||
|
|
||||||
|
# Mock _get_child_executions to return the child
|
||||||
|
mock_get_child_executions = mocker.patch(
|
||||||
|
"backend.executor.utils._get_child_executions"
|
||||||
|
)
|
||||||
|
|
||||||
|
def get_children_side_effect(parent_id):
|
||||||
|
if parent_id == parent_exec_id:
|
||||||
|
return [mock_child_exec]
|
||||||
|
return []
|
||||||
|
|
||||||
|
mock_get_child_executions.side_effect = get_children_side_effect
|
||||||
|
|
||||||
|
# Call stop_graph_execution on parent with cascade=True
|
||||||
|
await stop_graph_execution(
|
||||||
|
user_id=user_id,
|
||||||
|
graph_exec_id=parent_exec_id,
|
||||||
|
wait_timeout=1.0,
|
||||||
|
cascade=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Verify child reviews were cancelled
|
||||||
|
mock_human_review_db.cancel_pending_reviews_for_execution.assert_called_once_with(
|
||||||
|
child_exec_id, user_id
|
||||||
|
)
|
||||||
|
|
||||||
|
# Verify both parent and child status updates
|
||||||
|
assert mock_execution_db.update_graph_execution_stats.call_count >= 1
|
||||||
|
|||||||
@@ -0,0 +1,7 @@
|
|||||||
|
-- Remove NodeExecution foreign key from PendingHumanReview
|
||||||
|
-- The nodeExecId column remains as the primary key, but we remove the FK constraint
|
||||||
|
-- to AgentNodeExecution since PendingHumanReview records can persist after node
|
||||||
|
-- execution records are deleted.
|
||||||
|
|
||||||
|
-- Drop foreign key constraint that linked PendingHumanReview.nodeExecId to AgentNodeExecution.id
|
||||||
|
ALTER TABLE "PendingHumanReview" DROP CONSTRAINT IF EXISTS "PendingHumanReview_nodeExecId_fkey";
|
||||||
@@ -517,8 +517,6 @@ model AgentNodeExecution {
|
|||||||
|
|
||||||
stats Json?
|
stats Json?
|
||||||
|
|
||||||
PendingHumanReview PendingHumanReview?
|
|
||||||
|
|
||||||
@@index([agentGraphExecutionId, agentNodeId, executionStatus])
|
@@index([agentGraphExecutionId, agentNodeId, executionStatus])
|
||||||
@@index([agentNodeId, executionStatus])
|
@@index([agentNodeId, executionStatus])
|
||||||
@@index([addedTime, queuedTime])
|
@@index([addedTime, queuedTime])
|
||||||
@@ -567,6 +565,7 @@ enum ReviewStatus {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Pending human reviews for Human-in-the-loop blocks
|
// Pending human reviews for Human-in-the-loop blocks
|
||||||
|
// Also stores auto-approval records with special nodeExecId patterns (e.g., "auto_approve_{graph_exec_id}_{node_id}")
|
||||||
model PendingHumanReview {
|
model PendingHumanReview {
|
||||||
nodeExecId String @id
|
nodeExecId String @id
|
||||||
userId String
|
userId String
|
||||||
@@ -585,7 +584,6 @@ model PendingHumanReview {
|
|||||||
reviewedAt DateTime?
|
reviewedAt DateTime?
|
||||||
|
|
||||||
User User @relation(fields: [userId], references: [id], onDelete: Cascade)
|
User User @relation(fields: [userId], references: [id], onDelete: Cascade)
|
||||||
NodeExecution AgentNodeExecution @relation(fields: [nodeExecId], references: [id], onDelete: Cascade)
|
|
||||||
GraphExecution AgentGraphExecution @relation(fields: [graphExecId], references: [id], onDelete: Cascade)
|
GraphExecution AgentGraphExecution @relation(fields: [graphExecId], references: [id], onDelete: Cascade)
|
||||||
|
|
||||||
@@unique([nodeExecId]) // One pending review per node execution
|
@@unique([nodeExecId]) // One pending review per node execution
|
||||||
|
|||||||
@@ -86,7 +86,6 @@ export function FloatingSafeModeToggle({
|
|||||||
const {
|
const {
|
||||||
currentHITLSafeMode,
|
currentHITLSafeMode,
|
||||||
showHITLToggle,
|
showHITLToggle,
|
||||||
isHITLStateUndetermined,
|
|
||||||
handleHITLToggle,
|
handleHITLToggle,
|
||||||
currentSensitiveActionSafeMode,
|
currentSensitiveActionSafeMode,
|
||||||
showSensitiveActionToggle,
|
showSensitiveActionToggle,
|
||||||
@@ -99,16 +98,9 @@ export function FloatingSafeModeToggle({
|
|||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
const showHITL = showHITLToggle && !isHITLStateUndetermined;
|
|
||||||
const showSensitive = showSensitiveActionToggle;
|
|
||||||
|
|
||||||
if (!showHITL && !showSensitive) {
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<div className={cn("fixed z-50 flex flex-col gap-2", className)}>
|
<div className={cn("fixed z-50 flex flex-col gap-2", className)}>
|
||||||
{showHITL && (
|
{showHITLToggle && (
|
||||||
<SafeModeButton
|
<SafeModeButton
|
||||||
isEnabled={currentHITLSafeMode}
|
isEnabled={currentHITLSafeMode}
|
||||||
label="Human in the loop block approval"
|
label="Human in the loop block approval"
|
||||||
@@ -119,7 +111,7 @@ export function FloatingSafeModeToggle({
|
|||||||
fullWidth={fullWidth}
|
fullWidth={fullWidth}
|
||||||
/>
|
/>
|
||||||
)}
|
)}
|
||||||
{showSensitive && (
|
{showSensitiveActionToggle && (
|
||||||
<SafeModeButton
|
<SafeModeButton
|
||||||
isEnabled={currentSensitiveActionSafeMode}
|
isEnabled={currentSensitiveActionSafeMode}
|
||||||
label="Sensitive actions blocks approval"
|
label="Sensitive actions blocks approval"
|
||||||
|
|||||||
@@ -14,6 +14,10 @@ import {
|
|||||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||||
import { useEffect, useRef, useState } from "react";
|
import { useEffect, useRef, useState } from "react";
|
||||||
import { ScheduleAgentModal } from "../ScheduleAgentModal/ScheduleAgentModal";
|
import { ScheduleAgentModal } from "../ScheduleAgentModal/ScheduleAgentModal";
|
||||||
|
import {
|
||||||
|
AIAgentSafetyPopup,
|
||||||
|
useAIAgentSafetyPopup,
|
||||||
|
} from "./components/AIAgentSafetyPopup/AIAgentSafetyPopup";
|
||||||
import { ModalHeader } from "./components/ModalHeader/ModalHeader";
|
import { ModalHeader } from "./components/ModalHeader/ModalHeader";
|
||||||
import { ModalRunSection } from "./components/ModalRunSection/ModalRunSection";
|
import { ModalRunSection } from "./components/ModalRunSection/ModalRunSection";
|
||||||
import { RunActions } from "./components/RunActions/RunActions";
|
import { RunActions } from "./components/RunActions/RunActions";
|
||||||
@@ -83,8 +87,18 @@ export function RunAgentModal({
|
|||||||
|
|
||||||
const [isScheduleModalOpen, setIsScheduleModalOpen] = useState(false);
|
const [isScheduleModalOpen, setIsScheduleModalOpen] = useState(false);
|
||||||
const [hasOverflow, setHasOverflow] = useState(false);
|
const [hasOverflow, setHasOverflow] = useState(false);
|
||||||
|
const [isSafetyPopupOpen, setIsSafetyPopupOpen] = useState(false);
|
||||||
|
const [pendingRunAction, setPendingRunAction] = useState<(() => void) | null>(
|
||||||
|
null,
|
||||||
|
);
|
||||||
const contentRef = useRef<HTMLDivElement>(null);
|
const contentRef = useRef<HTMLDivElement>(null);
|
||||||
|
|
||||||
|
const { shouldShowPopup, dismissPopup } = useAIAgentSafetyPopup(
|
||||||
|
agent.id,
|
||||||
|
agent.has_sensitive_action,
|
||||||
|
agent.has_human_in_the_loop,
|
||||||
|
);
|
||||||
|
|
||||||
const hasAnySetupFields =
|
const hasAnySetupFields =
|
||||||
Object.keys(agentInputFields || {}).length > 0 ||
|
Object.keys(agentInputFields || {}).length > 0 ||
|
||||||
Object.keys(agentCredentialsInputFields || {}).length > 0;
|
Object.keys(agentCredentialsInputFields || {}).length > 0;
|
||||||
@@ -165,6 +179,24 @@ export function RunAgentModal({
|
|||||||
onScheduleCreated?.(schedule);
|
onScheduleCreated?.(schedule);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function handleRunWithSafetyCheck() {
|
||||||
|
if (shouldShowPopup) {
|
||||||
|
setPendingRunAction(() => handleRun);
|
||||||
|
setIsSafetyPopupOpen(true);
|
||||||
|
} else {
|
||||||
|
handleRun();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function handleSafetyPopupAcknowledge() {
|
||||||
|
setIsSafetyPopupOpen(false);
|
||||||
|
dismissPopup();
|
||||||
|
if (pendingRunAction) {
|
||||||
|
pendingRunAction();
|
||||||
|
setPendingRunAction(null);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<>
|
<>
|
||||||
<Dialog
|
<Dialog
|
||||||
@@ -248,7 +280,7 @@ export function RunAgentModal({
|
|||||||
)}
|
)}
|
||||||
<RunActions
|
<RunActions
|
||||||
defaultRunType={defaultRunType}
|
defaultRunType={defaultRunType}
|
||||||
onRun={handleRun}
|
onRun={handleRunWithSafetyCheck}
|
||||||
isExecuting={isExecuting}
|
isExecuting={isExecuting}
|
||||||
isSettingUpTrigger={isSettingUpTrigger}
|
isSettingUpTrigger={isSettingUpTrigger}
|
||||||
isRunReady={allRequiredInputsAreSet}
|
isRunReady={allRequiredInputsAreSet}
|
||||||
@@ -266,6 +298,12 @@ export function RunAgentModal({
|
|||||||
</div>
|
</div>
|
||||||
</Dialog.Content>
|
</Dialog.Content>
|
||||||
</Dialog>
|
</Dialog>
|
||||||
|
|
||||||
|
<AIAgentSafetyPopup
|
||||||
|
agentId={agent.id}
|
||||||
|
isOpen={isSafetyPopupOpen}
|
||||||
|
onAcknowledge={handleSafetyPopupAcknowledge}
|
||||||
|
/>
|
||||||
</>
|
</>
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -0,0 +1,108 @@
|
|||||||
|
"use client";
|
||||||
|
|
||||||
|
import { Button } from "@/components/atoms/Button/Button";
|
||||||
|
import { Text } from "@/components/atoms/Text/Text";
|
||||||
|
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||||
|
import { Key, storage } from "@/services/storage/local-storage";
|
||||||
|
import { ShieldCheckIcon } from "@phosphor-icons/react";
|
||||||
|
import { useCallback, useEffect, useState } from "react";
|
||||||
|
|
||||||
|
interface Props {
|
||||||
|
agentId: string;
|
||||||
|
onAcknowledge: () => void;
|
||||||
|
isOpen: boolean;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function AIAgentSafetyPopup({ agentId, onAcknowledge, isOpen }: Props) {
|
||||||
|
function handleAcknowledge() {
|
||||||
|
// Add this agent to the list of agents for which popup has been shown
|
||||||
|
const seenAgentsJson = storage.get(Key.AI_AGENT_SAFETY_POPUP_SHOWN);
|
||||||
|
const seenAgents: string[] = seenAgentsJson
|
||||||
|
? JSON.parse(seenAgentsJson)
|
||||||
|
: [];
|
||||||
|
|
||||||
|
if (!seenAgents.includes(agentId)) {
|
||||||
|
seenAgents.push(agentId);
|
||||||
|
storage.set(Key.AI_AGENT_SAFETY_POPUP_SHOWN, JSON.stringify(seenAgents));
|
||||||
|
}
|
||||||
|
|
||||||
|
onAcknowledge();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!isOpen) return null;
|
||||||
|
|
||||||
|
return (
|
||||||
|
<Dialog
|
||||||
|
controlled={{ isOpen, set: () => {} }}
|
||||||
|
styling={{ maxWidth: "480px" }}
|
||||||
|
>
|
||||||
|
<Dialog.Content>
|
||||||
|
<div className="flex flex-col items-center p-6 text-center">
|
||||||
|
<div className="mb-6 flex h-16 w-16 items-center justify-center rounded-full bg-blue-50">
|
||||||
|
<ShieldCheckIcon
|
||||||
|
weight="fill"
|
||||||
|
size={32}
|
||||||
|
className="text-blue-600"
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<Text variant="h3" className="mb-4">
|
||||||
|
Safety Checks Enabled
|
||||||
|
</Text>
|
||||||
|
|
||||||
|
<Text variant="body" className="mb-2 text-zinc-700">
|
||||||
|
AI-generated agents may take actions that affect your data or
|
||||||
|
external systems.
|
||||||
|
</Text>
|
||||||
|
|
||||||
|
<Text variant="body" className="mb-8 text-zinc-700">
|
||||||
|
AutoGPT includes safety checks so you'll always have the
|
||||||
|
opportunity to review and approve sensitive actions before they
|
||||||
|
happen.
|
||||||
|
</Text>
|
||||||
|
|
||||||
|
<Button
|
||||||
|
variant="primary"
|
||||||
|
size="large"
|
||||||
|
className="w-full"
|
||||||
|
onClick={handleAcknowledge}
|
||||||
|
>
|
||||||
|
Got it
|
||||||
|
</Button>
|
||||||
|
</div>
|
||||||
|
</Dialog.Content>
|
||||||
|
</Dialog>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
export function useAIAgentSafetyPopup(
|
||||||
|
agentId: string,
|
||||||
|
hasSensitiveAction: boolean,
|
||||||
|
hasHumanInTheLoop: boolean,
|
||||||
|
) {
|
||||||
|
const [shouldShowPopup, setShouldShowPopup] = useState(false);
|
||||||
|
const [hasChecked, setHasChecked] = useState(false);
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
if (hasChecked) return;
|
||||||
|
|
||||||
|
const seenAgentsJson = storage.get(Key.AI_AGENT_SAFETY_POPUP_SHOWN);
|
||||||
|
const seenAgents: string[] = seenAgentsJson
|
||||||
|
? JSON.parse(seenAgentsJson)
|
||||||
|
: [];
|
||||||
|
const hasSeenPopupForThisAgent = seenAgents.includes(agentId);
|
||||||
|
const isRelevantAgent = hasSensitiveAction || hasHumanInTheLoop;
|
||||||
|
|
||||||
|
setShouldShowPopup(!hasSeenPopupForThisAgent && isRelevantAgent);
|
||||||
|
setHasChecked(true);
|
||||||
|
}, [agentId, hasSensitiveAction, hasHumanInTheLoop, hasChecked]);
|
||||||
|
|
||||||
|
const dismissPopup = useCallback(() => {
|
||||||
|
setShouldShowPopup(false);
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
return {
|
||||||
|
shouldShowPopup,
|
||||||
|
dismissPopup,
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -69,7 +69,6 @@ export function SafeModeToggle({ graph, className }: Props) {
|
|||||||
const {
|
const {
|
||||||
currentHITLSafeMode,
|
currentHITLSafeMode,
|
||||||
showHITLToggle,
|
showHITLToggle,
|
||||||
isHITLStateUndetermined,
|
|
||||||
handleHITLToggle,
|
handleHITLToggle,
|
||||||
currentSensitiveActionSafeMode,
|
currentSensitiveActionSafeMode,
|
||||||
showSensitiveActionToggle,
|
showSensitiveActionToggle,
|
||||||
@@ -78,20 +77,13 @@ export function SafeModeToggle({ graph, className }: Props) {
|
|||||||
shouldShowToggle,
|
shouldShowToggle,
|
||||||
} = useAgentSafeMode(graph);
|
} = useAgentSafeMode(graph);
|
||||||
|
|
||||||
if (!shouldShowToggle || isHITLStateUndetermined) {
|
if (!shouldShowToggle) {
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
const showHITL = showHITLToggle && !isHITLStateUndetermined;
|
|
||||||
const showSensitive = showSensitiveActionToggle;
|
|
||||||
|
|
||||||
if (!showHITL && !showSensitive) {
|
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<div className={cn("flex gap-1", className)}>
|
<div className={cn("flex gap-1", className)}>
|
||||||
{showHITL && (
|
{showHITLToggle && (
|
||||||
<SafeModeIconButton
|
<SafeModeIconButton
|
||||||
isEnabled={currentHITLSafeMode}
|
isEnabled={currentHITLSafeMode}
|
||||||
label="Human-in-the-loop"
|
label="Human-in-the-loop"
|
||||||
@@ -101,7 +93,7 @@ export function SafeModeToggle({ graph, className }: Props) {
|
|||||||
isPending={isPending}
|
isPending={isPending}
|
||||||
/>
|
/>
|
||||||
)}
|
)}
|
||||||
{showSensitive && (
|
{showSensitiveActionToggle && (
|
||||||
<SafeModeIconButton
|
<SafeModeIconButton
|
||||||
isEnabled={currentSensitiveActionSafeMode}
|
isEnabled={currentSensitiveActionSafeMode}
|
||||||
label="Sensitive actions"
|
label="Sensitive actions"
|
||||||
|
|||||||
@@ -9411,6 +9411,12 @@
|
|||||||
],
|
],
|
||||||
"title": "Reviewed Data",
|
"title": "Reviewed Data",
|
||||||
"description": "Optional edited data (ignored if approved=False)"
|
"description": "Optional edited data (ignored if approved=False)"
|
||||||
|
},
|
||||||
|
"auto_approve_future": {
|
||||||
|
"type": "boolean",
|
||||||
|
"title": "Auto Approve Future",
|
||||||
|
"description": "If true and this review is approved, future executions of this same block (node) will be automatically approved. This only affects approved reviews.",
|
||||||
|
"default": false
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"type": "object",
|
"type": "object",
|
||||||
@@ -9430,7 +9436,7 @@
|
|||||||
"type": "object",
|
"type": "object",
|
||||||
"required": ["reviews"],
|
"required": ["reviews"],
|
||||||
"title": "ReviewRequest",
|
"title": "ReviewRequest",
|
||||||
"description": "Request model for processing ALL pending reviews for an execution.\n\nThis request must include ALL pending reviews for a graph execution.\nEach review will be either approved (with optional data modifications)\nor rejected (data ignored). The execution will resume only after ALL reviews are processed."
|
"description": "Request model for processing ALL pending reviews for an execution.\n\nThis request must include ALL pending reviews for a graph execution.\nEach review will be either approved (with optional data modifications)\nor rejected (data ignored). The execution will resume only after ALL reviews are processed.\n\nEach review item can individually specify whether to auto-approve future executions\nof the same block via the `auto_approve_future` field on ReviewItem."
|
||||||
},
|
},
|
||||||
"ReviewResponse": {
|
"ReviewResponse": {
|
||||||
"properties": {
|
"properties": {
|
||||||
|
|||||||
@@ -31,6 +31,29 @@ export function FloatingReviewsPanel({
|
|||||||
query: {
|
query: {
|
||||||
enabled: !!(graphId && executionId),
|
enabled: !!(graphId && executionId),
|
||||||
select: okData,
|
select: okData,
|
||||||
|
// Poll while execution is in progress to detect status changes
|
||||||
|
refetchInterval: (q) => {
|
||||||
|
// Note: refetchInterval callback receives raw data before select transform
|
||||||
|
const rawData = q.state.data as
|
||||||
|
| { status: number; data?: { status?: string } }
|
||||||
|
| undefined;
|
||||||
|
if (rawData?.status !== 200) return false;
|
||||||
|
|
||||||
|
const status = rawData?.data?.status;
|
||||||
|
if (!status) return false;
|
||||||
|
|
||||||
|
// Poll every 2 seconds while running or in review
|
||||||
|
if (
|
||||||
|
status === AgentExecutionStatus.RUNNING ||
|
||||||
|
status === AgentExecutionStatus.QUEUED ||
|
||||||
|
status === AgentExecutionStatus.INCOMPLETE ||
|
||||||
|
status === AgentExecutionStatus.REVIEW
|
||||||
|
) {
|
||||||
|
return 2000;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
},
|
||||||
|
refetchIntervalInBackground: true,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
);
|
);
|
||||||
@@ -40,28 +63,47 @@ export function FloatingReviewsPanel({
|
|||||||
useShallow((state) => state.graphExecutionStatus),
|
useShallow((state) => state.graphExecutionStatus),
|
||||||
);
|
);
|
||||||
|
|
||||||
|
// Determine if we should poll for pending reviews
|
||||||
|
const isInReviewStatus =
|
||||||
|
executionDetails?.status === AgentExecutionStatus.REVIEW ||
|
||||||
|
graphExecutionStatus === AgentExecutionStatus.REVIEW;
|
||||||
|
|
||||||
const { pendingReviews, isLoading, refetch } = usePendingReviewsForExecution(
|
const { pendingReviews, isLoading, refetch } = usePendingReviewsForExecution(
|
||||||
executionId || "",
|
executionId || "",
|
||||||
|
{
|
||||||
|
enabled: !!executionId,
|
||||||
|
// Poll every 2 seconds when in REVIEW status to catch new reviews
|
||||||
|
refetchInterval: isInReviewStatus ? 2000 : false,
|
||||||
|
},
|
||||||
);
|
);
|
||||||
|
|
||||||
|
// Refetch pending reviews when execution status changes
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
if (executionId) {
|
if (executionId && executionDetails?.status) {
|
||||||
refetch();
|
refetch();
|
||||||
}
|
}
|
||||||
}, [executionDetails?.status, executionId, refetch]);
|
}, [executionDetails?.status, executionId, refetch]);
|
||||||
|
|
||||||
// Refetch when graph execution status changes to REVIEW
|
// Hide panel if:
|
||||||
useEffect(() => {
|
// 1. No execution ID
|
||||||
if (graphExecutionStatus === AgentExecutionStatus.REVIEW && executionId) {
|
// 2. No pending reviews and not in REVIEW status
|
||||||
refetch();
|
// 3. Execution is RUNNING or QUEUED (hasn't paused for review yet)
|
||||||
|
if (!executionId) {
|
||||||
|
return null;
|
||||||
}
|
}
|
||||||
}, [graphExecutionStatus, executionId, refetch]);
|
|
||||||
|
|
||||||
if (
|
if (
|
||||||
!executionId ||
|
!isLoading &&
|
||||||
(!isLoading &&
|
|
||||||
pendingReviews.length === 0 &&
|
pendingReviews.length === 0 &&
|
||||||
executionDetails?.status !== AgentExecutionStatus.REVIEW)
|
executionDetails?.status !== AgentExecutionStatus.REVIEW
|
||||||
|
) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Don't show panel while execution is still running/queued (not paused for review)
|
||||||
|
if (
|
||||||
|
executionDetails?.status === AgentExecutionStatus.RUNNING ||
|
||||||
|
executionDetails?.status === AgentExecutionStatus.QUEUED
|
||||||
) {
|
) {
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -44,6 +44,8 @@ interface PendingReviewCardProps {
|
|||||||
onReviewMessageChange?: (nodeExecId: string, message: string) => void;
|
onReviewMessageChange?: (nodeExecId: string, message: string) => void;
|
||||||
isDisabled?: boolean;
|
isDisabled?: boolean;
|
||||||
onToggleDisabled?: (nodeExecId: string) => void;
|
onToggleDisabled?: (nodeExecId: string) => void;
|
||||||
|
autoApproveFuture?: boolean;
|
||||||
|
onAutoApproveFutureChange?: (nodeExecId: string, enabled: boolean) => void;
|
||||||
}
|
}
|
||||||
|
|
||||||
export function PendingReviewCard({
|
export function PendingReviewCard({
|
||||||
@@ -53,6 +55,8 @@ export function PendingReviewCard({
|
|||||||
onReviewMessageChange,
|
onReviewMessageChange,
|
||||||
isDisabled = false,
|
isDisabled = false,
|
||||||
onToggleDisabled,
|
onToggleDisabled,
|
||||||
|
autoApproveFuture = false,
|
||||||
|
onAutoApproveFutureChange,
|
||||||
}: PendingReviewCardProps) {
|
}: PendingReviewCardProps) {
|
||||||
const extractedData = extractReviewData(review.payload);
|
const extractedData = extractReviewData(review.payload);
|
||||||
const isDataEditable = review.editable;
|
const isDataEditable = review.editable;
|
||||||
@@ -210,6 +214,29 @@ export function PendingReviewCard({
|
|||||||
</div>
|
</div>
|
||||||
)}
|
)}
|
||||||
|
|
||||||
|
{/* Auto-approve toggle for this review */}
|
||||||
|
{!showSimplified && !isDisabled && onAutoApproveFutureChange && (
|
||||||
|
<div className="space-y-2 rounded-lg border border-blue-200 bg-blue-50 p-3">
|
||||||
|
<div className="flex items-center gap-3">
|
||||||
|
<Switch
|
||||||
|
checked={autoApproveFuture}
|
||||||
|
onCheckedChange={(enabled: boolean) =>
|
||||||
|
onAutoApproveFutureChange(review.node_exec_id, enabled)
|
||||||
|
}
|
||||||
|
/>
|
||||||
|
<Text variant="small" className="text-textBlack">
|
||||||
|
Auto-approve future executions of this block
|
||||||
|
</Text>
|
||||||
|
</div>
|
||||||
|
{autoApproveFuture && (
|
||||||
|
<Text variant="small" className="text-amber-600">
|
||||||
|
Editing disabled. Original data will be used for this and all
|
||||||
|
future reviews from this block.
|
||||||
|
</Text>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
{!showSimplified && isDisabled && (
|
{!showSimplified && isDisabled && (
|
||||||
<div>
|
<div>
|
||||||
<Text variant="body" className="mb-2 font-semibold">
|
<Text variant="body" className="mb-2 font-semibold">
|
||||||
|
|||||||
@@ -1,8 +1,9 @@
|
|||||||
import { useState } from "react";
|
import { useState, useCallback } from "react";
|
||||||
import { PendingHumanReviewModel } from "@/app/api/__generated__/models/pendingHumanReviewModel";
|
import { PendingHumanReviewModel } from "@/app/api/__generated__/models/pendingHumanReviewModel";
|
||||||
import { PendingReviewCard } from "@/components/organisms/PendingReviewCard/PendingReviewCard";
|
import { PendingReviewCard } from "@/components/organisms/PendingReviewCard/PendingReviewCard";
|
||||||
import { Text } from "@/components/atoms/Text/Text";
|
import { Text } from "@/components/atoms/Text/Text";
|
||||||
import { Button } from "@/components/atoms/Button/Button";
|
import { Button } from "@/components/atoms/Button/Button";
|
||||||
|
import { Switch } from "@/components/atoms/Switch/Switch";
|
||||||
import { useToast } from "@/components/molecules/Toast/use-toast";
|
import { useToast } from "@/components/molecules/Toast/use-toast";
|
||||||
import { ClockIcon, WarningIcon } from "@phosphor-icons/react";
|
import { ClockIcon, WarningIcon } from "@phosphor-icons/react";
|
||||||
import { usePostV2ProcessReviewAction } from "@/app/api/__generated__/endpoints/executions/executions";
|
import { usePostV2ProcessReviewAction } from "@/app/api/__generated__/endpoints/executions/executions";
|
||||||
@@ -40,6 +41,11 @@ export function PendingReviewsList({
|
|||||||
"approve" | "reject" | null
|
"approve" | "reject" | null
|
||||||
>(null);
|
>(null);
|
||||||
|
|
||||||
|
// Track per-review auto-approval state
|
||||||
|
const [autoApproveFutureMap, setAutoApproveFutureMap] = useState<
|
||||||
|
Record<string, boolean>
|
||||||
|
>({});
|
||||||
|
|
||||||
const { toast } = useToast();
|
const { toast } = useToast();
|
||||||
|
|
||||||
const reviewActionMutation = usePostV2ProcessReviewAction({
|
const reviewActionMutation = usePostV2ProcessReviewAction({
|
||||||
@@ -92,6 +98,25 @@ export function PendingReviewsList({
|
|||||||
setReviewMessageMap((prev) => ({ ...prev, [nodeExecId]: message }));
|
setReviewMessageMap((prev) => ({ ...prev, [nodeExecId]: message }));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Handle per-review auto-approval toggle
|
||||||
|
function handleAutoApproveFutureToggle(nodeExecId: string, enabled: boolean) {
|
||||||
|
setAutoApproveFutureMap((prev) => ({
|
||||||
|
...prev,
|
||||||
|
[nodeExecId]: enabled,
|
||||||
|
}));
|
||||||
|
|
||||||
|
if (enabled) {
|
||||||
|
// Reset this review's data to original value
|
||||||
|
const review = reviews.find((r) => r.node_exec_id === nodeExecId);
|
||||||
|
if (review) {
|
||||||
|
setReviewDataMap((prev) => ({
|
||||||
|
...prev,
|
||||||
|
[nodeExecId]: JSON.stringify(review.payload, null, 2),
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
function processReviews(approved: boolean) {
|
function processReviews(approved: boolean) {
|
||||||
if (reviews.length === 0) {
|
if (reviews.length === 0) {
|
||||||
toast({
|
toast({
|
||||||
@@ -108,10 +133,14 @@ export function PendingReviewsList({
|
|||||||
for (const review of reviews) {
|
for (const review of reviews) {
|
||||||
const reviewData = reviewDataMap[review.node_exec_id];
|
const reviewData = reviewDataMap[review.node_exec_id];
|
||||||
const reviewMessage = reviewMessageMap[review.node_exec_id];
|
const reviewMessage = reviewMessageMap[review.node_exec_id];
|
||||||
|
const autoApproveThisReview = autoApproveFutureMap[review.node_exec_id];
|
||||||
|
|
||||||
let parsedData: any = review.payload; // Default to original payload
|
// When auto-approving future actions for this review, send undefined (use original data)
|
||||||
|
// Otherwise, parse and send the edited data if available
|
||||||
|
let parsedData: any = undefined;
|
||||||
|
|
||||||
// Parse edited data if available and editable
|
if (!autoApproveThisReview) {
|
||||||
|
// For regular approve/reject, use edited data if available
|
||||||
if (review.editable && reviewData) {
|
if (review.editable && reviewData) {
|
||||||
try {
|
try {
|
||||||
parsedData = JSON.parse(reviewData);
|
parsedData = JSON.parse(reviewData);
|
||||||
@@ -124,13 +153,20 @@ export function PendingReviewsList({
|
|||||||
setPendingAction(null);
|
setPendingAction(null);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
// No edits, use original payload
|
||||||
|
parsedData = review.payload;
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
// When autoApproveThisReview is true, parsedData stays undefined
|
||||||
|
// Backend will use the original payload stored in the database
|
||||||
|
|
||||||
reviewItems.push({
|
reviewItems.push({
|
||||||
node_exec_id: review.node_exec_id,
|
node_exec_id: review.node_exec_id,
|
||||||
approved,
|
approved,
|
||||||
reviewed_data: parsedData,
|
reviewed_data: parsedData,
|
||||||
message: reviewMessage || undefined,
|
message: reviewMessage || undefined,
|
||||||
|
auto_approve_future: autoApproveThisReview && approved,
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -182,21 +218,21 @@ export function PendingReviewsList({
|
|||||||
<div className="space-y-7">
|
<div className="space-y-7">
|
||||||
{reviews.map((review) => (
|
{reviews.map((review) => (
|
||||||
<PendingReviewCard
|
<PendingReviewCard
|
||||||
key={review.node_exec_id}
|
key={`${review.node_exec_id}`}
|
||||||
review={review}
|
review={review}
|
||||||
onReviewDataChange={handleReviewDataChange}
|
onReviewDataChange={handleReviewDataChange}
|
||||||
onReviewMessageChange={handleReviewMessageChange}
|
onReviewMessageChange={handleReviewMessageChange}
|
||||||
reviewMessage={reviewMessageMap[review.node_exec_id] || ""}
|
reviewMessage={reviewMessageMap[review.node_exec_id] || ""}
|
||||||
|
isDisabled={autoApproveFutureMap[review.node_exec_id] || false}
|
||||||
|
autoApproveFuture={autoApproveFutureMap[review.node_exec_id] || false}
|
||||||
|
onAutoApproveFutureChange={handleAutoApproveFutureToggle}
|
||||||
/>
|
/>
|
||||||
))}
|
))}
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<div className="space-y-7">
|
<div className="space-y-4">
|
||||||
<Text variant="body" className="text-textGrey">
|
|
||||||
Note: Changes you make here apply only to this task
|
|
||||||
</Text>
|
|
||||||
|
|
||||||
<div className="flex gap-2">
|
<div className="flex flex-wrap gap-2">
|
||||||
<Button
|
<Button
|
||||||
onClick={() => processReviews(true)}
|
onClick={() => processReviews(true)}
|
||||||
disabled={reviewActionMutation.isPending || reviews.length === 0}
|
disabled={reviewActionMutation.isPending || reviews.length === 0}
|
||||||
@@ -220,6 +256,11 @@ export function PendingReviewsList({
|
|||||||
Reject
|
Reject
|
||||||
</Button>
|
</Button>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
<Text variant="small" className="text-textGrey">
|
||||||
|
You can turn auto-approval on or off anytime in this agent's
|
||||||
|
settings.
|
||||||
|
</Text>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
);
|
);
|
||||||
|
|||||||
@@ -15,8 +15,22 @@ export function usePendingReviews() {
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
export function usePendingReviewsForExecution(graphExecId: string) {
|
interface UsePendingReviewsForExecutionOptions {
|
||||||
const query = useGetV2GetPendingReviewsForExecution(graphExecId);
|
enabled?: boolean;
|
||||||
|
refetchInterval?: number | false;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function usePendingReviewsForExecution(
|
||||||
|
graphExecId: string,
|
||||||
|
options?: UsePendingReviewsForExecutionOptions,
|
||||||
|
) {
|
||||||
|
const query = useGetV2GetPendingReviewsForExecution(graphExecId, {
|
||||||
|
query: {
|
||||||
|
enabled: options?.enabled ?? !!graphExecId,
|
||||||
|
refetchInterval: options?.refetchInterval,
|
||||||
|
refetchIntervalInBackground: !!options?.refetchInterval,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
return {
|
return {
|
||||||
pendingReviews: okData(query.data) || [],
|
pendingReviews: okData(query.data) || [],
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ export enum Key {
|
|||||||
LIBRARY_AGENTS_CACHE = "library-agents-cache",
|
LIBRARY_AGENTS_CACHE = "library-agents-cache",
|
||||||
CHAT_SESSION_ID = "chat_session_id",
|
CHAT_SESSION_ID = "chat_session_id",
|
||||||
COOKIE_CONSENT = "autogpt_cookie_consent",
|
COOKIE_CONSENT = "autogpt_cookie_consent",
|
||||||
|
AI_AGENT_SAFETY_POPUP_SHOWN = "ai-agent-safety-popup-shown",
|
||||||
}
|
}
|
||||||
|
|
||||||
function get(key: Key) {
|
function get(key: Key) {
|
||||||
|
|||||||
Reference in New Issue
Block a user