Compare commits
23 Commits
testing-cl
...
feat/sensi
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
dfd7c64068 | ||
|
|
02089bc047 | ||
|
|
bed7b356bb | ||
|
|
4efc0ff502 | ||
|
|
4ad0528257 | ||
|
|
2f440ee80a | ||
|
|
5d0cd88d98 | ||
|
|
033f58c075 | ||
|
|
40ef2d511f | ||
|
|
2a55923ec0 | ||
|
|
b714c0c221 | ||
|
|
ebabc4287e | ||
|
|
ad50f57a2b | ||
|
|
aebd961ef5 | ||
|
|
bcccaa16cc | ||
|
|
d5ddc41b18 | ||
|
|
95eab5b7eb | ||
|
|
832d6e1696 | ||
|
|
8b25e62959 | ||
|
|
35a13e3df5 | ||
|
|
2169b433c9 | ||
|
|
fa0b7029dd | ||
|
|
c20ca47bb0 |
@@ -218,6 +218,7 @@ async def save_agent_to_library(
|
|||||||
library_agents = await library_db.create_library_agent(
|
library_agents = await library_db.create_library_agent(
|
||||||
graph=created_graph,
|
graph=created_graph,
|
||||||
user_id=user_id,
|
user_id=user_id,
|
||||||
|
sensitive_action_safe_mode=True,
|
||||||
create_library_agents_for_sub_graphs=False,
|
create_library_agents_for_sub_graphs=False,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -33,7 +33,7 @@ from .models import (
|
|||||||
UserReadiness,
|
UserReadiness,
|
||||||
)
|
)
|
||||||
from .utils import (
|
from .utils import (
|
||||||
check_user_has_required_credentials,
|
build_missing_credentials_from_graph,
|
||||||
extract_credentials_from_schema,
|
extract_credentials_from_schema,
|
||||||
fetch_graph_from_store_slug,
|
fetch_graph_from_store_slug,
|
||||||
get_or_create_library_agent,
|
get_or_create_library_agent,
|
||||||
@@ -237,15 +237,13 @@ class RunAgentTool(BaseTool):
|
|||||||
# Return credentials needed response with input data info
|
# Return credentials needed response with input data info
|
||||||
# The UI handles credential setup automatically, so the message
|
# The UI handles credential setup automatically, so the message
|
||||||
# focuses on asking about input data
|
# focuses on asking about input data
|
||||||
credentials = extract_credentials_from_schema(
|
requirements_creds_dict = build_missing_credentials_from_graph(
|
||||||
graph.credentials_input_schema
|
graph, None
|
||||||
)
|
)
|
||||||
missing_creds_check = await check_user_has_required_credentials(
|
missing_credentials_dict = build_missing_credentials_from_graph(
|
||||||
user_id, credentials
|
graph, graph_credentials
|
||||||
)
|
)
|
||||||
missing_credentials_dict = {
|
requirements_creds_list = list(requirements_creds_dict.values())
|
||||||
c.id: c.model_dump() for c in missing_creds_check
|
|
||||||
}
|
|
||||||
|
|
||||||
return SetupRequirementsResponse(
|
return SetupRequirementsResponse(
|
||||||
message=self._build_inputs_message(graph, MSG_WHAT_VALUES_TO_USE),
|
message=self._build_inputs_message(graph, MSG_WHAT_VALUES_TO_USE),
|
||||||
@@ -259,7 +257,7 @@ class RunAgentTool(BaseTool):
|
|||||||
ready_to_run=False,
|
ready_to_run=False,
|
||||||
),
|
),
|
||||||
requirements={
|
requirements={
|
||||||
"credentials": [c.model_dump() for c in credentials],
|
"credentials": requirements_creds_list,
|
||||||
"inputs": self._get_inputs_list(graph.input_schema),
|
"inputs": self._get_inputs_list(graph.input_schema),
|
||||||
"execution_modes": self._get_execution_modes(graph),
|
"execution_modes": self._get_execution_modes(graph),
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -22,6 +22,7 @@ from .models import (
|
|||||||
ToolResponseBase,
|
ToolResponseBase,
|
||||||
UserReadiness,
|
UserReadiness,
|
||||||
)
|
)
|
||||||
|
from .utils import build_missing_credentials_from_field_info
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@@ -189,7 +190,11 @@ class RunBlockTool(BaseTool):
|
|||||||
|
|
||||||
if missing_credentials:
|
if missing_credentials:
|
||||||
# Return setup requirements response with missing credentials
|
# Return setup requirements response with missing credentials
|
||||||
missing_creds_dict = {c.id: c.model_dump() for c in missing_credentials}
|
credentials_fields_info = block.input_schema.get_credentials_fields_info()
|
||||||
|
missing_creds_dict = build_missing_credentials_from_field_info(
|
||||||
|
credentials_fields_info, set(matched_credentials.keys())
|
||||||
|
)
|
||||||
|
missing_creds_list = list(missing_creds_dict.values())
|
||||||
|
|
||||||
return SetupRequirementsResponse(
|
return SetupRequirementsResponse(
|
||||||
message=(
|
message=(
|
||||||
@@ -206,7 +211,7 @@ class RunBlockTool(BaseTool):
|
|||||||
ready_to_run=False,
|
ready_to_run=False,
|
||||||
),
|
),
|
||||||
requirements={
|
requirements={
|
||||||
"credentials": [c.model_dump() for c in missing_credentials],
|
"credentials": missing_creds_list,
|
||||||
"inputs": self._get_inputs_list(block),
|
"inputs": self._get_inputs_list(block),
|
||||||
"execution_modes": ["immediate"],
|
"execution_modes": ["immediate"],
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ from backend.api.features.library import model as library_model
|
|||||||
from backend.api.features.store import db as store_db
|
from backend.api.features.store import db as store_db
|
||||||
from backend.data import graph as graph_db
|
from backend.data import graph as graph_db
|
||||||
from backend.data.graph import GraphModel
|
from backend.data.graph import GraphModel
|
||||||
from backend.data.model import CredentialsMetaInput
|
from backend.data.model import CredentialsFieldInfo, CredentialsMetaInput
|
||||||
from backend.integrations.creds_manager import IntegrationCredentialsManager
|
from backend.integrations.creds_manager import IntegrationCredentialsManager
|
||||||
from backend.util.exceptions import NotFoundError
|
from backend.util.exceptions import NotFoundError
|
||||||
|
|
||||||
@@ -89,6 +89,59 @@ def extract_credentials_from_schema(
|
|||||||
return credentials
|
return credentials
|
||||||
|
|
||||||
|
|
||||||
|
def _serialize_missing_credential(
|
||||||
|
field_key: str, field_info: CredentialsFieldInfo
|
||||||
|
) -> dict[str, Any]:
|
||||||
|
"""
|
||||||
|
Convert credential field info into a serializable dict that preserves all supported
|
||||||
|
credential types (e.g., api_key + oauth2) so the UI can offer multiple options.
|
||||||
|
"""
|
||||||
|
supported_types = sorted(field_info.supported_types)
|
||||||
|
provider = next(iter(field_info.provider), "unknown")
|
||||||
|
scopes = sorted(field_info.required_scopes or [])
|
||||||
|
|
||||||
|
return {
|
||||||
|
"id": field_key,
|
||||||
|
"title": field_key.replace("_", " ").title(),
|
||||||
|
"provider": provider,
|
||||||
|
"provider_name": provider.replace("_", " ").title(),
|
||||||
|
"type": supported_types[0] if supported_types else "api_key",
|
||||||
|
"types": supported_types,
|
||||||
|
"scopes": scopes,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def build_missing_credentials_from_graph(
|
||||||
|
graph: GraphModel, matched_credentials: dict[str, CredentialsMetaInput] | None
|
||||||
|
) -> dict[str, Any]:
|
||||||
|
"""
|
||||||
|
Build a missing_credentials mapping from a graph's aggregated credentials inputs,
|
||||||
|
preserving all supported credential types for each field.
|
||||||
|
"""
|
||||||
|
matched_keys = set(matched_credentials.keys()) if matched_credentials else set()
|
||||||
|
aggregated_fields = graph.aggregate_credentials_inputs()
|
||||||
|
|
||||||
|
return {
|
||||||
|
field_key: _serialize_missing_credential(field_key, field_info)
|
||||||
|
for field_key, (field_info, _node_fields) in aggregated_fields.items()
|
||||||
|
if field_key not in matched_keys
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def build_missing_credentials_from_field_info(
|
||||||
|
credential_fields: dict[str, CredentialsFieldInfo],
|
||||||
|
matched_keys: set[str],
|
||||||
|
) -> dict[str, Any]:
|
||||||
|
"""
|
||||||
|
Build missing_credentials mapping from a simple credentials field info dictionary.
|
||||||
|
"""
|
||||||
|
return {
|
||||||
|
field_key: _serialize_missing_credential(field_key, field_info)
|
||||||
|
for field_key, field_info in credential_fields.items()
|
||||||
|
if field_key not in matched_keys
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
def extract_credentials_as_dict(
|
def extract_credentials_as_dict(
|
||||||
credentials_input_schema: dict[str, Any] | None,
|
credentials_input_schema: dict[str, Any] | None,
|
||||||
) -> dict[str, CredentialsMetaInput]:
|
) -> dict[str, CredentialsMetaInput]:
|
||||||
|
|||||||
@@ -179,6 +179,14 @@ class ReviewRequest(BaseModel):
|
|||||||
reviews: List[ReviewItem] = Field(
|
reviews: List[ReviewItem] = Field(
|
||||||
description="All reviews with their approval status, data, and messages"
|
description="All reviews with their approval status, data, and messages"
|
||||||
)
|
)
|
||||||
|
auto_approve_future_actions: bool = Field(
|
||||||
|
default=False,
|
||||||
|
description=(
|
||||||
|
"If true, future reviews from the same blocks (nodes) being approved "
|
||||||
|
"will be automatically approved for the remainder of this execution. "
|
||||||
|
"This only affects the current execution run."
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
@model_validator(mode="after")
|
@model_validator(mode="after")
|
||||||
def validate_review_completeness(self):
|
def validate_review_completeness(self):
|
||||||
|
|||||||
@@ -490,3 +490,321 @@ def test_process_review_action_invalid_node_exec_id(
|
|||||||
# Should be a 400 Bad Request, not 500 Internal Server Error
|
# Should be a 400 Bad Request, not 500 Internal Server Error
|
||||||
assert response.status_code == 400
|
assert response.status_code == 400
|
||||||
assert "Invalid node execution ID format" in response.json()["detail"]
|
assert "Invalid node execution ID format" in response.json()["detail"]
|
||||||
|
|
||||||
|
|
||||||
|
def test_process_review_action_auto_approve_creates_auto_approval_records(
|
||||||
|
mocker: pytest_mock.MockerFixture,
|
||||||
|
sample_pending_review: PendingHumanReviewModel,
|
||||||
|
test_user_id: str,
|
||||||
|
) -> None:
|
||||||
|
"""Test that auto_approve_future_actions flag creates auto-approval records"""
|
||||||
|
from backend.data.execution import ExecutionContext, NodeExecutionResult
|
||||||
|
from backend.data.graph import GraphSettings
|
||||||
|
|
||||||
|
# Mock process_all_reviews
|
||||||
|
mock_process_all_reviews = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.process_all_reviews_for_execution"
|
||||||
|
)
|
||||||
|
approved_review = PendingHumanReviewModel(
|
||||||
|
node_exec_id="test_node_123",
|
||||||
|
user_id=test_user_id,
|
||||||
|
graph_exec_id="test_graph_exec_456",
|
||||||
|
graph_id="test_graph_789",
|
||||||
|
graph_version=1,
|
||||||
|
payload={"data": "test payload"},
|
||||||
|
instructions="Please review",
|
||||||
|
editable=True,
|
||||||
|
status=ReviewStatus.APPROVED,
|
||||||
|
review_message="Approved",
|
||||||
|
was_edited=False,
|
||||||
|
processed=False,
|
||||||
|
created_at=FIXED_NOW,
|
||||||
|
updated_at=FIXED_NOW,
|
||||||
|
reviewed_at=FIXED_NOW,
|
||||||
|
)
|
||||||
|
mock_process_all_reviews.return_value = {"test_node_123": approved_review}
|
||||||
|
|
||||||
|
# Mock get_node_execution to return node_id
|
||||||
|
mock_get_node_execution = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.get_node_execution"
|
||||||
|
)
|
||||||
|
mock_node_exec = mocker.Mock(spec=NodeExecutionResult)
|
||||||
|
mock_node_exec.node_id = "test_node_def_456"
|
||||||
|
mock_get_node_execution.return_value = mock_node_exec
|
||||||
|
|
||||||
|
# Mock create_auto_approval_record
|
||||||
|
mock_create_auto_approval = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.create_auto_approval_record"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Mock has_pending_reviews_for_graph_exec
|
||||||
|
mock_has_pending = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.has_pending_reviews_for_graph_exec"
|
||||||
|
)
|
||||||
|
mock_has_pending.return_value = False
|
||||||
|
|
||||||
|
# Mock get_graph_settings to return custom settings
|
||||||
|
mock_get_settings = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.get_graph_settings"
|
||||||
|
)
|
||||||
|
mock_get_settings.return_value = GraphSettings(
|
||||||
|
human_in_the_loop_safe_mode=True,
|
||||||
|
sensitive_action_safe_mode=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Mock add_graph_execution
|
||||||
|
mock_add_execution = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.add_graph_execution"
|
||||||
|
)
|
||||||
|
|
||||||
|
request_data = {
|
||||||
|
"reviews": [
|
||||||
|
{
|
||||||
|
"node_exec_id": "test_node_123",
|
||||||
|
"approved": True,
|
||||||
|
"message": "Approved",
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"auto_approve_future_actions": True,
|
||||||
|
}
|
||||||
|
|
||||||
|
response = client.post("/api/review/action", json=request_data)
|
||||||
|
|
||||||
|
assert response.status_code == 200
|
||||||
|
|
||||||
|
# Verify process_all_reviews_for_execution was called (without auto_approve param)
|
||||||
|
mock_process_all_reviews.assert_called_once()
|
||||||
|
|
||||||
|
# Verify create_auto_approval_record was called for the approved review
|
||||||
|
mock_create_auto_approval.assert_called_once_with(
|
||||||
|
user_id=test_user_id,
|
||||||
|
graph_exec_id="test_graph_exec_456",
|
||||||
|
graph_id="test_graph_789",
|
||||||
|
graph_version=1,
|
||||||
|
node_id="test_node_def_456",
|
||||||
|
payload={"data": "test payload"},
|
||||||
|
)
|
||||||
|
|
||||||
|
# Verify get_graph_settings was called with correct parameters
|
||||||
|
mock_get_settings.assert_called_once_with(
|
||||||
|
user_id=test_user_id, graph_id="test_graph_789"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Verify add_graph_execution was called with proper ExecutionContext
|
||||||
|
mock_add_execution.assert_called_once()
|
||||||
|
call_kwargs = mock_add_execution.call_args.kwargs
|
||||||
|
execution_context = call_kwargs["execution_context"]
|
||||||
|
|
||||||
|
assert isinstance(execution_context, ExecutionContext)
|
||||||
|
assert execution_context.human_in_the_loop_safe_mode is True
|
||||||
|
assert execution_context.sensitive_action_safe_mode is True
|
||||||
|
|
||||||
|
|
||||||
|
def test_process_review_action_without_auto_approve_still_loads_settings(
|
||||||
|
mocker: pytest_mock.MockerFixture,
|
||||||
|
sample_pending_review: PendingHumanReviewModel,
|
||||||
|
test_user_id: str,
|
||||||
|
) -> None:
|
||||||
|
"""Test that execution context is created with settings even without auto-approve"""
|
||||||
|
from backend.data.execution import ExecutionContext
|
||||||
|
from backend.data.graph import GraphSettings
|
||||||
|
|
||||||
|
# Mock process_all_reviews
|
||||||
|
mock_process_all_reviews = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.process_all_reviews_for_execution"
|
||||||
|
)
|
||||||
|
approved_review = PendingHumanReviewModel(
|
||||||
|
node_exec_id="test_node_123",
|
||||||
|
user_id=test_user_id,
|
||||||
|
graph_exec_id="test_graph_exec_456",
|
||||||
|
graph_id="test_graph_789",
|
||||||
|
graph_version=1,
|
||||||
|
payload={"data": "test payload"},
|
||||||
|
instructions="Please review",
|
||||||
|
editable=True,
|
||||||
|
status=ReviewStatus.APPROVED,
|
||||||
|
review_message="Approved",
|
||||||
|
was_edited=False,
|
||||||
|
processed=False,
|
||||||
|
created_at=FIXED_NOW,
|
||||||
|
updated_at=FIXED_NOW,
|
||||||
|
reviewed_at=FIXED_NOW,
|
||||||
|
)
|
||||||
|
mock_process_all_reviews.return_value = {"test_node_123": approved_review}
|
||||||
|
|
||||||
|
# Mock create_auto_approval_record - should NOT be called when auto_approve is False
|
||||||
|
mock_create_auto_approval = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.create_auto_approval_record"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Mock has_pending_reviews_for_graph_exec
|
||||||
|
mock_has_pending = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.has_pending_reviews_for_graph_exec"
|
||||||
|
)
|
||||||
|
mock_has_pending.return_value = False
|
||||||
|
|
||||||
|
# Mock get_graph_settings with sensitive_action_safe_mode enabled
|
||||||
|
mock_get_settings = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.get_graph_settings"
|
||||||
|
)
|
||||||
|
mock_get_settings.return_value = GraphSettings(
|
||||||
|
human_in_the_loop_safe_mode=False,
|
||||||
|
sensitive_action_safe_mode=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Mock add_graph_execution
|
||||||
|
mock_add_execution = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.add_graph_execution"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Request WITHOUT auto_approve_future_actions
|
||||||
|
request_data = {
|
||||||
|
"reviews": [
|
||||||
|
{
|
||||||
|
"node_exec_id": "test_node_123",
|
||||||
|
"approved": True,
|
||||||
|
"message": "Approved",
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"auto_approve_future_actions": False,
|
||||||
|
}
|
||||||
|
|
||||||
|
response = client.post("/api/review/action", json=request_data)
|
||||||
|
|
||||||
|
assert response.status_code == 200
|
||||||
|
|
||||||
|
# Verify process_all_reviews_for_execution was called
|
||||||
|
mock_process_all_reviews.assert_called_once()
|
||||||
|
|
||||||
|
# Verify create_auto_approval_record was NOT called (auto_approve_future_actions=False)
|
||||||
|
mock_create_auto_approval.assert_not_called()
|
||||||
|
|
||||||
|
# Verify settings were loaded
|
||||||
|
mock_get_settings.assert_called_once()
|
||||||
|
|
||||||
|
# Verify ExecutionContext has proper settings
|
||||||
|
mock_add_execution.assert_called_once()
|
||||||
|
call_kwargs = mock_add_execution.call_args.kwargs
|
||||||
|
execution_context = call_kwargs["execution_context"]
|
||||||
|
|
||||||
|
assert isinstance(execution_context, ExecutionContext)
|
||||||
|
assert execution_context.human_in_the_loop_safe_mode is False
|
||||||
|
assert execution_context.sensitive_action_safe_mode is True
|
||||||
|
|
||||||
|
|
||||||
|
def test_process_review_action_auto_approve_only_applies_to_approved_reviews(
|
||||||
|
mocker: pytest_mock.MockerFixture,
|
||||||
|
test_user_id: str,
|
||||||
|
) -> None:
|
||||||
|
"""Test that auto_approve record is created only for approved reviews"""
|
||||||
|
from backend.data.execution import ExecutionContext, NodeExecutionResult
|
||||||
|
from backend.data.graph import GraphSettings
|
||||||
|
|
||||||
|
# Create two reviews - one approved, one rejected
|
||||||
|
approved_review = PendingHumanReviewModel(
|
||||||
|
node_exec_id="node_exec_approved",
|
||||||
|
user_id=test_user_id,
|
||||||
|
graph_exec_id="test_graph_exec_456",
|
||||||
|
graph_id="test_graph_789",
|
||||||
|
graph_version=1,
|
||||||
|
payload={"data": "approved"},
|
||||||
|
instructions="Review",
|
||||||
|
editable=True,
|
||||||
|
status=ReviewStatus.APPROVED,
|
||||||
|
review_message=None,
|
||||||
|
was_edited=False,
|
||||||
|
processed=False,
|
||||||
|
created_at=FIXED_NOW,
|
||||||
|
updated_at=FIXED_NOW,
|
||||||
|
reviewed_at=FIXED_NOW,
|
||||||
|
)
|
||||||
|
rejected_review = PendingHumanReviewModel(
|
||||||
|
node_exec_id="node_exec_rejected",
|
||||||
|
user_id=test_user_id,
|
||||||
|
graph_exec_id="test_graph_exec_456",
|
||||||
|
graph_id="test_graph_789",
|
||||||
|
graph_version=1,
|
||||||
|
payload={"data": "rejected"},
|
||||||
|
instructions="Review",
|
||||||
|
editable=True,
|
||||||
|
status=ReviewStatus.REJECTED,
|
||||||
|
review_message="Rejected",
|
||||||
|
was_edited=False,
|
||||||
|
processed=False,
|
||||||
|
created_at=FIXED_NOW,
|
||||||
|
updated_at=FIXED_NOW,
|
||||||
|
reviewed_at=FIXED_NOW,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Mock process_all_reviews
|
||||||
|
mock_process_all_reviews = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.process_all_reviews_for_execution"
|
||||||
|
)
|
||||||
|
mock_process_all_reviews.return_value = {
|
||||||
|
"node_exec_approved": approved_review,
|
||||||
|
"node_exec_rejected": rejected_review,
|
||||||
|
}
|
||||||
|
|
||||||
|
# Mock get_node_execution to return node_id (only called for approved review)
|
||||||
|
mock_get_node_execution = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.get_node_execution"
|
||||||
|
)
|
||||||
|
mock_node_exec = mocker.Mock(spec=NodeExecutionResult)
|
||||||
|
mock_node_exec.node_id = "test_node_def_approved"
|
||||||
|
mock_get_node_execution.return_value = mock_node_exec
|
||||||
|
|
||||||
|
# Mock create_auto_approval_record
|
||||||
|
mock_create_auto_approval = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.create_auto_approval_record"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Mock has_pending_reviews_for_graph_exec
|
||||||
|
mock_has_pending = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.has_pending_reviews_for_graph_exec"
|
||||||
|
)
|
||||||
|
mock_has_pending.return_value = False
|
||||||
|
|
||||||
|
# Mock get_graph_settings
|
||||||
|
mock_get_settings = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.get_graph_settings"
|
||||||
|
)
|
||||||
|
mock_get_settings.return_value = GraphSettings()
|
||||||
|
|
||||||
|
# Mock add_graph_execution
|
||||||
|
mock_add_execution = mocker.patch(
|
||||||
|
"backend.api.features.executions.review.routes.add_graph_execution"
|
||||||
|
)
|
||||||
|
|
||||||
|
request_data = {
|
||||||
|
"reviews": [
|
||||||
|
{"node_exec_id": "node_exec_approved", "approved": True},
|
||||||
|
{"node_exec_id": "node_exec_rejected", "approved": False},
|
||||||
|
],
|
||||||
|
"auto_approve_future_actions": True,
|
||||||
|
}
|
||||||
|
|
||||||
|
response = client.post("/api/review/action", json=request_data)
|
||||||
|
|
||||||
|
assert response.status_code == 200
|
||||||
|
|
||||||
|
# Verify process_all_reviews_for_execution was called
|
||||||
|
mock_process_all_reviews.assert_called_once()
|
||||||
|
|
||||||
|
# Verify create_auto_approval_record was called ONLY for the approved review
|
||||||
|
# (not for the rejected one)
|
||||||
|
mock_create_auto_approval.assert_called_once_with(
|
||||||
|
user_id=test_user_id,
|
||||||
|
graph_exec_id="test_graph_exec_456",
|
||||||
|
graph_id="test_graph_789",
|
||||||
|
graph_version=1,
|
||||||
|
node_id="test_node_def_approved",
|
||||||
|
payload={"data": "approved"},
|
||||||
|
)
|
||||||
|
|
||||||
|
# Verify get_node_execution was called only for approved review
|
||||||
|
mock_get_node_execution.assert_called_once_with("node_exec_approved")
|
||||||
|
|
||||||
|
# Verify ExecutionContext was created (auto-approval is now DB-based)
|
||||||
|
call_kwargs = mock_add_execution.call_args.kwargs
|
||||||
|
execution_context = call_kwargs["execution_context"]
|
||||||
|
assert isinstance(execution_context, ExecutionContext)
|
||||||
|
|||||||
@@ -5,8 +5,14 @@ import autogpt_libs.auth as autogpt_auth_lib
|
|||||||
from fastapi import APIRouter, HTTPException, Query, Security, status
|
from fastapi import APIRouter, HTTPException, Query, Security, status
|
||||||
from prisma.enums import ReviewStatus
|
from prisma.enums import ReviewStatus
|
||||||
|
|
||||||
from backend.data.execution import get_graph_execution_meta
|
from backend.data.execution import (
|
||||||
|
ExecutionContext,
|
||||||
|
get_graph_execution_meta,
|
||||||
|
get_node_execution,
|
||||||
|
)
|
||||||
|
from backend.data.graph import get_graph_settings
|
||||||
from backend.data.human_review import (
|
from backend.data.human_review import (
|
||||||
|
create_auto_approval_record,
|
||||||
get_pending_reviews_for_execution,
|
get_pending_reviews_for_execution,
|
||||||
get_pending_reviews_for_user,
|
get_pending_reviews_for_user,
|
||||||
has_pending_reviews_for_graph_exec,
|
has_pending_reviews_for_graph_exec,
|
||||||
@@ -128,14 +134,20 @@ async def process_review_action(
|
|||||||
)
|
)
|
||||||
|
|
||||||
# Build review decisions map
|
# Build review decisions map
|
||||||
|
# When auto_approve_future_actions is true, ignore any edited data
|
||||||
|
# (auto-approved reviews should use original data for consistency)
|
||||||
review_decisions = {}
|
review_decisions = {}
|
||||||
for review in request.reviews:
|
for review in request.reviews:
|
||||||
review_status = (
|
review_status = (
|
||||||
ReviewStatus.APPROVED if review.approved else ReviewStatus.REJECTED
|
ReviewStatus.APPROVED if review.approved else ReviewStatus.REJECTED
|
||||||
)
|
)
|
||||||
|
# If auto-approving future actions, don't allow data modifications
|
||||||
|
reviewed_data = (
|
||||||
|
None if request.auto_approve_future_actions else review.reviewed_data
|
||||||
|
)
|
||||||
review_decisions[review.node_exec_id] = (
|
review_decisions[review.node_exec_id] = (
|
||||||
review_status,
|
review_status,
|
||||||
review.reviewed_data,
|
reviewed_data,
|
||||||
review.message,
|
review.message,
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -145,6 +157,22 @@ async def process_review_action(
|
|||||||
review_decisions=review_decisions,
|
review_decisions=review_decisions,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# Create auto-approval records for approved reviews if requested
|
||||||
|
if request.auto_approve_future_actions:
|
||||||
|
for node_exec_id, review in updated_reviews.items():
|
||||||
|
if review.status == ReviewStatus.APPROVED:
|
||||||
|
# Look up the node_id from the node execution
|
||||||
|
node_exec = await get_node_execution(node_exec_id)
|
||||||
|
if node_exec:
|
||||||
|
await create_auto_approval_record(
|
||||||
|
user_id=user_id,
|
||||||
|
graph_exec_id=review.graph_exec_id,
|
||||||
|
graph_id=review.graph_id,
|
||||||
|
graph_version=review.graph_version,
|
||||||
|
node_id=node_exec.node_id,
|
||||||
|
payload=review.payload,
|
||||||
|
)
|
||||||
|
|
||||||
# Count results
|
# Count results
|
||||||
approved_count = sum(
|
approved_count = sum(
|
||||||
1
|
1
|
||||||
@@ -169,10 +197,24 @@ async def process_review_action(
|
|||||||
if not still_has_pending:
|
if not still_has_pending:
|
||||||
# Resume execution
|
# Resume execution
|
||||||
try:
|
try:
|
||||||
|
# Load graph settings to create proper execution context
|
||||||
|
settings = await get_graph_settings(
|
||||||
|
user_id=user_id, graph_id=first_review.graph_id
|
||||||
|
)
|
||||||
|
|
||||||
|
# Create execution context with settings
|
||||||
|
# Note: auto-approval is now handled via database lookup in
|
||||||
|
# check_auto_approval(), no need to pass auto_approved_node_ids
|
||||||
|
execution_context = ExecutionContext(
|
||||||
|
human_in_the_loop_safe_mode=settings.human_in_the_loop_safe_mode,
|
||||||
|
sensitive_action_safe_mode=settings.sensitive_action_safe_mode,
|
||||||
|
)
|
||||||
|
|
||||||
await add_graph_execution(
|
await add_graph_execution(
|
||||||
graph_id=first_review.graph_id,
|
graph_id=first_review.graph_id,
|
||||||
user_id=user_id,
|
user_id=user_id,
|
||||||
graph_exec_id=graph_exec_id,
|
graph_exec_id=graph_exec_id,
|
||||||
|
execution_context=execution_context,
|
||||||
)
|
)
|
||||||
logger.info(f"Resumed execution {graph_exec_id}")
|
logger.info(f"Resumed execution {graph_exec_id}")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
|
|||||||
@@ -401,27 +401,11 @@ async def add_generated_agent_image(
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
def _initialize_graph_settings(graph: graph_db.GraphModel) -> GraphSettings:
|
|
||||||
"""
|
|
||||||
Initialize GraphSettings based on graph content.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
graph: The graph to analyze
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
GraphSettings with appropriate human_in_the_loop_safe_mode value
|
|
||||||
"""
|
|
||||||
if graph.has_human_in_the_loop:
|
|
||||||
# Graph has HITL blocks - set safe mode to True by default
|
|
||||||
return GraphSettings(human_in_the_loop_safe_mode=True)
|
|
||||||
else:
|
|
||||||
# Graph has no HITL blocks - keep None
|
|
||||||
return GraphSettings(human_in_the_loop_safe_mode=None)
|
|
||||||
|
|
||||||
|
|
||||||
async def create_library_agent(
|
async def create_library_agent(
|
||||||
graph: graph_db.GraphModel,
|
graph: graph_db.GraphModel,
|
||||||
user_id: str,
|
user_id: str,
|
||||||
|
hitl_safe_mode: bool = True,
|
||||||
|
sensitive_action_safe_mode: bool = False,
|
||||||
create_library_agents_for_sub_graphs: bool = True,
|
create_library_agents_for_sub_graphs: bool = True,
|
||||||
) -> list[library_model.LibraryAgent]:
|
) -> list[library_model.LibraryAgent]:
|
||||||
"""
|
"""
|
||||||
@@ -430,6 +414,8 @@ async def create_library_agent(
|
|||||||
Args:
|
Args:
|
||||||
agent: The agent/Graph to add to the library.
|
agent: The agent/Graph to add to the library.
|
||||||
user_id: The user to whom the agent will be added.
|
user_id: The user to whom the agent will be added.
|
||||||
|
hitl_safe_mode: Whether HITL blocks require manual review (default True).
|
||||||
|
sensitive_action_safe_mode: Whether sensitive action blocks require review.
|
||||||
create_library_agents_for_sub_graphs: If True, creates LibraryAgent records for sub-graphs as well.
|
create_library_agents_for_sub_graphs: If True, creates LibraryAgent records for sub-graphs as well.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
@@ -465,7 +451,11 @@ async def create_library_agent(
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
settings=SafeJson(
|
settings=SafeJson(
|
||||||
_initialize_graph_settings(graph_entry).model_dump()
|
GraphSettings.from_graph(
|
||||||
|
graph_entry,
|
||||||
|
hitl_safe_mode=hitl_safe_mode,
|
||||||
|
sensitive_action_safe_mode=sensitive_action_safe_mode,
|
||||||
|
).model_dump()
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
include=library_agent_include(
|
include=library_agent_include(
|
||||||
@@ -627,33 +617,6 @@ async def update_library_agent(
|
|||||||
raise DatabaseError("Failed to update library agent") from e
|
raise DatabaseError("Failed to update library agent") from e
|
||||||
|
|
||||||
|
|
||||||
async def update_library_agent_settings(
|
|
||||||
user_id: str,
|
|
||||||
agent_id: str,
|
|
||||||
settings: GraphSettings,
|
|
||||||
) -> library_model.LibraryAgent:
|
|
||||||
"""
|
|
||||||
Updates the settings for a specific LibraryAgent.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
user_id: The owner of the LibraryAgent.
|
|
||||||
agent_id: The ID of the LibraryAgent to update.
|
|
||||||
settings: New GraphSettings to apply.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
The updated LibraryAgent.
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
NotFoundError: If the specified LibraryAgent does not exist.
|
|
||||||
DatabaseError: If there's an error in the update operation.
|
|
||||||
"""
|
|
||||||
return await update_library_agent(
|
|
||||||
library_agent_id=agent_id,
|
|
||||||
user_id=user_id,
|
|
||||||
settings=settings,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
async def delete_library_agent(
|
async def delete_library_agent(
|
||||||
library_agent_id: str, user_id: str, soft_delete: bool = True
|
library_agent_id: str, user_id: str, soft_delete: bool = True
|
||||||
) -> None:
|
) -> None:
|
||||||
@@ -838,7 +801,7 @@ async def add_store_agent_to_library(
|
|||||||
"isCreatedByUser": False,
|
"isCreatedByUser": False,
|
||||||
"useGraphIsActiveVersion": False,
|
"useGraphIsActiveVersion": False,
|
||||||
"settings": SafeJson(
|
"settings": SafeJson(
|
||||||
_initialize_graph_settings(graph_model).model_dump()
|
GraphSettings.from_graph(graph_model).model_dump()
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
include=library_agent_include(
|
include=library_agent_include(
|
||||||
@@ -1228,8 +1191,15 @@ async def fork_library_agent(
|
|||||||
)
|
)
|
||||||
new_graph = await on_graph_activate(new_graph, user_id=user_id)
|
new_graph = await on_graph_activate(new_graph, user_id=user_id)
|
||||||
|
|
||||||
# Create a library agent for the new graph
|
# Create a library agent for the new graph, preserving safe mode settings
|
||||||
return (await create_library_agent(new_graph, user_id))[0]
|
return (
|
||||||
|
await create_library_agent(
|
||||||
|
new_graph,
|
||||||
|
user_id,
|
||||||
|
hitl_safe_mode=original_agent.settings.human_in_the_loop_safe_mode,
|
||||||
|
sensitive_action_safe_mode=original_agent.settings.sensitive_action_safe_mode,
|
||||||
|
)
|
||||||
|
)[0]
|
||||||
except prisma.errors.PrismaError as e:
|
except prisma.errors.PrismaError as e:
|
||||||
logger.error(f"Database error cloning library agent: {e}")
|
logger.error(f"Database error cloning library agent: {e}")
|
||||||
raise DatabaseError("Failed to fork library agent") from e
|
raise DatabaseError("Failed to fork library agent") from e
|
||||||
|
|||||||
@@ -73,6 +73,12 @@ class LibraryAgent(pydantic.BaseModel):
|
|||||||
has_external_trigger: bool = pydantic.Field(
|
has_external_trigger: bool = pydantic.Field(
|
||||||
description="Whether the agent has an external trigger (e.g. webhook) node"
|
description="Whether the agent has an external trigger (e.g. webhook) node"
|
||||||
)
|
)
|
||||||
|
has_human_in_the_loop: bool = pydantic.Field(
|
||||||
|
description="Whether the agent has human-in-the-loop blocks"
|
||||||
|
)
|
||||||
|
has_sensitive_action: bool = pydantic.Field(
|
||||||
|
description="Whether the agent has sensitive action blocks"
|
||||||
|
)
|
||||||
trigger_setup_info: Optional[GraphTriggerInfo] = None
|
trigger_setup_info: Optional[GraphTriggerInfo] = None
|
||||||
|
|
||||||
# Indicates whether there's a new output (based on recent runs)
|
# Indicates whether there's a new output (based on recent runs)
|
||||||
@@ -180,6 +186,8 @@ class LibraryAgent(pydantic.BaseModel):
|
|||||||
graph.credentials_input_schema if sub_graphs is not None else None
|
graph.credentials_input_schema if sub_graphs is not None else None
|
||||||
),
|
),
|
||||||
has_external_trigger=graph.has_external_trigger,
|
has_external_trigger=graph.has_external_trigger,
|
||||||
|
has_human_in_the_loop=graph.has_human_in_the_loop,
|
||||||
|
has_sensitive_action=graph.has_sensitive_action,
|
||||||
trigger_setup_info=graph.trigger_setup_info,
|
trigger_setup_info=graph.trigger_setup_info,
|
||||||
new_output=new_output,
|
new_output=new_output,
|
||||||
can_access_graph=can_access_graph,
|
can_access_graph=can_access_graph,
|
||||||
|
|||||||
@@ -52,6 +52,8 @@ async def test_get_library_agents_success(
|
|||||||
output_schema={"type": "object", "properties": {}},
|
output_schema={"type": "object", "properties": {}},
|
||||||
credentials_input_schema={"type": "object", "properties": {}},
|
credentials_input_schema={"type": "object", "properties": {}},
|
||||||
has_external_trigger=False,
|
has_external_trigger=False,
|
||||||
|
has_human_in_the_loop=False,
|
||||||
|
has_sensitive_action=False,
|
||||||
status=library_model.LibraryAgentStatus.COMPLETED,
|
status=library_model.LibraryAgentStatus.COMPLETED,
|
||||||
recommended_schedule_cron=None,
|
recommended_schedule_cron=None,
|
||||||
new_output=False,
|
new_output=False,
|
||||||
@@ -75,6 +77,8 @@ async def test_get_library_agents_success(
|
|||||||
output_schema={"type": "object", "properties": {}},
|
output_schema={"type": "object", "properties": {}},
|
||||||
credentials_input_schema={"type": "object", "properties": {}},
|
credentials_input_schema={"type": "object", "properties": {}},
|
||||||
has_external_trigger=False,
|
has_external_trigger=False,
|
||||||
|
has_human_in_the_loop=False,
|
||||||
|
has_sensitive_action=False,
|
||||||
status=library_model.LibraryAgentStatus.COMPLETED,
|
status=library_model.LibraryAgentStatus.COMPLETED,
|
||||||
recommended_schedule_cron=None,
|
recommended_schedule_cron=None,
|
||||||
new_output=False,
|
new_output=False,
|
||||||
@@ -150,6 +154,8 @@ async def test_get_favorite_library_agents_success(
|
|||||||
output_schema={"type": "object", "properties": {}},
|
output_schema={"type": "object", "properties": {}},
|
||||||
credentials_input_schema={"type": "object", "properties": {}},
|
credentials_input_schema={"type": "object", "properties": {}},
|
||||||
has_external_trigger=False,
|
has_external_trigger=False,
|
||||||
|
has_human_in_the_loop=False,
|
||||||
|
has_sensitive_action=False,
|
||||||
status=library_model.LibraryAgentStatus.COMPLETED,
|
status=library_model.LibraryAgentStatus.COMPLETED,
|
||||||
recommended_schedule_cron=None,
|
recommended_schedule_cron=None,
|
||||||
new_output=False,
|
new_output=False,
|
||||||
@@ -218,6 +224,8 @@ def test_add_agent_to_library_success(
|
|||||||
output_schema={"type": "object", "properties": {}},
|
output_schema={"type": "object", "properties": {}},
|
||||||
credentials_input_schema={"type": "object", "properties": {}},
|
credentials_input_schema={"type": "object", "properties": {}},
|
||||||
has_external_trigger=False,
|
has_external_trigger=False,
|
||||||
|
has_human_in_the_loop=False,
|
||||||
|
has_sensitive_action=False,
|
||||||
status=library_model.LibraryAgentStatus.COMPLETED,
|
status=library_model.LibraryAgentStatus.COMPLETED,
|
||||||
new_output=False,
|
new_output=False,
|
||||||
can_access_graph=True,
|
can_access_graph=True,
|
||||||
|
|||||||
@@ -154,6 +154,7 @@ async def store_content_embedding(
|
|||||||
|
|
||||||
# Upsert the embedding
|
# Upsert the embedding
|
||||||
# WHERE clause in DO UPDATE prevents PostgreSQL 15 bug with NULLS NOT DISTINCT
|
# WHERE clause in DO UPDATE prevents PostgreSQL 15 bug with NULLS NOT DISTINCT
|
||||||
|
# Use unqualified ::vector - pgvector is in search_path on all environments
|
||||||
await execute_raw_with_schema(
|
await execute_raw_with_schema(
|
||||||
"""
|
"""
|
||||||
INSERT INTO {schema_prefix}"UnifiedContentEmbedding" (
|
INSERT INTO {schema_prefix}"UnifiedContentEmbedding" (
|
||||||
@@ -177,7 +178,6 @@ async def store_content_embedding(
|
|||||||
searchable_text,
|
searchable_text,
|
||||||
metadata_json,
|
metadata_json,
|
||||||
client=client,
|
client=client,
|
||||||
set_public_search_path=True,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
logger.info(f"Stored embedding for {content_type}:{content_id}")
|
logger.info(f"Stored embedding for {content_type}:{content_id}")
|
||||||
@@ -236,7 +236,6 @@ async def get_content_embedding(
|
|||||||
content_type,
|
content_type,
|
||||||
content_id,
|
content_id,
|
||||||
user_id,
|
user_id,
|
||||||
set_public_search_path=True,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
if result and len(result) > 0:
|
if result and len(result) > 0:
|
||||||
@@ -871,31 +870,45 @@ async def semantic_search(
|
|||||||
# Add content type parameters and build placeholders dynamically
|
# Add content type parameters and build placeholders dynamically
|
||||||
content_type_start_idx = len(params) + 1
|
content_type_start_idx = len(params) + 1
|
||||||
content_type_placeholders = ", ".join(
|
content_type_placeholders = ", ".join(
|
||||||
f'${content_type_start_idx + i}::{{{{schema_prefix}}}}"ContentType"'
|
"$" + str(content_type_start_idx + i) + '::{schema_prefix}"ContentType"'
|
||||||
for i in range(len(content_types))
|
for i in range(len(content_types))
|
||||||
)
|
)
|
||||||
params.extend([ct.value for ct in content_types])
|
params.extend([ct.value for ct in content_types])
|
||||||
|
|
||||||
sql = f"""
|
# Build min_similarity param index before appending
|
||||||
|
min_similarity_idx = len(params) + 1
|
||||||
|
params.append(min_similarity)
|
||||||
|
|
||||||
|
# Use unqualified ::vector and <=> operator - pgvector is in search_path on all environments
|
||||||
|
sql = (
|
||||||
|
"""
|
||||||
SELECT
|
SELECT
|
||||||
"contentId" as content_id,
|
"contentId" as content_id,
|
||||||
"contentType" as content_type,
|
"contentType" as content_type,
|
||||||
"searchableText" as searchable_text,
|
"searchableText" as searchable_text,
|
||||||
metadata,
|
metadata,
|
||||||
1 - (embedding <=> '{embedding_str}'::vector) as similarity
|
1 - (embedding <=> '"""
|
||||||
FROM {{{{schema_prefix}}}}"UnifiedContentEmbedding"
|
+ embedding_str
|
||||||
WHERE "contentType" IN ({content_type_placeholders})
|
+ """'::vector) as similarity
|
||||||
{user_filter}
|
FROM {schema_prefix}"UnifiedContentEmbedding"
|
||||||
AND 1 - (embedding <=> '{embedding_str}'::vector) >= ${len(params) + 1}
|
WHERE "contentType" IN ("""
|
||||||
|
+ content_type_placeholders
|
||||||
|
+ """)
|
||||||
|
"""
|
||||||
|
+ user_filter
|
||||||
|
+ """
|
||||||
|
AND 1 - (embedding <=> '"""
|
||||||
|
+ embedding_str
|
||||||
|
+ """'::vector) >= $"""
|
||||||
|
+ str(min_similarity_idx)
|
||||||
|
+ """
|
||||||
ORDER BY similarity DESC
|
ORDER BY similarity DESC
|
||||||
LIMIT $1
|
LIMIT $1
|
||||||
"""
|
"""
|
||||||
params.append(min_similarity)
|
)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
results = await query_raw_with_schema(
|
results = await query_raw_with_schema(sql, *params)
|
||||||
sql, *params, set_public_search_path=True
|
|
||||||
)
|
|
||||||
return [
|
return [
|
||||||
{
|
{
|
||||||
"content_id": row["content_id"],
|
"content_id": row["content_id"],
|
||||||
@@ -922,31 +935,41 @@ async def semantic_search(
|
|||||||
# Add content type parameters and build placeholders dynamically
|
# Add content type parameters and build placeholders dynamically
|
||||||
content_type_start_idx = len(params_lexical) + 1
|
content_type_start_idx = len(params_lexical) + 1
|
||||||
content_type_placeholders_lexical = ", ".join(
|
content_type_placeholders_lexical = ", ".join(
|
||||||
f'${content_type_start_idx + i}::{{{{schema_prefix}}}}"ContentType"'
|
"$" + str(content_type_start_idx + i) + '::{schema_prefix}"ContentType"'
|
||||||
for i in range(len(content_types))
|
for i in range(len(content_types))
|
||||||
)
|
)
|
||||||
params_lexical.extend([ct.value for ct in content_types])
|
params_lexical.extend([ct.value for ct in content_types])
|
||||||
|
|
||||||
sql_lexical = f"""
|
# Build query param index before appending
|
||||||
|
query_param_idx = len(params_lexical) + 1
|
||||||
|
params_lexical.append(f"%{query}%")
|
||||||
|
|
||||||
|
# Use regular string (not f-string) for template to preserve {schema_prefix} placeholders
|
||||||
|
sql_lexical = (
|
||||||
|
"""
|
||||||
SELECT
|
SELECT
|
||||||
"contentId" as content_id,
|
"contentId" as content_id,
|
||||||
"contentType" as content_type,
|
"contentType" as content_type,
|
||||||
"searchableText" as searchable_text,
|
"searchableText" as searchable_text,
|
||||||
metadata,
|
metadata,
|
||||||
0.0 as similarity
|
0.0 as similarity
|
||||||
FROM {{{{schema_prefix}}}}"UnifiedContentEmbedding"
|
FROM {schema_prefix}"UnifiedContentEmbedding"
|
||||||
WHERE "contentType" IN ({content_type_placeholders_lexical})
|
WHERE "contentType" IN ("""
|
||||||
{user_filter}
|
+ content_type_placeholders_lexical
|
||||||
AND "searchableText" ILIKE ${len(params_lexical) + 1}
|
+ """)
|
||||||
|
"""
|
||||||
|
+ user_filter
|
||||||
|
+ """
|
||||||
|
AND "searchableText" ILIKE $"""
|
||||||
|
+ str(query_param_idx)
|
||||||
|
+ """
|
||||||
ORDER BY "updatedAt" DESC
|
ORDER BY "updatedAt" DESC
|
||||||
LIMIT $1
|
LIMIT $1
|
||||||
"""
|
"""
|
||||||
params_lexical.append(f"%{query}%")
|
)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
results = await query_raw_with_schema(
|
results = await query_raw_with_schema(sql_lexical, *params_lexical)
|
||||||
sql_lexical, *params_lexical, set_public_search_path=True
|
|
||||||
)
|
|
||||||
return [
|
return [
|
||||||
{
|
{
|
||||||
"content_id": row["content_id"],
|
"content_id": row["content_id"],
|
||||||
|
|||||||
@@ -155,18 +155,14 @@ async def test_store_embedding_success(mocker):
|
|||||||
)
|
)
|
||||||
|
|
||||||
assert result is True
|
assert result is True
|
||||||
# execute_raw is called twice: once for SET search_path, once for INSERT
|
# execute_raw is called once for INSERT (no separate SET search_path needed)
|
||||||
assert mock_client.execute_raw.call_count == 2
|
assert mock_client.execute_raw.call_count == 1
|
||||||
|
|
||||||
# First call: SET search_path
|
# Verify the INSERT query with the actual data
|
||||||
first_call_args = mock_client.execute_raw.call_args_list[0][0]
|
call_args = mock_client.execute_raw.call_args_list[0][0]
|
||||||
assert "SET search_path" in first_call_args[0]
|
assert "test-version-id" in call_args
|
||||||
|
assert "[0.1,0.2,0.3]" in call_args
|
||||||
# Second call: INSERT query with the actual data
|
assert None in call_args # userId should be None for store agents
|
||||||
second_call_args = mock_client.execute_raw.call_args_list[1][0]
|
|
||||||
assert "test-version-id" in second_call_args
|
|
||||||
assert "[0.1,0.2,0.3]" in second_call_args
|
|
||||||
assert None in second_call_args # userId should be None for store agents
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio(loop_scope="session")
|
@pytest.mark.asyncio(loop_scope="session")
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ from dataclasses import dataclass
|
|||||||
from typing import Any, Literal
|
from typing import Any, Literal
|
||||||
|
|
||||||
from prisma.enums import ContentType
|
from prisma.enums import ContentType
|
||||||
from rank_bm25 import BM25Okapi
|
from rank_bm25 import BM25Okapi # type: ignore[import-untyped]
|
||||||
|
|
||||||
from backend.api.features.store.embeddings import (
|
from backend.api.features.store.embeddings import (
|
||||||
EMBEDDING_DIM,
|
EMBEDDING_DIM,
|
||||||
@@ -363,9 +363,7 @@ async def unified_hybrid_search(
|
|||||||
LIMIT {limit_param} OFFSET {offset_param}
|
LIMIT {limit_param} OFFSET {offset_param}
|
||||||
"""
|
"""
|
||||||
|
|
||||||
results = await query_raw_with_schema(
|
results = await query_raw_with_schema(sql_query, *params)
|
||||||
sql_query, *params, set_public_search_path=True
|
|
||||||
)
|
|
||||||
|
|
||||||
total = results[0]["total_count"] if results else 0
|
total = results[0]["total_count"] if results else 0
|
||||||
# Apply BM25 reranking
|
# Apply BM25 reranking
|
||||||
@@ -688,9 +686,7 @@ async def hybrid_search(
|
|||||||
LIMIT {limit_param} OFFSET {offset_param}
|
LIMIT {limit_param} OFFSET {offset_param}
|
||||||
"""
|
"""
|
||||||
|
|
||||||
results = await query_raw_with_schema(
|
results = await query_raw_with_schema(sql_query, *params)
|
||||||
sql_query, *params, set_public_search_path=True
|
|
||||||
)
|
|
||||||
|
|
||||||
total = results[0]["total_count"] if results else 0
|
total = results[0]["total_count"] if results else 0
|
||||||
|
|
||||||
|
|||||||
@@ -761,10 +761,8 @@ async def create_new_graph(
|
|||||||
graph.reassign_ids(user_id=user_id, reassign_graph_id=True)
|
graph.reassign_ids(user_id=user_id, reassign_graph_id=True)
|
||||||
graph.validate_graph(for_run=False)
|
graph.validate_graph(for_run=False)
|
||||||
|
|
||||||
# The return value of the create graph & library function is intentionally not used here,
|
|
||||||
# as the graph already valid and no sub-graphs are returned back.
|
|
||||||
await graph_db.create_graph(graph, user_id=user_id)
|
await graph_db.create_graph(graph, user_id=user_id)
|
||||||
await library_db.create_library_agent(graph, user_id=user_id)
|
await library_db.create_library_agent(graph, user_id)
|
||||||
activated_graph = await on_graph_activate(graph, user_id=user_id)
|
activated_graph = await on_graph_activate(graph, user_id=user_id)
|
||||||
|
|
||||||
if create_graph.source == "builder":
|
if create_graph.source == "builder":
|
||||||
@@ -888,21 +886,19 @@ async def set_graph_active_version(
|
|||||||
async def _update_library_agent_version_and_settings(
|
async def _update_library_agent_version_and_settings(
|
||||||
user_id: str, agent_graph: graph_db.GraphModel
|
user_id: str, agent_graph: graph_db.GraphModel
|
||||||
) -> library_model.LibraryAgent:
|
) -> library_model.LibraryAgent:
|
||||||
# Keep the library agent up to date with the new active version
|
|
||||||
library = await library_db.update_agent_version_in_library(
|
library = await library_db.update_agent_version_in_library(
|
||||||
user_id, agent_graph.id, agent_graph.version
|
user_id, agent_graph.id, agent_graph.version
|
||||||
)
|
)
|
||||||
# If the graph has HITL node, initialize the setting if it's not already set.
|
updated_settings = GraphSettings.from_graph(
|
||||||
if (
|
graph=agent_graph,
|
||||||
agent_graph.has_human_in_the_loop
|
hitl_safe_mode=library.settings.human_in_the_loop_safe_mode,
|
||||||
and library.settings.human_in_the_loop_safe_mode is None
|
sensitive_action_safe_mode=library.settings.sensitive_action_safe_mode,
|
||||||
):
|
)
|
||||||
await library_db.update_library_agent_settings(
|
if updated_settings != library.settings:
|
||||||
|
library = await library_db.update_library_agent(
|
||||||
|
library_agent_id=library.id,
|
||||||
user_id=user_id,
|
user_id=user_id,
|
||||||
agent_id=library.id,
|
settings=updated_settings,
|
||||||
settings=library.settings.model_copy(
|
|
||||||
update={"human_in_the_loop_safe_mode": True}
|
|
||||||
),
|
|
||||||
)
|
)
|
||||||
return library
|
return library
|
||||||
|
|
||||||
@@ -919,21 +915,18 @@ async def update_graph_settings(
|
|||||||
user_id: Annotated[str, Security(get_user_id)],
|
user_id: Annotated[str, Security(get_user_id)],
|
||||||
) -> GraphSettings:
|
) -> GraphSettings:
|
||||||
"""Update graph settings for the user's library agent."""
|
"""Update graph settings for the user's library agent."""
|
||||||
# Get the library agent for this graph
|
|
||||||
library_agent = await library_db.get_library_agent_by_graph_id(
|
library_agent = await library_db.get_library_agent_by_graph_id(
|
||||||
graph_id=graph_id, user_id=user_id
|
graph_id=graph_id, user_id=user_id
|
||||||
)
|
)
|
||||||
if not library_agent:
|
if not library_agent:
|
||||||
raise HTTPException(404, f"Graph #{graph_id} not found in user's library")
|
raise HTTPException(404, f"Graph #{graph_id} not found in user's library")
|
||||||
|
|
||||||
# Update the library agent settings
|
updated_agent = await library_db.update_library_agent(
|
||||||
updated_agent = await library_db.update_library_agent_settings(
|
library_agent_id=library_agent.id,
|
||||||
user_id=user_id,
|
user_id=user_id,
|
||||||
agent_id=library_agent.id,
|
|
||||||
settings=settings,
|
settings=settings,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Return the updated settings
|
|
||||||
return GraphSettings.model_validate(updated_agent.settings)
|
return GraphSettings.model_validate(updated_agent.settings)
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -116,6 +116,7 @@ class PrintToConsoleBlock(Block):
|
|||||||
input_schema=PrintToConsoleBlock.Input,
|
input_schema=PrintToConsoleBlock.Input,
|
||||||
output_schema=PrintToConsoleBlock.Output,
|
output_schema=PrintToConsoleBlock.Output,
|
||||||
test_input={"text": "Hello, World!"},
|
test_input={"text": "Hello, World!"},
|
||||||
|
is_sensitive_action=True,
|
||||||
test_output=[
|
test_output=[
|
||||||
("output", "Hello, World!"),
|
("output", "Hello, World!"),
|
||||||
("status", "printed"),
|
("status", "printed"),
|
||||||
|
|||||||
@@ -680,3 +680,58 @@ class ListIsEmptyBlock(Block):
|
|||||||
|
|
||||||
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||||
yield "is_empty", len(input_data.list) == 0
|
yield "is_empty", len(input_data.list) == 0
|
||||||
|
|
||||||
|
|
||||||
|
class ConcatenateListsBlock(Block):
|
||||||
|
class Input(BlockSchemaInput):
|
||||||
|
lists: List[List[Any]] = SchemaField(
|
||||||
|
description="A list of lists to concatenate together. All lists will be combined in order into a single list.",
|
||||||
|
placeholder="e.g., [[1, 2], [3, 4], [5, 6]]",
|
||||||
|
)
|
||||||
|
|
||||||
|
class Output(BlockSchemaOutput):
|
||||||
|
concatenated_list: List[Any] = SchemaField(
|
||||||
|
description="The concatenated list containing all elements from all input lists in order."
|
||||||
|
)
|
||||||
|
error: str = SchemaField(
|
||||||
|
description="Error message if concatenation failed due to invalid input types."
|
||||||
|
)
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
super().__init__(
|
||||||
|
id="3cf9298b-5817-4141-9d80-7c2cc5199c8e",
|
||||||
|
description="Concatenates multiple lists into a single list. All elements from all input lists are combined in order.",
|
||||||
|
categories={BlockCategory.BASIC},
|
||||||
|
input_schema=ConcatenateListsBlock.Input,
|
||||||
|
output_schema=ConcatenateListsBlock.Output,
|
||||||
|
test_input=[
|
||||||
|
{"lists": [[1, 2, 3], [4, 5, 6]]},
|
||||||
|
{"lists": [["a", "b"], ["c"], ["d", "e", "f"]]},
|
||||||
|
{"lists": [[1, 2], []]},
|
||||||
|
{"lists": []},
|
||||||
|
],
|
||||||
|
test_output=[
|
||||||
|
("concatenated_list", [1, 2, 3, 4, 5, 6]),
|
||||||
|
("concatenated_list", ["a", "b", "c", "d", "e", "f"]),
|
||||||
|
("concatenated_list", [1, 2]),
|
||||||
|
("concatenated_list", []),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||||
|
concatenated = []
|
||||||
|
for idx, lst in enumerate(input_data.lists):
|
||||||
|
if lst is None:
|
||||||
|
# Skip None values to avoid errors
|
||||||
|
continue
|
||||||
|
if not isinstance(lst, list):
|
||||||
|
# Type validation: each item must be a list
|
||||||
|
# Strings are iterable and would cause extend() to iterate character-by-character
|
||||||
|
# Non-iterable types would raise TypeError
|
||||||
|
yield "error", (
|
||||||
|
f"Invalid input at index {idx}: expected a list, got {type(lst).__name__}. "
|
||||||
|
f"All items in 'lists' must be lists (e.g., [[1, 2], [3, 4]])."
|
||||||
|
)
|
||||||
|
return
|
||||||
|
concatenated.extend(lst)
|
||||||
|
yield "concatenated_list", concatenated
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ from prisma.enums import ReviewStatus
|
|||||||
from pydantic import BaseModel
|
from pydantic import BaseModel
|
||||||
|
|
||||||
from backend.data.execution import ExecutionContext, ExecutionStatus
|
from backend.data.execution import ExecutionContext, ExecutionStatus
|
||||||
from backend.data.human_review import ReviewResult
|
from backend.data.human_review import ReviewResult, check_auto_approval
|
||||||
from backend.executor.manager import async_update_node_execution_status
|
from backend.executor.manager import async_update_node_execution_status
|
||||||
from backend.util.clients import get_database_manager_async_client
|
from backend.util.clients import get_database_manager_async_client
|
||||||
|
|
||||||
@@ -55,6 +55,7 @@ class HITLReviewHelper:
|
|||||||
async def _handle_review_request(
|
async def _handle_review_request(
|
||||||
input_data: Any,
|
input_data: Any,
|
||||||
user_id: str,
|
user_id: str,
|
||||||
|
node_id: str,
|
||||||
node_exec_id: str,
|
node_exec_id: str,
|
||||||
graph_exec_id: str,
|
graph_exec_id: str,
|
||||||
graph_id: str,
|
graph_id: str,
|
||||||
@@ -69,6 +70,7 @@ class HITLReviewHelper:
|
|||||||
Args:
|
Args:
|
||||||
input_data: The input data to be reviewed
|
input_data: The input data to be reviewed
|
||||||
user_id: ID of the user requesting the review
|
user_id: ID of the user requesting the review
|
||||||
|
node_id: ID of the node in the graph definition
|
||||||
node_exec_id: ID of the node execution
|
node_exec_id: ID of the node execution
|
||||||
graph_exec_id: ID of the graph execution
|
graph_exec_id: ID of the graph execution
|
||||||
graph_id: ID of the graph
|
graph_id: ID of the graph
|
||||||
@@ -83,15 +85,27 @@ class HITLReviewHelper:
|
|||||||
Raises:
|
Raises:
|
||||||
Exception: If review creation or status update fails
|
Exception: If review creation or status update fails
|
||||||
"""
|
"""
|
||||||
# Skip review if safe mode is disabled - return auto-approved result
|
# Note: Safe mode checks (human_in_the_loop_safe_mode, sensitive_action_safe_mode)
|
||||||
if not execution_context.safe_mode:
|
# are handled by the caller:
|
||||||
|
# - HITL blocks check human_in_the_loop_safe_mode in their run() method
|
||||||
|
# - Sensitive action blocks check sensitive_action_safe_mode in is_block_exec_need_review()
|
||||||
|
# This function only handles auto-approval for specific nodes.
|
||||||
|
|
||||||
|
# Check if this node has been auto-approved in a previous review
|
||||||
|
auto_approval = await check_auto_approval(
|
||||||
|
graph_exec_id=graph_exec_id,
|
||||||
|
node_id=node_id,
|
||||||
|
)
|
||||||
|
if auto_approval:
|
||||||
logger.info(
|
logger.info(
|
||||||
f"Block {block_name} skipping review for node {node_exec_id} - safe mode disabled"
|
f"Block {block_name} skipping review for node {node_exec_id} - "
|
||||||
|
f"node {node_id} has auto-approval from previous review"
|
||||||
)
|
)
|
||||||
|
# Return a new ReviewResult with the current node_exec_id but approved status
|
||||||
return ReviewResult(
|
return ReviewResult(
|
||||||
data=input_data,
|
data=input_data,
|
||||||
status=ReviewStatus.APPROVED,
|
status=ReviewStatus.APPROVED,
|
||||||
message="Auto-approved (safe mode disabled)",
|
message="Auto-approved (user approved all future actions for this block)",
|
||||||
processed=True,
|
processed=True,
|
||||||
node_exec_id=node_exec_id,
|
node_exec_id=node_exec_id,
|
||||||
)
|
)
|
||||||
@@ -129,6 +143,7 @@ class HITLReviewHelper:
|
|||||||
async def handle_review_decision(
|
async def handle_review_decision(
|
||||||
input_data: Any,
|
input_data: Any,
|
||||||
user_id: str,
|
user_id: str,
|
||||||
|
node_id: str,
|
||||||
node_exec_id: str,
|
node_exec_id: str,
|
||||||
graph_exec_id: str,
|
graph_exec_id: str,
|
||||||
graph_id: str,
|
graph_id: str,
|
||||||
@@ -143,6 +158,7 @@ class HITLReviewHelper:
|
|||||||
Args:
|
Args:
|
||||||
input_data: The input data to be reviewed
|
input_data: The input data to be reviewed
|
||||||
user_id: ID of the user requesting the review
|
user_id: ID of the user requesting the review
|
||||||
|
node_id: ID of the node in the graph definition
|
||||||
node_exec_id: ID of the node execution
|
node_exec_id: ID of the node execution
|
||||||
graph_exec_id: ID of the graph execution
|
graph_exec_id: ID of the graph execution
|
||||||
graph_id: ID of the graph
|
graph_id: ID of the graph
|
||||||
@@ -158,6 +174,7 @@ class HITLReviewHelper:
|
|||||||
review_result = await HITLReviewHelper._handle_review_request(
|
review_result = await HITLReviewHelper._handle_review_request(
|
||||||
input_data=input_data,
|
input_data=input_data,
|
||||||
user_id=user_id,
|
user_id=user_id,
|
||||||
|
node_id=node_id,
|
||||||
node_exec_id=node_exec_id,
|
node_exec_id=node_exec_id,
|
||||||
graph_exec_id=graph_exec_id,
|
graph_exec_id=graph_exec_id,
|
||||||
graph_id=graph_id,
|
graph_id=graph_id,
|
||||||
|
|||||||
@@ -97,6 +97,7 @@ class HumanInTheLoopBlock(Block):
|
|||||||
input_data: Input,
|
input_data: Input,
|
||||||
*,
|
*,
|
||||||
user_id: str,
|
user_id: str,
|
||||||
|
node_id: str,
|
||||||
node_exec_id: str,
|
node_exec_id: str,
|
||||||
graph_exec_id: str,
|
graph_exec_id: str,
|
||||||
graph_id: str,
|
graph_id: str,
|
||||||
@@ -104,7 +105,7 @@ class HumanInTheLoopBlock(Block):
|
|||||||
execution_context: ExecutionContext,
|
execution_context: ExecutionContext,
|
||||||
**_kwargs,
|
**_kwargs,
|
||||||
) -> BlockOutput:
|
) -> BlockOutput:
|
||||||
if not execution_context.safe_mode:
|
if not execution_context.human_in_the_loop_safe_mode:
|
||||||
logger.info(
|
logger.info(
|
||||||
f"HITL block skipping review for node {node_exec_id} - safe mode disabled"
|
f"HITL block skipping review for node {node_exec_id} - safe mode disabled"
|
||||||
)
|
)
|
||||||
@@ -115,6 +116,7 @@ class HumanInTheLoopBlock(Block):
|
|||||||
decision = await self.handle_review_decision(
|
decision = await self.handle_review_decision(
|
||||||
input_data=input_data.data,
|
input_data=input_data.data,
|
||||||
user_id=user_id,
|
user_id=user_id,
|
||||||
|
node_id=node_id,
|
||||||
node_exec_id=node_exec_id,
|
node_exec_id=node_exec_id,
|
||||||
graph_exec_id=graph_exec_id,
|
graph_exec_id=graph_exec_id,
|
||||||
graph_id=graph_id,
|
graph_id=graph_id,
|
||||||
|
|||||||
@@ -79,6 +79,10 @@ class ModelMetadata(NamedTuple):
|
|||||||
provider: str
|
provider: str
|
||||||
context_window: int
|
context_window: int
|
||||||
max_output_tokens: int | None
|
max_output_tokens: int | None
|
||||||
|
display_name: str
|
||||||
|
provider_name: str
|
||||||
|
creator_name: str
|
||||||
|
price_tier: Literal[1, 2, 3]
|
||||||
|
|
||||||
|
|
||||||
class LlmModelMeta(EnumMeta):
|
class LlmModelMeta(EnumMeta):
|
||||||
@@ -171,6 +175,26 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
|
|||||||
V0_1_5_LG = "v0-1.5-lg"
|
V0_1_5_LG = "v0-1.5-lg"
|
||||||
V0_1_0_MD = "v0-1.0-md"
|
V0_1_0_MD = "v0-1.0-md"
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def __get_pydantic_json_schema__(cls, schema, handler):
|
||||||
|
json_schema = handler(schema)
|
||||||
|
llm_model_metadata = {}
|
||||||
|
for model in cls:
|
||||||
|
model_name = model.value
|
||||||
|
metadata = model.metadata
|
||||||
|
llm_model_metadata[model_name] = {
|
||||||
|
"creator": metadata.creator_name,
|
||||||
|
"creator_name": metadata.creator_name,
|
||||||
|
"title": metadata.display_name,
|
||||||
|
"provider": metadata.provider,
|
||||||
|
"provider_name": metadata.provider_name,
|
||||||
|
"name": model_name,
|
||||||
|
"price_tier": metadata.price_tier,
|
||||||
|
}
|
||||||
|
json_schema["llm_model"] = True
|
||||||
|
json_schema["llm_model_metadata"] = llm_model_metadata
|
||||||
|
return json_schema
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def metadata(self) -> ModelMetadata:
|
def metadata(self) -> ModelMetadata:
|
||||||
return MODEL_METADATA[self]
|
return MODEL_METADATA[self]
|
||||||
@@ -190,119 +214,291 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
|
|||||||
|
|
||||||
MODEL_METADATA = {
|
MODEL_METADATA = {
|
||||||
# https://platform.openai.com/docs/models
|
# https://platform.openai.com/docs/models
|
||||||
LlmModel.O3: ModelMetadata("openai", 200000, 100000),
|
LlmModel.O3: ModelMetadata("openai", 200000, 100000, "O3", "OpenAI", "OpenAI", 2),
|
||||||
LlmModel.O3_MINI: ModelMetadata("openai", 200000, 100000), # o3-mini-2025-01-31
|
LlmModel.O3_MINI: ModelMetadata(
|
||||||
LlmModel.O1: ModelMetadata("openai", 200000, 100000), # o1-2024-12-17
|
"openai", 200000, 100000, "O3 Mini", "OpenAI", "OpenAI", 1
|
||||||
LlmModel.O1_MINI: ModelMetadata("openai", 128000, 65536), # o1-mini-2024-09-12
|
), # o3-mini-2025-01-31
|
||||||
|
LlmModel.O1: ModelMetadata(
|
||||||
|
"openai", 200000, 100000, "O1", "OpenAI", "OpenAI", 3
|
||||||
|
), # o1-2024-12-17
|
||||||
|
LlmModel.O1_MINI: ModelMetadata(
|
||||||
|
"openai", 128000, 65536, "O1 Mini", "OpenAI", "OpenAI", 2
|
||||||
|
), # o1-mini-2024-09-12
|
||||||
# GPT-5 models
|
# GPT-5 models
|
||||||
LlmModel.GPT5_2: ModelMetadata("openai", 400000, 128000),
|
LlmModel.GPT5_2: ModelMetadata(
|
||||||
LlmModel.GPT5_1: ModelMetadata("openai", 400000, 128000),
|
"openai", 400000, 128000, "GPT-5.2", "OpenAI", "OpenAI", 3
|
||||||
LlmModel.GPT5: ModelMetadata("openai", 400000, 128000),
|
),
|
||||||
LlmModel.GPT5_MINI: ModelMetadata("openai", 400000, 128000),
|
LlmModel.GPT5_1: ModelMetadata(
|
||||||
LlmModel.GPT5_NANO: ModelMetadata("openai", 400000, 128000),
|
"openai", 400000, 128000, "GPT-5.1", "OpenAI", "OpenAI", 2
|
||||||
LlmModel.GPT5_CHAT: ModelMetadata("openai", 400000, 16384),
|
),
|
||||||
LlmModel.GPT41: ModelMetadata("openai", 1047576, 32768),
|
LlmModel.GPT5: ModelMetadata(
|
||||||
LlmModel.GPT41_MINI: ModelMetadata("openai", 1047576, 32768),
|
"openai", 400000, 128000, "GPT-5", "OpenAI", "OpenAI", 1
|
||||||
|
),
|
||||||
|
LlmModel.GPT5_MINI: ModelMetadata(
|
||||||
|
"openai", 400000, 128000, "GPT-5 Mini", "OpenAI", "OpenAI", 1
|
||||||
|
),
|
||||||
|
LlmModel.GPT5_NANO: ModelMetadata(
|
||||||
|
"openai", 400000, 128000, "GPT-5 Nano", "OpenAI", "OpenAI", 1
|
||||||
|
),
|
||||||
|
LlmModel.GPT5_CHAT: ModelMetadata(
|
||||||
|
"openai", 400000, 16384, "GPT-5 Chat Latest", "OpenAI", "OpenAI", 2
|
||||||
|
),
|
||||||
|
LlmModel.GPT41: ModelMetadata(
|
||||||
|
"openai", 1047576, 32768, "GPT-4.1", "OpenAI", "OpenAI", 1
|
||||||
|
),
|
||||||
|
LlmModel.GPT41_MINI: ModelMetadata(
|
||||||
|
"openai", 1047576, 32768, "GPT-4.1 Mini", "OpenAI", "OpenAI", 1
|
||||||
|
),
|
||||||
LlmModel.GPT4O_MINI: ModelMetadata(
|
LlmModel.GPT4O_MINI: ModelMetadata(
|
||||||
"openai", 128000, 16384
|
"openai", 128000, 16384, "GPT-4o Mini", "OpenAI", "OpenAI", 1
|
||||||
), # gpt-4o-mini-2024-07-18
|
), # gpt-4o-mini-2024-07-18
|
||||||
LlmModel.GPT4O: ModelMetadata("openai", 128000, 16384), # gpt-4o-2024-08-06
|
LlmModel.GPT4O: ModelMetadata(
|
||||||
|
"openai", 128000, 16384, "GPT-4o", "OpenAI", "OpenAI", 2
|
||||||
|
), # gpt-4o-2024-08-06
|
||||||
LlmModel.GPT4_TURBO: ModelMetadata(
|
LlmModel.GPT4_TURBO: ModelMetadata(
|
||||||
"openai", 128000, 4096
|
"openai", 128000, 4096, "GPT-4 Turbo", "OpenAI", "OpenAI", 3
|
||||||
), # gpt-4-turbo-2024-04-09
|
), # gpt-4-turbo-2024-04-09
|
||||||
LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385, 4096), # gpt-3.5-turbo-0125
|
LlmModel.GPT3_5_TURBO: ModelMetadata(
|
||||||
|
"openai", 16385, 4096, "GPT-3.5 Turbo", "OpenAI", "OpenAI", 1
|
||||||
|
), # gpt-3.5-turbo-0125
|
||||||
# https://docs.anthropic.com/en/docs/about-claude/models
|
# https://docs.anthropic.com/en/docs/about-claude/models
|
||||||
LlmModel.CLAUDE_4_1_OPUS: ModelMetadata(
|
LlmModel.CLAUDE_4_1_OPUS: ModelMetadata(
|
||||||
"anthropic", 200000, 32000
|
"anthropic", 200000, 32000, "Claude Opus 4.1", "Anthropic", "Anthropic", 3
|
||||||
), # claude-opus-4-1-20250805
|
), # claude-opus-4-1-20250805
|
||||||
LlmModel.CLAUDE_4_OPUS: ModelMetadata(
|
LlmModel.CLAUDE_4_OPUS: ModelMetadata(
|
||||||
"anthropic", 200000, 32000
|
"anthropic", 200000, 32000, "Claude Opus 4", "Anthropic", "Anthropic", 3
|
||||||
), # claude-4-opus-20250514
|
), # claude-4-opus-20250514
|
||||||
LlmModel.CLAUDE_4_SONNET: ModelMetadata(
|
LlmModel.CLAUDE_4_SONNET: ModelMetadata(
|
||||||
"anthropic", 200000, 64000
|
"anthropic", 200000, 64000, "Claude Sonnet 4", "Anthropic", "Anthropic", 2
|
||||||
), # claude-4-sonnet-20250514
|
), # claude-4-sonnet-20250514
|
||||||
LlmModel.CLAUDE_4_5_OPUS: ModelMetadata(
|
LlmModel.CLAUDE_4_5_OPUS: ModelMetadata(
|
||||||
"anthropic", 200000, 64000
|
"anthropic", 200000, 64000, "Claude Opus 4.5", "Anthropic", "Anthropic", 3
|
||||||
), # claude-opus-4-5-20251101
|
), # claude-opus-4-5-20251101
|
||||||
LlmModel.CLAUDE_4_5_SONNET: ModelMetadata(
|
LlmModel.CLAUDE_4_5_SONNET: ModelMetadata(
|
||||||
"anthropic", 200000, 64000
|
"anthropic", 200000, 64000, "Claude Sonnet 4.5", "Anthropic", "Anthropic", 3
|
||||||
), # claude-sonnet-4-5-20250929
|
), # claude-sonnet-4-5-20250929
|
||||||
LlmModel.CLAUDE_4_5_HAIKU: ModelMetadata(
|
LlmModel.CLAUDE_4_5_HAIKU: ModelMetadata(
|
||||||
"anthropic", 200000, 64000
|
"anthropic", 200000, 64000, "Claude Haiku 4.5", "Anthropic", "Anthropic", 2
|
||||||
), # claude-haiku-4-5-20251001
|
), # claude-haiku-4-5-20251001
|
||||||
LlmModel.CLAUDE_3_7_SONNET: ModelMetadata(
|
LlmModel.CLAUDE_3_7_SONNET: ModelMetadata(
|
||||||
"anthropic", 200000, 64000
|
"anthropic", 200000, 64000, "Claude 3.7 Sonnet", "Anthropic", "Anthropic", 2
|
||||||
), # claude-3-7-sonnet-20250219
|
), # claude-3-7-sonnet-20250219
|
||||||
LlmModel.CLAUDE_3_HAIKU: ModelMetadata(
|
LlmModel.CLAUDE_3_HAIKU: ModelMetadata(
|
||||||
"anthropic", 200000, 4096
|
"anthropic", 200000, 4096, "Claude 3 Haiku", "Anthropic", "Anthropic", 1
|
||||||
), # claude-3-haiku-20240307
|
), # claude-3-haiku-20240307
|
||||||
# https://docs.aimlapi.com/api-overview/model-database/text-models
|
# https://docs.aimlapi.com/api-overview/model-database/text-models
|
||||||
LlmModel.AIML_API_QWEN2_5_72B: ModelMetadata("aiml_api", 32000, 8000),
|
LlmModel.AIML_API_QWEN2_5_72B: ModelMetadata(
|
||||||
LlmModel.AIML_API_LLAMA3_1_70B: ModelMetadata("aiml_api", 128000, 40000),
|
"aiml_api", 32000, 8000, "Qwen 2.5 72B Instruct Turbo", "AI/ML", "Qwen", 1
|
||||||
LlmModel.AIML_API_LLAMA3_3_70B: ModelMetadata("aiml_api", 128000, None),
|
),
|
||||||
LlmModel.AIML_API_META_LLAMA_3_1_70B: ModelMetadata("aiml_api", 131000, 2000),
|
LlmModel.AIML_API_LLAMA3_1_70B: ModelMetadata(
|
||||||
LlmModel.AIML_API_LLAMA_3_2_3B: ModelMetadata("aiml_api", 128000, None),
|
"aiml_api",
|
||||||
# https://console.groq.com/docs/models
|
128000,
|
||||||
LlmModel.LLAMA3_3_70B: ModelMetadata("groq", 128000, 32768),
|
40000,
|
||||||
LlmModel.LLAMA3_1_8B: ModelMetadata("groq", 128000, 8192),
|
"Llama 3.1 Nemotron 70B Instruct",
|
||||||
# https://ollama.com/library
|
"AI/ML",
|
||||||
LlmModel.OLLAMA_LLAMA3_3: ModelMetadata("ollama", 8192, None),
|
"Nvidia",
|
||||||
LlmModel.OLLAMA_LLAMA3_2: ModelMetadata("ollama", 8192, None),
|
1,
|
||||||
LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192, None),
|
),
|
||||||
LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata("ollama", 8192, None),
|
LlmModel.AIML_API_LLAMA3_3_70B: ModelMetadata(
|
||||||
LlmModel.OLLAMA_DOLPHIN: ModelMetadata("ollama", 32768, None),
|
"aiml_api", 128000, None, "Llama 3.3 70B Instruct Turbo", "AI/ML", "Meta", 1
|
||||||
# https://openrouter.ai/models
|
),
|
||||||
LlmModel.GEMINI_2_5_PRO: ModelMetadata("open_router", 1050000, 8192),
|
LlmModel.AIML_API_META_LLAMA_3_1_70B: ModelMetadata(
|
||||||
LlmModel.GEMINI_3_PRO_PREVIEW: ModelMetadata("open_router", 1048576, 65535),
|
"aiml_api", 131000, 2000, "Llama 3.1 70B Instruct Turbo", "AI/ML", "Meta", 1
|
||||||
LlmModel.GEMINI_2_5_FLASH: ModelMetadata("open_router", 1048576, 65535),
|
),
|
||||||
LlmModel.GEMINI_2_0_FLASH: ModelMetadata("open_router", 1048576, 8192),
|
LlmModel.AIML_API_LLAMA_3_2_3B: ModelMetadata(
|
||||||
LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: ModelMetadata(
|
"aiml_api", 128000, None, "Llama 3.2 3B Instruct Turbo", "AI/ML", "Meta", 1
|
||||||
"open_router", 1048576, 65535
|
),
|
||||||
|
# https://console.groq.com/docs/models
|
||||||
|
LlmModel.LLAMA3_3_70B: ModelMetadata(
|
||||||
|
"groq", 128000, 32768, "Llama 3.3 70B Versatile", "Groq", "Meta", 1
|
||||||
|
),
|
||||||
|
LlmModel.LLAMA3_1_8B: ModelMetadata(
|
||||||
|
"groq", 128000, 8192, "Llama 3.1 8B Instant", "Groq", "Meta", 1
|
||||||
|
),
|
||||||
|
# https://ollama.com/library
|
||||||
|
LlmModel.OLLAMA_LLAMA3_3: ModelMetadata(
|
||||||
|
"ollama", 8192, None, "Llama 3.3", "Ollama", "Meta", 1
|
||||||
|
),
|
||||||
|
LlmModel.OLLAMA_LLAMA3_2: ModelMetadata(
|
||||||
|
"ollama", 8192, None, "Llama 3.2", "Ollama", "Meta", 1
|
||||||
|
),
|
||||||
|
LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata(
|
||||||
|
"ollama", 8192, None, "Llama 3", "Ollama", "Meta", 1
|
||||||
|
),
|
||||||
|
LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata(
|
||||||
|
"ollama", 8192, None, "Llama 3.1 405B", "Ollama", "Meta", 1
|
||||||
|
),
|
||||||
|
LlmModel.OLLAMA_DOLPHIN: ModelMetadata(
|
||||||
|
"ollama", 32768, None, "Dolphin Mistral Latest", "Ollama", "Mistral AI", 1
|
||||||
|
),
|
||||||
|
# https://openrouter.ai/models
|
||||||
|
LlmModel.GEMINI_2_5_PRO: ModelMetadata(
|
||||||
|
"open_router",
|
||||||
|
1050000,
|
||||||
|
8192,
|
||||||
|
"Gemini 2.5 Pro Preview 03.25",
|
||||||
|
"OpenRouter",
|
||||||
|
"Google",
|
||||||
|
2,
|
||||||
|
),
|
||||||
|
LlmModel.GEMINI_3_PRO_PREVIEW: ModelMetadata(
|
||||||
|
"open_router", 1048576, 65535, "Gemini 3 Pro Preview", "OpenRouter", "Google", 2
|
||||||
|
),
|
||||||
|
LlmModel.GEMINI_2_5_FLASH: ModelMetadata(
|
||||||
|
"open_router", 1048576, 65535, "Gemini 2.5 Flash", "OpenRouter", "Google", 1
|
||||||
|
),
|
||||||
|
LlmModel.GEMINI_2_0_FLASH: ModelMetadata(
|
||||||
|
"open_router", 1048576, 8192, "Gemini 2.0 Flash 001", "OpenRouter", "Google", 1
|
||||||
|
),
|
||||||
|
LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: ModelMetadata(
|
||||||
|
"open_router",
|
||||||
|
1048576,
|
||||||
|
65535,
|
||||||
|
"Gemini 2.5 Flash Lite Preview 06.17",
|
||||||
|
"OpenRouter",
|
||||||
|
"Google",
|
||||||
|
1,
|
||||||
|
),
|
||||||
|
LlmModel.GEMINI_2_0_FLASH_LITE: ModelMetadata(
|
||||||
|
"open_router",
|
||||||
|
1048576,
|
||||||
|
8192,
|
||||||
|
"Gemini 2.0 Flash Lite 001",
|
||||||
|
"OpenRouter",
|
||||||
|
"Google",
|
||||||
|
1,
|
||||||
|
),
|
||||||
|
LlmModel.MISTRAL_NEMO: ModelMetadata(
|
||||||
|
"open_router", 128000, 4096, "Mistral Nemo", "OpenRouter", "Mistral AI", 1
|
||||||
|
),
|
||||||
|
LlmModel.COHERE_COMMAND_R_08_2024: ModelMetadata(
|
||||||
|
"open_router", 128000, 4096, "Command R 08.2024", "OpenRouter", "Cohere", 1
|
||||||
|
),
|
||||||
|
LlmModel.COHERE_COMMAND_R_PLUS_08_2024: ModelMetadata(
|
||||||
|
"open_router", 128000, 4096, "Command R Plus 08.2024", "OpenRouter", "Cohere", 2
|
||||||
|
),
|
||||||
|
LlmModel.DEEPSEEK_CHAT: ModelMetadata(
|
||||||
|
"open_router", 64000, 2048, "DeepSeek Chat", "OpenRouter", "DeepSeek", 1
|
||||||
|
),
|
||||||
|
LlmModel.DEEPSEEK_R1_0528: ModelMetadata(
|
||||||
|
"open_router", 163840, 163840, "DeepSeek R1 0528", "OpenRouter", "DeepSeek", 1
|
||||||
|
),
|
||||||
|
LlmModel.PERPLEXITY_SONAR: ModelMetadata(
|
||||||
|
"open_router", 127000, 8000, "Sonar", "OpenRouter", "Perplexity", 1
|
||||||
|
),
|
||||||
|
LlmModel.PERPLEXITY_SONAR_PRO: ModelMetadata(
|
||||||
|
"open_router", 200000, 8000, "Sonar Pro", "OpenRouter", "Perplexity", 2
|
||||||
),
|
),
|
||||||
LlmModel.GEMINI_2_0_FLASH_LITE: ModelMetadata("open_router", 1048576, 8192),
|
|
||||||
LlmModel.MISTRAL_NEMO: ModelMetadata("open_router", 128000, 4096),
|
|
||||||
LlmModel.COHERE_COMMAND_R_08_2024: ModelMetadata("open_router", 128000, 4096),
|
|
||||||
LlmModel.COHERE_COMMAND_R_PLUS_08_2024: ModelMetadata("open_router", 128000, 4096),
|
|
||||||
LlmModel.DEEPSEEK_CHAT: ModelMetadata("open_router", 64000, 2048),
|
|
||||||
LlmModel.DEEPSEEK_R1_0528: ModelMetadata("open_router", 163840, 163840),
|
|
||||||
LlmModel.PERPLEXITY_SONAR: ModelMetadata("open_router", 127000, 8000),
|
|
||||||
LlmModel.PERPLEXITY_SONAR_PRO: ModelMetadata("open_router", 200000, 8000),
|
|
||||||
LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: ModelMetadata(
|
LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: ModelMetadata(
|
||||||
"open_router",
|
"open_router",
|
||||||
128000,
|
128000,
|
||||||
16000,
|
16000,
|
||||||
|
"Sonar Deep Research",
|
||||||
|
"OpenRouter",
|
||||||
|
"Perplexity",
|
||||||
|
3,
|
||||||
),
|
),
|
||||||
LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B: ModelMetadata(
|
LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B: ModelMetadata(
|
||||||
"open_router", 131000, 4096
|
"open_router",
|
||||||
|
131000,
|
||||||
|
4096,
|
||||||
|
"Hermes 3 Llama 3.1 405B",
|
||||||
|
"OpenRouter",
|
||||||
|
"Nous Research",
|
||||||
|
1,
|
||||||
),
|
),
|
||||||
LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B: ModelMetadata(
|
LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B: ModelMetadata(
|
||||||
"open_router", 12288, 12288
|
"open_router",
|
||||||
|
12288,
|
||||||
|
12288,
|
||||||
|
"Hermes 3 Llama 3.1 70B",
|
||||||
|
"OpenRouter",
|
||||||
|
"Nous Research",
|
||||||
|
1,
|
||||||
|
),
|
||||||
|
LlmModel.OPENAI_GPT_OSS_120B: ModelMetadata(
|
||||||
|
"open_router", 131072, 131072, "GPT-OSS 120B", "OpenRouter", "OpenAI", 1
|
||||||
|
),
|
||||||
|
LlmModel.OPENAI_GPT_OSS_20B: ModelMetadata(
|
||||||
|
"open_router", 131072, 32768, "GPT-OSS 20B", "OpenRouter", "OpenAI", 1
|
||||||
|
),
|
||||||
|
LlmModel.AMAZON_NOVA_LITE_V1: ModelMetadata(
|
||||||
|
"open_router", 300000, 5120, "Nova Lite V1", "OpenRouter", "Amazon", 1
|
||||||
|
),
|
||||||
|
LlmModel.AMAZON_NOVA_MICRO_V1: ModelMetadata(
|
||||||
|
"open_router", 128000, 5120, "Nova Micro V1", "OpenRouter", "Amazon", 1
|
||||||
|
),
|
||||||
|
LlmModel.AMAZON_NOVA_PRO_V1: ModelMetadata(
|
||||||
|
"open_router", 300000, 5120, "Nova Pro V1", "OpenRouter", "Amazon", 1
|
||||||
|
),
|
||||||
|
LlmModel.MICROSOFT_WIZARDLM_2_8X22B: ModelMetadata(
|
||||||
|
"open_router", 65536, 4096, "WizardLM 2 8x22B", "OpenRouter", "Microsoft", 1
|
||||||
|
),
|
||||||
|
LlmModel.GRYPHE_MYTHOMAX_L2_13B: ModelMetadata(
|
||||||
|
"open_router", 4096, 4096, "MythoMax L2 13B", "OpenRouter", "Gryphe", 1
|
||||||
|
),
|
||||||
|
LlmModel.META_LLAMA_4_SCOUT: ModelMetadata(
|
||||||
|
"open_router", 131072, 131072, "Llama 4 Scout", "OpenRouter", "Meta", 1
|
||||||
|
),
|
||||||
|
LlmModel.META_LLAMA_4_MAVERICK: ModelMetadata(
|
||||||
|
"open_router", 1048576, 1000000, "Llama 4 Maverick", "OpenRouter", "Meta", 1
|
||||||
|
),
|
||||||
|
LlmModel.GROK_4: ModelMetadata(
|
||||||
|
"open_router", 256000, 256000, "Grok 4", "OpenRouter", "xAI", 3
|
||||||
|
),
|
||||||
|
LlmModel.GROK_4_FAST: ModelMetadata(
|
||||||
|
"open_router", 2000000, 30000, "Grok 4 Fast", "OpenRouter", "xAI", 1
|
||||||
|
),
|
||||||
|
LlmModel.GROK_4_1_FAST: ModelMetadata(
|
||||||
|
"open_router", 2000000, 30000, "Grok 4.1 Fast", "OpenRouter", "xAI", 1
|
||||||
|
),
|
||||||
|
LlmModel.GROK_CODE_FAST_1: ModelMetadata(
|
||||||
|
"open_router", 256000, 10000, "Grok Code Fast 1", "OpenRouter", "xAI", 1
|
||||||
|
),
|
||||||
|
LlmModel.KIMI_K2: ModelMetadata(
|
||||||
|
"open_router", 131000, 131000, "Kimi K2", "OpenRouter", "Moonshot AI", 1
|
||||||
|
),
|
||||||
|
LlmModel.QWEN3_235B_A22B_THINKING: ModelMetadata(
|
||||||
|
"open_router",
|
||||||
|
262144,
|
||||||
|
262144,
|
||||||
|
"Qwen 3 235B A22B Thinking 2507",
|
||||||
|
"OpenRouter",
|
||||||
|
"Qwen",
|
||||||
|
1,
|
||||||
|
),
|
||||||
|
LlmModel.QWEN3_CODER: ModelMetadata(
|
||||||
|
"open_router", 262144, 262144, "Qwen 3 Coder", "OpenRouter", "Qwen", 3
|
||||||
),
|
),
|
||||||
LlmModel.OPENAI_GPT_OSS_120B: ModelMetadata("open_router", 131072, 131072),
|
|
||||||
LlmModel.OPENAI_GPT_OSS_20B: ModelMetadata("open_router", 131072, 32768),
|
|
||||||
LlmModel.AMAZON_NOVA_LITE_V1: ModelMetadata("open_router", 300000, 5120),
|
|
||||||
LlmModel.AMAZON_NOVA_MICRO_V1: ModelMetadata("open_router", 128000, 5120),
|
|
||||||
LlmModel.AMAZON_NOVA_PRO_V1: ModelMetadata("open_router", 300000, 5120),
|
|
||||||
LlmModel.MICROSOFT_WIZARDLM_2_8X22B: ModelMetadata("open_router", 65536, 4096),
|
|
||||||
LlmModel.GRYPHE_MYTHOMAX_L2_13B: ModelMetadata("open_router", 4096, 4096),
|
|
||||||
LlmModel.META_LLAMA_4_SCOUT: ModelMetadata("open_router", 131072, 131072),
|
|
||||||
LlmModel.META_LLAMA_4_MAVERICK: ModelMetadata("open_router", 1048576, 1000000),
|
|
||||||
LlmModel.GROK_4: ModelMetadata("open_router", 256000, 256000),
|
|
||||||
LlmModel.GROK_4_FAST: ModelMetadata("open_router", 2000000, 30000),
|
|
||||||
LlmModel.GROK_4_1_FAST: ModelMetadata("open_router", 2000000, 30000),
|
|
||||||
LlmModel.GROK_CODE_FAST_1: ModelMetadata("open_router", 256000, 10000),
|
|
||||||
LlmModel.KIMI_K2: ModelMetadata("open_router", 131000, 131000),
|
|
||||||
LlmModel.QWEN3_235B_A22B_THINKING: ModelMetadata("open_router", 262144, 262144),
|
|
||||||
LlmModel.QWEN3_CODER: ModelMetadata("open_router", 262144, 262144),
|
|
||||||
# Llama API models
|
# Llama API models
|
||||||
LlmModel.LLAMA_API_LLAMA_4_SCOUT: ModelMetadata("llama_api", 128000, 4028),
|
LlmModel.LLAMA_API_LLAMA_4_SCOUT: ModelMetadata(
|
||||||
LlmModel.LLAMA_API_LLAMA4_MAVERICK: ModelMetadata("llama_api", 128000, 4028),
|
"llama_api",
|
||||||
LlmModel.LLAMA_API_LLAMA3_3_8B: ModelMetadata("llama_api", 128000, 4028),
|
128000,
|
||||||
LlmModel.LLAMA_API_LLAMA3_3_70B: ModelMetadata("llama_api", 128000, 4028),
|
4028,
|
||||||
|
"Llama 4 Scout 17B 16E Instruct FP8",
|
||||||
|
"Llama API",
|
||||||
|
"Meta",
|
||||||
|
1,
|
||||||
|
),
|
||||||
|
LlmModel.LLAMA_API_LLAMA4_MAVERICK: ModelMetadata(
|
||||||
|
"llama_api",
|
||||||
|
128000,
|
||||||
|
4028,
|
||||||
|
"Llama 4 Maverick 17B 128E Instruct FP8",
|
||||||
|
"Llama API",
|
||||||
|
"Meta",
|
||||||
|
1,
|
||||||
|
),
|
||||||
|
LlmModel.LLAMA_API_LLAMA3_3_8B: ModelMetadata(
|
||||||
|
"llama_api", 128000, 4028, "Llama 3.3 8B Instruct", "Llama API", "Meta", 1
|
||||||
|
),
|
||||||
|
LlmModel.LLAMA_API_LLAMA3_3_70B: ModelMetadata(
|
||||||
|
"llama_api", 128000, 4028, "Llama 3.3 70B Instruct", "Llama API", "Meta", 1
|
||||||
|
),
|
||||||
# v0 by Vercel models
|
# v0 by Vercel models
|
||||||
LlmModel.V0_1_5_MD: ModelMetadata("v0", 128000, 64000),
|
LlmModel.V0_1_5_MD: ModelMetadata("v0", 128000, 64000, "v0 1.5 MD", "V0", "V0", 1),
|
||||||
LlmModel.V0_1_5_LG: ModelMetadata("v0", 512000, 64000),
|
LlmModel.V0_1_5_LG: ModelMetadata("v0", 512000, 64000, "v0 1.5 LG", "V0", "V0", 1),
|
||||||
LlmModel.V0_1_0_MD: ModelMetadata("v0", 128000, 64000),
|
LlmModel.V0_1_0_MD: ModelMetadata("v0", 128000, 64000, "v0 1.0 MD", "V0", "V0", 1),
|
||||||
}
|
}
|
||||||
|
|
||||||
DEFAULT_LLM_MODEL = LlmModel.GPT5_2
|
DEFAULT_LLM_MODEL = LlmModel.GPT5_2
|
||||||
|
|||||||
@@ -242,7 +242,7 @@ async def test_smart_decision_maker_tracks_llm_stats():
|
|||||||
outputs = {}
|
outputs = {}
|
||||||
# Create execution context
|
# Create execution context
|
||||||
|
|
||||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
|
||||||
|
|
||||||
# Create a mock execution processor for tests
|
# Create a mock execution processor for tests
|
||||||
|
|
||||||
@@ -343,7 +343,7 @@ async def test_smart_decision_maker_parameter_validation():
|
|||||||
|
|
||||||
# Create execution context
|
# Create execution context
|
||||||
|
|
||||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
|
||||||
|
|
||||||
# Create a mock execution processor for tests
|
# Create a mock execution processor for tests
|
||||||
|
|
||||||
@@ -409,7 +409,7 @@ async def test_smart_decision_maker_parameter_validation():
|
|||||||
|
|
||||||
# Create execution context
|
# Create execution context
|
||||||
|
|
||||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
|
||||||
|
|
||||||
# Create a mock execution processor for tests
|
# Create a mock execution processor for tests
|
||||||
|
|
||||||
@@ -471,7 +471,7 @@ async def test_smart_decision_maker_parameter_validation():
|
|||||||
outputs = {}
|
outputs = {}
|
||||||
# Create execution context
|
# Create execution context
|
||||||
|
|
||||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
|
||||||
|
|
||||||
# Create a mock execution processor for tests
|
# Create a mock execution processor for tests
|
||||||
|
|
||||||
@@ -535,7 +535,7 @@ async def test_smart_decision_maker_parameter_validation():
|
|||||||
outputs = {}
|
outputs = {}
|
||||||
# Create execution context
|
# Create execution context
|
||||||
|
|
||||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
|
||||||
|
|
||||||
# Create a mock execution processor for tests
|
# Create a mock execution processor for tests
|
||||||
|
|
||||||
@@ -658,7 +658,7 @@ async def test_smart_decision_maker_raw_response_conversion():
|
|||||||
outputs = {}
|
outputs = {}
|
||||||
# Create execution context
|
# Create execution context
|
||||||
|
|
||||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
|
||||||
|
|
||||||
# Create a mock execution processor for tests
|
# Create a mock execution processor for tests
|
||||||
|
|
||||||
@@ -730,7 +730,7 @@ async def test_smart_decision_maker_raw_response_conversion():
|
|||||||
outputs = {}
|
outputs = {}
|
||||||
# Create execution context
|
# Create execution context
|
||||||
|
|
||||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
|
||||||
|
|
||||||
# Create a mock execution processor for tests
|
# Create a mock execution processor for tests
|
||||||
|
|
||||||
@@ -786,7 +786,7 @@ async def test_smart_decision_maker_raw_response_conversion():
|
|||||||
outputs = {}
|
outputs = {}
|
||||||
# Create execution context
|
# Create execution context
|
||||||
|
|
||||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
|
||||||
|
|
||||||
# Create a mock execution processor for tests
|
# Create a mock execution processor for tests
|
||||||
|
|
||||||
@@ -905,7 +905,7 @@ async def test_smart_decision_maker_agent_mode():
|
|||||||
# Create a mock execution context
|
# Create a mock execution context
|
||||||
|
|
||||||
mock_execution_context = ExecutionContext(
|
mock_execution_context = ExecutionContext(
|
||||||
safe_mode=False,
|
human_in_the_loop_safe_mode=False,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Create a mock execution processor for agent mode tests
|
# Create a mock execution processor for agent mode tests
|
||||||
@@ -1027,7 +1027,7 @@ async def test_smart_decision_maker_traditional_mode_default():
|
|||||||
|
|
||||||
# Create execution context
|
# Create execution context
|
||||||
|
|
||||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
|
||||||
|
|
||||||
# Create a mock execution processor for tests
|
# Create a mock execution processor for tests
|
||||||
|
|
||||||
|
|||||||
@@ -386,7 +386,7 @@ async def test_output_yielding_with_dynamic_fields():
|
|||||||
outputs = {}
|
outputs = {}
|
||||||
from backend.data.execution import ExecutionContext
|
from backend.data.execution import ExecutionContext
|
||||||
|
|
||||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
|
||||||
mock_execution_processor = MagicMock()
|
mock_execution_processor = MagicMock()
|
||||||
|
|
||||||
async for output_name, output_value in block.run(
|
async for output_name, output_value in block.run(
|
||||||
@@ -609,7 +609,9 @@ async def test_validation_errors_dont_pollute_conversation():
|
|||||||
outputs = {}
|
outputs = {}
|
||||||
from backend.data.execution import ExecutionContext
|
from backend.data.execution import ExecutionContext
|
||||||
|
|
||||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
mock_execution_context = ExecutionContext(
|
||||||
|
human_in_the_loop_safe_mode=False
|
||||||
|
)
|
||||||
|
|
||||||
# Create a proper mock execution processor for agent mode
|
# Create a proper mock execution processor for agent mode
|
||||||
from collections import defaultdict
|
from collections import defaultdict
|
||||||
|
|||||||
@@ -441,6 +441,7 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
|
|||||||
static_output: bool = False,
|
static_output: bool = False,
|
||||||
block_type: BlockType = BlockType.STANDARD,
|
block_type: BlockType = BlockType.STANDARD,
|
||||||
webhook_config: Optional[BlockWebhookConfig | BlockManualWebhookConfig] = None,
|
webhook_config: Optional[BlockWebhookConfig | BlockManualWebhookConfig] = None,
|
||||||
|
is_sensitive_action: bool = False,
|
||||||
):
|
):
|
||||||
"""
|
"""
|
||||||
Initialize the block with the given schema.
|
Initialize the block with the given schema.
|
||||||
@@ -473,8 +474,8 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
|
|||||||
self.static_output = static_output
|
self.static_output = static_output
|
||||||
self.block_type = block_type
|
self.block_type = block_type
|
||||||
self.webhook_config = webhook_config
|
self.webhook_config = webhook_config
|
||||||
|
self.is_sensitive_action = is_sensitive_action
|
||||||
self.execution_stats: NodeExecutionStats = NodeExecutionStats()
|
self.execution_stats: NodeExecutionStats = NodeExecutionStats()
|
||||||
self.requires_human_review: bool = False
|
|
||||||
|
|
||||||
if self.webhook_config:
|
if self.webhook_config:
|
||||||
if isinstance(self.webhook_config, BlockWebhookConfig):
|
if isinstance(self.webhook_config, BlockWebhookConfig):
|
||||||
@@ -622,6 +623,7 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
|
|||||||
input_data: BlockInput,
|
input_data: BlockInput,
|
||||||
*,
|
*,
|
||||||
user_id: str,
|
user_id: str,
|
||||||
|
node_id: str,
|
||||||
node_exec_id: str,
|
node_exec_id: str,
|
||||||
graph_exec_id: str,
|
graph_exec_id: str,
|
||||||
graph_id: str,
|
graph_id: str,
|
||||||
@@ -637,8 +639,9 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
|
|||||||
- should_pause: True if execution should be paused for review
|
- should_pause: True if execution should be paused for review
|
||||||
- input_data_to_use: The input data to use (may be modified by reviewer)
|
- input_data_to_use: The input data to use (may be modified by reviewer)
|
||||||
"""
|
"""
|
||||||
# Skip review if not required or safe mode is disabled
|
if not (
|
||||||
if not self.requires_human_review or not execution_context.safe_mode:
|
self.is_sensitive_action and execution_context.sensitive_action_safe_mode
|
||||||
|
):
|
||||||
return False, input_data
|
return False, input_data
|
||||||
|
|
||||||
from backend.blocks.helpers.review import HITLReviewHelper
|
from backend.blocks.helpers.review import HITLReviewHelper
|
||||||
@@ -647,6 +650,7 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
|
|||||||
decision = await HITLReviewHelper.handle_review_decision(
|
decision = await HITLReviewHelper.handle_review_decision(
|
||||||
input_data=input_data,
|
input_data=input_data,
|
||||||
user_id=user_id,
|
user_id=user_id,
|
||||||
|
node_id=node_id,
|
||||||
node_exec_id=node_exec_id,
|
node_exec_id=node_exec_id,
|
||||||
graph_exec_id=graph_exec_id,
|
graph_exec_id=graph_exec_id,
|
||||||
graph_id=graph_id,
|
graph_id=graph_id,
|
||||||
|
|||||||
@@ -99,10 +99,15 @@ MODEL_COST: dict[LlmModel, int] = {
|
|||||||
LlmModel.OPENAI_GPT_OSS_20B: 1,
|
LlmModel.OPENAI_GPT_OSS_20B: 1,
|
||||||
LlmModel.GEMINI_2_5_PRO: 4,
|
LlmModel.GEMINI_2_5_PRO: 4,
|
||||||
LlmModel.GEMINI_3_PRO_PREVIEW: 5,
|
LlmModel.GEMINI_3_PRO_PREVIEW: 5,
|
||||||
|
LlmModel.GEMINI_2_5_FLASH: 1,
|
||||||
|
LlmModel.GEMINI_2_0_FLASH: 1,
|
||||||
|
LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: 1,
|
||||||
|
LlmModel.GEMINI_2_0_FLASH_LITE: 1,
|
||||||
LlmModel.MISTRAL_NEMO: 1,
|
LlmModel.MISTRAL_NEMO: 1,
|
||||||
LlmModel.COHERE_COMMAND_R_08_2024: 1,
|
LlmModel.COHERE_COMMAND_R_08_2024: 1,
|
||||||
LlmModel.COHERE_COMMAND_R_PLUS_08_2024: 3,
|
LlmModel.COHERE_COMMAND_R_PLUS_08_2024: 3,
|
||||||
LlmModel.DEEPSEEK_CHAT: 2,
|
LlmModel.DEEPSEEK_CHAT: 2,
|
||||||
|
LlmModel.DEEPSEEK_R1_0528: 1,
|
||||||
LlmModel.PERPLEXITY_SONAR: 1,
|
LlmModel.PERPLEXITY_SONAR: 1,
|
||||||
LlmModel.PERPLEXITY_SONAR_PRO: 5,
|
LlmModel.PERPLEXITY_SONAR_PRO: 5,
|
||||||
LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: 10,
|
LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: 10,
|
||||||
@@ -126,11 +131,6 @@ MODEL_COST: dict[LlmModel, int] = {
|
|||||||
LlmModel.KIMI_K2: 1,
|
LlmModel.KIMI_K2: 1,
|
||||||
LlmModel.QWEN3_235B_A22B_THINKING: 1,
|
LlmModel.QWEN3_235B_A22B_THINKING: 1,
|
||||||
LlmModel.QWEN3_CODER: 9,
|
LlmModel.QWEN3_CODER: 9,
|
||||||
LlmModel.GEMINI_2_5_FLASH: 1,
|
|
||||||
LlmModel.GEMINI_2_0_FLASH: 1,
|
|
||||||
LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: 1,
|
|
||||||
LlmModel.GEMINI_2_0_FLASH_LITE: 1,
|
|
||||||
LlmModel.DEEPSEEK_R1_0528: 1,
|
|
||||||
# v0 by Vercel models
|
# v0 by Vercel models
|
||||||
LlmModel.V0_1_5_MD: 1,
|
LlmModel.V0_1_5_MD: 1,
|
||||||
LlmModel.V0_1_5_LG: 2,
|
LlmModel.V0_1_5_LG: 2,
|
||||||
|
|||||||
@@ -38,20 +38,6 @@ POOL_TIMEOUT = os.getenv("DB_POOL_TIMEOUT")
|
|||||||
if POOL_TIMEOUT:
|
if POOL_TIMEOUT:
|
||||||
DATABASE_URL = add_param(DATABASE_URL, "pool_timeout", POOL_TIMEOUT)
|
DATABASE_URL = add_param(DATABASE_URL, "pool_timeout", POOL_TIMEOUT)
|
||||||
|
|
||||||
# Add public schema to search_path for pgvector type access
|
|
||||||
# The vector extension is in public schema, but search_path is determined by schema parameter
|
|
||||||
# Extract the schema from DATABASE_URL or default to 'public' (matching get_database_schema())
|
|
||||||
parsed_url = urlparse(DATABASE_URL)
|
|
||||||
url_params = dict(parse_qsl(parsed_url.query))
|
|
||||||
db_schema = url_params.get("schema", "public")
|
|
||||||
# Build search_path, avoiding duplicates if db_schema is already 'public'
|
|
||||||
search_path_schemas = list(
|
|
||||||
dict.fromkeys([db_schema, "public"])
|
|
||||||
) # Preserves order, removes duplicates
|
|
||||||
search_path = ",".join(search_path_schemas)
|
|
||||||
# This allows using ::vector without schema qualification
|
|
||||||
DATABASE_URL = add_param(DATABASE_URL, "options", f"-c search_path={search_path}")
|
|
||||||
|
|
||||||
HTTP_TIMEOUT = int(POOL_TIMEOUT) if POOL_TIMEOUT else None
|
HTTP_TIMEOUT = int(POOL_TIMEOUT) if POOL_TIMEOUT else None
|
||||||
|
|
||||||
prisma = Prisma(
|
prisma = Prisma(
|
||||||
@@ -127,38 +113,48 @@ async def _raw_with_schema(
|
|||||||
*args,
|
*args,
|
||||||
execute: bool = False,
|
execute: bool = False,
|
||||||
client: Prisma | None = None,
|
client: Prisma | None = None,
|
||||||
set_public_search_path: bool = False,
|
|
||||||
) -> list[dict] | int:
|
) -> list[dict] | int:
|
||||||
"""Internal: Execute raw SQL with proper schema handling.
|
"""Internal: Execute raw SQL with proper schema handling.
|
||||||
|
|
||||||
Use query_raw_with_schema() or execute_raw_with_schema() instead.
|
Use query_raw_with_schema() or execute_raw_with_schema() instead.
|
||||||
|
|
||||||
|
Supports placeholders:
|
||||||
|
- {schema_prefix}: Table/type prefix (e.g., "platform".)
|
||||||
|
- {schema}: Raw schema name for application tables (e.g., platform)
|
||||||
|
|
||||||
|
Note on pgvector types:
|
||||||
|
Use unqualified ::vector and <=> operator in queries. PostgreSQL resolves
|
||||||
|
these via search_path, which includes the schema where pgvector is installed
|
||||||
|
on all environments (local, CI, dev).
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
query_template: SQL query with {schema_prefix} placeholder
|
query_template: SQL query with {schema_prefix} and/or {schema} placeholders
|
||||||
*args: Query parameters
|
*args: Query parameters
|
||||||
execute: If False, executes SELECT query. If True, executes INSERT/UPDATE/DELETE.
|
execute: If False, executes SELECT query. If True, executes INSERT/UPDATE/DELETE.
|
||||||
client: Optional Prisma client for transactions (only used when execute=True).
|
client: Optional Prisma client for transactions (only used when execute=True).
|
||||||
set_public_search_path: If True, sets search_path to include public schema.
|
|
||||||
Needed for pgvector types and other public schema objects.
|
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
- list[dict] if execute=False (query results)
|
- list[dict] if execute=False (query results)
|
||||||
- int if execute=True (number of affected rows)
|
- int if execute=True (number of affected rows)
|
||||||
|
|
||||||
|
Example with vector type:
|
||||||
|
await execute_raw_with_schema(
|
||||||
|
'INSERT INTO {schema_prefix}"Embedding" (vec) VALUES ($1::vector)',
|
||||||
|
embedding_data
|
||||||
|
)
|
||||||
"""
|
"""
|
||||||
schema = get_database_schema()
|
schema = get_database_schema()
|
||||||
schema_prefix = f'"{schema}".' if schema != "public" else ""
|
schema_prefix = f'"{schema}".' if schema != "public" else ""
|
||||||
formatted_query = query_template.format(schema_prefix=schema_prefix)
|
|
||||||
|
formatted_query = query_template.format(
|
||||||
|
schema_prefix=schema_prefix,
|
||||||
|
schema=schema,
|
||||||
|
)
|
||||||
|
|
||||||
import prisma as prisma_module
|
import prisma as prisma_module
|
||||||
|
|
||||||
db_client = client if client else prisma_module.get_client()
|
db_client = client if client else prisma_module.get_client()
|
||||||
|
|
||||||
# Set search_path to include public schema if requested
|
|
||||||
# Prisma doesn't support the 'options' connection parameter, so we set it per-session
|
|
||||||
# This is idempotent and safe to call multiple times
|
|
||||||
if set_public_search_path:
|
|
||||||
await db_client.execute_raw(f"SET search_path = {schema}, public") # type: ignore
|
|
||||||
|
|
||||||
if execute:
|
if execute:
|
||||||
result = await db_client.execute_raw(formatted_query, *args) # type: ignore
|
result = await db_client.execute_raw(formatted_query, *args) # type: ignore
|
||||||
else:
|
else:
|
||||||
@@ -167,16 +163,12 @@ async def _raw_with_schema(
|
|||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
async def query_raw_with_schema(
|
async def query_raw_with_schema(query_template: str, *args) -> list[dict]:
|
||||||
query_template: str, *args, set_public_search_path: bool = False
|
|
||||||
) -> list[dict]:
|
|
||||||
"""Execute raw SQL SELECT query with proper schema handling.
|
"""Execute raw SQL SELECT query with proper schema handling.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
query_template: SQL query with {schema_prefix} placeholder
|
query_template: SQL query with {schema_prefix} and/or {schema} placeholders
|
||||||
*args: Query parameters
|
*args: Query parameters
|
||||||
set_public_search_path: If True, sets search_path to include public schema.
|
|
||||||
Needed for pgvector types and other public schema objects.
|
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
List of result rows as dictionaries
|
List of result rows as dictionaries
|
||||||
@@ -187,23 +179,20 @@ async def query_raw_with_schema(
|
|||||||
user_id
|
user_id
|
||||||
)
|
)
|
||||||
"""
|
"""
|
||||||
return await _raw_with_schema(query_template, *args, execute=False, set_public_search_path=set_public_search_path) # type: ignore
|
return await _raw_with_schema(query_template, *args, execute=False) # type: ignore
|
||||||
|
|
||||||
|
|
||||||
async def execute_raw_with_schema(
|
async def execute_raw_with_schema(
|
||||||
query_template: str,
|
query_template: str,
|
||||||
*args,
|
*args,
|
||||||
client: Prisma | None = None,
|
client: Prisma | None = None,
|
||||||
set_public_search_path: bool = False,
|
|
||||||
) -> int:
|
) -> int:
|
||||||
"""Execute raw SQL command (INSERT/UPDATE/DELETE) with proper schema handling.
|
"""Execute raw SQL command (INSERT/UPDATE/DELETE) with proper schema handling.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
query_template: SQL query with {schema_prefix} placeholder
|
query_template: SQL query with {schema_prefix} and/or {schema} placeholders
|
||||||
*args: Query parameters
|
*args: Query parameters
|
||||||
client: Optional Prisma client for transactions
|
client: Optional Prisma client for transactions
|
||||||
set_public_search_path: If True, sets search_path to include public schema.
|
|
||||||
Needed for pgvector types and other public schema objects.
|
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Number of affected rows
|
Number of affected rows
|
||||||
@@ -215,7 +204,7 @@ async def execute_raw_with_schema(
|
|||||||
client=tx # Optional transaction client
|
client=tx # Optional transaction client
|
||||||
)
|
)
|
||||||
"""
|
"""
|
||||||
return await _raw_with_schema(query_template, *args, execute=True, client=client, set_public_search_path=set_public_search_path) # type: ignore
|
return await _raw_with_schema(query_template, *args, execute=True, client=client) # type: ignore
|
||||||
|
|
||||||
|
|
||||||
class BaseDbModel(BaseModel):
|
class BaseDbModel(BaseModel):
|
||||||
|
|||||||
@@ -103,8 +103,18 @@ class RedisEventBus(BaseRedisEventBus[M], ABC):
|
|||||||
return redis.get_redis()
|
return redis.get_redis()
|
||||||
|
|
||||||
def publish_event(self, event: M, channel_key: str):
|
def publish_event(self, event: M, channel_key: str):
|
||||||
message, full_channel_name = self._serialize_message(event, channel_key)
|
"""
|
||||||
self.connection.publish(full_channel_name, message)
|
Publish an event to Redis. Gracefully handles connection failures
|
||||||
|
by logging the error instead of raising exceptions.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
message, full_channel_name = self._serialize_message(event, channel_key)
|
||||||
|
self.connection.publish(full_channel_name, message)
|
||||||
|
except Exception:
|
||||||
|
logger.exception(
|
||||||
|
f"Failed to publish event to Redis channel {channel_key}. "
|
||||||
|
"Event bus operation will continue without Redis connectivity."
|
||||||
|
)
|
||||||
|
|
||||||
def listen_events(self, channel_key: str) -> Generator[M, None, None]:
|
def listen_events(self, channel_key: str) -> Generator[M, None, None]:
|
||||||
pubsub, full_channel_name = self._get_pubsub_channel(
|
pubsub, full_channel_name = self._get_pubsub_channel(
|
||||||
@@ -128,9 +138,19 @@ class AsyncRedisEventBus(BaseRedisEventBus[M], ABC):
|
|||||||
return await redis.get_redis_async()
|
return await redis.get_redis_async()
|
||||||
|
|
||||||
async def publish_event(self, event: M, channel_key: str):
|
async def publish_event(self, event: M, channel_key: str):
|
||||||
message, full_channel_name = self._serialize_message(event, channel_key)
|
"""
|
||||||
connection = await self.connection
|
Publish an event to Redis. Gracefully handles connection failures
|
||||||
await connection.publish(full_channel_name, message)
|
by logging the error instead of raising exceptions.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
message, full_channel_name = self._serialize_message(event, channel_key)
|
||||||
|
connection = await self.connection
|
||||||
|
await connection.publish(full_channel_name, message)
|
||||||
|
except Exception:
|
||||||
|
logger.exception(
|
||||||
|
f"Failed to publish event to Redis channel {channel_key}. "
|
||||||
|
"Event bus operation will continue without Redis connectivity."
|
||||||
|
)
|
||||||
|
|
||||||
async def listen_events(self, channel_key: str) -> AsyncGenerator[M, None]:
|
async def listen_events(self, channel_key: str) -> AsyncGenerator[M, None]:
|
||||||
pubsub, full_channel_name = self._get_pubsub_channel(
|
pubsub, full_channel_name = self._get_pubsub_channel(
|
||||||
|
|||||||
56
autogpt_platform/backend/backend/data/event_bus_test.py
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
"""
|
||||||
|
Tests for event_bus graceful degradation when Redis is unavailable.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from unittest.mock import AsyncMock, patch
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
from pydantic import BaseModel
|
||||||
|
|
||||||
|
from backend.data.event_bus import AsyncRedisEventBus
|
||||||
|
|
||||||
|
|
||||||
|
class TestEvent(BaseModel):
|
||||||
|
"""Test event model."""
|
||||||
|
|
||||||
|
message: str
|
||||||
|
|
||||||
|
|
||||||
|
class TestNotificationBus(AsyncRedisEventBus[TestEvent]):
|
||||||
|
"""Test implementation of AsyncRedisEventBus."""
|
||||||
|
|
||||||
|
Model = TestEvent
|
||||||
|
|
||||||
|
@property
|
||||||
|
def event_bus_name(self) -> str:
|
||||||
|
return "test_event_bus"
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_publish_event_handles_connection_failure_gracefully():
|
||||||
|
"""Test that publish_event logs exception instead of raising when Redis is unavailable."""
|
||||||
|
bus = TestNotificationBus()
|
||||||
|
event = TestEvent(message="test message")
|
||||||
|
|
||||||
|
# Mock get_redis_async to raise connection error
|
||||||
|
with patch(
|
||||||
|
"backend.data.event_bus.redis.get_redis_async",
|
||||||
|
side_effect=ConnectionError("Authentication required."),
|
||||||
|
):
|
||||||
|
# Should not raise exception
|
||||||
|
await bus.publish_event(event, "test_channel")
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_publish_event_works_with_redis_available():
|
||||||
|
"""Test that publish_event works normally when Redis is available."""
|
||||||
|
bus = TestNotificationBus()
|
||||||
|
event = TestEvent(message="test message")
|
||||||
|
|
||||||
|
# Mock successful Redis connection
|
||||||
|
mock_redis = AsyncMock()
|
||||||
|
mock_redis.publish = AsyncMock()
|
||||||
|
|
||||||
|
with patch("backend.data.event_bus.redis.get_redis_async", return_value=mock_redis):
|
||||||
|
await bus.publish_event(event, "test_channel")
|
||||||
|
mock_redis.publish.assert_called_once()
|
||||||
@@ -81,7 +81,10 @@ class ExecutionContext(BaseModel):
|
|||||||
This includes information needed by blocks, sub-graphs, and execution management.
|
This includes information needed by blocks, sub-graphs, and execution management.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
safe_mode: bool = True
|
model_config = {"extra": "ignore"}
|
||||||
|
|
||||||
|
human_in_the_loop_safe_mode: bool = True
|
||||||
|
sensitive_action_safe_mode: bool = False
|
||||||
user_timezone: str = "UTC"
|
user_timezone: str = "UTC"
|
||||||
root_execution_id: Optional[str] = None
|
root_execution_id: Optional[str] = None
|
||||||
parent_execution_id: Optional[str] = None
|
parent_execution_id: Optional[str] = None
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ import logging
|
|||||||
import uuid
|
import uuid
|
||||||
from collections import defaultdict
|
from collections import defaultdict
|
||||||
from datetime import datetime, timezone
|
from datetime import datetime, timezone
|
||||||
from typing import TYPE_CHECKING, Any, Literal, Optional, cast
|
from typing import TYPE_CHECKING, Annotated, Any, Literal, Optional, cast
|
||||||
|
|
||||||
from prisma.enums import SubmissionStatus
|
from prisma.enums import SubmissionStatus
|
||||||
from prisma.models import (
|
from prisma.models import (
|
||||||
@@ -20,7 +20,7 @@ from prisma.types import (
|
|||||||
AgentNodeLinkCreateInput,
|
AgentNodeLinkCreateInput,
|
||||||
StoreListingVersionWhereInput,
|
StoreListingVersionWhereInput,
|
||||||
)
|
)
|
||||||
from pydantic import BaseModel, Field, create_model
|
from pydantic import BaseModel, BeforeValidator, Field, create_model
|
||||||
from pydantic.fields import computed_field
|
from pydantic.fields import computed_field
|
||||||
|
|
||||||
from backend.blocks.agent import AgentExecutorBlock
|
from backend.blocks.agent import AgentExecutorBlock
|
||||||
@@ -62,7 +62,31 @@ logger = logging.getLogger(__name__)
|
|||||||
|
|
||||||
|
|
||||||
class GraphSettings(BaseModel):
|
class GraphSettings(BaseModel):
|
||||||
human_in_the_loop_safe_mode: bool | None = None
|
# Use Annotated with BeforeValidator to coerce None to default values.
|
||||||
|
# This handles cases where the database has null values for these fields.
|
||||||
|
model_config = {"extra": "ignore"}
|
||||||
|
|
||||||
|
human_in_the_loop_safe_mode: Annotated[
|
||||||
|
bool, BeforeValidator(lambda v: v if v is not None else True)
|
||||||
|
] = True
|
||||||
|
sensitive_action_safe_mode: Annotated[
|
||||||
|
bool, BeforeValidator(lambda v: v if v is not None else False)
|
||||||
|
] = False
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def from_graph(
|
||||||
|
cls,
|
||||||
|
graph: "GraphModel",
|
||||||
|
hitl_safe_mode: bool | None = None,
|
||||||
|
sensitive_action_safe_mode: bool = False,
|
||||||
|
) -> "GraphSettings":
|
||||||
|
# Default to True if not explicitly set
|
||||||
|
if hitl_safe_mode is None:
|
||||||
|
hitl_safe_mode = True
|
||||||
|
return cls(
|
||||||
|
human_in_the_loop_safe_mode=hitl_safe_mode,
|
||||||
|
sensitive_action_safe_mode=sensitive_action_safe_mode,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class Link(BaseDbModel):
|
class Link(BaseDbModel):
|
||||||
@@ -244,10 +268,14 @@ class BaseGraph(BaseDbModel):
|
|||||||
return any(
|
return any(
|
||||||
node.block_id
|
node.block_id
|
||||||
for node in self.nodes
|
for node in self.nodes
|
||||||
if (
|
if node.block.block_type == BlockType.HUMAN_IN_THE_LOOP
|
||||||
node.block.block_type == BlockType.HUMAN_IN_THE_LOOP
|
)
|
||||||
or node.block.requires_human_review
|
|
||||||
)
|
@computed_field
|
||||||
|
@property
|
||||||
|
def has_sensitive_action(self) -> bool:
|
||||||
|
return any(
|
||||||
|
node.block_id for node in self.nodes if node.block.is_sensitive_action
|
||||||
)
|
)
|
||||||
|
|
||||||
@property
|
@property
|
||||||
|
|||||||
@@ -32,6 +32,87 @@ class ReviewResult(BaseModel):
|
|||||||
node_exec_id: str
|
node_exec_id: str
|
||||||
|
|
||||||
|
|
||||||
|
def get_auto_approve_key(graph_exec_id: str, node_id: str) -> str:
|
||||||
|
"""Generate the special nodeExecId key for auto-approval records."""
|
||||||
|
return f"auto_approve_{graph_exec_id}_{node_id}"
|
||||||
|
|
||||||
|
|
||||||
|
async def check_auto_approval(
|
||||||
|
graph_exec_id: str,
|
||||||
|
node_id: str,
|
||||||
|
) -> Optional[ReviewResult]:
|
||||||
|
"""
|
||||||
|
Check if there's an existing auto-approval record for this node in this execution.
|
||||||
|
|
||||||
|
Auto-approval records are stored as PendingHumanReview entries with a special
|
||||||
|
nodeExecId pattern: "auto_approve_{graph_exec_id}_{node_id}"
|
||||||
|
|
||||||
|
Args:
|
||||||
|
graph_exec_id: ID of the graph execution
|
||||||
|
node_id: ID of the node definition (not execution)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
ReviewResult if auto-approval found, None otherwise
|
||||||
|
"""
|
||||||
|
auto_approve_key = get_auto_approve_key(graph_exec_id, node_id)
|
||||||
|
|
||||||
|
# Look for the auto-approval record by its special key
|
||||||
|
auto_approved_review = await PendingHumanReview.prisma().find_unique(
|
||||||
|
where={"nodeExecId": auto_approve_key},
|
||||||
|
)
|
||||||
|
|
||||||
|
if auto_approved_review and auto_approved_review.status == ReviewStatus.APPROVED:
|
||||||
|
logger.info(
|
||||||
|
f"Found auto-approval for node {node_id} in execution {graph_exec_id}"
|
||||||
|
)
|
||||||
|
return ReviewResult(
|
||||||
|
data=auto_approved_review.payload,
|
||||||
|
status=ReviewStatus.APPROVED,
|
||||||
|
message="Auto-approved (user approved all future actions for this block)",
|
||||||
|
processed=True,
|
||||||
|
node_exec_id=auto_approve_key,
|
||||||
|
)
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
async def create_auto_approval_record(
|
||||||
|
user_id: str,
|
||||||
|
graph_exec_id: str,
|
||||||
|
graph_id: str,
|
||||||
|
graph_version: int,
|
||||||
|
node_id: str,
|
||||||
|
payload: SafeJsonData,
|
||||||
|
) -> None:
|
||||||
|
"""
|
||||||
|
Create an auto-approval record for a node in this execution.
|
||||||
|
|
||||||
|
This is stored as a PendingHumanReview with a special nodeExecId pattern
|
||||||
|
and status=APPROVED, so future executions of the same node can skip review.
|
||||||
|
"""
|
||||||
|
auto_approve_key = get_auto_approve_key(graph_exec_id, node_id)
|
||||||
|
|
||||||
|
await PendingHumanReview.prisma().upsert(
|
||||||
|
where={"nodeExecId": auto_approve_key},
|
||||||
|
data={
|
||||||
|
"create": {
|
||||||
|
"nodeExecId": auto_approve_key,
|
||||||
|
"userId": user_id,
|
||||||
|
"graphExecId": graph_exec_id,
|
||||||
|
"graphId": graph_id,
|
||||||
|
"graphVersion": graph_version,
|
||||||
|
"payload": SafeJson(payload),
|
||||||
|
"instructions": "Auto-approval record",
|
||||||
|
"editable": False,
|
||||||
|
"status": ReviewStatus.APPROVED,
|
||||||
|
"processed": True,
|
||||||
|
"reviewedAt": datetime.now(timezone.utc),
|
||||||
|
},
|
||||||
|
"update": {}, # Already exists, no update needed
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
async def get_or_create_human_review(
|
async def get_or_create_human_review(
|
||||||
user_id: str,
|
user_id: str,
|
||||||
node_exec_id: str,
|
node_exec_id: str,
|
||||||
|
|||||||
@@ -46,8 +46,8 @@ async def test_get_or_create_human_review_new(
|
|||||||
sample_db_review.status = ReviewStatus.WAITING
|
sample_db_review.status = ReviewStatus.WAITING
|
||||||
sample_db_review.processed = False
|
sample_db_review.processed = False
|
||||||
|
|
||||||
mock_upsert = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
|
mock_prisma = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
|
||||||
mock_upsert.return_value.upsert = AsyncMock(return_value=sample_db_review)
|
mock_prisma.return_value.upsert = AsyncMock(return_value=sample_db_review)
|
||||||
|
|
||||||
result = await get_or_create_human_review(
|
result = await get_or_create_human_review(
|
||||||
user_id="test-user-123",
|
user_id="test-user-123",
|
||||||
@@ -75,8 +75,8 @@ async def test_get_or_create_human_review_approved(
|
|||||||
sample_db_review.processed = False
|
sample_db_review.processed = False
|
||||||
sample_db_review.reviewMessage = "Looks good"
|
sample_db_review.reviewMessage = "Looks good"
|
||||||
|
|
||||||
mock_upsert = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
|
mock_prisma = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
|
||||||
mock_upsert.return_value.upsert = AsyncMock(return_value=sample_db_review)
|
mock_prisma.return_value.upsert = AsyncMock(return_value=sample_db_review)
|
||||||
|
|
||||||
result = await get_or_create_human_review(
|
result = await get_or_create_human_review(
|
||||||
user_id="test-user-123",
|
user_id="test-user-123",
|
||||||
|
|||||||
@@ -309,7 +309,7 @@ def ensure_embeddings_coverage():
|
|||||||
|
|
||||||
# Process in batches until no more missing embeddings
|
# Process in batches until no more missing embeddings
|
||||||
while True:
|
while True:
|
||||||
result = db_client.backfill_missing_embeddings(batch_size=10)
|
result = db_client.backfill_missing_embeddings(batch_size=100)
|
||||||
|
|
||||||
total_processed += result["processed"]
|
total_processed += result["processed"]
|
||||||
total_success += result["success"]
|
total_success += result["success"]
|
||||||
|
|||||||
@@ -873,11 +873,8 @@ async def add_graph_execution(
|
|||||||
settings = await gdb.get_graph_settings(user_id=user_id, graph_id=graph_id)
|
settings = await gdb.get_graph_settings(user_id=user_id, graph_id=graph_id)
|
||||||
|
|
||||||
execution_context = ExecutionContext(
|
execution_context = ExecutionContext(
|
||||||
safe_mode=(
|
human_in_the_loop_safe_mode=settings.human_in_the_loop_safe_mode,
|
||||||
settings.human_in_the_loop_safe_mode
|
sensitive_action_safe_mode=settings.sensitive_action_safe_mode,
|
||||||
if settings.human_in_the_loop_safe_mode is not None
|
|
||||||
else True
|
|
||||||
),
|
|
||||||
user_timezone=(
|
user_timezone=(
|
||||||
user.timezone if user.timezone != USER_TIMEZONE_NOT_SET else "UTC"
|
user.timezone if user.timezone != USER_TIMEZONE_NOT_SET else "UTC"
|
||||||
),
|
),
|
||||||
|
|||||||
@@ -386,6 +386,7 @@ async def test_add_graph_execution_is_repeatable(mocker: MockerFixture):
|
|||||||
mock_user.timezone = "UTC"
|
mock_user.timezone = "UTC"
|
||||||
mock_settings = mocker.MagicMock()
|
mock_settings = mocker.MagicMock()
|
||||||
mock_settings.human_in_the_loop_safe_mode = True
|
mock_settings.human_in_the_loop_safe_mode = True
|
||||||
|
mock_settings.sensitive_action_safe_mode = False
|
||||||
|
|
||||||
mock_udb.get_user_by_id = mocker.AsyncMock(return_value=mock_user)
|
mock_udb.get_user_by_id = mocker.AsyncMock(return_value=mock_user)
|
||||||
mock_gdb.get_graph_settings = mocker.AsyncMock(return_value=mock_settings)
|
mock_gdb.get_graph_settings = mocker.AsyncMock(return_value=mock_settings)
|
||||||
@@ -651,6 +652,7 @@ async def test_add_graph_execution_with_nodes_to_skip(mocker: MockerFixture):
|
|||||||
mock_user.timezone = "UTC"
|
mock_user.timezone = "UTC"
|
||||||
mock_settings = mocker.MagicMock()
|
mock_settings = mocker.MagicMock()
|
||||||
mock_settings.human_in_the_loop_safe_mode = True
|
mock_settings.human_in_the_loop_safe_mode = True
|
||||||
|
mock_settings.sensitive_action_safe_mode = False
|
||||||
|
|
||||||
mock_udb.get_user_by_id = mocker.AsyncMock(return_value=mock_user)
|
mock_udb.get_user_by_id = mocker.AsyncMock(return_value=mock_user)
|
||||||
mock_gdb.get_graph_settings = mocker.AsyncMock(return_value=mock_settings)
|
mock_gdb.get_graph_settings = mocker.AsyncMock(return_value=mock_settings)
|
||||||
|
|||||||
@@ -1,9 +1,10 @@
|
|||||||
-- CreateExtension
|
-- CreateExtension
|
||||||
-- Supabase: pgvector must be enabled via Dashboard → Database → Extensions first
|
-- Supabase: pgvector must be enabled via Dashboard → Database → Extensions first
|
||||||
-- Create in public schema so vector type is available across all schemas
|
-- Creates extension in current schema (determined by search_path from DATABASE_URL ?schema= param)
|
||||||
|
-- This ensures vector type is in the same schema as tables, making ::vector work without explicit qualification
|
||||||
DO $$
|
DO $$
|
||||||
BEGIN
|
BEGIN
|
||||||
CREATE EXTENSION IF NOT EXISTS "vector" WITH SCHEMA "public";
|
CREATE EXTENSION IF NOT EXISTS "vector";
|
||||||
EXCEPTION WHEN OTHERS THEN
|
EXCEPTION WHEN OTHERS THEN
|
||||||
RAISE NOTICE 'vector extension not available or already exists, skipping';
|
RAISE NOTICE 'vector extension not available or already exists, skipping';
|
||||||
END $$;
|
END $$;
|
||||||
@@ -19,7 +20,7 @@ CREATE TABLE "UnifiedContentEmbedding" (
|
|||||||
"contentType" "ContentType" NOT NULL,
|
"contentType" "ContentType" NOT NULL,
|
||||||
"contentId" TEXT NOT NULL,
|
"contentId" TEXT NOT NULL,
|
||||||
"userId" TEXT,
|
"userId" TEXT,
|
||||||
"embedding" public.vector(1536) NOT NULL,
|
"embedding" vector(1536) NOT NULL,
|
||||||
"searchableText" TEXT NOT NULL,
|
"searchableText" TEXT NOT NULL,
|
||||||
"metadata" JSONB NOT NULL DEFAULT '{}',
|
"metadata" JSONB NOT NULL DEFAULT '{}',
|
||||||
|
|
||||||
@@ -45,4 +46,4 @@ CREATE UNIQUE INDEX "UnifiedContentEmbedding_contentType_contentId_userId_key" O
|
|||||||
-- Uses cosine distance operator (<=>), which matches the query in hybrid_search.py
|
-- Uses cosine distance operator (<=>), which matches the query in hybrid_search.py
|
||||||
-- Note: Drop first in case Prisma created a btree index (Prisma doesn't support HNSW)
|
-- Note: Drop first in case Prisma created a btree index (Prisma doesn't support HNSW)
|
||||||
DROP INDEX IF EXISTS "UnifiedContentEmbedding_embedding_idx";
|
DROP INDEX IF EXISTS "UnifiedContentEmbedding_embedding_idx";
|
||||||
CREATE INDEX "UnifiedContentEmbedding_embedding_idx" ON "UnifiedContentEmbedding" USING hnsw ("embedding" public.vector_cosine_ops);
|
CREATE INDEX "UnifiedContentEmbedding_embedding_idx" ON "UnifiedContentEmbedding" USING hnsw ("embedding" vector_cosine_ops);
|
||||||
|
|||||||
@@ -0,0 +1,7 @@
|
|||||||
|
-- Remove NodeExecution foreign key from PendingHumanReview
|
||||||
|
-- The nodeExecId column remains as the primary key, but we remove the FK constraint
|
||||||
|
-- to AgentNodeExecution since PendingHumanReview records can persist after node
|
||||||
|
-- execution records are deleted.
|
||||||
|
|
||||||
|
-- Drop foreign key constraint that linked PendingHumanReview.nodeExecId to AgentNodeExecution.id
|
||||||
|
ALTER TABLE "platform"."PendingHumanReview" DROP CONSTRAINT IF EXISTS "PendingHumanReview_nodeExecId_fkey";
|
||||||
@@ -517,8 +517,6 @@ model AgentNodeExecution {
|
|||||||
|
|
||||||
stats Json?
|
stats Json?
|
||||||
|
|
||||||
PendingHumanReview PendingHumanReview?
|
|
||||||
|
|
||||||
@@index([agentGraphExecutionId, agentNodeId, executionStatus])
|
@@index([agentGraphExecutionId, agentNodeId, executionStatus])
|
||||||
@@index([agentNodeId, executionStatus])
|
@@index([agentNodeId, executionStatus])
|
||||||
@@index([addedTime, queuedTime])
|
@@index([addedTime, queuedTime])
|
||||||
@@ -567,6 +565,7 @@ enum ReviewStatus {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Pending human reviews for Human-in-the-loop blocks
|
// Pending human reviews for Human-in-the-loop blocks
|
||||||
|
// Also stores auto-approval records with special nodeExecId patterns (e.g., "auto_approve_{graph_exec_id}_{node_id}")
|
||||||
model PendingHumanReview {
|
model PendingHumanReview {
|
||||||
nodeExecId String @id
|
nodeExecId String @id
|
||||||
userId String
|
userId String
|
||||||
@@ -585,7 +584,6 @@ model PendingHumanReview {
|
|||||||
reviewedAt DateTime?
|
reviewedAt DateTime?
|
||||||
|
|
||||||
User User @relation(fields: [userId], references: [id], onDelete: Cascade)
|
User User @relation(fields: [userId], references: [id], onDelete: Cascade)
|
||||||
NodeExecution AgentNodeExecution @relation(fields: [nodeExecId], references: [id], onDelete: Cascade)
|
|
||||||
GraphExecution AgentGraphExecution @relation(fields: [graphExecId], references: [id], onDelete: Cascade)
|
GraphExecution AgentGraphExecution @relation(fields: [graphExecId], references: [id], onDelete: Cascade)
|
||||||
|
|
||||||
@@unique([nodeExecId]) // One pending review per node execution
|
@@unique([nodeExecId]) // One pending review per node execution
|
||||||
|
|||||||
@@ -366,12 +366,12 @@ def generate_block_markdown(
|
|||||||
lines.append("")
|
lines.append("")
|
||||||
|
|
||||||
# What it is (full description)
|
# What it is (full description)
|
||||||
lines.append(f"### What it is")
|
lines.append("### What it is")
|
||||||
lines.append(block.description or "No description available.")
|
lines.append(block.description or "No description available.")
|
||||||
lines.append("")
|
lines.append("")
|
||||||
|
|
||||||
# How it works (manual section)
|
# How it works (manual section)
|
||||||
lines.append(f"### How it works")
|
lines.append("### How it works")
|
||||||
how_it_works = manual_content.get(
|
how_it_works = manual_content.get(
|
||||||
"how_it_works", "_Add technical explanation here._"
|
"how_it_works", "_Add technical explanation here._"
|
||||||
)
|
)
|
||||||
@@ -383,7 +383,7 @@ def generate_block_markdown(
|
|||||||
# Inputs table (auto-generated)
|
# Inputs table (auto-generated)
|
||||||
visible_inputs = [f for f in block.inputs if not f.hidden]
|
visible_inputs = [f for f in block.inputs if not f.hidden]
|
||||||
if visible_inputs:
|
if visible_inputs:
|
||||||
lines.append(f"### Inputs")
|
lines.append("### Inputs")
|
||||||
lines.append("")
|
lines.append("")
|
||||||
lines.append("| Input | Description | Type | Required |")
|
lines.append("| Input | Description | Type | Required |")
|
||||||
lines.append("|-------|-------------|------|----------|")
|
lines.append("|-------|-------------|------|----------|")
|
||||||
@@ -400,7 +400,7 @@ def generate_block_markdown(
|
|||||||
# Outputs table (auto-generated)
|
# Outputs table (auto-generated)
|
||||||
visible_outputs = [f for f in block.outputs if not f.hidden]
|
visible_outputs = [f for f in block.outputs if not f.hidden]
|
||||||
if visible_outputs:
|
if visible_outputs:
|
||||||
lines.append(f"### Outputs")
|
lines.append("### Outputs")
|
||||||
lines.append("")
|
lines.append("")
|
||||||
lines.append("| Output | Description | Type |")
|
lines.append("| Output | Description | Type |")
|
||||||
lines.append("|--------|-------------|------|")
|
lines.append("|--------|-------------|------|")
|
||||||
@@ -414,7 +414,7 @@ def generate_block_markdown(
|
|||||||
lines.append("")
|
lines.append("")
|
||||||
|
|
||||||
# Possible use case (manual section)
|
# Possible use case (manual section)
|
||||||
lines.append(f"### Possible use case")
|
lines.append("### Possible use case")
|
||||||
use_case = manual_content.get("use_case", "_Add practical use case examples here._")
|
use_case = manual_content.get("use_case", "_Add practical use case examples here._")
|
||||||
lines.append("<!-- MANUAL: use_case -->")
|
lines.append("<!-- MANUAL: use_case -->")
|
||||||
lines.append(use_case)
|
lines.append(use_case)
|
||||||
|
|||||||
@@ -11,6 +11,7 @@
|
|||||||
"forked_from_version": null,
|
"forked_from_version": null,
|
||||||
"has_external_trigger": false,
|
"has_external_trigger": false,
|
||||||
"has_human_in_the_loop": false,
|
"has_human_in_the_loop": false,
|
||||||
|
"has_sensitive_action": false,
|
||||||
"id": "graph-123",
|
"id": "graph-123",
|
||||||
"input_schema": {
|
"input_schema": {
|
||||||
"properties": {},
|
"properties": {},
|
||||||
|
|||||||
@@ -11,6 +11,7 @@
|
|||||||
"forked_from_version": null,
|
"forked_from_version": null,
|
||||||
"has_external_trigger": false,
|
"has_external_trigger": false,
|
||||||
"has_human_in_the_loop": false,
|
"has_human_in_the_loop": false,
|
||||||
|
"has_sensitive_action": false,
|
||||||
"id": "graph-123",
|
"id": "graph-123",
|
||||||
"input_schema": {
|
"input_schema": {
|
||||||
"properties": {},
|
"properties": {},
|
||||||
|
|||||||
@@ -27,6 +27,8 @@
|
|||||||
"properties": {}
|
"properties": {}
|
||||||
},
|
},
|
||||||
"has_external_trigger": false,
|
"has_external_trigger": false,
|
||||||
|
"has_human_in_the_loop": false,
|
||||||
|
"has_sensitive_action": false,
|
||||||
"trigger_setup_info": null,
|
"trigger_setup_info": null,
|
||||||
"new_output": false,
|
"new_output": false,
|
||||||
"can_access_graph": true,
|
"can_access_graph": true,
|
||||||
@@ -34,7 +36,8 @@
|
|||||||
"is_favorite": false,
|
"is_favorite": false,
|
||||||
"recommended_schedule_cron": null,
|
"recommended_schedule_cron": null,
|
||||||
"settings": {
|
"settings": {
|
||||||
"human_in_the_loop_safe_mode": null
|
"human_in_the_loop_safe_mode": true,
|
||||||
|
"sensitive_action_safe_mode": false
|
||||||
},
|
},
|
||||||
"marketplace_listing": null
|
"marketplace_listing": null
|
||||||
},
|
},
|
||||||
@@ -65,6 +68,8 @@
|
|||||||
"properties": {}
|
"properties": {}
|
||||||
},
|
},
|
||||||
"has_external_trigger": false,
|
"has_external_trigger": false,
|
||||||
|
"has_human_in_the_loop": false,
|
||||||
|
"has_sensitive_action": false,
|
||||||
"trigger_setup_info": null,
|
"trigger_setup_info": null,
|
||||||
"new_output": false,
|
"new_output": false,
|
||||||
"can_access_graph": false,
|
"can_access_graph": false,
|
||||||
@@ -72,7 +77,8 @@
|
|||||||
"is_favorite": false,
|
"is_favorite": false,
|
||||||
"recommended_schedule_cron": null,
|
"recommended_schedule_cron": null,
|
||||||
"settings": {
|
"settings": {
|
||||||
"human_in_the_loop_safe_mode": null
|
"human_in_the_loop_safe_mode": true,
|
||||||
|
"sensitive_action_safe_mode": false
|
||||||
},
|
},
|
||||||
"marketplace_listing": null
|
"marketplace_listing": null
|
||||||
}
|
}
|
||||||
|
|||||||
BIN
autogpt_platform/frontend/public/integrations/amazon.png
Normal file
|
After Width: | Height: | Size: 5.9 KiB |
|
After Width: | Height: | Size: 19 KiB |
BIN
autogpt_platform/frontend/public/integrations/cohere.png
Normal file
|
After Width: | Height: | Size: 26 KiB |
BIN
autogpt_platform/frontend/public/integrations/deepseek.png
Normal file
|
After Width: | Height: | Size: 25 KiB |
BIN
autogpt_platform/frontend/public/integrations/gemini.png
Normal file
|
After Width: | Height: | Size: 72 KiB |
BIN
autogpt_platform/frontend/public/integrations/gryphe.png
Normal file
|
After Width: | Height: | Size: 21 KiB |
BIN
autogpt_platform/frontend/public/integrations/microsoft.webp
Normal file
|
After Width: | Height: | Size: 374 B |
BIN
autogpt_platform/frontend/public/integrations/mistral.png
Normal file
|
After Width: | Height: | Size: 663 B |
BIN
autogpt_platform/frontend/public/integrations/moonshot.png
Normal file
|
After Width: | Height: | Size: 40 KiB |
BIN
autogpt_platform/frontend/public/integrations/nousresearch.avif
Normal file
|
After Width: | Height: | Size: 4.1 KiB |
BIN
autogpt_platform/frontend/public/integrations/perplexity.webp
Normal file
|
After Width: | Height: | Size: 2.5 KiB |
BIN
autogpt_platform/frontend/public/integrations/qwen.png
Normal file
|
After Width: | Height: | Size: 52 KiB |
BIN
autogpt_platform/frontend/public/integrations/xai.webp
Normal file
|
After Width: | Height: | Size: 1.8 KiB |
@@ -5,10 +5,11 @@ import {
|
|||||||
TooltipContent,
|
TooltipContent,
|
||||||
TooltipTrigger,
|
TooltipTrigger,
|
||||||
} from "@/components/atoms/Tooltip/BaseTooltip";
|
} from "@/components/atoms/Tooltip/BaseTooltip";
|
||||||
import { PlayIcon, StopIcon } from "@phosphor-icons/react";
|
import { CircleNotchIcon, PlayIcon, StopIcon } from "@phosphor-icons/react";
|
||||||
import { useShallow } from "zustand/react/shallow";
|
import { useShallow } from "zustand/react/shallow";
|
||||||
import { RunInputDialog } from "../RunInputDialog/RunInputDialog";
|
import { RunInputDialog } from "../RunInputDialog/RunInputDialog";
|
||||||
import { useRunGraph } from "./useRunGraph";
|
import { useRunGraph } from "./useRunGraph";
|
||||||
|
import { cn } from "@/lib/utils";
|
||||||
|
|
||||||
export const RunGraph = ({ flowID }: { flowID: string | null }) => {
|
export const RunGraph = ({ flowID }: { flowID: string | null }) => {
|
||||||
const {
|
const {
|
||||||
@@ -24,6 +25,31 @@ export const RunGraph = ({ flowID }: { flowID: string | null }) => {
|
|||||||
useShallow((state) => state.isGraphRunning),
|
useShallow((state) => state.isGraphRunning),
|
||||||
);
|
);
|
||||||
|
|
||||||
|
const isLoading = isExecutingGraph || isTerminatingGraph || isSaving;
|
||||||
|
|
||||||
|
// Determine which icon to show with proper animation
|
||||||
|
const renderIcon = () => {
|
||||||
|
const iconClass = cn(
|
||||||
|
"size-4 transition-transform duration-200 ease-out",
|
||||||
|
!isLoading && "group-hover:scale-110",
|
||||||
|
);
|
||||||
|
|
||||||
|
if (isLoading) {
|
||||||
|
return (
|
||||||
|
<CircleNotchIcon
|
||||||
|
className={cn(iconClass, "animate-spin")}
|
||||||
|
weight="bold"
|
||||||
|
/>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (isGraphRunning) {
|
||||||
|
return <StopIcon className={iconClass} weight="fill" />;
|
||||||
|
}
|
||||||
|
|
||||||
|
return <PlayIcon className={iconClass} weight="fill" />;
|
||||||
|
};
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<>
|
<>
|
||||||
<Tooltip>
|
<Tooltip>
|
||||||
@@ -33,18 +59,18 @@ export const RunGraph = ({ flowID }: { flowID: string | null }) => {
|
|||||||
variant={isGraphRunning ? "destructive" : "primary"}
|
variant={isGraphRunning ? "destructive" : "primary"}
|
||||||
data-id={isGraphRunning ? "stop-graph-button" : "run-graph-button"}
|
data-id={isGraphRunning ? "stop-graph-button" : "run-graph-button"}
|
||||||
onClick={isGraphRunning ? handleStopGraph : handleRunGraph}
|
onClick={isGraphRunning ? handleStopGraph : handleRunGraph}
|
||||||
disabled={!flowID || isExecutingGraph || isTerminatingGraph}
|
disabled={!flowID || isLoading}
|
||||||
loading={isExecutingGraph || isTerminatingGraph || isSaving}
|
className="group"
|
||||||
>
|
>
|
||||||
{!isGraphRunning ? (
|
{renderIcon()}
|
||||||
<PlayIcon className="size-4" />
|
|
||||||
) : (
|
|
||||||
<StopIcon className="size-4" />
|
|
||||||
)}
|
|
||||||
</Button>
|
</Button>
|
||||||
</TooltipTrigger>
|
</TooltipTrigger>
|
||||||
<TooltipContent>
|
<TooltipContent>
|
||||||
{isGraphRunning ? "Stop agent" : "Run agent"}
|
{isLoading
|
||||||
|
? "Processing..."
|
||||||
|
: isGraphRunning
|
||||||
|
? "Stop agent"
|
||||||
|
: "Run agent"}
|
||||||
</TooltipContent>
|
</TooltipContent>
|
||||||
</Tooltip>
|
</Tooltip>
|
||||||
<RunInputDialog
|
<RunInputDialog
|
||||||
|
|||||||
@@ -61,63 +61,67 @@ export const RunInputDialog = ({
|
|||||||
isOpen,
|
isOpen,
|
||||||
set: setIsOpen,
|
set: setIsOpen,
|
||||||
}}
|
}}
|
||||||
styling={{ maxWidth: "600px", minWidth: "600px" }}
|
styling={{ maxWidth: "700px", minWidth: "700px" }}
|
||||||
>
|
>
|
||||||
<Dialog.Content>
|
<Dialog.Content>
|
||||||
<div className="space-y-6 p-1" data-id="run-input-dialog-content">
|
<div
|
||||||
{/* Credentials Section */}
|
className="grid grid-cols-[1fr_auto] gap-10 p-1"
|
||||||
{hasCredentials() && credentialFields.length > 0 && (
|
data-id="run-input-dialog-content"
|
||||||
<div data-id="run-input-credentials-section">
|
>
|
||||||
<div className="mb-4">
|
<div className="space-y-6">
|
||||||
<Text variant="h4" className="text-gray-900">
|
{/* Credentials Section */}
|
||||||
Credentials
|
{hasCredentials() && credentialFields.length > 0 && (
|
||||||
</Text>
|
<div data-id="run-input-credentials-section">
|
||||||
|
<div className="mb-4">
|
||||||
|
<Text variant="h4" className="text-gray-900">
|
||||||
|
Credentials
|
||||||
|
</Text>
|
||||||
|
</div>
|
||||||
|
<div className="px-2" data-id="run-input-credentials-form">
|
||||||
|
<CredentialsGroupedView
|
||||||
|
credentialFields={credentialFields}
|
||||||
|
requiredCredentials={requiredCredentials}
|
||||||
|
inputCredentials={credentialValues}
|
||||||
|
inputValues={inputValues}
|
||||||
|
onCredentialChange={handleCredentialFieldChange}
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
</div>
|
</div>
|
||||||
<div className="px-2" data-id="run-input-credentials-form">
|
)}
|
||||||
<CredentialsGroupedView
|
|
||||||
credentialFields={credentialFields}
|
|
||||||
requiredCredentials={requiredCredentials}
|
|
||||||
inputCredentials={credentialValues}
|
|
||||||
inputValues={inputValues}
|
|
||||||
onCredentialChange={handleCredentialFieldChange}
|
|
||||||
/>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
)}
|
|
||||||
|
|
||||||
{/* Inputs Section */}
|
{/* Inputs Section */}
|
||||||
{hasInputs() && (
|
{hasInputs() && (
|
||||||
<div data-id="run-input-inputs-section">
|
<div data-id="run-input-inputs-section">
|
||||||
<div className="mb-4">
|
<div className="mb-4">
|
||||||
<Text variant="h4" className="text-gray-900">
|
<Text variant="h4" className="text-gray-900">
|
||||||
Inputs
|
Inputs
|
||||||
</Text>
|
</Text>
|
||||||
|
</div>
|
||||||
|
<div data-id="run-input-inputs-form">
|
||||||
|
<FormRenderer
|
||||||
|
jsonSchema={inputSchema as RJSFSchema}
|
||||||
|
handleChange={(v) => handleInputChange(v.formData)}
|
||||||
|
uiSchema={uiSchema}
|
||||||
|
initialValues={{}}
|
||||||
|
formContext={{
|
||||||
|
showHandles: false,
|
||||||
|
size: "large",
|
||||||
|
}}
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
</div>
|
</div>
|
||||||
<div data-id="run-input-inputs-form">
|
)}
|
||||||
<FormRenderer
|
</div>
|
||||||
jsonSchema={inputSchema as RJSFSchema}
|
|
||||||
handleChange={(v) => handleInputChange(v.formData)}
|
|
||||||
uiSchema={uiSchema}
|
|
||||||
initialValues={{}}
|
|
||||||
formContext={{
|
|
||||||
showHandles: false,
|
|
||||||
size: "large",
|
|
||||||
}}
|
|
||||||
/>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
)}
|
|
||||||
|
|
||||||
{/* Action Button */}
|
|
||||||
<div
|
<div
|
||||||
className="flex justify-end pt-2"
|
className="flex flex-col items-end justify-start"
|
||||||
data-id="run-input-actions-section"
|
data-id="run-input-actions-section"
|
||||||
>
|
>
|
||||||
{purpose === "run" && (
|
{purpose === "run" && (
|
||||||
<Button
|
<Button
|
||||||
variant="primary"
|
variant="primary"
|
||||||
size="large"
|
size="large"
|
||||||
className="group h-fit min-w-0 gap-2"
|
className="group h-fit min-w-0 gap-2 px-10"
|
||||||
onClick={handleManualRun}
|
onClick={handleManualRun}
|
||||||
loading={isExecutingGraph}
|
loading={isExecutingGraph}
|
||||||
data-id="run-input-manual-run-button"
|
data-id="run-input-manual-run-button"
|
||||||
@@ -132,7 +136,7 @@ export const RunInputDialog = ({
|
|||||||
<Button
|
<Button
|
||||||
variant="primary"
|
variant="primary"
|
||||||
size="large"
|
size="large"
|
||||||
className="group h-fit min-w-0 gap-2"
|
className="group h-fit min-w-0 gap-2 px-10"
|
||||||
onClick={() => setOpenCronSchedulerDialog(true)}
|
onClick={() => setOpenCronSchedulerDialog(true)}
|
||||||
data-id="run-input-schedule-button"
|
data-id="run-input-schedule-button"
|
||||||
>
|
>
|
||||||
|
|||||||
@@ -18,69 +18,110 @@ interface Props {
|
|||||||
fullWidth?: boolean;
|
fullWidth?: boolean;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
interface SafeModeButtonProps {
|
||||||
|
isEnabled: boolean;
|
||||||
|
label: string;
|
||||||
|
tooltipEnabled: string;
|
||||||
|
tooltipDisabled: string;
|
||||||
|
onToggle: () => void;
|
||||||
|
isPending: boolean;
|
||||||
|
fullWidth?: boolean;
|
||||||
|
}
|
||||||
|
|
||||||
|
function SafeModeButton({
|
||||||
|
isEnabled,
|
||||||
|
label,
|
||||||
|
tooltipEnabled,
|
||||||
|
tooltipDisabled,
|
||||||
|
onToggle,
|
||||||
|
isPending,
|
||||||
|
fullWidth = false,
|
||||||
|
}: SafeModeButtonProps) {
|
||||||
|
return (
|
||||||
|
<Tooltip delayDuration={100}>
|
||||||
|
<TooltipTrigger asChild>
|
||||||
|
<Button
|
||||||
|
variant={isEnabled ? "primary" : "outline"}
|
||||||
|
size="small"
|
||||||
|
onClick={onToggle}
|
||||||
|
disabled={isPending}
|
||||||
|
className={cn("justify-start", fullWidth ? "w-full" : "")}
|
||||||
|
>
|
||||||
|
{isEnabled ? (
|
||||||
|
<>
|
||||||
|
<ShieldCheckIcon weight="bold" size={16} />
|
||||||
|
<Text variant="body" className="text-zinc-200">
|
||||||
|
{label}: ON
|
||||||
|
</Text>
|
||||||
|
</>
|
||||||
|
) : (
|
||||||
|
<>
|
||||||
|
<ShieldIcon weight="bold" size={16} />
|
||||||
|
<Text variant="body" className="text-zinc-600">
|
||||||
|
{label}: OFF
|
||||||
|
</Text>
|
||||||
|
</>
|
||||||
|
)}
|
||||||
|
</Button>
|
||||||
|
</TooltipTrigger>
|
||||||
|
<TooltipContent>
|
||||||
|
<div className="text-center">
|
||||||
|
<div className="font-medium">
|
||||||
|
{label}: {isEnabled ? "ON" : "OFF"}
|
||||||
|
</div>
|
||||||
|
<div className="mt-1 text-xs text-muted-foreground">
|
||||||
|
{isEnabled ? tooltipEnabled : tooltipDisabled}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</TooltipContent>
|
||||||
|
</Tooltip>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
export function FloatingSafeModeToggle({
|
export function FloatingSafeModeToggle({
|
||||||
graph,
|
graph,
|
||||||
className,
|
className,
|
||||||
fullWidth = false,
|
fullWidth = false,
|
||||||
}: Props) {
|
}: Props) {
|
||||||
const {
|
const {
|
||||||
currentSafeMode,
|
currentHITLSafeMode,
|
||||||
|
showHITLToggle,
|
||||||
|
handleHITLToggle,
|
||||||
|
currentSensitiveActionSafeMode,
|
||||||
|
showSensitiveActionToggle,
|
||||||
|
handleSensitiveActionToggle,
|
||||||
isPending,
|
isPending,
|
||||||
shouldShowToggle,
|
shouldShowToggle,
|
||||||
isStateUndetermined,
|
|
||||||
handleToggle,
|
|
||||||
} = useAgentSafeMode(graph);
|
} = useAgentSafeMode(graph);
|
||||||
|
|
||||||
if (!shouldShowToggle || isStateUndetermined || isPending) {
|
if (!shouldShowToggle || isPending) {
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<div className={cn("fixed z-50", className)}>
|
<div className={cn("fixed z-50 flex flex-col gap-2", className)}>
|
||||||
<Tooltip delayDuration={100}>
|
{showHITLToggle && (
|
||||||
<TooltipTrigger asChild>
|
<SafeModeButton
|
||||||
<Button
|
isEnabled={currentHITLSafeMode}
|
||||||
variant={currentSafeMode! ? "primary" : "outline"}
|
label="Human in the loop block approval"
|
||||||
key={graph.id}
|
tooltipEnabled="The agent will pause at human-in-the-loop blocks and wait for your approval"
|
||||||
size="small"
|
tooltipDisabled="Human in the loop blocks will proceed automatically"
|
||||||
title={
|
onToggle={handleHITLToggle}
|
||||||
currentSafeMode!
|
isPending={isPending}
|
||||||
? "Safe Mode: ON. Human in the loop blocks require manual review"
|
fullWidth={fullWidth}
|
||||||
: "Safe Mode: OFF. Human in the loop blocks proceed automatically"
|
/>
|
||||||
}
|
)}
|
||||||
onClick={handleToggle}
|
{showSensitiveActionToggle && (
|
||||||
className={cn(fullWidth ? "w-full" : "")}
|
<SafeModeButton
|
||||||
>
|
isEnabled={currentSensitiveActionSafeMode}
|
||||||
{currentSafeMode! ? (
|
label="Sensitive actions blocks approval"
|
||||||
<>
|
tooltipEnabled="The agent will pause at sensitive action blocks and wait for your approval"
|
||||||
<ShieldCheckIcon weight="bold" size={16} />
|
tooltipDisabled="Sensitive action blocks will proceed automatically"
|
||||||
<Text variant="body" className="text-zinc-200">
|
onToggle={handleSensitiveActionToggle}
|
||||||
Safe Mode: ON
|
isPending={isPending}
|
||||||
</Text>
|
fullWidth={fullWidth}
|
||||||
</>
|
/>
|
||||||
) : (
|
)}
|
||||||
<>
|
|
||||||
<ShieldIcon weight="bold" size={16} />
|
|
||||||
<Text variant="body" className="text-zinc-600">
|
|
||||||
Safe Mode: OFF
|
|
||||||
</Text>
|
|
||||||
</>
|
|
||||||
)}
|
|
||||||
</Button>
|
|
||||||
</TooltipTrigger>
|
|
||||||
<TooltipContent>
|
|
||||||
<div className="text-center">
|
|
||||||
<div className="font-medium">
|
|
||||||
Safe Mode: {currentSafeMode! ? "ON" : "OFF"}
|
|
||||||
</div>
|
|
||||||
<div className="mt-1 text-xs text-muted-foreground">
|
|
||||||
{currentSafeMode!
|
|
||||||
? "Human in the loop blocks require manual review"
|
|
||||||
: "Human in the loop blocks proceed automatically"}
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</TooltipContent>
|
|
||||||
</Tooltip>
|
|
||||||
</div>
|
</div>
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -53,14 +53,14 @@ export const CustomControls = memo(
|
|||||||
const controls = [
|
const controls = [
|
||||||
{
|
{
|
||||||
id: "zoom-in-button",
|
id: "zoom-in-button",
|
||||||
icon: <PlusIcon className="size-4" />,
|
icon: <PlusIcon className="size-3.5 text-zinc-600" />,
|
||||||
label: "Zoom In",
|
label: "Zoom In",
|
||||||
onClick: () => zoomIn(),
|
onClick: () => zoomIn(),
|
||||||
className: "h-10 w-10 border-none",
|
className: "h-10 w-10 border-none",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
id: "zoom-out-button",
|
id: "zoom-out-button",
|
||||||
icon: <MinusIcon className="size-4" />,
|
icon: <MinusIcon className="size-3.5 text-zinc-600" />,
|
||||||
label: "Zoom Out",
|
label: "Zoom Out",
|
||||||
onClick: () => zoomOut(),
|
onClick: () => zoomOut(),
|
||||||
className: "h-10 w-10 border-none",
|
className: "h-10 w-10 border-none",
|
||||||
@@ -68,9 +68,9 @@ export const CustomControls = memo(
|
|||||||
{
|
{
|
||||||
id: "tutorial-button",
|
id: "tutorial-button",
|
||||||
icon: isTutorialLoading ? (
|
icon: isTutorialLoading ? (
|
||||||
<CircleNotchIcon className="size-4 animate-spin" />
|
<CircleNotchIcon className="size-3.5 animate-spin text-zinc-600" />
|
||||||
) : (
|
) : (
|
||||||
<ChalkboardIcon className="size-4" />
|
<ChalkboardIcon className="size-3.5 text-zinc-600" />
|
||||||
),
|
),
|
||||||
label: isTutorialLoading ? "Loading Tutorial..." : "Start Tutorial",
|
label: isTutorialLoading ? "Loading Tutorial..." : "Start Tutorial",
|
||||||
onClick: handleTutorialClick,
|
onClick: handleTutorialClick,
|
||||||
@@ -79,7 +79,7 @@ export const CustomControls = memo(
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
id: "fit-view-button",
|
id: "fit-view-button",
|
||||||
icon: <FrameCornersIcon className="size-4" />,
|
icon: <FrameCornersIcon className="size-3.5 text-zinc-600" />,
|
||||||
label: "Fit View",
|
label: "Fit View",
|
||||||
onClick: () => fitView({ padding: 0.2, duration: 800, maxZoom: 1 }),
|
onClick: () => fitView({ padding: 0.2, duration: 800, maxZoom: 1 }),
|
||||||
className: "h-10 w-10 border-none",
|
className: "h-10 w-10 border-none",
|
||||||
@@ -87,9 +87,9 @@ export const CustomControls = memo(
|
|||||||
{
|
{
|
||||||
id: "lock-button",
|
id: "lock-button",
|
||||||
icon: !isLocked ? (
|
icon: !isLocked ? (
|
||||||
<LockOpenIcon className="size-4" />
|
<LockOpenIcon className="size-3.5 text-zinc-600" />
|
||||||
) : (
|
) : (
|
||||||
<LockIcon className="size-4" />
|
<LockIcon className="size-3.5 text-zinc-600" />
|
||||||
),
|
),
|
||||||
label: "Toggle Lock",
|
label: "Toggle Lock",
|
||||||
onClick: () => setIsLocked(!isLocked),
|
onClick: () => setIsLocked(!isLocked),
|
||||||
|
|||||||
@@ -19,6 +19,8 @@ export type CustomEdgeData = {
|
|||||||
beadUp?: number;
|
beadUp?: number;
|
||||||
beadDown?: number;
|
beadDown?: number;
|
||||||
beadData?: Map<string, NodeExecutionResult["status"]>;
|
beadData?: Map<string, NodeExecutionResult["status"]>;
|
||||||
|
edgeColorClass?: string;
|
||||||
|
edgeHexColor?: string;
|
||||||
};
|
};
|
||||||
|
|
||||||
export type CustomEdge = XYEdge<CustomEdgeData, "custom">;
|
export type CustomEdge = XYEdge<CustomEdgeData, "custom">;
|
||||||
@@ -36,7 +38,6 @@ const CustomEdge = ({
|
|||||||
selected,
|
selected,
|
||||||
}: EdgeProps<CustomEdge>) => {
|
}: EdgeProps<CustomEdge>) => {
|
||||||
const removeConnection = useEdgeStore((state) => state.removeEdge);
|
const removeConnection = useEdgeStore((state) => state.removeEdge);
|
||||||
// Subscribe to the brokenEdgeIDs map and check if this edge is broken across any node
|
|
||||||
const isBroken = useNodeStore((state) => state.isEdgeBroken(id));
|
const isBroken = useNodeStore((state) => state.isEdgeBroken(id));
|
||||||
const [isHovered, setIsHovered] = useState(false);
|
const [isHovered, setIsHovered] = useState(false);
|
||||||
|
|
||||||
@@ -52,6 +53,7 @@ const CustomEdge = ({
|
|||||||
const isStatic = data?.isStatic ?? false;
|
const isStatic = data?.isStatic ?? false;
|
||||||
const beadUp = data?.beadUp ?? 0;
|
const beadUp = data?.beadUp ?? 0;
|
||||||
const beadDown = data?.beadDown ?? 0;
|
const beadDown = data?.beadDown ?? 0;
|
||||||
|
const edgeColorClass = data?.edgeColorClass;
|
||||||
|
|
||||||
const handleRemoveEdge = () => {
|
const handleRemoveEdge = () => {
|
||||||
removeConnection(id);
|
removeConnection(id);
|
||||||
@@ -70,7 +72,9 @@ const CustomEdge = ({
|
|||||||
? "!stroke-red-500 !stroke-[2px] [stroke-dasharray:4]"
|
? "!stroke-red-500 !stroke-[2px] [stroke-dasharray:4]"
|
||||||
: selected
|
: selected
|
||||||
? "stroke-zinc-800"
|
? "stroke-zinc-800"
|
||||||
: "stroke-zinc-500/50 hover:stroke-zinc-500",
|
: edgeColorClass
|
||||||
|
? cn(edgeColorClass, "opacity-70 hover:opacity-100")
|
||||||
|
: "stroke-zinc-500/50 hover:stroke-zinc-500",
|
||||||
)}
|
)}
|
||||||
/>
|
/>
|
||||||
<JSBeads
|
<JSBeads
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ import { useCallback } from "react";
|
|||||||
import { useNodeStore } from "../../../stores/nodeStore";
|
import { useNodeStore } from "../../../stores/nodeStore";
|
||||||
import { useHistoryStore } from "../../../stores/historyStore";
|
import { useHistoryStore } from "../../../stores/historyStore";
|
||||||
import { CustomEdge } from "./CustomEdge";
|
import { CustomEdge } from "./CustomEdge";
|
||||||
|
import { getEdgeColorFromOutputType } from "../nodes/helpers";
|
||||||
|
|
||||||
export const useCustomEdge = () => {
|
export const useCustomEdge = () => {
|
||||||
const edges = useEdgeStore((s) => s.edges);
|
const edges = useEdgeStore((s) => s.edges);
|
||||||
@@ -34,8 +35,13 @@ export const useCustomEdge = () => {
|
|||||||
if (exists) return;
|
if (exists) return;
|
||||||
|
|
||||||
const nodes = useNodeStore.getState().nodes;
|
const nodes = useNodeStore.getState().nodes;
|
||||||
const isStatic = nodes.find((n) => n.id === conn.source)?.data
|
const sourceNode = nodes.find((n) => n.id === conn.source);
|
||||||
?.staticOutput;
|
const isStatic = sourceNode?.data?.staticOutput;
|
||||||
|
|
||||||
|
const { colorClass, hexColor } = getEdgeColorFromOutputType(
|
||||||
|
sourceNode?.data?.outputSchema,
|
||||||
|
conn.sourceHandle,
|
||||||
|
);
|
||||||
|
|
||||||
addEdge({
|
addEdge({
|
||||||
source: conn.source,
|
source: conn.source,
|
||||||
@@ -44,6 +50,8 @@ export const useCustomEdge = () => {
|
|||||||
targetHandle: conn.targetHandle,
|
targetHandle: conn.targetHandle,
|
||||||
data: {
|
data: {
|
||||||
isStatic,
|
isStatic,
|
||||||
|
edgeColorClass: colorClass,
|
||||||
|
edgeHexColor: hexColor,
|
||||||
},
|
},
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -187,3 +187,38 @@ export const getTypeDisplayInfo = (schema: any) => {
|
|||||||
hexColor,
|
hexColor,
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
|
export function getEdgeColorFromOutputType(
|
||||||
|
outputSchema: RJSFSchema | undefined,
|
||||||
|
sourceHandle: string,
|
||||||
|
): { colorClass: string; hexColor: string } {
|
||||||
|
const defaultColor = {
|
||||||
|
colorClass: "stroke-zinc-500/50",
|
||||||
|
hexColor: "#6b7280",
|
||||||
|
};
|
||||||
|
|
||||||
|
if (!outputSchema?.properties) return defaultColor;
|
||||||
|
|
||||||
|
const properties = outputSchema.properties as Record<string, unknown>;
|
||||||
|
const handleParts = sourceHandle.split("_#_");
|
||||||
|
let currentSchema: Record<string, unknown> = properties;
|
||||||
|
|
||||||
|
for (let i = 0; i < handleParts.length; i++) {
|
||||||
|
const part = handleParts[i];
|
||||||
|
const fieldSchema = currentSchema[part] as Record<string, unknown>;
|
||||||
|
if (!fieldSchema) return defaultColor;
|
||||||
|
|
||||||
|
if (i === handleParts.length - 1) {
|
||||||
|
const { hexColor, colorClass } = getTypeDisplayInfo(fieldSchema);
|
||||||
|
return { colorClass: colorClass.replace("!text-", "stroke-"), hexColor };
|
||||||
|
}
|
||||||
|
|
||||||
|
if (fieldSchema.properties) {
|
||||||
|
currentSchema = fieldSchema.properties as Record<string, unknown>;
|
||||||
|
} else {
|
||||||
|
return defaultColor;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return defaultColor;
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,7 +1,32 @@
|
|||||||
// These are SVG Phosphor icons
|
type IconOptions = {
|
||||||
|
size?: number;
|
||||||
|
color?: string;
|
||||||
|
};
|
||||||
|
|
||||||
|
const DEFAULT_SIZE = 16;
|
||||||
|
const DEFAULT_COLOR = "#52525b"; // zinc-600
|
||||||
|
|
||||||
|
const iconPaths = {
|
||||||
|
ClickIcon: `M88,24V16a8,8,0,0,1,16,0v8a8,8,0,0,1-16,0ZM16,104h8a8,8,0,0,0,0-16H16a8,8,0,0,0,0,16ZM124.42,39.16a8,8,0,0,0,10.74-3.58l8-16a8,8,0,0,0-14.31-7.16l-8,16A8,8,0,0,0,124.42,39.16Zm-96,81.69-16,8a8,8,0,0,0,7.16,14.31l16-8a8,8,0,1,0-7.16-14.31ZM219.31,184a16,16,0,0,1,0,22.63l-12.68,12.68a16,16,0,0,1-22.63,0L132.7,168,115,214.09c0,.1-.08.21-.13.32a15.83,15.83,0,0,1-14.6,9.59l-.79,0a15.83,15.83,0,0,1-14.41-11L32.8,52.92A16,16,0,0,1,52.92,32.8L213,85.07a16,16,0,0,1,1.41,29.8l-.32.13L168,132.69ZM208,195.31,156.69,144h0a16,16,0,0,1,4.93-26l.32-.14,45.95-17.64L48,48l52.2,159.86,17.65-46c0-.11.08-.22.13-.33a16,16,0,0,1,11.69-9.34,16.72,16.72,0,0,1,3-.28,16,16,0,0,1,11.3,4.69L195.31,208Z`,
|
||||||
|
Keyboard: `M224,48H32A16,16,0,0,0,16,64V192a16,16,0,0,0,16,16H224a16,16,0,0,0,16-16V64A16,16,0,0,0,224,48Zm0,144H32V64H224V192Zm-16-64a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16H200A8,8,0,0,1,208,128Zm0-32a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16H200A8,8,0,0,1,208,96ZM72,160a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16h8A8,8,0,0,1,72,160Zm96,0a8,8,0,0,1-8,8H96a8,8,0,0,1,0-16h64A8,8,0,0,1,168,160Zm40,0a8,8,0,0,1-8,8h-8a8,8,0,0,1,0-16h8A8,8,0,0,1,208,160Z`,
|
||||||
|
Drag: `M188,80a27.79,27.79,0,0,0-13.36,3.4,28,28,0,0,0-46.64-11A28,28,0,0,0,80,92v20H68a28,28,0,0,0-28,28v12a88,88,0,0,0,176,0V108A28,28,0,0,0,188,80Zm12,72a72,72,0,0,1-144,0V140a12,12,0,0,1,12-12H80v24a8,8,0,0,0,16,0V92a12,12,0,0,1,24,0v28a8,8,0,0,0,16,0V92a12,12,0,0,1,24,0v28a8,8,0,0,0,16,0V108a12,12,0,0,1,24,0Z`,
|
||||||
|
};
|
||||||
|
|
||||||
|
function createIcon(path: string, options: IconOptions = {}): string {
|
||||||
|
const size = options.size ?? DEFAULT_SIZE;
|
||||||
|
const color = options.color ?? DEFAULT_COLOR;
|
||||||
|
return `<svg xmlns="http://www.w3.org/2000/svg" width="${size}" height="${size}" fill="${color}" viewBox="0 0 256 256"><path d="${path}"></path></svg>`;
|
||||||
|
}
|
||||||
|
|
||||||
export const ICONS = {
|
export const ICONS = {
|
||||||
ClickIcon: `<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="#000000" viewBox="0 0 256 256"><path d="M88,24V16a8,8,0,0,1,16,0v8a8,8,0,0,1-16,0ZM16,104h8a8,8,0,0,0,0-16H16a8,8,0,0,0,0,16ZM124.42,39.16a8,8,0,0,0,10.74-3.58l8-16a8,8,0,0,0-14.31-7.16l-8,16A8,8,0,0,0,124.42,39.16Zm-96,81.69-16,8a8,8,0,0,0,7.16,14.31l16-8a8,8,0,1,0-7.16-14.31ZM219.31,184a16,16,0,0,1,0,22.63l-12.68,12.68a16,16,0,0,1-22.63,0L132.7,168,115,214.09c0,.1-.08.21-.13.32a15.83,15.83,0,0,1-14.6,9.59l-.79,0a15.83,15.83,0,0,1-14.41-11L32.8,52.92A16,16,0,0,1,52.92,32.8L213,85.07a16,16,0,0,1,1.41,29.8l-.32.13L168,132.69ZM208,195.31,156.69,144h0a16,16,0,0,1,4.93-26l.32-.14,45.95-17.64L48,48l52.2,159.86,17.65-46c0-.11.08-.22.13-.33a16,16,0,0,1,11.69-9.34,16.72,16.72,0,0,1,3-.28,16,16,0,0,1,11.3,4.69L195.31,208Z"></path></svg>`,
|
ClickIcon: createIcon(iconPaths.ClickIcon),
|
||||||
Keyboard: `<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="#000000" viewBox="0 0 256 256"><path d="M224,48H32A16,16,0,0,0,16,64V192a16,16,0,0,0,16,16H224a16,16,0,0,0,16-16V64A16,16,0,0,0,224,48Zm0,144H32V64H224V192Zm-16-64a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16H200A8,8,0,0,1,208,128Zm0-32a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16H200A8,8,0,0,1,208,96ZM72,160a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16h8A8,8,0,0,1,72,160Zm96,0a8,8,0,0,1-8,8H96a8,8,0,0,1,0-16h64A8,8,0,0,1,168,160Zm40,0a8,8,0,0,1-8,8h-8a8,8,0,0,1,0-16h8A8,8,0,0,1,208,160Z"></path></svg>`,
|
Keyboard: createIcon(iconPaths.Keyboard),
|
||||||
Drag: `<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="#000000" viewBox="0 0 256 256"><path d="M188,80a27.79,27.79,0,0,0-13.36,3.4,28,28,0,0,0-46.64-11A28,28,0,0,0,80,92v20H68a28,28,0,0,0-28,28v12a88,88,0,0,0,176,0V108A28,28,0,0,0,188,80Zm12,72a72,72,0,0,1-144,0V140a12,12,0,0,1,12-12H80v24a8,8,0,0,0,16,0V92a12,12,0,0,1,24,0v28a8,8,0,0,0,16,0V92a12,12,0,0,1,24,0v28a8,8,0,0,0,16,0V108a12,12,0,0,1,24,0Z"></path></svg>`,
|
Drag: createIcon(iconPaths.Drag),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
export function getIcon(
|
||||||
|
name: keyof typeof iconPaths,
|
||||||
|
options?: IconOptions,
|
||||||
|
): string {
|
||||||
|
return createIcon(iconPaths[name], options);
|
||||||
|
}
|
||||||
|
|||||||
@@ -11,6 +11,7 @@ import {
|
|||||||
} from "./helpers";
|
} from "./helpers";
|
||||||
import { useNodeStore } from "../../../stores/nodeStore";
|
import { useNodeStore } from "../../../stores/nodeStore";
|
||||||
import { useEdgeStore } from "../../../stores/edgeStore";
|
import { useEdgeStore } from "../../../stores/edgeStore";
|
||||||
|
import { useTutorialStore } from "../../../stores/tutorialStore";
|
||||||
|
|
||||||
let isTutorialLoading = false;
|
let isTutorialLoading = false;
|
||||||
let tutorialLoadingCallback: ((loading: boolean) => void) | null = null;
|
let tutorialLoadingCallback: ((loading: boolean) => void) | null = null;
|
||||||
@@ -60,12 +61,14 @@ export const startTutorial = async () => {
|
|||||||
handleTutorialComplete();
|
handleTutorialComplete();
|
||||||
removeTutorialStyles();
|
removeTutorialStyles();
|
||||||
clearPrefetchedBlocks();
|
clearPrefetchedBlocks();
|
||||||
|
useTutorialStore.getState().setIsTutorialRunning(false);
|
||||||
});
|
});
|
||||||
|
|
||||||
tour.on("cancel", () => {
|
tour.on("cancel", () => {
|
||||||
handleTutorialCancel(tour);
|
handleTutorialCancel(tour);
|
||||||
removeTutorialStyles();
|
removeTutorialStyles();
|
||||||
clearPrefetchedBlocks();
|
clearPrefetchedBlocks();
|
||||||
|
useTutorialStore.getState().setIsTutorialRunning(false);
|
||||||
});
|
});
|
||||||
|
|
||||||
for (const step of tour.steps) {
|
for (const step of tour.steps) {
|
||||||
|
|||||||
@@ -267,23 +267,34 @@ export function extractCredentialsNeeded(
|
|||||||
| undefined;
|
| undefined;
|
||||||
if (missingCreds && Object.keys(missingCreds).length > 0) {
|
if (missingCreds && Object.keys(missingCreds).length > 0) {
|
||||||
const agentName = (setupInfo?.agent_name as string) || "this block";
|
const agentName = (setupInfo?.agent_name as string) || "this block";
|
||||||
const credentials = Object.values(missingCreds).map((credInfo) => ({
|
const credentials = Object.values(missingCreds).map((credInfo) => {
|
||||||
provider: (credInfo.provider as string) || "unknown",
|
// Normalize to array at boundary - prefer 'types' array, fall back to single 'type'
|
||||||
providerName:
|
const typesArray = credInfo.types as
|
||||||
(credInfo.provider_name as string) ||
|
| Array<"api_key" | "oauth2" | "user_password" | "host_scoped">
|
||||||
(credInfo.provider as string) ||
|
| undefined;
|
||||||
"Unknown Provider",
|
const singleType =
|
||||||
credentialType:
|
|
||||||
(credInfo.type as
|
(credInfo.type as
|
||||||
| "api_key"
|
| "api_key"
|
||||||
| "oauth2"
|
| "oauth2"
|
||||||
| "user_password"
|
| "user_password"
|
||||||
| "host_scoped") || "api_key",
|
| "host_scoped"
|
||||||
title:
|
| undefined) || "api_key";
|
||||||
(credInfo.title as string) ||
|
const credentialTypes =
|
||||||
`${(credInfo.provider_name as string) || (credInfo.provider as string)} credentials`,
|
typesArray && typesArray.length > 0 ? typesArray : [singleType];
|
||||||
scopes: credInfo.scopes as string[] | undefined,
|
|
||||||
}));
|
return {
|
||||||
|
provider: (credInfo.provider as string) || "unknown",
|
||||||
|
providerName:
|
||||||
|
(credInfo.provider_name as string) ||
|
||||||
|
(credInfo.provider as string) ||
|
||||||
|
"Unknown Provider",
|
||||||
|
credentialTypes,
|
||||||
|
title:
|
||||||
|
(credInfo.title as string) ||
|
||||||
|
`${(credInfo.provider_name as string) || (credInfo.provider as string)} credentials`,
|
||||||
|
scopes: credInfo.scopes as string[] | undefined,
|
||||||
|
};
|
||||||
|
});
|
||||||
return {
|
return {
|
||||||
type: "credentials_needed",
|
type: "credentials_needed",
|
||||||
toolName,
|
toolName,
|
||||||
@@ -358,11 +369,14 @@ export function extractInputsNeeded(
|
|||||||
credentials.forEach((cred) => {
|
credentials.forEach((cred) => {
|
||||||
const id = cred.id as string;
|
const id = cred.id as string;
|
||||||
if (id) {
|
if (id) {
|
||||||
|
const credentialTypes = Array.isArray(cred.types)
|
||||||
|
? cred.types
|
||||||
|
: [(cred.type as string) || "api_key"];
|
||||||
credentialsSchema[id] = {
|
credentialsSchema[id] = {
|
||||||
type: "object",
|
type: "object",
|
||||||
properties: {},
|
properties: {},
|
||||||
credentials_provider: [cred.provider as string],
|
credentials_provider: [cred.provider as string],
|
||||||
credentials_types: [(cred.type as string) || "api_key"],
|
credentials_types: credentialTypes,
|
||||||
credentials_scopes: cred.scopes as string[] | undefined,
|
credentials_scopes: cred.scopes as string[] | undefined,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -9,7 +9,9 @@ import { useChatCredentialsSetup } from "./useChatCredentialsSetup";
|
|||||||
export interface CredentialInfo {
|
export interface CredentialInfo {
|
||||||
provider: string;
|
provider: string;
|
||||||
providerName: string;
|
providerName: string;
|
||||||
credentialType: "api_key" | "oauth2" | "user_password" | "host_scoped";
|
credentialTypes: Array<
|
||||||
|
"api_key" | "oauth2" | "user_password" | "host_scoped"
|
||||||
|
>;
|
||||||
title: string;
|
title: string;
|
||||||
scopes?: string[];
|
scopes?: string[];
|
||||||
}
|
}
|
||||||
@@ -30,7 +32,7 @@ function createSchemaFromCredentialInfo(
|
|||||||
type: "object",
|
type: "object",
|
||||||
properties: {},
|
properties: {},
|
||||||
credentials_provider: [credential.provider],
|
credentials_provider: [credential.provider],
|
||||||
credentials_types: [credential.credentialType],
|
credentials_types: credential.credentialTypes,
|
||||||
credentials_scopes: credential.scopes,
|
credentials_scopes: credential.scopes,
|
||||||
discriminator: undefined,
|
discriminator: undefined,
|
||||||
discriminator_mapping: undefined,
|
discriminator_mapping: undefined,
|
||||||
|
|||||||
@@ -41,7 +41,9 @@ export type ChatMessageData =
|
|||||||
credentials: Array<{
|
credentials: Array<{
|
||||||
provider: string;
|
provider: string;
|
||||||
providerName: string;
|
providerName: string;
|
||||||
credentialType: "api_key" | "oauth2" | "user_password" | "host_scoped";
|
credentialTypes: Array<
|
||||||
|
"api_key" | "oauth2" | "user_password" | "host_scoped"
|
||||||
|
>;
|
||||||
title: string;
|
title: string;
|
||||||
scopes?: string[];
|
scopes?: string[];
|
||||||
}>;
|
}>;
|
||||||
|
|||||||
@@ -31,10 +31,18 @@ export function AgentSettingsModal({
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
const { currentSafeMode, isPending, hasHITLBlocks, handleToggle } =
|
const {
|
||||||
useAgentSafeMode(agent);
|
currentHITLSafeMode,
|
||||||
|
showHITLToggle,
|
||||||
|
handleHITLToggle,
|
||||||
|
currentSensitiveActionSafeMode,
|
||||||
|
showSensitiveActionToggle,
|
||||||
|
handleSensitiveActionToggle,
|
||||||
|
isPending,
|
||||||
|
shouldShowToggle,
|
||||||
|
} = useAgentSafeMode(agent);
|
||||||
|
|
||||||
if (!hasHITLBlocks) return null;
|
if (!shouldShowToggle) return null;
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<Dialog
|
<Dialog
|
||||||
@@ -57,23 +65,48 @@ export function AgentSettingsModal({
|
|||||||
)}
|
)}
|
||||||
<Dialog.Content>
|
<Dialog.Content>
|
||||||
<div className="space-y-6">
|
<div className="space-y-6">
|
||||||
<div className="flex w-full flex-col items-start gap-4 rounded-xl border border-zinc-100 bg-white p-6">
|
{showHITLToggle && (
|
||||||
<div className="flex w-full items-start justify-between gap-4">
|
<div className="flex w-full flex-col items-start gap-4 rounded-xl border border-zinc-100 bg-white p-6">
|
||||||
<div className="flex-1">
|
<div className="flex w-full items-start justify-between gap-4">
|
||||||
<Text variant="large-semibold">Require human approval</Text>
|
<div className="flex-1">
|
||||||
<Text variant="large" className="mt-1 text-zinc-900">
|
<Text variant="large-semibold">
|
||||||
The agent will pause and wait for your review before
|
Human-in-the-loop approval
|
||||||
continuing
|
</Text>
|
||||||
</Text>
|
<Text variant="large" className="mt-1 text-zinc-900">
|
||||||
|
The agent will pause at human-in-the-loop blocks and wait
|
||||||
|
for your review before continuing
|
||||||
|
</Text>
|
||||||
|
</div>
|
||||||
|
<Switch
|
||||||
|
checked={currentHITLSafeMode || false}
|
||||||
|
onCheckedChange={handleHITLToggle}
|
||||||
|
disabled={isPending}
|
||||||
|
className="mt-1"
|
||||||
|
/>
|
||||||
</div>
|
</div>
|
||||||
<Switch
|
|
||||||
checked={currentSafeMode || false}
|
|
||||||
onCheckedChange={handleToggle}
|
|
||||||
disabled={isPending}
|
|
||||||
className="mt-1"
|
|
||||||
/>
|
|
||||||
</div>
|
</div>
|
||||||
</div>
|
)}
|
||||||
|
{showSensitiveActionToggle && (
|
||||||
|
<div className="flex w-full flex-col items-start gap-4 rounded-xl border border-zinc-100 bg-white p-6">
|
||||||
|
<div className="flex w-full items-start justify-between gap-4">
|
||||||
|
<div className="flex-1">
|
||||||
|
<Text variant="large-semibold">
|
||||||
|
Sensitive action approval
|
||||||
|
</Text>
|
||||||
|
<Text variant="large" className="mt-1 text-zinc-900">
|
||||||
|
The agent will pause at sensitive action blocks and wait for
|
||||||
|
your review before continuing
|
||||||
|
</Text>
|
||||||
|
</div>
|
||||||
|
<Switch
|
||||||
|
checked={currentSensitiveActionSafeMode}
|
||||||
|
onCheckedChange={handleSensitiveActionToggle}
|
||||||
|
disabled={isPending}
|
||||||
|
className="mt-1"
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
</div>
|
</div>
|
||||||
</Dialog.Content>
|
</Dialog.Content>
|
||||||
</Dialog>
|
</Dialog>
|
||||||
|
|||||||
@@ -14,6 +14,10 @@ import {
|
|||||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||||
import { useEffect, useRef, useState } from "react";
|
import { useEffect, useRef, useState } from "react";
|
||||||
import { ScheduleAgentModal } from "../ScheduleAgentModal/ScheduleAgentModal";
|
import { ScheduleAgentModal } from "../ScheduleAgentModal/ScheduleAgentModal";
|
||||||
|
import {
|
||||||
|
AIAgentSafetyPopup,
|
||||||
|
useAIAgentSafetyPopup,
|
||||||
|
} from "./components/AIAgentSafetyPopup/AIAgentSafetyPopup";
|
||||||
import { ModalHeader } from "./components/ModalHeader/ModalHeader";
|
import { ModalHeader } from "./components/ModalHeader/ModalHeader";
|
||||||
import { ModalRunSection } from "./components/ModalRunSection/ModalRunSection";
|
import { ModalRunSection } from "./components/ModalRunSection/ModalRunSection";
|
||||||
import { RunActions } from "./components/RunActions/RunActions";
|
import { RunActions } from "./components/RunActions/RunActions";
|
||||||
@@ -83,8 +87,17 @@ export function RunAgentModal({
|
|||||||
|
|
||||||
const [isScheduleModalOpen, setIsScheduleModalOpen] = useState(false);
|
const [isScheduleModalOpen, setIsScheduleModalOpen] = useState(false);
|
||||||
const [hasOverflow, setHasOverflow] = useState(false);
|
const [hasOverflow, setHasOverflow] = useState(false);
|
||||||
|
const [isSafetyPopupOpen, setIsSafetyPopupOpen] = useState(false);
|
||||||
|
const [pendingRunAction, setPendingRunAction] = useState<(() => void) | null>(
|
||||||
|
null,
|
||||||
|
);
|
||||||
const contentRef = useRef<HTMLDivElement>(null);
|
const contentRef = useRef<HTMLDivElement>(null);
|
||||||
|
|
||||||
|
const { shouldShowPopup, dismissPopup } = useAIAgentSafetyPopup(
|
||||||
|
agent.has_sensitive_action,
|
||||||
|
agent.has_human_in_the_loop,
|
||||||
|
);
|
||||||
|
|
||||||
const hasAnySetupFields =
|
const hasAnySetupFields =
|
||||||
Object.keys(agentInputFields || {}).length > 0 ||
|
Object.keys(agentInputFields || {}).length > 0 ||
|
||||||
Object.keys(agentCredentialsInputFields || {}).length > 0;
|
Object.keys(agentCredentialsInputFields || {}).length > 0;
|
||||||
@@ -165,6 +178,24 @@ export function RunAgentModal({
|
|||||||
onScheduleCreated?.(schedule);
|
onScheduleCreated?.(schedule);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function handleRunWithSafetyCheck() {
|
||||||
|
if (shouldShowPopup) {
|
||||||
|
setPendingRunAction(() => handleRun);
|
||||||
|
setIsSafetyPopupOpen(true);
|
||||||
|
} else {
|
||||||
|
handleRun();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function handleSafetyPopupAcknowledge() {
|
||||||
|
setIsSafetyPopupOpen(false);
|
||||||
|
dismissPopup();
|
||||||
|
if (pendingRunAction) {
|
||||||
|
pendingRunAction();
|
||||||
|
setPendingRunAction(null);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<>
|
<>
|
||||||
<Dialog
|
<Dialog
|
||||||
@@ -248,7 +279,7 @@ export function RunAgentModal({
|
|||||||
)}
|
)}
|
||||||
<RunActions
|
<RunActions
|
||||||
defaultRunType={defaultRunType}
|
defaultRunType={defaultRunType}
|
||||||
onRun={handleRun}
|
onRun={handleRunWithSafetyCheck}
|
||||||
isExecuting={isExecuting}
|
isExecuting={isExecuting}
|
||||||
isSettingUpTrigger={isSettingUpTrigger}
|
isSettingUpTrigger={isSettingUpTrigger}
|
||||||
isRunReady={allRequiredInputsAreSet}
|
isRunReady={allRequiredInputsAreSet}
|
||||||
@@ -266,6 +297,11 @@ export function RunAgentModal({
|
|||||||
</div>
|
</div>
|
||||||
</Dialog.Content>
|
</Dialog.Content>
|
||||||
</Dialog>
|
</Dialog>
|
||||||
|
|
||||||
|
<AIAgentSafetyPopup
|
||||||
|
isOpen={isSafetyPopupOpen}
|
||||||
|
onAcknowledge={handleSafetyPopupAcknowledge}
|
||||||
|
/>
|
||||||
</>
|
</>
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -0,0 +1,95 @@
|
|||||||
|
"use client";
|
||||||
|
|
||||||
|
import { Button } from "@/components/atoms/Button/Button";
|
||||||
|
import { Text } from "@/components/atoms/Text/Text";
|
||||||
|
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||||
|
import { Key, storage } from "@/services/storage/local-storage";
|
||||||
|
import { ShieldCheckIcon } from "@phosphor-icons/react";
|
||||||
|
import { useCallback, useEffect, useState } from "react";
|
||||||
|
|
||||||
|
interface Props {
|
||||||
|
onAcknowledge: () => void;
|
||||||
|
isOpen: boolean;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function AIAgentSafetyPopup({ onAcknowledge, isOpen }: Props) {
|
||||||
|
function handleAcknowledge() {
|
||||||
|
// Mark popup as shown so it won't appear again
|
||||||
|
storage.set(Key.AI_AGENT_SAFETY_POPUP_SHOWN, "true");
|
||||||
|
onAcknowledge();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!isOpen) return null;
|
||||||
|
|
||||||
|
return (
|
||||||
|
<Dialog
|
||||||
|
controlled={{ isOpen, set: () => {} }}
|
||||||
|
styling={{ maxWidth: "480px" }}
|
||||||
|
>
|
||||||
|
<Dialog.Content>
|
||||||
|
<div className="flex flex-col items-center p-6 text-center">
|
||||||
|
<div className="mb-6 flex h-16 w-16 items-center justify-center rounded-full bg-blue-50">
|
||||||
|
<ShieldCheckIcon
|
||||||
|
weight="fill"
|
||||||
|
size={32}
|
||||||
|
className="text-blue-600"
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<Text variant="h3" className="mb-4">
|
||||||
|
Safety Checks Enabled
|
||||||
|
</Text>
|
||||||
|
|
||||||
|
<Text variant="body" className="mb-2 text-zinc-700">
|
||||||
|
AI-generated agents may take actions that affect your data or
|
||||||
|
external systems.
|
||||||
|
</Text>
|
||||||
|
|
||||||
|
<Text variant="body" className="mb-8 text-zinc-700">
|
||||||
|
AutoGPT includes safety checks so you'll always have the
|
||||||
|
opportunity to review and approve sensitive actions before they
|
||||||
|
happen.
|
||||||
|
</Text>
|
||||||
|
|
||||||
|
<Button
|
||||||
|
variant="primary"
|
||||||
|
size="large"
|
||||||
|
className="w-full"
|
||||||
|
onClick={handleAcknowledge}
|
||||||
|
>
|
||||||
|
Got it
|
||||||
|
</Button>
|
||||||
|
</div>
|
||||||
|
</Dialog.Content>
|
||||||
|
</Dialog>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
export function useAIAgentSafetyPopup(
|
||||||
|
hasSensitiveAction: boolean,
|
||||||
|
hasHumanInTheLoop: boolean,
|
||||||
|
) {
|
||||||
|
const [shouldShowPopup, setShouldShowPopup] = useState(false);
|
||||||
|
const [hasChecked, setHasChecked] = useState(false);
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
// Only check once after mount (to avoid SSR issues)
|
||||||
|
if (hasChecked) return;
|
||||||
|
|
||||||
|
const hasSeenPopup =
|
||||||
|
storage.get(Key.AI_AGENT_SAFETY_POPUP_SHOWN) === "true";
|
||||||
|
const isRelevantAgent = hasSensitiveAction || hasHumanInTheLoop;
|
||||||
|
|
||||||
|
setShouldShowPopup(!hasSeenPopup && isRelevantAgent);
|
||||||
|
setHasChecked(true);
|
||||||
|
}, [hasSensitiveAction, hasHumanInTheLoop, hasChecked]);
|
||||||
|
|
||||||
|
const dismissPopup = useCallback(() => {
|
||||||
|
setShouldShowPopup(false);
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
return {
|
||||||
|
shouldShowPopup,
|
||||||
|
dismissPopup,
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -5,48 +5,104 @@ import { Graph } from "@/lib/autogpt-server-api/types";
|
|||||||
import { cn } from "@/lib/utils";
|
import { cn } from "@/lib/utils";
|
||||||
import { ShieldCheckIcon, ShieldIcon } from "@phosphor-icons/react";
|
import { ShieldCheckIcon, ShieldIcon } from "@phosphor-icons/react";
|
||||||
import { useAgentSafeMode } from "@/hooks/useAgentSafeMode";
|
import { useAgentSafeMode } from "@/hooks/useAgentSafeMode";
|
||||||
|
import {
|
||||||
|
Tooltip,
|
||||||
|
TooltipContent,
|
||||||
|
TooltipTrigger,
|
||||||
|
} from "@/components/atoms/Tooltip/BaseTooltip";
|
||||||
|
|
||||||
interface Props {
|
interface Props {
|
||||||
graph: GraphModel | LibraryAgent | Graph;
|
graph: GraphModel | LibraryAgent | Graph;
|
||||||
className?: string;
|
className?: string;
|
||||||
fullWidth?: boolean;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
export function SafeModeToggle({ graph }: Props) {
|
interface SafeModeIconButtonProps {
|
||||||
|
isEnabled: boolean;
|
||||||
|
label: string;
|
||||||
|
tooltipEnabled: string;
|
||||||
|
tooltipDisabled: string;
|
||||||
|
onToggle: () => void;
|
||||||
|
isPending: boolean;
|
||||||
|
}
|
||||||
|
|
||||||
|
function SafeModeIconButton({
|
||||||
|
isEnabled,
|
||||||
|
label,
|
||||||
|
tooltipEnabled,
|
||||||
|
tooltipDisabled,
|
||||||
|
onToggle,
|
||||||
|
isPending,
|
||||||
|
}: SafeModeIconButtonProps) {
|
||||||
|
return (
|
||||||
|
<Tooltip delayDuration={100}>
|
||||||
|
<TooltipTrigger asChild>
|
||||||
|
<Button
|
||||||
|
variant="icon"
|
||||||
|
size="icon"
|
||||||
|
aria-label={`${label}: ${isEnabled ? "ON" : "OFF"}. ${isEnabled ? tooltipEnabled : tooltipDisabled}`}
|
||||||
|
onClick={onToggle}
|
||||||
|
disabled={isPending}
|
||||||
|
className={cn(isPending ? "opacity-0" : "opacity-100")}
|
||||||
|
>
|
||||||
|
{isEnabled ? (
|
||||||
|
<ShieldCheckIcon weight="bold" size={16} />
|
||||||
|
) : (
|
||||||
|
<ShieldIcon weight="bold" size={16} />
|
||||||
|
)}
|
||||||
|
</Button>
|
||||||
|
</TooltipTrigger>
|
||||||
|
<TooltipContent>
|
||||||
|
<div className="text-center">
|
||||||
|
<div className="font-medium">
|
||||||
|
{label}: {isEnabled ? "ON" : "OFF"}
|
||||||
|
</div>
|
||||||
|
<div className="mt-1 text-xs text-muted-foreground">
|
||||||
|
{isEnabled ? tooltipEnabled : tooltipDisabled}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</TooltipContent>
|
||||||
|
</Tooltip>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
export function SafeModeToggle({ graph, className }: Props) {
|
||||||
const {
|
const {
|
||||||
currentSafeMode,
|
currentHITLSafeMode,
|
||||||
|
showHITLToggle,
|
||||||
|
handleHITLToggle,
|
||||||
|
currentSensitiveActionSafeMode,
|
||||||
|
showSensitiveActionToggle,
|
||||||
|
handleSensitiveActionToggle,
|
||||||
isPending,
|
isPending,
|
||||||
shouldShowToggle,
|
shouldShowToggle,
|
||||||
isStateUndetermined,
|
|
||||||
handleToggle,
|
|
||||||
} = useAgentSafeMode(graph);
|
} = useAgentSafeMode(graph);
|
||||||
|
|
||||||
if (!shouldShowToggle || isStateUndetermined) {
|
if (!shouldShowToggle) {
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<Button
|
<div className={cn("flex gap-1", className)}>
|
||||||
variant="icon"
|
{showHITLToggle && (
|
||||||
key={graph.id}
|
<SafeModeIconButton
|
||||||
size="icon"
|
isEnabled={currentHITLSafeMode}
|
||||||
aria-label={
|
label="Human-in-the-loop"
|
||||||
currentSafeMode!
|
tooltipEnabled="The agent will pause at human-in-the-loop blocks and wait for your approval"
|
||||||
? "Safe Mode: ON. Human in the loop blocks require manual review"
|
tooltipDisabled="Human-in-the-loop blocks will proceed automatically"
|
||||||
: "Safe Mode: OFF. Human in the loop blocks proceed automatically"
|
onToggle={handleHITLToggle}
|
||||||
}
|
isPending={isPending}
|
||||||
onClick={handleToggle}
|
/>
|
||||||
className={cn(isPending ? "opacity-0" : "opacity-100")}
|
|
||||||
>
|
|
||||||
{currentSafeMode! ? (
|
|
||||||
<>
|
|
||||||
<ShieldCheckIcon weight="bold" size={16} />
|
|
||||||
</>
|
|
||||||
) : (
|
|
||||||
<>
|
|
||||||
<ShieldIcon weight="bold" size={16} />
|
|
||||||
</>
|
|
||||||
)}
|
)}
|
||||||
</Button>
|
{showSensitiveActionToggle && (
|
||||||
|
<SafeModeIconButton
|
||||||
|
isEnabled={currentSensitiveActionSafeMode}
|
||||||
|
label="Sensitive actions"
|
||||||
|
tooltipEnabled="The agent will pause at sensitive action blocks and wait for your approval"
|
||||||
|
tooltipDisabled="Sensitive action blocks will proceed automatically"
|
||||||
|
onToggle={handleSensitiveActionToggle}
|
||||||
|
isPending={isPending}
|
||||||
|
/>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -13,8 +13,16 @@ interface Props {
|
|||||||
}
|
}
|
||||||
|
|
||||||
export function SelectedSettingsView({ agent, onClearSelectedRun }: Props) {
|
export function SelectedSettingsView({ agent, onClearSelectedRun }: Props) {
|
||||||
const { currentSafeMode, isPending, hasHITLBlocks, handleToggle } =
|
const {
|
||||||
useAgentSafeMode(agent);
|
currentHITLSafeMode,
|
||||||
|
showHITLToggle,
|
||||||
|
handleHITLToggle,
|
||||||
|
currentSensitiveActionSafeMode,
|
||||||
|
showSensitiveActionToggle,
|
||||||
|
handleSensitiveActionToggle,
|
||||||
|
isPending,
|
||||||
|
shouldShowToggle,
|
||||||
|
} = useAgentSafeMode(agent);
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<SelectedViewLayout agent={agent}>
|
<SelectedViewLayout agent={agent}>
|
||||||
@@ -34,24 +42,51 @@ export function SelectedSettingsView({ agent, onClearSelectedRun }: Props) {
|
|||||||
</div>
|
</div>
|
||||||
|
|
||||||
<div className={`${AGENT_LIBRARY_SECTION_PADDING_X} space-y-6`}>
|
<div className={`${AGENT_LIBRARY_SECTION_PADDING_X} space-y-6`}>
|
||||||
{hasHITLBlocks ? (
|
{shouldShowToggle ? (
|
||||||
<div className="flex w-full max-w-2xl flex-col items-start gap-4 rounded-xl border border-zinc-100 bg-white p-6">
|
<>
|
||||||
<div className="flex w-full items-start justify-between gap-4">
|
{showHITLToggle && (
|
||||||
<div className="flex-1">
|
<div className="flex w-full max-w-2xl flex-col items-start gap-4 rounded-xl border border-zinc-100 bg-white p-6">
|
||||||
<Text variant="large-semibold">Require human approval</Text>
|
<div className="flex w-full items-start justify-between gap-4">
|
||||||
<Text variant="large" className="mt-1 text-zinc-900">
|
<div className="flex-1">
|
||||||
The agent will pause and wait for your review before
|
<Text variant="large-semibold">
|
||||||
continuing
|
Human-in-the-loop approval
|
||||||
</Text>
|
</Text>
|
||||||
|
<Text variant="large" className="mt-1 text-zinc-900">
|
||||||
|
The agent will pause at human-in-the-loop blocks and
|
||||||
|
wait for your review before continuing
|
||||||
|
</Text>
|
||||||
|
</div>
|
||||||
|
<Switch
|
||||||
|
checked={currentHITLSafeMode || false}
|
||||||
|
onCheckedChange={handleHITLToggle}
|
||||||
|
disabled={isPending}
|
||||||
|
className="mt-1"
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
</div>
|
</div>
|
||||||
<Switch
|
)}
|
||||||
checked={currentSafeMode || false}
|
{showSensitiveActionToggle && (
|
||||||
onCheckedChange={handleToggle}
|
<div className="flex w-full max-w-2xl flex-col items-start gap-4 rounded-xl border border-zinc-100 bg-white p-6">
|
||||||
disabled={isPending}
|
<div className="flex w-full items-start justify-between gap-4">
|
||||||
className="mt-1"
|
<div className="flex-1">
|
||||||
/>
|
<Text variant="large-semibold">
|
||||||
</div>
|
Sensitive action approval
|
||||||
</div>
|
</Text>
|
||||||
|
<Text variant="large" className="mt-1 text-zinc-900">
|
||||||
|
The agent will pause at sensitive action blocks and wait
|
||||||
|
for your review before continuing
|
||||||
|
</Text>
|
||||||
|
</div>
|
||||||
|
<Switch
|
||||||
|
checked={currentSensitiveActionSafeMode}
|
||||||
|
onCheckedChange={handleSensitiveActionToggle}
|
||||||
|
disabled={isPending}
|
||||||
|
className="mt-1"
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</>
|
||||||
) : (
|
) : (
|
||||||
<div className="rounded-xl border border-zinc-100 bg-white p-6">
|
<div className="rounded-xl border border-zinc-100 bg-white p-6">
|
||||||
<Text variant="body" className="text-muted-foreground">
|
<Text variant="body" className="text-muted-foreground">
|
||||||
|
|||||||
@@ -1,8 +1,15 @@
|
|||||||
"use client";
|
"use client";
|
||||||
import React, { useCallback, useEffect, useMemo, useState } from "react";
|
import React, {
|
||||||
|
useCallback,
|
||||||
|
useContext,
|
||||||
|
useEffect,
|
||||||
|
useMemo,
|
||||||
|
useState,
|
||||||
|
} from "react";
|
||||||
|
|
||||||
import {
|
import {
|
||||||
CredentialsMetaInput,
|
CredentialsMetaInput,
|
||||||
|
CredentialsType,
|
||||||
GraphExecutionID,
|
GraphExecutionID,
|
||||||
GraphMeta,
|
GraphMeta,
|
||||||
LibraryAgentPreset,
|
LibraryAgentPreset,
|
||||||
@@ -29,7 +36,11 @@ import {
|
|||||||
} from "@/components/__legacy__/ui/icons";
|
} from "@/components/__legacy__/ui/icons";
|
||||||
import { Input } from "@/components/__legacy__/ui/input";
|
import { Input } from "@/components/__legacy__/ui/input";
|
||||||
import { Button } from "@/components/atoms/Button/Button";
|
import { Button } from "@/components/atoms/Button/Button";
|
||||||
import { CredentialsInput } from "@/components/contextual/CredentialsInput/CredentialsInput";
|
import { CredentialsGroupedView } from "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView";
|
||||||
|
import {
|
||||||
|
findSavedCredentialByProviderAndType,
|
||||||
|
findSavedUserCredentialByProviderAndType,
|
||||||
|
} from "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/helpers";
|
||||||
import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip";
|
import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip";
|
||||||
import {
|
import {
|
||||||
useToast,
|
useToast,
|
||||||
@@ -37,6 +48,7 @@ import {
|
|||||||
} from "@/components/molecules/Toast/use-toast";
|
} from "@/components/molecules/Toast/use-toast";
|
||||||
import { humanizeCronExpression } from "@/lib/cron-expression-utils";
|
import { humanizeCronExpression } from "@/lib/cron-expression-utils";
|
||||||
import { cn, isEmpty } from "@/lib/utils";
|
import { cn, isEmpty } from "@/lib/utils";
|
||||||
|
import { CredentialsProvidersContext } from "@/providers/agent-credentials/credentials-provider";
|
||||||
import { ClockIcon, CopyIcon, InfoIcon } from "@phosphor-icons/react";
|
import { ClockIcon, CopyIcon, InfoIcon } from "@phosphor-icons/react";
|
||||||
import { CalendarClockIcon, Trash2Icon } from "lucide-react";
|
import { CalendarClockIcon, Trash2Icon } from "lucide-react";
|
||||||
|
|
||||||
@@ -90,6 +102,7 @@ export function AgentRunDraftView({
|
|||||||
const api = useBackendAPI();
|
const api = useBackendAPI();
|
||||||
const { toast } = useToast();
|
const { toast } = useToast();
|
||||||
const toastOnFail = useToastOnFail();
|
const toastOnFail = useToastOnFail();
|
||||||
|
const allProviders = useContext(CredentialsProvidersContext);
|
||||||
|
|
||||||
const [inputValues, setInputValues] = useState<Record<string, any>>({});
|
const [inputValues, setInputValues] = useState<Record<string, any>>({});
|
||||||
const [inputCredentials, setInputCredentials] = useState<
|
const [inputCredentials, setInputCredentials] = useState<
|
||||||
@@ -128,6 +141,77 @@ export function AgentRunDraftView({
|
|||||||
() => graph.credentials_input_schema.properties,
|
() => graph.credentials_input_schema.properties,
|
||||||
[graph],
|
[graph],
|
||||||
);
|
);
|
||||||
|
const credentialFields = useMemo(
|
||||||
|
function getCredentialFields() {
|
||||||
|
return Object.entries(agentCredentialsInputFields);
|
||||||
|
},
|
||||||
|
[agentCredentialsInputFields],
|
||||||
|
);
|
||||||
|
const requiredCredentials = useMemo(
|
||||||
|
function getRequiredCredentials() {
|
||||||
|
return new Set(
|
||||||
|
(graph.credentials_input_schema?.required as string[]) || [],
|
||||||
|
);
|
||||||
|
},
|
||||||
|
[graph.credentials_input_schema?.required],
|
||||||
|
);
|
||||||
|
|
||||||
|
useEffect(
|
||||||
|
function initializeDefaultCredentials() {
|
||||||
|
if (!allProviders) return;
|
||||||
|
if (!graph.credentials_input_schema?.properties) return;
|
||||||
|
if (requiredCredentials.size === 0) return;
|
||||||
|
|
||||||
|
setInputCredentials(function updateCredentials(currentCreds) {
|
||||||
|
const next = { ...currentCreds };
|
||||||
|
let didAdd = false;
|
||||||
|
|
||||||
|
for (const key of requiredCredentials) {
|
||||||
|
if (next[key]) continue;
|
||||||
|
const schema = graph.credentials_input_schema.properties[key];
|
||||||
|
if (!schema) continue;
|
||||||
|
|
||||||
|
const providerNames = schema.credentials_provider || [];
|
||||||
|
const credentialTypes = schema.credentials_types || [];
|
||||||
|
const requiredScopes = schema.credentials_scopes;
|
||||||
|
|
||||||
|
const userCredential = findSavedUserCredentialByProviderAndType(
|
||||||
|
providerNames,
|
||||||
|
credentialTypes,
|
||||||
|
requiredScopes,
|
||||||
|
allProviders,
|
||||||
|
);
|
||||||
|
|
||||||
|
const savedCredential =
|
||||||
|
userCredential ||
|
||||||
|
findSavedCredentialByProviderAndType(
|
||||||
|
providerNames,
|
||||||
|
credentialTypes,
|
||||||
|
requiredScopes,
|
||||||
|
allProviders,
|
||||||
|
);
|
||||||
|
|
||||||
|
if (!savedCredential) continue;
|
||||||
|
|
||||||
|
next[key] = {
|
||||||
|
id: savedCredential.id,
|
||||||
|
provider: savedCredential.provider,
|
||||||
|
type: savedCredential.type as CredentialsType,
|
||||||
|
title: savedCredential.title,
|
||||||
|
};
|
||||||
|
didAdd = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!didAdd) return currentCreds;
|
||||||
|
return next;
|
||||||
|
});
|
||||||
|
},
|
||||||
|
[
|
||||||
|
allProviders,
|
||||||
|
graph.credentials_input_schema?.properties,
|
||||||
|
requiredCredentials,
|
||||||
|
],
|
||||||
|
);
|
||||||
|
|
||||||
const [allRequiredInputsAreSet, missingInputs] = useMemo(() => {
|
const [allRequiredInputsAreSet, missingInputs] = useMemo(() => {
|
||||||
const nonEmptyInputs = new Set(
|
const nonEmptyInputs = new Set(
|
||||||
@@ -145,18 +229,35 @@ export function AgentRunDraftView({
|
|||||||
);
|
);
|
||||||
return [isSuperset, difference];
|
return [isSuperset, difference];
|
||||||
}, [agentInputSchema.required, inputValues]);
|
}, [agentInputSchema.required, inputValues]);
|
||||||
const [allCredentialsAreSet, missingCredentials] = useMemo(() => {
|
const [allCredentialsAreSet, missingCredentials] = useMemo(
|
||||||
const availableCredentials = new Set(Object.keys(inputCredentials));
|
function getCredentialStatus() {
|
||||||
const allCredentials = new Set(Object.keys(agentCredentialsInputFields));
|
const missing = Array.from(requiredCredentials).filter((key) => {
|
||||||
// Backwards-compatible implementation of isSupersetOf and difference
|
const cred = inputCredentials[key];
|
||||||
const isSuperset = Array.from(allCredentials).every((item) =>
|
return !cred || !cred.id;
|
||||||
availableCredentials.has(item),
|
});
|
||||||
);
|
return [missing.length === 0, missing];
|
||||||
const difference = Array.from(allCredentials).filter(
|
},
|
||||||
(item) => !availableCredentials.has(item),
|
[requiredCredentials, inputCredentials],
|
||||||
);
|
);
|
||||||
return [isSuperset, difference];
|
function addChangedCredentials(prev: Set<keyof LibraryAgentPresetUpdatable>) {
|
||||||
}, [agentCredentialsInputFields, inputCredentials]);
|
const next = new Set(prev);
|
||||||
|
next.add("credentials");
|
||||||
|
return next;
|
||||||
|
}
|
||||||
|
|
||||||
|
function handleCredentialChange(key: string, value?: CredentialsMetaInput) {
|
||||||
|
setInputCredentials(function updateInputCredentials(currentCreds) {
|
||||||
|
const next = { ...currentCreds };
|
||||||
|
if (value === undefined) {
|
||||||
|
delete next[key];
|
||||||
|
return next;
|
||||||
|
}
|
||||||
|
next[key] = value;
|
||||||
|
return next;
|
||||||
|
});
|
||||||
|
setChangedPresetAttributes(addChangedCredentials);
|
||||||
|
}
|
||||||
|
|
||||||
const notifyMissingInputs = useCallback(
|
const notifyMissingInputs = useCallback(
|
||||||
(needPresetName: boolean = true) => {
|
(needPresetName: boolean = true) => {
|
||||||
const allMissingFields = (
|
const allMissingFields = (
|
||||||
@@ -649,35 +750,6 @@ export function AgentRunDraftView({
|
|||||||
</>
|
</>
|
||||||
)}
|
)}
|
||||||
|
|
||||||
{/* Credentials inputs */}
|
|
||||||
{Object.entries(agentCredentialsInputFields).map(
|
|
||||||
([key, inputSubSchema]) => (
|
|
||||||
<CredentialsInput
|
|
||||||
key={key}
|
|
||||||
schema={{ ...inputSubSchema, discriminator: undefined }}
|
|
||||||
selectedCredentials={
|
|
||||||
inputCredentials[key] ?? inputSubSchema.default
|
|
||||||
}
|
|
||||||
onSelectCredentials={(value) => {
|
|
||||||
setInputCredentials((obj) => {
|
|
||||||
const newObj = { ...obj };
|
|
||||||
if (value === undefined) {
|
|
||||||
delete newObj[key];
|
|
||||||
return newObj;
|
|
||||||
}
|
|
||||||
return {
|
|
||||||
...obj,
|
|
||||||
[key]: value,
|
|
||||||
};
|
|
||||||
});
|
|
||||||
setChangedPresetAttributes((prev) =>
|
|
||||||
prev.add("credentials"),
|
|
||||||
);
|
|
||||||
}}
|
|
||||||
/>
|
|
||||||
),
|
|
||||||
)}
|
|
||||||
|
|
||||||
{/* Regular inputs */}
|
{/* Regular inputs */}
|
||||||
{Object.entries(agentInputFields).map(([key, inputSubSchema]) => (
|
{Object.entries(agentInputFields).map(([key, inputSubSchema]) => (
|
||||||
<RunAgentInputs
|
<RunAgentInputs
|
||||||
@@ -695,6 +767,17 @@ export function AgentRunDraftView({
|
|||||||
data-testid={`agent-input-${key}`}
|
data-testid={`agent-input-${key}`}
|
||||||
/>
|
/>
|
||||||
))}
|
))}
|
||||||
|
|
||||||
|
{/* Credentials inputs */}
|
||||||
|
{credentialFields.length > 0 && (
|
||||||
|
<CredentialsGroupedView
|
||||||
|
credentialFields={credentialFields}
|
||||||
|
requiredCredentials={requiredCredentials}
|
||||||
|
inputCredentials={inputCredentials}
|
||||||
|
inputValues={inputValues}
|
||||||
|
onCredentialChange={handleCredentialChange}
|
||||||
|
/>
|
||||||
|
)}
|
||||||
</CardContent>
|
</CardContent>
|
||||||
</Card>
|
</Card>
|
||||||
</div>
|
</div>
|
||||||
|
|||||||
@@ -2,6 +2,7 @@
|
|||||||
import { Button } from "@/components/atoms/Button/Button";
|
import { Button } from "@/components/atoms/Button/Button";
|
||||||
import { FileInput } from "@/components/atoms/FileInput/FileInput";
|
import { FileInput } from "@/components/atoms/FileInput/FileInput";
|
||||||
import { Input } from "@/components/atoms/Input/Input";
|
import { Input } from "@/components/atoms/Input/Input";
|
||||||
|
import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
|
||||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||||
import {
|
import {
|
||||||
Form,
|
Form,
|
||||||
@@ -120,7 +121,7 @@ export default function LibraryUploadAgentDialog() {
|
|||||||
>
|
>
|
||||||
{isUploading ? (
|
{isUploading ? (
|
||||||
<div className="flex items-center gap-2">
|
<div className="flex items-center gap-2">
|
||||||
<div className="h-4 w-4 animate-spin rounded-full border-b-2 border-t-2 border-white"></div>
|
<LoadingSpinner size="small" className="text-white" />
|
||||||
<span>Uploading...</span>
|
<span>Uploading...</span>
|
||||||
</div>
|
</div>
|
||||||
) : (
|
) : (
|
||||||
|
|||||||
@@ -6383,6 +6383,11 @@
|
|||||||
"title": "Has Human In The Loop",
|
"title": "Has Human In The Loop",
|
||||||
"readOnly": true
|
"readOnly": true
|
||||||
},
|
},
|
||||||
|
"has_sensitive_action": {
|
||||||
|
"type": "boolean",
|
||||||
|
"title": "Has Sensitive Action",
|
||||||
|
"readOnly": true
|
||||||
|
},
|
||||||
"trigger_setup_info": {
|
"trigger_setup_info": {
|
||||||
"anyOf": [
|
"anyOf": [
|
||||||
{ "$ref": "#/components/schemas/GraphTriggerInfo" },
|
{ "$ref": "#/components/schemas/GraphTriggerInfo" },
|
||||||
@@ -6399,6 +6404,7 @@
|
|||||||
"output_schema",
|
"output_schema",
|
||||||
"has_external_trigger",
|
"has_external_trigger",
|
||||||
"has_human_in_the_loop",
|
"has_human_in_the_loop",
|
||||||
|
"has_sensitive_action",
|
||||||
"trigger_setup_info"
|
"trigger_setup_info"
|
||||||
],
|
],
|
||||||
"title": "BaseGraph"
|
"title": "BaseGraph"
|
||||||
@@ -7629,6 +7635,11 @@
|
|||||||
"title": "Has Human In The Loop",
|
"title": "Has Human In The Loop",
|
||||||
"readOnly": true
|
"readOnly": true
|
||||||
},
|
},
|
||||||
|
"has_sensitive_action": {
|
||||||
|
"type": "boolean",
|
||||||
|
"title": "Has Sensitive Action",
|
||||||
|
"readOnly": true
|
||||||
|
},
|
||||||
"trigger_setup_info": {
|
"trigger_setup_info": {
|
||||||
"anyOf": [
|
"anyOf": [
|
||||||
{ "$ref": "#/components/schemas/GraphTriggerInfo" },
|
{ "$ref": "#/components/schemas/GraphTriggerInfo" },
|
||||||
@@ -7652,6 +7663,7 @@
|
|||||||
"output_schema",
|
"output_schema",
|
||||||
"has_external_trigger",
|
"has_external_trigger",
|
||||||
"has_human_in_the_loop",
|
"has_human_in_the_loop",
|
||||||
|
"has_sensitive_action",
|
||||||
"trigger_setup_info",
|
"trigger_setup_info",
|
||||||
"credentials_input_schema"
|
"credentials_input_schema"
|
||||||
],
|
],
|
||||||
@@ -7730,6 +7742,11 @@
|
|||||||
"title": "Has Human In The Loop",
|
"title": "Has Human In The Loop",
|
||||||
"readOnly": true
|
"readOnly": true
|
||||||
},
|
},
|
||||||
|
"has_sensitive_action": {
|
||||||
|
"type": "boolean",
|
||||||
|
"title": "Has Sensitive Action",
|
||||||
|
"readOnly": true
|
||||||
|
},
|
||||||
"trigger_setup_info": {
|
"trigger_setup_info": {
|
||||||
"anyOf": [
|
"anyOf": [
|
||||||
{ "$ref": "#/components/schemas/GraphTriggerInfo" },
|
{ "$ref": "#/components/schemas/GraphTriggerInfo" },
|
||||||
@@ -7754,6 +7771,7 @@
|
|||||||
"output_schema",
|
"output_schema",
|
||||||
"has_external_trigger",
|
"has_external_trigger",
|
||||||
"has_human_in_the_loop",
|
"has_human_in_the_loop",
|
||||||
|
"has_sensitive_action",
|
||||||
"trigger_setup_info",
|
"trigger_setup_info",
|
||||||
"credentials_input_schema"
|
"credentials_input_schema"
|
||||||
],
|
],
|
||||||
@@ -7762,8 +7780,14 @@
|
|||||||
"GraphSettings": {
|
"GraphSettings": {
|
||||||
"properties": {
|
"properties": {
|
||||||
"human_in_the_loop_safe_mode": {
|
"human_in_the_loop_safe_mode": {
|
||||||
"anyOf": [{ "type": "boolean" }, { "type": "null" }],
|
"type": "boolean",
|
||||||
"title": "Human In The Loop Safe Mode"
|
"title": "Human In The Loop Safe Mode",
|
||||||
|
"default": true
|
||||||
|
},
|
||||||
|
"sensitive_action_safe_mode": {
|
||||||
|
"type": "boolean",
|
||||||
|
"title": "Sensitive Action Safe Mode",
|
||||||
|
"default": false
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"type": "object",
|
"type": "object",
|
||||||
@@ -7921,6 +7945,16 @@
|
|||||||
"title": "Has External Trigger",
|
"title": "Has External Trigger",
|
||||||
"description": "Whether the agent has an external trigger (e.g. webhook) node"
|
"description": "Whether the agent has an external trigger (e.g. webhook) node"
|
||||||
},
|
},
|
||||||
|
"has_human_in_the_loop": {
|
||||||
|
"type": "boolean",
|
||||||
|
"title": "Has Human In The Loop",
|
||||||
|
"description": "Whether the agent has human-in-the-loop blocks"
|
||||||
|
},
|
||||||
|
"has_sensitive_action": {
|
||||||
|
"type": "boolean",
|
||||||
|
"title": "Has Sensitive Action",
|
||||||
|
"description": "Whether the agent has sensitive action blocks"
|
||||||
|
},
|
||||||
"trigger_setup_info": {
|
"trigger_setup_info": {
|
||||||
"anyOf": [
|
"anyOf": [
|
||||||
{ "$ref": "#/components/schemas/GraphTriggerInfo" },
|
{ "$ref": "#/components/schemas/GraphTriggerInfo" },
|
||||||
@@ -7967,6 +8001,8 @@
|
|||||||
"output_schema",
|
"output_schema",
|
||||||
"credentials_input_schema",
|
"credentials_input_schema",
|
||||||
"has_external_trigger",
|
"has_external_trigger",
|
||||||
|
"has_human_in_the_loop",
|
||||||
|
"has_sensitive_action",
|
||||||
"new_output",
|
"new_output",
|
||||||
"can_access_graph",
|
"can_access_graph",
|
||||||
"is_latest_version",
|
"is_latest_version",
|
||||||
@@ -9389,6 +9425,12 @@
|
|||||||
"type": "array",
|
"type": "array",
|
||||||
"title": "Reviews",
|
"title": "Reviews",
|
||||||
"description": "All reviews with their approval status, data, and messages"
|
"description": "All reviews with their approval status, data, and messages"
|
||||||
|
},
|
||||||
|
"auto_approve_future_actions": {
|
||||||
|
"type": "boolean",
|
||||||
|
"title": "Auto Approve Future Actions",
|
||||||
|
"description": "If true, future reviews from the same blocks (nodes) being approved will be automatically approved for the remainder of this execution. This only affects the current execution run.",
|
||||||
|
"default": false
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"type": "object",
|
"type": "object",
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
import { CredentialsProvidersContextType } from "@/providers/agent-credentials/credentials-provider";
|
import { CredentialsProvidersContextType } from "@/providers/agent-credentials/credentials-provider";
|
||||||
import { getSystemCredentials } from "../../helpers";
|
import { filterSystemCredentials, getSystemCredentials } from "../../helpers";
|
||||||
|
|
||||||
export type CredentialField = [string, any];
|
export type CredentialField = [string, any];
|
||||||
|
|
||||||
@@ -208,3 +208,42 @@ export function findSavedCredentialByProviderAndType(
|
|||||||
|
|
||||||
return undefined;
|
return undefined;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export function findSavedUserCredentialByProviderAndType(
|
||||||
|
providerNames: string[],
|
||||||
|
credentialTypes: string[],
|
||||||
|
requiredScopes: string[] | undefined,
|
||||||
|
allProviders: CredentialsProvidersContextType | null,
|
||||||
|
): SavedCredential | undefined {
|
||||||
|
for (const providerName of providerNames) {
|
||||||
|
const providerData = allProviders?.[providerName];
|
||||||
|
if (!providerData) continue;
|
||||||
|
|
||||||
|
const userCredentials = filterSystemCredentials(
|
||||||
|
providerData.savedCredentials ?? [],
|
||||||
|
);
|
||||||
|
|
||||||
|
const matchingCredentials: SavedCredential[] = [];
|
||||||
|
|
||||||
|
for (const credential of userCredentials) {
|
||||||
|
const typeMatches =
|
||||||
|
credentialTypes.length === 0 ||
|
||||||
|
credentialTypes.includes(credential.type);
|
||||||
|
const scopesMatch = hasRequiredScopes(credential, requiredScopes);
|
||||||
|
|
||||||
|
if (!typeMatches) continue;
|
||||||
|
if (!scopesMatch) continue;
|
||||||
|
|
||||||
|
matchingCredentials.push(credential as SavedCredential);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (matchingCredentials.length === 1) {
|
||||||
|
return matchingCredentials[0];
|
||||||
|
}
|
||||||
|
if (matchingCredentials.length > 1) {
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|||||||
@@ -98,24 +98,20 @@ export function useCredentialsInput({
|
|||||||
|
|
||||||
// Auto-select the first available credential on initial mount
|
// Auto-select the first available credential on initial mount
|
||||||
// Once a user has made a selection, we don't override it
|
// Once a user has made a selection, we don't override it
|
||||||
useEffect(() => {
|
useEffect(
|
||||||
if (readOnly) return;
|
function autoSelectCredential() {
|
||||||
if (!credentials || !("savedCredentials" in credentials)) return;
|
if (readOnly) return;
|
||||||
|
if (!credentials || !("savedCredentials" in credentials)) return;
|
||||||
|
if (selectedCredential?.id) return;
|
||||||
|
|
||||||
// If already selected, don't auto-select
|
const savedCreds = credentials.savedCredentials;
|
||||||
if (selectedCredential?.id) return;
|
if (savedCreds.length === 0) return;
|
||||||
|
|
||||||
// Only attempt auto-selection once
|
if (hasAttemptedAutoSelect.current) return;
|
||||||
if (hasAttemptedAutoSelect.current) return;
|
hasAttemptedAutoSelect.current = true;
|
||||||
hasAttemptedAutoSelect.current = true;
|
|
||||||
|
|
||||||
// If optional, don't auto-select (user can choose "None")
|
if (isOptional) return;
|
||||||
if (isOptional) return;
|
|
||||||
|
|
||||||
const savedCreds = credentials.savedCredentials;
|
|
||||||
|
|
||||||
// Auto-select the first credential if any are available
|
|
||||||
if (savedCreds.length > 0) {
|
|
||||||
const cred = savedCreds[0];
|
const cred = savedCreds[0];
|
||||||
onSelectCredential({
|
onSelectCredential({
|
||||||
id: cred.id,
|
id: cred.id,
|
||||||
@@ -123,14 +119,15 @@ export function useCredentialsInput({
|
|||||||
provider: credentials.provider,
|
provider: credentials.provider,
|
||||||
title: (cred as any).title,
|
title: (cred as any).title,
|
||||||
});
|
});
|
||||||
}
|
},
|
||||||
}, [
|
[
|
||||||
credentials,
|
credentials,
|
||||||
selectedCredential?.id,
|
selectedCredential?.id,
|
||||||
readOnly,
|
readOnly,
|
||||||
isOptional,
|
isOptional,
|
||||||
onSelectCredential,
|
onSelectCredential,
|
||||||
]);
|
],
|
||||||
|
);
|
||||||
|
|
||||||
if (
|
if (
|
||||||
!credentials ||
|
!credentials ||
|
||||||
|
|||||||
@@ -0,0 +1,33 @@
|
|||||||
|
"use client";
|
||||||
|
|
||||||
|
import * as PopoverPrimitive from "@radix-ui/react-popover";
|
||||||
|
import * as React from "react";
|
||||||
|
|
||||||
|
import { cn } from "@/lib/utils";
|
||||||
|
|
||||||
|
const Popover = PopoverPrimitive.Root;
|
||||||
|
|
||||||
|
const PopoverTrigger = PopoverPrimitive.Trigger;
|
||||||
|
|
||||||
|
const PopoverAnchor = PopoverPrimitive.Anchor;
|
||||||
|
|
||||||
|
const PopoverContent = React.forwardRef<
|
||||||
|
React.ElementRef<typeof PopoverPrimitive.Content>,
|
||||||
|
React.ComponentPropsWithoutRef<typeof PopoverPrimitive.Content>
|
||||||
|
>(({ className, align = "center", sideOffset = 4, ...props }, ref) => (
|
||||||
|
<PopoverPrimitive.Portal>
|
||||||
|
<PopoverPrimitive.Content
|
||||||
|
ref={ref}
|
||||||
|
align={align}
|
||||||
|
sideOffset={sideOffset}
|
||||||
|
className={cn(
|
||||||
|
"z-50 w-72 rounded-lg border border-zinc-200 bg-white p-4 text-zinc-900 shadow-md outline-none data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2",
|
||||||
|
className,
|
||||||
|
)}
|
||||||
|
{...props}
|
||||||
|
/>
|
||||||
|
</PopoverPrimitive.Portal>
|
||||||
|
));
|
||||||
|
PopoverContent.displayName = PopoverPrimitive.Content.displayName;
|
||||||
|
|
||||||
|
export { Popover, PopoverAnchor, PopoverContent, PopoverTrigger };
|
||||||
@@ -31,6 +31,29 @@ export function FloatingReviewsPanel({
|
|||||||
query: {
|
query: {
|
||||||
enabled: !!(graphId && executionId),
|
enabled: !!(graphId && executionId),
|
||||||
select: okData,
|
select: okData,
|
||||||
|
// Poll while execution is in progress to detect status changes
|
||||||
|
refetchInterval: (q) => {
|
||||||
|
// Note: refetchInterval callback receives raw data before select transform
|
||||||
|
const rawData = q.state.data as
|
||||||
|
| { status: number; data?: { status?: string } }
|
||||||
|
| undefined;
|
||||||
|
if (rawData?.status !== 200) return false;
|
||||||
|
|
||||||
|
const status = rawData?.data?.status;
|
||||||
|
if (!status) return false;
|
||||||
|
|
||||||
|
// Poll every 2 seconds while running or in review
|
||||||
|
if (
|
||||||
|
status === AgentExecutionStatus.RUNNING ||
|
||||||
|
status === AgentExecutionStatus.QUEUED ||
|
||||||
|
status === AgentExecutionStatus.INCOMPLETE ||
|
||||||
|
status === AgentExecutionStatus.REVIEW
|
||||||
|
) {
|
||||||
|
return 2000;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
},
|
||||||
|
refetchIntervalInBackground: true,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
);
|
);
|
||||||
@@ -40,23 +63,27 @@ export function FloatingReviewsPanel({
|
|||||||
useShallow((state) => state.graphExecutionStatus),
|
useShallow((state) => state.graphExecutionStatus),
|
||||||
);
|
);
|
||||||
|
|
||||||
|
// Determine if we should poll for pending reviews
|
||||||
|
const isInReviewStatus =
|
||||||
|
executionDetails?.status === AgentExecutionStatus.REVIEW ||
|
||||||
|
graphExecutionStatus === AgentExecutionStatus.REVIEW;
|
||||||
|
|
||||||
const { pendingReviews, isLoading, refetch } = usePendingReviewsForExecution(
|
const { pendingReviews, isLoading, refetch } = usePendingReviewsForExecution(
|
||||||
executionId || "",
|
executionId || "",
|
||||||
|
{
|
||||||
|
enabled: !!executionId,
|
||||||
|
// Poll every 2 seconds when in REVIEW status to catch new reviews
|
||||||
|
refetchInterval: isInReviewStatus ? 2000 : false,
|
||||||
|
},
|
||||||
);
|
);
|
||||||
|
|
||||||
|
// Refetch pending reviews when execution status changes
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
if (executionId) {
|
if (executionId && executionDetails?.status) {
|
||||||
refetch();
|
refetch();
|
||||||
}
|
}
|
||||||
}, [executionDetails?.status, executionId, refetch]);
|
}, [executionDetails?.status, executionId, refetch]);
|
||||||
|
|
||||||
// Refetch when graph execution status changes to REVIEW
|
|
||||||
useEffect(() => {
|
|
||||||
if (graphExecutionStatus === AgentExecutionStatus.REVIEW && executionId) {
|
|
||||||
refetch();
|
|
||||||
}
|
|
||||||
}, [graphExecutionStatus, executionId, refetch]);
|
|
||||||
|
|
||||||
if (
|
if (
|
||||||
!executionId ||
|
!executionId ||
|
||||||
(!isLoading &&
|
(!isLoading &&
|
||||||
|
|||||||
@@ -1,8 +1,9 @@
|
|||||||
import { useState } from "react";
|
import { useState, useCallback } from "react";
|
||||||
import { PendingHumanReviewModel } from "@/app/api/__generated__/models/pendingHumanReviewModel";
|
import { PendingHumanReviewModel } from "@/app/api/__generated__/models/pendingHumanReviewModel";
|
||||||
import { PendingReviewCard } from "@/components/organisms/PendingReviewCard/PendingReviewCard";
|
import { PendingReviewCard } from "@/components/organisms/PendingReviewCard/PendingReviewCard";
|
||||||
import { Text } from "@/components/atoms/Text/Text";
|
import { Text } from "@/components/atoms/Text/Text";
|
||||||
import { Button } from "@/components/atoms/Button/Button";
|
import { Button } from "@/components/atoms/Button/Button";
|
||||||
|
import { Switch } from "@/components/atoms/Switch/Switch";
|
||||||
import { useToast } from "@/components/molecules/Toast/use-toast";
|
import { useToast } from "@/components/molecules/Toast/use-toast";
|
||||||
import { ClockIcon, WarningIcon } from "@phosphor-icons/react";
|
import { ClockIcon, WarningIcon } from "@phosphor-icons/react";
|
||||||
import { usePostV2ProcessReviewAction } from "@/app/api/__generated__/endpoints/executions/executions";
|
import { usePostV2ProcessReviewAction } from "@/app/api/__generated__/endpoints/executions/executions";
|
||||||
@@ -40,6 +41,8 @@ export function PendingReviewsList({
|
|||||||
"approve" | "reject" | null
|
"approve" | "reject" | null
|
||||||
>(null);
|
>(null);
|
||||||
|
|
||||||
|
const [autoApproveFuture, setAutoApproveFuture] = useState(false);
|
||||||
|
|
||||||
const { toast } = useToast();
|
const { toast } = useToast();
|
||||||
|
|
||||||
const reviewActionMutation = usePostV2ProcessReviewAction({
|
const reviewActionMutation = usePostV2ProcessReviewAction({
|
||||||
@@ -92,6 +95,26 @@ export function PendingReviewsList({
|
|||||||
setReviewMessageMap((prev) => ({ ...prev, [nodeExecId]: message }));
|
setReviewMessageMap((prev) => ({ ...prev, [nodeExecId]: message }));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Reset data to original values when toggling auto-approve
|
||||||
|
const handleAutoApproveFutureToggle = useCallback(
|
||||||
|
(enabled: boolean) => {
|
||||||
|
setAutoApproveFuture(enabled);
|
||||||
|
if (enabled) {
|
||||||
|
// Reset all data to original values
|
||||||
|
const originalData: Record<string, string> = {};
|
||||||
|
reviews.forEach((review) => {
|
||||||
|
originalData[review.node_exec_id] = JSON.stringify(
|
||||||
|
review.payload,
|
||||||
|
null,
|
||||||
|
2,
|
||||||
|
);
|
||||||
|
});
|
||||||
|
setReviewDataMap(originalData);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
[reviews],
|
||||||
|
);
|
||||||
|
|
||||||
function processReviews(approved: boolean) {
|
function processReviews(approved: boolean) {
|
||||||
if (reviews.length === 0) {
|
if (reviews.length === 0) {
|
||||||
toast({
|
toast({
|
||||||
@@ -109,22 +132,31 @@ export function PendingReviewsList({
|
|||||||
const reviewData = reviewDataMap[review.node_exec_id];
|
const reviewData = reviewDataMap[review.node_exec_id];
|
||||||
const reviewMessage = reviewMessageMap[review.node_exec_id];
|
const reviewMessage = reviewMessageMap[review.node_exec_id];
|
||||||
|
|
||||||
let parsedData: any = review.payload; // Default to original payload
|
// When auto-approving future actions, send undefined (use original data)
|
||||||
|
// Otherwise, parse and send the edited data if available
|
||||||
|
let parsedData: any = undefined;
|
||||||
|
|
||||||
// Parse edited data if available and editable
|
if (!autoApproveFuture) {
|
||||||
if (review.editable && reviewData) {
|
// For regular approve/reject, use edited data if available
|
||||||
try {
|
if (review.editable && reviewData) {
|
||||||
parsedData = JSON.parse(reviewData);
|
try {
|
||||||
} catch (error) {
|
parsedData = JSON.parse(reviewData);
|
||||||
toast({
|
} catch (error) {
|
||||||
title: "Invalid JSON",
|
toast({
|
||||||
description: `Please fix the JSON format in review for node ${review.node_exec_id}: ${error instanceof Error ? error.message : "Invalid syntax"}`,
|
title: "Invalid JSON",
|
||||||
variant: "destructive",
|
description: `Please fix the JSON format in review for node ${review.node_exec_id}: ${error instanceof Error ? error.message : "Invalid syntax"}`,
|
||||||
});
|
variant: "destructive",
|
||||||
setPendingAction(null);
|
});
|
||||||
return;
|
setPendingAction(null);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// No edits, use original payload
|
||||||
|
parsedData = review.payload;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
// When autoApproveFuture is true, parsedData stays undefined
|
||||||
|
// Backend will use the original payload stored in the database
|
||||||
|
|
||||||
reviewItems.push({
|
reviewItems.push({
|
||||||
node_exec_id: review.node_exec_id,
|
node_exec_id: review.node_exec_id,
|
||||||
@@ -137,6 +169,7 @@ export function PendingReviewsList({
|
|||||||
reviewActionMutation.mutate({
|
reviewActionMutation.mutate({
|
||||||
data: {
|
data: {
|
||||||
reviews: reviewItems,
|
reviews: reviewItems,
|
||||||
|
auto_approve_future_actions: autoApproveFuture && approved,
|
||||||
},
|
},
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
@@ -182,21 +215,37 @@ export function PendingReviewsList({
|
|||||||
<div className="space-y-7">
|
<div className="space-y-7">
|
||||||
{reviews.map((review) => (
|
{reviews.map((review) => (
|
||||||
<PendingReviewCard
|
<PendingReviewCard
|
||||||
key={review.node_exec_id}
|
key={`${review.node_exec_id}-${autoApproveFuture}`}
|
||||||
review={review}
|
review={review}
|
||||||
onReviewDataChange={handleReviewDataChange}
|
onReviewDataChange={handleReviewDataChange}
|
||||||
onReviewMessageChange={handleReviewMessageChange}
|
onReviewMessageChange={handleReviewMessageChange}
|
||||||
reviewMessage={reviewMessageMap[review.node_exec_id] || ""}
|
reviewMessage={reviewMessageMap[review.node_exec_id] || ""}
|
||||||
|
isDisabled={autoApproveFuture}
|
||||||
/>
|
/>
|
||||||
))}
|
))}
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<div className="space-y-7">
|
<div className="space-y-4">
|
||||||
<Text variant="body" className="text-textGrey">
|
{/* Auto-approve toggle */}
|
||||||
Note: Changes you make here apply only to this task
|
<div className="flex items-center gap-3">
|
||||||
</Text>
|
<Switch
|
||||||
|
checked={autoApproveFuture}
|
||||||
|
onCheckedChange={handleAutoApproveFutureToggle}
|
||||||
|
disabled={reviewActionMutation.isPending}
|
||||||
|
/>
|
||||||
|
<Text variant="body" className="text-textBlack">
|
||||||
|
Auto-approve all future actions from these blocks
|
||||||
|
</Text>
|
||||||
|
</div>
|
||||||
|
|
||||||
<div className="flex gap-2">
|
{autoApproveFuture && (
|
||||||
|
<Text variant="small" className="text-amber-600">
|
||||||
|
Editing is disabled. Original data will be used for this and all
|
||||||
|
future reviews from these blocks.
|
||||||
|
</Text>
|
||||||
|
)}
|
||||||
|
|
||||||
|
<div className="flex flex-wrap gap-2">
|
||||||
<Button
|
<Button
|
||||||
onClick={() => processReviews(true)}
|
onClick={() => processReviews(true)}
|
||||||
disabled={reviewActionMutation.isPending || reviews.length === 0}
|
disabled={reviewActionMutation.isPending || reviews.length === 0}
|
||||||
@@ -220,6 +269,11 @@ export function PendingReviewsList({
|
|||||||
Reject
|
Reject
|
||||||
</Button>
|
</Button>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
<Text variant="small" className="text-textGrey">
|
||||||
|
You can turn auto-approval on or off anytime in this agent's
|
||||||
|
settings.
|
||||||
|
</Text>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
);
|
);
|
||||||
|
|||||||
@@ -35,12 +35,13 @@ export const CredentialFieldTitle = (props: {
|
|||||||
uiOptions,
|
uiOptions,
|
||||||
);
|
);
|
||||||
|
|
||||||
const credentialProvider = toDisplayName(
|
const provider = getCredentialProviderFromSchema(
|
||||||
getCredentialProviderFromSchema(
|
useNodeStore.getState().getHardCodedValues(nodeId),
|
||||||
useNodeStore.getState().getHardCodedValues(nodeId),
|
schema as BlockIOCredentialsSubSchema,
|
||||||
schema as BlockIOCredentialsSubSchema,
|
|
||||||
) ?? "",
|
|
||||||
);
|
);
|
||||||
|
const credentialProvider = provider
|
||||||
|
? `${toDisplayName(provider)} credential`
|
||||||
|
: "credential";
|
||||||
|
|
||||||
const updatedUiSchema = updateUiOption(uiSchema, {
|
const updatedUiSchema = updateUiOption(uiSchema, {
|
||||||
showHandles: false,
|
showHandles: false,
|
||||||
|
|||||||
@@ -0,0 +1,92 @@
|
|||||||
|
"use client";
|
||||||
|
|
||||||
|
import {
|
||||||
|
descriptionId,
|
||||||
|
FieldProps,
|
||||||
|
getTemplate,
|
||||||
|
RJSFSchema,
|
||||||
|
titleId,
|
||||||
|
} from "@rjsf/utils";
|
||||||
|
import { useMemo } from "react";
|
||||||
|
import { LlmModelPicker } from "./components/LlmModelPicker";
|
||||||
|
import { LlmModelMetadataMap } from "./types";
|
||||||
|
import { updateUiOption } from "../../helpers";
|
||||||
|
|
||||||
|
type LlmModelSchema = RJSFSchema & {
|
||||||
|
llm_model_metadata?: LlmModelMetadataMap;
|
||||||
|
};
|
||||||
|
|
||||||
|
export function LlmModelField(props: FieldProps) {
|
||||||
|
const { schema, formData, onChange, disabled, readonly, fieldPathId } = props;
|
||||||
|
|
||||||
|
const metadata = useMemo(() => {
|
||||||
|
return (schema as LlmModelSchema)?.llm_model_metadata ?? {};
|
||||||
|
}, [schema]);
|
||||||
|
|
||||||
|
const models = useMemo(() => {
|
||||||
|
return Object.values(metadata);
|
||||||
|
}, [metadata]);
|
||||||
|
|
||||||
|
const selectedName =
|
||||||
|
typeof formData === "string"
|
||||||
|
? formData
|
||||||
|
: typeof schema.default === "string"
|
||||||
|
? schema.default
|
||||||
|
: "";
|
||||||
|
|
||||||
|
const selectedModel = selectedName
|
||||||
|
? (metadata[selectedName] ??
|
||||||
|
models.find((model) => model.name === selectedName))
|
||||||
|
: undefined;
|
||||||
|
|
||||||
|
const recommendedName =
|
||||||
|
typeof schema.default === "string" ? schema.default : models[0]?.name;
|
||||||
|
|
||||||
|
const recommendedModel =
|
||||||
|
recommendedName && metadata[recommendedName]
|
||||||
|
? metadata[recommendedName]
|
||||||
|
: undefined;
|
||||||
|
|
||||||
|
if (models.length === 0) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
const TitleFieldTemplate = getTemplate("TitleFieldTemplate", props.registry);
|
||||||
|
const DescriptionFieldTemplate = getTemplate(
|
||||||
|
"DescriptionFieldTemplate",
|
||||||
|
props.registry,
|
||||||
|
);
|
||||||
|
|
||||||
|
const updatedUiSchema = updateUiOption(props.uiSchema, {
|
||||||
|
showHandles: false,
|
||||||
|
});
|
||||||
|
|
||||||
|
return (
|
||||||
|
<>
|
||||||
|
<div className="flex items-center gap-2">
|
||||||
|
<TitleFieldTemplate
|
||||||
|
id={titleId(fieldPathId)}
|
||||||
|
title={schema.title || ""}
|
||||||
|
required={true}
|
||||||
|
schema={schema}
|
||||||
|
uiSchema={updatedUiSchema}
|
||||||
|
registry={props.registry}
|
||||||
|
/>
|
||||||
|
<DescriptionFieldTemplate
|
||||||
|
id={descriptionId(fieldPathId)}
|
||||||
|
description={schema.description || ""}
|
||||||
|
schema={schema}
|
||||||
|
registry={props.registry}
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<LlmModelPicker
|
||||||
|
models={models}
|
||||||
|
selectedModel={selectedModel}
|
||||||
|
recommendedModel={recommendedModel}
|
||||||
|
onSelect={(value) => onChange(value, fieldPathId?.path)}
|
||||||
|
disabled={disabled || readonly}
|
||||||
|
/>
|
||||||
|
</>
|
||||||
|
);
|
||||||
|
}
|
||||||
@@ -0,0 +1,66 @@
|
|||||||
|
"use client";
|
||||||
|
|
||||||
|
import Image from "next/image";
|
||||||
|
import { Text } from "@/components/atoms/Text/Text";
|
||||||
|
|
||||||
|
const creatorIconMap: Record<string, string> = {
|
||||||
|
anthropic: "/integrations/anthropic-color.png",
|
||||||
|
openai: "/integrations/openai.png",
|
||||||
|
google: "/integrations/gemini.png",
|
||||||
|
nvidia: "/integrations/nvidia.png",
|
||||||
|
groq: "/integrations/groq.png",
|
||||||
|
ollama: "/integrations/ollama.png",
|
||||||
|
openrouter: "/integrations/open_router.png",
|
||||||
|
v0: "/integrations/v0.png",
|
||||||
|
xai: "/integrations/xai.webp",
|
||||||
|
meta: "/integrations/llama_api.png",
|
||||||
|
amazon: "/integrations/amazon.png",
|
||||||
|
cohere: "/integrations/cohere.png",
|
||||||
|
deepseek: "/integrations/deepseek.png",
|
||||||
|
gryphe: "/integrations/gryphe.png",
|
||||||
|
microsoft: "/integrations/microsoft.webp",
|
||||||
|
moonshotai: "/integrations/moonshot.png",
|
||||||
|
mistral: "/integrations/mistral.png",
|
||||||
|
mistralai: "/integrations/mistral.png",
|
||||||
|
nousresearch: "/integrations/nousresearch.avif",
|
||||||
|
perplexity: "/integrations/perplexity.webp",
|
||||||
|
qwen: "/integrations/qwen.png",
|
||||||
|
};
|
||||||
|
|
||||||
|
type Props = {
|
||||||
|
value: string;
|
||||||
|
size?: number;
|
||||||
|
};
|
||||||
|
|
||||||
|
export function LlmIcon({ value, size = 20 }: Props) {
|
||||||
|
const normalized = value.trim().toLowerCase().replace(/\s+/g, "");
|
||||||
|
const src = creatorIconMap[normalized];
|
||||||
|
if (src) {
|
||||||
|
return (
|
||||||
|
<div
|
||||||
|
className="flex items-center justify-center overflow-hidden rounded-xsmall"
|
||||||
|
style={{ width: size, height: size }}
|
||||||
|
>
|
||||||
|
<Image
|
||||||
|
src={src}
|
||||||
|
alt={value}
|
||||||
|
width={size}
|
||||||
|
height={size}
|
||||||
|
className="h-full w-full object-cover"
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
const fallback = value?.trim().slice(0, 1).toUpperCase() || "?";
|
||||||
|
return (
|
||||||
|
<div
|
||||||
|
className="flex items-center justify-center rounded-xsmall bg-zinc-100"
|
||||||
|
style={{ width: size, height: size }}
|
||||||
|
>
|
||||||
|
<Text variant="small" className="text-zinc-500">
|
||||||
|
{fallback}
|
||||||
|
</Text>
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
}
|
||||||
@@ -0,0 +1,24 @@
|
|||||||
|
"use client";
|
||||||
|
|
||||||
|
import { ArrowLeftIcon } from "@phosphor-icons/react";
|
||||||
|
import { Text } from "@/components/atoms/Text/Text";
|
||||||
|
|
||||||
|
type Props = {
|
||||||
|
label: string;
|
||||||
|
onBack: () => void;
|
||||||
|
};
|
||||||
|
|
||||||
|
export function LlmMenuHeader({ label, onBack }: Props) {
|
||||||
|
return (
|
||||||
|
<button
|
||||||
|
type="button"
|
||||||
|
onClick={onBack}
|
||||||
|
className="flex w-full items-center gap-2 px-2 py-2 text-left hover:bg-zinc-100"
|
||||||
|
>
|
||||||
|
<ArrowLeftIcon className="h-4 w-4 text-zinc-800" weight="bold" />
|
||||||
|
<Text variant="body" className="text-zinc-900">
|
||||||
|
{label}
|
||||||
|
</Text>
|
||||||
|
</button>
|
||||||
|
);
|
||||||
|
}
|
||||||
@@ -0,0 +1,61 @@
|
|||||||
|
"use client";
|
||||||
|
|
||||||
|
import { CaretRightIcon, CheckIcon } from "@phosphor-icons/react";
|
||||||
|
import { Text } from "@/components/atoms/Text/Text";
|
||||||
|
import { cn } from "@/lib/utils";
|
||||||
|
|
||||||
|
type Props = {
|
||||||
|
title: string;
|
||||||
|
subtitle?: string;
|
||||||
|
icon?: React.ReactNode;
|
||||||
|
showChevron?: boolean;
|
||||||
|
rightSlot?: React.ReactNode;
|
||||||
|
onClick: () => void;
|
||||||
|
isActive?: boolean;
|
||||||
|
};
|
||||||
|
|
||||||
|
export function LlmMenuItem({
|
||||||
|
title,
|
||||||
|
subtitle,
|
||||||
|
icon,
|
||||||
|
showChevron,
|
||||||
|
rightSlot,
|
||||||
|
onClick,
|
||||||
|
isActive,
|
||||||
|
}: Props) {
|
||||||
|
const hasIcon = Boolean(icon);
|
||||||
|
|
||||||
|
return (
|
||||||
|
<button
|
||||||
|
type="button"
|
||||||
|
onClick={onClick}
|
||||||
|
className={cn("w-full py-1 pl-2 pr-4 text-left hover:bg-zinc-100")}
|
||||||
|
>
|
||||||
|
<div className="flex items-center justify-between gap-3">
|
||||||
|
<div className="flex items-center gap-2">
|
||||||
|
{icon}
|
||||||
|
<Text variant="body" className="text-zinc-900">
|
||||||
|
{title}
|
||||||
|
</Text>
|
||||||
|
</div>
|
||||||
|
<div className="flex items-center gap-2">
|
||||||
|
{isActive && (
|
||||||
|
<CheckIcon className="h-4 w-4 text-emerald-600" weight="bold" />
|
||||||
|
)}
|
||||||
|
{rightSlot}
|
||||||
|
{showChevron && (
|
||||||
|
<CaretRightIcon className="h-4 w-4 text-zinc-900" weight="bold" />
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
{subtitle && (
|
||||||
|
<Text
|
||||||
|
variant="small"
|
||||||
|
className={cn("mb-1 text-zinc-500", hasIcon && "pl-0")}
|
||||||
|
>
|
||||||
|
{subtitle}
|
||||||
|
</Text>
|
||||||
|
)}
|
||||||
|
</button>
|
||||||
|
);
|
||||||
|
}
|
||||||
@@ -0,0 +1,235 @@
|
|||||||
|
"use client";
|
||||||
|
|
||||||
|
import { useCallback, useEffect, useMemo, useState } from "react";
|
||||||
|
import { CaretDownIcon } from "@phosphor-icons/react";
|
||||||
|
import {
|
||||||
|
Popover,
|
||||||
|
PopoverContent,
|
||||||
|
PopoverTrigger,
|
||||||
|
} from "@/components/molecules/Popover/Popover";
|
||||||
|
import { Text } from "@/components/atoms/Text/Text";
|
||||||
|
import { cn } from "@/lib/utils";
|
||||||
|
import {
|
||||||
|
getCreatorDisplayName,
|
||||||
|
getModelDisplayName,
|
||||||
|
getProviderDisplayName,
|
||||||
|
groupByCreator,
|
||||||
|
groupByTitle,
|
||||||
|
} from "../helpers";
|
||||||
|
import { LlmModelMetadata } from "../types";
|
||||||
|
import { LlmIcon } from "./LlmIcon";
|
||||||
|
import { LlmMenuHeader } from "./LlmMenuHeader";
|
||||||
|
import { LlmMenuItem } from "./LlmMenuItem";
|
||||||
|
import { LlmPriceTier } from "./LlmPriceTier";
|
||||||
|
|
||||||
|
/** Which level of the drill-down menu is currently visible. */
type MenuView = "creator" | "model" | "provider";

type Props = {
  /** All models available for selection. */
  models: LlmModelMetadata[];
  /** Currently selected model, if any; drives highlighting and the trigger label. */
  selectedModel?: LlmModelMetadata;
  /** Model surfaced at the top of the menu with a "Recommended" subtitle. */
  recommendedModel?: LlmModelMetadata;
  /** Called with the chosen model's `name` when the user picks one. */
  onSelect: (value: string) => void;
  /** Disables the trigger button when true. */
  disabled?: boolean;
};
|
||||||
|
|
||||||
|
/**
 * Drill-down picker for choosing an LLM model.
 *
 * Three menu levels: creators -> models for a creator -> providers for a
 * model (the provider level only appears when the same model title is
 * served by more than one provider). Selecting a leaf calls `onSelect`
 * with the model's `name` and closes the popover.
 */
export function LlmModelPicker({
  models,
  selectedModel,
  recommendedModel,
  onSelect,
  disabled,
}: Props) {
  const [open, setOpen] = useState(false);
  const [view, setView] = useState<MenuView>("creator");
  // Drill-down state: which creator / model title the user navigated into.
  const [activeCreator, setActiveCreator] = useState<string | null>(null);
  const [activeTitle, setActiveTitle] = useState<string | null>(null);

  const modelsByCreator = useMemo(() => groupByCreator(models), [models]);

  // Alphabetical list of creator display names for the first menu level.
  const creators = useMemo(() => {
    return Array.from(modelsByCreator.keys()).sort((a, b) =>
      a.localeCompare(b),
    );
  }, [modelsByCreator]);

  // Maps each creator display name to a raw creator id usable by LlmIcon
  // (taken from the first model in that creator's bucket).
  const creatorIconValues = useMemo(() => {
    const map = new Map<string, string>();
    for (const [creator, entries] of modelsByCreator.entries()) {
      map.set(creator, entries[0]?.creator ?? creator);
    }
    return map;
  }, [modelsByCreator]);

  // On every open, reset to the creator level and pre-focus the drill-down
  // state on the current selection (or the first creator when none).
  useEffect(() => {
    if (!open) {
      return;
    }
    setView("creator");
    setActiveCreator(
      selectedModel
        ? getCreatorDisplayName(selectedModel)
        : (creators[0] ?? null),
    );
    setActiveTitle(selectedModel ? getModelDisplayName(selectedModel) : null);
  }, [open, selectedModel, creators]);

  const currentCreator = activeCreator ?? creators[0] ?? null;

  const currentModels = useMemo(() => {
    return currentCreator ? (modelsByCreator.get(currentCreator) ?? []) : [];
  }, [currentCreator, modelsByCreator]);

  const currentCreatorIcon = useMemo(() => {
    return currentModels[0]?.creator ?? currentCreator;
  }, [currentModels, currentCreator]);

  const modelsByTitle = useMemo(
    () => groupByTitle(currentModels),
    [currentModels],
  );

  // Model-level rows: one per distinct title, sorted alphabetically, with
  // the number of distinct providers (decides whether a provider level exists).
  const modelEntries = useMemo(() => {
    return Array.from(modelsByTitle.entries())
      .map(([title, entries]) => {
        const providers = new Set(entries.map((entry) => entry.provider));
        return {
          title,
          entries,
          providerCount: providers.size,
        };
      })
      .sort((a, b) => a.title.localeCompare(b.title));
  }, [modelsByTitle]);

  // Provider-level rows for the model title drilled into, if any.
  const providerEntries = useMemo(() => {
    if (!activeTitle) {
      return [];
    }
    return modelsByTitle.get(activeTitle) ?? [];
  }, [activeTitle, modelsByTitle]);

  const handleSelectModel = useCallback(
    (modelName: string) => {
      onSelect(modelName);
      setOpen(false);
    },
    [onSelect],
  );

  // Trigger button label falls back: selection -> recommendation -> first model.
  const triggerModel = selectedModel ?? recommendedModel ?? models[0];
  const triggerTitle = triggerModel
    ? getModelDisplayName(triggerModel)
    : "Select model";
  const triggerCreator = triggerModel?.creator ?? "";

  return (
    <Popover open={open} onOpenChange={setOpen}>
      <PopoverTrigger asChild>
        <button
          type="button"
          disabled={disabled}
          className={cn(
            "flex w-full min-w-[15rem] items-center rounded-lg border border-zinc-200 bg-white px-3 py-2 text-left",
            "hover:border-zinc-300 focus:outline-none focus:ring-2 focus:ring-zinc-200",
            disabled && "cursor-not-allowed opacity-60",
          )}
        >
          <LlmIcon value={triggerCreator} />
          <Text variant="body" className="ml-1 flex-1 text-zinc-900">
            {triggerTitle}
          </Text>
          <CaretDownIcon className="h-3 w-3 text-zinc-900" weight="bold" />
        </button>
      </PopoverTrigger>
      <PopoverContent
        align="start"
        sideOffset={4}
        className="max-h-[45vh] w-[--radix-popover-trigger-width] min-w-[16rem] overflow-y-auto rounded-md border border-zinc-200 bg-white p-0 shadow-[0px_1px_4px_rgba(12,12,13,0.12)]"
      >
        {/* Level 1: recommended model shortcut + creator list. */}
        {view === "creator" && (
          <div className="flex flex-col">
            {recommendedModel && (
              <>
                <LlmMenuItem
                  title={getModelDisplayName(recommendedModel)}
                  subtitle="Recommended"
                  icon={<LlmIcon value={recommendedModel.creator} />}
                  onClick={() => handleSelectModel(recommendedModel.name)}
                />
                <div className="border-b border-zinc-200" />
              </>
            )}
            {creators.map((creator) => (
              <LlmMenuItem
                key={creator}
                title={creator}
                icon={
                  <LlmIcon value={creatorIconValues.get(creator) ?? creator} />
                }
                showChevron={true}
                isActive={
                  selectedModel
                    ? getCreatorDisplayName(selectedModel) === creator
                    : false
                }
                onClick={() => {
                  setActiveCreator(creator);
                  setView("model");
                }}
              />
            ))}
          </div>
        )}
        {/* Level 2: models of the drilled-into creator. */}
        {view === "model" && currentCreator && (
          <div className="flex flex-col">
            <LlmMenuHeader
              label={currentCreator}
              onBack={() => setView("creator")}
            />
            <div className="border-b border-zinc-200" />
            {modelEntries.map((entry) => (
              <LlmMenuItem
                key={entry.title}
                title={entry.title}
                icon={<LlmIcon value={currentCreatorIcon} />}
                rightSlot={<LlmPriceTier tier={entry.entries[0]?.price_tier} />}
                showChevron={entry.providerCount > 1}
                isActive={
                  selectedModel
                    ? getModelDisplayName(selectedModel) === entry.title
                    : false
                }
                onClick={() => {
                  // Multiple providers -> drill into provider level;
                  // otherwise select the single entry directly.
                  if (entry.providerCount > 1) {
                    setActiveTitle(entry.title);
                    setView("provider");
                    return;
                  }
                  handleSelectModel(entry.entries[0].name);
                }}
              />
            ))}
          </div>
        )}
        {/* Level 3: providers serving the drilled-into model title. */}
        {view === "provider" && activeTitle && (
          <div className="flex flex-col">
            <LlmMenuHeader
              label={activeTitle}
              onBack={() => setView("model")}
            />
            <div className="border-b border-zinc-200" />
            {providerEntries.map((entry) => (
              <LlmMenuItem
                key={`${entry.title}-${entry.provider}`}
                title={getProviderDisplayName(entry)}
                icon={<LlmIcon value={entry.provider} />}
                isActive={selectedModel?.provider === entry.provider}
                onClick={() => handleSelectModel(entry.name)}
              />
            ))}
          </div>
        )}
      </PopoverContent>
    </Popover>
  );
}
|
||||||
@@ -0,0 +1,25 @@
|
|||||||
|
"use client";
|
||||||
|
|
||||||
|
import { CurrencyDollarSimpleIcon } from "@phosphor-icons/react";
|
||||||
|
|
||||||
|
type Props = {
|
||||||
|
tier?: number;
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
 * Renders one to three dollar-sign icons indicating a model's relative
 * price tier. Renders nothing when the tier is missing or non-positive;
 * values above 3 are capped at three icons.
 */
export function LlmPriceTier({ tier }: Props) {
  if (!tier || tier <= 0) {
    return null;
  }

  // Clamp into the supported 1..3 range.
  const iconCount = Math.min(3, Math.max(1, tier));
  const icons = Array.from({ length: iconCount }, (_, position) => (
    <CurrencyDollarSimpleIcon
      key={`price-${position}`}
      className="-mr-0.5 h-3 w-3"
      weight="bold"
    />
  ));

  return <div className="flex items-center text-zinc-900">{icons}</div>;
}
|
||||||
@@ -0,0 +1,35 @@
|
|||||||
|
import { LlmModelMetadata } from "./types";
|
||||||
|
|
||||||
|
export function groupByCreator(models: LlmModelMetadata[]) {
|
||||||
|
const map = new Map<string, LlmModelMetadata[]>();
|
||||||
|
for (const model of models) {
|
||||||
|
const key = getCreatorDisplayName(model);
|
||||||
|
const existing = map.get(key) ?? [];
|
||||||
|
existing.push(model);
|
||||||
|
map.set(key, existing);
|
||||||
|
}
|
||||||
|
return map;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function groupByTitle(models: LlmModelMetadata[]) {
|
||||||
|
const map = new Map<string, LlmModelMetadata[]>();
|
||||||
|
for (const model of models) {
|
||||||
|
const displayName = getModelDisplayName(model);
|
||||||
|
const existing = map.get(displayName) ?? [];
|
||||||
|
existing.push(model);
|
||||||
|
map.set(displayName, existing);
|
||||||
|
}
|
||||||
|
return map;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function getCreatorDisplayName(model: LlmModelMetadata): string {
|
||||||
|
return model.creator_name || model.creator || "";
|
||||||
|
}
|
||||||
|
|
||||||
|
export function getModelDisplayName(model: LlmModelMetadata): string {
|
||||||
|
return model.title || model.name || "";
|
||||||
|
}
|
||||||
|
|
||||||
|
export function getProviderDisplayName(model: LlmModelMetadata): string {
|
||||||
|
return model.provider_name || model.provider || "";
|
||||||
|
}
|
||||||
@@ -0,0 +1,11 @@
|
|||||||
|
/** Metadata describing a single selectable LLM model. */
export type LlmModelMetadata = {
  // Raw creator identifier (also used as the icon lookup value).
  creator: string;
  // Human-readable creator label; preferred for display over `creator`.
  creator_name: string;
  // Human-readable model title; preferred for display over `name`.
  title: string;
  // Raw provider identifier (also used as the icon lookup value).
  provider: string;
  // Human-readable provider label; preferred for display over `provider`.
  provider_name: string;
  // Unique model name; this is the value emitted when a model is selected.
  name: string;
  // Relative cost indicator, clamped to 1-3 for display; missing or 0
  // suppresses the price hint entirely.
  price_tier?: number;
};

/** Lookup of model metadata keyed by model name. */
export type LlmModelMetadataMap = Record<string, LlmModelMetadata>;
|
||||||
@@ -8,6 +8,7 @@ import {
|
|||||||
isMultiSelectSchema,
|
isMultiSelectSchema,
|
||||||
} from "../utils/schema-utils";
|
} from "../utils/schema-utils";
|
||||||
import { TableField } from "./TableField/TableField";
|
import { TableField } from "./TableField/TableField";
|
||||||
|
import { LlmModelField } from "./LlmModelField/LlmModelField";
|
||||||
|
|
||||||
export interface CustomFieldDefinition {
|
export interface CustomFieldDefinition {
|
||||||
id: string;
|
id: string;
|
||||||
@@ -57,6 +58,15 @@ export const CUSTOM_FIELDS: CustomFieldDefinition[] = [
|
|||||||
},
|
},
|
||||||
component: TableField,
|
component: TableField,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
id: "custom/llm_model_field",
|
||||||
|
matcher: (schema: any) => {
|
||||||
|
return (
|
||||||
|
typeof schema === "object" && schema !== null && "llm_model" in schema
|
||||||
|
);
|
||||||
|
},
|
||||||
|
component: LlmModelField,
|
||||||
|
},
|
||||||
];
|
];
|
||||||
|
|
||||||
export function findCustomFieldId(schema: any): string | null {
|
export function findCustomFieldId(schema: any): string | null {
|
||||||
|
|||||||
@@ -20,11 +20,15 @@ function hasHITLBlocks(graph: GraphModel | LibraryAgent | Graph): boolean {
|
|||||||
if ("has_human_in_the_loop" in graph) {
|
if ("has_human_in_the_loop" in graph) {
|
||||||
return !!graph.has_human_in_the_loop;
|
return !!graph.has_human_in_the_loop;
|
||||||
}
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
if (isLibraryAgent(graph)) {
|
function hasSensitiveActionBlocks(
|
||||||
return graph.settings?.human_in_the_loop_safe_mode !== null;
|
graph: GraphModel | LibraryAgent | Graph,
|
||||||
|
): boolean {
|
||||||
|
if ("has_sensitive_action" in graph) {
|
||||||
|
return !!graph.has_sensitive_action;
|
||||||
}
|
}
|
||||||
|
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -40,7 +44,9 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) {
|
|||||||
|
|
||||||
const graphId = getGraphId(graph);
|
const graphId = getGraphId(graph);
|
||||||
const isAgent = isLibraryAgent(graph);
|
const isAgent = isLibraryAgent(graph);
|
||||||
const shouldShowToggle = hasHITLBlocks(graph);
|
const showHITLToggle = hasHITLBlocks(graph);
|
||||||
|
const showSensitiveActionToggle = hasSensitiveActionBlocks(graph);
|
||||||
|
const shouldShowToggle = showHITLToggle || showSensitiveActionToggle;
|
||||||
|
|
||||||
const { mutateAsync: updateGraphSettings, isPending } =
|
const { mutateAsync: updateGraphSettings, isPending } =
|
||||||
usePatchV1UpdateGraphSettings();
|
usePatchV1UpdateGraphSettings();
|
||||||
@@ -56,27 +62,37 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) {
|
|||||||
},
|
},
|
||||||
);
|
);
|
||||||
|
|
||||||
const [localSafeMode, setLocalSafeMode] = useState<boolean | null>(null);
|
const [localHITLSafeMode, setLocalHITLSafeMode] = useState<boolean>(true);
|
||||||
|
const [localSensitiveActionSafeMode, setLocalSensitiveActionSafeMode] =
|
||||||
|
useState<boolean>(false);
|
||||||
|
const [isLocalStateLoaded, setIsLocalStateLoaded] = useState<boolean>(false);
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
if (!isAgent && libraryAgent) {
|
if (!isAgent && libraryAgent) {
|
||||||
const backendValue = libraryAgent.settings?.human_in_the_loop_safe_mode;
|
setLocalHITLSafeMode(
|
||||||
if (backendValue !== undefined) {
|
libraryAgent.settings?.human_in_the_loop_safe_mode ?? true,
|
||||||
setLocalSafeMode(backendValue);
|
);
|
||||||
}
|
setLocalSensitiveActionSafeMode(
|
||||||
|
libraryAgent.settings?.sensitive_action_safe_mode ?? false,
|
||||||
|
);
|
||||||
|
setIsLocalStateLoaded(true);
|
||||||
}
|
}
|
||||||
}, [isAgent, libraryAgent]);
|
}, [isAgent, libraryAgent]);
|
||||||
|
|
||||||
const currentSafeMode = isAgent
|
const currentHITLSafeMode = isAgent
|
||||||
? graph.settings?.human_in_the_loop_safe_mode
|
? (graph.settings?.human_in_the_loop_safe_mode ?? true)
|
||||||
: localSafeMode;
|
: localHITLSafeMode;
|
||||||
|
|
||||||
const isStateUndetermined = isAgent
|
const currentSensitiveActionSafeMode = isAgent
|
||||||
? graph.settings?.human_in_the_loop_safe_mode == null
|
? (graph.settings?.sensitive_action_safe_mode ?? false)
|
||||||
: isLoading || localSafeMode === null;
|
: localSensitiveActionSafeMode;
|
||||||
|
|
||||||
const handleToggle = useCallback(async () => {
|
const isHITLStateUndetermined = isAgent
|
||||||
const newSafeMode = !currentSafeMode;
|
? false
|
||||||
|
: isLoading || !isLocalStateLoaded;
|
||||||
|
|
||||||
|
const handleHITLToggle = useCallback(async () => {
|
||||||
|
const newSafeMode = !currentHITLSafeMode;
|
||||||
|
|
||||||
try {
|
try {
|
||||||
await updateGraphSettings({
|
await updateGraphSettings({
|
||||||
@@ -85,7 +101,7 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) {
|
|||||||
});
|
});
|
||||||
|
|
||||||
if (!isAgent) {
|
if (!isAgent) {
|
||||||
setLocalSafeMode(newSafeMode);
|
setLocalHITLSafeMode(newSafeMode);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (isAgent) {
|
if (isAgent) {
|
||||||
@@ -101,37 +117,62 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) {
|
|||||||
queryClient.invalidateQueries({ queryKey: ["v2", "executions"] });
|
queryClient.invalidateQueries({ queryKey: ["v2", "executions"] });
|
||||||
|
|
||||||
toast({
|
toast({
|
||||||
title: `Safe mode ${newSafeMode ? "enabled" : "disabled"}`,
|
title: `HITL safe mode ${newSafeMode ? "enabled" : "disabled"}`,
|
||||||
description: newSafeMode
|
description: newSafeMode
|
||||||
? "Human-in-the-loop blocks will require manual review"
|
? "Human-in-the-loop blocks will require manual review"
|
||||||
: "Human-in-the-loop blocks will proceed automatically",
|
: "Human-in-the-loop blocks will proceed automatically",
|
||||||
duration: 2000,
|
duration: 2000,
|
||||||
});
|
});
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
const isNotFoundError =
|
handleToggleError(error, isAgent, toast);
|
||||||
error instanceof Error &&
|
|
||||||
(error.message.includes("404") || error.message.includes("not found"));
|
|
||||||
|
|
||||||
if (!isAgent && isNotFoundError) {
|
|
||||||
toast({
|
|
||||||
title: "Safe mode not available",
|
|
||||||
description:
|
|
||||||
"To configure safe mode, please save this graph to your library first.",
|
|
||||||
variant: "destructive",
|
|
||||||
});
|
|
||||||
} else {
|
|
||||||
toast({
|
|
||||||
title: "Failed to update safe mode",
|
|
||||||
description:
|
|
||||||
error instanceof Error
|
|
||||||
? error.message
|
|
||||||
: "An unexpected error occurred.",
|
|
||||||
variant: "destructive",
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}, [
|
}, [
|
||||||
currentSafeMode,
|
currentHITLSafeMode,
|
||||||
|
graphId,
|
||||||
|
isAgent,
|
||||||
|
graph.id,
|
||||||
|
updateGraphSettings,
|
||||||
|
queryClient,
|
||||||
|
toast,
|
||||||
|
]);
|
||||||
|
|
||||||
|
const handleSensitiveActionToggle = useCallback(async () => {
|
||||||
|
const newSafeMode = !currentSensitiveActionSafeMode;
|
||||||
|
|
||||||
|
try {
|
||||||
|
await updateGraphSettings({
|
||||||
|
graphId,
|
||||||
|
data: { sensitive_action_safe_mode: newSafeMode },
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!isAgent) {
|
||||||
|
setLocalSensitiveActionSafeMode(newSafeMode);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (isAgent) {
|
||||||
|
queryClient.invalidateQueries({
|
||||||
|
queryKey: getGetV2GetLibraryAgentQueryOptions(graph.id.toString())
|
||||||
|
.queryKey,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
queryClient.invalidateQueries({
|
||||||
|
queryKey: ["v1", "graphs", graphId, "executions"],
|
||||||
|
});
|
||||||
|
queryClient.invalidateQueries({ queryKey: ["v2", "executions"] });
|
||||||
|
|
||||||
|
toast({
|
||||||
|
title: `Sensitive action safe mode ${newSafeMode ? "enabled" : "disabled"}`,
|
||||||
|
description: newSafeMode
|
||||||
|
? "Sensitive action blocks will require manual review"
|
||||||
|
: "Sensitive action blocks will proceed automatically",
|
||||||
|
duration: 2000,
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
handleToggleError(error, isAgent, toast);
|
||||||
|
}
|
||||||
|
}, [
|
||||||
|
currentSensitiveActionSafeMode,
|
||||||
graphId,
|
graphId,
|
||||||
isAgent,
|
isAgent,
|
||||||
graph.id,
|
graph.id,
|
||||||
@@ -141,11 +182,53 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) {
|
|||||||
]);
|
]);
|
||||||
|
|
||||||
return {
|
return {
|
||||||
currentSafeMode,
|
// HITL safe mode
|
||||||
|
currentHITLSafeMode,
|
||||||
|
showHITLToggle,
|
||||||
|
isHITLStateUndetermined,
|
||||||
|
handleHITLToggle,
|
||||||
|
|
||||||
|
// Sensitive action safe mode
|
||||||
|
currentSensitiveActionSafeMode,
|
||||||
|
showSensitiveActionToggle,
|
||||||
|
handleSensitiveActionToggle,
|
||||||
|
|
||||||
|
// General
|
||||||
isPending,
|
isPending,
|
||||||
shouldShowToggle,
|
shouldShowToggle,
|
||||||
isStateUndetermined,
|
|
||||||
handleToggle,
|
// Backwards compatibility
|
||||||
hasHITLBlocks: shouldShowToggle,
|
currentSafeMode: currentHITLSafeMode,
|
||||||
|
isStateUndetermined: isHITLStateUndetermined,
|
||||||
|
handleToggle: handleHITLToggle,
|
||||||
|
hasHITLBlocks: showHITLToggle,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function handleToggleError(
|
||||||
|
error: unknown,
|
||||||
|
isAgent: boolean,
|
||||||
|
toast: ReturnType<typeof useToast>["toast"],
|
||||||
|
) {
|
||||||
|
const isNotFoundError =
|
||||||
|
error instanceof Error &&
|
||||||
|
(error.message.includes("404") || error.message.includes("not found"));
|
||||||
|
|
||||||
|
if (!isAgent && isNotFoundError) {
|
||||||
|
toast({
|
||||||
|
title: "Safe mode not available",
|
||||||
|
description:
|
||||||
|
"To configure safe mode, please save this graph to your library first.",
|
||||||
|
variant: "destructive",
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
toast({
|
||||||
|
title: "Failed to update safe mode",
|
||||||
|
description:
|
||||||
|
error instanceof Error
|
||||||
|
? error.message
|
||||||
|
: "An unexpected error occurred.",
|
||||||
|
variant: "destructive",
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -15,8 +15,22 @@ export function usePendingReviews() {
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
export function usePendingReviewsForExecution(graphExecId: string) {
|
interface UsePendingReviewsForExecutionOptions {
|
||||||
const query = useGetV2GetPendingReviewsForExecution(graphExecId);
|
enabled?: boolean;
|
||||||
|
refetchInterval?: number | false;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function usePendingReviewsForExecution(
|
||||||
|
graphExecId: string,
|
||||||
|
options?: UsePendingReviewsForExecutionOptions,
|
||||||
|
) {
|
||||||
|
const query = useGetV2GetPendingReviewsForExecution(graphExecId, {
|
||||||
|
query: {
|
||||||
|
enabled: options?.enabled ?? !!graphExecId,
|
||||||
|
refetchInterval: options?.refetchInterval,
|
||||||
|
refetchIntervalInBackground: !!options?.refetchInterval,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
return {
|
return {
|
||||||
pendingReviews: okData(query.data) || [],
|
pendingReviews: okData(query.data) || [],
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ import isEqual from "lodash/isEqual";
|
|||||||
export function cleanNode(node: CustomNode) {
|
export function cleanNode(node: CustomNode) {
|
||||||
return {
|
return {
|
||||||
id: node.id,
|
id: node.id,
|
||||||
position: node.position,
|
// Note: position is intentionally excluded to prevent draft saves when dragging nodes
|
||||||
data: {
|
data: {
|
||||||
hardcodedValues: node.data.hardcodedValues,
|
hardcodedValues: node.data.hardcodedValues,
|
||||||
title: node.data.title,
|
title: node.data.title,
|
||||||
|
|||||||
@@ -106,9 +106,14 @@ export function getTimezoneDisplayName(timezone: string): string {
|
|||||||
const parts = timezone.split("/");
|
const parts = timezone.split("/");
|
||||||
const city = parts[parts.length - 1].replace(/_/g, " ");
|
const city = parts[parts.length - 1].replace(/_/g, " ");
|
||||||
const abbr = getTimezoneAbbreviation(timezone);
|
const abbr = getTimezoneAbbreviation(timezone);
|
||||||
return abbr ? `${city} (${abbr})` : city;
|
if (abbr && abbr !== timezone) {
|
||||||
|
return `${city} (${abbr})`;
|
||||||
|
}
|
||||||
|
// If abbreviation is same as timezone or not found, show timezone with underscores replaced
|
||||||
|
const timezoneDisplay = timezone.replace(/_/g, " ");
|
||||||
|
return `${city} (${timezoneDisplay})`;
|
||||||
} catch {
|
} catch {
|
||||||
return timezone;
|
return timezone.replace(/_/g, " ");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ export enum Key {
|
|||||||
LIBRARY_AGENTS_CACHE = "library-agents-cache",
|
LIBRARY_AGENTS_CACHE = "library-agents-cache",
|
||||||
CHAT_SESSION_ID = "chat_session_id",
|
CHAT_SESSION_ID = "chat_session_id",
|
||||||
COOKIE_CONSENT = "autogpt_cookie_consent",
|
COOKIE_CONSENT = "autogpt_cookie_consent",
|
||||||
|
AI_AGENT_SAFETY_POPUP_SHOWN = "ai-agent-safety-popup-shown",
|
||||||
}
|
}
|
||||||
|
|
||||||
function get(key: Key) {
|
function get(key: Key) {
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ import { LoginPage } from "./pages/login.page";
|
|||||||
import { MarketplacePage } from "./pages/marketplace.page";
|
import { MarketplacePage } from "./pages/marketplace.page";
|
||||||
import { hasMinCount, hasUrl, isVisible, matchesUrl } from "./utils/assertion";
|
import { hasMinCount, hasUrl, isVisible, matchesUrl } from "./utils/assertion";
|
||||||
|
|
||||||
|
// Marketplace tests for store agent search functionality
|
||||||
test.describe("Marketplace – Basic Functionality", () => {
|
test.describe("Marketplace – Basic Functionality", () => {
|
||||||
test("User can access marketplace page when logged out", async ({ page }) => {
|
test("User can access marketplace page when logged out", async ({ page }) => {
|
||||||
const marketplacePage = new MarketplacePage(page);
|
const marketplacePage = new MarketplacePage(page);
|
||||||
|
|||||||
44
docs/CLAUDE.md
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
# Documentation Guidelines
|
||||||
|
|
||||||
|
## Block Documentation Manual Sections
|
||||||
|
|
||||||
|
When updating manual sections (`<!-- MANUAL: ... -->`) in block documentation files (e.g., `docs/integrations/basic.md`), follow these formats:
|
||||||
|
|
||||||
|
### How It Works Section
|
||||||
|
|
||||||
|
Provide a technical explanation of how the block functions:
|
||||||
|
- Describe the processing logic in 1-2 paragraphs
|
||||||
|
- Mention any validation, error handling, or edge cases
|
||||||
|
- Use code examples with backticks when helpful (e.g., `[[1, 2], [3, 4]]` becomes `[1, 2, 3, 4]`)
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```markdown
|
||||||
|
<!-- MANUAL: how_it_works -->
|
||||||
|
The block iterates through each list in the input and extends a result list with all elements from each one. It processes lists in order, so `[[1, 2], [3, 4]]` becomes `[1, 2, 3, 4]`.
|
||||||
|
|
||||||
|
The block includes validation to ensure each item is actually a list. If a non-list value is encountered, the block outputs an error message instead of proceeding.
|
||||||
|
<!-- END MANUAL -->
|
||||||
|
```
|
||||||
|
|
||||||
|
### Use Case Section
|
||||||
|
|
||||||
|
Provide 3 practical use cases in this format:
|
||||||
|
- **Bold Heading**: Short one-sentence description
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```markdown
|
||||||
|
<!-- MANUAL: use_case -->
|
||||||
|
**Paginated API Merging**: Combine results from multiple API pages into a single list for batch processing or display.
|
||||||
|
|
||||||
|
**Parallel Task Aggregation**: Merge outputs from parallel workflow branches that each produce a list of results.
|
||||||
|
|
||||||
|
**Multi-Source Data Collection**: Combine data collected from different sources (like multiple RSS feeds or API endpoints) into one unified list.
|
||||||
|
<!-- END MANUAL -->
|
||||||
|
```
|
||||||
|
|
||||||
|
### Style Guidelines
|
||||||
|
|
||||||
|
- Keep descriptions concise and action-oriented
|
||||||
|
- Focus on practical, real-world scenarios
|
||||||
|
- Use consistent terminology with other blocks
|
||||||
|
- Avoid overly technical jargon unless necessary
|
||||||
@@ -31,6 +31,7 @@ Below is a comprehensive list of all available blocks, categorized by their prim
|
|||||||
| [Agent Time Input](basic.md#agent-time-input) | Block for time input |
|
| [Agent Time Input](basic.md#agent-time-input) | Block for time input |
|
||||||
| [Agent Toggle Input](basic.md#agent-toggle-input) | Block for boolean toggle input |
|
| [Agent Toggle Input](basic.md#agent-toggle-input) | Block for boolean toggle input |
|
||||||
| [Block Installation](basic.md#block-installation) | Given a code string, this block allows the verification and installation of a block code into the system |
|
| [Block Installation](basic.md#block-installation) | Given a code string, this block allows the verification and installation of a block code into the system |
|
||||||
|
| [Concatenate Lists](basic.md#concatenate-lists) | Concatenates multiple lists into a single list |
|
||||||
| [Dictionary Is Empty](basic.md#dictionary-is-empty) | Checks if a dictionary is empty |
|
| [Dictionary Is Empty](basic.md#dictionary-is-empty) | Checks if a dictionary is empty |
|
||||||
| [File Store](basic.md#file-store) | Stores the input file in the temporary directory |
|
| [File Store](basic.md#file-store) | Stores the input file in the temporary directory |
|
||||||
| [Find In Dictionary](basic.md#find-in-dictionary) | A block that looks up a value in a dictionary, list, or object by key or index and returns the corresponding value |
|
| [Find In Dictionary](basic.md#find-in-dictionary) | A block that looks up a value in a dictionary, list, or object by key or index and returns the corresponding value |
|
||||||
|
|||||||
@@ -634,6 +634,42 @@ This enables extensibility by allowing custom blocks to be added without modifyi
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
## Concatenate Lists
|
||||||
|
|
||||||
|
### What it is
|
||||||
|
Concatenates multiple lists into a single list. All elements from all input lists are combined in order.
|
||||||
|
|
||||||
|
### How it works
|
||||||
|
<!-- MANUAL: how_it_works -->
|
||||||
|
The block iterates through each list in the input and extends a result list with all elements from each one. It processes lists in order, so `[[1, 2], [3, 4]]` becomes `[1, 2, 3, 4]`.
|
||||||
|
|
||||||
|
The block includes validation to ensure each item is actually a list. If a non-list value (like a string or number) is encountered, the block outputs an error message instead of proceeding. None values are skipped automatically.
|
||||||
|
<!-- END MANUAL -->
|
||||||
|
|
||||||
|
### Inputs
|
||||||
|
|
||||||
|
| Input | Description | Type | Required |
|
||||||
|
|-------|-------------|------|----------|
|
||||||
|
| lists | A list of lists to concatenate together. All lists will be combined in order into a single list. | List[List[Any]] | Yes |
|
||||||
|
|
||||||
|
### Outputs
|
||||||
|
|
||||||
|
| Output | Description | Type |
|
||||||
|
|--------|-------------|------|
|
||||||
|
| error | Error message if concatenation failed due to invalid input types. | str |
|
||||||
|
| concatenated_list | The concatenated list containing all elements from all input lists in order. | List[Any] |
|
||||||
|
|
||||||
|
### Possible use case
|
||||||
|
<!-- MANUAL: use_case -->
|
||||||
|
**Paginated API Merging**: Combine results from multiple API pages into a single list for batch processing or display.
|
||||||
|
|
||||||
|
**Parallel Task Aggregation**: Merge outputs from parallel workflow branches that each produce a list of results.
|
||||||
|
|
||||||
|
**Multi-Source Data Collection**: Combine data collected from different sources (like multiple RSS feeds or API endpoints) into one unified list.
|
||||||
|
<!-- END MANUAL -->
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## Dictionary Is Empty
|
## Dictionary Is Empty
|
||||||
|
|
||||||
### What it is
|
### What it is
|
||||||
|
|||||||