diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py
index aa3cc1555d..0f94135a41 100644
--- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py
+++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/core.py
@@ -218,6 +218,7 @@ async def save_agent_to_library(
     library_agents = await library_db.create_library_agent(
         graph=created_graph,
         user_id=user_id,
+        sensitive_action_safe_mode=True,
         create_library_agents_for_sub_graphs=False,
     )
diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/utils.py b/autogpt_platform/backend/backend/api/features/chat/tools/utils.py
index 137f62fbc1..19e092c312 100644
--- a/autogpt_platform/backend/backend/api/features/chat/tools/utils.py
+++ b/autogpt_platform/backend/backend/api/features/chat/tools/utils.py
@@ -166,7 +166,6 @@ async def get_or_create_library_agent(
     library_agents = await library_db.create_library_agent(
         graph=graph,
         user_id=user_id,
-        is_ai_generated=False,
         create_library_agents_for_sub_graphs=False,
     )
     assert len(library_agents) == 1, "Expected 1 library agent to be created"
diff --git a/autogpt_platform/backend/backend/api/features/library/db.py b/autogpt_platform/backend/backend/api/features/library/db.py
index c26f3a7953..409c1a2c16 100644
--- a/autogpt_platform/backend/backend/api/features/library/db.py
+++ b/autogpt_platform/backend/backend/api/features/library/db.py
@@ -404,7 +404,7 @@ async def add_generated_agent_image(
 async def create_library_agent(
     graph: graph_db.GraphModel,
     user_id: str,
-    is_ai_generated: bool,
+    sensitive_action_safe_mode: bool = False,
     create_library_agents_for_sub_graphs: bool = True,
 ) -> list[library_model.LibraryAgent]:
     """
@@ -413,8 +413,8 @@ async def create_library_agent(
     Args:
         agent: The agent/Graph to add to the library.
         user_id: The user to whom the agent will be added.
+        sensitive_action_safe_mode: Whether sensitive action blocks require review.
        create_library_agents_for_sub_graphs: If True, creates LibraryAgent records
             for sub-graphs as well.
-        is_ai_generated: Whether this graph was AI-generated.

     Returns:
         The newly created LibraryAgent records.
@@ -450,7 +450,8 @@ async def create_library_agent(
                 },
                 settings=SafeJson(
                     GraphSettings.from_graph(
-                        graph_entry, is_ai_generated=is_ai_generated
+                        graph_entry,
+                        sensitive_action_safe_mode=sensitive_action_safe_mode,
                     ).model_dump()
                 ),
             ),
@@ -797,9 +798,7 @@ async def add_store_agent_to_library(
                     "isCreatedByUser": False,
                     "useGraphIsActiveVersion": False,
                     "settings": SafeJson(
-                        GraphSettings.from_graph(
-                            graph_model, is_ai_generated=False
-                        ).model_dump()
+                        GraphSettings.from_graph(graph_model).model_dump()
                     ),
                 },
                 include=library_agent_include(
@@ -1189,12 +1188,12 @@ async def fork_library_agent(
         )
         new_graph = await on_graph_activate(new_graph, user_id=user_id)

-        # Create a library agent for the new graph, preserving is_ai_generated flag
+        # Create a library agent for the new graph, preserving sensitive_action_safe_mode
         return (
             await create_library_agent(
                 new_graph,
                 user_id,
-                is_ai_generated=original_agent.settings.is_ai_generated_graph,
+                sensitive_action_safe_mode=original_agent.settings.sensitive_action_safe_mode,
             )
         )[0]
     except prisma.errors.PrismaError as e:
diff --git a/autogpt_platform/backend/backend/api/features/library/model.py b/autogpt_platform/backend/backend/api/features/library/model.py
index 56fad7bfd3..14d7c7be81 100644
--- a/autogpt_platform/backend/backend/api/features/library/model.py
+++ b/autogpt_platform/backend/backend/api/features/library/model.py
@@ -73,6 +73,12 @@ class LibraryAgent(pydantic.BaseModel):
     has_external_trigger: bool = pydantic.Field(
         description="Whether the agent has an external trigger (e.g. webhook) node"
     )
+    has_human_in_the_loop: bool = pydantic.Field(
+        description="Whether the agent has human-in-the-loop blocks"
+    )
+    has_sensitive_action: bool = pydantic.Field(
+        description="Whether the agent has sensitive action blocks"
+    )
     trigger_setup_info: Optional[GraphTriggerInfo] = None

     # Indicates whether there's a new output (based on recent runs)
@@ -180,6 +186,8 @@ class LibraryAgent(pydantic.BaseModel):
                 graph.credentials_input_schema if sub_graphs is not None else None
             ),
             has_external_trigger=graph.has_external_trigger,
+            has_human_in_the_loop=graph.has_human_in_the_loop,
+            has_sensitive_action=graph.has_sensitive_action,
             trigger_setup_info=graph.trigger_setup_info,
             new_output=new_output,
             can_access_graph=can_access_graph,
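Reviewer note: a minimal sketch of the caller patterns after this signature change. The function and keyword names come from the diff above; the async wrapper and placeholder arguments are assumptions for illustration.

```python
# Sketch, assuming the module layout shown in the diff paths.
from backend.api.features.library import db as library_db


async def example_callers(graph, user_id: str):
    # Chat agent generator: opts in to sensitive-action review explicitly.
    await library_db.create_library_agent(
        graph=graph,
        user_id=user_id,
        sensitive_action_safe_mode=True,
        create_library_agents_for_sub_graphs=False,
    )
    # v1 create_new_graph / store imports: rely on the False default.
    await library_db.create_library_agent(graph, user_id)
```

Giving the flag a `False` default keeps store imports and builder-created graphs on the permissive path, while the chat agent generator opts in explicitly instead of being inferred from the removed `is_ai_generated` flag.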
"properties": {}}, has_external_trigger=False, + has_human_in_the_loop=False, + has_sensitive_action=False, status=library_model.LibraryAgentStatus.COMPLETED, recommended_schedule_cron=None, new_output=False, @@ -218,6 +224,8 @@ def test_add_agent_to_library_success( output_schema={"type": "object", "properties": {}}, credentials_input_schema={"type": "object", "properties": {}}, has_external_trigger=False, + has_human_in_the_loop=False, + has_sensitive_action=False, status=library_model.LibraryAgentStatus.COMPLETED, new_output=False, can_access_graph=True, diff --git a/autogpt_platform/backend/backend/api/features/v1.py b/autogpt_platform/backend/backend/api/features/v1.py index 2213e99a46..3a5dd3ec12 100644 --- a/autogpt_platform/backend/backend/api/features/v1.py +++ b/autogpt_platform/backend/backend/api/features/v1.py @@ -762,9 +762,7 @@ async def create_new_graph( graph.validate_graph(for_run=False) await graph_db.create_graph(graph, user_id=user_id) - await library_db.create_library_agent( - graph, user_id, is_ai_generated=create_graph.is_ai_generated - ) + await library_db.create_library_agent(graph, user_id) activated_graph = await on_graph_activate(graph, user_id=user_id) if create_graph.source == "builder": @@ -892,7 +890,9 @@ async def _update_library_agent_version_and_settings( user_id, agent_graph.id, agent_graph.version ) updated_settings = GraphSettings.from_graph( - agent_graph, is_ai_generated=library.settings.is_ai_generated_graph + graph=agent_graph, + hitl_safe_mode=library.settings.human_in_the_loop_safe_mode, + sensitive_action_safe_mode=library.settings.sensitive_action_safe_mode, ) if updated_settings != library.settings: library = await library_db.update_library_agent( diff --git a/autogpt_platform/backend/backend/api/model.py b/autogpt_platform/backend/backend/api/model.py index 6457fc9269..5e13e20450 100644 --- a/autogpt_platform/backend/backend/api/model.py +++ b/autogpt_platform/backend/backend/api/model.py @@ -43,7 +43,6 @@ GraphExecutionSource = Literal["builder", "library", "onboarding"] class CreateGraph(pydantic.BaseModel): graph: Graph source: GraphCreationSource | None = None - is_ai_generated: bool = False class CreateAPIKeyRequest(pydantic.BaseModel): diff --git a/autogpt_platform/backend/backend/blocks/helpers/review.py b/autogpt_platform/backend/backend/blocks/helpers/review.py index f35397e6aa..80c28cfd14 100644 --- a/autogpt_platform/backend/backend/blocks/helpers/review.py +++ b/autogpt_platform/backend/backend/blocks/helpers/review.py @@ -84,7 +84,7 @@ class HITLReviewHelper: Exception: If review creation or status update fails """ # Skip review if safe mode is disabled - return auto-approved result - if not execution_context.safe_mode: + if not execution_context.human_in_the_loop_safe_mode: logger.info( f"Block {block_name} skipping review for node {node_exec_id} - safe mode disabled" ) diff --git a/autogpt_platform/backend/backend/blocks/human_in_the_loop.py b/autogpt_platform/backend/backend/blocks/human_in_the_loop.py index 1e338816c8..b6106843bd 100644 --- a/autogpt_platform/backend/backend/blocks/human_in_the_loop.py +++ b/autogpt_platform/backend/backend/blocks/human_in_the_loop.py @@ -104,7 +104,7 @@ class HumanInTheLoopBlock(Block): execution_context: ExecutionContext, **_kwargs, ) -> BlockOutput: - if not execution_context.safe_mode: + if not execution_context.human_in_the_loop_safe_mode: logger.info( f"HITL block skipping review for node {node_exec_id} - safe mode disabled" ) diff --git 
diff --git a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker.py b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker.py
index 8266d433ad..0f9da7e10b 100644
--- a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker.py
+++ b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker.py
@@ -242,7 +242,7 @@ async def test_smart_decision_maker_tracks_llm_stats():
     outputs = {}

     # Create execution context
-    mock_execution_context = ExecutionContext(safe_mode=False)
+    mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)

     # Create a mock execution processor for tests
@@ -343,7 +343,7 @@ async def test_smart_decision_maker_parameter_validation():

     # Create execution context
-    mock_execution_context = ExecutionContext(safe_mode=False)
+    mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)

     # Create a mock execution processor for tests
@@ -409,7 +409,7 @@ async def test_smart_decision_maker_parameter_validation():

     # Create execution context
-    mock_execution_context = ExecutionContext(safe_mode=False)
+    mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)

     # Create a mock execution processor for tests
@@ -471,7 +471,7 @@ async def test_smart_decision_maker_parameter_validation():
     outputs = {}

     # Create execution context
-    mock_execution_context = ExecutionContext(safe_mode=False)
+    mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)

     # Create a mock execution processor for tests
@@ -535,7 +535,7 @@ async def test_smart_decision_maker_parameter_validation():
     outputs = {}

     # Create execution context
-    mock_execution_context = ExecutionContext(safe_mode=False)
+    mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)

     # Create a mock execution processor for tests
@@ -658,7 +658,7 @@ async def test_smart_decision_maker_raw_response_conversion():
     outputs = {}

     # Create execution context
-    mock_execution_context = ExecutionContext(safe_mode=False)
+    mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)

     # Create a mock execution processor for tests
@@ -730,7 +730,7 @@ async def test_smart_decision_maker_raw_response_conversion():
     outputs = {}

     # Create execution context
-    mock_execution_context = ExecutionContext(safe_mode=False)
+    mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)

     # Create a mock execution processor for tests
@@ -786,7 +786,7 @@ async def test_smart_decision_maker_raw_response_conversion():
     outputs = {}

     # Create execution context
-    mock_execution_context = ExecutionContext(safe_mode=False)
+    mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)

     # Create a mock execution processor for tests
@@ -905,7 +905,7 @@ async def test_smart_decision_maker_agent_mode():
     # Create a mock execution context
     mock_execution_context = ExecutionContext(
-        safe_mode=False,
+        human_in_the_loop_safe_mode=False,
     )

     # Create a mock execution processor for agent mode tests
@@ -1027,7 +1027,7 @@ async def test_smart_decision_maker_traditional_mode_default():

     # Create execution context
-    mock_execution_context = ExecutionContext(safe_mode=False)
+    mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)

     # Create a mock execution processor for tests
diff --git a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dynamic_fields.py b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dynamic_fields.py
index af89a83f86..0427b13466 100644
--- a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dynamic_fields.py
+++ b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dynamic_fields.py
@@ -386,7 +386,7 @@ async def test_output_yielding_with_dynamic_fields():
     outputs = {}

     from backend.data.execution import ExecutionContext
-    mock_execution_context = ExecutionContext(safe_mode=False)
+    mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
     mock_execution_processor = MagicMock()

     async for output_name, output_value in block.run(
@@ -609,7 +609,9 @@ async def test_validation_errors_dont_pollute_conversation():
     outputs = {}

     from backend.data.execution import ExecutionContext
-    mock_execution_context = ExecutionContext(safe_mode=False)
+    mock_execution_context = ExecutionContext(
+        human_in_the_loop_safe_mode=False
+    )

     # Create a proper mock execution processor for agent mode
     from collections import defaultdict
diff --git a/autogpt_platform/backend/backend/data/block.py b/autogpt_platform/backend/backend/data/block.py
index 96cf3e06a7..4bfa3892e2 100644
--- a/autogpt_platform/backend/backend/data/block.py
+++ b/autogpt_platform/backend/backend/data/block.py
@@ -474,7 +474,7 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
         self.block_type = block_type
         self.webhook_config = webhook_config
         self.execution_stats: NodeExecutionStats = NodeExecutionStats()
-        self.requires_human_review: bool = False
+        self.is_sensitive_action: bool = False

         if self.webhook_config:
             if isinstance(self.webhook_config, BlockWebhookConfig):
@@ -638,9 +638,7 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
             - input_data_to_use: The input data to use (may be modified by reviewer)
         """
         if not (
-            self.requires_human_review
-            and execution_context.safe_mode
-            and execution_context.is_ai_generated_graph
+            self.is_sensitive_action and execution_context.sensitive_action_safe_mode
        ):
             return False, input_data
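This hunk is the behavioral core of the PR: sensitive-action review no longer depends on whether the graph was AI-generated. A self-contained sketch of the new gate (`ToyBlock` and `needs_review` are hypothetical stand-ins; the two attribute names and the boolean logic come from the diff):

```python
class ToyBlock:
    """Stand-in for backend.data.block.Block."""

    def __init__(self, is_sensitive_action: bool = False):
        # Renamed from `requires_human_review` in this PR.
        self.is_sensitive_action = is_sensitive_action


def needs_review(block: ToyBlock, sensitive_action_safe_mode: bool) -> bool:
    # Old gate: requires_human_review AND safe_mode AND is_ai_generated_graph.
    # New gate: block is marked sensitive AND the per-agent opt-in is set.
    return block.is_sensitive_action and sensitive_action_safe_mode


assert needs_review(ToyBlock(is_sensitive_action=True), True)
assert not needs_review(ToyBlock(is_sensitive_action=True), False)
assert not needs_review(ToyBlock(), True)
```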
""" - safe_mode: bool = True - is_ai_generated_graph: bool = False + human_in_the_loop_safe_mode: bool = True + sensitive_action_safe_mode: bool = False user_timezone: str = "UTC" root_execution_id: Optional[str] = None parent_execution_id: Optional[str] = None diff --git a/autogpt_platform/backend/backend/data/graph.py b/autogpt_platform/backend/backend/data/graph.py index 3380a6020f..d868c0ff3b 100644 --- a/autogpt_platform/backend/backend/data/graph.py +++ b/autogpt_platform/backend/backend/data/graph.py @@ -62,14 +62,22 @@ logger = logging.getLogger(__name__) class GraphSettings(BaseModel): - human_in_the_loop_safe_mode: bool | None = None - is_ai_generated_graph: bool = False + human_in_the_loop_safe_mode: bool = True + sensitive_action_safe_mode: bool = False @classmethod - def from_graph(cls, graph: "GraphModel", is_ai_generated: bool) -> "GraphSettings": + def from_graph( + cls, + graph: "GraphModel", + hitl_safe_mode: bool | None = None, + sensitive_action_safe_mode: bool = False, + ) -> "GraphSettings": + # Default to True if not explicitly set + if hitl_safe_mode is None: + hitl_safe_mode = True return cls( - human_in_the_loop_safe_mode=(True if graph.has_human_in_the_loop else None), - is_ai_generated_graph=is_ai_generated, + human_in_the_loop_safe_mode=hitl_safe_mode, + sensitive_action_safe_mode=sensitive_action_safe_mode, ) @@ -252,10 +260,14 @@ class BaseGraph(BaseDbModel): return any( node.block_id for node in self.nodes - if ( - node.block.block_type == BlockType.HUMAN_IN_THE_LOOP - or node.block.requires_human_review - ) + if node.block.block_type == BlockType.HUMAN_IN_THE_LOOP + ) + + @computed_field + @property + def has_sensitive_action(self) -> bool: + return any( + node.block_id for node in self.nodes if node.block.is_sensitive_action ) @property diff --git a/autogpt_platform/backend/backend/executor/utils.py b/autogpt_platform/backend/backend/executor/utils.py index 6c3b0118cc..7771c3751c 100644 --- a/autogpt_platform/backend/backend/executor/utils.py +++ b/autogpt_platform/backend/backend/executor/utils.py @@ -873,12 +873,8 @@ async def add_graph_execution( settings = await gdb.get_graph_settings(user_id=user_id, graph_id=graph_id) execution_context = ExecutionContext( - safe_mode=( - settings.human_in_the_loop_safe_mode - if settings.human_in_the_loop_safe_mode is not None - else True - ), - is_ai_generated_graph=settings.is_ai_generated_graph, + human_in_the_loop_safe_mode=settings.human_in_the_loop_safe_mode, + sensitive_action_safe_mode=settings.sensitive_action_safe_mode, user_timezone=( user.timezone if user.timezone != USER_TIMEZONE_NOT_SET else "UTC" ), diff --git a/autogpt_platform/backend/backend/executor/utils_test.py b/autogpt_platform/backend/backend/executor/utils_test.py index 0e652f9627..e6e8fcbf60 100644 --- a/autogpt_platform/backend/backend/executor/utils_test.py +++ b/autogpt_platform/backend/backend/executor/utils_test.py @@ -386,6 +386,7 @@ async def test_add_graph_execution_is_repeatable(mocker: MockerFixture): mock_user.timezone = "UTC" mock_settings = mocker.MagicMock() mock_settings.human_in_the_loop_safe_mode = True + mock_settings.sensitive_action_safe_mode = False mock_udb.get_user_by_id = mocker.AsyncMock(return_value=mock_user) mock_gdb.get_graph_settings = mocker.AsyncMock(return_value=mock_settings) @@ -651,6 +652,7 @@ async def test_add_graph_execution_with_nodes_to_skip(mocker: MockerFixture): mock_user.timezone = "UTC" mock_settings = mocker.MagicMock() mock_settings.human_in_the_loop_safe_mode = True + 
diff --git a/autogpt_platform/backend/backend/executor/utils_test.py b/autogpt_platform/backend/backend/executor/utils_test.py
index 0e652f9627..e6e8fcbf60 100644
--- a/autogpt_platform/backend/backend/executor/utils_test.py
+++ b/autogpt_platform/backend/backend/executor/utils_test.py
@@ -386,6 +386,7 @@ async def test_add_graph_execution_is_repeatable(mocker: MockerFixture):
     mock_user.timezone = "UTC"
     mock_settings = mocker.MagicMock()
     mock_settings.human_in_the_loop_safe_mode = True
+    mock_settings.sensitive_action_safe_mode = False
     mock_udb.get_user_by_id = mocker.AsyncMock(return_value=mock_user)
     mock_gdb.get_graph_settings = mocker.AsyncMock(return_value=mock_settings)
@@ -651,6 +652,7 @@ async def test_add_graph_execution_with_nodes_to_skip(mocker: MockerFixture):
     mock_user.timezone = "UTC"
     mock_settings = mocker.MagicMock()
     mock_settings.human_in_the_loop_safe_mode = True
+    mock_settings.sensitive_action_safe_mode = False
     mock_udb.get_user_by_id = mocker.AsyncMock(return_value=mock_user)
     mock_gdb.get_graph_settings = mocker.AsyncMock(return_value=mock_settings)
diff --git a/autogpt_platform/backend/test/e2e_test_data.py b/autogpt_platform/backend/test/e2e_test_data.py
index 3fb1e33e40..d7576cdad3 100644
--- a/autogpt_platform/backend/test/e2e_test_data.py
+++ b/autogpt_platform/backend/test/e2e_test_data.py
@@ -412,9 +412,7 @@ class TestDataCreator:
             # Use the API function to create library agent
             library_agents.extend(
                 v.model_dump()
-                for v in await create_library_agent(
-                    graph, user["id"], is_ai_generated=False
-                )
+                for v in await create_library_agent(graph, user["id"])
             )
         except Exception as e:
             print(f"Error creating library agent: {e}")
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentSettingsModal/AgentSettingsModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentSettingsModal/AgentSettingsModal.tsx
index 7886f7adaf..de912c5fc3 100644
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentSettingsModal/AgentSettingsModal.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentSettingsModal/AgentSettingsModal.tsx
@@ -31,10 +31,18 @@ export function AgentSettingsModal({
     }
   }

-  const { currentSafeMode, isPending, hasHITLBlocks, handleToggle } =
-    useAgentSafeMode(agent);
+  const {
+    currentHITLSafeMode,
+    showHITLToggle,
+    handleHITLToggle,
+    currentSensitiveActionSafeMode,
+    showSensitiveActionToggle,
+    handleSensitiveActionToggle,
+    isPending,
+    shouldShowToggle,
+  } = useAgentSafeMode(agent);

-  if (!hasHITLBlocks) return null;
+  if (!shouldShowToggle) return null;

   return (
-            Require human approval
-            The agent will pause and wait for your review before
-            continuing
+      {showHITLToggle && (
+            Human-in-the-loop approval
+            The agent will pause at human-in-the-loop blocks and wait
+            for your review before continuing
+      )}
+      {showSensitiveActionToggle && (
+            Sensitive action approval
+            The agent will pause at sensitive action blocks and wait for
+            your review before continuing
+      )}
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedSettingsView/SelectedSettingsView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedSettingsView/SelectedSettingsView.tsx
index 57d7055e1c..530d24529f 100644
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedSettingsView/SelectedSettingsView.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedSettingsView/SelectedSettingsView.tsx
@@ -13,8 +13,16 @@ interface Props {
 }

 export function SelectedSettingsView({ agent, onClearSelectedRun }: Props) {
-  const { currentSafeMode, isPending, hasHITLBlocks, handleToggle } =
-    useAgentSafeMode(agent);
+  const {
+    currentHITLSafeMode,
+    showHITLToggle,
+    handleHITLToggle,
+    currentSensitiveActionSafeMode,
+    showSensitiveActionToggle,
+    handleSensitiveActionToggle,
+    isPending,
+    shouldShowToggle,
+  } = useAgentSafeMode(agent);

   return (
@@ -34,24 +42,51 @@ export function SelectedSettingsView({ agent, onClearSelectedRun }: Props) {
-          {hasHITLBlocks ? (
-            Require human approval
-            The agent will pause and wait for your review before
-            continuing
+          {shouldShowToggle ? (
+            <>
+              {showHITLToggle && (
+                Human-in-the-loop approval
+                The agent will pause at human-in-the-loop blocks and
+                wait for your review before continuing
+              )}
+              {showSensitiveActionToggle && (
+                Sensitive action approval
+                The agent will pause at sensitive action blocks and wait
+                for your review before continuing
+              )}
+            </>
           ) : (
diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json
index c0d5b763bb..2f36777705 100644
--- a/autogpt_platform/frontend/src/app/api/openapi.json
+++ b/autogpt_platform/frontend/src/app/api/openapi.json
@@ -6383,6 +6383,11 @@
         "title": "Has Human In The Loop",
         "readOnly": true
       },
+      "has_sensitive_action": {
+        "type": "boolean",
+        "title": "Has Sensitive Action",
+        "readOnly": true
+      },
       "trigger_setup_info": {
         "anyOf": [
           { "$ref": "#/components/schemas/GraphTriggerInfo" },
@@ -6399,6 +6404,7 @@
         "output_schema",
         "has_external_trigger",
         "has_human_in_the_loop",
+        "has_sensitive_action",
         "trigger_setup_info"
       ],
       "title": "BaseGraph"
@@ -6793,11 +6799,6 @@
             { "type": "null" }
           ],
           "title": "Source"
-        },
-        "is_ai_generated": {
-          "type": "boolean",
-          "title": "Is Ai Generated",
-          "default": false
         }
       },
       "type": "object",
@@ -7608,6 +7609,11 @@
         "title": "Has Human In The Loop",
         "readOnly": true
       },
+      "has_sensitive_action": {
+        "type": "boolean",
+        "title": "Has Sensitive Action",
+        "readOnly": true
+      },
       "trigger_setup_info": {
         "anyOf": [
           { "$ref": "#/components/schemas/GraphTriggerInfo" },
@@ -7631,6 +7637,7 @@
         "output_schema",
         "has_external_trigger",
         "has_human_in_the_loop",
+        "has_sensitive_action",
         "trigger_setup_info",
         "credentials_input_schema"
       ],
@@ -7709,6 +7716,11 @@
         "title": "Has Human In The Loop",
         "readOnly": true
       },
+      "has_sensitive_action": {
+        "type": "boolean",
+        "title": "Has Sensitive Action",
+        "readOnly": true
+      },
       "trigger_setup_info": {
         "anyOf": [
           { "$ref": "#/components/schemas/GraphTriggerInfo" },
@@ -7733,6 +7745,7 @@
         "output_schema",
         "has_external_trigger",
         "has_human_in_the_loop",
+        "has_sensitive_action",
         "trigger_setup_info",
         "credentials_input_schema"
       ],
     "GraphSettings": {
       "properties": {
         "human_in_the_loop_safe_mode": {
-          "anyOf": [{ "type": "boolean" }, { "type": "null" }],
-          "title": "Human In The Loop Safe Mode"
-        },
-        "is_ai_generated_graph": {
           "type": "boolean",
-          "title": "Is Ai Generated Graph",
+          "title": "Human In The Loop Safe Mode",
+          "default": true
+        },
+        "sensitive_action_safe_mode": {
+          "type": "boolean",
+          "title": "Sensitive Action Safe Mode",
           "default": false
         }
       },
@@ -7905,6 +7919,16 @@
         "title": "Has External Trigger",
         "description": "Whether the agent has an external trigger (e.g. webhook) node"
       },
+      "has_human_in_the_loop": {
+        "type": "boolean",
+        "title": "Has Human In The Loop",
+        "description": "Whether the agent has human-in-the-loop blocks"
+      },
+      "has_sensitive_action": {
+        "type": "boolean",
+        "title": "Has Sensitive Action",
+        "description": "Whether the agent has sensitive action blocks"
+      },
       "trigger_setup_info": {
         "anyOf": [
           { "$ref": "#/components/schemas/GraphTriggerInfo" },
@@ -7951,6 +7975,8 @@
         "output_schema",
         "credentials_input_schema",
         "has_external_trigger",
+        "has_human_in_the_loop",
+        "has_sensitive_action",
         "new_output",
         "can_access_graph",
         "is_latest_version",
webhook) node" }, + "has_human_in_the_loop": { + "type": "boolean", + "title": "Has Human In The Loop", + "description": "Whether the agent has human-in-the-loop blocks" + }, + "has_sensitive_action": { + "type": "boolean", + "title": "Has Sensitive Action", + "description": "Whether the agent has sensitive action blocks" + }, "trigger_setup_info": { "anyOf": [ { "$ref": "#/components/schemas/GraphTriggerInfo" }, @@ -7951,6 +7975,8 @@ "output_schema", "credentials_input_schema", "has_external_trigger", + "has_human_in_the_loop", + "has_sensitive_action", "new_output", "can_access_graph", "is_latest_version", diff --git a/autogpt_platform/frontend/src/hooks/useAgentSafeMode.ts b/autogpt_platform/frontend/src/hooks/useAgentSafeMode.ts index 07a2b33674..8e5560ce8f 100644 --- a/autogpt_platform/frontend/src/hooks/useAgentSafeMode.ts +++ b/autogpt_platform/frontend/src/hooks/useAgentSafeMode.ts @@ -20,11 +20,15 @@ function hasHITLBlocks(graph: GraphModel | LibraryAgent | Graph): boolean { if ("has_human_in_the_loop" in graph) { return !!graph.has_human_in_the_loop; } + return false; +} - if (isLibraryAgent(graph)) { - return graph.settings?.human_in_the_loop_safe_mode !== null; +function hasSensitiveActionBlocks( + graph: GraphModel | LibraryAgent | Graph, +): boolean { + if ("has_sensitive_action" in graph) { + return !!graph.has_sensitive_action; } - return false; } @@ -40,7 +44,9 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) { const graphId = getGraphId(graph); const isAgent = isLibraryAgent(graph); - const shouldShowToggle = hasHITLBlocks(graph); + const showHITLToggle = hasHITLBlocks(graph); + const showSensitiveActionToggle = hasSensitiveActionBlocks(graph); + const shouldShowToggle = showHITLToggle || showSensitiveActionToggle; const { mutateAsync: updateGraphSettings, isPending } = usePatchV1UpdateGraphSettings(); @@ -56,27 +62,37 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) { }, ); - const [localSafeMode, setLocalSafeMode] = useState(null); + const [localHITLSafeMode, setLocalHITLSafeMode] = useState(true); + const [localSensitiveActionSafeMode, setLocalSensitiveActionSafeMode] = + useState(false); + const [isLocalStateLoaded, setIsLocalStateLoaded] = useState(false); useEffect(() => { if (!isAgent && libraryAgent) { - const backendValue = libraryAgent.settings?.human_in_the_loop_safe_mode; - if (backendValue !== undefined) { - setLocalSafeMode(backendValue); - } + setLocalHITLSafeMode( + libraryAgent.settings?.human_in_the_loop_safe_mode ?? true, + ); + setLocalSensitiveActionSafeMode( + libraryAgent.settings?.sensitive_action_safe_mode ?? false, + ); + setIsLocalStateLoaded(true); } }, [isAgent, libraryAgent]); - const currentSafeMode = isAgent - ? graph.settings?.human_in_the_loop_safe_mode - : localSafeMode; + const currentHITLSafeMode = isAgent + ? (graph.settings?.human_in_the_loop_safe_mode ?? true) + : localHITLSafeMode; - const isStateUndetermined = isAgent - ? graph.settings?.human_in_the_loop_safe_mode == null - : isLoading || localSafeMode === null; + const currentSensitiveActionSafeMode = isAgent + ? (graph.settings?.sensitive_action_safe_mode ?? false) + : localSensitiveActionSafeMode; - const handleToggle = useCallback(async () => { - const newSafeMode = !currentSafeMode; + const isHITLStateUndetermined = isAgent + ? 
false + : isLoading || !isLocalStateLoaded; + + const handleHITLToggle = useCallback(async () => { + const newSafeMode = !currentHITLSafeMode; try { await updateGraphSettings({ @@ -85,7 +101,7 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) { }); if (!isAgent) { - setLocalSafeMode(newSafeMode); + setLocalHITLSafeMode(newSafeMode); } if (isAgent) { @@ -101,37 +117,62 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) { queryClient.invalidateQueries({ queryKey: ["v2", "executions"] }); toast({ - title: `Safe mode ${newSafeMode ? "enabled" : "disabled"}`, + title: `HITL safe mode ${newSafeMode ? "enabled" : "disabled"}`, description: newSafeMode ? "Human-in-the-loop blocks will require manual review" : "Human-in-the-loop blocks will proceed automatically", duration: 2000, }); } catch (error) { - const isNotFoundError = - error instanceof Error && - (error.message.includes("404") || error.message.includes("not found")); - - if (!isAgent && isNotFoundError) { - toast({ - title: "Safe mode not available", - description: - "To configure safe mode, please save this graph to your library first.", - variant: "destructive", - }); - } else { - toast({ - title: "Failed to update safe mode", - description: - error instanceof Error - ? error.message - : "An unexpected error occurred.", - variant: "destructive", - }); - } + handleToggleError(error, isAgent, toast); } }, [ - currentSafeMode, + currentHITLSafeMode, + graphId, + isAgent, + graph.id, + updateGraphSettings, + queryClient, + toast, + ]); + + const handleSensitiveActionToggle = useCallback(async () => { + const newSafeMode = !currentSensitiveActionSafeMode; + + try { + await updateGraphSettings({ + graphId, + data: { sensitive_action_safe_mode: newSafeMode }, + }); + + if (!isAgent) { + setLocalSensitiveActionSafeMode(newSafeMode); + } + + if (isAgent) { + queryClient.invalidateQueries({ + queryKey: getGetV2GetLibraryAgentQueryOptions(graph.id.toString()) + .queryKey, + }); + } + + queryClient.invalidateQueries({ + queryKey: ["v1", "graphs", graphId, "executions"], + }); + queryClient.invalidateQueries({ queryKey: ["v2", "executions"] }); + + toast({ + title: `Sensitive action safe mode ${newSafeMode ? "enabled" : "disabled"}`, + description: newSafeMode + ? 
"Sensitive action blocks will require manual review" + : "Sensitive action blocks will proceed automatically", + duration: 2000, + }); + } catch (error) { + handleToggleError(error, isAgent, toast); + } + }, [ + currentSensitiveActionSafeMode, graphId, isAgent, graph.id, @@ -141,11 +182,53 @@ export function useAgentSafeMode(graph: GraphModel | LibraryAgent | Graph) { ]); return { - currentSafeMode, + // HITL safe mode + currentHITLSafeMode, + showHITLToggle, + isHITLStateUndetermined, + handleHITLToggle, + + // Sensitive action safe mode + currentSensitiveActionSafeMode, + showSensitiveActionToggle, + handleSensitiveActionToggle, + + // General isPending, shouldShowToggle, - isStateUndetermined, - handleToggle, - hasHITLBlocks: shouldShowToggle, + + // Backwards compatibility + currentSafeMode: currentHITLSafeMode, + isStateUndetermined: isHITLStateUndetermined, + handleToggle: handleHITLToggle, + hasHITLBlocks: showHITLToggle, }; } + +function handleToggleError( + error: unknown, + isAgent: boolean, + toast: ReturnType["toast"], +) { + const isNotFoundError = + error instanceof Error && + (error.message.includes("404") || error.message.includes("not found")); + + if (!isAgent && isNotFoundError) { + toast({ + title: "Safe mode not available", + description: + "To configure safe mode, please save this graph to your library first.", + variant: "destructive", + }); + } else { + toast({ + title: "Failed to update safe mode", + description: + error instanceof Error + ? error.message + : "An unexpected error occurred.", + variant: "destructive", + }); + } +}