From 398247fb60bd9d52da02610ac95ec289ee33d3db Mon Sep 17 00:00:00 2001 From: majdyz Date: Fri, 10 Apr 2026 23:44:59 +0000 Subject: [PATCH] fix(copilot): filter matched credentials & share executor helper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Address review feedback on the credential-race setup card: - Thread `graph_credentials` through `_build_setup_requirements_from_validation_error` so `missing_credentials` only lists fields the user hasn't connected yet. Previously it was computed with `None`, so the inline card showed every connected credential as missing during a race — the opposite of the UX fix. - Promote the credential-error-string matcher to a public helper `backend.executor.utils.is_credential_validation_error_message` and reuse it from both the dry-run path in `_construct_starting_node_execution_input` and the copilot tool, so adding a new credential error string only requires touching one file. - Make the setup-card message action-neutral ("try again" instead of "try scheduling again") — the helper is shared by the run and schedule paths and the previous wording misled run-path users. - Extend tests: cover the shared helper, add a test that verifies connected credentials are filtered out of `missing_credentials`, and assert the message is path-neutral. 
--- .../backend/copilot/tools/run_agent.py | 44 +++++----- .../backend/copilot/tools/run_agent_test.py | 80 +++++++++++++++---- .../backend/backend/executor/utils.py | 35 +++++--- 3 files changed, 108 insertions(+), 51 deletions(-) diff --git a/autogpt_platform/backend/backend/copilot/tools/run_agent.py b/autogpt_platform/backend/backend/copilot/tools/run_agent.py index 8528bd486d..a8d26bbd76 100644 --- a/autogpt_platform/backend/backend/copilot/tools/run_agent.py +++ b/autogpt_platform/backend/backend/copilot/tools/run_agent.py @@ -13,6 +13,7 @@ from backend.data.execution import ExecutionStatus from backend.data.graph import GraphModel from backend.data.model import CredentialsMetaInput from backend.executor import utils as execution_utils +from backend.executor.utils import is_credential_validation_error_message from backend.util.clients import get_scheduler_client from backend.util.exceptions import DatabaseError, GraphValidationError, NotFoundError from backend.util.timezone_utils import ( @@ -363,54 +364,47 @@ class RunAgentTool(BaseTool): trigger_info=trigger_info, ) - @staticmethod - def _is_credential_node_error_message(message: str) -> bool: - """Return True if *message* came from the executor's credential gate. - - Mirrors the recognised strings in - ``backend.executor.utils._validate_node_input_credentials`` so we - can distinguish credential failures from other graph validation - errors (input schema mismatch, invalid block config, ...) when - the scheduler raises a generic ``GraphValidationError``. 
- """ - lower = message.lower() - return ( - lower == "these credentials are required" - or lower.startswith("invalid credentials:") - or lower.startswith("credentials not available:") - or lower.startswith("unknown credentials #") - ) - def _build_setup_requirements_from_validation_error( self, graph: GraphModel, error: GraphValidationError, session_id: str, + graph_credentials: dict[str, CredentialsMetaInput], ) -> SetupRequirementsResponse | None: """Convert a credential-related ``GraphValidationError`` into the inline ``SetupRequirementsResponse`` the frontend renders. Returns ``None`` if *error* isn't credential-related — the caller should then fall back to a plain text error. + + ``graph_credentials`` is the already-matched credentials map + from ``_check_prerequisites``. It is used to filter + ``missing_credentials`` down to the credential fields the user + still needs to connect — mirroring how the non-race path builds + the same card at lines 478-481. """ has_credential_error = any( - self._is_credential_node_error_message(msg) + is_credential_validation_error_message(msg) for node_errors in error.node_errors.values() for msg in node_errors.values() ) if not has_credential_error: return None - # Rebuild the missing-credentials map from the graph schema so - # the card renders the same fields the user would see if the - # check had fired at ``_check_prerequisites`` time. + # ``requirements_creds_dict`` is the full credential schema + # (what the card's "Requirements" section renders). The + # ``missing_credentials_dict`` is the subset the user still + # needs to connect — we pass in the already-matched + # ``graph_credentials`` so connected credentials are excluded. 
requirements_creds_dict = build_missing_credentials_from_graph(graph, None) - missing_credentials_dict = build_missing_credentials_from_graph(graph, None) + missing_credentials_dict = build_missing_credentials_from_graph( + graph, graph_credentials + ) return SetupRequirementsResponse( message=( f"Agent '{graph.name}' has credentials that are missing or " "no longer valid. Please connect the required account(s) " - "and try scheduling again." + "and try again." ), session_id=session_id, setup_info=SetupInfo( @@ -584,6 +578,7 @@ class RunAgentTool(BaseTool): graph=graph, error=e, session_id=session_id, + graph_credentials=graph_credentials, ) if creds_setup is not None: return creds_setup @@ -779,6 +774,7 @@ class RunAgentTool(BaseTool): graph=graph, error=e, session_id=session_id, + graph_credentials=graph_credentials, ) if creds_setup is not None: return creds_setup diff --git a/autogpt_platform/backend/backend/copilot/tools/run_agent_test.py b/autogpt_platform/backend/backend/copilot/tools/run_agent_test.py index e05dceae62..85a0a2af25 100644 --- a/autogpt_platform/backend/backend/copilot/tools/run_agent_test.py +++ b/autogpt_platform/backend/backend/copilot/tools/run_agent_test.py @@ -4,6 +4,7 @@ from unittest.mock import AsyncMock, patch import orjson import pytest +from backend.executor.utils import is_credential_validation_error_message from backend.util.exceptions import GraphValidationError from ._test_data import ( @@ -470,24 +471,22 @@ async def test_run_agent_rejects_unknown_input_fields(setup_test_data): # --------------------------------------------------------------------------- -def test_is_credential_node_error_message_recognises_credential_strings(): - """Static helper should match all credential error strings emitted by +def test_is_credential_validation_error_message_recognises_credential_strings(): + """Shared helper should match all credential error strings emitted by ``backend.executor.utils._validate_node_input_credentials``.""" - matcher = 
RunAgentTool._is_credential_node_error_message - assert matcher("These credentials are required") - assert matcher("THESE CREDENTIALS ARE REQUIRED") - assert matcher("Invalid credentials: not found") - assert matcher("Credentials not available: github") - assert matcher("Unknown credentials #abc-123") + assert is_credential_validation_error_message("These credentials are required") + assert is_credential_validation_error_message("THESE CREDENTIALS ARE REQUIRED") + assert is_credential_validation_error_message("Invalid credentials: not found") + assert is_credential_validation_error_message("Credentials not available: github") + assert is_credential_validation_error_message("Unknown credentials #abc-123") -def test_is_credential_node_error_message_rejects_non_credential_strings(): - """Static helper should ignore unrelated graph validation messages.""" - matcher = RunAgentTool._is_credential_node_error_message - assert not matcher("Input field 'url' is required") - assert not matcher("Block configuration invalid") - assert not matcher("") - assert not matcher("credentials are fine") +def test_is_credential_validation_error_message_rejects_non_credential_strings(): + """Shared helper should ignore unrelated graph validation messages.""" + assert not is_credential_validation_error_message("Input field 'url' is required") + assert not is_credential_validation_error_message("Block configuration invalid") + assert not is_credential_validation_error_message("") + assert not is_credential_validation_error_message("credentials are fine") @pytest.mark.asyncio(loop_scope="session") @@ -505,10 +504,13 @@ async def test_build_setup_requirements_from_credential_validation_error( node_errors={"some-node-id": {"credentials": "These credentials are required"}}, ) + # No matched credentials => missing_credentials should equal the full + # requirements set (the credential race with nothing connected). 
response = tool._build_setup_requirements_from_validation_error( graph=graph, error=error, session_id="test-session", + graph_credentials={}, ) assert isinstance(response, SetupRequirementsResponse) @@ -520,6 +522,53 @@ async def test_build_setup_requirements_from_credential_validation_error( # rebuilt missing-credentials map matches the graph schema. assert len(response.setup_info.user_readiness.missing_credentials) > 0 assert "credentials" in response.message.lower() + # Message must be action-neutral: this helper is shared by the run + # path and the schedule path, so hardcoding "scheduling again" would + # mislead users on the run path. + assert "scheduling again" not in response.message.lower() + + +@pytest.mark.asyncio(loop_scope="session") +async def test_build_setup_requirements_filters_matched_credentials( + setup_firecrawl_test_data, +): + """``missing_credentials`` must exclude credentials the user already + has connected (``graph_credentials``), otherwise the inline card + would show every connected credential as missing during a race.""" + from typing import cast + + from backend.data.model import CredentialsMetaInput + + graph = setup_firecrawl_test_data["graph"] + tool = RunAgentTool() + + # Derive the graph's aggregated credential field keys and fabricate + # a fully-matched credentials map so that filtering leaves the + # missing_credentials map empty. The helper only reads + # ``graph_credentials.keys()`` (via ``build_missing_credentials_from_graph``), + # so the values are opaque sentinels. 
+ aggregated = graph.aggregate_credentials_inputs() + graph_credentials = cast( + dict[str, CredentialsMetaInput], + {field_key: object() for field_key in aggregated.keys()}, + ) + + error = GraphValidationError( + message="Graph is invalid", + node_errors={"some-node-id": {"credentials": "These credentials are required"}}, + ) + + response = tool._build_setup_requirements_from_validation_error( + graph=graph, + error=error, + session_id="test-session", + graph_credentials=graph_credentials, + ) + + assert isinstance(response, SetupRequirementsResponse) + # All fields matched => missing_credentials is empty, requirements still populated. + assert response.setup_info.user_readiness.missing_credentials == {} + assert len(response.setup_info.requirements["credentials"]) > 0 @pytest.mark.asyncio(loop_scope="session") @@ -540,6 +589,7 @@ async def test_build_setup_requirements_returns_none_for_non_credential_error( graph=graph, error=error, session_id="test-session", + graph_credentials={}, ) assert response is None diff --git a/autogpt_platform/backend/backend/executor/utils.py b/autogpt_platform/backend/backend/executor/utils.py index 0ee3e26479..61dc8f3a0f 100644 --- a/autogpt_platform/backend/backend/executor/utils.py +++ b/autogpt_platform/backend/backend/executor/utils.py @@ -249,6 +249,28 @@ def validate_exec( return data, node_block.name +def is_credential_validation_error_message(message: str) -> bool: + """Return True if *message* came from the credential gate in + :func:`_validate_node_input_credentials`. + + Kept as a public module-level helper so other layers (e.g. the + copilot tool that rebuilds the inline credentials setup card on a + credential race) can distinguish credential failures from other + graph validation errors without redefining the string list. + + Adding a new error string in ``_validate_node_input_credentials`` + **must** also be reflected here — otherwise the copilot will fall + back to a plain text error for that case. 
+ """ + lower = message.lower() + return ( + lower == "these credentials are required" + or lower.startswith("invalid credentials:") + or lower.startswith("credentials not available:") + or lower.startswith("unknown credentials #") + ) + + async def _validate_node_input_credentials( graph: GraphModel, user_id: str, @@ -476,22 +498,11 @@ async def _construct_starting_node_execution_input( # Dry runs simulate every block — missing credentials are irrelevant. # Strip credential-only errors so the graph can proceed. if dry_run and validation_errors: - - def _is_credential_error(msg: str) -> bool: - """Match errors produced by _validate_node_input_credentials.""" - m = msg.lower() - return ( - m == "these credentials are required" - or m.startswith("invalid credentials:") - or m.startswith("credentials not available:") - or m.startswith("unknown credentials #") - ) - validation_errors = { node_id: { field: msg for field, msg in errors.items() - if not _is_credential_error(msg) + if not is_credential_validation_error_message(msg) } for node_id, errors in validation_errors.items() }