fix(backend/copilot): show all creds as missing in race path; add tool guardrails

- _build_setup_requirements_from_validation_error now passes None to
  build_missing_credentials_from_graph so that every credential field
  appears in missing_credentials in the race scenario (previously
  matched credentials became invalid between the prerequisite check
  and the executor call).
- Add test_run_agent_execution_credential_race_returns_setup_card to
  mirror the existing schedule-path race test for the run path.
- Add credential-setup guardrail text to create_agent and edit_agent
  tool descriptions so the LLM doesn't call them for credential setup.
This commit is contained in:
majdyz
2026-04-11 08:13:12 +07:00
parent 6f9f2c72db
commit cc96ed131b
4 changed files with 72 additions and 48 deletions

View File

@@ -24,7 +24,9 @@ class CreateAgentTool(BaseTool):
def description(self) -> str:
return (
"Create a new agent from JSON (nodes + links). Validates, auto-fixes, and saves. "
"Before calling, search for existing agents with find_library_agent."
"Before calling, search for existing agents with find_library_agent. "
"Do NOT use this to connect credentials for an existing agent — call run_agent "
"instead (it surfaces the inline credential-setup card automatically)."
)
@property

View File

@@ -24,7 +24,9 @@ class EditAgentTool(BaseTool):
def description(self) -> str:
return (
"Edit an existing agent. Validates, auto-fixes, and saves. "
"Before calling, search for existing agents with find_library_agent."
"Before calling, search for existing agents with find_library_agent. "
"Do NOT use this to connect credentials for an existing agent — call run_agent "
"instead (it surfaces the inline credential-setup card automatically)."
)
@property

View File

@@ -369,7 +369,6 @@ class RunAgentTool(BaseTool):
graph: GraphModel,
error: GraphValidationError,
session_id: str,
graph_credentials: dict[str, CredentialsMetaInput],
) -> SetupRequirementsResponse | None:
"""Convert a credential-related ``GraphValidationError`` into
the inline ``SetupRequirementsResponse`` the frontend renders.
@@ -377,11 +376,10 @@ class RunAgentTool(BaseTool):
Returns ``None`` if *error* isn't credential-related — the
caller should then fall back to a plain text error.
``graph_credentials`` is the already-matched credentials map
from ``_check_prerequisites``. It is used to filter
``missing_credentials`` down to the credential fields the user
still needs to connect — mirroring how the non-race path builds
the same card at lines 478-481.
This is the race-condition path (prereq check passed → creds
deleted/invalidated → executor/scheduler raised). All credential
fields are shown as missing so the user sees exactly which
accounts to reconnect.
"""
has_credential_error = any(
is_credential_validation_error_message(msg)
@@ -391,15 +389,11 @@ class RunAgentTool(BaseTool):
if not has_credential_error:
return None
# ``requirements_creds_dict`` is the full credential schema
# (what the card's "Requirements" section renders). The
# ``missing_credentials_dict`` is the subset the user still
# needs to connect — we pass in the already-matched
# ``graph_credentials`` so connected credentials are excluded.
requirements_creds_dict = build_missing_credentials_from_graph(graph, None)
missing_credentials_dict = build_missing_credentials_from_graph(
graph, graph_credentials
)
# Show all credential fields as missing — in the race case the
# previously-matched credentials have since become invalid, so
# the user needs to reconnect all of them. Passing ``None``
# means no field is treated as "already connected".
credentials_dict = build_missing_credentials_from_graph(graph, None)
return SetupRequirementsResponse(
message=(
f"Agent '{graph.name}' has credentials that are missing or "
@@ -412,11 +406,11 @@ class RunAgentTool(BaseTool):
agent_name=graph.name,
user_readiness=UserReadiness(
has_all_credentials=False,
missing_credentials=missing_credentials_dict,
missing_credentials=credentials_dict,
ready_to_run=False,
),
requirements={
"credentials": list(requirements_creds_dict.values()),
"credentials": list(credentials_dict.values()),
"inputs": get_inputs_from_schema(graph.input_schema),
"execution_modes": self._get_execution_modes(graph),
},
@@ -578,7 +572,6 @@ class RunAgentTool(BaseTool):
graph=graph,
error=e,
session_id=session_id,
graph_credentials=graph_credentials,
)
if creds_setup is not None:
return creds_setup
@@ -774,7 +767,6 @@ class RunAgentTool(BaseTool):
graph=graph,
error=e,
session_id=session_id,
graph_credentials=graph_credentials,
)
if creds_setup is not None:
return creds_setup

View File

@@ -504,13 +504,11 @@ async def test_build_setup_requirements_from_credential_validation_error(
node_errors={"some-node-id": {"credentials": "These credentials are required"}},
)
# No matched credentials => missing_credentials should equal the full
# requirements set (the credential race with nothing connected).
# Race path: all credential fields shown as missing.
response = tool._build_setup_requirements_from_validation_error(
graph=graph,
error=error,
session_id="test-session",
graph_credentials={},
)
assert isinstance(response, SetupRequirementsResponse)
@@ -529,30 +527,15 @@ async def test_build_setup_requirements_from_credential_validation_error(
@pytest.mark.asyncio(loop_scope="session")
async def test_build_setup_requirements_filters_matched_credentials(
async def test_build_setup_requirements_shows_all_creds_missing_in_race(
setup_firecrawl_test_data,
):
"""``missing_credentials`` must exclude credentials the user already
has connected (``graph_credentials``), otherwise the inline card
would show every connected credential as missing during a race."""
from typing import cast
from backend.data.model import CredentialsMetaInput
"""In the race scenario (prereq passed → creds deleted → executor raised),
the helper must show ALL credential fields as missing so the user knows
which accounts need to be reconnected — not an empty missing_credentials map."""
graph = setup_firecrawl_test_data["graph"]
tool = RunAgentTool()
# Derive the graph's aggregated credential field keys and fabricate
# a fully-matched credentials map so that filtering leaves the
# missing_credentials map empty. The helper only reads
# ``graph_credentials.keys()`` (via ``build_missing_credentials_from_graph``),
# so the values are opaque sentinels.
aggregated = graph.aggregate_credentials_inputs()
graph_credentials = cast(
dict[str, CredentialsMetaInput],
{field_key: object() for field_key in aggregated.keys()},
)
error = GraphValidationError(
message="Graph is invalid",
node_errors={"some-node-id": {"credentials": "These credentials are required"}},
@@ -562,13 +545,15 @@ async def test_build_setup_requirements_filters_matched_credentials(
graph=graph,
error=error,
session_id="test-session",
graph_credentials=graph_credentials,
)
assert isinstance(response, SetupRequirementsResponse)
# All fields matched => missing_credentials is empty, requirements still populated.
assert response.setup_info.user_readiness.missing_credentials == {}
assert len(response.setup_info.requirements["credentials"]) > 0
# missing_credentials and requirements["credentials"] must both be non-empty
# and share the same field keys (both come from build_missing_credentials_from_graph).
missing = response.setup_info.user_readiness.missing_credentials
requirements_creds = response.setup_info.requirements["credentials"]
assert len(missing) > 0
assert set(missing.keys()) == {c["id"] for c in requirements_creds}
@pytest.mark.asyncio(loop_scope="session")
@@ -589,7 +574,6 @@ async def test_build_setup_requirements_returns_none_for_non_credential_error(
graph=graph,
error=error,
session_id="test-session",
graph_credentials={},
)
assert response is None
@@ -640,3 +624,47 @@ async def test_run_agent_schedule_credential_race_returns_setup_card(
assert result_data.get("type") == "setup_requirements"
assert "setup_info" in result_data
assert result_data["setup_info"]["user_readiness"]["ready_to_run"] is False
@pytest.mark.asyncio(loop_scope="session")
async def test_run_agent_execution_credential_race_returns_setup_card(
    setup_test_data,
):
    """End-to-end: if the executor raises a credential GraphValidationError
    after _check_prerequisites passed, the user should still see the
    inline credentials-setup card (not a generic error)."""
    user = setup_test_data["user"]
    store_submission = setup_test_data["store_submission"]
    session = make_session(user_id=user.id)
    marketplace_ref = f"{user.email.split('@')[0]}/{store_submission.slug}"
    tool = RunAgentTool()

    # Simulate the race: prerequisites passed earlier, but the executor now
    # rejects the (since-invalidated) credentials with a validation error.
    race_error = GraphValidationError(
        message="Graph is invalid",
        node_errors={
            "some-node-id": {"credentials": "These credentials are required"}
        },
    )
    with patch(
        "backend.copilot.tools.run_agent.execution_utils.add_graph_execution",
        side_effect=race_error,
    ):
        response = await tool.execute(
            user_id=user.id,
            session_id=str(uuid.uuid4()),
            tool_call_id=str(uuid.uuid4()),
            username_agent_slug=marketplace_ref,
            inputs={"test_input": "value"},
            dry_run=False,
            session=session,
        )

    assert response is not None
    assert isinstance(response.output, str)
    payload = orjson.loads(response.output)
    # Should surface the inline credential card, NOT a generic error or a
    # link redirecting to the Builder.
    assert payload.get("type") == "setup_requirements"
    assert "setup_info" in payload
    assert payload["setup_info"]["user_readiness"]["ready_to_run"] is False