mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-04-08 03:00:28 -04:00
Compare commits
19 Commits
dev
...
test-scree
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
17810d268a | ||
|
|
3f5c2b93cd | ||
|
|
7e62fdae48 | ||
|
|
a866e8d709 | ||
|
|
df2cd316f8 | ||
|
|
f1b1c19612 | ||
|
|
e5d42fcb99 | ||
|
|
8b289cacdd | ||
|
|
1eeac09801 | ||
|
|
afc02697af | ||
|
|
dd0cca48b4 | ||
|
|
60d4dd8ff2 | ||
|
|
8548bfcc4e | ||
|
|
7e19a1aa68 | ||
|
|
5723c1e230 | ||
|
|
1968ecf355 | ||
|
|
f4d6bc1f5b | ||
|
|
823eb3d15a | ||
|
|
270c2f0f55 |
@@ -28,6 +28,7 @@ from backend.data.model import NodeExecutionStats, SchemaField
|
||||
from backend.util import json
|
||||
from backend.util.clients import get_database_manager_async_client
|
||||
from backend.util.prompt import MAIN_OBJECTIVE_PREFIX
|
||||
from backend.util.security import SENSITIVE_FIELD_NAMES
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from backend.data.graph import Link, Node
|
||||
@@ -258,6 +259,108 @@ def get_pending_tool_calls(conversation_history: list[Any] | None) -> dict[str,
|
||||
return {call_id: count for call_id, count in pending_calls.items() if count > 0}
|
||||
|
||||
|
||||
def _derive_suffix_from_defaults(defaults: dict[str, Any]) -> str:
|
||||
"""Derive a short, descriptive suffix from hardcoded defaults.
|
||||
|
||||
Returns a cleaned string like ``_topic_sports`` derived from the first
|
||||
default's key and value, giving the LLM a meaningful hint about how
|
||||
this tool variant differs from its siblings. Falls back to ``""`` if
|
||||
no usable label can be derived.
|
||||
"""
|
||||
if not defaults:
|
||||
return ""
|
||||
key, val = next(iter(defaults.items()))
|
||||
# Use the value as a label when it's a short string; otherwise use the key.
|
||||
if isinstance(val, str) and 1 <= len(val) <= 30:
|
||||
label = f"{key}_{val}"
|
||||
else:
|
||||
label = key
|
||||
# Sanitise to [a-zA-Z0-9_] and trim
|
||||
label = re.sub(r"[^a-zA-Z0-9]+", "_", label).strip("_").lower()
|
||||
return f"_{label}" if label else ""
|
||||
|
||||
|
||||
def _disambiguate_tool_names(tools: list[dict[str, Any]]) -> None:
|
||||
"""Ensure all tool names are unique (Anthropic API requires this).
|
||||
|
||||
When multiple nodes use the same block type, they get the same tool name.
|
||||
This derives a descriptive suffix from hardcoded defaults (e.g.
|
||||
``search_topic_sports``) so the LLM can distinguish tool variants by name.
|
||||
Falls back to numeric suffixes (``_1``, ``_2``) when defaults don't
|
||||
provide a useful label. Also enriches descriptions with the full
|
||||
pre-configured values. Mutates the list in place.
|
||||
|
||||
Malformed tools (missing ``function`` or ``function.name``) are silently
|
||||
skipped so the caller never crashes on unexpected input.
|
||||
"""
|
||||
# Collect names, skipping tools that lack the required structure.
|
||||
valid_tools: list[tuple[int, dict[str, Any]]] = []
|
||||
for idx, tool in enumerate(tools):
|
||||
func = tool.get("function") if isinstance(tool, dict) else None
|
||||
if not isinstance(func, dict) or "name" not in func:
|
||||
# Strip internal metadata even from malformed entries.
|
||||
if isinstance(func, dict):
|
||||
func.pop("_hardcoded_defaults", None)
|
||||
continue
|
||||
valid_tools.append((idx, tool))
|
||||
|
||||
names = [t.get("function", {}).get("name", "") for _i, t in valid_tools]
|
||||
name_counts = Counter(names)
|
||||
duplicates = {n for n, c in name_counts.items() if c > 1}
|
||||
|
||||
if not duplicates:
|
||||
for _i, t in valid_tools:
|
||||
t.get("function", {}).pop("_hardcoded_defaults", None)
|
||||
return
|
||||
|
||||
taken: set[str] = set(names)
|
||||
counters: dict[str, int] = {}
|
||||
|
||||
for _i, tool in valid_tools:
|
||||
func = tool.get("function", {})
|
||||
name = func.get("name", "")
|
||||
defaults = func.pop("_hardcoded_defaults", {})
|
||||
|
||||
if name not in duplicates:
|
||||
continue
|
||||
|
||||
counters[name] = counters.get(name, 0) + 1
|
||||
|
||||
# Try a descriptive suffix first; fall back to numeric.
|
||||
desc_suffix = _derive_suffix_from_defaults(defaults)
|
||||
if desc_suffix:
|
||||
candidate = f"{name[: 64 - len(desc_suffix)]}{desc_suffix}"
|
||||
if candidate in taken:
|
||||
# Descriptive suffix collided — append counter to de-dup
|
||||
num_suffix = f"{desc_suffix}_{counters[name]}"
|
||||
candidate = f"{name[: 64 - len(num_suffix)]}{num_suffix}"
|
||||
else:
|
||||
candidate = None
|
||||
|
||||
if not candidate or candidate in taken:
|
||||
# Pure numeric fallback
|
||||
while True:
|
||||
suffix = f"_{counters[name]}"
|
||||
candidate = f"{name[: 64 - len(suffix)]}{suffix}"
|
||||
if candidate not in taken:
|
||||
break
|
||||
counters[name] += 1
|
||||
|
||||
func["name"] = candidate
|
||||
taken.add(candidate)
|
||||
|
||||
if defaults and isinstance(defaults, dict):
|
||||
parts: list[str] = []
|
||||
for k, v in defaults.items():
|
||||
rendered = json.dumps(v)
|
||||
if len(rendered) > 100:
|
||||
rendered = rendered[:80] + "...<truncated>"
|
||||
parts.append(f"{k}={rendered}")
|
||||
summary = ", ".join(parts)
|
||||
original_desc = func.get("description", "") or ""
|
||||
func["description"] = f"{original_desc} [Pre-configured: {summary}]"
|
||||
|
||||
|
||||
class OrchestratorBlock(Block):
|
||||
"""
|
||||
A block that uses a language model to orchestrate tool calls, supporting both
|
||||
@@ -507,6 +610,23 @@ class OrchestratorBlock(Block):
|
||||
tool_function["_field_mapping"] = field_mapping
|
||||
tool_function["_sink_node_id"] = sink_node.id
|
||||
|
||||
# Store hardcoded defaults (non-linked inputs) for disambiguation.
|
||||
# Exclude linked fields, private fields, and credential/auth fields
|
||||
# to avoid leaking sensitive data into tool descriptions.
|
||||
linked_fields = {link.sink_name for link in links}
|
||||
defaults = sink_node.input_default
|
||||
tool_function["_hardcoded_defaults"] = (
|
||||
{
|
||||
k: v
|
||||
for k, v in defaults.items()
|
||||
if k not in linked_fields
|
||||
and not k.startswith("_")
|
||||
and k.lower() not in SENSITIVE_FIELD_NAMES
|
||||
}
|
||||
if isinstance(defaults, dict)
|
||||
else {}
|
||||
)
|
||||
|
||||
return {"type": "function", "function": tool_function}
|
||||
|
||||
@staticmethod
|
||||
@@ -581,6 +701,24 @@ class OrchestratorBlock(Block):
|
||||
tool_function["_field_mapping"] = field_mapping
|
||||
tool_function["_sink_node_id"] = sink_node.id
|
||||
|
||||
# Store hardcoded defaults (non-linked inputs) for disambiguation.
|
||||
# Exclude linked fields, private fields, agent meta fields, and
|
||||
# credential/auth fields to avoid leaking sensitive data.
|
||||
linked_fields = {link.sink_name for link in links}
|
||||
defaults = sink_node.input_default
|
||||
tool_function["_hardcoded_defaults"] = (
|
||||
{
|
||||
k: v
|
||||
for k, v in defaults.items()
|
||||
if k not in linked_fields
|
||||
and k not in ("graph_id", "graph_version", "input_schema")
|
||||
and not k.startswith("_")
|
||||
and k.lower() not in SENSITIVE_FIELD_NAMES
|
||||
}
|
||||
if isinstance(defaults, dict)
|
||||
else {}
|
||||
)
|
||||
|
||||
return {"type": "function", "function": tool_function}
|
||||
|
||||
@staticmethod
|
||||
@@ -629,6 +767,7 @@ class OrchestratorBlock(Block):
|
||||
)
|
||||
return_tool_functions.append(tool_func)
|
||||
|
||||
_disambiguate_tool_names(return_tool_functions)
|
||||
return return_tool_functions
|
||||
|
||||
async def _attempt_llm_call_with_validation(
|
||||
@@ -996,7 +1135,10 @@ class OrchestratorBlock(Block):
|
||||
credentials, input_data, iteration_prompt, tool_functions
|
||||
)
|
||||
except Exception as e:
|
||||
yield "error", f"LLM call failed in agent mode iteration {iteration}: {str(e)}"
|
||||
yield (
|
||||
"error",
|
||||
f"LLM call failed in agent mode iteration {iteration}: {str(e)}",
|
||||
)
|
||||
return
|
||||
|
||||
# Process tool calls
|
||||
@@ -1041,7 +1183,10 @@ class OrchestratorBlock(Block):
|
||||
if max_iterations < 0:
|
||||
yield "finished", f"Agent mode completed after {iteration} iterations"
|
||||
else:
|
||||
yield "finished", f"Agent mode completed after {max_iterations} iterations (limit reached)"
|
||||
yield (
|
||||
"finished",
|
||||
f"Agent mode completed after {max_iterations} iterations (limit reached)",
|
||||
)
|
||||
yield "conversations", current_prompt
|
||||
|
||||
async def run(
|
||||
|
||||
@@ -1074,6 +1074,7 @@ async def test_orchestrator_uses_customized_name_for_blocks():
|
||||
mock_node.block_id = StoreValueBlock().id
|
||||
mock_node.metadata = {"customized_name": "My Custom Tool Name"}
|
||||
mock_node.block = StoreValueBlock()
|
||||
mock_node.input_default = {}
|
||||
|
||||
# Create a mock link
|
||||
mock_link = MagicMock(spec=Link)
|
||||
@@ -1105,6 +1106,7 @@ async def test_orchestrator_falls_back_to_block_name():
|
||||
mock_node.block_id = StoreValueBlock().id
|
||||
mock_node.metadata = {} # No customized_name
|
||||
mock_node.block = StoreValueBlock()
|
||||
mock_node.input_default = {}
|
||||
|
||||
# Create a mock link
|
||||
mock_link = MagicMock(spec=Link)
|
||||
|
||||
@@ -0,0 +1,456 @@
|
||||
"""Tests for OrchestratorBlock tool name disambiguation.
|
||||
|
||||
When multiple nodes use the same block type, their tool names collide.
|
||||
The Anthropic API requires unique tool names, so the orchestrator must
|
||||
disambiguate them and enrich descriptions with hardcoded defaults.
|
||||
"""
|
||||
|
||||
from unittest.mock import AsyncMock, Mock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from backend.blocks.orchestrator import OrchestratorBlock, _disambiguate_tool_names
|
||||
from backend.blocks.text import MatchTextPatternBlock
|
||||
|
||||
|
||||
def _make_mock_node(
|
||||
block,
|
||||
node_id: str,
|
||||
input_default: dict | None = None,
|
||||
metadata: dict | None = None,
|
||||
):
|
||||
"""Create a mock Node with the given block and defaults."""
|
||||
node = Mock()
|
||||
node.block = block
|
||||
node.block_id = block.id
|
||||
node.id = node_id
|
||||
node.input_default = input_default or {}
|
||||
node.metadata = metadata or {}
|
||||
return node
|
||||
|
||||
|
||||
def _make_mock_link(source_name: str, sink_name: str, sink_id: str, source_id: str):
|
||||
"""Create a mock Link."""
|
||||
return Mock(
|
||||
source_name=source_name,
|
||||
sink_name=sink_name,
|
||||
sink_id=sink_id,
|
||||
source_id=source_id,
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_duplicate_block_names_get_suffixed():
    """Two nodes using the same block type should produce unique tool names."""
    # Both nodes share one block instance, so both tools start with the
    # same base name and must be disambiguated.
    block = MatchTextPatternBlock()
    node_a = _make_mock_node(block, "node_a", input_default={"match": "foo"})
    node_b = _make_mock_node(block, "node_b", input_default={"match": "bar"})

    link_a = _make_mock_link("tools_^_a_~_text", "text", "node_a", "orch")
    link_b = _make_mock_link("tools_^_b_~_text", "text", "node_b", "orch")

    mock_db = AsyncMock()
    mock_db.get_connected_output_nodes.return_value = [
        (link_a, node_a),
        (link_b, node_b),
    ]

    # Patch the DB client so no real database is needed.
    with patch(
        "backend.blocks.orchestrator.get_database_manager_async_client",
        return_value=mock_db,
    ):
        tools = await OrchestratorBlock._create_tool_node_signatures("orch")

    names = [t["function"]["name"] for t in tools]
    assert len(names) == 2
    assert len(set(names)) == 2, f"Tool names are not unique: {names}"
    # Should be suffixed with _1, _2
    base = OrchestratorBlock.cleanup(block.name)
    assert f"{base}_1" in names
    assert f"{base}_2" in names
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_duplicate_tools_include_defaults_in_description():
    """Duplicate tools should have hardcoded defaults in description."""
    block = MatchTextPatternBlock()
    # Each node pins different non-linked inputs; those should surface in
    # the enriched tool descriptions.
    node_a = _make_mock_node(
        block, "node_a", input_default={"match": "error", "case_sensitive": True}
    )
    node_b = _make_mock_node(
        block, "node_b", input_default={"match": "warning", "case_sensitive": False}
    )

    link_a = _make_mock_link("tools_^_a_~_text", "text", "node_a", "orch")
    link_b = _make_mock_link("tools_^_b_~_text", "text", "node_b", "orch")

    mock_db = AsyncMock()
    mock_db.get_connected_output_nodes.return_value = [
        (link_a, node_a),
        (link_b, node_b),
    ]

    # Patch the DB client so no real database is needed.
    with patch(
        "backend.blocks.orchestrator.get_database_manager_async_client",
        return_value=mock_db,
    ):
        tools = await OrchestratorBlock._create_tool_node_signatures("orch")

    # Find each tool by suffix
    tool_1 = next(t for t in tools if t["function"]["name"].endswith("_1"))
    tool_2 = next(t for t in tools if t["function"]["name"].endswith("_2"))

    # Descriptions should contain the hardcoded defaults (not the linked 'text' field)
    assert "[Pre-configured:" in tool_1["function"]["description"]
    assert "[Pre-configured:" in tool_2["function"]["description"]
    assert '"error"' in tool_1["function"]["description"]
    assert '"warning"' in tool_2["function"]["description"]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_unique_tool_names_unchanged():
    """When all tool names are already unique, no suffixing should occur."""
    block_a = MatchTextPatternBlock()
    # Distinct customized names mean no collision, so names pass through.
    node_a = _make_mock_node(
        block_a, "node_a", metadata={"customized_name": "search_errors"}
    )
    node_b = _make_mock_node(
        block_a, "node_b", metadata={"customized_name": "search_warnings"}
    )

    link_a = _make_mock_link("tools_^_a_~_text", "text", "node_a", "orch")
    link_b = _make_mock_link("tools_^_b_~_text", "text", "node_b", "orch")

    mock_db = AsyncMock()
    mock_db.get_connected_output_nodes.return_value = [
        (link_a, node_a),
        (link_b, node_b),
    ]

    # Patch the DB client so no real database is needed.
    with patch(
        "backend.blocks.orchestrator.get_database_manager_async_client",
        return_value=mock_db,
    ):
        tools = await OrchestratorBlock._create_tool_node_signatures("orch")

    names = [t["function"]["name"] for t in tools]
    assert "search_errors" in names
    assert "search_warnings" in names
    # No suffixing
    assert all("_1" not in n and "_2" not in n for n in names)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_no_hardcoded_defaults_key_leaks_to_tool_schema():
    """_hardcoded_defaults should be cleaned up and not sent to the LLM API."""
    block = MatchTextPatternBlock()
    node_a = _make_mock_node(block, "node_a", input_default={"match": "foo"})
    node_b = _make_mock_node(block, "node_b", input_default={"match": "bar"})

    link_a = _make_mock_link("tools_^_a_~_text", "text", "node_a", "orch")
    link_b = _make_mock_link("tools_^_b_~_text", "text", "node_b", "orch")

    mock_db = AsyncMock()
    mock_db.get_connected_output_nodes.return_value = [
        (link_a, node_a),
        (link_b, node_b),
    ]

    # Patch the DB client so no real database is needed.
    with patch(
        "backend.blocks.orchestrator.get_database_manager_async_client",
        return_value=mock_db,
    ):
        tools = await OrchestratorBlock._create_tool_node_signatures("orch")

    # The internal metadata key must be stripped from every tool payload.
    for tool in tools:
        assert "_hardcoded_defaults" not in tool["function"]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_single_tool_no_suffixing():
    """A single tool should never get suffixed."""
    block = MatchTextPatternBlock()
    node = _make_mock_node(block, "node_a", input_default={"match": "foo"})
    link = _make_mock_link("tools_^_a_~_text", "text", "node_a", "orch")

    mock_db = AsyncMock()
    mock_db.get_connected_output_nodes.return_value = [(link, node)]

    # Patch the DB client so no real database is needed.
    with patch(
        "backend.blocks.orchestrator.get_database_manager_async_client",
        return_value=mock_db,
    ):
        tools = await OrchestratorBlock._create_tool_node_signatures("orch")

    assert len(tools) == 1
    name = tools[0]["function"]["name"]
    assert not name.endswith("_1")
    assert not name.endswith("_2")
    # No Pre-configured in description for single tools
    assert "[Pre-configured:" not in tools[0]["function"].get("description", "")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_three_duplicates_all_get_unique_names():
    """Three nodes with same block type should all get unique suffixed names."""
    block = MatchTextPatternBlock()
    # Build three colliding tool nodes, each with a distinct default.
    nodes_and_links = []
    for i, pattern in enumerate(["error", "warning", "info"]):
        node = _make_mock_node(block, f"node_{i}", input_default={"match": pattern})
        link = _make_mock_link(f"tools_^_{i}_~_text", "text", f"node_{i}", "orch")
        nodes_and_links.append((link, node))

    mock_db = AsyncMock()
    mock_db.get_connected_output_nodes.return_value = nodes_and_links

    # Patch the DB client so no real database is needed.
    with patch(
        "backend.blocks.orchestrator.get_database_manager_async_client",
        return_value=mock_db,
    ):
        tools = await OrchestratorBlock._create_tool_node_signatures("orch")

    names = [t["function"]["name"] for t in tools]
    assert len(names) == 3
    assert len(set(names)) == 3, f"Tool names are not unique: {names}"
    base = OrchestratorBlock.cleanup(block.name)
    assert f"{base}_1" in names
    assert f"{base}_2" in names
    assert f"{base}_3" in names
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_linked_fields_excluded_from_defaults():
    """Fields that are linked (LLM provides them) should not appear in defaults."""
    block = MatchTextPatternBlock()
    # 'text' is linked, 'match' and 'case_sensitive' are hardcoded
    node_a = _make_mock_node(
        block,
        "node_a",
        input_default={"text": "ignored", "match": "error", "case_sensitive": True},
    )
    # Duplicate to trigger disambiguation
    node_b = _make_mock_node(
        block, "node_b", input_default={"text": "ignored", "match": "warning"}
    )

    link_a = _make_mock_link("tools_^_a_~_text", "text", "node_a", "orch")
    link_b = _make_mock_link("tools_^_b_~_text", "text", "node_b", "orch")

    mock_db = AsyncMock()
    mock_db.get_connected_output_nodes.return_value = [
        (link_a, node_a),
        (link_b, node_b),
    ]

    # Patch the DB client so no real database is needed.
    with patch(
        "backend.blocks.orchestrator.get_database_manager_async_client",
        return_value=mock_db,
    ):
        tools = await OrchestratorBlock._create_tool_node_signatures("orch")

    tool_1 = next(t for t in tools if t["function"]["name"].endswith("_1"))
    desc = tool_1["function"]["description"]
    # 'text' is linked so should NOT appear in Pre-configured
    assert "text=" not in desc
    # 'match' is hardcoded so should appear
    assert "match=" in desc
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_mixed_unique_and_duplicate_names():
    """Only duplicate names get suffixed; unique names are left untouched."""
    block_a = MatchTextPatternBlock()
    node_a1 = _make_mock_node(block_a, "node_a1", input_default={"match": "foo"})
    node_a2 = _make_mock_node(block_a, "node_a2", input_default={"match": "bar"})

    # Use a different block with a custom name to be unique
    node_b = _make_mock_node(
        block_a, "node_b", metadata={"customized_name": "unique_tool"}
    )

    link_a1 = _make_mock_link("tools_^_a1_~_text", "text", "node_a1", "orch")
    link_a2 = _make_mock_link("tools_^_a2_~_text", "text", "node_a2", "orch")
    link_b = _make_mock_link("tools_^_b_~_text", "text", "node_b", "orch")

    mock_db = AsyncMock()
    mock_db.get_connected_output_nodes.return_value = [
        (link_a1, node_a1),
        (link_a2, node_a2),
        (link_b, node_b),
    ]

    # Patch the DB client so no real database is needed.
    with patch(
        "backend.blocks.orchestrator.get_database_manager_async_client",
        return_value=mock_db,
    ):
        tools = await OrchestratorBlock._create_tool_node_signatures("orch")

    names = [t["function"]["name"] for t in tools]
    assert len(set(names)) == 3
    assert "unique_tool" in names
    # Only the two colliding block-named tools receive suffixes.
    base = OrchestratorBlock.cleanup(block_a.name)
    assert f"{base}_1" in names
    assert f"{base}_2" in names
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_sensitive_fields_excluded_from_defaults():
    """Credentials and other sensitive fields must not leak into descriptions."""
    block = MatchTextPatternBlock()
    # node_a carries several secret-bearing defaults alongside a benign one.
    node_a = _make_mock_node(
        block,
        "node_a",
        input_default={
            "match": "error",
            "credentials": {"api_key": "sk-secret"},
            "api_key": "my-key",
            "password": "hunter2",
        },
    )
    node_b = _make_mock_node(
        block, "node_b", input_default={"match": "warning", "credentials": {"x": "y"}}
    )

    link_a = _make_mock_link("tools_^_a_~_text", "text", "node_a", "orch")
    link_b = _make_mock_link("tools_^_b_~_text", "text", "node_b", "orch")

    mock_db = AsyncMock()
    mock_db.get_connected_output_nodes.return_value = [
        (link_a, node_a),
        (link_b, node_b),
    ]

    # Patch the DB client so no real database is needed.
    with patch(
        "backend.blocks.orchestrator.get_database_manager_async_client",
        return_value=mock_db,
    ):
        tools = await OrchestratorBlock._create_tool_node_signatures("orch")

    # Neither the secret values nor the sensitive field names may appear.
    for tool in tools:
        desc = tool["function"].get("description", "")
        assert "sk-secret" not in desc
        assert "my-key" not in desc
        assert "hunter2" not in desc
        assert "credentials=" not in desc
        assert "api_key=" not in desc
        assert "password=" not in desc
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_long_tool_name_truncated():
    """Tool names exceeding 64 chars should be truncated before suffixing."""
    block = MatchTextPatternBlock()
    long_name = "a" * 63  # 63 chars, adding _1 would make 65

    node_a = _make_mock_node(block, "node_a", metadata={"customized_name": long_name})
    node_b = _make_mock_node(block, "node_b", metadata={"customized_name": long_name})

    link_a = _make_mock_link("tools_^_a_~_text", "text", "node_a", "orch")
    link_b = _make_mock_link("tools_^_b_~_text", "text", "node_b", "orch")

    mock_db = AsyncMock()
    mock_db.get_connected_output_nodes.return_value = [
        (link_a, node_a),
        (link_b, node_b),
    ]

    # Patch the DB client so no real database is needed.
    with patch(
        "backend.blocks.orchestrator.get_database_manager_async_client",
        return_value=mock_db,
    ):
        tools = await OrchestratorBlock._create_tool_node_signatures("orch")

    # 64 is the Anthropic tool-name length limit.
    for tool in tools:
        name = tool["function"]["name"]
        assert len(name) <= 64, f"Tool name exceeds 64 chars: {name!r} ({len(name)})"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_suffix_collision_with_user_named_tool():
    """If a user-named tool is 'my_tool_1', dedup of 'my_tool' should skip to _2."""
    block = MatchTextPatternBlock()
    # Two nodes with same block name (will collide)
    node_a = _make_mock_node(block, "node_a", input_default={"match": "foo"})
    node_b = _make_mock_node(block, "node_b", input_default={"match": "bar"})

    # A third node that a user has customized to match the _1 suffix pattern
    base = OrchestratorBlock.cleanup(block.name)
    node_c = _make_mock_node(block, "node_c", metadata={"customized_name": f"{base}_1"})

    link_a = _make_mock_link("tools_^_a_~_text", "text", "node_a", "orch")
    link_b = _make_mock_link("tools_^_b_~_text", "text", "node_b", "orch")
    link_c = _make_mock_link("tools_^_c_~_text", "text", "node_c", "orch")

    mock_db = AsyncMock()
    mock_db.get_connected_output_nodes.return_value = [
        (link_a, node_a),
        (link_b, node_b),
        (link_c, node_c),
    ]

    # Patch the DB client so no real database is needed.
    with patch(
        "backend.blocks.orchestrator.get_database_manager_async_client",
        return_value=mock_db,
    ):
        tools = await OrchestratorBlock._create_tool_node_signatures("orch")

    names = [t["function"]["name"] for t in tools]
    assert len(set(names)) == len(names), f"Tool names are not unique: {names}"
    # The user-named tool keeps its name
    assert f"{base}_1" in names
    # The duplicates should skip _1 (taken) and use _2, _3
    assert f"{base}_2" in names
    assert f"{base}_3" in names
|
||||
|
||||
|
||||
def test_disambiguate_skips_malformed_tools():
    """Malformed tools (missing function/name) should not crash disambiguation."""
    tools: list = [
        {"function": {"name": "good_tool", "description": "A tool"}},
        {"function": {"name": "good_tool", "description": "Another tool"}},
        # Missing 'function' key entirely
        {"type": "function"},
        # 'function' present but missing 'name'
        {"function": {"description": "no name"}},
        # Not even a dict
        "not_a_dict",
    ]

    # Must not raise despite the malformed entries.
    _disambiguate_tool_names(tools)

    # Collect names only from well-formed entries; the two good tools
    # should have been disambiguated.
    names = []
    for entry in tools:
        if not isinstance(entry, dict):
            continue
        func = entry.get("function")
        if isinstance(func, dict) and "name" in func:
            names.append(func["name"])
    assert "good_tool_1" in names
    assert "good_tool_2" in names
|
||||
|
||||
|
||||
def test_disambiguate_handles_missing_description():
    """Tools with no description key should still get Pre-configured appended."""
    # First tool deliberately omits 'description' entirely.
    no_desc_tool = {
        "function": {
            "name": "my_tool",
            "_hardcoded_defaults": {"key": "val1"},
        }
    }
    with_desc_tool = {
        "function": {
            "name": "my_tool",
            "description": "Has desc",
            "_hardcoded_defaults": {"key": "val2"},
        }
    }
    tools: list[dict] = [no_desc_tool, with_desc_tool]

    _disambiguate_tool_names(tools)

    tool_1 = next(t for t in tools if t["function"]["name"] == "my_tool_1")
    tool_2 = next(t for t in tools if t["function"]["name"] == "my_tool_2")
    # Both should have Pre-configured
    assert "[Pre-configured:" in tool_1["function"].get("description", "")
    assert "[Pre-configured:" in tool_2["function"].get("description", "")
|
||||
20
autogpt_platform/backend/backend/util/security.py
Normal file
20
autogpt_platform/backend/backend/util/security.py
Normal file
@@ -0,0 +1,20 @@
|
||||
"""Shared security constants for field-level filtering.

Other modules (e.g. orchestrator, future blocks) import from here so the
sensitive-field list stays in one place.
"""

# Field names to exclude from hardcoded-defaults descriptions (case-insensitive).
# Callers are expected to lowercase the field name before membership testing.
SENSITIVE_FIELD_NAMES: frozenset[str] = frozenset(
    (
        "credentials",
        "api_key",
        "password",
        "secret",
        "token",
        "auth",
        "authorization",
        "access_token",
        "refresh_token",
    )
)
|
||||
BIN
test-screenshots/pr-12555/04-library-page.png
Normal file
BIN
test-screenshots/pr-12555/04-library-page.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 40 KiB |
BIN
test-screenshots/pr-12555/12-builder-annotated.png
Normal file
BIN
test-screenshots/pr-12555/12-builder-annotated.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 136 KiB |
BIN
test-screenshots/pr-12555/14-calculator-graph.png
Normal file
BIN
test-screenshots/pr-12555/14-calculator-graph.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 122 KiB |
BIN
test-screenshots/pr-12555/15-graph-fit-view.png
Normal file
BIN
test-screenshots/pr-12555/15-graph-fit-view.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 122 KiB |
BIN
test-screenshots/pr-12555/16-final-builder.png
Normal file
BIN
test-screenshots/pr-12555/16-final-builder.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 127 KiB |
Reference in New Issue
Block a user