mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-02-14 08:45:12 -05:00
Compare commits
4 Commits
ntindle/go
...
feat/opena
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
889b4e4152 | ||
|
|
e8c50b96d1 | ||
|
|
30e854569a | ||
|
|
301d7cbada |
@@ -118,7 +118,7 @@ def build_missing_credentials_from_graph(
|
|||||||
preserving all supported credential types for each field.
|
preserving all supported credential types for each field.
|
||||||
"""
|
"""
|
||||||
matched_keys = set(matched_credentials.keys()) if matched_credentials else set()
|
matched_keys = set(matched_credentials.keys()) if matched_credentials else set()
|
||||||
aggregated_fields = graph.regular_credentials_inputs
|
aggregated_fields = graph.aggregate_credentials_inputs()
|
||||||
|
|
||||||
return {
|
return {
|
||||||
field_key: _serialize_missing_credential(field_key, field_info)
|
field_key: _serialize_missing_credential(field_key, field_info)
|
||||||
@@ -338,7 +338,7 @@ async def match_user_credentials_to_graph(
|
|||||||
missing_creds: list[str] = []
|
missing_creds: list[str] = []
|
||||||
|
|
||||||
# Get aggregated credentials requirements from the graph
|
# Get aggregated credentials requirements from the graph
|
||||||
aggregated_creds = graph.regular_credentials_inputs
|
aggregated_creds = graph.aggregate_credentials_inputs()
|
||||||
logger.debug(
|
logger.debug(
|
||||||
f"Matching credentials for graph {graph.id}: {len(aggregated_creds)} required"
|
f"Matching credentials for graph {graph.id}: {len(aggregated_creds)} required"
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -1,78 +0,0 @@
|
|||||||
"""Tests for chat tools utility functions."""
|
|
||||||
|
|
||||||
from unittest.mock import AsyncMock, MagicMock, patch
|
|
||||||
|
|
||||||
import pytest
|
|
||||||
|
|
||||||
from backend.data.model import CredentialsFieldInfo
|
|
||||||
|
|
||||||
|
|
||||||
def _make_regular_field() -> CredentialsFieldInfo:
|
|
||||||
return CredentialsFieldInfo.model_validate(
|
|
||||||
{
|
|
||||||
"credentials_provider": ["github"],
|
|
||||||
"credentials_types": ["api_key"],
|
|
||||||
"is_auto_credential": False,
|
|
||||||
},
|
|
||||||
by_alias=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def test_build_missing_credentials_excludes_auto_creds():
|
|
||||||
"""
|
|
||||||
build_missing_credentials_from_graph() should use regular_credentials_inputs
|
|
||||||
and thus exclude auto_credentials from the "missing" set.
|
|
||||||
"""
|
|
||||||
from backend.api.features.chat.tools.utils import (
|
|
||||||
build_missing_credentials_from_graph,
|
|
||||||
)
|
|
||||||
|
|
||||||
regular_field = _make_regular_field()
|
|
||||||
|
|
||||||
mock_graph = MagicMock()
|
|
||||||
# regular_credentials_inputs should only return the non-auto field
|
|
||||||
mock_graph.regular_credentials_inputs = {
|
|
||||||
"github_api_key": (regular_field, {("node-1", "credentials")}, True),
|
|
||||||
}
|
|
||||||
|
|
||||||
result = build_missing_credentials_from_graph(mock_graph, matched_credentials=None)
|
|
||||||
|
|
||||||
# Should include the regular credential
|
|
||||||
assert "github_api_key" in result
|
|
||||||
# Should NOT include the auto_credential (not in regular_credentials_inputs)
|
|
||||||
assert "google_oauth2" not in result
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
|
||||||
async def test_match_user_credentials_excludes_auto_creds():
|
|
||||||
"""
|
|
||||||
match_user_credentials_to_graph() should use regular_credentials_inputs
|
|
||||||
and thus exclude auto_credentials from matching.
|
|
||||||
"""
|
|
||||||
from backend.api.features.chat.tools.utils import match_user_credentials_to_graph
|
|
||||||
|
|
||||||
regular_field = _make_regular_field()
|
|
||||||
|
|
||||||
mock_graph = MagicMock()
|
|
||||||
mock_graph.id = "test-graph"
|
|
||||||
# regular_credentials_inputs returns only non-auto fields
|
|
||||||
mock_graph.regular_credentials_inputs = {
|
|
||||||
"github_api_key": (regular_field, {("node-1", "credentials")}, True),
|
|
||||||
}
|
|
||||||
|
|
||||||
# Mock the credentials manager to return no credentials
|
|
||||||
with patch(
|
|
||||||
"backend.api.features.chat.tools.utils.IntegrationCredentialsManager"
|
|
||||||
) as MockCredsMgr:
|
|
||||||
mock_store = AsyncMock()
|
|
||||||
mock_store.get_all_creds.return_value = []
|
|
||||||
MockCredsMgr.return_value.store = mock_store
|
|
||||||
|
|
||||||
matched, missing = await match_user_credentials_to_graph(
|
|
||||||
user_id="test-user", graph=mock_graph
|
|
||||||
)
|
|
||||||
|
|
||||||
# No credentials available, so github should be missing
|
|
||||||
assert len(matched) == 0
|
|
||||||
assert len(missing) == 1
|
|
||||||
assert "github_api_key" in missing[0]
|
|
||||||
@@ -1102,7 +1102,7 @@ async def create_preset_from_graph_execution(
|
|||||||
raise NotFoundError(
|
raise NotFoundError(
|
||||||
f"Graph #{graph_execution.graph_id} not found or accessible"
|
f"Graph #{graph_execution.graph_id} not found or accessible"
|
||||||
)
|
)
|
||||||
elif len(graph.regular_credentials_inputs) > 0:
|
elif len(graph.aggregate_credentials_inputs()) > 0:
|
||||||
raise ValueError(
|
raise ValueError(
|
||||||
f"Graph execution #{graph_exec_id} can't be turned into a preset "
|
f"Graph execution #{graph_exec_id} can't be turned into a preset "
|
||||||
"because it was run before this feature existed "
|
"because it was run before this feature existed "
|
||||||
|
|||||||
@@ -309,8 +309,6 @@ class BlockSchema(BaseModel):
|
|||||||
"credentials_provider": [config.get("provider", "google")],
|
"credentials_provider": [config.get("provider", "google")],
|
||||||
"credentials_types": [config.get("type", "oauth2")],
|
"credentials_types": [config.get("type", "oauth2")],
|
||||||
"credentials_scopes": config.get("scopes"),
|
"credentials_scopes": config.get("scopes"),
|
||||||
"is_auto_credential": True,
|
|
||||||
"input_field_name": info["field_name"],
|
|
||||||
}
|
}
|
||||||
result[kwarg_name] = CredentialsFieldInfo.model_validate(
|
result[kwarg_name] = CredentialsFieldInfo.model_validate(
|
||||||
auto_schema, by_alias=True
|
auto_schema, by_alias=True
|
||||||
|
|||||||
@@ -32,6 +32,14 @@ from backend.data.model import (
|
|||||||
from backend.integrations.providers import ProviderName
|
from backend.integrations.providers import ProviderName
|
||||||
from backend.util import json
|
from backend.util import json
|
||||||
from backend.util.logging import TruncatedLogger
|
from backend.util.logging import TruncatedLogger
|
||||||
|
from backend.util.openai_responses import (
|
||||||
|
convert_tools_to_responses_format,
|
||||||
|
extract_responses_content,
|
||||||
|
extract_responses_reasoning,
|
||||||
|
extract_responses_tool_calls,
|
||||||
|
extract_usage,
|
||||||
|
requires_responses_api,
|
||||||
|
)
|
||||||
from backend.util.prompt import compress_context, estimate_token_count
|
from backend.util.prompt import compress_context, estimate_token_count
|
||||||
from backend.util.text import TextFormatter
|
from backend.util.text import TextFormatter
|
||||||
|
|
||||||
@@ -659,38 +667,72 @@ async def llm_call(
|
|||||||
max_tokens = max(min(available_tokens, model_max_output, user_max), 1)
|
max_tokens = max(min(available_tokens, model_max_output, user_max), 1)
|
||||||
|
|
||||||
if provider == "openai":
|
if provider == "openai":
|
||||||
tools_param = tools if tools else openai.NOT_GIVEN
|
|
||||||
oai_client = openai.AsyncOpenAI(api_key=credentials.api_key.get_secret_value())
|
oai_client = openai.AsyncOpenAI(api_key=credentials.api_key.get_secret_value())
|
||||||
response_format = None
|
|
||||||
|
|
||||||
parallel_tool_calls = get_parallel_tool_calls_param(
|
# Check if this model requires the Responses API (reasoning models: o1, o3, etc.)
|
||||||
llm_model, parallel_tool_calls
|
if requires_responses_api(llm_model.value):
|
||||||
)
|
# Use responses.create for reasoning models
|
||||||
|
tools_converted = (
|
||||||
|
convert_tools_to_responses_format(tools) if tools else None
|
||||||
|
)
|
||||||
|
|
||||||
if force_json_output:
|
response = await oai_client.responses.create(
|
||||||
response_format = {"type": "json_object"}
|
model=llm_model.value,
|
||||||
|
input=prompt, # type: ignore
|
||||||
|
tools=tools_converted, # type: ignore
|
||||||
|
max_output_tokens=max_tokens,
|
||||||
|
store=False, # Don't persist conversations
|
||||||
|
)
|
||||||
|
|
||||||
response = await oai_client.chat.completions.create(
|
tool_calls = extract_responses_tool_calls(response)
|
||||||
model=llm_model.value,
|
reasoning = extract_responses_reasoning(response)
|
||||||
messages=prompt, # type: ignore
|
content = extract_responses_content(response)
|
||||||
response_format=response_format, # type: ignore
|
prompt_tokens, completion_tokens = extract_usage(response, True)
|
||||||
max_completion_tokens=max_tokens,
|
|
||||||
tools=tools_param, # type: ignore
|
|
||||||
parallel_tool_calls=parallel_tool_calls,
|
|
||||||
)
|
|
||||||
|
|
||||||
tool_calls = extract_openai_tool_calls(response)
|
return LLMResponse(
|
||||||
reasoning = extract_openai_reasoning(response)
|
raw_response=response,
|
||||||
|
prompt=prompt,
|
||||||
|
response=content,
|
||||||
|
tool_calls=tool_calls,
|
||||||
|
prompt_tokens=prompt_tokens,
|
||||||
|
completion_tokens=completion_tokens,
|
||||||
|
reasoning=reasoning,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
# Use chat.completions.create for standard models
|
||||||
|
tools_param = tools if tools else openai.NOT_GIVEN
|
||||||
|
response_format = None
|
||||||
|
|
||||||
return LLMResponse(
|
parallel_tool_calls = get_parallel_tool_calls_param(
|
||||||
raw_response=response.choices[0].message,
|
llm_model, parallel_tool_calls
|
||||||
prompt=prompt,
|
)
|
||||||
response=response.choices[0].message.content or "",
|
|
||||||
tool_calls=tool_calls,
|
if force_json_output:
|
||||||
prompt_tokens=response.usage.prompt_tokens if response.usage else 0,
|
response_format = {"type": "json_object"}
|
||||||
completion_tokens=response.usage.completion_tokens if response.usage else 0,
|
|
||||||
reasoning=reasoning,
|
response = await oai_client.chat.completions.create(
|
||||||
)
|
model=llm_model.value,
|
||||||
|
messages=prompt, # type: ignore
|
||||||
|
response_format=response_format, # type: ignore
|
||||||
|
max_completion_tokens=max_tokens,
|
||||||
|
tools=tools_param, # type: ignore
|
||||||
|
parallel_tool_calls=parallel_tool_calls,
|
||||||
|
)
|
||||||
|
|
||||||
|
tool_calls = extract_openai_tool_calls(response)
|
||||||
|
reasoning = extract_openai_reasoning(response)
|
||||||
|
|
||||||
|
return LLMResponse(
|
||||||
|
raw_response=response.choices[0].message,
|
||||||
|
prompt=prompt,
|
||||||
|
response=response.choices[0].message.content or "",
|
||||||
|
tool_calls=tool_calls,
|
||||||
|
prompt_tokens=response.usage.prompt_tokens if response.usage else 0,
|
||||||
|
completion_tokens=(
|
||||||
|
response.usage.completion_tokens if response.usage else 0
|
||||||
|
),
|
||||||
|
reasoning=reasoning,
|
||||||
|
)
|
||||||
elif provider == "anthropic":
|
elif provider == "anthropic":
|
||||||
|
|
||||||
an_tools = convert_openai_tool_fmt_to_anthropic(tools)
|
an_tools = convert_openai_tool_fmt_to_anthropic(tools)
|
||||||
|
|||||||
@@ -434,7 +434,8 @@ class GraphModel(Graph, GraphMeta):
|
|||||||
@computed_field
|
@computed_field
|
||||||
@property
|
@property
|
||||||
def credentials_input_schema(self) -> dict[str, Any]:
|
def credentials_input_schema(self) -> dict[str, Any]:
|
||||||
graph_credentials_inputs = self.regular_credentials_inputs
|
graph_credentials_inputs = self.aggregate_credentials_inputs()
|
||||||
|
|
||||||
logger.debug(
|
logger.debug(
|
||||||
f"Combined credentials input fields for graph #{self.id} ({self.name}): "
|
f"Combined credentials input fields for graph #{self.id} ({self.name}): "
|
||||||
f"{graph_credentials_inputs}"
|
f"{graph_credentials_inputs}"
|
||||||
@@ -590,28 +591,6 @@ class GraphModel(Graph, GraphMeta):
|
|||||||
for key, (field_info, node_field_pairs) in combined.items()
|
for key, (field_info, node_field_pairs) in combined.items()
|
||||||
}
|
}
|
||||||
|
|
||||||
@property
|
|
||||||
def regular_credentials_inputs(
|
|
||||||
self,
|
|
||||||
) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]], bool]]:
|
|
||||||
"""Credentials that need explicit user mapping (CredentialsMetaInput fields)."""
|
|
||||||
return {
|
|
||||||
k: v
|
|
||||||
for k, v in self.aggregate_credentials_inputs().items()
|
|
||||||
if not v[0].is_auto_credential
|
|
||||||
}
|
|
||||||
|
|
||||||
@property
|
|
||||||
def auto_credentials_inputs(
|
|
||||||
self,
|
|
||||||
) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]], bool]]:
|
|
||||||
"""Credentials embedded in file fields (_credentials_id), resolved at execution time."""
|
|
||||||
return {
|
|
||||||
k: v
|
|
||||||
for k, v in self.aggregate_credentials_inputs().items()
|
|
||||||
if v[0].is_auto_credential
|
|
||||||
}
|
|
||||||
|
|
||||||
def reassign_ids(self, user_id: str, reassign_graph_id: bool = False):
|
def reassign_ids(self, user_id: str, reassign_graph_id: bool = False):
|
||||||
"""
|
"""
|
||||||
Reassigns all IDs in the graph to new UUIDs.
|
Reassigns all IDs in the graph to new UUIDs.
|
||||||
@@ -662,16 +641,6 @@ class GraphModel(Graph, GraphMeta):
|
|||||||
) and graph_id in graph_id_map:
|
) and graph_id in graph_id_map:
|
||||||
node.input_default["graph_id"] = graph_id_map[graph_id]
|
node.input_default["graph_id"] = graph_id_map[graph_id]
|
||||||
|
|
||||||
# Clear auto-credentials references (e.g., _credentials_id in
|
|
||||||
# GoogleDriveFile fields) so the new user must re-authenticate
|
|
||||||
# with their own account
|
|
||||||
for node in graph.nodes:
|
|
||||||
if not node.input_default:
|
|
||||||
continue
|
|
||||||
for key, value in node.input_default.items():
|
|
||||||
if isinstance(value, dict) and "_credentials_id" in value:
|
|
||||||
del value["_credentials_id"]
|
|
||||||
|
|
||||||
def validate_graph(
|
def validate_graph(
|
||||||
self,
|
self,
|
||||||
for_run: bool = False,
|
for_run: bool = False,
|
||||||
|
|||||||
@@ -462,329 +462,3 @@ def test_node_credentials_optional_with_other_metadata():
|
|||||||
assert node.credentials_optional is True
|
assert node.credentials_optional is True
|
||||||
assert node.metadata["position"] == {"x": 100, "y": 200}
|
assert node.metadata["position"] == {"x": 100, "y": 200}
|
||||||
assert node.metadata["customized_name"] == "My Custom Node"
|
assert node.metadata["customized_name"] == "My Custom Node"
|
||||||
|
|
||||||
|
|
||||||
# ============================================================================
|
|
||||||
# Tests for CredentialsFieldInfo.combine() field propagation
|
|
||||||
def test_combine_preserves_is_auto_credential_flag():
|
|
||||||
"""
|
|
||||||
CredentialsFieldInfo.combine() must propagate is_auto_credential and
|
|
||||||
input_field_name to the combined result. Regression test for reviewer
|
|
||||||
finding that combine() dropped these fields.
|
|
||||||
"""
|
|
||||||
from backend.data.model import CredentialsFieldInfo
|
|
||||||
|
|
||||||
auto_field = CredentialsFieldInfo.model_validate(
|
|
||||||
{
|
|
||||||
"credentials_provider": ["google"],
|
|
||||||
"credentials_types": ["oauth2"],
|
|
||||||
"credentials_scopes": ["drive.readonly"],
|
|
||||||
"is_auto_credential": True,
|
|
||||||
"input_field_name": "spreadsheet",
|
|
||||||
},
|
|
||||||
by_alias=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
# combine() takes *args of (field_info, key) tuples
|
|
||||||
combined = CredentialsFieldInfo.combine(
|
|
||||||
(auto_field, ("node-1", "credentials")),
|
|
||||||
(auto_field, ("node-2", "credentials")),
|
|
||||||
)
|
|
||||||
|
|
||||||
assert len(combined) == 1
|
|
||||||
group_key = next(iter(combined))
|
|
||||||
combined_info, combined_keys = combined[group_key]
|
|
||||||
|
|
||||||
assert combined_info.is_auto_credential is True
|
|
||||||
assert combined_info.input_field_name == "spreadsheet"
|
|
||||||
assert combined_keys == {("node-1", "credentials"), ("node-2", "credentials")}
|
|
||||||
|
|
||||||
|
|
||||||
def test_combine_preserves_regular_credential_defaults():
|
|
||||||
"""Regular credentials should have is_auto_credential=False after combine()."""
|
|
||||||
from backend.data.model import CredentialsFieldInfo
|
|
||||||
|
|
||||||
regular_field = CredentialsFieldInfo.model_validate(
|
|
||||||
{
|
|
||||||
"credentials_provider": ["github"],
|
|
||||||
"credentials_types": ["api_key"],
|
|
||||||
"is_auto_credential": False,
|
|
||||||
},
|
|
||||||
by_alias=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
combined = CredentialsFieldInfo.combine(
|
|
||||||
(regular_field, ("node-1", "credentials")),
|
|
||||||
)
|
|
||||||
|
|
||||||
group_key = next(iter(combined))
|
|
||||||
combined_info, _ = combined[group_key]
|
|
||||||
|
|
||||||
assert combined_info.is_auto_credential is False
|
|
||||||
assert combined_info.input_field_name is None
|
|
||||||
|
|
||||||
|
|
||||||
# ============================================================================
|
|
||||||
# Tests for _reassign_ids credential clearing (Fix 3: SECRT-1772)
|
|
||||||
|
|
||||||
|
|
||||||
def test_reassign_ids_clears_credentials_id():
|
|
||||||
"""
|
|
||||||
[SECRT-1772] _reassign_ids should clear _credentials_id from
|
|
||||||
GoogleDriveFile-style input_default fields so forked agents
|
|
||||||
don't retain the original creator's credential references.
|
|
||||||
"""
|
|
||||||
from backend.data.graph import GraphModel
|
|
||||||
|
|
||||||
node = Node(
|
|
||||||
id="node-1",
|
|
||||||
block_id=StoreValueBlock().id,
|
|
||||||
input_default={
|
|
||||||
"spreadsheet": {
|
|
||||||
"_credentials_id": "original-cred-id",
|
|
||||||
"id": "file-123",
|
|
||||||
"name": "test.xlsx",
|
|
||||||
"mimeType": "application/vnd.google-apps.spreadsheet",
|
|
||||||
"url": "https://docs.google.com/spreadsheets/d/file-123",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
graph = Graph(
|
|
||||||
id="test-graph",
|
|
||||||
name="Test",
|
|
||||||
description="Test",
|
|
||||||
nodes=[node],
|
|
||||||
links=[],
|
|
||||||
)
|
|
||||||
|
|
||||||
GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})
|
|
||||||
|
|
||||||
# _credentials_id key should be removed (not set to None) so that
|
|
||||||
# _acquire_auto_credentials correctly errors instead of treating it as chained data
|
|
||||||
assert "_credentials_id" not in graph.nodes[0].input_default["spreadsheet"]
|
|
||||||
|
|
||||||
|
|
||||||
def test_reassign_ids_preserves_non_credential_fields():
|
|
||||||
"""
|
|
||||||
Regression guard: _reassign_ids should NOT modify non-credential fields
|
|
||||||
like name, mimeType, id, url.
|
|
||||||
"""
|
|
||||||
from backend.data.graph import GraphModel
|
|
||||||
|
|
||||||
node = Node(
|
|
||||||
id="node-1",
|
|
||||||
block_id=StoreValueBlock().id,
|
|
||||||
input_default={
|
|
||||||
"spreadsheet": {
|
|
||||||
"_credentials_id": "cred-abc",
|
|
||||||
"id": "file-123",
|
|
||||||
"name": "test.xlsx",
|
|
||||||
"mimeType": "application/vnd.google-apps.spreadsheet",
|
|
||||||
"url": "https://docs.google.com/spreadsheets/d/file-123",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
graph = Graph(
|
|
||||||
id="test-graph",
|
|
||||||
name="Test",
|
|
||||||
description="Test",
|
|
||||||
nodes=[node],
|
|
||||||
links=[],
|
|
||||||
)
|
|
||||||
|
|
||||||
GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})
|
|
||||||
|
|
||||||
field = graph.nodes[0].input_default["spreadsheet"]
|
|
||||||
assert field["id"] == "file-123"
|
|
||||||
assert field["name"] == "test.xlsx"
|
|
||||||
assert field["mimeType"] == "application/vnd.google-apps.spreadsheet"
|
|
||||||
assert field["url"] == "https://docs.google.com/spreadsheets/d/file-123"
|
|
||||||
|
|
||||||
|
|
||||||
def test_reassign_ids_handles_no_credentials():
|
|
||||||
"""
|
|
||||||
Regression guard: _reassign_ids should not error when input_default
|
|
||||||
has no dict fields with _credentials_id.
|
|
||||||
"""
|
|
||||||
from backend.data.graph import GraphModel
|
|
||||||
|
|
||||||
node = Node(
|
|
||||||
id="node-1",
|
|
||||||
block_id=StoreValueBlock().id,
|
|
||||||
input_default={
|
|
||||||
"input": "some value",
|
|
||||||
"another_input": 42,
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
graph = Graph(
|
|
||||||
id="test-graph",
|
|
||||||
name="Test",
|
|
||||||
description="Test",
|
|
||||||
nodes=[node],
|
|
||||||
links=[],
|
|
||||||
)
|
|
||||||
|
|
||||||
GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})
|
|
||||||
|
|
||||||
# Should not error, fields unchanged
|
|
||||||
assert graph.nodes[0].input_default["input"] == "some value"
|
|
||||||
assert graph.nodes[0].input_default["another_input"] == 42
|
|
||||||
|
|
||||||
|
|
||||||
def test_reassign_ids_handles_multiple_credential_fields():
|
|
||||||
"""
|
|
||||||
[SECRT-1772] When a node has multiple dict fields with _credentials_id,
|
|
||||||
ALL of them should be cleared.
|
|
||||||
"""
|
|
||||||
from backend.data.graph import GraphModel
|
|
||||||
|
|
||||||
node = Node(
|
|
||||||
id="node-1",
|
|
||||||
block_id=StoreValueBlock().id,
|
|
||||||
input_default={
|
|
||||||
"spreadsheet": {
|
|
||||||
"_credentials_id": "cred-1",
|
|
||||||
"id": "file-1",
|
|
||||||
"name": "file1.xlsx",
|
|
||||||
},
|
|
||||||
"doc_file": {
|
|
||||||
"_credentials_id": "cred-2",
|
|
||||||
"id": "file-2",
|
|
||||||
"name": "file2.docx",
|
|
||||||
},
|
|
||||||
"plain_input": "not a dict",
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
graph = Graph(
|
|
||||||
id="test-graph",
|
|
||||||
name="Test",
|
|
||||||
description="Test",
|
|
||||||
nodes=[node],
|
|
||||||
links=[],
|
|
||||||
)
|
|
||||||
|
|
||||||
GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})
|
|
||||||
|
|
||||||
assert "_credentials_id" not in graph.nodes[0].input_default["spreadsheet"]
|
|
||||||
assert "_credentials_id" not in graph.nodes[0].input_default["doc_file"]
|
|
||||||
assert graph.nodes[0].input_default["plain_input"] == "not a dict"
|
|
||||||
|
|
||||||
|
|
||||||
# ============================================================================
|
|
||||||
# Tests for discriminate() field propagation
|
|
||||||
def test_discriminate_preserves_is_auto_credential_flag():
|
|
||||||
"""
|
|
||||||
CredentialsFieldInfo.discriminate() must propagate is_auto_credential and
|
|
||||||
input_field_name to the discriminated result. Regression test for
|
|
||||||
discriminate() dropping these fields (same class of bug as combine()).
|
|
||||||
"""
|
|
||||||
from backend.data.model import CredentialsFieldInfo
|
|
||||||
|
|
||||||
auto_field = CredentialsFieldInfo.model_validate(
|
|
||||||
{
|
|
||||||
"credentials_provider": ["google", "openai"],
|
|
||||||
"credentials_types": ["oauth2"],
|
|
||||||
"credentials_scopes": ["drive.readonly"],
|
|
||||||
"is_auto_credential": True,
|
|
||||||
"input_field_name": "spreadsheet",
|
|
||||||
"discriminator": "model",
|
|
||||||
"discriminator_mapping": {"gpt-4": "openai", "gemini": "google"},
|
|
||||||
},
|
|
||||||
by_alias=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
discriminated = auto_field.discriminate("gemini")
|
|
||||||
|
|
||||||
assert discriminated.is_auto_credential is True
|
|
||||||
assert discriminated.input_field_name == "spreadsheet"
|
|
||||||
assert discriminated.provider == frozenset(["google"])
|
|
||||||
|
|
||||||
|
|
||||||
def test_discriminate_preserves_regular_credential_defaults():
|
|
||||||
"""Regular credentials should have is_auto_credential=False after discriminate()."""
|
|
||||||
from backend.data.model import CredentialsFieldInfo
|
|
||||||
|
|
||||||
regular_field = CredentialsFieldInfo.model_validate(
|
|
||||||
{
|
|
||||||
"credentials_provider": ["google", "openai"],
|
|
||||||
"credentials_types": ["api_key"],
|
|
||||||
"is_auto_credential": False,
|
|
||||||
"discriminator": "model",
|
|
||||||
"discriminator_mapping": {"gpt-4": "openai", "gemini": "google"},
|
|
||||||
},
|
|
||||||
by_alias=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
discriminated = regular_field.discriminate("gpt-4")
|
|
||||||
|
|
||||||
assert discriminated.is_auto_credential is False
|
|
||||||
assert discriminated.input_field_name is None
|
|
||||||
assert discriminated.provider == frozenset(["openai"])
|
|
||||||
|
|
||||||
|
|
||||||
# ============================================================================
|
|
||||||
# Tests for credentials_input_schema excluding auto_credentials
|
|
||||||
def test_credentials_input_schema_excludes_auto_creds():
|
|
||||||
"""
|
|
||||||
GraphModel.credentials_input_schema should exclude auto_credentials
|
|
||||||
(is_auto_credential=True) from the schema. Auto_credentials are
|
|
||||||
transparently resolved at execution time via file picker data.
|
|
||||||
"""
|
|
||||||
from datetime import datetime, timezone
|
|
||||||
from unittest.mock import PropertyMock, patch
|
|
||||||
|
|
||||||
from backend.data.graph import GraphModel, NodeModel
|
|
||||||
from backend.data.model import CredentialsFieldInfo
|
|
||||||
|
|
||||||
regular_field_info = CredentialsFieldInfo.model_validate(
|
|
||||||
{
|
|
||||||
"credentials_provider": ["github"],
|
|
||||||
"credentials_types": ["api_key"],
|
|
||||||
"is_auto_credential": False,
|
|
||||||
},
|
|
||||||
by_alias=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
graph = GraphModel(
|
|
||||||
id="test-graph",
|
|
||||||
version=1,
|
|
||||||
name="Test",
|
|
||||||
description="Test",
|
|
||||||
user_id="test-user",
|
|
||||||
created_at=datetime.now(timezone.utc),
|
|
||||||
nodes=[
|
|
||||||
NodeModel(
|
|
||||||
id="node-1",
|
|
||||||
block_id=StoreValueBlock().id,
|
|
||||||
input_default={},
|
|
||||||
graph_id="test-graph",
|
|
||||||
graph_version=1,
|
|
||||||
),
|
|
||||||
],
|
|
||||||
links=[],
|
|
||||||
)
|
|
||||||
|
|
||||||
# Mock regular_credentials_inputs to return only the non-auto field (3-tuple)
|
|
||||||
regular_only = {
|
|
||||||
"github_credentials": (
|
|
||||||
regular_field_info,
|
|
||||||
{("node-1", "credentials")},
|
|
||||||
True,
|
|
||||||
),
|
|
||||||
}
|
|
||||||
|
|
||||||
with patch.object(
|
|
||||||
type(graph),
|
|
||||||
"regular_credentials_inputs",
|
|
||||||
new_callable=PropertyMock,
|
|
||||||
return_value=regular_only,
|
|
||||||
):
|
|
||||||
schema = graph.credentials_input_schema
|
|
||||||
field_names = set(schema.get("properties", {}).keys())
|
|
||||||
# Should include regular credential but NOT auto_credential
|
|
||||||
assert "github_credentials" in field_names
|
|
||||||
assert "google_credentials" not in field_names
|
|
||||||
|
|||||||
@@ -574,8 +574,6 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
|
|||||||
discriminator: Optional[str] = None
|
discriminator: Optional[str] = None
|
||||||
discriminator_mapping: Optional[dict[str, CP]] = None
|
discriminator_mapping: Optional[dict[str, CP]] = None
|
||||||
discriminator_values: set[Any] = Field(default_factory=set)
|
discriminator_values: set[Any] = Field(default_factory=set)
|
||||||
is_auto_credential: bool = False
|
|
||||||
input_field_name: Optional[str] = None
|
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def combine(
|
def combine(
|
||||||
@@ -656,9 +654,6 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
|
|||||||
+ "_credentials"
|
+ "_credentials"
|
||||||
)
|
)
|
||||||
|
|
||||||
# Propagate is_auto_credential from the combined field.
|
|
||||||
# All fields in a group should share the same is_auto_credential
|
|
||||||
# value since auto and regular credentials serve different purposes.
|
|
||||||
result[group_key] = (
|
result[group_key] = (
|
||||||
CredentialsFieldInfo[CP, CT](
|
CredentialsFieldInfo[CP, CT](
|
||||||
credentials_provider=combined.provider,
|
credentials_provider=combined.provider,
|
||||||
@@ -667,8 +662,6 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
|
|||||||
discriminator=combined.discriminator,
|
discriminator=combined.discriminator,
|
||||||
discriminator_mapping=combined.discriminator_mapping,
|
discriminator_mapping=combined.discriminator_mapping,
|
||||||
discriminator_values=set(all_discriminator_values),
|
discriminator_values=set(all_discriminator_values),
|
||||||
is_auto_credential=combined.is_auto_credential,
|
|
||||||
input_field_name=combined.input_field_name,
|
|
||||||
),
|
),
|
||||||
combined_keys,
|
combined_keys,
|
||||||
)
|
)
|
||||||
@@ -694,8 +687,6 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
|
|||||||
discriminator=self.discriminator,
|
discriminator=self.discriminator,
|
||||||
discriminator_mapping=self.discriminator_mapping,
|
discriminator_mapping=self.discriminator_mapping,
|
||||||
discriminator_values=self.discriminator_values,
|
discriminator_values=self.discriminator_values,
|
||||||
is_auto_credential=self.is_auto_credential,
|
|
||||||
input_field_name=self.input_field_name,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -168,81 +168,6 @@ def execute_graph(
|
|||||||
T = TypeVar("T")
|
T = TypeVar("T")
|
||||||
|
|
||||||
|
|
||||||
async def _acquire_auto_credentials(
|
|
||||||
input_model: type[BlockSchema],
|
|
||||||
input_data: dict[str, Any],
|
|
||||||
creds_manager: "IntegrationCredentialsManager",
|
|
||||||
user_id: str,
|
|
||||||
) -> tuple[dict[str, Any], list[AsyncRedisLock]]:
|
|
||||||
"""
|
|
||||||
Resolve auto_credentials from GoogleDriveFileField-style inputs.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
(extra_exec_kwargs, locks): kwargs to inject into block execution, and
|
|
||||||
credential locks to release after execution completes.
|
|
||||||
"""
|
|
||||||
extra_exec_kwargs: dict[str, Any] = {}
|
|
||||||
locks: list[AsyncRedisLock] = []
|
|
||||||
|
|
||||||
# NOTE: If a block ever has multiple auto-credential fields, a ValueError
|
|
||||||
# on a later field will strand locks acquired for earlier fields. They'll
|
|
||||||
# auto-expire via Redis TTL, but add a try/except to release partial locks
|
|
||||||
# if that becomes a real scenario.
|
|
||||||
for kwarg_name, info in input_model.get_auto_credentials_fields().items():
|
|
||||||
field_name = info["field_name"]
|
|
||||||
field_data = input_data.get(field_name)
|
|
||||||
|
|
||||||
if field_data and isinstance(field_data, dict):
|
|
||||||
# Check if _credentials_id key exists in the field data
|
|
||||||
if "_credentials_id" in field_data:
|
|
||||||
cred_id = field_data["_credentials_id"]
|
|
||||||
if cred_id:
|
|
||||||
# Credential ID provided - acquire credentials
|
|
||||||
provider = info.get("config", {}).get(
|
|
||||||
"provider", "external service"
|
|
||||||
)
|
|
||||||
file_name = field_data.get("name", "selected file")
|
|
||||||
try:
|
|
||||||
credentials, lock = await creds_manager.acquire(
|
|
||||||
user_id, cred_id
|
|
||||||
)
|
|
||||||
locks.append(lock)
|
|
||||||
extra_exec_kwargs[kwarg_name] = credentials
|
|
||||||
except ValueError:
|
|
||||||
raise ValueError(
|
|
||||||
f"{provider.capitalize()} credentials for "
|
|
||||||
f"'{file_name}' in field '{field_name}' are not "
|
|
||||||
f"available in your account. "
|
|
||||||
f"This can happen if the agent was created by another "
|
|
||||||
f"user or the credentials were deleted. "
|
|
||||||
f"Please open the agent in the builder and re-select "
|
|
||||||
f"the file to authenticate with your own account."
|
|
||||||
)
|
|
||||||
# else: _credentials_id is explicitly None, skip (chained data)
|
|
||||||
else:
|
|
||||||
# _credentials_id key missing entirely - this is an error
|
|
||||||
provider = info.get("config", {}).get("provider", "external service")
|
|
||||||
file_name = field_data.get("name", "selected file")
|
|
||||||
raise ValueError(
|
|
||||||
f"Authentication missing for '{file_name}' in field "
|
|
||||||
f"'{field_name}'. Please re-select the file to authenticate "
|
|
||||||
f"with {provider.capitalize()}."
|
|
||||||
)
|
|
||||||
elif field_data is None and field_name not in input_data:
|
|
||||||
# Field not in input_data at all = connected from upstream block, skip
|
|
||||||
pass
|
|
||||||
else:
|
|
||||||
# field_data is None/empty but key IS in input_data = user didn't select
|
|
||||||
provider = info.get("config", {}).get("provider", "external service")
|
|
||||||
raise ValueError(
|
|
||||||
f"No file selected for '{field_name}'. "
|
|
||||||
f"Please select a file to provide "
|
|
||||||
f"{provider.capitalize()} authentication."
|
|
||||||
)
|
|
||||||
|
|
||||||
return extra_exec_kwargs, locks
|
|
||||||
|
|
||||||
|
|
||||||
async def execute_node(
|
async def execute_node(
|
||||||
node: Node,
|
node: Node,
|
||||||
data: NodeExecutionEntry,
|
data: NodeExecutionEntry,
|
||||||
@@ -345,14 +270,41 @@ async def execute_node(
|
|||||||
extra_exec_kwargs[field_name] = credentials
|
extra_exec_kwargs[field_name] = credentials
|
||||||
|
|
||||||
# Handle auto-generated credentials (e.g., from GoogleDriveFileInput)
|
# Handle auto-generated credentials (e.g., from GoogleDriveFileInput)
|
||||||
auto_extra_kwargs, auto_locks = await _acquire_auto_credentials(
|
for kwarg_name, info in input_model.get_auto_credentials_fields().items():
|
||||||
input_model=input_model,
|
field_name = info["field_name"]
|
||||||
input_data=input_data,
|
field_data = input_data.get(field_name)
|
||||||
creds_manager=creds_manager,
|
if field_data and isinstance(field_data, dict):
|
||||||
user_id=user_id,
|
# Check if _credentials_id key exists in the field data
|
||||||
)
|
if "_credentials_id" in field_data:
|
||||||
extra_exec_kwargs.update(auto_extra_kwargs)
|
cred_id = field_data["_credentials_id"]
|
||||||
creds_locks.extend(auto_locks)
|
if cred_id:
|
||||||
|
# Credential ID provided - acquire credentials
|
||||||
|
provider = info.get("config", {}).get(
|
||||||
|
"provider", "external service"
|
||||||
|
)
|
||||||
|
file_name = field_data.get("name", "selected file")
|
||||||
|
try:
|
||||||
|
credentials, lock = await creds_manager.acquire(
|
||||||
|
user_id, cred_id
|
||||||
|
)
|
||||||
|
creds_locks.append(lock)
|
||||||
|
extra_exec_kwargs[kwarg_name] = credentials
|
||||||
|
except ValueError:
|
||||||
|
# Credential was deleted or doesn't exist
|
||||||
|
raise ValueError(
|
||||||
|
f"Authentication expired for '{file_name}' in field '{field_name}'. "
|
||||||
|
f"The saved {provider.capitalize()} credentials no longer exist. "
|
||||||
|
f"Please re-select the file to re-authenticate."
|
||||||
|
)
|
||||||
|
# else: _credentials_id is explicitly None, skip credentials (for chained data)
|
||||||
|
else:
|
||||||
|
# _credentials_id key missing entirely - this is an error
|
||||||
|
provider = info.get("config", {}).get("provider", "external service")
|
||||||
|
file_name = field_data.get("name", "selected file")
|
||||||
|
raise ValueError(
|
||||||
|
f"Authentication missing for '{file_name}' in field '{field_name}'. "
|
||||||
|
f"Please re-select the file to authenticate with {provider.capitalize()}."
|
||||||
|
)
|
||||||
|
|
||||||
output_size = 0
|
output_size = 0
|
||||||
|
|
||||||
|
|||||||
@@ -1,320 +0,0 @@
|
|||||||
"""
|
|
||||||
Tests for auto_credentials handling in execute_node().
|
|
||||||
|
|
||||||
These test the _acquire_auto_credentials() helper function extracted from
|
|
||||||
execute_node() (manager.py lines 273-308).
|
|
||||||
"""
|
|
||||||
|
|
||||||
import pytest
|
|
||||||
from pytest_mock import MockerFixture
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
|
||||||
def google_drive_file_data():
|
|
||||||
return {
|
|
||||||
"valid": {
|
|
||||||
"_credentials_id": "cred-id-123",
|
|
||||||
"id": "file-123",
|
|
||||||
"name": "test.xlsx",
|
|
||||||
"mimeType": "application/vnd.google-apps.spreadsheet",
|
|
||||||
},
|
|
||||||
"chained": {
|
|
||||||
"_credentials_id": None,
|
|
||||||
"id": "file-456",
|
|
||||||
"name": "chained.xlsx",
|
|
||||||
"mimeType": "application/vnd.google-apps.spreadsheet",
|
|
||||||
},
|
|
||||||
"missing_key": {
|
|
||||||
"id": "file-789",
|
|
||||||
"name": "bad.xlsx",
|
|
||||||
"mimeType": "application/vnd.google-apps.spreadsheet",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
|
||||||
def mock_input_model(mocker: MockerFixture):
|
|
||||||
"""Create a mock input model with get_auto_credentials_fields() returning one field."""
|
|
||||||
input_model = mocker.MagicMock()
|
|
||||||
input_model.get_auto_credentials_fields.return_value = {
|
|
||||||
"credentials": {
|
|
||||||
"field_name": "spreadsheet",
|
|
||||||
"config": {
|
|
||||||
"provider": "google",
|
|
||||||
"type": "oauth2",
|
|
||||||
"scopes": ["https://www.googleapis.com/auth/drive.readonly"],
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return input_model
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
|
||||||
def mock_creds_manager(mocker: MockerFixture):
|
|
||||||
manager = mocker.AsyncMock()
|
|
||||||
mock_lock = mocker.AsyncMock()
|
|
||||||
mock_creds = mocker.MagicMock()
|
|
||||||
mock_creds.id = "cred-id-123"
|
|
||||||
mock_creds.provider = "google"
|
|
||||||
manager.acquire.return_value = (mock_creds, mock_lock)
|
|
||||||
return manager, mock_creds, mock_lock
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
|
||||||
async def test_auto_credentials_happy_path(
|
|
||||||
mocker: MockerFixture,
|
|
||||||
google_drive_file_data,
|
|
||||||
mock_input_model,
|
|
||||||
mock_creds_manager,
|
|
||||||
):
|
|
||||||
"""When field_data has a valid _credentials_id, credentials should be acquired."""
|
|
||||||
from backend.executor.manager import _acquire_auto_credentials
|
|
||||||
|
|
||||||
manager, mock_creds, mock_lock = mock_creds_manager
|
|
||||||
input_data = {"spreadsheet": google_drive_file_data["valid"]}
|
|
||||||
|
|
||||||
extra_kwargs, locks = await _acquire_auto_credentials(
|
|
||||||
input_model=mock_input_model,
|
|
||||||
input_data=input_data,
|
|
||||||
creds_manager=manager,
|
|
||||||
user_id="user-1",
|
|
||||||
)
|
|
||||||
|
|
||||||
manager.acquire.assert_called_once_with("user-1", "cred-id-123")
|
|
||||||
assert extra_kwargs["credentials"] == mock_creds
|
|
||||||
assert mock_lock in locks
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
|
||||||
async def test_auto_credentials_field_none_static_raises(
|
|
||||||
mocker: MockerFixture,
|
|
||||||
mock_input_model,
|
|
||||||
mock_creds_manager,
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
[THE BUG FIX TEST — OPEN-2895]
|
|
||||||
When field_data is None and the key IS in input_data (user didn't select a file),
|
|
||||||
should raise ValueError instead of silently skipping.
|
|
||||||
"""
|
|
||||||
from backend.executor.manager import _acquire_auto_credentials
|
|
||||||
|
|
||||||
manager, _, _ = mock_creds_manager
|
|
||||||
# Key is present but value is None = user didn't select a file
|
|
||||||
input_data = {"spreadsheet": None}
|
|
||||||
|
|
||||||
with pytest.raises(ValueError, match="No file selected"):
|
|
||||||
await _acquire_auto_credentials(
|
|
||||||
input_model=mock_input_model,
|
|
||||||
input_data=input_data,
|
|
||||||
creds_manager=manager,
|
|
||||||
user_id="user-1",
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
|
||||||
async def test_auto_credentials_field_absent_skips(
|
|
||||||
mocker: MockerFixture,
|
|
||||||
mock_input_model,
|
|
||||||
mock_creds_manager,
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
When the field key is NOT in input_data at all (upstream connection),
|
|
||||||
should skip without error.
|
|
||||||
"""
|
|
||||||
from backend.executor.manager import _acquire_auto_credentials
|
|
||||||
|
|
||||||
manager, _, _ = mock_creds_manager
|
|
||||||
# Key not present = connected from upstream block
|
|
||||||
input_data = {}
|
|
||||||
|
|
||||||
extra_kwargs, locks = await _acquire_auto_credentials(
|
|
||||||
input_model=mock_input_model,
|
|
||||||
input_data=input_data,
|
|
||||||
creds_manager=manager,
|
|
||||||
user_id="user-1",
|
|
||||||
)
|
|
||||||
|
|
||||||
manager.acquire.assert_not_called()
|
|
||||||
assert "credentials" not in extra_kwargs
|
|
||||||
assert locks == []
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
|
||||||
async def test_auto_credentials_chained_cred_id_none(
|
|
||||||
mocker: MockerFixture,
|
|
||||||
google_drive_file_data,
|
|
||||||
mock_input_model,
|
|
||||||
mock_creds_manager,
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
When _credentials_id is explicitly None (chained data from upstream),
|
|
||||||
should skip credential acquisition.
|
|
||||||
"""
|
|
||||||
from backend.executor.manager import _acquire_auto_credentials
|
|
||||||
|
|
||||||
manager, _, _ = mock_creds_manager
|
|
||||||
input_data = {"spreadsheet": google_drive_file_data["chained"]}
|
|
||||||
|
|
||||||
extra_kwargs, locks = await _acquire_auto_credentials(
|
|
||||||
input_model=mock_input_model,
|
|
||||||
input_data=input_data,
|
|
||||||
creds_manager=manager,
|
|
||||||
user_id="user-1",
|
|
||||||
)
|
|
||||||
|
|
||||||
manager.acquire.assert_not_called()
|
|
||||||
assert "credentials" not in extra_kwargs
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
|
||||||
async def test_auto_credentials_missing_cred_id_key_raises(
|
|
||||||
mocker: MockerFixture,
|
|
||||||
google_drive_file_data,
|
|
||||||
mock_input_model,
|
|
||||||
mock_creds_manager,
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
When _credentials_id key is missing entirely from field_data dict,
|
|
||||||
should raise ValueError.
|
|
||||||
"""
|
|
||||||
from backend.executor.manager import _acquire_auto_credentials
|
|
||||||
|
|
||||||
manager, _, _ = mock_creds_manager
|
|
||||||
input_data = {"spreadsheet": google_drive_file_data["missing_key"]}
|
|
||||||
|
|
||||||
with pytest.raises(ValueError, match="Authentication missing"):
|
|
||||||
await _acquire_auto_credentials(
|
|
||||||
input_model=mock_input_model,
|
|
||||||
input_data=input_data,
|
|
||||||
creds_manager=manager,
|
|
||||||
user_id="user-1",
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
|
||||||
async def test_auto_credentials_ownership_mismatch_error(
|
|
||||||
mocker: MockerFixture,
|
|
||||||
google_drive_file_data,
|
|
||||||
mock_input_model,
|
|
||||||
mock_creds_manager,
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
[SECRT-1772] When acquire() raises ValueError (credential belongs to another user),
|
|
||||||
the error message should mention 'not available' (not 'expired').
|
|
||||||
"""
|
|
||||||
from backend.executor.manager import _acquire_auto_credentials
|
|
||||||
|
|
||||||
manager, _, _ = mock_creds_manager
|
|
||||||
manager.acquire.side_effect = ValueError(
|
|
||||||
"Credentials #cred-id-123 for user #user-2 not found"
|
|
||||||
)
|
|
||||||
input_data = {"spreadsheet": google_drive_file_data["valid"]}
|
|
||||||
|
|
||||||
with pytest.raises(ValueError, match="not available in your account"):
|
|
||||||
await _acquire_auto_credentials(
|
|
||||||
input_model=mock_input_model,
|
|
||||||
input_data=input_data,
|
|
||||||
creds_manager=manager,
|
|
||||||
user_id="user-2",
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
|
||||||
async def test_auto_credentials_deleted_credential_error(
|
|
||||||
mocker: MockerFixture,
|
|
||||||
google_drive_file_data,
|
|
||||||
mock_input_model,
|
|
||||||
mock_creds_manager,
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
[SECRT-1772] When acquire() raises ValueError (credential was deleted),
|
|
||||||
the error message should mention 'not available' (not 'expired').
|
|
||||||
"""
|
|
||||||
from backend.executor.manager import _acquire_auto_credentials
|
|
||||||
|
|
||||||
manager, _, _ = mock_creds_manager
|
|
||||||
manager.acquire.side_effect = ValueError(
|
|
||||||
"Credentials #cred-id-123 for user #user-1 not found"
|
|
||||||
)
|
|
||||||
input_data = {"spreadsheet": google_drive_file_data["valid"]}
|
|
||||||
|
|
||||||
with pytest.raises(ValueError, match="not available in your account"):
|
|
||||||
await _acquire_auto_credentials(
|
|
||||||
input_model=mock_input_model,
|
|
||||||
input_data=input_data,
|
|
||||||
creds_manager=manager,
|
|
||||||
user_id="user-1",
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
|
||||||
async def test_auto_credentials_lock_appended(
|
|
||||||
mocker: MockerFixture,
|
|
||||||
google_drive_file_data,
|
|
||||||
mock_input_model,
|
|
||||||
mock_creds_manager,
|
|
||||||
):
|
|
||||||
"""Lock from acquire() should be included in returned locks list."""
|
|
||||||
from backend.executor.manager import _acquire_auto_credentials
|
|
||||||
|
|
||||||
manager, _, mock_lock = mock_creds_manager
|
|
||||||
input_data = {"spreadsheet": google_drive_file_data["valid"]}
|
|
||||||
|
|
||||||
extra_kwargs, locks = await _acquire_auto_credentials(
|
|
||||||
input_model=mock_input_model,
|
|
||||||
input_data=input_data,
|
|
||||||
creds_manager=manager,
|
|
||||||
user_id="user-1",
|
|
||||||
)
|
|
||||||
|
|
||||||
assert len(locks) == 1
|
|
||||||
assert locks[0] is mock_lock
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
|
||||||
async def test_auto_credentials_multiple_fields(
|
|
||||||
mocker: MockerFixture,
|
|
||||||
mock_creds_manager,
|
|
||||||
):
|
|
||||||
"""When there are multiple auto_credentials fields, only valid ones should acquire."""
|
|
||||||
from backend.executor.manager import _acquire_auto_credentials
|
|
||||||
|
|
||||||
manager, mock_creds, mock_lock = mock_creds_manager
|
|
||||||
|
|
||||||
input_model = mocker.MagicMock()
|
|
||||||
input_model.get_auto_credentials_fields.return_value = {
|
|
||||||
"credentials": {
|
|
||||||
"field_name": "spreadsheet",
|
|
||||||
"config": {"provider": "google", "type": "oauth2"},
|
|
||||||
},
|
|
||||||
"credentials2": {
|
|
||||||
"field_name": "doc_file",
|
|
||||||
"config": {"provider": "google", "type": "oauth2"},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
input_data = {
|
|
||||||
"spreadsheet": {
|
|
||||||
"_credentials_id": "cred-id-123",
|
|
||||||
"id": "file-1",
|
|
||||||
"name": "file1.xlsx",
|
|
||||||
},
|
|
||||||
"doc_file": {
|
|
||||||
"_credentials_id": None,
|
|
||||||
"id": "file-2",
|
|
||||||
"name": "chained.doc",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
extra_kwargs, locks = await _acquire_auto_credentials(
|
|
||||||
input_model=input_model,
|
|
||||||
input_data=input_data,
|
|
||||||
creds_manager=manager,
|
|
||||||
user_id="user-1",
|
|
||||||
)
|
|
||||||
|
|
||||||
# Only the first field should have acquired credentials
|
|
||||||
manager.acquire.assert_called_once_with("user-1", "cred-id-123")
|
|
||||||
assert "credentials" in extra_kwargs
|
|
||||||
assert "credentials2" not in extra_kwargs
|
|
||||||
assert len(locks) == 1
|
|
||||||
@@ -254,8 +254,7 @@ async def _validate_node_input_credentials(
|
|||||||
|
|
||||||
# Find any fields of type CredentialsMetaInput
|
# Find any fields of type CredentialsMetaInput
|
||||||
credentials_fields = block.input_schema.get_credentials_fields()
|
credentials_fields = block.input_schema.get_credentials_fields()
|
||||||
auto_credentials_fields = block.input_schema.get_auto_credentials_fields()
|
if not credentials_fields:
|
||||||
if not credentials_fields and not auto_credentials_fields:
|
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# Track if any credential field is missing for this node
|
# Track if any credential field is missing for this node
|
||||||
@@ -335,47 +334,6 @@ async def _validate_node_input_credentials(
|
|||||||
] = "Invalid credentials: type/provider mismatch"
|
] = "Invalid credentials: type/provider mismatch"
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# Validate auto-credentials (GoogleDriveFileField-based)
|
|
||||||
# These have _credentials_id embedded in the file field data
|
|
||||||
if auto_credentials_fields:
|
|
||||||
for _kwarg_name, info in auto_credentials_fields.items():
|
|
||||||
field_name = info["field_name"]
|
|
||||||
# Check input_default and nodes_input_masks for the field value
|
|
||||||
field_value = node.input_default.get(field_name)
|
|
||||||
if nodes_input_masks and node.id in nodes_input_masks:
|
|
||||||
field_value = nodes_input_masks[node.id].get(
|
|
||||||
field_name, field_value
|
|
||||||
)
|
|
||||||
|
|
||||||
if field_value and isinstance(field_value, dict):
|
|
||||||
if "_credentials_id" not in field_value:
|
|
||||||
# Key removed (e.g., on fork) — needs re-auth
|
|
||||||
has_missing_credentials = True
|
|
||||||
credential_errors[node.id][field_name] = (
|
|
||||||
"Authentication missing for the selected file. "
|
|
||||||
"Please re-select the file to authenticate with "
|
|
||||||
"your own account."
|
|
||||||
)
|
|
||||||
continue
|
|
||||||
cred_id = field_value.get("_credentials_id")
|
|
||||||
if cred_id and isinstance(cred_id, str):
|
|
||||||
try:
|
|
||||||
creds_store = get_integration_credentials_store()
|
|
||||||
creds = await creds_store.get_creds_by_id(user_id, cred_id)
|
|
||||||
except Exception as e:
|
|
||||||
has_missing_credentials = True
|
|
||||||
credential_errors[node.id][
|
|
||||||
field_name
|
|
||||||
] = f"Credentials not available: {e}"
|
|
||||||
continue
|
|
||||||
if not creds:
|
|
||||||
has_missing_credentials = True
|
|
||||||
credential_errors[node.id][field_name] = (
|
|
||||||
"The saved credentials are not available "
|
|
||||||
"for your account. Please re-select the file to "
|
|
||||||
"authenticate with your own account."
|
|
||||||
)
|
|
||||||
|
|
||||||
# If node has optional credentials and any are missing, mark for skipping
|
# If node has optional credentials and any are missing, mark for skipping
|
||||||
# But only if there are no other errors for this node
|
# But only if there are no other errors for this node
|
||||||
if (
|
if (
|
||||||
@@ -407,9 +365,8 @@ def make_node_credentials_input_map(
|
|||||||
"""
|
"""
|
||||||
result: dict[str, dict[str, JsonValue]] = {}
|
result: dict[str, dict[str, JsonValue]] = {}
|
||||||
|
|
||||||
# Only map regular credentials (not auto_credentials, which are resolved
|
# Get aggregated credentials fields for the graph
|
||||||
# at execution time from _credentials_id in file field data)
|
graph_cred_inputs = graph.aggregate_credentials_inputs()
|
||||||
graph_cred_inputs = graph.regular_credentials_inputs
|
|
||||||
|
|
||||||
for graph_input_name, (_, compatible_node_fields, _) in graph_cred_inputs.items():
|
for graph_input_name, (_, compatible_node_fields, _) in graph_cred_inputs.items():
|
||||||
# Best-effort map: skip missing items
|
# Best-effort map: skip missing items
|
||||||
|
|||||||
@@ -907,335 +907,3 @@ async def test_stop_graph_execution_cascades_to_child_with_reviews(
|
|||||||
|
|
||||||
# Verify both parent and child status updates
|
# Verify both parent and child status updates
|
||||||
assert mock_execution_db.update_graph_execution_stats.call_count >= 1
|
assert mock_execution_db.update_graph_execution_stats.call_count >= 1
|
||||||
|
|
||||||
|
|
||||||
# ============================================================================
|
|
||||||
# Tests for auto_credentials validation in _validate_node_input_credentials
|
|
||||||
# (Fix 3: SECRT-1772 + Fix 4: Path 4)
|
|
||||||
# ============================================================================
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
|
||||||
async def test_validate_node_input_credentials_auto_creds_valid(
|
|
||||||
mocker: MockerFixture,
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
[SECRT-1772] When a node has auto_credentials with a valid _credentials_id
|
|
||||||
that exists in the store, validation should pass without errors.
|
|
||||||
"""
|
|
||||||
from backend.executor.utils import _validate_node_input_credentials
|
|
||||||
|
|
||||||
mock_node = mocker.MagicMock()
|
|
||||||
mock_node.id = "node-with-auto-creds"
|
|
||||||
mock_node.credentials_optional = False
|
|
||||||
mock_node.input_default = {
|
|
||||||
"spreadsheet": {
|
|
||||||
"_credentials_id": "valid-cred-id",
|
|
||||||
"id": "file-123",
|
|
||||||
"name": "test.xlsx",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
mock_block = mocker.MagicMock()
|
|
||||||
# No regular credentials fields
|
|
||||||
mock_block.input_schema.get_credentials_fields.return_value = {}
|
|
||||||
# Has auto_credentials fields
|
|
||||||
mock_block.input_schema.get_auto_credentials_fields.return_value = {
|
|
||||||
"credentials": {
|
|
||||||
"field_name": "spreadsheet",
|
|
||||||
"config": {"provider": "google", "type": "oauth2"},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
mock_node.block = mock_block
|
|
||||||
|
|
||||||
mock_graph = mocker.MagicMock()
|
|
||||||
mock_graph.nodes = [mock_node]
|
|
||||||
|
|
||||||
# Mock the credentials store to return valid credentials
|
|
||||||
mock_store = mocker.MagicMock()
|
|
||||||
mock_creds = mocker.MagicMock()
|
|
||||||
mock_creds.id = "valid-cred-id"
|
|
||||||
mock_store.get_creds_by_id = mocker.AsyncMock(return_value=mock_creds)
|
|
||||||
mocker.patch(
|
|
||||||
"backend.executor.utils.get_integration_credentials_store",
|
|
||||||
return_value=mock_store,
|
|
||||||
)
|
|
||||||
|
|
||||||
errors, nodes_to_skip = await _validate_node_input_credentials(
|
|
||||||
graph=mock_graph,
|
|
||||||
user_id="test-user",
|
|
||||||
nodes_input_masks=None,
|
|
||||||
)
|
|
||||||
|
|
||||||
assert mock_node.id not in errors
|
|
||||||
assert mock_node.id not in nodes_to_skip
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
|
||||||
async def test_validate_node_input_credentials_auto_creds_missing(
|
|
||||||
mocker: MockerFixture,
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
[SECRT-1772] When a node has auto_credentials with a _credentials_id
|
|
||||||
that doesn't exist for the current user, validation should report an error.
|
|
||||||
"""
|
|
||||||
from backend.executor.utils import _validate_node_input_credentials
|
|
||||||
|
|
||||||
mock_node = mocker.MagicMock()
|
|
||||||
mock_node.id = "node-with-bad-auto-creds"
|
|
||||||
mock_node.credentials_optional = False
|
|
||||||
mock_node.input_default = {
|
|
||||||
"spreadsheet": {
|
|
||||||
"_credentials_id": "other-users-cred-id",
|
|
||||||
"id": "file-123",
|
|
||||||
"name": "test.xlsx",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
mock_block = mocker.MagicMock()
|
|
||||||
mock_block.input_schema.get_credentials_fields.return_value = {}
|
|
||||||
mock_block.input_schema.get_auto_credentials_fields.return_value = {
|
|
||||||
"credentials": {
|
|
||||||
"field_name": "spreadsheet",
|
|
||||||
"config": {"provider": "google", "type": "oauth2"},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
mock_node.block = mock_block
|
|
||||||
|
|
||||||
mock_graph = mocker.MagicMock()
|
|
||||||
mock_graph.nodes = [mock_node]
|
|
||||||
|
|
||||||
# Mock the credentials store to return None (cred not found for this user)
|
|
||||||
mock_store = mocker.MagicMock()
|
|
||||||
mock_store.get_creds_by_id = mocker.AsyncMock(return_value=None)
|
|
||||||
mocker.patch(
|
|
||||||
"backend.executor.utils.get_integration_credentials_store",
|
|
||||||
return_value=mock_store,
|
|
||||||
)
|
|
||||||
|
|
||||||
errors, nodes_to_skip = await _validate_node_input_credentials(
|
|
||||||
graph=mock_graph,
|
|
||||||
user_id="different-user",
|
|
||||||
nodes_input_masks=None,
|
|
||||||
)
|
|
||||||
|
|
||||||
assert mock_node.id in errors
|
|
||||||
assert "spreadsheet" in errors[mock_node.id]
|
|
||||||
assert "not available" in errors[mock_node.id]["spreadsheet"].lower()
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
|
||||||
async def test_validate_node_input_credentials_both_regular_and_auto(
|
|
||||||
mocker: MockerFixture,
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
[SECRT-1772] A node that has BOTH regular credentials AND auto_credentials
|
|
||||||
should have both validated.
|
|
||||||
"""
|
|
||||||
from backend.executor.utils import _validate_node_input_credentials
|
|
||||||
|
|
||||||
mock_node = mocker.MagicMock()
|
|
||||||
mock_node.id = "node-with-both-creds"
|
|
||||||
mock_node.credentials_optional = False
|
|
||||||
mock_node.input_default = {
|
|
||||||
"credentials": {
|
|
||||||
"id": "regular-cred-id",
|
|
||||||
"provider": "github",
|
|
||||||
"type": "api_key",
|
|
||||||
},
|
|
||||||
"spreadsheet": {
|
|
||||||
"_credentials_id": "auto-cred-id",
|
|
||||||
"id": "file-123",
|
|
||||||
"name": "test.xlsx",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
mock_credentials_field_type = mocker.MagicMock()
|
|
||||||
mock_credentials_meta = mocker.MagicMock()
|
|
||||||
mock_credentials_meta.id = "regular-cred-id"
|
|
||||||
mock_credentials_meta.provider = "github"
|
|
||||||
mock_credentials_meta.type = "api_key"
|
|
||||||
mock_credentials_field_type.model_validate.return_value = mock_credentials_meta
|
|
||||||
|
|
||||||
mock_block = mocker.MagicMock()
|
|
||||||
# Regular credentials field
|
|
||||||
mock_block.input_schema.get_credentials_fields.return_value = {
|
|
||||||
"credentials": mock_credentials_field_type,
|
|
||||||
}
|
|
||||||
# Auto-credentials field
|
|
||||||
mock_block.input_schema.get_auto_credentials_fields.return_value = {
|
|
||||||
"auto_credentials": {
|
|
||||||
"field_name": "spreadsheet",
|
|
||||||
"config": {"provider": "google", "type": "oauth2"},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
mock_node.block = mock_block
|
|
||||||
|
|
||||||
mock_graph = mocker.MagicMock()
|
|
||||||
mock_graph.nodes = [mock_node]
|
|
||||||
|
|
||||||
# Mock the credentials store to return valid credentials for both
|
|
||||||
mock_store = mocker.MagicMock()
|
|
||||||
mock_regular_creds = mocker.MagicMock()
|
|
||||||
mock_regular_creds.id = "regular-cred-id"
|
|
||||||
mock_regular_creds.provider = "github"
|
|
||||||
mock_regular_creds.type = "api_key"
|
|
||||||
|
|
||||||
mock_auto_creds = mocker.MagicMock()
|
|
||||||
mock_auto_creds.id = "auto-cred-id"
|
|
||||||
|
|
||||||
def get_creds_side_effect(user_id, cred_id):
|
|
||||||
if cred_id == "regular-cred-id":
|
|
||||||
return mock_regular_creds
|
|
||||||
elif cred_id == "auto-cred-id":
|
|
||||||
return mock_auto_creds
|
|
||||||
return None
|
|
||||||
|
|
||||||
mock_store.get_creds_by_id = mocker.AsyncMock(side_effect=get_creds_side_effect)
|
|
||||||
mocker.patch(
|
|
||||||
"backend.executor.utils.get_integration_credentials_store",
|
|
||||||
return_value=mock_store,
|
|
||||||
)
|
|
||||||
|
|
||||||
errors, nodes_to_skip = await _validate_node_input_credentials(
|
|
||||||
graph=mock_graph,
|
|
||||||
user_id="test-user",
|
|
||||||
nodes_input_masks=None,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Both should validate successfully - no errors
|
|
||||||
assert mock_node.id not in errors
|
|
||||||
assert mock_node.id not in nodes_to_skip
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
|
||||||
async def test_validate_node_input_credentials_auto_creds_skipped_when_none(
|
|
||||||
mocker: MockerFixture,
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
When a node has auto_credentials but the field value has _credentials_id=None
|
|
||||||
(e.g., from upstream connection), validation should skip it without error.
|
|
||||||
"""
|
|
||||||
from backend.executor.utils import _validate_node_input_credentials
|
|
||||||
|
|
||||||
mock_node = mocker.MagicMock()
|
|
||||||
mock_node.id = "node-with-chained-auto-creds"
|
|
||||||
mock_node.credentials_optional = False
|
|
||||||
mock_node.input_default = {
|
|
||||||
"spreadsheet": {
|
|
||||||
"_credentials_id": None,
|
|
||||||
"id": "file-123",
|
|
||||||
"name": "test.xlsx",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
mock_block = mocker.MagicMock()
|
|
||||||
mock_block.input_schema.get_credentials_fields.return_value = {}
|
|
||||||
mock_block.input_schema.get_auto_credentials_fields.return_value = {
|
|
||||||
"credentials": {
|
|
||||||
"field_name": "spreadsheet",
|
|
||||||
"config": {"provider": "google", "type": "oauth2"},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
mock_node.block = mock_block
|
|
||||||
|
|
||||||
mock_graph = mocker.MagicMock()
|
|
||||||
mock_graph.nodes = [mock_node]
|
|
||||||
|
|
||||||
errors, nodes_to_skip = await _validate_node_input_credentials(
|
|
||||||
graph=mock_graph,
|
|
||||||
user_id="test-user",
|
|
||||||
nodes_input_masks=None,
|
|
||||||
)
|
|
||||||
|
|
||||||
# No error - chained data with None cred_id is valid
|
|
||||||
assert mock_node.id not in errors
|
|
||||||
|
|
||||||
|
|
||||||
# ============================================================================
|
|
||||||
# Tests for CredentialsFieldInfo auto_credential tag (Fix 4: Path 4)
|
|
||||||
# ============================================================================
|
|
||||||
|
|
||||||
|
|
||||||
def test_credentials_field_info_auto_credential_tag():
|
|
||||||
"""
|
|
||||||
[Path 4] CredentialsFieldInfo should support is_auto_credential and
|
|
||||||
input_field_name fields for distinguishing auto from regular credentials.
|
|
||||||
"""
|
|
||||||
from backend.data.model import CredentialsFieldInfo
|
|
||||||
|
|
||||||
# Regular credential should have is_auto_credential=False by default
|
|
||||||
regular = CredentialsFieldInfo.model_validate(
|
|
||||||
{
|
|
||||||
"credentials_provider": ["github"],
|
|
||||||
"credentials_types": ["api_key"],
|
|
||||||
},
|
|
||||||
by_alias=True,
|
|
||||||
)
|
|
||||||
assert regular.is_auto_credential is False
|
|
||||||
assert regular.input_field_name is None
|
|
||||||
|
|
||||||
# Auto credential should have is_auto_credential=True
|
|
||||||
auto = CredentialsFieldInfo.model_validate(
|
|
||||||
{
|
|
||||||
"credentials_provider": ["google"],
|
|
||||||
"credentials_types": ["oauth2"],
|
|
||||||
"is_auto_credential": True,
|
|
||||||
"input_field_name": "spreadsheet",
|
|
||||||
},
|
|
||||||
by_alias=True,
|
|
||||||
)
|
|
||||||
assert auto.is_auto_credential is True
|
|
||||||
assert auto.input_field_name == "spreadsheet"
|
|
||||||
|
|
||||||
|
|
||||||
def test_make_node_credentials_input_map_excludes_auto_creds(
|
|
||||||
mocker: MockerFixture,
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
[Path 4] make_node_credentials_input_map should only include regular credentials,
|
|
||||||
not auto_credentials (which are resolved at execution time).
|
|
||||||
"""
|
|
||||||
from backend.data.model import CredentialsFieldInfo, CredentialsMetaInput
|
|
||||||
from backend.executor.utils import make_node_credentials_input_map
|
|
||||||
from backend.integrations.providers import ProviderName
|
|
||||||
|
|
||||||
# Create a mock graph with aggregate_credentials_inputs that returns
|
|
||||||
# both regular and auto credentials
|
|
||||||
mock_graph = mocker.MagicMock()
|
|
||||||
|
|
||||||
regular_field_info = CredentialsFieldInfo.model_validate(
|
|
||||||
{
|
|
||||||
"credentials_provider": ["github"],
|
|
||||||
"credentials_types": ["api_key"],
|
|
||||||
"is_auto_credential": False,
|
|
||||||
},
|
|
||||||
by_alias=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Mock regular_credentials_inputs property (auto_credentials are excluded)
|
|
||||||
mock_graph.regular_credentials_inputs = {
|
|
||||||
"github_creds": (regular_field_info, {("node-1", "credentials")}, True),
|
|
||||||
}
|
|
||||||
|
|
||||||
graph_credentials_input = {
|
|
||||||
"github_creds": CredentialsMetaInput(
|
|
||||||
id="cred-123",
|
|
||||||
provider=ProviderName("github"),
|
|
||||||
type="api_key",
|
|
||||||
),
|
|
||||||
}
|
|
||||||
|
|
||||||
result = make_node_credentials_input_map(mock_graph, graph_credentials_input)
|
|
||||||
|
|
||||||
# Regular credentials should be mapped
|
|
||||||
assert "node-1" in result
|
|
||||||
assert "credentials" in result["node-1"]
|
|
||||||
|
|
||||||
# Auto credentials should NOT appear in the result
|
|
||||||
# (they would have been mapped to the kwarg_name "credentials" not "spreadsheet")
|
|
||||||
for node_id, fields in result.items():
|
|
||||||
for field_name, value in fields.items():
|
|
||||||
# Verify no auto-credential phantom entries
|
|
||||||
if isinstance(value, dict):
|
|
||||||
assert "_credentials_id" not in value
|
|
||||||
|
|||||||
185
autogpt_platform/backend/backend/util/openai_responses.py
Normal file
185
autogpt_platform/backend/backend/util/openai_responses.py
Normal file
@@ -0,0 +1,185 @@
|
|||||||
|
"""Helpers for OpenAI Responses API migration.
|
||||||
|
|
||||||
|
This module provides utilities for conditionally using OpenAI's Responses API
|
||||||
|
instead of Chat Completions for reasoning models (o1, o3, etc.) that require it.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
# Exact model identifiers that require the Responses API.
|
||||||
|
# Use exact matching to avoid false positives on future models.
|
||||||
|
# NOTE: Update this set when OpenAI releases new reasoning models.
|
||||||
|
REASONING_MODELS = frozenset(
|
||||||
|
{
|
||||||
|
# O1 family
|
||||||
|
"o1",
|
||||||
|
"o1-mini",
|
||||||
|
"o1-preview",
|
||||||
|
"o1-2024-12-17",
|
||||||
|
# O3 family
|
||||||
|
"o3",
|
||||||
|
"o3-mini",
|
||||||
|
"o3-2025-04-16",
|
||||||
|
"o3-mini-2025-01-31",
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def requires_responses_api(model: str) -> bool:
|
||||||
|
"""Check if model requires the Responses API (exact match).
|
||||||
|
|
||||||
|
Args:
|
||||||
|
model: The model identifier string (e.g., "o3-mini", "gpt-4o")
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if the model requires responses.create, False otherwise
|
||||||
|
"""
|
||||||
|
return model in REASONING_MODELS
|
||||||
|
|
||||||
|
|
||||||
|
def convert_tools_to_responses_format(tools: list[dict] | None) -> list[dict]:
|
||||||
|
"""Convert Chat Completions tool format to Responses API format.
|
||||||
|
|
||||||
|
The Responses API uses internally-tagged polymorphism (flatter structure)
|
||||||
|
and functions are strict by default.
|
||||||
|
|
||||||
|
Chat Completions format:
|
||||||
|
{"type": "function", "function": {"name": "...", "parameters": {...}}}
|
||||||
|
|
||||||
|
Responses API format:
|
||||||
|
{"type": "function", "name": "...", "parameters": {...}}
|
||||||
|
|
||||||
|
Args:
|
||||||
|
tools: List of tools in Chat Completions format
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List of tools in Responses API format
|
||||||
|
"""
|
||||||
|
if not tools:
|
||||||
|
return []
|
||||||
|
|
||||||
|
converted = []
|
||||||
|
for tool in tools:
|
||||||
|
if tool.get("type") == "function":
|
||||||
|
func = tool.get("function", {})
|
||||||
|
converted.append(
|
||||||
|
{
|
||||||
|
"type": "function",
|
||||||
|
"name": func.get("name"),
|
||||||
|
"description": func.get("description"),
|
||||||
|
"parameters": func.get("parameters"),
|
||||||
|
# Note: strict=True is default in Responses API
|
||||||
|
}
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
# Pass through non-function tools as-is
|
||||||
|
converted.append(tool)
|
||||||
|
return converted
|
||||||
|
|
||||||
|
|
||||||
|
def extract_responses_tool_calls(response: Any) -> list[dict] | None:
|
||||||
|
"""Extract tool calls from Responses API response.
|
||||||
|
|
||||||
|
The Responses API returns tool calls as separate items in the output array
|
||||||
|
with type="function_call".
|
||||||
|
|
||||||
|
Args:
|
||||||
|
response: The Responses API response object
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List of tool calls in a normalized format, or None if no tool calls
|
||||||
|
"""
|
||||||
|
tool_calls = []
|
||||||
|
for item in response.output:
|
||||||
|
if getattr(item, "type", None) == "function_call":
|
||||||
|
tool_calls.append(
|
||||||
|
{
|
||||||
|
"id": item.call_id,
|
||||||
|
"type": "function",
|
||||||
|
"function": {
|
||||||
|
"name": item.name,
|
||||||
|
"arguments": item.arguments,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
return tool_calls if tool_calls else None
|
||||||
|
|
||||||
|
|
||||||
|
def extract_usage(response: Any, is_responses_api: bool) -> tuple[int, int]:
    """Extract token usage from either API response.

    The Responses API uses different field names for token counts:
    - Chat Completions: prompt_tokens, completion_tokens
    - Responses API: input_tokens, output_tokens

    Args:
        response: The API response object.
        is_responses_api: True if response is from the Responses API.

    Returns:
        Tuple of (prompt_tokens, completion_tokens).
    """
    usage = response.usage
    if not usage:
        return 0, 0

    # Pick the field names matching the originating API.
    if is_responses_api:
        in_field, out_field = "input_tokens", "output_tokens"
    else:
        in_field, out_field = "prompt_tokens", "completion_tokens"
    return getattr(usage, in_field, 0), getattr(usage, out_field, 0)
|
||||||
|
|
||||||
|
|
||||||
|
def extract_responses_content(response: Any) -> str:
    """Extract text content from a Responses API response.

    Args:
        response: The Responses API response object.

    Returns:
        The text content from the response, or empty string if none.
    """
    # The official SDK exposes a convenience accessor; prefer it when present.
    if hasattr(response, "output_text"):
        return response.output_text or ""

    # Fallback: walk output items for the first message's output_text part.
    for entry in response.output:
        if getattr(entry, "type", None) != "message":
            continue
        for part in getattr(entry, "content", []):
            if getattr(part, "type", None) == "output_text":
                return getattr(part, "text", "")
    return ""
|
||||||
|
|
||||||
|
|
||||||
|
def extract_responses_reasoning(response: Any) -> str | None:
|
||||||
|
"""Extract reasoning content from Responses API response.
|
||||||
|
|
||||||
|
Reasoning models return their reasoning process in the response,
|
||||||
|
which can be useful for debugging or display.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
response: The Responses API response object
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
The reasoning text, or None if not present
|
||||||
|
"""
|
||||||
|
for item in response.output:
|
||||||
|
if getattr(item, "type", None) == "reasoning":
|
||||||
|
# Reasoning items may have summary or content
|
||||||
|
summary = getattr(item, "summary", [])
|
||||||
|
if summary:
|
||||||
|
# Join summary items if present
|
||||||
|
texts = []
|
||||||
|
for s in summary:
|
||||||
|
if hasattr(s, "text"):
|
||||||
|
texts.append(s.text)
|
||||||
|
if texts:
|
||||||
|
return "\n".join(texts)
|
||||||
|
return None
|
||||||
155
autogpt_platform/backend/backend/util/openai_responses_test.py
Normal file
155
autogpt_platform/backend/backend/util/openai_responses_test.py
Normal file
@@ -0,0 +1,155 @@
|
|||||||
|
"""Tests for OpenAI Responses API helpers."""
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from backend.util.openai_responses import (
|
||||||
|
REASONING_MODELS,
|
||||||
|
convert_tools_to_responses_format,
|
||||||
|
requires_responses_api,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class TestRequiresResponsesApi:
    """Tests for the requires_responses_api function."""

    def test_o1_models_require_responses_api(self):
        """O1 family models should require the Responses API."""
        for model in ("o1", "o1-mini", "o1-preview", "o1-2024-12-17"):
            assert requires_responses_api(model) is True

    def test_o3_models_require_responses_api(self):
        """O3 family models should require the Responses API."""
        for model in ("o3", "o3-mini", "o3-2025-04-16", "o3-mini-2025-01-31"):
            assert requires_responses_api(model) is True

    def test_gpt_models_do_not_require_responses_api(self):
        """GPT models should NOT require the Responses API."""
        gpt_models = (
            "gpt-4o",
            "gpt-4o-mini",
            "gpt-4-turbo",
            "gpt-3.5-turbo",
            "gpt-5",
            "gpt-5-mini",
        )
        for model in gpt_models:
            assert requires_responses_api(model) is False

    def test_other_models_do_not_require_responses_api(self):
        """Other provider models should NOT require the Responses API."""
        for model in ("claude-3-opus", "llama-3.3-70b", "gemini-pro"):
            assert requires_responses_api(model) is False

    def test_empty_string_does_not_require_responses_api(self):
        """Empty string should not require the Responses API."""
        assert requires_responses_api("") is False

    def test_exact_matching_no_false_positives(self):
        """Should not match models that just start with 'o1' or 'o3'."""
        # Hypothetical models sharing the o1/o3 prefix but not reasoning models.
        for model in ("o1-turbo-hypothetical", "o3-fast-hypothetical", "o100"):
            assert requires_responses_api(model) is False
|
||||||
|
|
||||||
|
|
||||||
|
class TestConvertToolsToResponsesFormat:
    """Tests for the convert_tools_to_responses_format function."""

    def test_empty_tools_returns_empty_list(self):
        """Empty or None tools should return empty list."""
        for empty in (None, []):
            assert convert_tools_to_responses_format(empty) == []

    def test_converts_function_tool_format(self):
        """Should convert Chat Completions function format to Responses format."""
        params = {
            "type": "object",
            "properties": {"location": {"type": "string"}},
            "required": ["location"],
        }
        tools = [
            {
                "type": "function",
                "function": {
                    "name": "get_weather",
                    "description": "Get the weather in a location",
                    "parameters": params,
                },
            }
        ]

        result = convert_tools_to_responses_format(tools)

        assert len(result) == 1
        converted = result[0]
        assert converted["type"] == "function"
        assert converted["name"] == "get_weather"
        assert converted["description"] == "Get the weather in a location"
        assert converted["parameters"] == params
        # Flattened structure: the nested "function" wrapper must be gone.
        assert "function" not in converted

    def test_handles_multiple_tools(self):
        """Should handle multiple tools."""

        def make_tool(name: str, description: str) -> dict:
            # Small local factory keeps the fixture readable.
            return {
                "type": "function",
                "function": {
                    "name": name,
                    "description": description,
                    "parameters": {"type": "object", "properties": {}},
                },
            }

        result = convert_tools_to_responses_format(
            [make_tool("tool_1", "First tool"), make_tool("tool_2", "Second tool")]
        )

        assert [tool["name"] for tool in result] == ["tool_1", "tool_2"]

    def test_passes_through_non_function_tools(self):
        """Non-function tools should be passed through as-is."""
        tools = [{"type": "web_search", "config": {"enabled": True}}]
        assert convert_tools_to_responses_format(tools) == tools
|
||||||
|
|
||||||
|
|
||||||
|
class TestReasoningModelsSet:
    """Tests for the REASONING_MODELS constant."""

    def test_reasoning_models_is_frozenset(self):
        """REASONING_MODELS should be a frozenset (immutable)."""
        assert isinstance(REASONING_MODELS, frozenset)

    def test_contains_expected_models(self):
        """Should contain all expected reasoning models."""
        expected = {
            "o1",
            "o1-mini",
            "o1-preview",
            "o1-2024-12-17",
            "o3",
            "o3-mini",
            "o3-2025-04-16",
            "o3-mini-2025-01-31",
        }
        # Every expected model must be present (extras are allowed).
        assert not expected - REASONING_MODELS
|
||||||
@@ -22,6 +22,11 @@ Sentry.init({
|
|||||||
|
|
||||||
enabled: shouldEnable,
|
enabled: shouldEnable,
|
||||||
|
|
||||||
|
// Suppress cross-origin stylesheet errors from Sentry Replay (rrweb)
|
||||||
|
// serializing DOM snapshots with cross-origin stylesheets
|
||||||
|
// (e.g., from browser extensions or CDN-loaded CSS)
|
||||||
|
ignoreErrors: [/Not allowed to access cross-origin stylesheet/],
|
||||||
|
|
||||||
// Add optional integrations for additional features
|
// Add optional integrations for additional features
|
||||||
integrations: [
|
integrations: [
|
||||||
Sentry.captureConsoleIntegration(),
|
Sentry.captureConsoleIntegration(),
|
||||||
|
|||||||
@@ -29,6 +29,7 @@ export function ScheduleListItem({
|
|||||||
description={formatDistanceToNow(schedule.next_run_time, {
|
description={formatDistanceToNow(schedule.next_run_time, {
|
||||||
addSuffix: true,
|
addSuffix: true,
|
||||||
})}
|
})}
|
||||||
|
descriptionTitle={new Date(schedule.next_run_time).toString()}
|
||||||
onClick={onClick}
|
onClick={onClick}
|
||||||
selected={selected}
|
selected={selected}
|
||||||
icon={
|
icon={
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ import React from "react";
|
|||||||
interface Props {
|
interface Props {
|
||||||
title: string;
|
title: string;
|
||||||
description?: string;
|
description?: string;
|
||||||
|
descriptionTitle?: string;
|
||||||
icon?: React.ReactNode;
|
icon?: React.ReactNode;
|
||||||
selected?: boolean;
|
selected?: boolean;
|
||||||
onClick?: () => void;
|
onClick?: () => void;
|
||||||
@@ -16,6 +17,7 @@ interface Props {
|
|||||||
export function SidebarItemCard({
|
export function SidebarItemCard({
|
||||||
title,
|
title,
|
||||||
description,
|
description,
|
||||||
|
descriptionTitle,
|
||||||
icon,
|
icon,
|
||||||
selected,
|
selected,
|
||||||
onClick,
|
onClick,
|
||||||
@@ -38,7 +40,11 @@ export function SidebarItemCard({
|
|||||||
>
|
>
|
||||||
{title}
|
{title}
|
||||||
</Text>
|
</Text>
|
||||||
<Text variant="body" className="leading-tight !text-zinc-500">
|
<Text
|
||||||
|
variant="body"
|
||||||
|
className="leading-tight !text-zinc-500"
|
||||||
|
title={descriptionTitle}
|
||||||
|
>
|
||||||
{description}
|
{description}
|
||||||
</Text>
|
</Text>
|
||||||
</div>
|
</div>
|
||||||
|
|||||||
@@ -81,6 +81,9 @@ export function TaskListItem({
|
|||||||
? formatDistanceToNow(run.started_at, { addSuffix: true })
|
? formatDistanceToNow(run.started_at, { addSuffix: true })
|
||||||
: "—"
|
: "—"
|
||||||
}
|
}
|
||||||
|
descriptionTitle={
|
||||||
|
run.started_at ? new Date(run.started_at).toString() : undefined
|
||||||
|
}
|
||||||
onClick={onClick}
|
onClick={onClick}
|
||||||
selected={selected}
|
selected={selected}
|
||||||
actions={
|
actions={
|
||||||
|
|||||||
@@ -180,3 +180,14 @@ body[data-google-picker-open="true"] [data-dialog-content] {
|
|||||||
z-index: 1 !important;
|
z-index: 1 !important;
|
||||||
pointer-events: none !important;
|
pointer-events: none !important;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* CoPilot chat table styling — remove left/right borders, increase padding */
|
||||||
|
[data-streamdown="table-wrapper"] table {
|
||||||
|
border-left: none;
|
||||||
|
border-right: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
[data-streamdown="table-wrapper"] th,
|
||||||
|
[data-streamdown="table-wrapper"] td {
|
||||||
|
padding: 0.875rem 1rem; /* py-3.5 px-4 */
|
||||||
|
}
|
||||||
|
|||||||
@@ -4,9 +4,7 @@ import { loadScript } from "@/services/scripts/scripts";
|
|||||||
export async function loadGoogleAPIPicker(): Promise<void> {
|
export async function loadGoogleAPIPicker(): Promise<void> {
|
||||||
validateWindow();
|
validateWindow();
|
||||||
|
|
||||||
await loadScript("https://apis.google.com/js/api.js", {
|
await loadScript("https://apis.google.com/js/api.js");
|
||||||
referrerPolicy: "no-referrer-when-downgrade",
|
|
||||||
});
|
|
||||||
|
|
||||||
const googleAPI = window.gapi;
|
const googleAPI = window.gapi;
|
||||||
if (!googleAPI) {
|
if (!googleAPI) {
|
||||||
@@ -29,9 +27,7 @@ export async function loadGoogleIdentityServices(): Promise<void> {
|
|||||||
throw new Error("Google Identity Services cannot load on server");
|
throw new Error("Google Identity Services cannot load on server");
|
||||||
}
|
}
|
||||||
|
|
||||||
await loadScript("https://accounts.google.com/gsi/client", {
|
await loadScript("https://accounts.google.com/gsi/client");
|
||||||
referrerPolicy: "no-referrer-when-downgrade",
|
|
||||||
});
|
|
||||||
|
|
||||||
const google = window.google;
|
const google = window.google;
|
||||||
if (!google?.accounts?.oauth2) {
|
if (!google?.accounts?.oauth2) {
|
||||||
|
|||||||
@@ -226,7 +226,7 @@ function renderMarkdown(
|
|||||||
table: ({ children, ...props }) => (
|
table: ({ children, ...props }) => (
|
||||||
<div className="my-4 overflow-x-auto">
|
<div className="my-4 overflow-x-auto">
|
||||||
<table
|
<table
|
||||||
className="min-w-full divide-y divide-gray-200 rounded-lg border border-gray-200 dark:divide-gray-700 dark:border-gray-700"
|
className="min-w-full divide-y divide-gray-200 border-y border-gray-200 dark:divide-gray-700 dark:border-gray-700"
|
||||||
{...props}
|
{...props}
|
||||||
>
|
>
|
||||||
{children}
|
{children}
|
||||||
@@ -235,7 +235,7 @@ function renderMarkdown(
|
|||||||
),
|
),
|
||||||
th: ({ children, ...props }) => (
|
th: ({ children, ...props }) => (
|
||||||
<th
|
<th
|
||||||
className="bg-gray-50 px-4 py-3 text-left text-xs font-semibold uppercase tracking-wider text-gray-700 dark:bg-gray-800 dark:text-gray-300"
|
className="bg-gray-50 px-4 py-3.5 text-left text-xs font-semibold uppercase tracking-wider text-gray-700 dark:bg-gray-800 dark:text-gray-300"
|
||||||
{...props}
|
{...props}
|
||||||
>
|
>
|
||||||
{children}
|
{children}
|
||||||
@@ -243,7 +243,7 @@ function renderMarkdown(
|
|||||||
),
|
),
|
||||||
td: ({ children, ...props }) => (
|
td: ({ children, ...props }) => (
|
||||||
<td
|
<td
|
||||||
className="border-t border-gray-200 px-4 py-3 text-sm text-gray-600 dark:border-gray-700 dark:text-gray-400"
|
className="border-t border-gray-200 px-4 py-3.5 text-sm text-gray-600 dark:border-gray-700 dark:text-gray-400"
|
||||||
{...props}
|
{...props}
|
||||||
>
|
>
|
||||||
{children}
|
{children}
|
||||||
|
|||||||
Reference in New Issue
Block a user