Compare commits

..

8 Commits

Author SHA1 Message Date
Nick Tindle
e7705427bb fix: add is_auto_credential and input_field_name to auto credentials schema
Post-refactor fix: these fields were moved from data/block.py to blocks/_base.py in #12068
2026-02-12 16:13:23 -06:00
Nicholas Tindle
201ec5aa3a Merge branch 'dev' into ntindle/google-issues-fix 2026-02-12 16:12:20 -06:00
Nicholas Tindle
2b8134a711 Merge branch 'dev' into ntindle/google-issues-fix 2026-02-09 13:46:37 -06:00
Nicholas Tindle
90b3b5ba16 fix(backend): Fix misplaced section header in graph_test.py
Move the _reassign_ids section comment to above the actual _reassign_ids
tests, and label the combine() tests correctly.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-08 16:11:47 -06:00
Nicholas Tindle
f4f81bc4fc fix(backend): Remove _credentials_id key on fork instead of setting to None
Setting _credentials_id to None on fork was ambiguous — both "forked,
needs re-auth" and "chained data from upstream" were represented as None.
This caused _acquire_auto_credentials to silently skip credential
acquisition for forked agents, leading to confusing TypeErrors at runtime.

Now the key is deleted entirely, making the three states unambiguous:
- Present with value: user-selected credentials
- Present as None: chained data from upstream block
- Absent: forked/needs re-authentication
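A minimal sketch of the resulting three-state check (hypothetical helper and names, for illustration only; not the executor's actual code):

    def classify_credentials_state(field_data: dict) -> str:
        # Illustrates the three states listed above.
        if "_credentials_id" not in field_data:
            return "forked"         # absent: cloned agent, needs re-authentication
        if field_data["_credentials_id"] is None:
            return "chained"        # present as None: data chained from an upstream block
        return "user_selected"      # present with a value: user-selected credentials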

Also adds pre-run validation for the missing key case and makes error
messages provider-agnostic.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-06 17:34:16 -06:00
Nicholas Tindle
c5abc01f25 fix(backend): Add error handling for auto-credentials store lookup
Wrap get_creds_by_id call in try/except in the auto-credentials
validation path to match the error handling pattern used for regular
credentials.
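Roughly, the wrapped lookup behaves like this hedged sketch (the store call and error wording are taken from the diff further down; the helper name is made up for illustration):

    async def lookup_auto_credential(creds_store, user_id: str, cred_id: str):
        """Convert store lookup failures into a per-field error instead of a crash."""
        try:
            return await creds_store.get_creds_by_id(user_id, cred_id), None
        except Exception as e:
            return None, f"Credentials not available: {e}"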

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-06 16:53:29 -06:00
Nicholas Tindle
8b7053c1de merge: Resolve conflicts with dev (PR #11986 graph model refactor)
Adapt auto-credentials filtering to dev's refactored graph model:
- aggregate_credentials_inputs() now returns 3-tuples (field_info, node_pairs, is_required); shape sketched below this list
- credentials_input_schema moved to GraphModel, builds JSON schema directly
- Update regular/auto_credentials_inputs properties for 3-tuple format
- Update test mocks and assertions for new tuple format and class hierarchy
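Illustrative shape of the aggregated credentials mapping after the refactor (the type alias and example key are assumptions for orientation only; the real annotation uses CredentialsFieldInfo, as in the graph.py diff below):

    from typing import Any

    AggregatedCredentialsInputs = dict[
        str,                       # graph-level input key, e.g. "github_api_key"
        tuple[
            Any,                   # CredentialsFieldInfo for the combined field
            set[tuple[str, str]],  # {(node_id, field_name), ...} pairs that use it
            bool,                  # is_required
        ],
    ]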

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-06 16:39:57 -06:00
Nicholas Tindle
e00c1202ad fix(platform): Fix Google Drive auto-credentials handling across the platform
- Tag auto-credentials with `is_auto_credential` and `input_field_name` on `CredentialsFieldInfo` to distinguish them from regular user-provided credentials
- Add `regular_credentials_inputs` and `auto_credentials_inputs` properties to `Graph` so UI schemas, CoPilot, and library presets only surface regular credentials (usage sketched after this list)
- Extract `_acquire_auto_credentials()` helper in executor to resolve embedded `_credentials_id` at execution time with proper lock management
- Validate auto-credentials ownership in `_validate_node_input_credentials()` to catch stale/missing credentials before execution
- Clear `_credentials_id` in `_reassign_ids()` on graph fork so cloned agents require re-authentication
- Propagate `is_auto_credential` through `combine()` and `discriminate()` on `CredentialsFieldInfo`
- Add `referrerPolicy: "no-referrer-when-downgrade"` to Google API script loading to fix Firefox API key validation
- Comprehensive test coverage for all new behavior
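A hedged usage sketch of the regular/auto split described above (assumes the new GraphModel properties added in this PR; simplified, not the actual platform code):

    def split_graph_credentials(graph):
        # Only user-mapped credentials are surfaced in UI schemas, CoPilot,
        # and library presets...
        user_facing = graph.regular_credentials_inputs
        # ...while auto-credentials stay hidden and are resolved at execution
        # time from the _credentials_id embedded in the file field data.
        runtime_resolved = graph.auto_credentials_inputs
        return user_facing, runtime_resolved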

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-06 16:08:53 -06:00
37 changed files with 3437 additions and 85 deletions

View File

@@ -118,7 +118,7 @@ def build_missing_credentials_from_graph(
preserving all supported credential types for each field.
"""
matched_keys = set(matched_credentials.keys()) if matched_credentials else set()
- aggregated_fields = graph.aggregate_credentials_inputs()
+ aggregated_fields = graph.regular_credentials_inputs
return {
field_key: _serialize_missing_credential(field_key, field_info)
@@ -338,7 +338,7 @@ async def match_user_credentials_to_graph(
missing_creds: list[str] = []
# Get aggregated credentials requirements from the graph
- aggregated_creds = graph.aggregate_credentials_inputs()
+ aggregated_creds = graph.regular_credentials_inputs
logger.debug(
f"Matching credentials for graph {graph.id}: {len(aggregated_creds)} required"
)

View File

@@ -0,0 +1,78 @@
"""Tests for chat tools utility functions."""
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from backend.data.model import CredentialsFieldInfo
def _make_regular_field() -> CredentialsFieldInfo:
return CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["github"],
"credentials_types": ["api_key"],
"is_auto_credential": False,
},
by_alias=True,
)
def test_build_missing_credentials_excludes_auto_creds():
"""
build_missing_credentials_from_graph() should use regular_credentials_inputs
and thus exclude auto_credentials from the "missing" set.
"""
from backend.api.features.chat.tools.utils import (
build_missing_credentials_from_graph,
)
regular_field = _make_regular_field()
mock_graph = MagicMock()
# regular_credentials_inputs should only return the non-auto field
mock_graph.regular_credentials_inputs = {
"github_api_key": (regular_field, {("node-1", "credentials")}, True),
}
result = build_missing_credentials_from_graph(mock_graph, matched_credentials=None)
# Should include the regular credential
assert "github_api_key" in result
# Should NOT include the auto_credential (not in regular_credentials_inputs)
assert "google_oauth2" not in result
@pytest.mark.asyncio
async def test_match_user_credentials_excludes_auto_creds():
"""
match_user_credentials_to_graph() should use regular_credentials_inputs
and thus exclude auto_credentials from matching.
"""
from backend.api.features.chat.tools.utils import match_user_credentials_to_graph
regular_field = _make_regular_field()
mock_graph = MagicMock()
mock_graph.id = "test-graph"
# regular_credentials_inputs returns only non-auto fields
mock_graph.regular_credentials_inputs = {
"github_api_key": (regular_field, {("node-1", "credentials")}, True),
}
# Mock the credentials manager to return no credentials
with patch(
"backend.api.features.chat.tools.utils.IntegrationCredentialsManager"
) as MockCredsMgr:
mock_store = AsyncMock()
mock_store.get_all_creds.return_value = []
MockCredsMgr.return_value.store = mock_store
matched, missing = await match_user_credentials_to_graph(
user_id="test-user", graph=mock_graph
)
# No credentials available, so github should be missing
assert len(matched) == 0
assert len(missing) == 1
assert "github_api_key" in missing[0]

View File

@@ -1102,7 +1102,7 @@ async def create_preset_from_graph_execution(
raise NotFoundError(
f"Graph #{graph_execution.graph_id} not found or accessible"
)
- elif len(graph.aggregate_credentials_inputs()) > 0:
+ elif len(graph.regular_credentials_inputs) > 0:
raise ValueError(
f"Graph execution #{graph_exec_id} can't be turned into a preset "
"because it was run before this feature existed "

View File

@@ -309,6 +309,8 @@ class BlockSchema(BaseModel):
"credentials_provider": [config.get("provider", "google")],
"credentials_types": [config.get("type", "oauth2")],
"credentials_scopes": config.get("scopes"),
"is_auto_credential": True,
"input_field_name": info["field_name"],
}
result[kwarg_name] = CredentialsFieldInfo.model_validate(
auto_schema, by_alias=True

View File

@@ -434,8 +434,7 @@ class GraphModel(Graph, GraphMeta):
@computed_field
@property
def credentials_input_schema(self) -> dict[str, Any]:
- graph_credentials_inputs = self.aggregate_credentials_inputs()
+ graph_credentials_inputs = self.regular_credentials_inputs
logger.debug(
f"Combined credentials input fields for graph #{self.id} ({self.name}): "
f"{graph_credentials_inputs}"
@@ -591,6 +590,28 @@ class GraphModel(Graph, GraphMeta):
for key, (field_info, node_field_pairs) in combined.items()
}
@property
def regular_credentials_inputs(
self,
) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]], bool]]:
"""Credentials that need explicit user mapping (CredentialsMetaInput fields)."""
return {
k: v
for k, v in self.aggregate_credentials_inputs().items()
if not v[0].is_auto_credential
}
@property
def auto_credentials_inputs(
self,
) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]], bool]]:
"""Credentials embedded in file fields (_credentials_id), resolved at execution time."""
return {
k: v
for k, v in self.aggregate_credentials_inputs().items()
if v[0].is_auto_credential
}
def reassign_ids(self, user_id: str, reassign_graph_id: bool = False):
"""
Reassigns all IDs in the graph to new UUIDs.
@@ -641,6 +662,16 @@ class GraphModel(Graph, GraphMeta):
) and graph_id in graph_id_map:
node.input_default["graph_id"] = graph_id_map[graph_id]
# Clear auto-credentials references (e.g., _credentials_id in
# GoogleDriveFile fields) so the new user must re-authenticate
# with their own account
for node in graph.nodes:
if not node.input_default:
continue
for key, value in node.input_default.items():
if isinstance(value, dict) and "_credentials_id" in value:
del value["_credentials_id"]
def validate_graph(
self,
for_run: bool = False,

View File

@@ -462,3 +462,329 @@ def test_node_credentials_optional_with_other_metadata():
assert node.credentials_optional is True
assert node.metadata["position"] == {"x": 100, "y": 200}
assert node.metadata["customized_name"] == "My Custom Node"
# ============================================================================
# Tests for CredentialsFieldInfo.combine() field propagation
def test_combine_preserves_is_auto_credential_flag():
"""
CredentialsFieldInfo.combine() must propagate is_auto_credential and
input_field_name to the combined result. Regression test for reviewer
finding that combine() dropped these fields.
"""
from backend.data.model import CredentialsFieldInfo
auto_field = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["google"],
"credentials_types": ["oauth2"],
"credentials_scopes": ["drive.readonly"],
"is_auto_credential": True,
"input_field_name": "spreadsheet",
},
by_alias=True,
)
# combine() takes *args of (field_info, key) tuples
combined = CredentialsFieldInfo.combine(
(auto_field, ("node-1", "credentials")),
(auto_field, ("node-2", "credentials")),
)
assert len(combined) == 1
group_key = next(iter(combined))
combined_info, combined_keys = combined[group_key]
assert combined_info.is_auto_credential is True
assert combined_info.input_field_name == "spreadsheet"
assert combined_keys == {("node-1", "credentials"), ("node-2", "credentials")}
def test_combine_preserves_regular_credential_defaults():
"""Regular credentials should have is_auto_credential=False after combine()."""
from backend.data.model import CredentialsFieldInfo
regular_field = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["github"],
"credentials_types": ["api_key"],
"is_auto_credential": False,
},
by_alias=True,
)
combined = CredentialsFieldInfo.combine(
(regular_field, ("node-1", "credentials")),
)
group_key = next(iter(combined))
combined_info, _ = combined[group_key]
assert combined_info.is_auto_credential is False
assert combined_info.input_field_name is None
# ============================================================================
# Tests for _reassign_ids credential clearing (Fix 3: SECRT-1772)
def test_reassign_ids_clears_credentials_id():
"""
[SECRT-1772] _reassign_ids should clear _credentials_id from
GoogleDriveFile-style input_default fields so forked agents
don't retain the original creator's credential references.
"""
from backend.data.graph import GraphModel
node = Node(
id="node-1",
block_id=StoreValueBlock().id,
input_default={
"spreadsheet": {
"_credentials_id": "original-cred-id",
"id": "file-123",
"name": "test.xlsx",
"mimeType": "application/vnd.google-apps.spreadsheet",
"url": "https://docs.google.com/spreadsheets/d/file-123",
},
},
)
graph = Graph(
id="test-graph",
name="Test",
description="Test",
nodes=[node],
links=[],
)
GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})
# _credentials_id key should be removed (not set to None) so that
# _acquire_auto_credentials correctly errors instead of treating it as chained data
assert "_credentials_id" not in graph.nodes[0].input_default["spreadsheet"]
def test_reassign_ids_preserves_non_credential_fields():
"""
Regression guard: _reassign_ids should NOT modify non-credential fields
like name, mimeType, id, url.
"""
from backend.data.graph import GraphModel
node = Node(
id="node-1",
block_id=StoreValueBlock().id,
input_default={
"spreadsheet": {
"_credentials_id": "cred-abc",
"id": "file-123",
"name": "test.xlsx",
"mimeType": "application/vnd.google-apps.spreadsheet",
"url": "https://docs.google.com/spreadsheets/d/file-123",
},
},
)
graph = Graph(
id="test-graph",
name="Test",
description="Test",
nodes=[node],
links=[],
)
GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})
field = graph.nodes[0].input_default["spreadsheet"]
assert field["id"] == "file-123"
assert field["name"] == "test.xlsx"
assert field["mimeType"] == "application/vnd.google-apps.spreadsheet"
assert field["url"] == "https://docs.google.com/spreadsheets/d/file-123"
def test_reassign_ids_handles_no_credentials():
"""
Regression guard: _reassign_ids should not error when input_default
has no dict fields with _credentials_id.
"""
from backend.data.graph import GraphModel
node = Node(
id="node-1",
block_id=StoreValueBlock().id,
input_default={
"input": "some value",
"another_input": 42,
},
)
graph = Graph(
id="test-graph",
name="Test",
description="Test",
nodes=[node],
links=[],
)
GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})
# Should not error, fields unchanged
assert graph.nodes[0].input_default["input"] == "some value"
assert graph.nodes[0].input_default["another_input"] == 42
def test_reassign_ids_handles_multiple_credential_fields():
"""
[SECRT-1772] When a node has multiple dict fields with _credentials_id,
ALL of them should be cleared.
"""
from backend.data.graph import GraphModel
node = Node(
id="node-1",
block_id=StoreValueBlock().id,
input_default={
"spreadsheet": {
"_credentials_id": "cred-1",
"id": "file-1",
"name": "file1.xlsx",
},
"doc_file": {
"_credentials_id": "cred-2",
"id": "file-2",
"name": "file2.docx",
},
"plain_input": "not a dict",
},
)
graph = Graph(
id="test-graph",
name="Test",
description="Test",
nodes=[node],
links=[],
)
GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})
assert "_credentials_id" not in graph.nodes[0].input_default["spreadsheet"]
assert "_credentials_id" not in graph.nodes[0].input_default["doc_file"]
assert graph.nodes[0].input_default["plain_input"] == "not a dict"
# ============================================================================
# Tests for discriminate() field propagation
def test_discriminate_preserves_is_auto_credential_flag():
"""
CredentialsFieldInfo.discriminate() must propagate is_auto_credential and
input_field_name to the discriminated result. Regression test for
discriminate() dropping these fields (same class of bug as combine()).
"""
from backend.data.model import CredentialsFieldInfo
auto_field = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["google", "openai"],
"credentials_types": ["oauth2"],
"credentials_scopes": ["drive.readonly"],
"is_auto_credential": True,
"input_field_name": "spreadsheet",
"discriminator": "model",
"discriminator_mapping": {"gpt-4": "openai", "gemini": "google"},
},
by_alias=True,
)
discriminated = auto_field.discriminate("gemini")
assert discriminated.is_auto_credential is True
assert discriminated.input_field_name == "spreadsheet"
assert discriminated.provider == frozenset(["google"])
def test_discriminate_preserves_regular_credential_defaults():
"""Regular credentials should have is_auto_credential=False after discriminate()."""
from backend.data.model import CredentialsFieldInfo
regular_field = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["google", "openai"],
"credentials_types": ["api_key"],
"is_auto_credential": False,
"discriminator": "model",
"discriminator_mapping": {"gpt-4": "openai", "gemini": "google"},
},
by_alias=True,
)
discriminated = regular_field.discriminate("gpt-4")
assert discriminated.is_auto_credential is False
assert discriminated.input_field_name is None
assert discriminated.provider == frozenset(["openai"])
# ============================================================================
# Tests for credentials_input_schema excluding auto_credentials
def test_credentials_input_schema_excludes_auto_creds():
"""
GraphModel.credentials_input_schema should exclude auto_credentials
(is_auto_credential=True) from the schema. Auto_credentials are
transparently resolved at execution time via file picker data.
"""
from datetime import datetime, timezone
from unittest.mock import PropertyMock, patch
from backend.data.graph import GraphModel, NodeModel
from backend.data.model import CredentialsFieldInfo
regular_field_info = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["github"],
"credentials_types": ["api_key"],
"is_auto_credential": False,
},
by_alias=True,
)
graph = GraphModel(
id="test-graph",
version=1,
name="Test",
description="Test",
user_id="test-user",
created_at=datetime.now(timezone.utc),
nodes=[
NodeModel(
id="node-1",
block_id=StoreValueBlock().id,
input_default={},
graph_id="test-graph",
graph_version=1,
),
],
links=[],
)
# Mock regular_credentials_inputs to return only the non-auto field (3-tuple)
regular_only = {
"github_credentials": (
regular_field_info,
{("node-1", "credentials")},
True,
),
}
with patch.object(
type(graph),
"regular_credentials_inputs",
new_callable=PropertyMock,
return_value=regular_only,
):
schema = graph.credentials_input_schema
field_names = set(schema.get("properties", {}).keys())
# Should include regular credential but NOT auto_credential
assert "github_credentials" in field_names
assert "google_credentials" not in field_names

View File

@@ -574,6 +574,8 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
discriminator: Optional[str] = None
discriminator_mapping: Optional[dict[str, CP]] = None
discriminator_values: set[Any] = Field(default_factory=set)
is_auto_credential: bool = False
input_field_name: Optional[str] = None
@classmethod
def combine(
@@ -654,6 +656,9 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
+ "_credentials"
)
# Propagate is_auto_credential from the combined field.
# All fields in a group should share the same is_auto_credential
# value since auto and regular credentials serve different purposes.
result[group_key] = (
CredentialsFieldInfo[CP, CT](
credentials_provider=combined.provider,
@@ -662,6 +667,8 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
discriminator=combined.discriminator,
discriminator_mapping=combined.discriminator_mapping,
discriminator_values=set(all_discriminator_values),
is_auto_credential=combined.is_auto_credential,
input_field_name=combined.input_field_name,
),
combined_keys,
)
@@ -687,6 +694,8 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
discriminator=self.discriminator,
discriminator_mapping=self.discriminator_mapping,
discriminator_values=self.discriminator_values,
is_auto_credential=self.is_auto_credential,
input_field_name=self.input_field_name,
)

View File

@@ -168,6 +168,81 @@ def execute_graph(
T = TypeVar("T")
async def _acquire_auto_credentials(
input_model: type[BlockSchema],
input_data: dict[str, Any],
creds_manager: "IntegrationCredentialsManager",
user_id: str,
) -> tuple[dict[str, Any], list[AsyncRedisLock]]:
"""
Resolve auto_credentials from GoogleDriveFileField-style inputs.
Returns:
(extra_exec_kwargs, locks): kwargs to inject into block execution, and
credential locks to release after execution completes.
"""
extra_exec_kwargs: dict[str, Any] = {}
locks: list[AsyncRedisLock] = []
# NOTE: If a block ever has multiple auto-credential fields, a ValueError
# on a later field will strand locks acquired for earlier fields. They'll
# auto-expire via Redis TTL, but add a try/except to release partial locks
# if that becomes a real scenario.
for kwarg_name, info in input_model.get_auto_credentials_fields().items():
field_name = info["field_name"]
field_data = input_data.get(field_name)
if field_data and isinstance(field_data, dict):
# Check if _credentials_id key exists in the field data
if "_credentials_id" in field_data:
cred_id = field_data["_credentials_id"]
if cred_id:
# Credential ID provided - acquire credentials
provider = info.get("config", {}).get(
"provider", "external service"
)
file_name = field_data.get("name", "selected file")
try:
credentials, lock = await creds_manager.acquire(
user_id, cred_id
)
locks.append(lock)
extra_exec_kwargs[kwarg_name] = credentials
except ValueError:
raise ValueError(
f"{provider.capitalize()} credentials for "
f"'{file_name}' in field '{field_name}' are not "
f"available in your account. "
f"This can happen if the agent was created by another "
f"user or the credentials were deleted. "
f"Please open the agent in the builder and re-select "
f"the file to authenticate with your own account."
)
# else: _credentials_id is explicitly None, skip (chained data)
else:
# _credentials_id key missing entirely - this is an error
provider = info.get("config", {}).get("provider", "external service")
file_name = field_data.get("name", "selected file")
raise ValueError(
f"Authentication missing for '{file_name}' in field "
f"'{field_name}'. Please re-select the file to authenticate "
f"with {provider.capitalize()}."
)
elif field_data is None and field_name not in input_data:
# Field not in input_data at all = connected from upstream block, skip
pass
else:
# field_data is None/empty but key IS in input_data = user didn't select
provider = info.get("config", {}).get("provider", "external service")
raise ValueError(
f"No file selected for '{field_name}'. "
f"Please select a file to provide "
f"{provider.capitalize()} authentication."
)
return extra_exec_kwargs, locks
async def execute_node(
node: Node,
data: NodeExecutionEntry,
@@ -270,41 +345,14 @@ async def execute_node(
extra_exec_kwargs[field_name] = credentials
# Handle auto-generated credentials (e.g., from GoogleDriveFileInput)
for kwarg_name, info in input_model.get_auto_credentials_fields().items():
field_name = info["field_name"]
field_data = input_data.get(field_name)
if field_data and isinstance(field_data, dict):
# Check if _credentials_id key exists in the field data
if "_credentials_id" in field_data:
cred_id = field_data["_credentials_id"]
if cred_id:
# Credential ID provided - acquire credentials
provider = info.get("config", {}).get(
"provider", "external service"
)
file_name = field_data.get("name", "selected file")
try:
credentials, lock = await creds_manager.acquire(
user_id, cred_id
)
creds_locks.append(lock)
extra_exec_kwargs[kwarg_name] = credentials
except ValueError:
# Credential was deleted or doesn't exist
raise ValueError(
f"Authentication expired for '{file_name}' in field '{field_name}'. "
f"The saved {provider.capitalize()} credentials no longer exist. "
f"Please re-select the file to re-authenticate."
)
# else: _credentials_id is explicitly None, skip credentials (for chained data)
else:
# _credentials_id key missing entirely - this is an error
provider = info.get("config", {}).get("provider", "external service")
file_name = field_data.get("name", "selected file")
raise ValueError(
f"Authentication missing for '{file_name}' in field '{field_name}'. "
f"Please re-select the file to authenticate with {provider.capitalize()}."
)
auto_extra_kwargs, auto_locks = await _acquire_auto_credentials(
input_model=input_model,
input_data=input_data,
creds_manager=creds_manager,
user_id=user_id,
)
extra_exec_kwargs.update(auto_extra_kwargs)
creds_locks.extend(auto_locks)
output_size = 0

View File

@@ -0,0 +1,320 @@
"""
Tests for auto_credentials handling in execute_node().
These test the _acquire_auto_credentials() helper function extracted from
execute_node() (manager.py lines 273-308).
"""
import pytest
from pytest_mock import MockerFixture
@pytest.fixture
def google_drive_file_data():
return {
"valid": {
"_credentials_id": "cred-id-123",
"id": "file-123",
"name": "test.xlsx",
"mimeType": "application/vnd.google-apps.spreadsheet",
},
"chained": {
"_credentials_id": None,
"id": "file-456",
"name": "chained.xlsx",
"mimeType": "application/vnd.google-apps.spreadsheet",
},
"missing_key": {
"id": "file-789",
"name": "bad.xlsx",
"mimeType": "application/vnd.google-apps.spreadsheet",
},
}
@pytest.fixture
def mock_input_model(mocker: MockerFixture):
"""Create a mock input model with get_auto_credentials_fields() returning one field."""
input_model = mocker.MagicMock()
input_model.get_auto_credentials_fields.return_value = {
"credentials": {
"field_name": "spreadsheet",
"config": {
"provider": "google",
"type": "oauth2",
"scopes": ["https://www.googleapis.com/auth/drive.readonly"],
},
}
}
return input_model
@pytest.fixture
def mock_creds_manager(mocker: MockerFixture):
manager = mocker.AsyncMock()
mock_lock = mocker.AsyncMock()
mock_creds = mocker.MagicMock()
mock_creds.id = "cred-id-123"
mock_creds.provider = "google"
manager.acquire.return_value = (mock_creds, mock_lock)
return manager, mock_creds, mock_lock
@pytest.mark.asyncio
async def test_auto_credentials_happy_path(
mocker: MockerFixture,
google_drive_file_data,
mock_input_model,
mock_creds_manager,
):
"""When field_data has a valid _credentials_id, credentials should be acquired."""
from backend.executor.manager import _acquire_auto_credentials
manager, mock_creds, mock_lock = mock_creds_manager
input_data = {"spreadsheet": google_drive_file_data["valid"]}
extra_kwargs, locks = await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)
manager.acquire.assert_called_once_with("user-1", "cred-id-123")
assert extra_kwargs["credentials"] == mock_creds
assert mock_lock in locks
@pytest.mark.asyncio
async def test_auto_credentials_field_none_static_raises(
mocker: MockerFixture,
mock_input_model,
mock_creds_manager,
):
"""
[THE BUG FIX TEST — OPEN-2895]
When field_data is None and the key IS in input_data (user didn't select a file),
should raise ValueError instead of silently skipping.
"""
from backend.executor.manager import _acquire_auto_credentials
manager, _, _ = mock_creds_manager
# Key is present but value is None = user didn't select a file
input_data = {"spreadsheet": None}
with pytest.raises(ValueError, match="No file selected"):
await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)
@pytest.mark.asyncio
async def test_auto_credentials_field_absent_skips(
mocker: MockerFixture,
mock_input_model,
mock_creds_manager,
):
"""
When the field key is NOT in input_data at all (upstream connection),
should skip without error.
"""
from backend.executor.manager import _acquire_auto_credentials
manager, _, _ = mock_creds_manager
# Key not present = connected from upstream block
input_data = {}
extra_kwargs, locks = await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)
manager.acquire.assert_not_called()
assert "credentials" not in extra_kwargs
assert locks == []
@pytest.mark.asyncio
async def test_auto_credentials_chained_cred_id_none(
mocker: MockerFixture,
google_drive_file_data,
mock_input_model,
mock_creds_manager,
):
"""
When _credentials_id is explicitly None (chained data from upstream),
should skip credential acquisition.
"""
from backend.executor.manager import _acquire_auto_credentials
manager, _, _ = mock_creds_manager
input_data = {"spreadsheet": google_drive_file_data["chained"]}
extra_kwargs, locks = await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)
manager.acquire.assert_not_called()
assert "credentials" not in extra_kwargs
@pytest.mark.asyncio
async def test_auto_credentials_missing_cred_id_key_raises(
mocker: MockerFixture,
google_drive_file_data,
mock_input_model,
mock_creds_manager,
):
"""
When _credentials_id key is missing entirely from field_data dict,
should raise ValueError.
"""
from backend.executor.manager import _acquire_auto_credentials
manager, _, _ = mock_creds_manager
input_data = {"spreadsheet": google_drive_file_data["missing_key"]}
with pytest.raises(ValueError, match="Authentication missing"):
await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)
@pytest.mark.asyncio
async def test_auto_credentials_ownership_mismatch_error(
mocker: MockerFixture,
google_drive_file_data,
mock_input_model,
mock_creds_manager,
):
"""
[SECRT-1772] When acquire() raises ValueError (credential belongs to another user),
the error message should mention 'not available' (not 'expired').
"""
from backend.executor.manager import _acquire_auto_credentials
manager, _, _ = mock_creds_manager
manager.acquire.side_effect = ValueError(
"Credentials #cred-id-123 for user #user-2 not found"
)
input_data = {"spreadsheet": google_drive_file_data["valid"]}
with pytest.raises(ValueError, match="not available in your account"):
await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-2",
)
@pytest.mark.asyncio
async def test_auto_credentials_deleted_credential_error(
mocker: MockerFixture,
google_drive_file_data,
mock_input_model,
mock_creds_manager,
):
"""
[SECRT-1772] When acquire() raises ValueError (credential was deleted),
the error message should mention 'not available' (not 'expired').
"""
from backend.executor.manager import _acquire_auto_credentials
manager, _, _ = mock_creds_manager
manager.acquire.side_effect = ValueError(
"Credentials #cred-id-123 for user #user-1 not found"
)
input_data = {"spreadsheet": google_drive_file_data["valid"]}
with pytest.raises(ValueError, match="not available in your account"):
await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)
@pytest.mark.asyncio
async def test_auto_credentials_lock_appended(
mocker: MockerFixture,
google_drive_file_data,
mock_input_model,
mock_creds_manager,
):
"""Lock from acquire() should be included in returned locks list."""
from backend.executor.manager import _acquire_auto_credentials
manager, _, mock_lock = mock_creds_manager
input_data = {"spreadsheet": google_drive_file_data["valid"]}
extra_kwargs, locks = await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)
assert len(locks) == 1
assert locks[0] is mock_lock
@pytest.mark.asyncio
async def test_auto_credentials_multiple_fields(
mocker: MockerFixture,
mock_creds_manager,
):
"""When there are multiple auto_credentials fields, only valid ones should acquire."""
from backend.executor.manager import _acquire_auto_credentials
manager, mock_creds, mock_lock = mock_creds_manager
input_model = mocker.MagicMock()
input_model.get_auto_credentials_fields.return_value = {
"credentials": {
"field_name": "spreadsheet",
"config": {"provider": "google", "type": "oauth2"},
},
"credentials2": {
"field_name": "doc_file",
"config": {"provider": "google", "type": "oauth2"},
},
}
input_data = {
"spreadsheet": {
"_credentials_id": "cred-id-123",
"id": "file-1",
"name": "file1.xlsx",
},
"doc_file": {
"_credentials_id": None,
"id": "file-2",
"name": "chained.doc",
},
}
extra_kwargs, locks = await _acquire_auto_credentials(
input_model=input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)
# Only the first field should have acquired credentials
manager.acquire.assert_called_once_with("user-1", "cred-id-123")
assert "credentials" in extra_kwargs
assert "credentials2" not in extra_kwargs
assert len(locks) == 1

View File

@@ -254,7 +254,8 @@ async def _validate_node_input_credentials(
# Find any fields of type CredentialsMetaInput
credentials_fields = block.input_schema.get_credentials_fields()
- if not credentials_fields:
+ auto_credentials_fields = block.input_schema.get_auto_credentials_fields()
+ if not credentials_fields and not auto_credentials_fields:
continue
# Track if any credential field is missing for this node
@@ -334,6 +335,47 @@ async def _validate_node_input_credentials(
] = "Invalid credentials: type/provider mismatch"
continue
# Validate auto-credentials (GoogleDriveFileField-based)
# These have _credentials_id embedded in the file field data
if auto_credentials_fields:
for _kwarg_name, info in auto_credentials_fields.items():
field_name = info["field_name"]
# Check input_default and nodes_input_masks for the field value
field_value = node.input_default.get(field_name)
if nodes_input_masks and node.id in nodes_input_masks:
field_value = nodes_input_masks[node.id].get(
field_name, field_value
)
if field_value and isinstance(field_value, dict):
if "_credentials_id" not in field_value:
# Key removed (e.g., on fork) — needs re-auth
has_missing_credentials = True
credential_errors[node.id][field_name] = (
"Authentication missing for the selected file. "
"Please re-select the file to authenticate with "
"your own account."
)
continue
cred_id = field_value.get("_credentials_id")
if cred_id and isinstance(cred_id, str):
try:
creds_store = get_integration_credentials_store()
creds = await creds_store.get_creds_by_id(user_id, cred_id)
except Exception as e:
has_missing_credentials = True
credential_errors[node.id][
field_name
] = f"Credentials not available: {e}"
continue
if not creds:
has_missing_credentials = True
credential_errors[node.id][field_name] = (
"The saved credentials are not available "
"for your account. Please re-select the file to "
"authenticate with your own account."
)
# If node has optional credentials and any are missing, mark for skipping
# But only if there are no other errors for this node
if (
@@ -365,8 +407,9 @@ def make_node_credentials_input_map(
"""
result: dict[str, dict[str, JsonValue]] = {}
- # Get aggregated credentials fields for the graph
- graph_cred_inputs = graph.aggregate_credentials_inputs()
+ # Only map regular credentials (not auto_credentials, which are resolved
+ # at execution time from _credentials_id in file field data)
+ graph_cred_inputs = graph.regular_credentials_inputs
for graph_input_name, (_, compatible_node_fields, _) in graph_cred_inputs.items():
# Best-effort map: skip missing items

View File

@@ -907,3 +907,335 @@ async def test_stop_graph_execution_cascades_to_child_with_reviews(
# Verify both parent and child status updates
assert mock_execution_db.update_graph_execution_stats.call_count >= 1
# ============================================================================
# Tests for auto_credentials validation in _validate_node_input_credentials
# (Fix 3: SECRT-1772 + Fix 4: Path 4)
# ============================================================================
@pytest.mark.asyncio
async def test_validate_node_input_credentials_auto_creds_valid(
mocker: MockerFixture,
):
"""
[SECRT-1772] When a node has auto_credentials with a valid _credentials_id
that exists in the store, validation should pass without errors.
"""
from backend.executor.utils import _validate_node_input_credentials
mock_node = mocker.MagicMock()
mock_node.id = "node-with-auto-creds"
mock_node.credentials_optional = False
mock_node.input_default = {
"spreadsheet": {
"_credentials_id": "valid-cred-id",
"id": "file-123",
"name": "test.xlsx",
}
}
mock_block = mocker.MagicMock()
# No regular credentials fields
mock_block.input_schema.get_credentials_fields.return_value = {}
# Has auto_credentials fields
mock_block.input_schema.get_auto_credentials_fields.return_value = {
"credentials": {
"field_name": "spreadsheet",
"config": {"provider": "google", "type": "oauth2"},
}
}
mock_node.block = mock_block
mock_graph = mocker.MagicMock()
mock_graph.nodes = [mock_node]
# Mock the credentials store to return valid credentials
mock_store = mocker.MagicMock()
mock_creds = mocker.MagicMock()
mock_creds.id = "valid-cred-id"
mock_store.get_creds_by_id = mocker.AsyncMock(return_value=mock_creds)
mocker.patch(
"backend.executor.utils.get_integration_credentials_store",
return_value=mock_store,
)
errors, nodes_to_skip = await _validate_node_input_credentials(
graph=mock_graph,
user_id="test-user",
nodes_input_masks=None,
)
assert mock_node.id not in errors
assert mock_node.id not in nodes_to_skip
@pytest.mark.asyncio
async def test_validate_node_input_credentials_auto_creds_missing(
mocker: MockerFixture,
):
"""
[SECRT-1772] When a node has auto_credentials with a _credentials_id
that doesn't exist for the current user, validation should report an error.
"""
from backend.executor.utils import _validate_node_input_credentials
mock_node = mocker.MagicMock()
mock_node.id = "node-with-bad-auto-creds"
mock_node.credentials_optional = False
mock_node.input_default = {
"spreadsheet": {
"_credentials_id": "other-users-cred-id",
"id": "file-123",
"name": "test.xlsx",
}
}
mock_block = mocker.MagicMock()
mock_block.input_schema.get_credentials_fields.return_value = {}
mock_block.input_schema.get_auto_credentials_fields.return_value = {
"credentials": {
"field_name": "spreadsheet",
"config": {"provider": "google", "type": "oauth2"},
}
}
mock_node.block = mock_block
mock_graph = mocker.MagicMock()
mock_graph.nodes = [mock_node]
# Mock the credentials store to return None (cred not found for this user)
mock_store = mocker.MagicMock()
mock_store.get_creds_by_id = mocker.AsyncMock(return_value=None)
mocker.patch(
"backend.executor.utils.get_integration_credentials_store",
return_value=mock_store,
)
errors, nodes_to_skip = await _validate_node_input_credentials(
graph=mock_graph,
user_id="different-user",
nodes_input_masks=None,
)
assert mock_node.id in errors
assert "spreadsheet" in errors[mock_node.id]
assert "not available" in errors[mock_node.id]["spreadsheet"].lower()
@pytest.mark.asyncio
async def test_validate_node_input_credentials_both_regular_and_auto(
mocker: MockerFixture,
):
"""
[SECRT-1772] A node that has BOTH regular credentials AND auto_credentials
should have both validated.
"""
from backend.executor.utils import _validate_node_input_credentials
mock_node = mocker.MagicMock()
mock_node.id = "node-with-both-creds"
mock_node.credentials_optional = False
mock_node.input_default = {
"credentials": {
"id": "regular-cred-id",
"provider": "github",
"type": "api_key",
},
"spreadsheet": {
"_credentials_id": "auto-cred-id",
"id": "file-123",
"name": "test.xlsx",
},
}
mock_credentials_field_type = mocker.MagicMock()
mock_credentials_meta = mocker.MagicMock()
mock_credentials_meta.id = "regular-cred-id"
mock_credentials_meta.provider = "github"
mock_credentials_meta.type = "api_key"
mock_credentials_field_type.model_validate.return_value = mock_credentials_meta
mock_block = mocker.MagicMock()
# Regular credentials field
mock_block.input_schema.get_credentials_fields.return_value = {
"credentials": mock_credentials_field_type,
}
# Auto-credentials field
mock_block.input_schema.get_auto_credentials_fields.return_value = {
"auto_credentials": {
"field_name": "spreadsheet",
"config": {"provider": "google", "type": "oauth2"},
}
}
mock_node.block = mock_block
mock_graph = mocker.MagicMock()
mock_graph.nodes = [mock_node]
# Mock the credentials store to return valid credentials for both
mock_store = mocker.MagicMock()
mock_regular_creds = mocker.MagicMock()
mock_regular_creds.id = "regular-cred-id"
mock_regular_creds.provider = "github"
mock_regular_creds.type = "api_key"
mock_auto_creds = mocker.MagicMock()
mock_auto_creds.id = "auto-cred-id"
def get_creds_side_effect(user_id, cred_id):
if cred_id == "regular-cred-id":
return mock_regular_creds
elif cred_id == "auto-cred-id":
return mock_auto_creds
return None
mock_store.get_creds_by_id = mocker.AsyncMock(side_effect=get_creds_side_effect)
mocker.patch(
"backend.executor.utils.get_integration_credentials_store",
return_value=mock_store,
)
errors, nodes_to_skip = await _validate_node_input_credentials(
graph=mock_graph,
user_id="test-user",
nodes_input_masks=None,
)
# Both should validate successfully - no errors
assert mock_node.id not in errors
assert mock_node.id not in nodes_to_skip
@pytest.mark.asyncio
async def test_validate_node_input_credentials_auto_creds_skipped_when_none(
mocker: MockerFixture,
):
"""
When a node has auto_credentials but the field value has _credentials_id=None
(e.g., from upstream connection), validation should skip it without error.
"""
from backend.executor.utils import _validate_node_input_credentials
mock_node = mocker.MagicMock()
mock_node.id = "node-with-chained-auto-creds"
mock_node.credentials_optional = False
mock_node.input_default = {
"spreadsheet": {
"_credentials_id": None,
"id": "file-123",
"name": "test.xlsx",
}
}
mock_block = mocker.MagicMock()
mock_block.input_schema.get_credentials_fields.return_value = {}
mock_block.input_schema.get_auto_credentials_fields.return_value = {
"credentials": {
"field_name": "spreadsheet",
"config": {"provider": "google", "type": "oauth2"},
}
}
mock_node.block = mock_block
mock_graph = mocker.MagicMock()
mock_graph.nodes = [mock_node]
errors, nodes_to_skip = await _validate_node_input_credentials(
graph=mock_graph,
user_id="test-user",
nodes_input_masks=None,
)
# No error - chained data with None cred_id is valid
assert mock_node.id not in errors
# ============================================================================
# Tests for CredentialsFieldInfo auto_credential tag (Fix 4: Path 4)
# ============================================================================
def test_credentials_field_info_auto_credential_tag():
"""
[Path 4] CredentialsFieldInfo should support is_auto_credential and
input_field_name fields for distinguishing auto from regular credentials.
"""
from backend.data.model import CredentialsFieldInfo
# Regular credential should have is_auto_credential=False by default
regular = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["github"],
"credentials_types": ["api_key"],
},
by_alias=True,
)
assert regular.is_auto_credential is False
assert regular.input_field_name is None
# Auto credential should have is_auto_credential=True
auto = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["google"],
"credentials_types": ["oauth2"],
"is_auto_credential": True,
"input_field_name": "spreadsheet",
},
by_alias=True,
)
assert auto.is_auto_credential is True
assert auto.input_field_name == "spreadsheet"
def test_make_node_credentials_input_map_excludes_auto_creds(
mocker: MockerFixture,
):
"""
[Path 4] make_node_credentials_input_map should only include regular credentials,
not auto_credentials (which are resolved at execution time).
"""
from backend.data.model import CredentialsFieldInfo, CredentialsMetaInput
from backend.executor.utils import make_node_credentials_input_map
from backend.integrations.providers import ProviderName
# Create a mock graph with aggregate_credentials_inputs that returns
# both regular and auto credentials
mock_graph = mocker.MagicMock()
regular_field_info = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["github"],
"credentials_types": ["api_key"],
"is_auto_credential": False,
},
by_alias=True,
)
# Mock regular_credentials_inputs property (auto_credentials are excluded)
mock_graph.regular_credentials_inputs = {
"github_creds": (regular_field_info, {("node-1", "credentials")}, True),
}
graph_credentials_input = {
"github_creds": CredentialsMetaInput(
id="cred-123",
provider=ProviderName("github"),
type="api_key",
),
}
result = make_node_credentials_input_map(mock_graph, graph_credentials_input)
# Regular credentials should be mapped
assert "node-1" in result
assert "credentials" in result["node-1"]
# Auto credentials should NOT appear in the result
# (they would have been mapped to the kwarg_name "credentials" not "spreadsheet")
for node_id, fields in result.items():
for field_name, value in fields.items():
# Verify no auto-credential phantom entries
if isinstance(value, dict):
assert "_credentials_id" not in value

View File

@@ -22,11 +22,6 @@ Sentry.init({
enabled: shouldEnable,
- // Suppress cross-origin stylesheet errors from Sentry Replay (rrweb)
- // serializing DOM snapshots with cross-origin stylesheets
- // (e.g., from browser extensions or CDN-loaded CSS)
- ignoreErrors: [/Not allowed to access cross-origin stylesheet/],
// Add optional integrations for additional features
integrations: [
Sentry.captureConsoleIntegration(),

View File

@@ -4,7 +4,7 @@ import {
} from "@/app/api/__generated__/endpoints/graphs/graphs";
import { useToast } from "@/components/molecules/Toast/use-toast";
import { parseAsInteger, parseAsString, useQueryStates } from "nuqs";
import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta";
import { GraphExecutionMeta } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/use-agent-runs";
import { useGraphStore } from "@/app/(platform)/build/stores/graphStore";
import { useShallow } from "zustand/react/shallow";
import { useEffect, useState } from "react";

View File

@@ -1,6 +1,6 @@
import { useCallback } from "react";
import { AgentRunDraftView } from "@/app/(platform)/build/components/legacy-builder/agent-run-draft-view";
import { AgentRunDraftView } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import type {
CredentialsMetaInput,

View File

@@ -18,7 +18,7 @@ import {
import { useToast } from "@/components/molecules/Toast/use-toast";
import { useQueryClient } from "@tanstack/react-query";
import { getGetV2ListMySubmissionsQueryKey } from "@/app/api/__generated__/endpoints/store/store";
import { CronExpressionDialog } from "@/components/contextual/CronScheduler/cron-scheduler-dialog";
import { CronExpressionDialog } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/cron-scheduler-dialog";
import { humanizeCronExpression } from "@/lib/cron-expression-utils";
import { CalendarClockIcon } from "lucide-react";

View File

@@ -29,7 +29,6 @@ export function ScheduleListItem({
description={formatDistanceToNow(schedule.next_run_time, {
addSuffix: true,
})}
- descriptionTitle={new Date(schedule.next_run_time).toString()}
onClick={onClick}
selected={selected}
icon={

View File

@@ -7,7 +7,6 @@ import React from "react";
interface Props {
title: string;
description?: string;
- descriptionTitle?: string;
icon?: React.ReactNode;
selected?: boolean;
onClick?: () => void;
@@ -17,7 +16,6 @@ interface Props {
export function SidebarItemCard({
title,
description,
- descriptionTitle,
icon,
selected,
onClick,
@@ -40,11 +38,7 @@ export function SidebarItemCard({
>
{title}
</Text>
- <Text
-   variant="body"
-   className="leading-tight !text-zinc-500"
-   title={descriptionTitle}
- >
+ <Text variant="body" className="leading-tight !text-zinc-500">
{description}
</Text>
</div>

View File

@@ -81,9 +81,6 @@ export function TaskListItem({
? formatDistanceToNow(run.started_at, { addSuffix: true })
: "—"
}
- descriptionTitle={
-   run.started_at ? new Date(run.started_at).toString() : undefined
- }
onClick={onClick}
selected={selected}
actions={

View File

@@ -0,0 +1,631 @@
"use client";
import { useParams, useRouter } from "next/navigation";
import { useQueryState } from "nuqs";
import React, {
useCallback,
useEffect,
useMemo,
useRef,
useState,
} from "react";
import {
Graph,
GraphExecution,
GraphExecutionID,
GraphExecutionMeta,
GraphID,
LibraryAgent,
LibraryAgentID,
LibraryAgentPreset,
LibraryAgentPresetID,
Schedule,
ScheduleID,
} from "@/lib/autogpt-server-api";
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
import { exportAsJSONFile } from "@/lib/utils";
import DeleteConfirmDialog from "@/components/__legacy__/delete-confirm-dialog";
import type { ButtonAction } from "@/components/__legacy__/types";
import { Button } from "@/components/__legacy__/ui/button";
import {
Dialog,
DialogContent,
DialogDescription,
DialogFooter,
DialogHeader,
DialogTitle,
} from "@/components/__legacy__/ui/dialog";
import LoadingBox, { LoadingSpinner } from "@/components/__legacy__/ui/loading";
import {
useToast,
useToastOnFail,
} from "@/components/molecules/Toast/use-toast";
import { AgentRunDetailsView } from "./components/agent-run-details-view";
import { AgentRunDraftView } from "./components/agent-run-draft-view";
import { CreatePresetDialog } from "./components/create-preset-dialog";
import { useAgentRunsInfinite } from "./use-agent-runs";
import { AgentRunsSelectorList } from "./components/agent-runs-selector-list";
import { AgentScheduleDetailsView } from "./components/agent-schedule-details-view";
export function OldAgentLibraryView() {
const { id: agentID }: { id: LibraryAgentID } = useParams();
const [executionId, setExecutionId] = useQueryState("executionId");
const toastOnFail = useToastOnFail();
const { toast } = useToast();
const router = useRouter();
const api = useBackendAPI();
// ============================ STATE =============================
const [graph, setGraph] = useState<Graph | null>(null); // Graph version corresponding to LibraryAgent
const [agent, setAgent] = useState<LibraryAgent | null>(null);
const agentRunsQuery = useAgentRunsInfinite(graph?.id); // only runs once graph.id is known
const agentRuns = agentRunsQuery.agentRuns;
const [agentPresets, setAgentPresets] = useState<LibraryAgentPreset[]>([]);
const [schedules, setSchedules] = useState<Schedule[]>([]);
const [selectedView, selectView] = useState<
| { type: "run"; id?: GraphExecutionID }
| { type: "preset"; id: LibraryAgentPresetID }
| { type: "schedule"; id: ScheduleID }
>({ type: "run" });
const [selectedRun, setSelectedRun] = useState<
GraphExecution | GraphExecutionMeta | null
>(null);
const selectedSchedule =
selectedView.type == "schedule"
? schedules.find((s) => s.id == selectedView.id)
: null;
const [isFirstLoad, setIsFirstLoad] = useState<boolean>(true);
const [agentDeleteDialogOpen, setAgentDeleteDialogOpen] =
useState<boolean>(false);
const [confirmingDeleteAgentRun, setConfirmingDeleteAgentRun] =
useState<GraphExecutionMeta | null>(null);
const [confirmingDeleteAgentPreset, setConfirmingDeleteAgentPreset] =
useState<LibraryAgentPresetID | null>(null);
const [copyAgentDialogOpen, setCopyAgentDialogOpen] = useState(false);
const [creatingPresetFromExecutionID, setCreatingPresetFromExecutionID] =
useState<GraphExecutionID | null>(null);
// Set page title with agent name
useEffect(() => {
if (agent) {
document.title = `${agent.name} - Library - AutoGPT Platform`;
}
}, [agent]);
const openRunDraftView = useCallback(() => {
selectView({ type: "run" });
}, []);
const selectRun = useCallback((id: GraphExecutionID) => {
selectView({ type: "run", id });
}, []);
const selectPreset = useCallback((id: LibraryAgentPresetID) => {
selectView({ type: "preset", id });
}, []);
const selectSchedule = useCallback((id: ScheduleID) => {
selectView({ type: "schedule", id });
}, []);
const graphVersions = useRef<Record<number, Graph>>({});
const loadingGraphVersions = useRef<Record<number, Promise<Graph>>>({});
const getGraphVersion = useCallback(
async (graphID: GraphID, version: number) => {
if (version in graphVersions.current)
return graphVersions.current[version];
if (version in loadingGraphVersions.current)
return loadingGraphVersions.current[version];
const pendingGraph = api.getGraph(graphID, version).then((graph) => {
graphVersions.current[version] = graph;
return graph;
});
// Cache promise as well to avoid duplicate requests
loadingGraphVersions.current[version] = pendingGraph;
return pendingGraph;
},
[api, graphVersions, loadingGraphVersions],
);
const lastRefresh = useRef<number>(0);
const refreshPageData = useCallback(() => {
if (Date.now() - lastRefresh.current < 2e3) return; // 2 second debounce
lastRefresh.current = Date.now();
api.getLibraryAgent(agentID).then((agent) => {
setAgent(agent);
getGraphVersion(agent.graph_id, agent.graph_version).then(
(_graph) =>
(graph && graph.version == _graph.version) || setGraph(_graph),
);
Promise.all([
agentRunsQuery.refetchRuns(),
api.listLibraryAgentPresets({
graph_id: agent.graph_id,
page_size: 100,
}),
]).then(([runsQueryResult, presets]) => {
setAgentPresets(presets.presets);
const newestAgentRunsResponse = runsQueryResult.data?.pages[0];
if (!newestAgentRunsResponse || newestAgentRunsResponse.status != 200)
return;
const newestAgentRuns = newestAgentRunsResponse.data.executions;
// Preload the corresponding graph versions for the latest 10 runs
new Set(
newestAgentRuns.slice(0, 10).map((run) => run.graph_version),
).forEach((version) => getGraphVersion(agent.graph_id, version));
});
});
}, [api, agentID, getGraphVersion, graph]);
// On first load: select the latest run
useEffect(() => {
// Only for first load or first execution
if (selectedView.id || !isFirstLoad) return;
if (agentRuns.length == 0 && agentPresets.length == 0) return;
setIsFirstLoad(false);
if (agentRuns.length > 0) {
// select latest run
const latestRun = agentRuns.reduce((latest, current) => {
if (!latest.started_at && !current.started_at) return latest;
if (!latest.started_at) return current;
if (!current.started_at) return latest;
return latest.started_at > current.started_at ? latest : current;
}, agentRuns[0]);
selectRun(latestRun.id as GraphExecutionID);
} else {
// select top preset
const latestPreset = agentPresets.toSorted(
(a, b) => b.updated_at.getTime() - a.updated_at.getTime(),
)[0];
selectPreset(latestPreset.id);
}
}, [
isFirstLoad,
selectedView.id,
agentRuns,
agentPresets,
selectRun,
selectPreset,
]);
useEffect(() => {
if (executionId) {
selectRun(executionId as GraphExecutionID);
setExecutionId(null);
}
}, [executionId, selectRun, setExecutionId]);
// Initial load
useEffect(() => {
refreshPageData();
// Show a toast when the WebSocket connection disconnects
let connectionToast: ReturnType<typeof toast> | null = null;
const cancelDisconnectHandler = api.onWebSocketDisconnect(() => {
connectionToast ??= toast({
title: "Connection to server was lost",
variant: "destructive",
description: (
<div className="flex items-center">
Trying to reconnect...
<LoadingSpinner className="ml-1.5 size-3.5" />
</div>
),
duration: Infinity,
dismissable: true,
});
});
const cancelConnectHandler = api.onWebSocketConnect(() => {
if (connectionToast)
connectionToast.update({
id: connectionToast.id,
title: "✅ Connection re-established",
variant: "default",
description: (
<div className="flex items-center">
Refreshing data...
<LoadingSpinner className="ml-1.5 size-3.5" />
</div>
),
duration: 2000,
dismissable: true,
});
connectionToast = null;
});
return () => {
cancelDisconnectHandler();
cancelConnectHandler();
};
}, []);
// Subscribe to WebSocket updates for agent runs
useEffect(() => {
if (!agent?.graph_id) return;
return api.onWebSocketConnect(() => {
refreshPageData(); // Sync up on (re)connect
// Subscribe to all executions for this agent
api.subscribeToGraphExecutions(agent.graph_id);
});
}, [api, agent?.graph_id, refreshPageData]);
// Handle execution updates
useEffect(() => {
const detachExecUpdateHandler = api.onWebSocketMessage(
"graph_execution_event",
(data) => {
if (data.graph_id != agent?.graph_id) return;
agentRunsQuery.upsertAgentRun(data);
if (data.id === selectedView.id) {
// Update currently viewed run
setSelectedRun(data);
}
},
);
return () => {
detachExecUpdateHandler();
};
}, [api, agent?.graph_id, selectedView.id]);
// Pre-load selectedRun based on selectedView
useEffect(() => {
if (selectedView.type != "run" || !selectedView.id) return;
const newSelectedRun = agentRuns.find((run) => run.id == selectedView.id);
if (selectedView.id !== selectedRun?.id) {
// Pull partial data from "cache" while waiting for the rest to load
setSelectedRun((newSelectedRun as GraphExecutionMeta) ?? null);
}
}, [api, selectedView, agentRuns, selectedRun?.id]);
// Load selectedRun based on selectedView; refresh on agent refresh
useEffect(() => {
if (selectedView.type != "run" || !selectedView.id || !agent) return;
api
.getGraphExecutionInfo(agent.graph_id, selectedView.id)
.then(async (run) => {
// Ensure corresponding graph version is available before rendering I/O
await getGraphVersion(run.graph_id, run.graph_version);
setSelectedRun(run);
});
}, [api, selectedView, agent, getGraphVersion]);
const fetchSchedules = useCallback(async () => {
if (!agent) return;
setSchedules(await api.listGraphExecutionSchedules(agent.graph_id));
}, [api, agent?.graph_id]);
useEffect(() => {
fetchSchedules();
}, [fetchSchedules]);
// =========================== ACTIONS ============================
const deleteRun = useCallback(
async (run: GraphExecutionMeta) => {
if (run.status == "RUNNING" || run.status == "QUEUED") {
await api.stopGraphExecution(run.graph_id, run.id);
}
await api.deleteGraphExecution(run.id);
setConfirmingDeleteAgentRun(null);
if (selectedView.type == "run" && selectedView.id == run.id) {
openRunDraftView();
}
agentRunsQuery.removeAgentRun(run.id);
},
[api, selectedView, openRunDraftView],
);
const deletePreset = useCallback(
async (presetID: LibraryAgentPresetID) => {
await api.deleteLibraryAgentPreset(presetID);
setConfirmingDeleteAgentPreset(null);
if (selectedView.type == "preset" && selectedView.id == presetID) {
openRunDraftView();
}
setAgentPresets((presets) => presets.filter((p) => p.id !== presetID));
},
[api, selectedView, openRunDraftView],
);
const deleteSchedule = useCallback(
async (scheduleID: ScheduleID) => {
const removedSchedule =
await api.deleteGraphExecutionSchedule(scheduleID);
setSchedules((schedules) => {
const newSchedules = schedules.filter(
(s) => s.id !== removedSchedule.id,
);
if (
selectedView.type == "schedule" &&
selectedView.id == removedSchedule.id
) {
if (newSchedules.length > 0) {
// Select next schedule if available
selectSchedule(newSchedules[0].id);
} else {
// Reset to draft view if current schedule was deleted
openRunDraftView();
}
}
return newSchedules;
});
},
[api, selectedView, selectSchedule, openRunDraftView],
);
const handleCreatePresetFromRun = useCallback(
async (name: string, description: string) => {
if (!creatingPresetFromExecutionID) return;
await api
.createLibraryAgentPreset({
name,
description,
graph_execution_id: creatingPresetFromExecutionID,
})
.then((preset) => {
setAgentPresets((prev) => [...prev, preset]);
selectPreset(preset.id);
setCreatingPresetFromExecutionID(null);
})
.catch(toastOnFail("create a preset"));
},
[api, creatingPresetFromExecutionID, selectPreset, toastOnFail],
);
const downloadGraph = useCallback(
async () =>
agent &&
// Export sanitized graph from backend
api
.getGraph(agent.graph_id, agent.graph_version, true)
.then((graph) =>
exportAsJSONFile(graph, `${graph.name}_v${graph.version}.json`),
),
[api, agent],
);
const copyAgent = useCallback(async () => {
setCopyAgentDialogOpen(false);
api
.forkLibraryAgent(agentID)
.then((newAgent) => {
router.push(`/library/agents/${newAgent.id}`);
})
.catch((error) => {
console.error("Error copying agent:", error);
toast({
title: "Error copying agent",
description: `An error occurred while copying the agent: ${error.message}`,
variant: "destructive",
});
});
}, [agentID, api, router, toast]);
const agentActions: ButtonAction[] = useMemo(
() => [
{
label: "Customize agent",
href: `/build?flowID=${agent?.graph_id}&flowVersion=${agent?.graph_version}`,
disabled: !agent?.can_access_graph,
},
{ label: "Export agent to file", callback: downloadGraph },
...(!agent?.can_access_graph
? [
{
label: "Edit a copy",
callback: () => setCopyAgentDialogOpen(true),
},
]
: []),
{
label: "Delete agent",
callback: () => setAgentDeleteDialogOpen(true),
},
],
[agent, downloadGraph],
);
const runGraph =
graphVersions.current[selectedRun?.graph_version ?? 0] ?? graph;
const onCreateSchedule = useCallback(
(schedule: Schedule) => {
setSchedules((prev) => [...prev, schedule]);
selectSchedule(schedule.id);
},
[selectSchedule],
);
const onCreatePreset = useCallback(
(preset: LibraryAgentPreset) => {
setAgentPresets((prev) => [...prev, preset]);
selectPreset(preset.id);
},
[selectPreset],
);
const onUpdatePreset = useCallback(
(updated: LibraryAgentPreset) => {
setAgentPresets((prev) =>
prev.map((p) => (p.id === updated.id ? updated : p)),
);
selectPreset(updated.id);
},
[selectPreset],
);
if (!agent || !graph) {
return <LoadingBox className="h-[90vh]" />;
}
return (
<div className="container justify-stretch p-0 pt-16 lg:flex">
{/* Sidebar w/ list of runs */}
{/* TODO: render this below header in sm and md layouts */}
<AgentRunsSelectorList
className="agpt-div w-full border-b pb-2 lg:w-auto lg:border-b-0 lg:border-r lg:pb-0"
agent={agent}
agentRunsQuery={agentRunsQuery}
agentPresets={agentPresets}
schedules={schedules}
selectedView={selectedView}
onSelectRun={selectRun}
onSelectPreset={selectPreset}
onSelectSchedule={selectSchedule}
onSelectDraftNewRun={openRunDraftView}
doDeleteRun={setConfirmingDeleteAgentRun}
doDeletePreset={setConfirmingDeleteAgentPreset}
doDeleteSchedule={deleteSchedule}
doCreatePresetFromRun={setCreatingPresetFromExecutionID}
/>
<div className="flex-1">
{/* Header */}
<div className="agpt-div w-full border-b">
<h1
data-testid="agent-title"
className="font-poppins text-3xl font-medium"
>
{
agent.name /* TODO: use dynamic/custom run title - https://github.com/Significant-Gravitas/AutoGPT/issues/9184 */
}
</h1>
</div>
{/* Run / Schedule views */}
{(selectedView.type == "run" && selectedView.id ? (
selectedRun && runGraph ? (
<AgentRunDetailsView
agent={agent}
graph={runGraph}
run={selectedRun}
agentActions={agentActions}
onRun={selectRun}
doDeleteRun={() => setConfirmingDeleteAgentRun(selectedRun)}
doCreatePresetFromRun={() =>
setCreatingPresetFromExecutionID(selectedRun.id)
}
/>
) : null
) : selectedView.type == "run" ? (
/* Draft new runs / Create new presets */
<AgentRunDraftView
graph={graph}
onRun={selectRun}
onCreateSchedule={onCreateSchedule}
onCreatePreset={onCreatePreset}
agentActions={agentActions}
recommendedScheduleCron={agent?.recommended_schedule_cron || null}
/>
) : selectedView.type == "preset" ? (
/* Edit & update presets */
<AgentRunDraftView
graph={graph}
agentPreset={
agentPresets.find((preset) => preset.id == selectedView.id)!
}
onRun={selectRun}
recommendedScheduleCron={agent?.recommended_schedule_cron || null}
onCreateSchedule={onCreateSchedule}
onUpdatePreset={onUpdatePreset}
doDeletePreset={setConfirmingDeleteAgentPreset}
agentActions={agentActions}
/>
) : selectedView.type == "schedule" ? (
selectedSchedule &&
graph && (
<AgentScheduleDetailsView
graph={graph}
schedule={selectedSchedule}
// agent={agent}
agentActions={agentActions}
onForcedRun={selectRun}
doDeleteSchedule={deleteSchedule}
/>
)
) : null) || <LoadingBox className="h-[70vh]" />}
<DeleteConfirmDialog
entityType="agent"
open={agentDeleteDialogOpen}
onOpenChange={setAgentDeleteDialogOpen}
onDoDelete={() =>
agent &&
api.deleteLibraryAgent(agent.id).then(() => router.push("/library"))
}
/>
<DeleteConfirmDialog
entityType="agent run"
open={!!confirmingDeleteAgentRun}
onOpenChange={(open) => !open && setConfirmingDeleteAgentRun(null)}
onDoDelete={() =>
confirmingDeleteAgentRun && deleteRun(confirmingDeleteAgentRun)
}
/>
<DeleteConfirmDialog
entityType={agent.has_external_trigger ? "trigger" : "agent preset"}
open={!!confirmingDeleteAgentPreset}
onOpenChange={(open) => !open && setConfirmingDeleteAgentPreset(null)}
onDoDelete={() =>
confirmingDeleteAgentPreset &&
deletePreset(confirmingDeleteAgentPreset)
}
/>
{/* Copy agent confirmation dialog */}
<Dialog
onOpenChange={setCopyAgentDialogOpen}
open={copyAgentDialogOpen}
>
<DialogContent>
<DialogHeader>
<DialogTitle>You&apos;re making an editable copy</DialogTitle>
<DialogDescription className="pt-2">
The original Marketplace agent stays the same and cannot be
edited. We&apos;ll save a new version of this agent to your
Library. From there, you can customize it however you&apos;d
like by clicking &quot;Customize agent&quot;, which will open
the builder where you can see and modify its inner workings.
</DialogDescription>
</DialogHeader>
<DialogFooter className="justify-end">
<Button
type="button"
variant="outline"
onClick={() => setCopyAgentDialogOpen(false)}
>
Cancel
</Button>
<Button type="button" onClick={copyAgent}>
Continue
</Button>
</DialogFooter>
</DialogContent>
</Dialog>
<CreatePresetDialog
open={!!creatingPresetFromExecutionID}
onOpenChange={() => setCreatingPresetFromExecutionID(null)}
onConfirm={handleCreatePresetFromRun}
/>
</div>
</div>
);
}

View File

@@ -0,0 +1,445 @@
"use client";
import { format, formatDistanceToNow, formatDistanceStrict } from "date-fns";
import React, { useCallback, useMemo, useEffect } from "react";
import {
Graph,
GraphExecution,
GraphExecutionID,
GraphExecutionMeta,
LibraryAgent,
} from "@/lib/autogpt-server-api";
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
import ActionButtonGroup from "@/components/__legacy__/action-button-group";
import type { ButtonAction } from "@/components/__legacy__/types";
import {
Card,
CardContent,
CardHeader,
CardTitle,
} from "@/components/__legacy__/ui/card";
import {
IconRefresh,
IconSquare,
IconCircleAlert,
} from "@/components/__legacy__/ui/icons";
import { Input } from "@/components/__legacy__/ui/input";
import LoadingBox from "@/components/__legacy__/ui/loading";
import {
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import { useToastOnFail } from "@/components/molecules/Toast/use-toast";
import { AgentRunStatus, agentRunStatusMap } from "./agent-run-status-chip";
import useCredits from "@/hooks/useCredits";
import { AgentRunOutputView } from "./agent-run-output-view";
import { analytics } from "@/services/analytics";
import { PendingReviewsList } from "@/components/organisms/PendingReviewsList/PendingReviewsList";
import { usePendingReviewsForExecution } from "@/hooks/usePendingReviews";
export function AgentRunDetailsView({
agent,
graph,
run,
agentActions,
onRun,
doDeleteRun,
doCreatePresetFromRun,
}: {
agent: LibraryAgent;
graph: Graph;
run: GraphExecution | GraphExecutionMeta;
agentActions: ButtonAction[];
onRun: (runID: GraphExecutionID) => void;
doDeleteRun: () => void;
doCreatePresetFromRun: () => void;
}): React.ReactNode {
const api = useBackendAPI();
const { formatCredits } = useCredits();
const runStatus: AgentRunStatus = useMemo(
() => agentRunStatusMap[run.status],
[run],
);
const {
pendingReviews,
isLoading: reviewsLoading,
refetch: refetchReviews,
} = usePendingReviewsForExecution(run.id);
const toastOnFail = useToastOnFail();
// Refetch pending reviews when execution status changes to REVIEW
useEffect(() => {
if (runStatus === "review" && run.id) {
refetchReviews();
}
}, [runStatus, run.id, refetchReviews]);
const infoStats: { label: string; value: React.ReactNode }[] = useMemo(() => {
if (!run) return [];
return [
{
label: "Status",
value: runStatus.charAt(0).toUpperCase() + runStatus.slice(1),
},
{
label: "Started",
value: run.started_at
? `${formatDistanceToNow(run.started_at, { addSuffix: true })}, ${format(run.started_at, "HH:mm")}`
: "—",
},
...(run.stats
? [
{
label: "Duration",
value: formatDistanceStrict(0, run.stats.duration * 1000),
},
{ label: "Steps", value: run.stats.node_exec_count },
{ label: "Cost", value: formatCredits(run.stats.cost) },
]
: []),
];
}, [run, runStatus, formatCredits]);
const agentRunInputs:
| Record<
string,
{
title?: string;
/* type: BlockIOSubType; */
value: string | number | undefined;
}
>
| undefined = useMemo(() => {
if (!run.inputs) return undefined;
// TODO: show (link to) preset - https://github.com/Significant-Gravitas/AutoGPT/issues/9168
// Add type info from agent input schema
return Object.fromEntries(
Object.entries(run.inputs).map(([k, v]) => [
k,
{
title: graph.input_schema.properties[k]?.title,
// type: graph.input_schema.properties[k].type, // TODO: implement typed graph inputs
value: typeof v == "object" ? JSON.stringify(v, undefined, 2) : v,
},
]),
);
}, [graph, run]);
const runAgain = useCallback(() => {
if (
!run.inputs ||
!(graph.credentials_input_schema?.required ?? []).every(
(k) => k in (run.credential_inputs ?? {}),
)
)
return;
if (run.preset_id) {
return api
.executeLibraryAgentPreset(
run.preset_id,
run.inputs!,
run.credential_inputs!,
)
.then(({ id }) => {
analytics.sendDatafastEvent("run_agent", {
name: graph.name,
id: graph.id,
});
onRun(id);
})
.catch(toastOnFail("execute agent preset"));
}
return api
.executeGraph(
graph.id,
graph.version,
run.inputs!,
run.credential_inputs!,
"library",
)
.then(({ id }) => {
analytics.sendDatafastEvent("run_agent", {
name: graph.name,
id: graph.id,
});
onRun(id);
})
.catch(toastOnFail("execute agent"));
}, [api, graph, run, onRun, toastOnFail]);
const stopRun = useCallback(
() => api.stopGraphExecution(graph.id, run.id),
[api, graph.id, run.id],
);
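// agentRunOutputs semantics: undefined = output data not loaded yet (renders a
// spinner), null = the run's status has no outputs to show (card is hidden),
// otherwise an object of loaded outputs keyed by output name.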
const agentRunOutputs:
| Record<
string,
{
title?: string;
/* type: BlockIOSubType; */
values: Array<React.ReactNode>;
}
>
| null
| undefined = useMemo(() => {
if (!("outputs" in run)) return undefined;
if (!["running", "success", "failed", "stopped"].includes(runStatus))
return null;
// Add type info from agent output schema
return Object.fromEntries(
Object.entries(run.outputs).map(([k, vv]) => [
k,
{
title: graph.output_schema.properties[k].title,
/* type: agent.output_schema.properties[k].type */
values: vv.map((v) =>
typeof v == "object" ? JSON.stringify(v, undefined, 2) : v,
),
},
]),
);
}, [graph, run, runStatus]);
const runActions: ButtonAction[] = useMemo(
() => [
...(["running", "queued"].includes(runStatus)
? ([
{
label: (
<>
<IconSquare className="mr-2 size-4" />
Stop run
</>
),
variant: "secondary",
callback: stopRun,
},
] satisfies ButtonAction[])
: []),
...(["success", "failed", "stopped"].includes(runStatus) &&
!graph.has_external_trigger &&
(graph.credentials_input_schema?.required ?? []).every(
(k) => k in (run.credential_inputs ?? {}),
)
? [
{
label: (
<>
<IconRefresh className="mr-2 size-4" />
Run again
</>
),
callback: runAgain,
dataTestId: "run-again-button",
},
]
: []),
...(agent.can_access_graph
? [
{
label: "Open run in builder",
href: `/build?flowID=${run.graph_id}&flowVersion=${run.graph_version}&flowExecutionID=${run.id}`,
},
]
: []),
{ label: "Create preset from run", callback: doCreatePresetFromRun },
{ label: "Delete run", variant: "secondary", callback: doDeleteRun },
],
[
runStatus,
runAgain,
stopRun,
doDeleteRun,
doCreatePresetFromRun,
graph.has_external_trigger,
graph.credentials_input_schema?.required,
agent.can_access_graph,
run.graph_id,
run.graph_version,
run.id,
],
);
return (
<div className="agpt-div flex gap-6">
<div className="flex flex-1 flex-col gap-4">
<Card className="agpt-box">
<CardHeader>
<CardTitle className="font-poppins text-lg">Info</CardTitle>
</CardHeader>
<CardContent>
<div className="flex justify-stretch gap-4">
{infoStats.map(({ label, value }) => (
<div key={label} className="flex-1">
<p className="text-sm font-medium text-black">{label}</p>
<p className="text-sm text-neutral-600">{value}</p>
</div>
))}
</div>
{run.status === "FAILED" && (
<div className="mt-4 rounded-md border border-red-200 bg-red-50 p-3 dark:border-red-800 dark:bg-red-900/20">
<p className="text-sm text-red-800 dark:text-red-200">
<strong>Error:</strong>{" "}
{run.stats?.error ||
"The execution failed due to an internal error. You can re-run the agent to retry."}
</p>
</div>
)}
</CardContent>
</Card>
{/* Smart Agent Execution Summary */}
{run.stats?.activity_status && (
<Card className="agpt-box">
<CardHeader>
<CardTitle className="flex items-center gap-2 font-poppins text-lg">
Task Summary
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>
<IconCircleAlert className="size-4 cursor-help text-neutral-500 hover:text-neutral-700" />
</TooltipTrigger>
<TooltipContent>
<p className="max-w-xs">
This AI-generated summary describes how the agent
handled your task. It&apos;s an experimental feature and may
occasionally be inaccurate.
</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>
</CardTitle>
</CardHeader>
<CardContent className="space-y-4">
<p className="text-sm leading-relaxed text-neutral-700">
{run.stats.activity_status}
</p>
{/* Correctness Score */}
{typeof run.stats.correctness_score === "number" && (
<div className="flex items-center gap-3 rounded-lg bg-neutral-50 p-3">
<div className="flex items-center gap-2">
<span className="text-sm font-medium text-neutral-600">
Success Estimate:
</span>
<div className="flex items-center gap-2">
<div className="relative h-2 w-16 overflow-hidden rounded-full bg-neutral-200">
<div
className={`h-full transition-all ${
run.stats.correctness_score >= 0.8
? "bg-green-500"
: run.stats.correctness_score >= 0.6
? "bg-yellow-500"
: run.stats.correctness_score >= 0.4
? "bg-orange-500"
: "bg-red-500"
}`}
style={{
width: `${Math.round(run.stats.correctness_score * 100)}%`,
}}
/>
</div>
<span className="text-sm font-medium">
{Math.round(run.stats.correctness_score * 100)}%
</span>
</div>
</div>
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>
<IconCircleAlert className="size-4 cursor-help text-neutral-400 hover:text-neutral-600" />
</TooltipTrigger>
<TooltipContent>
<p className="max-w-xs">
AI-generated estimate of how well this execution
achieved its intended purpose. This score indicates
{run.stats.correctness_score >= 0.8
? " the agent was highly successful."
: run.stats.correctness_score >= 0.6
? " the agent was mostly successful with minor issues."
: run.stats.correctness_score >= 0.4
? " the agent was partially successful with some gaps."
: " the agent had limited success with significant issues."}
</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>
</div>
)}
</CardContent>
</Card>
)}
{agentRunOutputs !== null && (
<AgentRunOutputView agentRunOutputs={agentRunOutputs} />
)}
{/* Pending Reviews Section */}
{runStatus === "review" && (
<Card className="agpt-box">
<CardHeader>
<CardTitle className="font-poppins text-lg">
Pending Reviews ({pendingReviews.length})
</CardTitle>
</CardHeader>
<CardContent>
{reviewsLoading ? (
<LoadingBox spinnerSize={12} className="h-24" />
) : pendingReviews.length > 0 ? (
<PendingReviewsList
reviews={pendingReviews}
onReviewComplete={refetchReviews}
emptyMessage="No pending reviews for this execution"
/>
) : (
<div className="py-4 text-neutral-600">
No pending reviews for this execution
</div>
)}
</CardContent>
</Card>
)}
<Card className="agpt-box">
<CardHeader>
<CardTitle className="font-poppins text-lg">Input</CardTitle>
</CardHeader>
<CardContent className="flex flex-col gap-4">
{agentRunInputs !== undefined ? (
Object.entries(agentRunInputs).map(([key, { title, value }]) => (
<div key={key} className="flex flex-col gap-1.5">
<label className="text-sm font-medium">{title || key}</label>
<Input value={value} className="rounded-full" disabled />
</div>
))
) : (
<LoadingBox spinnerSize={12} className="h-24" />
)}
</CardContent>
</Card>
</div>
{/* Run / Agent Actions */}
<aside className="w-48 xl:w-56">
<div className="flex flex-col gap-8">
<ActionButtonGroup title="Run actions" actions={runActions} />
<ActionButtonGroup title="Agent actions" actions={agentActions} />
</div>
</aside>
</div>
);
}

View File

@@ -20,7 +20,7 @@ import {
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
import { RunAgentInputs } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs";
import { ScheduleTaskDialog } from "@/components/contextual/CronScheduler/cron-scheduler-dialog";
import { ScheduleTaskDialog } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/cron-scheduler-dialog";
import ActionButtonGroup from "@/components/__legacy__/action-button-group";
import type { ButtonAction } from "@/components/__legacy__/types";
import {
@@ -53,10 +53,7 @@ import { ClockIcon, CopyIcon, InfoIcon } from "@phosphor-icons/react";
import { CalendarClockIcon, Trash2Icon } from "lucide-react";
import { analytics } from "@/services/analytics";
import {
AgentStatus,
AgentStatusChip,
} from "@/app/(platform)/build/components/legacy-builder/agent-status-chip";
import { AgentStatus, AgentStatusChip } from "./agent-status-chip";
export function AgentRunDraftView({
graph,

View File

@@ -0,0 +1,178 @@
"use client";
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
import React, { useMemo } from "react";
import {
Card,
CardContent,
CardHeader,
CardTitle,
} from "@/components/__legacy__/ui/card";
import LoadingBox from "@/components/__legacy__/ui/loading";
import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
import {
globalRegistry,
OutputActions,
OutputItem,
} from "../../../../../../../../components/contextual/OutputRenderers";
export function AgentRunOutputView({
agentRunOutputs,
}: {
agentRunOutputs:
| Record<
string,
{
title?: string;
/* type: BlockIOSubType; */
values: Array<React.ReactNode>;
}
>
| undefined;
}) {
const enableEnhancedOutputHandling = useGetFlag(
Flag.ENABLE_ENHANCED_OUTPUT_HANDLING,
);
// Prepare items for the renderer system
const outputItems = useMemo(() => {
if (!agentRunOutputs) return [];
const items: Array<{
key: string;
label: string;
value: unknown;
metadata?: OutputMetadata;
renderer: any;
}> = [];
Object.entries(agentRunOutputs).forEach(([key, { title, values }]) => {
values.forEach((value, index) => {
// Enhanced metadata extraction
const metadata: OutputMetadata = {};
// Type guard to safely access properties
if (
typeof value === "object" &&
value !== null &&
!React.isValidElement(value)
) {
const objValue = value as any;
if (objValue.type) metadata.type = objValue.type;
if (objValue.mimeType) metadata.mimeType = objValue.mimeType;
if (objValue.filename) metadata.filename = objValue.filename;
}
const renderer = globalRegistry.getRenderer(value, metadata);
if (renderer) {
items.push({
key: `${key}-${index}`,
label: index === 0 ? title || key : "",
value,
metadata,
renderer,
});
} else {
const textRenderer = globalRegistry
.getAllRenderers()
.find((r) => r.name === "TextRenderer");
if (textRenderer) {
items.push({
key: `${key}-${index}`,
label: index === 0 ? title || key : "",
value: JSON.stringify(value, null, 2),
metadata,
renderer: textRenderer,
});
}
}
});
});
return items;
}, [agentRunOutputs]);
return (
<>
{enableEnhancedOutputHandling ? (
<Card className="agpt-box" style={{ maxWidth: "950px" }}>
<CardHeader>
<div className="flex items-center justify-between">
<CardTitle className="font-poppins text-lg">Output</CardTitle>
{outputItems.length > 0 && (
<OutputActions
items={outputItems.map((item) => ({
value: item.value,
metadata: item.metadata,
renderer: item.renderer,
}))}
/>
)}
</div>
</CardHeader>
<CardContent
className="flex flex-col gap-4"
style={{ maxWidth: "660px" }}
>
{agentRunOutputs !== undefined ? (
outputItems.length > 0 ? (
outputItems.map((item) => (
<OutputItem
key={item.key}
value={item.value}
metadata={item.metadata}
renderer={item.renderer}
label={item.label}
/>
))
) : (
<p className="text-sm text-muted-foreground">
No outputs to display
</p>
)
) : (
<LoadingBox spinnerSize={12} className="h-24" />
)}
</CardContent>
</Card>
) : (
<Card className="agpt-box" style={{ maxWidth: "950px" }}>
<CardHeader>
<CardTitle className="font-poppins text-lg">Output</CardTitle>
</CardHeader>
<CardContent
className="flex flex-col gap-4"
style={{ maxWidth: "660px" }}
>
{agentRunOutputs !== undefined ? (
Object.entries(agentRunOutputs).map(
([key, { title, values }]) => (
<div key={key} className="flex flex-col gap-1.5">
<label className="text-sm font-medium">
{title || key}
</label>
{values.map((value, i) => (
<p
className="resize-none overflow-x-auto whitespace-pre-wrap break-words border-none text-sm text-neutral-700 disabled:cursor-not-allowed"
key={i}
>
{value}
</p>
))}
{/* TODO: pretty type-dependent rendering */}
</div>
),
)
) : (
<LoadingBox spinnerSize={12} className="h-24" />
)}
</CardContent>
</Card>
)}
</>
);
}

View File

@@ -0,0 +1,68 @@
import React from "react";
import { Badge } from "@/components/__legacy__/ui/badge";
import { GraphExecutionMeta } from "@/lib/autogpt-server-api/types";
export type AgentRunStatus =
| "success"
| "failed"
| "queued"
| "running"
| "stopped"
| "scheduled"
| "draft"
| "review";
export const agentRunStatusMap: Record<
GraphExecutionMeta["status"],
AgentRunStatus
> = {
INCOMPLETE: "draft",
COMPLETED: "success",
FAILED: "failed",
QUEUED: "queued",
RUNNING: "running",
TERMINATED: "stopped",
REVIEW: "review",
};
const statusData: Record<
AgentRunStatus,
{ label: string; variant: keyof typeof statusStyles }
> = {
success: { label: "Success", variant: "success" },
running: { label: "Running", variant: "info" },
failed: { label: "Failed", variant: "destructive" },
queued: { label: "Queued", variant: "warning" },
draft: { label: "Draft", variant: "secondary" },
stopped: { label: "Stopped", variant: "secondary" },
scheduled: { label: "Scheduled", variant: "secondary" },
review: { label: "In Review", variant: "warning" },
};
const statusStyles = {
success:
"bg-green-100 text-green-800 hover:bg-green-100 hover:text-green-800",
destructive: "bg-red-100 text-red-800 hover:bg-red-100 hover:text-red-800",
warning:
"bg-yellow-100 text-yellow-800 hover:bg-yellow-100 hover:text-yellow-800",
info: "bg-blue-100 text-blue-800 hover:bg-blue-100 hover:text-blue-800",
secondary:
"bg-slate-100 text-slate-800 hover:bg-slate-100 hover:text-slate-800",
};
export function AgentRunStatusChip({
status,
}: {
status: AgentRunStatus;
}): React.ReactElement {
return (
<Badge
variant="secondary"
className={`text-xs font-medium ${statusStyles[statusData[status]?.variant]} rounded-[45px] px-[9px] py-[3px]`}
>
{statusData[status]?.label}
</Badge>
);
}

View File

@@ -0,0 +1,130 @@
import React from "react";
import { formatDistanceToNow, isPast } from "date-fns";
import { cn } from "@/lib/utils";
import { Link2Icon, Link2OffIcon, MoreVertical } from "lucide-react";
import { Card, CardContent } from "@/components/__legacy__/ui/card";
import { Button } from "@/components/__legacy__/ui/button";
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuTrigger,
} from "@/components/__legacy__/ui/dropdown-menu";
import { AgentStatus, AgentStatusChip } from "./agent-status-chip";
import { AgentRunStatus, AgentRunStatusChip } from "./agent-run-status-chip";
import { PushPinSimpleIcon } from "@phosphor-icons/react";
export type AgentRunSummaryProps = (
| {
type: "run";
status: AgentRunStatus;
}
| {
type: "preset";
status?: undefined;
}
| {
type: "preset.triggered";
status: AgentStatus;
}
| {
type: "schedule";
status: "scheduled";
}
) & {
title: string;
timestamp?: number | Date;
selected?: boolean;
onClick?: () => void;
// onRename: () => void;
onDelete: () => void;
onPinAsPreset?: () => void;
className?: string;
};
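// Sidebar summary card; the discriminated union above ties each card type
// (run, preset, triggered preset, schedule) to the status chip it renders.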
export function AgentRunSummaryCard({
type,
status,
title,
timestamp,
selected = false,
onClick,
// onRename,
onDelete,
onPinAsPreset,
className,
}: AgentRunSummaryProps): React.ReactElement {
return (
<Card
className={cn(
"agpt-rounded-card cursor-pointer border-zinc-300",
selected ? "agpt-card-selected" : "",
className,
)}
onClick={onClick}
>
<CardContent className="relative p-2.5 lg:p-4">
{(type == "run" || type == "schedule") && (
<AgentRunStatusChip status={status} />
)}
{type == "preset" && (
<div className="flex items-center text-sm font-medium text-neutral-700">
<PushPinSimpleIcon className="mr-1 size-4 text-foreground" /> Preset
</div>
)}
{type == "preset.triggered" && (
<div className="flex items-center justify-between">
<AgentStatusChip status={status} />
<div className="flex items-center text-sm font-medium text-neutral-700">
{status == "inactive" ? (
<Link2OffIcon className="mr-1 size-4 text-foreground" />
) : (
<Link2Icon className="mr-1 size-4 text-foreground" />
)}{" "}
Trigger
</div>
</div>
)}
<div className="mt-5 flex items-center justify-between">
<h3 className="truncate pr-2 text-base font-medium text-neutral-900">
{title}
</h3>
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button variant="ghost" className="h-5 w-5 p-0">
<MoreVertical className="h-5 w-5" />
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent>
{onPinAsPreset && (
<DropdownMenuItem onClick={onPinAsPreset}>
Pin as a preset
</DropdownMenuItem>
)}
{/* <DropdownMenuItem onClick={onRename}>Rename</DropdownMenuItem> */}
<DropdownMenuItem onClick={onDelete}>Delete</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
</div>
{timestamp && (
<p
className="mt-1 text-sm font-normal text-neutral-500"
title={new Date(timestamp).toString()}
>
{isPast(timestamp) ? "Ran" : "Runs"}{" "}
{formatDistanceToNow(timestamp, { addSuffix: true })}
</p>
)}
</CardContent>
</Card>
);
}

View File

@@ -0,0 +1,237 @@
"use client";
import { Plus } from "lucide-react";
import React, { useEffect, useState } from "react";
import {
GraphExecutionID,
GraphExecutionMeta,
LibraryAgent,
LibraryAgentPreset,
LibraryAgentPresetID,
Schedule,
ScheduleID,
} from "@/lib/autogpt-server-api";
import { cn } from "@/lib/utils";
import { Badge } from "@/components/__legacy__/ui/badge";
import { Button } from "@/components/atoms/Button/Button";
import LoadingBox, { LoadingSpinner } from "@/components/__legacy__/ui/loading";
import { Separator } from "@/components/__legacy__/ui/separator";
import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
import { InfiniteScroll } from "@/components/contextual/InfiniteScroll/InfiniteScroll";
import { AgentRunsQuery } from "../use-agent-runs";
import { agentRunStatusMap } from "./agent-run-status-chip";
import { AgentRunSummaryCard } from "./agent-run-summary-card";
interface AgentRunsSelectorListProps {
agent: LibraryAgent;
agentRunsQuery: AgentRunsQuery;
agentPresets: LibraryAgentPreset[];
schedules: Schedule[];
selectedView: { type: "run" | "preset" | "schedule"; id?: string };
allowDraftNewRun?: boolean;
onSelectRun: (id: GraphExecutionID) => void;
onSelectPreset: (preset: LibraryAgentPresetID) => void;
onSelectSchedule: (id: ScheduleID) => void;
onSelectDraftNewRun: () => void;
doDeleteRun: (id: GraphExecutionMeta) => void;
doDeletePreset: (id: LibraryAgentPresetID) => void;
doDeleteSchedule: (id: ScheduleID) => void;
doCreatePresetFromRun?: (id: GraphExecutionID) => void;
className?: string;
}
export function AgentRunsSelectorList({
agent,
agentRunsQuery: {
agentRuns,
agentRunCount,
agentRunsLoading,
hasMoreRuns,
fetchMoreRuns,
isFetchingMoreRuns,
},
agentPresets,
schedules,
selectedView,
allowDraftNewRun = true,
onSelectRun,
onSelectPreset,
onSelectSchedule,
onSelectDraftNewRun,
doDeleteRun,
doDeletePreset,
doDeleteSchedule,
doCreatePresetFromRun,
className,
}: AgentRunsSelectorListProps): React.ReactElement {
const [activeListTab, setActiveListTab] = useState<"runs" | "scheduled">(
"runs",
);
useEffect(() => {
if (selectedView.type === "schedule") {
setActiveListTab("scheduled");
} else {
setActiveListTab("runs");
}
}, [selectedView]);
const listItemClasses = "h-28 w-72 lg:w-full lg:h-32";
return (
<aside className={cn("flex flex-col gap-4", className)}>
{allowDraftNewRun ? (
<Button
className={"mb-4 hidden lg:flex"}
onClick={onSelectDraftNewRun}
leftIcon={<Plus className="h-6 w-6" />}
>
New {agent.has_external_trigger ? "trigger" : "run"}
</Button>
) : null}
<div className="flex gap-2">
<Badge
variant={activeListTab === "runs" ? "secondary" : "outline"}
className="cursor-pointer gap-2 rounded-full text-base"
onClick={() => setActiveListTab("runs")}
>
<span>Runs</span>
<span className="text-neutral-600">
{agentRunCount ?? <LoadingSpinner className="size-4" />}
</span>
</Badge>
<Badge
variant={activeListTab === "scheduled" ? "secondary" : "outline"}
className="cursor-pointer gap-2 rounded-full text-base"
onClick={() => setActiveListTab("scheduled")}
>
<span>Scheduled</span>
<span className="text-neutral-600">{schedules.length}</span>
</Badge>
</div>
{/* Runs / Schedules list */}
{agentRunsLoading && activeListTab === "runs" ? (
<LoadingBox className="h-28 w-full lg:h-[calc(100vh-300px)] lg:w-72 xl:w-80" />
) : (
<ScrollArea
className="w-full lg:h-[calc(100vh-300px)] lg:w-72 xl:w-80"
orientation={window.innerWidth >= 1024 ? "vertical" : "horizontal"}
>
<InfiniteScroll
direction={window.innerWidth >= 1024 ? "vertical" : "horizontal"}
hasNextPage={hasMoreRuns}
fetchNextPage={fetchMoreRuns}
isFetchingNextPage={isFetchingMoreRuns}
>
<div className="flex items-center gap-2 lg:flex-col">
{/* New Run button - only in small layouts */}
{allowDraftNewRun && (
<Button
size="large"
className={
"flex h-12 w-40 items-center gap-2 py-6 lg:hidden " +
(selectedView.type == "run" && !selectedView.id
? "agpt-card-selected text-accent"
: "")
}
onClick={onSelectDraftNewRun}
leftIcon={<Plus className="h-6 w-6" />}
>
New {agent.has_external_trigger ? "trigger" : "run"}
</Button>
)}
{activeListTab === "runs" ? (
<>
{agentPresets
.filter((preset) => preset.webhook) // Triggers
.toSorted(
(a, b) => b.updated_at.getTime() - a.updated_at.getTime(),
)
.map((preset) => (
<AgentRunSummaryCard
className={cn(listItemClasses, "lg:h-auto")}
key={preset.id}
type="preset.triggered"
status={preset.is_active ? "active" : "inactive"}
title={preset.name}
// timestamp={preset.last_run_time} // TODO: implement this
selected={selectedView.id === preset.id}
onClick={() => onSelectPreset(preset.id)}
onDelete={() => doDeletePreset(preset.id)}
/>
))}
{agentPresets
.filter((preset) => !preset.webhook) // Presets
.toSorted(
(a, b) => b.updated_at.getTime() - a.updated_at.getTime(),
)
.map((preset) => (
<AgentRunSummaryCard
className={cn(listItemClasses, "lg:h-auto")}
key={preset.id}
type="preset"
title={preset.name}
// timestamp={preset.last_run_time} // TODO: implement this
selected={selectedView.id === preset.id}
onClick={() => onSelectPreset(preset.id)}
onDelete={() => doDeletePreset(preset.id)}
/>
))}
{agentPresets.length > 0 && <Separator className="my-1" />}
{agentRuns
.toSorted((a, b) => {
const aTime = a.started_at?.getTime() ?? 0;
const bTime = b.started_at?.getTime() ?? 0;
return bTime - aTime;
})
.map((run) => (
<AgentRunSummaryCard
className={listItemClasses}
key={run.id}
type="run"
status={agentRunStatusMap[run.status]}
title={
(run.preset_id
? agentPresets.find((p) => p.id == run.preset_id)
?.name
: null) ?? agent.name
}
timestamp={run.started_at ?? undefined}
selected={selectedView.id === run.id}
onClick={() => onSelectRun(run.id)}
onDelete={() => doDeleteRun(run as GraphExecutionMeta)}
onPinAsPreset={
doCreatePresetFromRun
? () => doCreatePresetFromRun(run.id)
: undefined
}
/>
))}
</>
) : (
schedules.map((schedule) => (
<AgentRunSummaryCard
className={listItemClasses}
key={schedule.id}
type="schedule"
status="scheduled" // TODO: implement active/inactive status for schedules
title={schedule.name}
timestamp={schedule.next_run_time}
selected={selectedView.id === schedule.id}
onClick={() => onSelectSchedule(schedule.id)}
onDelete={() => doDeleteSchedule(schedule.id)}
/>
))
)}
</div>
</InfiniteScroll>
</ScrollArea>
)}
</aside>
);
}

View File

@@ -0,0 +1,180 @@
"use client";
import React, { useCallback, useMemo } from "react";
import {
Graph,
GraphExecutionID,
Schedule,
ScheduleID,
} from "@/lib/autogpt-server-api";
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
import ActionButtonGroup from "@/components/__legacy__/action-button-group";
import type { ButtonAction } from "@/components/__legacy__/types";
import {
Card,
CardContent,
CardHeader,
CardTitle,
} from "@/components/__legacy__/ui/card";
import { IconCross } from "@/components/__legacy__/ui/icons";
import { Input } from "@/components/__legacy__/ui/input";
import LoadingBox from "@/components/__legacy__/ui/loading";
import { useToastOnFail } from "@/components/molecules/Toast/use-toast";
import { humanizeCronExpression } from "@/lib/cron-expression-utils";
import { formatScheduleTime } from "@/lib/timezone-utils";
import { useUserTimezone } from "@/lib/hooks/useUserTimezone";
import { PlayIcon } from "lucide-react";
import { AgentRunStatus } from "./agent-run-status-chip";
export function AgentScheduleDetailsView({
graph,
schedule,
agentActions,
onForcedRun,
doDeleteSchedule,
}: {
graph: Graph;
schedule: Schedule;
agentActions: ButtonAction[];
onForcedRun: (runID: GraphExecutionID) => void;
doDeleteSchedule: (scheduleID: ScheduleID) => void;
}): React.ReactNode {
const api = useBackendAPI();
const selectedRunStatus: AgentRunStatus = "scheduled";
const toastOnFail = useToastOnFail();
// Get user's timezone for displaying schedule times
const userTimezone = useUserTimezone();
const infoStats: { label: string; value: React.ReactNode }[] = useMemo(() => {
return [
{
label: "Status",
value:
selectedRunStatus.charAt(0).toUpperCase() +
selectedRunStatus.slice(1),
},
{
label: "Schedule",
value: humanizeCronExpression(schedule.cron),
},
{
label: "Next run",
value: formatScheduleTime(schedule.next_run_time, userTimezone),
},
];
}, [schedule, selectedRunStatus, userTimezone]);
const agentRunInputs: Record<
string,
{ title?: string; /* type: BlockIOSubType; */ value: any }
> = useMemo(() => {
// TODO: show (link to) preset - https://github.com/Significant-Gravitas/AutoGPT/issues/9168
// Add type info from agent input schema
return Object.fromEntries(
Object.entries(schedule.input_data).map(([k, v]) => [
k,
{
title: graph.input_schema.properties[k].title,
/* TODO: type: agent.input_schema.properties[k].type */
value: v,
},
]),
);
}, [graph, schedule]);
const runNow = useCallback(
() =>
api
.executeGraph(
graph.id,
graph.version,
schedule.input_data,
schedule.input_credentials,
"library",
)
.then((run) => onForcedRun(run.id))
.catch(toastOnFail("execute agent")),
[api, graph, schedule, onForcedRun, toastOnFail],
);
const runActions: ButtonAction[] = useMemo(
() => [
{
label: (
<>
<PlayIcon className="mr-2 size-4" />
Run now
</>
),
callback: runNow,
},
{
label: (
<>
<IconCross className="mr-2 size-4 px-0.5" />
Delete schedule
</>
),
callback: () => doDeleteSchedule(schedule.id),
variant: "destructive",
},
],
[runNow, doDeleteSchedule, schedule.id],
);
return (
<div className="agpt-div flex gap-6">
<div className="flex flex-1 flex-col gap-4">
<Card className="agpt-box">
<CardHeader>
<CardTitle className="font-poppins text-lg">Info</CardTitle>
</CardHeader>
<CardContent>
<div className="flex justify-stretch gap-4">
{infoStats.map(({ label, value }) => (
<div key={label} className="flex-1">
<p className="text-sm font-medium text-black">{label}</p>
<p className="text-sm text-neutral-600">{value}</p>
</div>
))}
</div>
</CardContent>
</Card>
<Card className="agpt-box">
<CardHeader>
<CardTitle className="font-poppins text-lg">Input</CardTitle>
</CardHeader>
<CardContent className="flex flex-col gap-4">
{agentRunInputs !== undefined ? (
Object.entries(agentRunInputs).map(([key, { title, value }]) => (
<div key={key} className="flex flex-col gap-1.5">
<label className="text-sm font-medium">{title || key}</label>
<Input value={value} className="rounded-full" disabled />
</div>
))
) : (
<LoadingBox spinnerSize={12} className="h-24" />
)}
</CardContent>
</Card>
</div>
{/* Run / Agent Actions */}
<aside className="w-48 xl:w-56">
<div className="flex flex-col gap-8">
<ActionButtonGroup title="Run actions" actions={runActions} />
<ActionButtonGroup title="Agent actions" actions={agentActions} />
</div>
</aside>
</div>
);
}

View File

@@ -0,0 +1,100 @@
"use client";
import React, { useState } from "react";
import { Button } from "@/components/__legacy__/ui/button";
import {
Dialog,
DialogContent,
DialogDescription,
DialogFooter,
DialogHeader,
DialogTitle,
} from "@/components/__legacy__/ui/dialog";
import { Input } from "@/components/__legacy__/ui/input";
import { Textarea } from "@/components/__legacy__/ui/textarea";
interface CreatePresetDialogProps {
open: boolean;
onOpenChange: (open: boolean) => void;
onConfirm: (name: string, description: string) => Promise<void> | void;
}
export function CreatePresetDialog({
open,
onOpenChange,
onConfirm,
}: CreatePresetDialogProps) {
const [name, setName] = useState("");
const [description, setDescription] = useState("");
const handleSubmit = async () => {
if (name.trim()) {
await onConfirm(name.trim(), description.trim());
setName("");
setDescription("");
onOpenChange(false);
}
};
const handleCancel = () => {
setName("");
setDescription("");
onOpenChange(false);
};
const handleKeyDown = (e: React.KeyboardEvent) => {
if (e.key === "Enter" && (e.metaKey || e.ctrlKey)) {
e.preventDefault();
handleSubmit();
}
};
return (
<Dialog open={open} onOpenChange={onOpenChange}>
<DialogContent className="sm:max-w-[425px]">
<DialogHeader>
<DialogTitle>Create Preset</DialogTitle>
<DialogDescription>
Give your preset a name and description to help identify it later.
</DialogDescription>
</DialogHeader>
<div className="grid gap-4 py-4">
<div className="grid gap-2">
<label htmlFor="preset-name" className="text-sm font-medium">
Name *
</label>
<Input
id="preset-name"
placeholder="Enter preset name"
value={name}
onChange={(e) => setName(e.target.value)}
onKeyDown={handleKeyDown}
autoFocus
/>
</div>
<div className="grid gap-2">
<label htmlFor="preset-description" className="text-sm font-medium">
Description
</label>
<Textarea
id="preset-description"
placeholder="Optional description"
value={description}
onChange={(e) => setDescription(e.target.value)}
onKeyDown={handleKeyDown}
rows={3}
/>
</div>
</div>
<DialogFooter>
<Button variant="outline" onClick={handleCancel}>
Cancel
</Button>
<Button onClick={handleSubmit} disabled={!name.trim()}>
Create Preset
</Button>
</DialogFooter>
</DialogContent>
</Dialog>
);
}

View File

@@ -2,7 +2,7 @@ import { useEffect, useState } from "react";
import { Input } from "@/components/__legacy__/ui/input";
import { Button } from "@/components/__legacy__/ui/button";
import { useToast } from "@/components/molecules/Toast/use-toast";
import { CronScheduler } from "@/components/contextual/CronScheduler/cron-scheduler";
import { CronScheduler } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/cron-scheduler";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { getTimezoneDisplayName } from "@/lib/timezone-utils";
import { useUserTimezone } from "@/lib/hooks/useUserTimezone";

View File

@@ -0,0 +1,210 @@
import {
GraphExecutionMeta as LegacyGraphExecutionMeta,
GraphID,
GraphExecutionID,
} from "@/lib/autogpt-server-api";
import { getQueryClient } from "@/lib/react-query/queryClient";
import {
getPaginatedTotalCount,
getPaginationNextPageNumber,
unpaginate,
} from "@/app/api/helpers";
import {
getV1ListGraphExecutionsResponse,
getV1ListGraphExecutionsResponse200,
useGetV1ListGraphExecutionsInfinite,
} from "@/app/api/__generated__/endpoints/graphs/graphs";
import { GraphExecutionsPaginated } from "@/app/api/__generated__/models/graphExecutionsPaginated";
import { GraphExecutionMeta as RawGraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta";
export type GraphExecutionMeta = Omit<
RawGraphExecutionMeta,
"id" | "user_id" | "graph_id" | "preset_id" | "stats"
> &
Pick<
LegacyGraphExecutionMeta,
"id" | "user_id" | "graph_id" | "preset_id" | "stats"
>;
/** Hook to fetch runs for a specific graph, with support for infinite scroll.
*
* @param graphID - The ID of the graph to fetch agent runs for. The parameter is
* optional; the underlying query is disabled until it is provided, so the
* hook can be used in components where the graph ID is not immediately
* available.
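*
* @example
* // Minimal usage sketch (assumes a graph ID from the surrounding component):
* const { agentRuns, hasMoreRuns, fetchMoreRuns } = useAgentRunsInfinite(agent?.graph_id);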
*/
export const useAgentRunsInfinite = (graphID?: GraphID) => {
const queryClient = getQueryClient();
const {
data: queryResults,
refetch: refetchRuns,
isPending: agentRunsLoading,
isRefetching: agentRunsReloading,
hasNextPage: hasMoreRuns,
fetchNextPage: fetchMoreRuns,
isFetchingNextPage: isFetchingMoreRuns,
queryKey,
} = useGetV1ListGraphExecutionsInfinite(
graphID!,
{ page: 1, page_size: 20 },
{
query: {
getNextPageParam: getPaginationNextPageNumber,
// Prevent query from running if graphID is not available (yet)
...(!graphID
? {
enabled: false,
queryFn: () =>
// Fake empty response if graphID is not available (yet)
Promise.resolve({
status: 200,
data: {
executions: [],
pagination: {
current_page: 1,
page_size: 20,
total_items: 0,
total_pages: 0,
},
},
headers: new Headers(),
} satisfies getV1ListGraphExecutionsResponse),
}
: {}),
},
},
queryClient,
);
const agentRuns = queryResults ? unpaginate(queryResults, "executions") : [];
const agentRunCount = getPaginatedTotalCount(queryResults);
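// Merge a new or updated run into the cached infinite-query pages so that
// WebSocket execution events are reflected immediately, without a refetch.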
const upsertAgentRun = (newAgentRun: GraphExecutionMeta) => {
queryClient.setQueryData(
queryKey,
(currentQueryData: typeof queryResults) => {
if (!currentQueryData?.pages || agentRunCount === undefined)
return currentQueryData;
const exists = currentQueryData.pages.some((page) => {
if (page.status !== 200) return false;
const response = page.data;
return response.executions.some((run) => run.id === newAgentRun.id);
});
if (exists) {
// If the run already exists, we update it
return {
...currentQueryData,
pages: currentQueryData.pages.map((page) => {
if (page.status !== 200) return page;
const response = page.data;
const executions = response.executions;
const index = executions.findIndex(
(run) => run.id === newAgentRun.id,
);
if (index === -1) return page;
const newExecutions = [...executions];
newExecutions[index] = newAgentRun;
return {
...page,
data: {
...response,
executions: newExecutions,
},
} satisfies getV1ListGraphExecutionsResponse;
}),
};
}
// If the run does not exist, we add it to the first page
const page = currentQueryData
.pages[0] as getV1ListGraphExecutionsResponse200 & {
headers: Headers;
};
const updatedExecutions = [newAgentRun, ...page.data.executions];
const updatedPage = {
...page,
data: {
...page.data,
executions: updatedExecutions,
},
} satisfies getV1ListGraphExecutionsResponse;
const updatedPages = [updatedPage, ...currentQueryData.pages.slice(1)];
return {
...currentQueryData,
pages: updatedPages.map(
// Increment the total runs count in the pagination info of all pages
(page) =>
page.status === 200
? {
...page,
data: {
...page.data,
pagination: {
...page.data.pagination,
total_items: agentRunCount + 1,
},
},
}
: page,
),
};
},
);
};
const removeAgentRun = (runID: GraphExecutionID) => {
queryClient.setQueryData(
queryKey,
(currentQueryData: typeof queryResults) => {
if (!currentQueryData?.pages) return currentQueryData;
let found = false;
return {
...currentQueryData,
pages: currentQueryData.pages.map((page) => {
const response = page.data as GraphExecutionsPaginated;
const filteredExecutions = response.executions.filter(
(run) => run.id !== runID,
);
if (filteredExecutions.length < response.executions.length) {
found = true;
}
return {
...page,
data: {
...response,
executions: filteredExecutions,
pagination: {
...response.pagination,
total_items:
response.pagination.total_items - (found ? 1 : 0),
},
},
};
}),
};
},
);
};
return {
agentRuns: agentRuns as GraphExecutionMeta[],
refetchRuns,
agentRunCount,
agentRunsLoading: agentRunsLoading || agentRunsReloading,
hasMoreRuns,
fetchMoreRuns,
isFetchingMoreRuns,
upsertAgentRun,
removeAgentRun,
};
};
export type AgentRunsQuery = ReturnType<typeof useAgentRunsInfinite>;

View File

@@ -0,0 +1,7 @@
"use client";
import { OldAgentLibraryView } from "../../agents/[id]/components/OldAgentLibraryView/OldAgentLibraryView";
export default function OldAgentLibraryPage() {
return <OldAgentLibraryView />;
}

View File

@@ -180,14 +180,3 @@ body[data-google-picker-open="true"] [data-dialog-content] {
z-index: 1 !important;
pointer-events: none !important;
}
/* CoPilot chat table styling — remove left/right borders, increase padding */
[data-streamdown="table-wrapper"] table {
border-left: none;
border-right: none;
}
[data-streamdown="table-wrapper"] th,
[data-streamdown="table-wrapper"] td {
padding: 0.875rem 1rem; /* py-3.5 px-4 */
}

View File

@@ -4,7 +4,9 @@ import { loadScript } from "@/services/scripts/scripts";
export async function loadGoogleAPIPicker(): Promise<void> {
validateWindow();
await loadScript("https://apis.google.com/js/api.js");
await loadScript("https://apis.google.com/js/api.js", {
referrerPolicy: "no-referrer-when-downgrade",
});
const googleAPI = window.gapi;
if (!googleAPI) {
@@ -27,7 +29,9 @@ export async function loadGoogleIdentityServices(): Promise<void> {
throw new Error("Google Identity Services cannot load on server");
}
await loadScript("https://accounts.google.com/gsi/client");
await loadScript("https://accounts.google.com/gsi/client", {
referrerPolicy: "no-referrer-when-downgrade",
});
const google = window.google;
if (!google?.accounts?.oauth2) {

View File

@@ -226,7 +226,7 @@ function renderMarkdown(
table: ({ children, ...props }) => (
<div className="my-4 overflow-x-auto">
<table
className="min-w-full divide-y divide-gray-200 border-y border-gray-200 dark:divide-gray-700 dark:border-gray-700"
className="min-w-full divide-y divide-gray-200 rounded-lg border border-gray-200 dark:divide-gray-700 dark:border-gray-700"
{...props}
>
{children}
@@ -235,7 +235,7 @@ function renderMarkdown(
),
th: ({ children, ...props }) => (
<th
className="bg-gray-50 px-4 py-3.5 text-left text-xs font-semibold uppercase tracking-wider text-gray-700 dark:bg-gray-800 dark:text-gray-300"
className="bg-gray-50 px-4 py-3 text-left text-xs font-semibold uppercase tracking-wider text-gray-700 dark:bg-gray-800 dark:text-gray-300"
{...props}
>
{children}
@@ -243,7 +243,7 @@ function renderMarkdown(
),
td: ({ children, ...props }) => (
<td
className="border-t border-gray-200 px-4 py-3.5 text-sm text-gray-600 dark:border-gray-700 dark:text-gray-400"
className="border-t border-gray-200 px-4 py-3 text-sm text-gray-600 dark:border-gray-700 dark:text-gray-400"
{...props}
>
{children}

View File

@@ -1,6 +1,6 @@
"use client";
import { CronExpressionDialog } from "@/components/contextual/CronScheduler/cron-scheduler-dialog";
import { CronExpressionDialog } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/cron-scheduler-dialog";
import { Form, FormField } from "@/components/__legacy__/ui/form";
import { Button } from "@/components/atoms/Button/Button";
import { Input } from "@/components/atoms/Input/Input";

View File

@@ -7,6 +7,7 @@ import { useFlags } from "launchdarkly-react-client-sdk";
export enum Flag {
BETA_BLOCKS = "beta-blocks",
NEW_BLOCK_MENU = "new-block-menu",
NEW_AGENT_RUNS = "new-agent-runs",
GRAPH_SEARCH = "graph-search",
ENABLE_ENHANCED_OUTPUT_HANDLING = "enable-enhanced-output-handling",
SHARE_EXECUTION_RESULTS = "share-execution-results",
@@ -21,6 +22,7 @@ const isPwMockEnabled = process.env.NEXT_PUBLIC_PW_TEST === "true";
const defaultFlags = {
[Flag.BETA_BLOCKS]: [],
[Flag.NEW_BLOCK_MENU]: false,
[Flag.NEW_AGENT_RUNS]: false,
[Flag.GRAPH_SEARCH]: false,
[Flag.ENABLE_ENHANCED_OUTPUT_HANDLING]: false,
[Flag.SHARE_EXECUTION_RESULTS]: false,