From ca216dfd7f91ef1e79c3129879b31a8a1d2b79a9 Mon Sep 17 00:00:00 2001 From: Otto Date: Fri, 13 Feb 2026 16:46:23 +0000 Subject: [PATCH 1/7] ci(docs-claude-review): Update comments instead of creating new ones (#12106) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes 🏗️ This PR updates the Claude Block Docs Review CI workflow to update existing comments instead of creating new ones on each push. ### What's Changed: 1. **Concurrency group** - Prevents race conditions if the workflow runs twice simultaneously 2. **Comment cleanup step** - Deletes any previous Claude review comment before posting a new one 3. **Marker instruction** - Instructs Claude to include a `` marker in its comment for identification ### Why: Previously, every PR push would create a new review comment, cluttering the PR with multiple comments. Now only the most recent review is shown. ### Testing: 1. Create a PR that triggers this workflow (modify a file in `docs/integrations/` or `autogpt_platform/backend/backend/blocks/`) 2. Verify first run creates comment with marker 3. Push another commit 4. Verify old comment is deleted and new comment is created (not accumulated) Requested by @Bentlybro --- ## Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [ ] I have made a test plan - [ ] I have tested my changes according to the test plan (will be tested on merge) #### For configuration changes: - [x] `.env.default` is updated or already compatible with my changes - [x] `docker-compose.yml` is updated or already compatible with my changes - [x] I have included a list of my configuration changes in the PR description (under **Changes**)

Greptile Overview

Greptile Summary

Added concurrency control and comment deduplication to prevent multiple Claude review comments from accumulating on PRs. The workflow now deletes previous review comments (identified by the `<!-- CLAUDE_DOCS_REVIEW -->` marker) before posting new ones, and uses concurrency groups to prevent race conditions.

Confidence Score: 5/5

- This PR is safe to merge with minimal risk - The changes are well-contained, follow GitHub Actions best practices, and use built-in GitHub APIs safely. The concurrency control prevents race conditions, and the comment cleanup logic uses proper filtering with `head -n -1` (keeping only the newest comment) to handle edge cases. The HTML comment marker approach is standard and reliable. - No files require special attention

Sequence Diagram

```mermaid sequenceDiagram participant GH as GitHub PR Event participant WF as Workflow participant API as GitHub API participant Claude as Claude Action GH->>WF: PR opened/synchronized WF->>WF: Check concurrency group Note over WF: Cancel any in-progress runs
for same PR number WF->>API: Query PR comments API-->>WF: Return all comments WF->>WF: Filter for CLAUDE_DOCS_REVIEW marker alt Previous comment exists WF->>API: DELETE comment by ID API-->>WF: Comment deleted else No previous comment WF->>WF: Skip deletion end WF->>Claude: Run code review Claude->>API: POST new comment with marker API-->>Claude: Comment created ```
Last reviewed commit: fb1b436 --- .github/workflows/docs-claude-review.yml | 34 ++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/.github/workflows/docs-claude-review.yml b/.github/workflows/docs-claude-review.yml index ca2788b387..19d5dd667b 100644 --- a/.github/workflows/docs-claude-review.yml +++ b/.github/workflows/docs-claude-review.yml @@ -7,6 +7,10 @@ on: - "docs/integrations/**" - "autogpt_platform/backend/backend/blocks/**" +concurrency: + group: claude-docs-review-${{ github.event.pull_request.number }} + cancel-in-progress: true + jobs: claude-review: # Only run for PRs from members/collaborators @@ -91,5 +95,35 @@ jobs: 3. Read corresponding documentation files to verify accuracy 4. Provide your feedback as a PR comment + ## IMPORTANT: Comment Marker + Start your PR comment with exactly this HTML comment marker on its own line: + + + This marker is used to identify and replace your comment on subsequent runs. + Be constructive and specific. If everything looks good, say so! If there are issues, explain what's wrong and suggest how to fix it. + + - name: Delete old Claude review comments + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + # Get all comment IDs with our marker, sorted by creation date (oldest first) + COMMENT_IDS=$(gh api \ + repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/comments \ + --jq '[.[] | select(.body | contains(""))] | sort_by(.created_at) | .[].id') + + # Count comments + COMMENT_COUNT=$(echo "$COMMENT_IDS" | grep -c . 
|| true) + + if [ "$COMMENT_COUNT" -gt 1 ]; then + # Delete all but the last (newest) comment + echo "$COMMENT_IDS" | head -n -1 | while read -r COMMENT_ID; do + if [ -n "$COMMENT_ID" ]; then + echo "Deleting old review comment: $COMMENT_ID" + gh api -X DELETE repos/${{ github.repository }}/issues/comments/$COMMENT_ID + fi + done + else + echo "No old review comments to clean up" + fi From b8f5c208d08e313306ad3ee87020d8746d9afbb4 Mon Sep 17 00:00:00 2001 From: DEEVEN SERU <144827577+DEVELOPER-DEEVEN@users.noreply.github.com> Date: Sat, 14 Feb 2026 00:45:09 +0530 Subject: [PATCH 2/7] Handle errors in Jina ExtractWebsiteContentBlock (#12048) ## Summary - catch Jina reader client/server errors in ExtractWebsiteContentBlock and surface a clear error output keyed to the user URL - guard empty responses to return an explicit error instead of yielding blank content - add regression tests covering the happy path and HTTP client failures via a monkeypatched fetch ## Testing - not run (pytest unavailable in this environment) --------- Co-authored-by: Nicholas Tindle Co-authored-by: Nicholas Tindle --- .../backend/backend/blocks/jina/search.py | 25 ++++++- .../test/blocks/test_jina_extract_website.py | 66 +++++++++++++++++++ 2 files changed, 89 insertions(+), 2 deletions(-) create mode 100644 autogpt_platform/backend/test/blocks/test_jina_extract_website.py diff --git a/autogpt_platform/backend/backend/blocks/jina/search.py b/autogpt_platform/backend/backend/blocks/jina/search.py index 22a883fa03..5e58ddcab4 100644 --- a/autogpt_platform/backend/backend/blocks/jina/search.py +++ b/autogpt_platform/backend/backend/blocks/jina/search.py @@ -17,6 +17,7 @@ from backend.blocks.jina._auth import ( from backend.blocks.search import GetRequest from backend.data.model import SchemaField from backend.util.exceptions import BlockExecutionError +from backend.util.request import HTTPClientError, HTTPServerError, validate_url class SearchTheWebBlock(Block, GetRequest): @@ -110,7 +111,12 @@ 
class ExtractWebsiteContentBlock(Block, GetRequest): self, input_data: Input, *, credentials: JinaCredentials, **kwargs ) -> BlockOutput: if input_data.raw_content: - url = input_data.url + try: + parsed_url, _, _ = await validate_url(input_data.url, []) + url = parsed_url.geturl() + except ValueError as e: + yield "error", f"Invalid URL: {e}" + return headers = {} else: url = f"https://r.jina.ai/{input_data.url}" @@ -119,5 +125,20 @@ class ExtractWebsiteContentBlock(Block, GetRequest): "Authorization": f"Bearer {credentials.api_key.get_secret_value()}", } - content = await self.get_request(url, json=False, headers=headers) + try: + content = await self.get_request(url, json=False, headers=headers) + except HTTPClientError as e: + yield "error", f"Client error ({e.status_code}) fetching {input_data.url}: {e}" + return + except HTTPServerError as e: + yield "error", f"Server error ({e.status_code}) fetching {input_data.url}: {e}" + return + except Exception as e: + yield "error", f"Failed to fetch {input_data.url}: {e}" + return + + if not content: + yield "error", f"No content returned for {input_data.url}" + return + yield "content", content diff --git a/autogpt_platform/backend/test/blocks/test_jina_extract_website.py b/autogpt_platform/backend/test/blocks/test_jina_extract_website.py new file mode 100644 index 0000000000..335c43f966 --- /dev/null +++ b/autogpt_platform/backend/test/blocks/test_jina_extract_website.py @@ -0,0 +1,66 @@ +from typing import cast + +import pytest + +from backend.blocks.jina._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + JinaCredentialsInput, +) +from backend.blocks.jina.search import ExtractWebsiteContentBlock +from backend.util.request import HTTPClientError + + +@pytest.mark.asyncio +async def test_extract_website_content_returns_content(monkeypatch): + block = ExtractWebsiteContentBlock() + input_data = block.Input( + url="https://example.com", + credentials=cast(JinaCredentialsInput, TEST_CREDENTIALS_INPUT), + 
raw_content=True, + ) + + async def fake_get_request(url, json=False, headers=None): + assert url == "https://example.com" + assert headers == {} + return "page content" + + monkeypatch.setattr(block, "get_request", fake_get_request) + + results = [ + output + async for output in block.run( + input_data=input_data, credentials=TEST_CREDENTIALS + ) + ] + + assert ("content", "page content") in results + assert all(key != "error" for key, _ in results) + + +@pytest.mark.asyncio +async def test_extract_website_content_handles_http_error(monkeypatch): + block = ExtractWebsiteContentBlock() + input_data = block.Input( + url="https://example.com", + credentials=cast(JinaCredentialsInput, TEST_CREDENTIALS_INPUT), + raw_content=False, + ) + + async def fake_get_request(url, json=False, headers=None): + raise HTTPClientError("HTTP 400 Error: Bad Request", 400) + + monkeypatch.setattr(block, "get_request", fake_get_request) + + results = [ + output + async for output in block.run( + input_data=input_data, credentials=TEST_CREDENTIALS + ) + ] + + assert ("content", "page content") not in results + error_messages = [value for key, value in results if key == "error"] + assert error_messages + assert "Client error (400)" in error_messages[0] + assert "https://example.com" in error_messages[0] From 27d94e395cc8d191a1b284dda2b42572ed9d4796 Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Sun, 15 Feb 2026 10:51:25 +0400 Subject: [PATCH 3/7] feat(backend/sdk): enable WebSearch, block WebFetch, consolidate tool constants (#12108) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary - Enable Claude Agent SDK built-in **WebSearch** tool (Brave Search via Anthropic API) for the CoPilot SDK agent - Explicitly **block WebFetch** via `SDK_DISALLOWED_TOOLS`. 
The agent uses the SSRF-protected `mcp__copilot__web_fetch` MCP tool instead - **Consolidate** all tool security constants (`BLOCKED_TOOLS`, `WORKSPACE_SCOPED_TOOLS`, `DANGEROUS_PATTERNS`, `SDK_DISALLOWED_TOOLS`) into `tool_adapter.py` as a single source of truth — previously scattered across `tool_adapter.py`, `security_hooks.py`, and inline in `service.py` ## Changes - `tool_adapter.py`: Add `WebSearch` to `_SDK_BUILTIN_TOOLS`, add `SDK_DISALLOWED_TOOLS`, move security constants here - `security_hooks.py`: Import constants from `tool_adapter.py` instead of defining locally - `service.py`: Use `SDK_DISALLOWED_TOOLS` instead of inline `["Bash"]` ## Test plan - [x] All 21 security hooks tests pass - [x] Ruff lint clean - [x] All pre-commit hooks pass - [ ] Verify WebSearch works in CoPilot chat (manual test)

Greptile Overview

Greptile Summary

Consolidates tool security constants into `tool_adapter.py` as single source of truth, enables WebSearch (Brave via Anthropic API), and explicitly blocks WebFetch to prevent SSRF attacks. The change improves security by ensuring the agent uses the SSRF-protected `mcp__copilot__web_fetch` tool instead of the built-in WebFetch which can access internal networks like `localhost:8006`.

Confidence Score: 5/5

- This PR is safe to merge with minimal risk - The changes improve security by blocking WebFetch (SSRF risk) while enabling safe WebSearch. The consolidation of constants into a single source of truth improves maintainability. All existing tests pass (21 security hooks tests), and the refactoring is straightforward with no behavioral changes to existing security logic. The only suggestions are minor improvements: adding a test for WebFetch blocking and considering a lowercase alias for consistency. - No files require special attention

Sequence Diagram

```mermaid sequenceDiagram participant Agent as SDK Agent participant Hooks as Security Hooks participant TA as tool_adapter.py participant MCP as MCP Tools Note over TA: SDK_DISALLOWED_TOOLS = ["Bash", "WebFetch"] Note over TA: _SDK_BUILTIN_TOOLS includes WebSearch Agent->>Hooks: Request WebSearch (Brave API) Hooks->>TA: Check BLOCKED_TOOLS TA-->>Hooks: Not blocked Hooks-->>Agent: Allowed ✓ Agent->>Agent: Execute via Anthropic API Agent->>Hooks: Request WebFetch (SSRF risk) Hooks->>TA: Check BLOCKED_TOOLS Note over TA: WebFetch in SDK_DISALLOWED_TOOLS TA-->>Hooks: Blocked Hooks-->>Agent: Denied ✗ Note over Agent: Use mcp__copilot__web_fetch instead Agent->>Hooks: Request mcp__copilot__web_fetch Hooks->>MCP: Validate (MCP tool, not SDK builtin) MCP-->>Hooks: Has SSRF protection Hooks-->>Agent: Allowed ✓ Agent->>MCP: Execute with SSRF checks ```
Last reviewed commit: 2d9975f --- .../api/features/chat/sdk/security_hooks.py | 42 +++--------------- .../backend/api/features/chat/sdk/service.py | 3 +- .../api/features/chat/sdk/tool_adapter.py | 43 ++++++++++++++++++- 3 files changed, 50 insertions(+), 38 deletions(-) diff --git a/autogpt_platform/backend/backend/api/features/chat/sdk/security_hooks.py b/autogpt_platform/backend/backend/api/features/chat/sdk/security_hooks.py index 14efc6d459..89853402a3 100644 --- a/autogpt_platform/backend/backend/api/features/chat/sdk/security_hooks.py +++ b/autogpt_platform/backend/backend/api/features/chat/sdk/security_hooks.py @@ -11,45 +11,15 @@ import re from collections.abc import Callable from typing import Any, cast -from backend.api.features.chat.sdk.tool_adapter import MCP_TOOL_PREFIX +from backend.api.features.chat.sdk.tool_adapter import ( + BLOCKED_TOOLS, + DANGEROUS_PATTERNS, + MCP_TOOL_PREFIX, + WORKSPACE_SCOPED_TOOLS, +) logger = logging.getLogger(__name__) -# Tools that are blocked entirely (CLI/system access). -# "Bash" (capital) is the SDK built-in — it's NOT in allowed_tools but blocked -# here as defence-in-depth. The agent uses mcp__copilot__bash_exec instead, -# which has kernel-level network isolation (unshare --net). -BLOCKED_TOOLS = { - "Bash", - "bash", - "shell", - "exec", - "terminal", - "command", -} - -# Tools allowed only when their path argument stays within the SDK workspace. -# The SDK uses these to handle oversized tool results (writes to tool-results/ -# files, then reads them back) and for workspace file operations. 
-WORKSPACE_SCOPED_TOOLS = {"Read", "Write", "Edit", "Glob", "Grep"} - -# Dangerous patterns in tool inputs -DANGEROUS_PATTERNS = [ - r"sudo", - r"rm\s+-rf", - r"dd\s+if=", - r"/etc/passwd", - r"/etc/shadow", - r"chmod\s+777", - r"curl\s+.*\|.*sh", - r"wget\s+.*\|.*sh", - r"eval\s*\(", - r"exec\s*\(", - r"__import__", - r"os\.system", - r"subprocess", -] - def _deny(reason: str) -> dict[str, Any]: """Return a hook denial response.""" diff --git a/autogpt_platform/backend/backend/api/features/chat/sdk/service.py b/autogpt_platform/backend/backend/api/features/chat/sdk/service.py index 65195b442c..65c4cebb06 100644 --- a/autogpt_platform/backend/backend/api/features/chat/sdk/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/sdk/service.py @@ -41,6 +41,7 @@ from .response_adapter import SDKResponseAdapter from .security_hooks import create_security_hooks from .tool_adapter import ( COPILOT_TOOL_NAMES, + SDK_DISALLOWED_TOOLS, LongRunningCallback, create_copilot_mcp_server, set_execution_context, @@ -543,7 +544,7 @@ async def stream_chat_completion_sdk( "system_prompt": system_prompt, "mcp_servers": {"copilot": mcp_server}, "allowed_tools": COPILOT_TOOL_NAMES, - "disallowed_tools": ["Bash"], + "disallowed_tools": SDK_DISALLOWED_TOOLS, "hooks": security_hooks, "cwd": sdk_cwd, "max_buffer_size": config.claude_agent_max_buffer_size, diff --git a/autogpt_platform/backend/backend/api/features/chat/sdk/tool_adapter.py b/autogpt_platform/backend/backend/api/features/chat/sdk/tool_adapter.py index d983d5e785..2d259730bf 100644 --- a/autogpt_platform/backend/backend/api/features/chat/sdk/tool_adapter.py +++ b/autogpt_platform/backend/backend/api/features/chat/sdk/tool_adapter.py @@ -310,7 +310,48 @@ def create_copilot_mcp_server(): # Bash is NOT included — use the sandboxed MCP bash_exec tool instead, # which provides kernel-level network isolation via unshare --net. # Task allows spawning sub-agents (rate-limited by security hooks). 
-_SDK_BUILTIN_TOOLS = ["Read", "Write", "Edit", "Glob", "Grep", "Task"] +# WebSearch uses Brave Search via Anthropic's API — safe, no SSRF risk. +_SDK_BUILTIN_TOOLS = ["Read", "Write", "Edit", "Glob", "Grep", "Task", "WebSearch"] + +# SDK built-in tools that must be explicitly blocked. +# Bash: dangerous — agent uses mcp__copilot__bash_exec with kernel-level +# network isolation (unshare --net) instead. +# WebFetch: SSRF risk — can reach internal network (localhost, 10.x, etc.). +# Agent uses the SSRF-protected mcp__copilot__web_fetch tool instead. +SDK_DISALLOWED_TOOLS = ["Bash", "WebFetch"] + +# Tools that are blocked entirely in security hooks (defence-in-depth). +# Includes SDK_DISALLOWED_TOOLS plus common aliases/synonyms. +BLOCKED_TOOLS = { + *SDK_DISALLOWED_TOOLS, + "bash", + "shell", + "exec", + "terminal", + "command", +} + +# Tools allowed only when their path argument stays within the SDK workspace. +# The SDK uses these to handle oversized tool results (writes to tool-results/ +# files, then reads them back) and for workspace file operations. 
+WORKSPACE_SCOPED_TOOLS = {"Read", "Write", "Edit", "Glob", "Grep"} + +# Dangerous patterns in tool inputs +DANGEROUS_PATTERNS = [ + r"sudo", + r"rm\s+-rf", + r"dd\s+if=", + r"/etc/passwd", + r"/etc/shadow", + r"chmod\s+777", + r"curl\s+.*\|.*sh", + r"wget\s+.*\|.*sh", + r"eval\s*\(", + r"exec\s*\(", + r"__import__", + r"os\.system", + r"subprocess", +] # List of tool names for allowed_tools configuration # Include MCP tools, the MCP Read tool for oversized results, From 647c8ed8d46b133cff44572cb99f8e683f9083b2 Mon Sep 17 00:00:00 2001 From: Eve <162624394+aviu16@users.noreply.github.com> Date: Mon, 16 Feb 2026 00:39:53 -0500 Subject: [PATCH 4/7] feat(backend/blocks): enhance list concatenation with advanced operations (#12105) ## Summary Enhances the existing `ConcatenateListsBlock` and adds five new companion blocks for comprehensive list manipulation, addressing issue #11139 ("Implement block to concatenate lists"). ### Changes - **Enhanced `ConcatenateListsBlock`** with optional deduplication (`deduplicate`) and None-value filtering (`remove_none`), plus an output `length` field - **New `FlattenListBlock`**: Recursively flattens nested list structures with configurable `max_depth` - **New `InterleaveListsBlock`**: Round-robin interleaving of elements from multiple lists - **New `ZipListsBlock`**: Zips corresponding elements from multiple lists with support for padding to longest or truncating to shortest - **New `ListDifferenceBlock`**: Computes set difference between two lists (regular or symmetric) - **New `ListIntersectionBlock`**: Finds common elements between two lists, preserving order ### Helper Utilities Extracted reusable helper functions for validation, flattening, deduplication, interleaving, chunking, and statistics computation to support the blocks and enable future reuse. 
### Test Coverage Comprehensive test suite with 188 test functions across 29 test classes covering: - Built-in block test harness validation for all 6 blocks - Manual edge-case tests for each block (empty inputs, large lists, mixed types, nested structures) - Internal method tests for all block classes - Unit tests for all helper utility functions Closes #11139 ## Test plan - [x] All files pass Python syntax validation (`ast.parse`) - [x] Built-in `test_input`/`test_output` tests defined for all blocks - [x] Manual tests cover edge cases: empty lists, large lists, mixed types, nested structures, deduplication, None removal - [x] Helper function tests validate all utility functions independently - [x] All block IDs are valid UUID4 - [x] Block categories set to `BlockCategory.BASIC` for consistency with existing list blocks

Greptile Overview

Greptile Summary

Enhanced `ConcatenateListsBlock` with deduplication and None-filtering options, and added five new list manipulation blocks (`FlattenListBlock`, `InterleaveListsBlock`, `ZipListsBlock`, `ListDifferenceBlock`, `ListIntersectionBlock`) with comprehensive helper functions and test coverage. **Key Changes:** - Enhanced `ConcatenateListsBlock` with `deduplicate` and `remove_none` options, plus `length` output field - Added `FlattenListBlock` for recursively flattening nested lists with configurable `max_depth` - Added `InterleaveListsBlock` for round-robin element interleaving - Added `ZipListsBlock` with support for padding/truncation - Added `ListDifferenceBlock` and `ListIntersectionBlock` for set operations - Extracted 12 reusable helper functions for validation, flattening, deduplication, etc. - Comprehensive test suite with 188 test functions covering edge cases **Minor Issues:** - Helper function `_deduplicate_list` has redundant logic in the `else` branch that duplicates the `if` branch - Three helper functions (`_filter_empty_collections`, `_compute_list_statistics`, `_chunk_list`) are defined but unused - consider removing unless planned for future use - The `_make_hashable` function converts unhashable types (dicts, lists, sets) into deterministic tuple/frozenset structures, which correctly treats structurally identical dicts/lists as duplicates

Confidence Score: 4/5

- Safe to merge with minor style improvements recommended - The implementation is well-structured with comprehensive test coverage (188 tests), proper error handling, and follows existing block patterns. All blocks use valid UUID4 IDs and correct categories. The helper functions provide good code reuse. The minor issues are purely stylistic (redundant code, unused helpers) and don't affect functionality or safety. - No files require special attention - both files are well-tested and follow project conventions

Sequence Diagram

```mermaid sequenceDiagram participant User participant Block as List Block participant Helper as Helper Functions participant Output User->>Block: Input (lists/parameters) Block->>Helper: _validate_all_lists() Helper-->>Block: validation result alt validation fails Block->>Output: error message else validation succeeds Block->>Helper: _concatenate_lists_simple() / _flatten_nested_list() / etc. Helper-->>Block: processed result opt deduplicate enabled Block->>Helper: _deduplicate_list() Helper-->>Block: deduplicated result end opt remove_none enabled Block->>Helper: _filter_none_values() Helper-->>Block: filtered result end Block->>Output: result + length end Output-->>User: Block outputs ```
Last reviewed commit: a6d5445 (2/5) Greptile learns from your feedback when you react with thumbs up/down! --------- Co-authored-by: Otto --- .../backend/blocks/data_manipulation.py | 704 ++++++++- .../test/blocks/test_list_concatenation.py | 1276 +++++++++++++++++ docs/integrations/README.md | 5 + docs/integrations/block-integrations/basic.md | 197 ++- 4 files changed, 2164 insertions(+), 18 deletions(-) create mode 100644 autogpt_platform/backend/test/blocks/test_list_concatenation.py diff --git a/autogpt_platform/backend/backend/blocks/data_manipulation.py b/autogpt_platform/backend/backend/blocks/data_manipulation.py index a8f25ecb18..fe878acfa9 100644 --- a/autogpt_platform/backend/backend/blocks/data_manipulation.py +++ b/autogpt_platform/backend/backend/blocks/data_manipulation.py @@ -682,17 +682,219 @@ class ListIsEmptyBlock(Block): yield "is_empty", len(input_data.list) == 0 +# ============================================================================= +# List Concatenation Helpers +# ============================================================================= + + +def _validate_list_input(item: Any, index: int) -> str | None: + """Validate that an item is a list. Returns error message or None.""" + if item is None: + return None # None is acceptable, will be skipped + if not isinstance(item, list): + return ( + f"Invalid input at index {index}: expected a list, " + f"got {type(item).__name__}. " + f"All items in 'lists' must be lists (e.g., [[1, 2], [3, 4]])." + ) + return None + + +def _validate_all_lists(lists: List[Any]) -> str | None: + """Validate that all items in a sequence are lists. 
Returns first error or None.""" + for idx, item in enumerate(lists): + error = _validate_list_input(item, idx) + if error is not None and item is not None: + return error + return None + + +def _concatenate_lists_simple(lists: List[List[Any]]) -> List[Any]: + """Concatenate a sequence of lists into a single list, skipping None values.""" + result: List[Any] = [] + for lst in lists: + if lst is None: + continue + result.extend(lst) + return result + + +def _flatten_nested_list(nested: List[Any], max_depth: int = -1) -> List[Any]: + """ + Recursively flatten a nested list structure. + + Args: + nested: The list to flatten. + max_depth: Maximum recursion depth. -1 means unlimited. + + Returns: + A flat list with all nested elements extracted. + """ + result: List[Any] = [] + _flatten_recursive(nested, result, current_depth=0, max_depth=max_depth) + return result + + +_MAX_FLATTEN_DEPTH = 1000 + + +def _flatten_recursive( + items: List[Any], + result: List[Any], + current_depth: int, + max_depth: int, +) -> None: + """Internal recursive helper for flattening nested lists.""" + if current_depth > _MAX_FLATTEN_DEPTH: + raise RecursionError( + f"Flattening exceeded maximum depth of {_MAX_FLATTEN_DEPTH} levels. " + "Input may be too deeply nested." + ) + for item in items: + if isinstance(item, list) and (max_depth == -1 or current_depth < max_depth): + _flatten_recursive(item, result, current_depth + 1, max_depth) + else: + result.append(item) + + +def _deduplicate_list(items: List[Any]) -> List[Any]: + """ + Remove duplicate elements from a list, preserving order of first occurrences. + + Args: + items: The list to deduplicate. + + Returns: + A list with duplicates removed, maintaining original order. 
+ """ + seen: set = set() + result: List[Any] = [] + for item in items: + item_id = _make_hashable(item) + if item_id not in seen: + seen.add(item_id) + result.append(item) + return result + + +def _make_hashable(item: Any): + """ + Create a hashable representation of any item for deduplication. + Converts unhashable types (dicts, lists) into deterministic tuple structures. + """ + if isinstance(item, dict): + return tuple( + sorted( + ((_make_hashable(k), _make_hashable(v)) for k, v in item.items()), + key=lambda x: (str(type(x[0])), str(x[0])), + ) + ) + if isinstance(item, (list, tuple)): + return tuple(_make_hashable(i) for i in item) + if isinstance(item, set): + return frozenset(_make_hashable(i) for i in item) + return item + + +def _filter_none_values(items: List[Any]) -> List[Any]: + """Remove None values from a list.""" + return [item for item in items if item is not None] + + +def _compute_nesting_depth( + items: Any, current: int = 0, max_depth: int = _MAX_FLATTEN_DEPTH +) -> int: + """ + Compute the maximum nesting depth of a list structure using iteration to avoid RecursionError. + + Uses a stack-based approach to handle deeply nested structures without hitting Python's + recursion limit (~1000 levels). + """ + if not isinstance(items, list): + return current + + # Stack contains tuples of (item, depth) + stack = [(items, current)] + max_observed_depth = current + + while stack: + item, depth = stack.pop() + + if depth > max_depth: + return depth + + if not isinstance(item, list): + max_observed_depth = max(max_observed_depth, depth) + continue + + if len(item) == 0: + max_observed_depth = max(max_observed_depth, depth + 1) + continue + + # Add all children to stack with incremented depth + for child in item: + stack.append((child, depth + 1)) + + return max_observed_depth + + +def _interleave_lists(lists: List[List[Any]]) -> List[Any]: + """ + Interleave elements from multiple lists in round-robin fashion. 
+ Example: [[1,2,3], [a,b], [x,y,z]] -> [1, a, x, 2, b, y, 3, z] + """ + if not lists: + return [] + filtered = [lst for lst in lists if lst is not None] + if not filtered: + return [] + result: List[Any] = [] + max_len = max(len(lst) for lst in filtered) + for i in range(max_len): + for lst in filtered: + if i < len(lst): + result.append(lst[i]) + return result + + +# ============================================================================= +# List Concatenation Blocks +# ============================================================================= + + class ConcatenateListsBlock(Block): + """ + Concatenates two or more lists into a single list. + + This block accepts a list of lists and combines all their elements + in order into one flat output list. It supports options for + deduplication and None-filtering to provide flexible list merging + capabilities for workflow pipelines. + """ + class Input(BlockSchemaInput): lists: List[List[Any]] = SchemaField( description="A list of lists to concatenate together. All lists will be combined in order into a single list.", placeholder="e.g., [[1, 2], [3, 4], [5, 6]]", ) + deduplicate: bool = SchemaField( + description="If True, remove duplicate elements from the concatenated result while preserving order.", + default=False, + advanced=True, + ) + remove_none: bool = SchemaField( + description="If True, remove None values from the concatenated result.", + default=False, + advanced=True, + ) class Output(BlockSchemaOutput): concatenated_list: List[Any] = SchemaField( description="The concatenated list containing all elements from all input lists in order." ) + length: int = SchemaField( + description="The total number of elements in the concatenated list." + ) error: str = SchemaField( description="Error message if concatenation failed due to invalid input types." 
) @@ -700,7 +902,7 @@ class ConcatenateListsBlock(Block): def __init__(self): super().__init__( id="3cf9298b-5817-4141-9d80-7c2cc5199c8e", - description="Concatenates multiple lists into a single list. All elements from all input lists are combined in order.", + description="Concatenates multiple lists into a single list. All elements from all input lists are combined in order. Supports optional deduplication and None removal.", categories={BlockCategory.BASIC}, input_schema=ConcatenateListsBlock.Input, output_schema=ConcatenateListsBlock.Output, @@ -709,29 +911,497 @@ class ConcatenateListsBlock(Block): {"lists": [["a", "b"], ["c"], ["d", "e", "f"]]}, {"lists": [[1, 2], []]}, {"lists": []}, + {"lists": [[1, 2, 2, 3], [3, 4]], "deduplicate": True}, + {"lists": [[1, None, 2], [None, 3]], "remove_none": True}, ], test_output=[ ("concatenated_list", [1, 2, 3, 4, 5, 6]), + ("length", 6), ("concatenated_list", ["a", "b", "c", "d", "e", "f"]), + ("length", 6), ("concatenated_list", [1, 2]), + ("length", 2), ("concatenated_list", []), + ("length", 0), + ("concatenated_list", [1, 2, 3, 4]), + ("length", 4), + ("concatenated_list", [1, 2, 3]), + ("length", 3), ], ) + def _validate_inputs(self, lists: List[Any]) -> str | None: + return _validate_all_lists(lists) + + def _perform_concatenation(self, lists: List[List[Any]]) -> List[Any]: + return _concatenate_lists_simple(lists) + + def _apply_deduplication(self, items: List[Any]) -> List[Any]: + return _deduplicate_list(items) + + def _apply_none_removal(self, items: List[Any]) -> List[Any]: + return _filter_none_values(items) + + def _post_process( + self, items: List[Any], deduplicate: bool, remove_none: bool + ) -> List[Any]: + """Apply all post-processing steps to the concatenated result.""" + result = items + if remove_none: + result = self._apply_none_removal(result) + if deduplicate: + result = self._apply_deduplication(result) + return result + async def run(self, input_data: Input, **kwargs) -> BlockOutput: - 
concatenated = [] - for idx, lst in enumerate(input_data.lists): - if lst is None: - # Skip None values to avoid errors - continue - if not isinstance(lst, list): - # Type validation: each item must be a list - # Strings are iterable and would cause extend() to iterate character-by-character - # Non-iterable types would raise TypeError - yield "error", ( - f"Invalid input at index {idx}: expected a list, got {type(lst).__name__}. " - f"All items in 'lists' must be lists (e.g., [[1, 2], [3, 4]])." - ) - return - concatenated.extend(lst) - yield "concatenated_list", concatenated + # Validate all inputs are lists + validation_error = self._validate_inputs(input_data.lists) + if validation_error is not None: + yield "error", validation_error + return + + # Perform concatenation + concatenated = self._perform_concatenation(input_data.lists) + + # Apply post-processing + result = self._post_process( + concatenated, input_data.deduplicate, input_data.remove_none + ) + + yield "concatenated_list", result + yield "length", len(result) + + +class FlattenListBlock(Block): + """ + Flattens a nested list structure into a single flat list. + + This block takes a list that may contain nested lists at any depth + and produces a single-level list with all leaf elements. Useful + for normalizing data structures from multiple sources that may + have varying levels of nesting. + """ + + class Input(BlockSchemaInput): + nested_list: List[Any] = SchemaField( + description="A potentially nested list to flatten into a single-level list.", + placeholder="e.g., [[1, [2, 3]], [4, [5, [6]]]]", + ) + max_depth: int = SchemaField( + description="Maximum depth to flatten. -1 means flatten completely. 1 means flatten only one level.", + default=-1, + advanced=True, + ) + + class Output(BlockSchemaOutput): + flattened_list: List[Any] = SchemaField( + description="The flattened list with all nested elements extracted." 
+ ) + length: int = SchemaField( + description="The number of elements in the flattened list." + ) + original_depth: int = SchemaField( + description="The maximum nesting depth of the original input list." + ) + error: str = SchemaField(description="Error message if flattening failed.") + + def __init__(self): + super().__init__( + id="cc45bb0f-d035-4756-96a7-fe3e36254b4d", + description="Flattens a nested list structure into a single flat list. Supports configurable maximum flattening depth.", + categories={BlockCategory.BASIC}, + input_schema=FlattenListBlock.Input, + output_schema=FlattenListBlock.Output, + test_input=[ + {"nested_list": [[1, 2], [3, [4, 5]]]}, + {"nested_list": [1, [2, [3, [4]]]]}, + {"nested_list": [1, [2, [3, [4]]], 5], "max_depth": 1}, + {"nested_list": []}, + {"nested_list": [1, 2, 3]}, + ], + test_output=[ + ("flattened_list", [1, 2, 3, 4, 5]), + ("length", 5), + ("original_depth", 3), + ("flattened_list", [1, 2, 3, 4]), + ("length", 4), + ("original_depth", 4), + ("flattened_list", [1, 2, [3, [4]], 5]), + ("length", 4), + ("original_depth", 4), + ("flattened_list", []), + ("length", 0), + ("original_depth", 1), + ("flattened_list", [1, 2, 3]), + ("length", 3), + ("original_depth", 1), + ], + ) + + def _compute_depth(self, items: List[Any]) -> int: + """Compute the nesting depth of the input list.""" + return _compute_nesting_depth(items) + + def _flatten(self, items: List[Any], max_depth: int) -> List[Any]: + """Flatten the list to the specified depth.""" + return _flatten_nested_list(items, max_depth=max_depth) + + def _validate_max_depth(self, max_depth: int) -> str | None: + """Validate the max_depth parameter.""" + if max_depth < -1: + return f"max_depth must be -1 (unlimited) or a non-negative integer, got {max_depth}" + return None + + async def run(self, input_data: Input, **kwargs) -> BlockOutput: + # Validate max_depth + depth_error = self._validate_max_depth(input_data.max_depth) + if depth_error is not None: + yield "error", 
depth_error + return + + original_depth = self._compute_depth(input_data.nested_list) + flattened = self._flatten(input_data.nested_list, input_data.max_depth) + + yield "flattened_list", flattened + yield "length", len(flattened) + yield "original_depth", original_depth + + +class InterleaveListsBlock(Block): + """ + Interleaves elements from multiple lists in round-robin fashion. + + Given multiple input lists, this block takes one element from each + list in turn, producing an output where elements alternate between + sources. Lists of different lengths are handled gracefully - shorter + lists simply stop contributing once exhausted. + """ + + class Input(BlockSchemaInput): + lists: List[List[Any]] = SchemaField( + description="A list of lists to interleave. Elements will be taken in round-robin order.", + placeholder="e.g., [[1, 2, 3], ['a', 'b', 'c']]", + ) + + class Output(BlockSchemaOutput): + interleaved_list: List[Any] = SchemaField( + description="The interleaved list with elements alternating from each input list." + ) + length: int = SchemaField( + description="The total number of elements in the interleaved list." 
+ ) + error: str = SchemaField(description="Error message if interleaving failed.") + + def __init__(self): + super().__init__( + id="9f616084-1d9f-4f8e-bc00-5b9d2a75cd75", + description="Interleaves elements from multiple lists in round-robin fashion, alternating between sources.", + categories={BlockCategory.BASIC}, + input_schema=InterleaveListsBlock.Input, + output_schema=InterleaveListsBlock.Output, + test_input=[ + {"lists": [[1, 2, 3], ["a", "b", "c"]]}, + {"lists": [[1, 2, 3], ["a", "b"], ["x", "y", "z"]]}, + {"lists": [[1], [2], [3]]}, + {"lists": []}, + ], + test_output=[ + ("interleaved_list", [1, "a", 2, "b", 3, "c"]), + ("length", 6), + ("interleaved_list", [1, "a", "x", 2, "b", "y", 3, "z"]), + ("length", 8), + ("interleaved_list", [1, 2, 3]), + ("length", 3), + ("interleaved_list", []), + ("length", 0), + ], + ) + + def _validate_inputs(self, lists: List[Any]) -> str | None: + return _validate_all_lists(lists) + + def _interleave(self, lists: List[List[Any]]) -> List[Any]: + return _interleave_lists(lists) + + async def run(self, input_data: Input, **kwargs) -> BlockOutput: + validation_error = self._validate_inputs(input_data.lists) + if validation_error is not None: + yield "error", validation_error + return + + result = self._interleave(input_data.lists) + yield "interleaved_list", result + yield "length", len(result) + + +class ZipListsBlock(Block): + """ + Zips multiple lists together into a list of grouped tuples/lists. + + Takes two or more input lists and combines corresponding elements + into sub-lists. For example, zipping [1,2,3] and ['a','b','c'] + produces [[1,'a'], [2,'b'], [3,'c']]. Supports both truncating + to shortest list and padding to longest list with a fill value. + """ + + class Input(BlockSchemaInput): + lists: List[List[Any]] = SchemaField( + description="A list of lists to zip together. 
Corresponding elements will be grouped.", + placeholder="e.g., [[1, 2, 3], ['a', 'b', 'c']]", + ) + pad_to_longest: bool = SchemaField( + description="If True, pad shorter lists with fill_value to match the longest list. If False, truncate to shortest.", + default=False, + advanced=True, + ) + fill_value: Any = SchemaField( + description="Value to use for padding when pad_to_longest is True.", + default=None, + advanced=True, + ) + + class Output(BlockSchemaOutput): + zipped_list: List[List[Any]] = SchemaField( + description="The zipped list of grouped elements." + ) + length: int = SchemaField( + description="The number of groups in the zipped result." + ) + error: str = SchemaField(description="Error message if zipping failed.") + + def __init__(self): + super().__init__( + id="0d0e684f-5cb9-4c4b-b8d1-47a0860e0c07", + description="Zips multiple lists together into a list of grouped elements. Supports padding to longest or truncating to shortest.", + categories={BlockCategory.BASIC}, + input_schema=ZipListsBlock.Input, + output_schema=ZipListsBlock.Output, + test_input=[ + {"lists": [[1, 2, 3], ["a", "b", "c"]]}, + {"lists": [[1, 2, 3], ["a", "b"]]}, + { + "lists": [[1, 2], ["a", "b", "c"]], + "pad_to_longest": True, + "fill_value": 0, + }, + {"lists": []}, + ], + test_output=[ + ("zipped_list", [[1, "a"], [2, "b"], [3, "c"]]), + ("length", 3), + ("zipped_list", [[1, "a"], [2, "b"]]), + ("length", 2), + ("zipped_list", [[1, "a"], [2, "b"], [0, "c"]]), + ("length", 3), + ("zipped_list", []), + ("length", 0), + ], + ) + + def _validate_inputs(self, lists: List[Any]) -> str | None: + return _validate_all_lists(lists) + + def _zip_truncate(self, lists: List[List[Any]]) -> List[List[Any]]: + """Zip lists, truncating to shortest.""" + filtered = [lst for lst in lists if lst is not None] + if not filtered: + return [] + return [list(group) for group in zip(*filtered)] + + def _zip_pad(self, lists: List[List[Any]], fill_value: Any) -> List[List[Any]]: + """Zip lists, 
padding shorter ones with fill_value.""" + if not lists: + return [] + lists = [lst for lst in lists if lst is not None] + if not lists: + return [] + max_len = max(len(lst) for lst in lists) + result: List[List[Any]] = [] + for i in range(max_len): + group: List[Any] = [] + for lst in lists: + if i < len(lst): + group.append(lst[i]) + else: + group.append(fill_value) + result.append(group) + return result + + async def run(self, input_data: Input, **kwargs) -> BlockOutput: + validation_error = self._validate_inputs(input_data.lists) + if validation_error is not None: + yield "error", validation_error + return + + if not input_data.lists: + yield "zipped_list", [] + yield "length", 0 + return + + if input_data.pad_to_longest: + result = self._zip_pad(input_data.lists, input_data.fill_value) + else: + result = self._zip_truncate(input_data.lists) + + yield "zipped_list", result + yield "length", len(result) + + +class ListDifferenceBlock(Block): + """ + Computes the difference between two lists (elements in the first + list that are not in the second list). + + This is useful for finding items that exist in one dataset but + not in another, such as finding new items, missing items, or + items that need to be processed. + """ + + class Input(BlockSchemaInput): + list_a: List[Any] = SchemaField( + description="The primary list to check elements from.", + placeholder="e.g., [1, 2, 3, 4, 5]", + ) + list_b: List[Any] = SchemaField( + description="The list to subtract. Elements found here will be removed from list_a.", + placeholder="e.g., [3, 4, 5, 6]", + ) + symmetric: bool = SchemaField( + description="If True, compute symmetric difference (elements in either list but not both).", + default=False, + advanced=True, + ) + + class Output(BlockSchemaOutput): + difference: List[Any] = SchemaField( + description="Elements from list_a not found in list_b (or symmetric difference if enabled)." 
+ ) + length: int = SchemaField( + description="The number of elements in the difference result." + ) + error: str = SchemaField(description="Error message if the operation failed.") + + def __init__(self): + super().__init__( + id="05309873-9d61-447e-96b5-b804e2511829", + description="Computes the difference between two lists. Returns elements in the first list not found in the second, or symmetric difference.", + categories={BlockCategory.BASIC}, + input_schema=ListDifferenceBlock.Input, + output_schema=ListDifferenceBlock.Output, + test_input=[ + {"list_a": [1, 2, 3, 4, 5], "list_b": [3, 4, 5, 6, 7]}, + { + "list_a": [1, 2, 3, 4, 5], + "list_b": [3, 4, 5, 6, 7], + "symmetric": True, + }, + {"list_a": ["a", "b", "c"], "list_b": ["b"]}, + {"list_a": [], "list_b": [1, 2, 3]}, + ], + test_output=[ + ("difference", [1, 2]), + ("length", 2), + ("difference", [1, 2, 6, 7]), + ("length", 4), + ("difference", ["a", "c"]), + ("length", 2), + ("difference", []), + ("length", 0), + ], + ) + + def _compute_difference(self, list_a: List[Any], list_b: List[Any]) -> List[Any]: + """Compute elements in list_a not in list_b.""" + b_hashes = {_make_hashable(item) for item in list_b} + return [item for item in list_a if _make_hashable(item) not in b_hashes] + + def _compute_symmetric_difference( + self, list_a: List[Any], list_b: List[Any] + ) -> List[Any]: + """Compute elements in either list but not both.""" + a_hashes = {_make_hashable(item) for item in list_a} + b_hashes = {_make_hashable(item) for item in list_b} + only_in_a = [item for item in list_a if _make_hashable(item) not in b_hashes] + only_in_b = [item for item in list_b if _make_hashable(item) not in a_hashes] + return only_in_a + only_in_b + + async def run(self, input_data: Input, **kwargs) -> BlockOutput: + if input_data.symmetric: + result = self._compute_symmetric_difference( + input_data.list_a, input_data.list_b + ) + else: + result = self._compute_difference(input_data.list_a, input_data.list_b) + + yield 
"difference", result + yield "length", len(result) + + +class ListIntersectionBlock(Block): + """ + Computes the intersection of two lists (elements present in both lists). + + This is useful for finding common items between two datasets, + such as shared tags, mutual connections, or overlapping categories. + """ + + class Input(BlockSchemaInput): + list_a: List[Any] = SchemaField( + description="The first list to intersect.", + placeholder="e.g., [1, 2, 3, 4, 5]", + ) + list_b: List[Any] = SchemaField( + description="The second list to intersect.", + placeholder="e.g., [3, 4, 5, 6, 7]", + ) + + class Output(BlockSchemaOutput): + intersection: List[Any] = SchemaField( + description="Elements present in both list_a and list_b." + ) + length: int = SchemaField( + description="The number of elements in the intersection." + ) + error: str = SchemaField(description="Error message if the operation failed.") + + def __init__(self): + super().__init__( + id="b6eb08b6-dbe3-411b-b9b4-2508cb311a1f", + description="Computes the intersection of two lists, returning only elements present in both.", + categories={BlockCategory.BASIC}, + input_schema=ListIntersectionBlock.Input, + output_schema=ListIntersectionBlock.Output, + test_input=[ + {"list_a": [1, 2, 3, 4, 5], "list_b": [3, 4, 5, 6, 7]}, + {"list_a": ["a", "b", "c"], "list_b": ["c", "d", "e"]}, + {"list_a": [1, 2], "list_b": [3, 4]}, + {"list_a": [], "list_b": [1, 2, 3]}, + ], + test_output=[ + ("intersection", [3, 4, 5]), + ("length", 3), + ("intersection", ["c"]), + ("length", 1), + ("intersection", []), + ("length", 0), + ("intersection", []), + ("length", 0), + ], + ) + + def _compute_intersection(self, list_a: List[Any], list_b: List[Any]) -> List[Any]: + """Compute elements present in both lists, preserving order from list_a.""" + b_hashes = {_make_hashable(item) for item in list_b} + seen: set = set() + result: List[Any] = [] + for item in list_a: + h = _make_hashable(item) + if h in b_hashes and h not in seen: + 
result.append(item) + seen.add(h) + return result + + async def run(self, input_data: Input, **kwargs) -> BlockOutput: + result = self._compute_intersection(input_data.list_a, input_data.list_b) + yield "intersection", result + yield "length", len(result) diff --git a/autogpt_platform/backend/test/blocks/test_list_concatenation.py b/autogpt_platform/backend/test/blocks/test_list_concatenation.py new file mode 100644 index 0000000000..8cea3b60f7 --- /dev/null +++ b/autogpt_platform/backend/test/blocks/test_list_concatenation.py @@ -0,0 +1,1276 @@ +""" +Comprehensive test suite for list concatenation and manipulation blocks. + +Tests cover: +- ConcatenateListsBlock: basic concatenation, deduplication, None removal +- FlattenListBlock: nested list flattening with depth control +- InterleaveListsBlock: round-robin interleaving of multiple lists +- ZipListsBlock: zipping lists with truncation and padding +- ListDifferenceBlock: computing list differences (regular and symmetric) +- ListIntersectionBlock: finding common elements between lists +- Helper utility functions: validation, flattening, deduplication, etc. 
+""" + +import pytest + +from backend.blocks.data_manipulation import ( + _MAX_FLATTEN_DEPTH, + ConcatenateListsBlock, + FlattenListBlock, + InterleaveListsBlock, + ListDifferenceBlock, + ListIntersectionBlock, + ZipListsBlock, + _compute_nesting_depth, + _concatenate_lists_simple, + _deduplicate_list, + _filter_none_values, + _flatten_nested_list, + _interleave_lists, + _make_hashable, + _validate_all_lists, + _validate_list_input, +) +from backend.util.test import execute_block_test + +# ============================================================================= +# Helper Function Tests +# ============================================================================= + + +class TestValidateListInput: + """Tests for the _validate_list_input helper.""" + + def test_valid_list_returns_none(self): + assert _validate_list_input([1, 2, 3], 0) is None + + def test_empty_list_returns_none(self): + assert _validate_list_input([], 0) is None + + def test_none_returns_none(self): + assert _validate_list_input(None, 0) is None + + def test_string_returns_error(self): + result = _validate_list_input("hello", 0) + assert result is not None + assert "str" in result + assert "index 0" in result + + def test_integer_returns_error(self): + result = _validate_list_input(42, 1) + assert result is not None + assert "int" in result + assert "index 1" in result + + def test_dict_returns_error(self): + result = _validate_list_input({"a": 1}, 2) + assert result is not None + assert "dict" in result + assert "index 2" in result + + def test_tuple_returns_error(self): + result = _validate_list_input((1, 2), 3) + assert result is not None + assert "tuple" in result + + def test_boolean_returns_error(self): + result = _validate_list_input(True, 0) + assert result is not None + assert "bool" in result + + def test_float_returns_error(self): + result = _validate_list_input(3.14, 0) + assert result is not None + assert "float" in result + + +class TestValidateAllLists: + """Tests for the 
_validate_all_lists helper.""" + + def test_all_valid_lists(self): + assert _validate_all_lists([[1], [2], [3]]) is None + + def test_empty_outer_list(self): + assert _validate_all_lists([]) is None + + def test_mixed_valid_and_none(self): + # None is skipped, so this should pass + assert _validate_all_lists([[1], None, [3]]) is None + + def test_invalid_item_returns_error(self): + result = _validate_all_lists([[1], "bad", [3]]) + assert result is not None + assert "index 1" in result + + def test_first_invalid_is_returned(self): + result = _validate_all_lists(["first_bad", "second_bad"]) + assert result is not None + assert "index 0" in result + + def test_all_none_passes(self): + assert _validate_all_lists([None, None, None]) is None + + +class TestConcatenateListsSimple: + """Tests for the _concatenate_lists_simple helper.""" + + def test_basic_concatenation(self): + assert _concatenate_lists_simple([[1, 2], [3, 4]]) == [1, 2, 3, 4] + + def test_empty_lists(self): + assert _concatenate_lists_simple([[], []]) == [] + + def test_single_list(self): + assert _concatenate_lists_simple([[1, 2, 3]]) == [1, 2, 3] + + def test_no_lists(self): + assert _concatenate_lists_simple([]) == [] + + def test_skip_none_values(self): + assert _concatenate_lists_simple([[1, 2], None, [3, 4]]) == [1, 2, 3, 4] # type: ignore[arg-type] + + def test_mixed_types(self): + result = _concatenate_lists_simple([[1, "a"], [True, 3.14]]) + assert result == [1, "a", True, 3.14] + + def test_nested_lists_preserved(self): + result = _concatenate_lists_simple([[[1, 2]], [[3, 4]]]) + assert result == [[1, 2], [3, 4]] + + def test_large_number_of_lists(self): + lists = [[i] for i in range(100)] + result = _concatenate_lists_simple(lists) + assert result == list(range(100)) + + +class TestFlattenNestedList: + """Tests for the _flatten_nested_list helper.""" + + def test_already_flat(self): + assert _flatten_nested_list([1, 2, 3]) == [1, 2, 3] + + def test_one_level_nesting(self): + assert 
_flatten_nested_list([[1, 2], [3, 4]]) == [1, 2, 3, 4] + + def test_deep_nesting(self): + assert _flatten_nested_list([1, [2, [3, [4, [5]]]]]) == [1, 2, 3, 4, 5] + + def test_empty_list(self): + assert _flatten_nested_list([]) == [] + + def test_mixed_nesting(self): + assert _flatten_nested_list([1, [2, 3], 4, [5, [6]]]) == [1, 2, 3, 4, 5, 6] + + def test_max_depth_zero(self): + # max_depth=0 means no flattening at all + result = _flatten_nested_list([[1, 2], [3, 4]], max_depth=0) + assert result == [[1, 2], [3, 4]] + + def test_max_depth_one(self): + result = _flatten_nested_list([[1, [2, 3]], [4, [5]]], max_depth=1) + assert result == [1, [2, 3], 4, [5]] + + def test_max_depth_two(self): + result = _flatten_nested_list([[[1, 2], [3]], [[4, [5]]]], max_depth=2) + assert result == [1, 2, 3, 4, [5]] + + def test_unlimited_depth(self): + deeply_nested = [[[[[[[1]]]]]]] + assert _flatten_nested_list(deeply_nested, max_depth=-1) == [1] + + def test_preserves_non_list_iterables(self): + result = _flatten_nested_list(["hello", [1, 2]]) + assert result == ["hello", 1, 2] + + def test_preserves_dicts(self): + result = _flatten_nested_list([{"a": 1}, [{"b": 2}]]) + assert result == [{"a": 1}, {"b": 2}] + + def test_excessive_depth_raises_recursion_error(self): + """Deeply nested lists beyond 1000 levels should raise RecursionError.""" + # Build a list nested 1100 levels deep + nested = [42] + for _ in range(1100): + nested = [nested] + with pytest.raises(RecursionError, match="maximum.*depth"): + _flatten_nested_list(nested, max_depth=-1) + + +class TestDeduplicateList: + """Tests for the _deduplicate_list helper.""" + + def test_no_duplicates(self): + assert _deduplicate_list([1, 2, 3]) == [1, 2, 3] + + def test_with_duplicates(self): + assert _deduplicate_list([1, 2, 2, 3, 3, 3]) == [1, 2, 3] + + def test_all_duplicates(self): + assert _deduplicate_list([1, 1, 1]) == [1] + + def test_empty_list(self): + assert _deduplicate_list([]) == [] + + def 
test_preserves_order(self): + result = _deduplicate_list([3, 1, 2, 1, 3]) + assert result == [3, 1, 2] + + def test_string_duplicates(self): + assert _deduplicate_list(["a", "b", "a", "c"]) == ["a", "b", "c"] + + def test_mixed_types(self): + result = _deduplicate_list([1, "1", 1, "1"]) + assert result == [1, "1"] + + def test_dict_duplicates(self): + result = _deduplicate_list([{"a": 1}, {"a": 1}, {"b": 2}]) + assert result == [{"a": 1}, {"b": 2}] + + def test_list_duplicates(self): + result = _deduplicate_list([[1, 2], [1, 2], [3, 4]]) + assert result == [[1, 2], [3, 4]] + + def test_none_duplicates(self): + result = _deduplicate_list([None, 1, None, 2]) + assert result == [None, 1, 2] + + def test_single_element(self): + assert _deduplicate_list([42]) == [42] + + +class TestMakeHashable: + """Tests for the _make_hashable helper.""" + + def test_integer(self): + assert _make_hashable(42) == 42 + + def test_string(self): + assert _make_hashable("hello") == "hello" + + def test_none(self): + assert _make_hashable(None) is None + + def test_dict_returns_tuple(self): + result = _make_hashable({"a": 1}) + assert isinstance(result, tuple) + # Should be hashable + hash(result) + + def test_list_returns_tuple(self): + result = _make_hashable([1, 2, 3]) + assert result == (1, 2, 3) + + def test_same_dict_same_hash(self): + assert _make_hashable({"a": 1, "b": 2}) == _make_hashable({"a": 1, "b": 2}) + + def test_different_dict_different_hash(self): + assert _make_hashable({"a": 1}) != _make_hashable({"a": 2}) + + def test_dict_key_order_independent(self): + """Dicts with same keys in different insertion order produce same result.""" + d1 = {"b": 2, "a": 1} + d2 = {"a": 1, "b": 2} + assert _make_hashable(d1) == _make_hashable(d2) + + def test_tuple_hashable(self): + result = _make_hashable((1, 2, 3)) + assert result == (1, 2, 3) + hash(result) + + def test_boolean(self): + result = _make_hashable(True) + assert result is True + + def test_float(self): + result = 
_make_hashable(3.14) + assert result == 3.14 + + +class TestFilterNoneValues: + """Tests for the _filter_none_values helper.""" + + def test_removes_none(self): + assert _filter_none_values([1, None, 2, None, 3]) == [1, 2, 3] + + def test_no_none(self): + assert _filter_none_values([1, 2, 3]) == [1, 2, 3] + + def test_all_none(self): + assert _filter_none_values([None, None, None]) == [] + + def test_empty_list(self): + assert _filter_none_values([]) == [] + + def test_preserves_falsy_values(self): + assert _filter_none_values([0, False, "", None, []]) == [0, False, "", []] + + +class TestComputeNestingDepth: + """Tests for the _compute_nesting_depth helper.""" + + def test_flat_list(self): + assert _compute_nesting_depth([1, 2, 3]) == 1 + + def test_one_level(self): + assert _compute_nesting_depth([[1, 2], [3, 4]]) == 2 + + def test_deep_nesting(self): + assert _compute_nesting_depth([[[[]]]]) == 4 + + def test_mixed_depth(self): + depth = _compute_nesting_depth([1, [2, [3]]]) + assert depth == 3 + + def test_empty_list(self): + assert _compute_nesting_depth([]) == 1 + + def test_non_list(self): + assert _compute_nesting_depth(42) == 0 + + def test_string_not_recursed(self): + # Strings should not be treated as nested lists + assert _compute_nesting_depth(["hello"]) == 1 + + +class TestInterleaveListsHelper: + """Tests for the _interleave_lists helper.""" + + def test_equal_length_lists(self): + result = _interleave_lists([[1, 2, 3], ["a", "b", "c"]]) + assert result == [1, "a", 2, "b", 3, "c"] + + def test_unequal_length_lists(self): + result = _interleave_lists([[1, 2, 3], ["a"]]) + assert result == [1, "a", 2, 3] + + def test_empty_input(self): + assert _interleave_lists([]) == [] + + def test_single_list(self): + assert _interleave_lists([[1, 2, 3]]) == [1, 2, 3] + + def test_three_lists(self): + result = _interleave_lists([[1], [2], [3]]) + assert result == [1, 2, 3] + + def test_with_none_list(self): + result = _interleave_lists([[1, 2], None, [3, 4]]) # 
type: ignore[arg-type] + assert result == [1, 3, 2, 4] + + def test_all_empty_lists(self): + assert _interleave_lists([[], [], []]) == [] + + def test_all_none_lists(self): + """All-None inputs should return empty list, not crash.""" + assert _interleave_lists([None, None, None]) == [] # type: ignore[arg-type] + + +class TestComputeNestingDepthEdgeCases: + """Tests for _compute_nesting_depth with deeply nested input.""" + + def test_deeply_nested_does_not_crash(self): + """Deeply nested lists beyond 1000 levels should not raise RecursionError.""" + nested = [42] + for _ in range(1100): + nested = [nested] + # Should return a depth value without crashing + depth = _compute_nesting_depth(nested) + assert depth >= _MAX_FLATTEN_DEPTH + + +class TestMakeHashableMixedKeys: + """Tests for _make_hashable with mixed-type dict keys.""" + + def test_mixed_type_dict_keys(self): + """Dicts with mixed-type keys (int and str) should not crash sorted().""" + d = {1: "one", "two": 2} + result = _make_hashable(d) + assert isinstance(result, tuple) + hash(result) # Should be hashable without error + + def test_mixed_type_keys_deterministic(self): + """Same dict with mixed keys produces same result.""" + d1 = {1: "a", "b": 2} + d2 = {1: "a", "b": 2} + assert _make_hashable(d1) == _make_hashable(d2) + + +class TestZipListsNoneHandling: + """Tests for ZipListsBlock with None values in input.""" + + def setup_method(self): + self.block = ZipListsBlock() + + def test_zip_truncate_with_none(self): + """_zip_truncate should handle None values in input lists.""" + result = self.block._zip_truncate([[1, 2], None, [3, 4]]) # type: ignore[arg-type] + assert result == [[1, 3], [2, 4]] + + def test_zip_pad_with_none(self): + """_zip_pad should handle None values in input lists.""" + result = self.block._zip_pad([[1, 2, 3], None, ["a"]], fill_value="X") # type: ignore[arg-type] + assert result == [[1, "a"], [2, "X"], [3, "X"]] + + def test_zip_truncate_all_none(self): + """All-None inputs should 
return empty list.""" + result = self.block._zip_truncate([None, None]) # type: ignore[arg-type] + assert result == [] + + def test_zip_pad_all_none(self): + """All-None inputs should return empty list.""" + result = self.block._zip_pad([None, None], fill_value=0) # type: ignore[arg-type] + assert result == [] + + +# ============================================================================= +# Block Built-in Tests (using test_input/test_output) +# ============================================================================= + + +class TestConcatenateListsBlockBuiltin: + """Run the built-in test_input/test_output tests for ConcatenateListsBlock.""" + + @pytest.mark.asyncio + async def test_builtin_tests(self): + block = ConcatenateListsBlock() + await execute_block_test(block) + + +class TestFlattenListBlockBuiltin: + """Run the built-in test_input/test_output tests for FlattenListBlock.""" + + @pytest.mark.asyncio + async def test_builtin_tests(self): + block = FlattenListBlock() + await execute_block_test(block) + + +class TestInterleaveListsBlockBuiltin: + """Run the built-in test_input/test_output tests for InterleaveListsBlock.""" + + @pytest.mark.asyncio + async def test_builtin_tests(self): + block = InterleaveListsBlock() + await execute_block_test(block) + + +class TestZipListsBlockBuiltin: + """Run the built-in test_input/test_output tests for ZipListsBlock.""" + + @pytest.mark.asyncio + async def test_builtin_tests(self): + block = ZipListsBlock() + await execute_block_test(block) + + +class TestListDifferenceBlockBuiltin: + """Run the built-in test_input/test_output tests for ListDifferenceBlock.""" + + @pytest.mark.asyncio + async def test_builtin_tests(self): + block = ListDifferenceBlock() + await execute_block_test(block) + + +class TestListIntersectionBlockBuiltin: + """Run the built-in test_input/test_output tests for ListIntersectionBlock.""" + + @pytest.mark.asyncio + async def test_builtin_tests(self): + block = ListIntersectionBlock() + 
await execute_block_test(block) + + +# ============================================================================= +# ConcatenateListsBlock Manual Tests +# ============================================================================= + + +class TestConcatenateListsBlockManual: + """Manual test cases for ConcatenateListsBlock edge cases.""" + + def setup_method(self): + self.block = ConcatenateListsBlock() + + @pytest.mark.asyncio + async def test_two_lists(self): + """Test basic two-list concatenation.""" + results = {} + async for name, value in self.block.run( + ConcatenateListsBlock.Input(lists=[[1, 2], [3, 4]]) + ): + results[name] = value + assert results["concatenated_list"] == [1, 2, 3, 4] + assert results["length"] == 4 + + @pytest.mark.asyncio + async def test_three_lists(self): + """Test three-list concatenation.""" + results = {} + async for name, value in self.block.run( + ConcatenateListsBlock.Input(lists=[[1], [2], [3]]) + ): + results[name] = value + assert results["concatenated_list"] == [1, 2, 3] + + @pytest.mark.asyncio + async def test_five_lists(self): + """Test concatenation of five lists.""" + results = {} + async for name, value in self.block.run( + ConcatenateListsBlock.Input(lists=[[1], [2], [3], [4], [5]]) + ): + results[name] = value + assert results["concatenated_list"] == [1, 2, 3, 4, 5] + assert results["length"] == 5 + + @pytest.mark.asyncio + async def test_empty_lists_only(self): + """Test concatenation of only empty lists.""" + results = {} + async for name, value in self.block.run( + ConcatenateListsBlock.Input(lists=[[], [], []]) + ): + results[name] = value + assert results["concatenated_list"] == [] + assert results["length"] == 0 + + @pytest.mark.asyncio + async def test_mixed_types_in_lists(self): + """Test concatenation with mixed types.""" + results = {} + async for name, value in self.block.run( + ConcatenateListsBlock.Input( + lists=[[1, "a"], [True, 3.14], [None, {"key": "val"}]] + ) + ): + results[name] = value + 
assert results["concatenated_list"] == [ + 1, + "a", + True, + 3.14, + None, + {"key": "val"}, + ] + + @pytest.mark.asyncio + async def test_deduplication_enabled(self): + """Test deduplication removes duplicates.""" + results = {} + async for name, value in self.block.run( + ConcatenateListsBlock.Input( + lists=[[1, 2, 3], [2, 3, 4], [3, 4, 5]], + deduplicate=True, + ) + ): + results[name] = value + assert results["concatenated_list"] == [1, 2, 3, 4, 5] + + @pytest.mark.asyncio + async def test_deduplication_preserves_order(self): + """Test that deduplication preserves first-occurrence order.""" + results = {} + async for name, value in self.block.run( + ConcatenateListsBlock.Input( + lists=[[3, 1, 2], [2, 4, 1]], + deduplicate=True, + ) + ): + results[name] = value + assert results["concatenated_list"] == [3, 1, 2, 4] + + @pytest.mark.asyncio + async def test_remove_none_enabled(self): + """Test None removal from concatenated results.""" + results = {} + async for name, value in self.block.run( + ConcatenateListsBlock.Input( + lists=[[1, None], [None, 2], [3, None]], + remove_none=True, + ) + ): + results[name] = value + assert results["concatenated_list"] == [1, 2, 3] + + @pytest.mark.asyncio + async def test_dedup_and_remove_none_combined(self): + """Test both deduplication and None removal together.""" + results = {} + async for name, value in self.block.run( + ConcatenateListsBlock.Input( + lists=[[1, None, 2], [2, None, 3]], + deduplicate=True, + remove_none=True, + ) + ): + results[name] = value + assert results["concatenated_list"] == [1, 2, 3] + + @pytest.mark.asyncio + async def test_nested_lists_preserved(self): + """Test that nested lists are not flattened during concatenation.""" + results = {} + async for name, value in self.block.run( + ConcatenateListsBlock.Input(lists=[[[1, 2]], [[3, 4]]]) + ): + results[name] = value + assert results["concatenated_list"] == [[1, 2], [3, 4]] + + @pytest.mark.asyncio + async def test_large_lists(self): + """Test 
concatenation of large lists.""" + list_a = list(range(1000)) + list_b = list(range(1000, 2000)) + results = {} + async for name, value in self.block.run( + ConcatenateListsBlock.Input(lists=[list_a, list_b]) + ): + results[name] = value + assert results["concatenated_list"] == list(range(2000)) + assert results["length"] == 2000 + + @pytest.mark.asyncio + async def test_single_list_input(self): + """Test concatenation with a single list.""" + results = {} + async for name, value in self.block.run( + ConcatenateListsBlock.Input(lists=[[1, 2, 3]]) + ): + results[name] = value + assert results["concatenated_list"] == [1, 2, 3] + + @pytest.mark.asyncio + async def test_block_id_is_valid_uuid(self): + """Test that the block has a valid UUID4 ID.""" + import uuid + + parsed = uuid.UUID(self.block.id) + assert parsed.version == 4 + + @pytest.mark.asyncio + async def test_block_category(self): + """Test that the block has the correct category.""" + from backend.blocks._base import BlockCategory + + assert BlockCategory.BASIC in self.block.categories + + +# ============================================================================= +# FlattenListBlock Manual Tests +# ============================================================================= + + +class TestFlattenListBlockManual: + """Manual test cases for FlattenListBlock.""" + + def setup_method(self): + self.block = FlattenListBlock() + + @pytest.mark.asyncio + async def test_simple_flatten(self): + """Test flattening a simple nested list.""" + results = {} + async for name, value in self.block.run( + FlattenListBlock.Input(nested_list=[[1, 2], [3, 4]]) + ): + results[name] = value + assert results["flattened_list"] == [1, 2, 3, 4] + assert results["length"] == 4 + + @pytest.mark.asyncio + async def test_deeply_nested(self): + """Test flattening a deeply nested structure.""" + results = {} + async for name, value in self.block.run( + FlattenListBlock.Input(nested_list=[1, [2, [3, [4, [5]]]]]) + ): + results[name] = 
value + assert results["flattened_list"] == [1, 2, 3, 4, 5] + + @pytest.mark.asyncio + async def test_partial_flatten(self): + """Test flattening with max_depth=1.""" + results = {} + async for name, value in self.block.run( + FlattenListBlock.Input( + nested_list=[[1, [2, 3]], [4, [5]]], + max_depth=1, + ) + ): + results[name] = value + assert results["flattened_list"] == [1, [2, 3], 4, [5]] + + @pytest.mark.asyncio + async def test_already_flat_list(self): + """Test flattening an already flat list.""" + results = {} + async for name, value in self.block.run( + FlattenListBlock.Input(nested_list=[1, 2, 3, 4]) + ): + results[name] = value + assert results["flattened_list"] == [1, 2, 3, 4] + + @pytest.mark.asyncio + async def test_empty_nested_lists(self): + """Test flattening with empty nested lists.""" + results = {} + async for name, value in self.block.run( + FlattenListBlock.Input(nested_list=[[], [1], [], [2], []]) + ): + results[name] = value + assert results["flattened_list"] == [1, 2] + + @pytest.mark.asyncio + async def test_mixed_types_preserved(self): + """Test that non-list types are preserved during flattening.""" + results = {} + async for name, value in self.block.run( + FlattenListBlock.Input(nested_list=["hello", [1, {"a": 1}], [True]]) + ): + results[name] = value + assert results["flattened_list"] == ["hello", 1, {"a": 1}, True] + + @pytest.mark.asyncio + async def test_original_depth_reported(self): + """Test that original nesting depth is correctly reported.""" + results = {} + async for name, value in self.block.run( + FlattenListBlock.Input(nested_list=[1, [2, [3]]]) + ): + results[name] = value + assert results["original_depth"] == 3 + + @pytest.mark.asyncio + async def test_block_id_is_valid_uuid(self): + """Test that the block has a valid UUID4 ID.""" + import uuid + + parsed = uuid.UUID(self.block.id) + assert parsed.version == 4 + + +# ============================================================================= +# InterleaveListsBlock 
Manual Tests +# ============================================================================= + + +class TestInterleaveListsBlockManual: + """Manual test cases for InterleaveListsBlock.""" + + def setup_method(self): + self.block = InterleaveListsBlock() + + @pytest.mark.asyncio + async def test_equal_length_interleave(self): + """Test interleaving two equal-length lists.""" + results = {} + async for name, value in self.block.run( + InterleaveListsBlock.Input(lists=[[1, 2, 3], ["a", "b", "c"]]) + ): + results[name] = value + assert results["interleaved_list"] == [1, "a", 2, "b", 3, "c"] + + @pytest.mark.asyncio + async def test_unequal_length_interleave(self): + """Test interleaving lists of different lengths.""" + results = {} + async for name, value in self.block.run( + InterleaveListsBlock.Input(lists=[[1, 2, 3, 4], ["a", "b"]]) + ): + results[name] = value + assert results["interleaved_list"] == [1, "a", 2, "b", 3, 4] + + @pytest.mark.asyncio + async def test_three_lists_interleave(self): + """Test interleaving three lists.""" + results = {} + async for name, value in self.block.run( + InterleaveListsBlock.Input(lists=[[1, 2], ["a", "b"], ["x", "y"]]) + ): + results[name] = value + assert results["interleaved_list"] == [1, "a", "x", 2, "b", "y"] + + @pytest.mark.asyncio + async def test_single_element_lists(self): + """Test interleaving single-element lists.""" + results = {} + async for name, value in self.block.run( + InterleaveListsBlock.Input(lists=[[1], [2], [3], [4]]) + ): + results[name] = value + assert results["interleaved_list"] == [1, 2, 3, 4] + + @pytest.mark.asyncio + async def test_block_id_is_valid_uuid(self): + """Test that the block has a valid UUID4 ID.""" + import uuid + + parsed = uuid.UUID(self.block.id) + assert parsed.version == 4 + + +# ============================================================================= +# ZipListsBlock Manual Tests +# ============================================================================= + + +class 
TestZipListsBlockManual: + """Manual test cases for ZipListsBlock.""" + + def setup_method(self): + self.block = ZipListsBlock() + + @pytest.mark.asyncio + async def test_basic_zip(self): + """Test basic zipping of two lists.""" + results = {} + async for name, value in self.block.run( + ZipListsBlock.Input(lists=[[1, 2, 3], ["a", "b", "c"]]) + ): + results[name] = value + assert results["zipped_list"] == [[1, "a"], [2, "b"], [3, "c"]] + + @pytest.mark.asyncio + async def test_truncate_to_shortest(self): + """Test that default behavior truncates to shortest list.""" + results = {} + async for name, value in self.block.run( + ZipListsBlock.Input(lists=[[1, 2, 3], ["a", "b"]]) + ): + results[name] = value + assert results["zipped_list"] == [[1, "a"], [2, "b"]] + assert results["length"] == 2 + + @pytest.mark.asyncio + async def test_pad_to_longest(self): + """Test padding shorter lists with fill value.""" + results = {} + async for name, value in self.block.run( + ZipListsBlock.Input( + lists=[[1, 2, 3], ["a"]], + pad_to_longest=True, + fill_value="X", + ) + ): + results[name] = value + assert results["zipped_list"] == [[1, "a"], [2, "X"], [3, "X"]] + + @pytest.mark.asyncio + async def test_pad_with_none(self): + """Test padding with None (default fill value).""" + results = {} + async for name, value in self.block.run( + ZipListsBlock.Input( + lists=[[1, 2], ["a"]], + pad_to_longest=True, + ) + ): + results[name] = value + assert results["zipped_list"] == [[1, "a"], [2, None]] + + @pytest.mark.asyncio + async def test_three_lists_zip(self): + """Test zipping three lists.""" + results = {} + async for name, value in self.block.run( + ZipListsBlock.Input(lists=[[1, 2], ["a", "b"], [True, False]]) + ): + results[name] = value + assert results["zipped_list"] == [[1, "a", True], [2, "b", False]] + + @pytest.mark.asyncio + async def test_empty_lists_zip(self): + """Test zipping empty input.""" + results = {} + async for name, value in 
self.block.run(ZipListsBlock.Input(lists=[])): + results[name] = value + assert results["zipped_list"] == [] + assert results["length"] == 0 + + @pytest.mark.asyncio + async def test_block_id_is_valid_uuid(self): + """Test that the block has a valid UUID4 ID.""" + import uuid + + parsed = uuid.UUID(self.block.id) + assert parsed.version == 4 + + +# ============================================================================= +# ListDifferenceBlock Manual Tests +# ============================================================================= + + +class TestListDifferenceBlockManual: + """Manual test cases for ListDifferenceBlock.""" + + def setup_method(self): + self.block = ListDifferenceBlock() + + @pytest.mark.asyncio + async def test_basic_difference(self): + """Test basic set difference.""" + results = {} + async for name, value in self.block.run( + ListDifferenceBlock.Input( + list_a=[1, 2, 3, 4, 5], + list_b=[3, 4, 5, 6, 7], + ) + ): + results[name] = value + assert results["difference"] == [1, 2] + + @pytest.mark.asyncio + async def test_symmetric_difference(self): + """Test symmetric difference.""" + results = {} + async for name, value in self.block.run( + ListDifferenceBlock.Input( + list_a=[1, 2, 3], + list_b=[2, 3, 4], + symmetric=True, + ) + ): + results[name] = value + assert results["difference"] == [1, 4] + + @pytest.mark.asyncio + async def test_no_difference(self): + """Test when lists are identical.""" + results = {} + async for name, value in self.block.run( + ListDifferenceBlock.Input( + list_a=[1, 2, 3], + list_b=[1, 2, 3], + ) + ): + results[name] = value + assert results["difference"] == [] + assert results["length"] == 0 + + @pytest.mark.asyncio + async def test_complete_difference(self): + """Test when lists share no elements.""" + results = {} + async for name, value in self.block.run( + ListDifferenceBlock.Input( + list_a=[1, 2, 3], + list_b=[4, 5, 6], + ) + ): + results[name] = value + assert results["difference"] == [1, 2, 3] + + 
@pytest.mark.asyncio + async def test_empty_list_a(self): + """Test with empty list_a.""" + results = {} + async for name, value in self.block.run( + ListDifferenceBlock.Input(list_a=[], list_b=[1, 2, 3]) + ): + results[name] = value + assert results["difference"] == [] + + @pytest.mark.asyncio + async def test_empty_list_b(self): + """Test with empty list_b.""" + results = {} + async for name, value in self.block.run( + ListDifferenceBlock.Input(list_a=[1, 2, 3], list_b=[]) + ): + results[name] = value + assert results["difference"] == [1, 2, 3] + + @pytest.mark.asyncio + async def test_string_difference(self): + """Test difference with string elements.""" + results = {} + async for name, value in self.block.run( + ListDifferenceBlock.Input( + list_a=["apple", "banana", "cherry"], + list_b=["banana", "date"], + ) + ): + results[name] = value + assert results["difference"] == ["apple", "cherry"] + + @pytest.mark.asyncio + async def test_dict_difference(self): + """Test difference with dictionary elements.""" + results = {} + async for name, value in self.block.run( + ListDifferenceBlock.Input( + list_a=[{"a": 1}, {"b": 2}, {"c": 3}], + list_b=[{"b": 2}], + ) + ): + results[name] = value + assert results["difference"] == [{"a": 1}, {"c": 3}] + + @pytest.mark.asyncio + async def test_block_id_is_valid_uuid(self): + """Test that the block has a valid UUID4 ID.""" + import uuid + + parsed = uuid.UUID(self.block.id) + assert parsed.version == 4 + + +# ============================================================================= +# ListIntersectionBlock Manual Tests +# ============================================================================= + + +class TestListIntersectionBlockManual: + """Manual test cases for ListIntersectionBlock.""" + + def setup_method(self): + self.block = ListIntersectionBlock() + + @pytest.mark.asyncio + async def test_basic_intersection(self): + """Test basic intersection.""" + results = {} + async for name, value in self.block.run( + 
ListIntersectionBlock.Input( + list_a=[1, 2, 3, 4, 5], + list_b=[3, 4, 5, 6, 7], + ) + ): + results[name] = value + assert results["intersection"] == [3, 4, 5] + assert results["length"] == 3 + + @pytest.mark.asyncio + async def test_no_intersection(self): + """Test when lists share no elements.""" + results = {} + async for name, value in self.block.run( + ListIntersectionBlock.Input( + list_a=[1, 2, 3], + list_b=[4, 5, 6], + ) + ): + results[name] = value + assert results["intersection"] == [] + assert results["length"] == 0 + + @pytest.mark.asyncio + async def test_identical_lists(self): + """Test intersection of identical lists.""" + results = {} + async for name, value in self.block.run( + ListIntersectionBlock.Input( + list_a=[1, 2, 3], + list_b=[1, 2, 3], + ) + ): + results[name] = value + assert results["intersection"] == [1, 2, 3] + + @pytest.mark.asyncio + async def test_preserves_order_from_list_a(self): + """Test that intersection preserves order from list_a.""" + results = {} + async for name, value in self.block.run( + ListIntersectionBlock.Input( + list_a=[5, 3, 1], + list_b=[1, 3, 5], + ) + ): + results[name] = value + assert results["intersection"] == [5, 3, 1] + + @pytest.mark.asyncio + async def test_empty_list_a(self): + """Test with empty list_a.""" + results = {} + async for name, value in self.block.run( + ListIntersectionBlock.Input(list_a=[], list_b=[1, 2, 3]) + ): + results[name] = value + assert results["intersection"] == [] + + @pytest.mark.asyncio + async def test_empty_list_b(self): + """Test with empty list_b.""" + results = {} + async for name, value in self.block.run( + ListIntersectionBlock.Input(list_a=[1, 2, 3], list_b=[]) + ): + results[name] = value + assert results["intersection"] == [] + + @pytest.mark.asyncio + async def test_string_intersection(self): + """Test intersection with string elements.""" + results = {} + async for name, value in self.block.run( + ListIntersectionBlock.Input( + list_a=["apple", "banana", 
"cherry"], + list_b=["banana", "cherry", "date"], + ) + ): + results[name] = value + assert results["intersection"] == ["banana", "cherry"] + + @pytest.mark.asyncio + async def test_deduplication_in_intersection(self): + """Test that duplicates in input don't cause duplicate results.""" + results = {} + async for name, value in self.block.run( + ListIntersectionBlock.Input( + list_a=[1, 1, 2, 2, 3], + list_b=[1, 2], + ) + ): + results[name] = value + assert results["intersection"] == [1, 2] + + @pytest.mark.asyncio + async def test_block_id_is_valid_uuid(self): + """Test that the block has a valid UUID4 ID.""" + import uuid + + parsed = uuid.UUID(self.block.id) + assert parsed.version == 4 + + +# ============================================================================= +# Block Method Tests +# ============================================================================= + + +class TestConcatenateListsBlockMethods: + """Tests for internal methods of ConcatenateListsBlock.""" + + def setup_method(self): + self.block = ConcatenateListsBlock() + + def test_validate_inputs_valid(self): + assert self.block._validate_inputs([[1], [2]]) is None + + def test_validate_inputs_invalid(self): + result = self.block._validate_inputs([[1], "bad"]) + assert result is not None + + def test_perform_concatenation(self): + result = self.block._perform_concatenation([[1, 2], [3, 4]]) + assert result == [1, 2, 3, 4] + + def test_apply_deduplication(self): + result = self.block._apply_deduplication([1, 2, 2, 3]) + assert result == [1, 2, 3] + + def test_apply_none_removal(self): + result = self.block._apply_none_removal([1, None, 2]) + assert result == [1, 2] + + def test_post_process_all_options(self): + result = self.block._post_process( + [1, None, 2, None, 2], deduplicate=True, remove_none=True + ) + assert result == [1, 2] + + def test_post_process_no_options(self): + result = self.block._post_process( + [1, None, 2, None, 2], deduplicate=False, remove_none=False + ) + assert 
result == [1, None, 2, None, 2] + + +class TestFlattenListBlockMethods: + """Tests for internal methods of FlattenListBlock.""" + + def setup_method(self): + self.block = FlattenListBlock() + + def test_compute_depth_flat(self): + assert self.block._compute_depth([1, 2, 3]) == 1 + + def test_compute_depth_nested(self): + assert self.block._compute_depth([[1, [2]]]) == 3 + + def test_flatten_unlimited(self): + result = self.block._flatten([1, [2, [3]]], max_depth=-1) + assert result == [1, 2, 3] + + def test_flatten_limited(self): + result = self.block._flatten([1, [2, [3]]], max_depth=1) + assert result == [1, 2, [3]] + + def test_validate_max_depth_valid(self): + assert self.block._validate_max_depth(-1) is None + assert self.block._validate_max_depth(0) is None + assert self.block._validate_max_depth(5) is None + + def test_validate_max_depth_invalid(self): + result = self.block._validate_max_depth(-2) + assert result is not None + + +class TestZipListsBlockMethods: + """Tests for internal methods of ZipListsBlock.""" + + def setup_method(self): + self.block = ZipListsBlock() + + def test_zip_truncate(self): + result = self.block._zip_truncate([[1, 2, 3], ["a", "b"]]) + assert result == [[1, "a"], [2, "b"]] + + def test_zip_pad(self): + result = self.block._zip_pad([[1, 2, 3], ["a"]], fill_value="X") + assert result == [[1, "a"], [2, "X"], [3, "X"]] + + def test_zip_pad_empty(self): + result = self.block._zip_pad([], fill_value=None) + assert result == [] + + def test_validate_inputs(self): + assert self.block._validate_inputs([[1], [2]]) is None + result = self.block._validate_inputs([[1], "bad"]) + assert result is not None + + +class TestListDifferenceBlockMethods: + """Tests for internal methods of ListDifferenceBlock.""" + + def setup_method(self): + self.block = ListDifferenceBlock() + + def test_compute_difference(self): + result = self.block._compute_difference([1, 2, 3], [2, 3, 4]) + assert result == [1] + + def test_compute_symmetric_difference(self): + 
result = self.block._compute_symmetric_difference([1, 2, 3], [2, 3, 4]) + assert result == [1, 4] + + def test_compute_difference_empty(self): + result = self.block._compute_difference([], [1, 2]) + assert result == [] + + def test_compute_symmetric_difference_identical(self): + result = self.block._compute_symmetric_difference([1, 2], [1, 2]) + assert result == [] + + +class TestListIntersectionBlockMethods: + """Tests for internal methods of ListIntersectionBlock.""" + + def setup_method(self): + self.block = ListIntersectionBlock() + + def test_compute_intersection(self): + result = self.block._compute_intersection([1, 2, 3], [2, 3, 4]) + assert result == [2, 3] + + def test_compute_intersection_empty(self): + result = self.block._compute_intersection([], [1, 2]) + assert result == [] + + def test_compute_intersection_no_overlap(self): + result = self.block._compute_intersection([1, 2], [3, 4]) + assert result == [] diff --git a/docs/integrations/README.md b/docs/integrations/README.md index c216aa4836..00d4b0c73a 100644 --- a/docs/integrations/README.md +++ b/docs/integrations/README.md @@ -56,12 +56,16 @@ Below is a comprehensive list of all available blocks, categorized by their prim | [File Store](block-integrations/basic.md#file-store) | Downloads and stores a file from a URL, data URI, or local path | | [Find In Dictionary](block-integrations/basic.md#find-in-dictionary) | A block that looks up a value in a dictionary, list, or object by key or index and returns the corresponding value | | [Find In List](block-integrations/basic.md#find-in-list) | Finds the index of the value in the list | +| [Flatten List](block-integrations/basic.md#flatten-list) | Flattens a nested list structure into a single flat list | | [Get All Memories](block-integrations/basic.md#get-all-memories) | Retrieve all memories from Mem0 with optional conversation filtering | | [Get Latest Memory](block-integrations/basic.md#get-latest-memory) | Retrieve the latest memory from Mem0 with 
optional key filtering | | [Get List Item](block-integrations/basic.md#get-list-item) | Returns the element at the given index | | [Get Store Agent Details](block-integrations/system/store_operations.md#get-store-agent-details) | Get detailed information about an agent from the store | | [Get Weather Information](block-integrations/basic.md#get-weather-information) | Retrieves weather information for a specified location using OpenWeatherMap API | | [Human In The Loop](block-integrations/basic.md#human-in-the-loop) | Pause execution for human review | +| [Interleave Lists](block-integrations/basic.md#interleave-lists) | Interleaves elements from multiple lists in round-robin fashion, alternating between sources | +| [List Difference](block-integrations/basic.md#list-difference) | Computes the difference between two lists | +| [List Intersection](block-integrations/basic.md#list-intersection) | Computes the intersection of two lists, returning only elements present in both | | [List Is Empty](block-integrations/basic.md#list-is-empty) | Checks if a list is empty | | [List Library Agents](block-integrations/system/library_operations.md#list-library-agents) | List all agents in your personal library | | [Note](block-integrations/basic.md#note) | A visual annotation block that displays a sticky note in the workflow editor for documentation and organization purposes | @@ -84,6 +88,7 @@ Below is a comprehensive list of all available blocks, categorized by their prim | [Store Value](block-integrations/basic.md#store-value) | A basic block that stores and forwards a value throughout workflows, allowing it to be reused without changes across multiple blocks | | [Universal Type Converter](block-integrations/basic.md#universal-type-converter) | This block is used to convert a value to a universal type | | [XML Parser](block-integrations/basic.md#xml-parser) | Parses XML using gravitasml to tokenize and coverts it to dict | +| [Zip Lists](block-integrations/basic.md#zip-lists) 
| Zips multiple lists together into a list of grouped elements | ## Data Processing diff --git a/docs/integrations/block-integrations/basic.md b/docs/integrations/block-integrations/basic.md index 08def38ede..e032690edc 100644 --- a/docs/integrations/block-integrations/basic.md +++ b/docs/integrations/block-integrations/basic.md @@ -637,7 +637,7 @@ This enables extensibility by allowing custom blocks to be added without modifyi ## Concatenate Lists ### What it is -Concatenates multiple lists into a single list. All elements from all input lists are combined in order. +Concatenates multiple lists into a single list. All elements from all input lists are combined in order. Supports optional deduplication and None removal. ### How it works @@ -651,6 +651,8 @@ The block includes validation to ensure each item is actually a list. If a non-l | Input | Description | Type | Required | |-------|-------------|------|----------| | lists | A list of lists to concatenate together. All lists will be combined in order into a single list. | List[List[Any]] | Yes | +| deduplicate | If True, remove duplicate elements from the concatenated result while preserving order. | bool | No | +| remove_none | If True, remove None values from the concatenated result. | bool | No | ### Outputs @@ -658,6 +660,7 @@ The block includes validation to ensure each item is actually a list. If a non-l |--------|-------------|------| | error | Error message if concatenation failed due to invalid input types. | str | | concatenated_list | The concatenated list containing all elements from all input lists in order. | List[Any] | +| length | The total number of elements in the concatenated list. | int | ### Possible use case @@ -820,6 +823,45 @@ This enables conditional logic based on list membership and helps locate items f --- +## Flatten List + +### What it is +Flattens a nested list structure into a single flat list. Supports configurable maximum flattening depth. 
+ +### How it works + +This block recursively traverses a nested list and extracts all leaf elements into a single flat list. You can control how deep the flattening goes with the max_depth parameter: set it to -1 to flatten completely, or to a positive integer to flatten only that many levels. + +The block also reports the original nesting depth of the input, which is useful for understanding the structure of data coming from sources with varying levels of nesting. + + +### Inputs + +| Input | Description | Type | Required | +|-------|-------------|------|----------| +| nested_list | A potentially nested list to flatten into a single-level list. | List[Any] | Yes | +| max_depth | Maximum depth to flatten. -1 means flatten completely. 1 means flatten only one level. | int | No | + +### Outputs + +| Output | Description | Type | +|--------|-------------|------| +| error | Error message if flattening failed. | str | +| flattened_list | The flattened list with all nested elements extracted. | List[Any] | +| length | The number of elements in the flattened list. | int | +| original_depth | The maximum nesting depth of the original input list. | int | + +### Possible use case + +**Normalizing API Responses**: Flatten nested JSON arrays from different API endpoints into a uniform single-level list for consistent processing. + +**Aggregating Nested Results**: Combine results from recursive file searches or nested category trees into a flat list of items for display or export. + +**Data Pipeline Cleanup**: Simplify deeply nested data structures from multiple transformation steps into a clean flat list before final output. + + +--- + ## Get All Memories ### What it is @@ -1012,6 +1054,120 @@ This enables human oversight at critical points in automated workflows, ensuring --- +## Interleave Lists + +### What it is +Interleaves elements from multiple lists in round-robin fashion, alternating between sources. 
+ +### How it works + +This block takes elements from each input list in round-robin order, picking one element from each list in turn. For example, given `[[1, 2, 3], ['a', 'b', 'c']]`, it produces `[1, 'a', 2, 'b', 3, 'c']`. + +When lists have different lengths, shorter lists stop contributing once exhausted, and remaining elements from longer lists continue to be added in order. + + +### Inputs + +| Input | Description | Type | Required | +|-------|-------------|------|----------| +| lists | A list of lists to interleave. Elements will be taken in round-robin order. | List[List[Any]] | Yes | + +### Outputs + +| Output | Description | Type | +|--------|-------------|------| +| error | Error message if interleaving failed. | str | +| interleaved_list | The interleaved list with elements alternating from each input list. | List[Any] | +| length | The total number of elements in the interleaved list. | int | + +### Possible use case + +**Balanced Content Mixing**: Alternate between content from different sources (e.g., mixing promotional and organic posts) for a balanced feed. + +**Round-Robin Scheduling**: Distribute tasks evenly across workers or queues by interleaving items from separate task lists. + +**Multi-Language Output**: Weave together translated text segments with their original counterparts for side-by-side comparison. + + +--- + +## List Difference + +### What it is +Computes the difference between two lists. Returns elements in the first list not found in the second, or symmetric difference. + +### How it works + +This block compares two lists and returns elements from list_a that do not appear in list_b. It uses hash-based lookup for efficient comparison. When symmetric mode is enabled, it returns elements that are in either list but not in both. + +The order of elements from list_a is preserved in the output, and elements from list_b are appended when using symmetric difference. 
+ + +### Inputs + +| Input | Description | Type | Required | +|-------|-------------|------|----------| +| list_a | The primary list to check elements from. | List[Any] | Yes | +| list_b | The list to subtract. Elements found here will be removed from list_a. | List[Any] | Yes | +| symmetric | If True, compute symmetric difference (elements in either list but not both). | bool | No | + +### Outputs + +| Output | Description | Type | +|--------|-------------|------| +| error | Error message if the operation failed. | str | +| difference | Elements from list_a not found in list_b (or symmetric difference if enabled). | List[Any] | +| length | The number of elements in the difference result. | int | + +### Possible use case + +**Change Detection**: Compare a current list of records against a previous snapshot to find newly added or removed items. + +**Exclusion Filtering**: Remove items from a list that appear in a blocklist or already-processed list to avoid duplicates. + +**Data Sync**: Identify which items exist in one system but not another to determine what needs to be synced. + + +--- + +## List Intersection + +### What it is +Computes the intersection of two lists, returning only elements present in both. + +### How it works + +This block finds elements that appear in both input lists by hashing elements from list_b for efficient lookup, then checking each element of list_a against that set. The output preserves the order from list_a and removes duplicates. + +This is useful for finding common items between two datasets without needing to manually iterate or compare. + + +### Inputs + +| Input | Description | Type | Required | +|-------|-------------|------|----------| +| list_a | The first list to intersect. | List[Any] | Yes | +| list_b | The second list to intersect. | List[Any] | Yes | + +### Outputs + +| Output | Description | Type | +|--------|-------------|------| +| error | Error message if the operation failed. 
| str | +| intersection | Elements present in both list_a and list_b. | List[Any] | +| length | The number of elements in the intersection. | int | + +### Possible use case + +**Finding Common Tags**: Identify shared tags or categories between two items for recommendation or grouping purposes. + +**Mutual Connections**: Find users or contacts that appear in both of two different lists, such as shared friends or overlapping team members. + +**Feature Comparison**: Determine which features or capabilities are supported by both of two systems or products. + + +--- + ## List Is Empty ### What it is @@ -1452,3 +1608,42 @@ This makes XML data accessible using standard dictionary operations, allowing yo --- + +## Zip Lists + +### What it is +Zips multiple lists together into a list of grouped elements. Supports padding to longest or truncating to shortest. + +### How it works + +This block pairs up corresponding elements from multiple input lists into sub-lists. For example, zipping `[[1, 2, 3], ['a', 'b', 'c']]` produces `[[1, 'a'], [2, 'b'], [3, 'c']]`. + +By default, the result is truncated to the length of the shortest input list. Enable pad_to_longest to instead pad shorter lists with a fill_value so no elements from longer lists are lost. + + +### Inputs + +| Input | Description | Type | Required | +|-------|-------------|------|----------| +| lists | A list of lists to zip together. Corresponding elements will be grouped. | List[List[Any]] | Yes | +| pad_to_longest | If True, pad shorter lists with fill_value to match the longest list. If False, truncate to shortest. | bool | No | +| fill_value | Value to use for padding when pad_to_longest is True. | Fill Value | No | + +### Outputs + +| Output | Description | Type | +|--------|-------------|------| +| error | Error message if zipping failed. | str | +| zipped_list | The zipped list of grouped elements. | List[List[Any]] | +| length | The number of groups in the zipped result. 
| int | + +### Possible use case + +**Creating Key-Value Pairs**: Combine a list of field names with a list of values to build structured records or dictionaries. + +**Parallel Data Alignment**: Pair up corresponding items from separate data sources (e.g., names and email addresses) for processing together. + +**Table Row Construction**: Group column data into rows by zipping each column's values together for CSV export or display. + + +--- From e2d3c8a21761cbbdee88f608833ffcab35eba79e Mon Sep 17 00:00:00 2001 From: Abhimanyu Yadav <122007096+Abhi1992002@users.noreply.github.com> Date: Mon, 16 Feb 2026 12:29:33 +0530 Subject: [PATCH 5/7] fix(frontend): Prevent node drag when selecting text in object editor key input (#11955) ## Summary - Add `nodrag` class to the key name input wrapper in `WrapIfAdditionalTemplate.tsx` - This prevents the node from being dragged when users try to select text in the key name input field - Follows the same pattern used by other input components like `TextWidget.tsx` ## Test plan - [x] Open the new builder - [x] Add a custom node with an Object input field - [x] Try to select text in the key name input by clicking and dragging - [x] Verify that text selection works without moving the block Co-authored-by: Claude --- .../InputRenderer/base/object/WrapIfAdditionalTemplate.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt_platform/frontend/src/components/renderers/InputRenderer/base/object/WrapIfAdditionalTemplate.tsx b/autogpt_platform/frontend/src/components/renderers/InputRenderer/base/object/WrapIfAdditionalTemplate.tsx index 97478e9eaf..a8b3514d41 100644 --- a/autogpt_platform/frontend/src/components/renderers/InputRenderer/base/object/WrapIfAdditionalTemplate.tsx +++ b/autogpt_platform/frontend/src/components/renderers/InputRenderer/base/object/WrapIfAdditionalTemplate.tsx @@ -80,7 +80,7 @@ export default function WrapIfAdditionalTemplate( uiSchema={uiSchema} /> {!isHandleConnected && ( -
+
Date: Mon, 16 Feb 2026 18:29:59 +0800 Subject: [PATCH 6/7] refactor(frontend): remove OldAgentLibraryView and NEW_AGENT_RUNS flag (#12088) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary - Removes the deprecated `OldAgentLibraryView` directory (13 files, ~2200 lines deleted) - Removes the `NEW_AGENT_RUNS` feature flag from the `Flag` enum and defaults - Removes the legacy agent library page at `library/legacy/[id]` - Moves shared `CronScheduler` components to `src/components/contextual/CronScheduler/` - Moves `agent-run-draft-view` and `agent-status-chip` to `legacy-builder/` (co-located with their only consumer) - Updates all import paths in consuming files (`AgentInfoStep`, `SaveControl`, `RunnerInputUI`, `useRunGraph`) ## Test plan - [x] `pnpm format` passes - [x] `pnpm types` passes (no TypeScript errors) - [x] No remaining references to `OldAgentLibraryView`, `NEW_AGENT_RUNS`, or `new-agent-runs` in the codebase - [x] Verify `RunnerInputUI` dialog still works in the legacy builder - [x] Verify `AgentInfoStep` cron scheduling works in the publish modal - [x] Verify `SaveControl` cron scheduling works in the legacy builder 🤖 Generated with [Claude Code](https://claude.com/claude-code)

Greptile Overview

Greptile Summary

This PR removes deprecated code from the legacy agent library view system and consolidates the codebase to use the new agent runs implementation exclusively. The refactor successfully removes ~2200 lines of code across 13 deleted files while properly relocating shared components. **Key changes:** - Removed the entire `OldAgentLibraryView` directory and its 13 component files - Removed the `NEW_AGENT_RUNS` feature flag from the `Flag` enum and defaults - Deleted the legacy agent library page route at `library/legacy/[id]` - Moved `CronScheduler` components to `src/components/contextual/CronScheduler/` for shared use across the application - Moved `agent-run-draft-view` and `agent-status-chip` to `legacy-builder/` directory, co-locating them with their only consumer - Updated `useRunGraph.ts` to import `GraphExecutionMeta` from the generated API models instead of the deleted custom type definition - Updated all import paths in consuming components (`AgentInfoStep`, `SaveControl`, `RunnerInputUI`) **Technical notes:** - The new import path for `GraphExecutionMeta` (`@/app/api/__generated__/models/graphExecutionMeta`) will be generated when running `pnpm generate:api` from the OpenAPI spec - All references to the old code have been cleanly removed from the codebase - The refactor maintains proper separation of concerns by moving shared components to contextual locations

Confidence Score: 4/5

- This PR is safe to merge with minimal risk, pending manual verification of the UI components mentioned in the test plan - The refactor is well-structured and all code changes are correct. The score of 4 (rather than 5) reflects that the PR author has marked three manual testing items as incomplete in the test plan: verifying `RunnerInputUI` dialog, `AgentInfoStep` cron scheduling, and `SaveControl` cron scheduling. While the code changes are sound, these UI components should be manually tested before merging to ensure the moved components work correctly in their new locations. - No files require special attention. The author should complete the manual testing checklist items for `RunnerInputUI`, `AgentInfoStep`, and `SaveControl` as noted in the test plan.

Sequence Diagram

```mermaid sequenceDiagram participant Dev as Developer participant FE as Frontend Build participant API as Backend API participant Gen as Generated Types Note over Dev,Gen: Refactor: Remove OldAgentLibraryView & NEW_AGENT_RUNS flag Dev->>FE: Delete OldAgentLibraryView (13 files, ~2200 lines) Dev->>FE: Remove NEW_AGENT_RUNS from Flag enum Dev->>FE: Delete library/legacy/[id]/page.tsx Dev->>FE: Move CronScheduler → src/components/contextual/ Dev->>FE: Move agent-run-draft-view → legacy-builder/ Dev->>FE: Move agent-status-chip → legacy-builder/ Dev->>FE: Update RunnerInputUI import path Dev->>FE: Update SaveControl import path Dev->>FE: Update AgentInfoStep import path Dev->>FE: Update useRunGraph.ts FE->>Gen: Import GraphExecutionMeta from generated models Note over Gen: Type available after pnpm generate:api Gen-->>API: Uses OpenAPI spec schema API-->>FE: Type-safe GraphExecutionMeta model ```
Co-authored-by: Claude Opus 4.6 --- .../components/RunGraph/useRunGraph.ts | 2 +- .../legacy-builder/RunnerInputUI.tsx | 2 +- .../components/legacy-builder/SaveControl.tsx | 2 +- .../legacy-builder}/agent-run-draft-view.tsx | 7 +- .../legacy-builder}/agent-status-chip.tsx | 0 .../OldAgentLibraryView.tsx | 631 ------------------ .../components/agent-run-details-view.tsx | 445 ------------ .../components/agent-run-output-view.tsx | 178 ----- .../components/agent-run-status-chip.tsx | 68 -- .../components/agent-run-summary-card.tsx | 130 ---- .../components/agent-runs-selector-list.tsx | 237 ------- .../agent-schedule-details-view.tsx | 180 ----- .../components/create-preset-dialog.tsx | 100 --- .../OldAgentLibraryView/use-agent-runs.ts | 210 ------ .../(platform)/library/legacy/[id]/page.tsx | 7 - .../CronScheduler}/cron-scheduler-dialog.tsx | 2 +- .../CronScheduler}/cron-scheduler.tsx | 0 .../AgentInfoStep/AgentInfoStep.tsx | 2 +- .../services/feature-flags/use-get-flag.ts | 2 - 19 files changed, 10 insertions(+), 2195 deletions(-) rename autogpt_platform/frontend/src/app/(platform)/{library/agents/[id]/components/OldAgentLibraryView/components => build/components/legacy-builder}/agent-run-draft-view.tsx (99%) rename autogpt_platform/frontend/src/app/(platform)/{library/agents/[id]/components/OldAgentLibraryView/components => build/components/legacy-builder}/agent-status-chip.tsx (100%) delete mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/OldAgentLibraryView.tsx delete mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-details-view.tsx delete mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-output-view.tsx delete mode 100644 
autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-status-chip.tsx delete mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-summary-card.tsx delete mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-runs-selector-list.tsx delete mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-schedule-details-view.tsx delete mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/create-preset-dialog.tsx delete mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/use-agent-runs.ts delete mode 100644 autogpt_platform/frontend/src/app/(platform)/library/legacy/[id]/page.tsx rename autogpt_platform/frontend/src/{app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components => components/contextual/CronScheduler}/cron-scheduler-dialog.tsx (97%) rename autogpt_platform/frontend/src/{app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components => components/contextual/CronScheduler}/cron-scheduler.tsx (100%) diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/useRunGraph.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/useRunGraph.ts index 6980e95f11..51bb57057f 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/useRunGraph.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/useRunGraph.ts @@ -4,7 +4,7 @@ import { } from "@/app/api/__generated__/endpoints/graphs/graphs"; import { useToast } from 
"@/components/molecules/Toast/use-toast"; import { parseAsInteger, parseAsString, useQueryStates } from "nuqs"; -import { GraphExecutionMeta } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/use-agent-runs"; +import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta"; import { useGraphStore } from "@/app/(platform)/build/stores/graphStore"; import { useShallow } from "zustand/react/shallow"; import { useEffect, useState } from "react"; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/RunnerInputUI.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/RunnerInputUI.tsx index cb06a79683..f7d59a5693 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/RunnerInputUI.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/RunnerInputUI.tsx @@ -1,6 +1,6 @@ import { useCallback } from "react"; -import { AgentRunDraftView } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view"; +import { AgentRunDraftView } from "@/app/(platform)/build/components/legacy-builder/agent-run-draft-view"; import { Dialog } from "@/components/molecules/Dialog/Dialog"; import type { CredentialsMetaInput, diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/SaveControl.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/SaveControl.tsx index dcaa0f6264..3ee5217354 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/SaveControl.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/SaveControl.tsx @@ -18,7 +18,7 @@ import { import { useToast } from "@/components/molecules/Toast/use-toast"; import { useQueryClient } from "@tanstack/react-query"; import { getGetV2ListMySubmissionsQueryKey } from 
"@/app/api/__generated__/endpoints/store/store"; -import { CronExpressionDialog } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/cron-scheduler-dialog"; +import { CronExpressionDialog } from "@/components/contextual/CronScheduler/cron-scheduler-dialog"; import { humanizeCronExpression } from "@/lib/cron-expression-utils"; import { CalendarClockIcon } from "lucide-react"; diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/agent-run-draft-view.tsx similarity index 99% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx rename to autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/agent-run-draft-view.tsx index b0c3a6ff7b..372d479299 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/agent-run-draft-view.tsx @@ -20,7 +20,7 @@ import { import { useBackendAPI } from "@/lib/autogpt-server-api/context"; import { RunAgentInputs } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs"; -import { ScheduleTaskDialog } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/cron-scheduler-dialog"; +import { ScheduleTaskDialog } from "@/components/contextual/CronScheduler/cron-scheduler-dialog"; import ActionButtonGroup from "@/components/__legacy__/action-button-group"; import type { ButtonAction } from "@/components/__legacy__/types"; import { @@ -53,7 +53,10 @@ import { ClockIcon, CopyIcon, InfoIcon } from "@phosphor-icons/react"; import { CalendarClockIcon, Trash2Icon } from 
"lucide-react"; import { analytics } from "@/services/analytics"; -import { AgentStatus, AgentStatusChip } from "./agent-status-chip"; +import { + AgentStatus, + AgentStatusChip, +} from "@/app/(platform)/build/components/legacy-builder/agent-status-chip"; export function AgentRunDraftView({ graph, diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-status-chip.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/agent-status-chip.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-status-chip.tsx rename to autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/agent-status-chip.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/OldAgentLibraryView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/OldAgentLibraryView.tsx deleted file mode 100644 index 54cc07878d..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/OldAgentLibraryView.tsx +++ /dev/null @@ -1,631 +0,0 @@ -"use client"; -import { useParams, useRouter } from "next/navigation"; -import { useQueryState } from "nuqs"; -import React, { - useCallback, - useEffect, - useMemo, - useRef, - useState, -} from "react"; - -import { - Graph, - GraphExecution, - GraphExecutionID, - GraphExecutionMeta, - GraphID, - LibraryAgent, - LibraryAgentID, - LibraryAgentPreset, - LibraryAgentPresetID, - Schedule, - ScheduleID, -} from "@/lib/autogpt-server-api"; -import { useBackendAPI } from "@/lib/autogpt-server-api/context"; -import { exportAsJSONFile } from "@/lib/utils"; - -import DeleteConfirmDialog from "@/components/__legacy__/delete-confirm-dialog"; -import type { ButtonAction } from "@/components/__legacy__/types"; -import { 
Button } from "@/components/__legacy__/ui/button"; -import { - Dialog, - DialogContent, - DialogDescription, - DialogFooter, - DialogHeader, - DialogTitle, -} from "@/components/__legacy__/ui/dialog"; -import LoadingBox, { LoadingSpinner } from "@/components/__legacy__/ui/loading"; -import { - useToast, - useToastOnFail, -} from "@/components/molecules/Toast/use-toast"; -import { AgentRunDetailsView } from "./components/agent-run-details-view"; -import { AgentRunDraftView } from "./components/agent-run-draft-view"; -import { CreatePresetDialog } from "./components/create-preset-dialog"; -import { useAgentRunsInfinite } from "./use-agent-runs"; -import { AgentRunsSelectorList } from "./components/agent-runs-selector-list"; -import { AgentScheduleDetailsView } from "./components/agent-schedule-details-view"; - -export function OldAgentLibraryView() { - const { id: agentID }: { id: LibraryAgentID } = useParams(); - const [executionId, setExecutionId] = useQueryState("executionId"); - const toastOnFail = useToastOnFail(); - const { toast } = useToast(); - const router = useRouter(); - const api = useBackendAPI(); - - // ============================ STATE ============================= - - const [graph, setGraph] = useState(null); // Graph version corresponding to LibraryAgent - const [agent, setAgent] = useState(null); - const agentRunsQuery = useAgentRunsInfinite(graph?.id); // only runs once graph.id is known - const agentRuns = agentRunsQuery.agentRuns; - const [agentPresets, setAgentPresets] = useState([]); - const [schedules, setSchedules] = useState([]); - const [selectedView, selectView] = useState< - | { type: "run"; id?: GraphExecutionID } - | { type: "preset"; id: LibraryAgentPresetID } - | { type: "schedule"; id: ScheduleID } - >({ type: "run" }); - const [selectedRun, setSelectedRun] = useState< - GraphExecution | GraphExecutionMeta | null - >(null); - const selectedSchedule = - selectedView.type == "schedule" - ? 
schedules.find((s) => s.id == selectedView.id) - : null; - const [isFirstLoad, setIsFirstLoad] = useState(true); - const [agentDeleteDialogOpen, setAgentDeleteDialogOpen] = - useState(false); - const [confirmingDeleteAgentRun, setConfirmingDeleteAgentRun] = - useState(null); - const [confirmingDeleteAgentPreset, setConfirmingDeleteAgentPreset] = - useState(null); - const [copyAgentDialogOpen, setCopyAgentDialogOpen] = useState(false); - const [creatingPresetFromExecutionID, setCreatingPresetFromExecutionID] = - useState(null); - - // Set page title with agent name - useEffect(() => { - if (agent) { - document.title = `${agent.name} - Library - AutoGPT Platform`; - } - }, [agent]); - - const openRunDraftView = useCallback(() => { - selectView({ type: "run" }); - }, []); - - const selectRun = useCallback((id: GraphExecutionID) => { - selectView({ type: "run", id }); - }, []); - - const selectPreset = useCallback((id: LibraryAgentPresetID) => { - selectView({ type: "preset", id }); - }, []); - - const selectSchedule = useCallback((id: ScheduleID) => { - selectView({ type: "schedule", id }); - }, []); - - const graphVersions = useRef>({}); - const loadingGraphVersions = useRef>>({}); - const getGraphVersion = useCallback( - async (graphID: GraphID, version: number) => { - if (version in graphVersions.current) - return graphVersions.current[version]; - if (version in loadingGraphVersions.current) - return loadingGraphVersions.current[version]; - - const pendingGraph = api.getGraph(graphID, version).then((graph) => { - graphVersions.current[version] = graph; - return graph; - }); - // Cache promise as well to avoid duplicate requests - loadingGraphVersions.current[version] = pendingGraph; - return pendingGraph; - }, - [api, graphVersions, loadingGraphVersions], - ); - - const lastRefresh = useRef(0); - const refreshPageData = useCallback(() => { - if (Date.now() - lastRefresh.current < 2e3) return; // 2 second debounce - lastRefresh.current = Date.now(); - - 
api.getLibraryAgent(agentID).then((agent) => { - setAgent(agent); - - getGraphVersion(agent.graph_id, agent.graph_version).then( - (_graph) => - (graph && graph.version == _graph.version) || setGraph(_graph), - ); - Promise.all([ - agentRunsQuery.refetchRuns(), - api.listLibraryAgentPresets({ - graph_id: agent.graph_id, - page_size: 100, - }), - ]).then(([runsQueryResult, presets]) => { - setAgentPresets(presets.presets); - - const newestAgentRunsResponse = runsQueryResult.data?.pages[0]; - if (!newestAgentRunsResponse || newestAgentRunsResponse.status != 200) - return; - const newestAgentRuns = newestAgentRunsResponse.data.executions; - // Preload the corresponding graph versions for the latest 10 runs - new Set( - newestAgentRuns.slice(0, 10).map((run) => run.graph_version), - ).forEach((version) => getGraphVersion(agent.graph_id, version)); - }); - }); - }, [api, agentID, getGraphVersion, graph]); - - // On first load: select the latest run - useEffect(() => { - // Only for first load or first execution - if (selectedView.id || !isFirstLoad) return; - if (agentRuns.length == 0 && agentPresets.length == 0) return; - - setIsFirstLoad(false); - if (agentRuns.length > 0) { - // select latest run - const latestRun = agentRuns.reduce((latest, current) => { - if (!latest.started_at && !current.started_at) return latest; - if (!latest.started_at) return current; - if (!current.started_at) return latest; - return latest.started_at > current.started_at ? 
latest : current; - }, agentRuns[0]); - selectRun(latestRun.id as GraphExecutionID); - } else { - // select top preset - const latestPreset = agentPresets.toSorted( - (a, b) => b.updated_at.getTime() - a.updated_at.getTime(), - )[0]; - selectPreset(latestPreset.id); - } - }, [ - isFirstLoad, - selectedView.id, - agentRuns, - agentPresets, - selectRun, - selectPreset, - ]); - - useEffect(() => { - if (executionId) { - selectRun(executionId as GraphExecutionID); - setExecutionId(null); - } - }, [executionId, selectRun, setExecutionId]); - - // Initial load - useEffect(() => { - refreshPageData(); - - // Show a toast when the WebSocket connection disconnects - let connectionToast: ReturnType | null = null; - const cancelDisconnectHandler = api.onWebSocketDisconnect(() => { - connectionToast ??= toast({ - title: "Connection to server was lost", - variant: "destructive", - description: ( -
- Trying to reconnect... - -
- ), - duration: Infinity, - dismissable: true, - }); - }); - const cancelConnectHandler = api.onWebSocketConnect(() => { - if (connectionToast) - connectionToast.update({ - id: connectionToast.id, - title: "✅ Connection re-established", - variant: "default", - description: ( -
- Refreshing data... - -
- ), - duration: 2000, - dismissable: true, - }); - connectionToast = null; - }); - return () => { - cancelDisconnectHandler(); - cancelConnectHandler(); - }; - }, []); - - // Subscribe to WebSocket updates for agent runs - useEffect(() => { - if (!agent?.graph_id) return; - - return api.onWebSocketConnect(() => { - refreshPageData(); // Sync up on (re)connect - - // Subscribe to all executions for this agent - api.subscribeToGraphExecutions(agent.graph_id); - }); - }, [api, agent?.graph_id, refreshPageData]); - - // Handle execution updates - useEffect(() => { - const detachExecUpdateHandler = api.onWebSocketMessage( - "graph_execution_event", - (data) => { - if (data.graph_id != agent?.graph_id) return; - - agentRunsQuery.upsertAgentRun(data); - if (data.id === selectedView.id) { - // Update currently viewed run - setSelectedRun(data); - } - }, - ); - - return () => { - detachExecUpdateHandler(); - }; - }, [api, agent?.graph_id, selectedView.id]); - - // Pre-load selectedRun based on selectedView - useEffect(() => { - if (selectedView.type != "run" || !selectedView.id) return; - - const newSelectedRun = agentRuns.find((run) => run.id == selectedView.id); - if (selectedView.id !== selectedRun?.id) { - // Pull partial data from "cache" while waiting for the rest to load - setSelectedRun((newSelectedRun as GraphExecutionMeta) ?? 
null); - } - }, [api, selectedView, agentRuns, selectedRun?.id]); - - // Load selectedRun based on selectedView; refresh on agent refresh - useEffect(() => { - if (selectedView.type != "run" || !selectedView.id || !agent) return; - - api - .getGraphExecutionInfo(agent.graph_id, selectedView.id) - .then(async (run) => { - // Ensure corresponding graph version is available before rendering I/O - await getGraphVersion(run.graph_id, run.graph_version); - setSelectedRun(run); - }); - }, [api, selectedView, agent, getGraphVersion]); - - const fetchSchedules = useCallback(async () => { - if (!agent) return; - - setSchedules(await api.listGraphExecutionSchedules(agent.graph_id)); - }, [api, agent?.graph_id]); - - useEffect(() => { - fetchSchedules(); - }, [fetchSchedules]); - - // =========================== ACTIONS ============================ - - const deleteRun = useCallback( - async (run: GraphExecutionMeta) => { - if (run.status == "RUNNING" || run.status == "QUEUED") { - await api.stopGraphExecution(run.graph_id, run.id); - } - await api.deleteGraphExecution(run.id); - - setConfirmingDeleteAgentRun(null); - if (selectedView.type == "run" && selectedView.id == run.id) { - openRunDraftView(); - } - agentRunsQuery.removeAgentRun(run.id); - }, - [api, selectedView, openRunDraftView], - ); - - const deletePreset = useCallback( - async (presetID: LibraryAgentPresetID) => { - await api.deleteLibraryAgentPreset(presetID); - - setConfirmingDeleteAgentPreset(null); - if (selectedView.type == "preset" && selectedView.id == presetID) { - openRunDraftView(); - } - setAgentPresets((presets) => presets.filter((p) => p.id !== presetID)); - }, - [api, selectedView, openRunDraftView], - ); - - const deleteSchedule = useCallback( - async (scheduleID: ScheduleID) => { - const removedSchedule = - await api.deleteGraphExecutionSchedule(scheduleID); - - setSchedules((schedules) => { - const newSchedules = schedules.filter( - (s) => s.id !== removedSchedule.id, - ); - if ( - 
selectedView.type == "schedule" && - selectedView.id == removedSchedule.id - ) { - if (newSchedules.length > 0) { - // Select next schedule if available - selectSchedule(newSchedules[0].id); - } else { - // Reset to draft view if current schedule was deleted - openRunDraftView(); - } - } - return newSchedules; - }); - openRunDraftView(); - }, - [schedules, api], - ); - - const handleCreatePresetFromRun = useCallback( - async (name: string, description: string) => { - if (!creatingPresetFromExecutionID) return; - - await api - .createLibraryAgentPreset({ - name, - description, - graph_execution_id: creatingPresetFromExecutionID, - }) - .then((preset) => { - setAgentPresets((prev) => [...prev, preset]); - selectPreset(preset.id); - setCreatingPresetFromExecutionID(null); - }) - .catch(toastOnFail("create a preset")); - }, - [api, creatingPresetFromExecutionID, selectPreset, toast], - ); - - const downloadGraph = useCallback( - async () => - agent && - // Export sanitized graph from backend - api - .getGraph(agent.graph_id, agent.graph_version, true) - .then((graph) => - exportAsJSONFile(graph, `${graph.name}_v${graph.version}.json`), - ), - [api, agent], - ); - - const copyAgent = useCallback(async () => { - setCopyAgentDialogOpen(false); - api - .forkLibraryAgent(agentID) - .then((newAgent) => { - router.push(`/library/agents/${newAgent.id}`); - }) - .catch((error) => { - console.error("Error copying agent:", error); - toast({ - title: "Error copying agent", - description: `An error occurred while copying the agent: ${error.message}`, - variant: "destructive", - }); - }); - }, [agentID, api, router, toast]); - - const agentActions: ButtonAction[] = useMemo( - () => [ - { - label: "Customize agent", - href: `/build?flowID=${agent?.graph_id}&flowVersion=${agent?.graph_version}`, - disabled: !agent?.can_access_graph, - }, - { label: "Export agent to file", callback: downloadGraph }, - ...(!agent?.can_access_graph - ? 
[ - { - label: "Edit a copy", - callback: () => setCopyAgentDialogOpen(true), - }, - ] - : []), - { - label: "Delete agent", - callback: () => setAgentDeleteDialogOpen(true), - }, - ], - [agent, downloadGraph], - ); - - const runGraph = - graphVersions.current[selectedRun?.graph_version ?? 0] ?? graph; - - const onCreateSchedule = useCallback( - (schedule: Schedule) => { - setSchedules((prev) => [...prev, schedule]); - selectSchedule(schedule.id); - }, - [selectView], - ); - - const onCreatePreset = useCallback( - (preset: LibraryAgentPreset) => { - setAgentPresets((prev) => [...prev, preset]); - selectPreset(preset.id); - }, - [selectPreset], - ); - - const onUpdatePreset = useCallback( - (updated: LibraryAgentPreset) => { - setAgentPresets((prev) => - prev.map((p) => (p.id === updated.id ? updated : p)), - ); - selectPreset(updated.id); - }, - [selectPreset], - ); - - if (!agent || !graph) { - return ; - } - - return ( -
- {/* Sidebar w/ list of runs */} - {/* TODO: render this below header in sm and md layouts */} - - -
- {/* Header */} -
-

- { - agent.name /* TODO: use dynamic/custom run title - https://github.com/Significant-Gravitas/AutoGPT/issues/9184 */ - } -

-
- - {/* Run / Schedule views */} - {(selectedView.type == "run" && selectedView.id ? ( - selectedRun && runGraph ? ( - setConfirmingDeleteAgentRun(selectedRun)} - doCreatePresetFromRun={() => - setCreatingPresetFromExecutionID(selectedRun.id) - } - /> - ) : null - ) : selectedView.type == "run" ? ( - /* Draft new runs / Create new presets */ - - ) : selectedView.type == "preset" ? ( - /* Edit & update presets */ - preset.id == selectedView.id)! - } - onRun={selectRun} - recommendedScheduleCron={agent?.recommended_schedule_cron || null} - onCreateSchedule={onCreateSchedule} - onUpdatePreset={onUpdatePreset} - doDeletePreset={setConfirmingDeleteAgentPreset} - agentActions={agentActions} - /> - ) : selectedView.type == "schedule" ? ( - selectedSchedule && - graph && ( - - ) - ) : null) || } - - - agent && - api.deleteLibraryAgent(agent.id).then(() => router.push("/library")) - } - /> - - !open && setConfirmingDeleteAgentRun(null)} - onDoDelete={() => - confirmingDeleteAgentRun && deleteRun(confirmingDeleteAgentRun) - } - /> - !open && setConfirmingDeleteAgentPreset(null)} - onDoDelete={() => - confirmingDeleteAgentPreset && - deletePreset(confirmingDeleteAgentPreset) - } - /> - {/* Copy agent confirmation dialog */} - - - - You're making an editable copy - - The original Marketplace agent stays the same and cannot be - edited. We'll save a new version of this agent to your - Library. From there, you can customize it however you'd - like by clicking "Customize agent" — this will open - the builder where you can see and modify the inner workings. - - - - - - - - - setCreatingPresetFromExecutionID(null)} - onConfirm={handleCreatePresetFromRun} - /> -
-
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-details-view.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-details-view.tsx deleted file mode 100644 index eb5224c958..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-details-view.tsx +++ /dev/null @@ -1,445 +0,0 @@ -"use client"; -import { format, formatDistanceToNow, formatDistanceStrict } from "date-fns"; -import React, { useCallback, useMemo, useEffect } from "react"; - -import { - Graph, - GraphExecution, - GraphExecutionID, - GraphExecutionMeta, - LibraryAgent, -} from "@/lib/autogpt-server-api"; -import { useBackendAPI } from "@/lib/autogpt-server-api/context"; - -import ActionButtonGroup from "@/components/__legacy__/action-button-group"; -import type { ButtonAction } from "@/components/__legacy__/types"; -import { - Card, - CardContent, - CardHeader, - CardTitle, -} from "@/components/__legacy__/ui/card"; -import { - IconRefresh, - IconSquare, - IconCircleAlert, -} from "@/components/__legacy__/ui/icons"; -import { Input } from "@/components/__legacy__/ui/input"; -import LoadingBox from "@/components/__legacy__/ui/loading"; -import { - Tooltip, - TooltipContent, - TooltipProvider, - TooltipTrigger, -} from "@/components/atoms/Tooltip/BaseTooltip"; -import { useToastOnFail } from "@/components/molecules/Toast/use-toast"; - -import { AgentRunStatus, agentRunStatusMap } from "./agent-run-status-chip"; -import useCredits from "@/hooks/useCredits"; -import { AgentRunOutputView } from "./agent-run-output-view"; -import { analytics } from "@/services/analytics"; -import { PendingReviewsList } from "@/components/organisms/PendingReviewsList/PendingReviewsList"; -import { usePendingReviewsForExecution } from "@/hooks/usePendingReviews"; - -export function 
AgentRunDetailsView({ - agent, - graph, - run, - agentActions, - onRun, - doDeleteRun, - doCreatePresetFromRun, -}: { - agent: LibraryAgent; - graph: Graph; - run: GraphExecution | GraphExecutionMeta; - agentActions: ButtonAction[]; - onRun: (runID: GraphExecutionID) => void; - doDeleteRun: () => void; - doCreatePresetFromRun: () => void; -}): React.ReactNode { - const api = useBackendAPI(); - const { formatCredits } = useCredits(); - - const runStatus: AgentRunStatus = useMemo( - () => agentRunStatusMap[run.status], - [run], - ); - - const { - pendingReviews, - isLoading: reviewsLoading, - refetch: refetchReviews, - } = usePendingReviewsForExecution(run.id); - - const toastOnFail = useToastOnFail(); - - // Refetch pending reviews when execution status changes to REVIEW - useEffect(() => { - if (runStatus === "review" && run.id) { - refetchReviews(); - } - }, [runStatus, run.id, refetchReviews]); - - const infoStats: { label: string; value: React.ReactNode }[] = useMemo(() => { - if (!run) return []; - return [ - { - label: "Status", - value: runStatus.charAt(0).toUpperCase() + runStatus.slice(1), - }, - { - label: "Started", - value: run.started_at - ? `${formatDistanceToNow(run.started_at, { addSuffix: true })}, ${format(run.started_at, "HH:mm")}` - : "—", - }, - ...(run.stats - ? 
[ - { - label: "Duration", - value: formatDistanceStrict(0, run.stats.duration * 1000), - }, - { label: "Steps", value: run.stats.node_exec_count }, - { label: "Cost", value: formatCredits(run.stats.cost) }, - ] - : []), - ]; - }, [run, runStatus, formatCredits]); - - const agentRunInputs: - | Record< - string, - { - title?: string; - /* type: BlockIOSubType; */ - value: string | number | undefined; - } - > - | undefined = useMemo(() => { - if (!run.inputs) return undefined; - // TODO: show (link to) preset - https://github.com/Significant-Gravitas/AutoGPT/issues/9168 - - // Add type info from agent input schema - return Object.fromEntries( - Object.entries(run.inputs).map(([k, v]) => [ - k, - { - title: graph.input_schema.properties[k]?.title, - // type: graph.input_schema.properties[k].type, // TODO: implement typed graph inputs - value: typeof v == "object" ? JSON.stringify(v, undefined, 2) : v, - }, - ]), - ); - }, [graph, run]); - - const runAgain = useCallback(() => { - if ( - !run.inputs || - !(graph.credentials_input_schema?.required ?? []).every( - (k) => k in (run.credential_inputs ?? 
{}), - ) - ) - return; - - if (run.preset_id) { - return api - .executeLibraryAgentPreset( - run.preset_id, - run.inputs!, - run.credential_inputs!, - ) - .then(({ id }) => { - analytics.sendDatafastEvent("run_agent", { - name: graph.name, - id: graph.id, - }); - onRun(id); - }) - .catch(toastOnFail("execute agent preset")); - } - - return api - .executeGraph( - graph.id, - graph.version, - run.inputs!, - run.credential_inputs!, - "library", - ) - .then(({ id }) => { - analytics.sendDatafastEvent("run_agent", { - name: graph.name, - id: graph.id, - }); - onRun(id); - }) - .catch(toastOnFail("execute agent")); - }, [api, graph, run, onRun, toastOnFail]); - - const stopRun = useCallback( - () => api.stopGraphExecution(graph.id, run.id), - [api, graph.id, run.id], - ); - - const agentRunOutputs: - | Record< - string, - { - title?: string; - /* type: BlockIOSubType; */ - values: Array; - } - > - | null - | undefined = useMemo(() => { - if (!("outputs" in run)) return undefined; - if (!["running", "success", "failed", "stopped"].includes(runStatus)) - return null; - - // Add type info from agent input schema - return Object.fromEntries( - Object.entries(run.outputs).map(([k, vv]) => [ - k, - { - title: graph.output_schema.properties[k].title, - /* type: agent.output_schema.properties[k].type */ - values: vv.map((v) => - typeof v == "object" ? JSON.stringify(v, undefined, 2) : v, - ), - }, - ]), - ); - }, [graph, run, runStatus]); - - const runActions: ButtonAction[] = useMemo( - () => [ - ...(["running", "queued"].includes(runStatus) - ? ([ - { - label: ( - <> - - Stop run - - ), - variant: "secondary", - callback: stopRun, - }, - ] satisfies ButtonAction[]) - : []), - ...(["success", "failed", "stopped"].includes(runStatus) && - !graph.has_external_trigger && - (graph.credentials_input_schema?.required ?? []).every( - (k) => k in (run.credential_inputs ?? {}), - ) - ? 
[ - { - label: ( - <> - - Run again - - ), - callback: runAgain, - dataTestId: "run-again-button", - }, - ] - : []), - ...(agent.can_access_graph - ? [ - { - label: "Open run in builder", - href: `/build?flowID=${run.graph_id}&flowVersion=${run.graph_version}&flowExecutionID=${run.id}`, - }, - ] - : []), - { label: "Create preset from run", callback: doCreatePresetFromRun }, - { label: "Delete run", variant: "secondary", callback: doDeleteRun }, - ], - [ - runStatus, - runAgain, - stopRun, - doDeleteRun, - doCreatePresetFromRun, - graph.has_external_trigger, - graph.credentials_input_schema?.required, - agent.can_access_graph, - run.graph_id, - run.graph_version, - run.id, - ], - ); - - return ( -
-
- - - Info - - - -
- {infoStats.map(({ label, value }) => ( -
-

{label}

-

{value}

-
- ))} -
- {run.status === "FAILED" && ( -
-

- Error:{" "} - {run.stats?.error || - "The execution failed due to an internal error. You can re-run the agent to retry."} -

-
- )} -
-
- - {/* Smart Agent Execution Summary */} - {run.stats?.activity_status && ( - - - - Task Summary - - - - - - -

- This AI-generated summary describes how the agent - handled your task. It’s an experimental feature and may - occasionally be inaccurate. -

-
-
-
-
-
- -

- {run.stats.activity_status} -

- - {/* Correctness Score */} - {typeof run.stats.correctness_score === "number" && ( -
-
- - Success Estimate: - -
-
-
= 0.8 - ? "bg-green-500" - : run.stats.correctness_score >= 0.6 - ? "bg-yellow-500" - : run.stats.correctness_score >= 0.4 - ? "bg-orange-500" - : "bg-red-500" - }`} - style={{ - width: `${Math.round(run.stats.correctness_score * 100)}%`, - }} - /> -
- - {Math.round(run.stats.correctness_score * 100)}% - -
-
- - - - - - -

- AI-generated estimate of how well this execution - achieved its intended purpose. This score indicates - {run.stats.correctness_score >= 0.8 - ? " the agent was highly successful." - : run.stats.correctness_score >= 0.6 - ? " the agent was mostly successful with minor issues." - : run.stats.correctness_score >= 0.4 - ? " the agent was partially successful with some gaps." - : " the agent had limited success with significant issues."} -

-
-
-
-
- )} - - - )} - - {agentRunOutputs !== null && ( - - )} - - {/* Pending Reviews Section */} - {runStatus === "review" && ( - - - - Pending Reviews ({pendingReviews.length}) - - - - {reviewsLoading ? ( - - ) : pendingReviews.length > 0 ? ( - - ) : ( -
- No pending reviews for this execution -
- )} -
-
- )} - - - - Input - - - {agentRunInputs !== undefined ? ( - Object.entries(agentRunInputs).map(([key, { title, value }]) => ( -
- - -
- )) - ) : ( - - )} -
-
-
- - {/* Run / Agent Actions */} - -
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-output-view.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-output-view.tsx deleted file mode 100644 index 668ac2e215..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-output-view.tsx +++ /dev/null @@ -1,178 +0,0 @@ -"use client"; - -import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; -import React, { useMemo } from "react"; - -import { - Card, - CardContent, - CardHeader, - CardTitle, -} from "@/components/__legacy__/ui/card"; - -import LoadingBox from "@/components/__legacy__/ui/loading"; -import type { OutputMetadata } from "../../../../../../../../components/contextual/OutputRenderers"; -import { - globalRegistry, - OutputActions, - OutputItem, -} from "../../../../../../../../components/contextual/OutputRenderers"; - -export function AgentRunOutputView({ - agentRunOutputs, -}: { - agentRunOutputs: - | Record< - string, - { - title?: string; - /* type: BlockIOSubType; */ - values: Array; - } - > - | undefined; -}) { - const enableEnhancedOutputHandling = useGetFlag( - Flag.ENABLE_ENHANCED_OUTPUT_HANDLING, - ); - - // Prepare items for the renderer system - const outputItems = useMemo(() => { - if (!agentRunOutputs) return []; - - const items: Array<{ - key: string; - label: string; - value: unknown; - metadata?: OutputMetadata; - renderer: any; - }> = []; - - Object.entries(agentRunOutputs).forEach(([key, { title, values }]) => { - values.forEach((value, index) => { - // Enhanced metadata extraction - const metadata: OutputMetadata = {}; - - // Type guard to safely access properties - if ( - typeof value === "object" && - value !== null && - !React.isValidElement(value) - ) { - const objValue = value as any; - if (objValue.type) metadata.type = 
objValue.type; - if (objValue.mimeType) metadata.mimeType = objValue.mimeType; - if (objValue.filename) metadata.filename = objValue.filename; - } - - const renderer = globalRegistry.getRenderer(value, metadata); - if (renderer) { - items.push({ - key: `${key}-${index}`, - label: index === 0 ? title || key : "", - value, - metadata, - renderer, - }); - } else { - const textRenderer = globalRegistry - .getAllRenderers() - .find((r) => r.name === "TextRenderer"); - if (textRenderer) { - items.push({ - key: `${key}-${index}`, - label: index === 0 ? title || key : "", - value: JSON.stringify(value, null, 2), - metadata, - renderer: textRenderer, - }); - } - } - }); - }); - - return items; - }, [agentRunOutputs]); - - return ( - <> - {enableEnhancedOutputHandling ? ( - - -
- Output - {outputItems.length > 0 && ( - ({ - value: item.value, - metadata: item.metadata, - renderer: item.renderer, - }))} - /> - )} -
-
- - - {agentRunOutputs !== undefined ? ( - outputItems.length > 0 ? ( - outputItems.map((item) => ( - - )) - ) : ( -

- No outputs to display -

- ) - ) : ( - - )} -
-
- ) : ( - - - Output - - - - {agentRunOutputs !== undefined ? ( - Object.entries(agentRunOutputs).map( - ([key, { title, values }]) => ( -
- - {values.map((value, i) => ( -

- {value} -

- ))} - {/* TODO: pretty type-dependent rendering */} -
- ), - ) - ) : ( - - )} -
-
- )} - - ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-status-chip.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-status-chip.tsx deleted file mode 100644 index 58f1ee8381..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-status-chip.tsx +++ /dev/null @@ -1,68 +0,0 @@ -import React from "react"; - -import { Badge } from "@/components/__legacy__/ui/badge"; - -import { GraphExecutionMeta } from "@/lib/autogpt-server-api/types"; - -export type AgentRunStatus = - | "success" - | "failed" - | "queued" - | "running" - | "stopped" - | "scheduled" - | "draft" - | "review"; - -export const agentRunStatusMap: Record< - GraphExecutionMeta["status"], - AgentRunStatus -> = { - INCOMPLETE: "draft", - COMPLETED: "success", - FAILED: "failed", - QUEUED: "queued", - RUNNING: "running", - TERMINATED: "stopped", - REVIEW: "review", -}; - -const statusData: Record< - AgentRunStatus, - { label: string; variant: keyof typeof statusStyles } -> = { - success: { label: "Success", variant: "success" }, - running: { label: "Running", variant: "info" }, - failed: { label: "Failed", variant: "destructive" }, - queued: { label: "Queued", variant: "warning" }, - draft: { label: "Draft", variant: "secondary" }, - stopped: { label: "Stopped", variant: "secondary" }, - scheduled: { label: "Scheduled", variant: "secondary" }, - review: { label: "In Review", variant: "warning" }, -}; - -const statusStyles = { - success: - "bg-green-100 text-green-800 hover:bg-green-100 hover:text-green-800", - destructive: "bg-red-100 text-red-800 hover:bg-red-100 hover:text-red-800", - warning: - "bg-yellow-100 text-yellow-800 hover:bg-yellow-100 hover:text-yellow-800", - info: "bg-blue-100 text-blue-800 hover:bg-blue-100 hover:text-blue-800", - secondary: - 
"bg-slate-100 text-slate-800 hover:bg-slate-100 hover:text-slate-800", -}; - -export function AgentRunStatusChip({ - status, -}: { - status: AgentRunStatus; -}): React.ReactElement { - return ( - - {statusData[status]?.label} - - ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-summary-card.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-summary-card.tsx deleted file mode 100644 index 6f7d7865bc..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-summary-card.tsx +++ /dev/null @@ -1,130 +0,0 @@ -import React from "react"; -import { formatDistanceToNow, isPast } from "date-fns"; - -import { cn } from "@/lib/utils"; - -import { Link2Icon, Link2OffIcon, MoreVertical } from "lucide-react"; -import { Card, CardContent } from "@/components/__legacy__/ui/card"; -import { Button } from "@/components/__legacy__/ui/button"; -import { - DropdownMenu, - DropdownMenuContent, - DropdownMenuItem, - DropdownMenuTrigger, -} from "@/components/__legacy__/ui/dropdown-menu"; - -import { AgentStatus, AgentStatusChip } from "./agent-status-chip"; -import { AgentRunStatus, AgentRunStatusChip } from "./agent-run-status-chip"; -import { PushPinSimpleIcon } from "@phosphor-icons/react"; - -export type AgentRunSummaryProps = ( - | { - type: "run"; - status: AgentRunStatus; - } - | { - type: "preset"; - status?: undefined; - } - | { - type: "preset.triggered"; - status: AgentStatus; - } - | { - type: "schedule"; - status: "scheduled"; - } -) & { - title: string; - timestamp?: number | Date; - selected?: boolean; - onClick?: () => void; - // onRename: () => void; - onDelete: () => void; - onPinAsPreset?: () => void; - className?: string; -}; - -export function AgentRunSummaryCard({ - type, - status, - title, - timestamp, - selected = false, - onClick, - 
// onRename, - onDelete, - onPinAsPreset, - className, -}: AgentRunSummaryProps): React.ReactElement { - return ( - - - {(type == "run" || type == "schedule") && ( - - )} - {type == "preset" && ( -
- Preset -
- )} - {type == "preset.triggered" && ( -
- - -
- {status == "inactive" ? ( - - ) : ( - - )}{" "} - Trigger -
-
- )} - -
-

- {title} -

- - - - - - - {onPinAsPreset && ( - - Pin as a preset - - )} - - {/* Rename */} - - Delete - - -
- - {timestamp && ( -

- {isPast(timestamp) ? "Ran" : "Runs in"}{" "} - {formatDistanceToNow(timestamp, { addSuffix: true })} -

- )} -
-
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-runs-selector-list.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-runs-selector-list.tsx deleted file mode 100644 index 49d93b4319..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-runs-selector-list.tsx +++ /dev/null @@ -1,237 +0,0 @@ -"use client"; -import { Plus } from "lucide-react"; -import React, { useEffect, useState } from "react"; - -import { - GraphExecutionID, - GraphExecutionMeta, - LibraryAgent, - LibraryAgentPreset, - LibraryAgentPresetID, - Schedule, - ScheduleID, -} from "@/lib/autogpt-server-api"; -import { cn } from "@/lib/utils"; - -import { Badge } from "@/components/__legacy__/ui/badge"; -import { Button } from "@/components/atoms/Button/Button"; -import LoadingBox, { LoadingSpinner } from "@/components/__legacy__/ui/loading"; -import { Separator } from "@/components/__legacy__/ui/separator"; -import { ScrollArea } from "@/components/__legacy__/ui/scroll-area"; -import { InfiniteScroll } from "@/components/contextual/InfiniteScroll/InfiniteScroll"; -import { AgentRunsQuery } from "../use-agent-runs"; -import { agentRunStatusMap } from "./agent-run-status-chip"; -import { AgentRunSummaryCard } from "./agent-run-summary-card"; - -interface AgentRunsSelectorListProps { - agent: LibraryAgent; - agentRunsQuery: AgentRunsQuery; - agentPresets: LibraryAgentPreset[]; - schedules: Schedule[]; - selectedView: { type: "run" | "preset" | "schedule"; id?: string }; - allowDraftNewRun?: boolean; - onSelectRun: (id: GraphExecutionID) => void; - onSelectPreset: (preset: LibraryAgentPresetID) => void; - onSelectSchedule: (id: ScheduleID) => void; - onSelectDraftNewRun: () => void; - doDeleteRun: (id: GraphExecutionMeta) => void; - doDeletePreset: (id: LibraryAgentPresetID) => 
void; - doDeleteSchedule: (id: ScheduleID) => void; - doCreatePresetFromRun?: (id: GraphExecutionID) => void; - className?: string; -} - -export function AgentRunsSelectorList({ - agent, - agentRunsQuery: { - agentRuns, - agentRunCount, - agentRunsLoading, - hasMoreRuns, - fetchMoreRuns, - isFetchingMoreRuns, - }, - agentPresets, - schedules, - selectedView, - allowDraftNewRun = true, - onSelectRun, - onSelectPreset, - onSelectSchedule, - onSelectDraftNewRun, - doDeleteRun, - doDeletePreset, - doDeleteSchedule, - doCreatePresetFromRun, - className, -}: AgentRunsSelectorListProps): React.ReactElement { - const [activeListTab, setActiveListTab] = useState<"runs" | "scheduled">( - "runs", - ); - - useEffect(() => { - if (selectedView.type === "schedule") { - setActiveListTab("scheduled"); - } else { - setActiveListTab("runs"); - } - }, [selectedView]); - - const listItemClasses = "h-28 w-72 lg:w-full lg:h-32"; - - return ( - - ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-schedule-details-view.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-schedule-details-view.tsx deleted file mode 100644 index 30b0a82e65..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-schedule-details-view.tsx +++ /dev/null @@ -1,180 +0,0 @@ -"use client"; -import React, { useCallback, useMemo } from "react"; - -import { - Graph, - GraphExecutionID, - Schedule, - ScheduleID, -} from "@/lib/autogpt-server-api"; -import { useBackendAPI } from "@/lib/autogpt-server-api/context"; - -import ActionButtonGroup from "@/components/__legacy__/action-button-group"; -import type { ButtonAction } from "@/components/__legacy__/types"; -import { - Card, - CardContent, - CardHeader, - CardTitle, -} from "@/components/__legacy__/ui/card"; -import { IconCross } from 
"@/components/__legacy__/ui/icons"; -import { Input } from "@/components/__legacy__/ui/input"; -import LoadingBox from "@/components/__legacy__/ui/loading"; -import { useToastOnFail } from "@/components/molecules/Toast/use-toast"; -import { humanizeCronExpression } from "@/lib/cron-expression-utils"; -import { formatScheduleTime } from "@/lib/timezone-utils"; -import { useUserTimezone } from "@/lib/hooks/useUserTimezone"; -import { PlayIcon } from "lucide-react"; - -import { AgentRunStatus } from "./agent-run-status-chip"; - -export function AgentScheduleDetailsView({ - graph, - schedule, - agentActions, - onForcedRun, - doDeleteSchedule, -}: { - graph: Graph; - schedule: Schedule; - agentActions: ButtonAction[]; - onForcedRun: (runID: GraphExecutionID) => void; - doDeleteSchedule: (scheduleID: ScheduleID) => void; -}): React.ReactNode { - const api = useBackendAPI(); - - const selectedRunStatus: AgentRunStatus = "scheduled"; - - const toastOnFail = useToastOnFail(); - - // Get user's timezone for displaying schedule times - const userTimezone = useUserTimezone(); - - const infoStats: { label: string; value: React.ReactNode }[] = useMemo(() => { - return [ - { - label: "Status", - value: - selectedRunStatus.charAt(0).toUpperCase() + - selectedRunStatus.slice(1), - }, - { - label: "Schedule", - value: humanizeCronExpression(schedule.cron), - }, - { - label: "Next run", - value: formatScheduleTime(schedule.next_run_time, userTimezone), - }, - ]; - }, [schedule, selectedRunStatus, userTimezone]); - - const agentRunInputs: Record< - string, - { title?: string; /* type: BlockIOSubType; */ value: any } - > = useMemo(() => { - // TODO: show (link to) preset - https://github.com/Significant-Gravitas/AutoGPT/issues/9168 - - // Add type info from agent input schema - return Object.fromEntries( - Object.entries(schedule.input_data).map(([k, v]) => [ - k, - { - title: graph.input_schema.properties[k].title, - /* TODO: type: agent.input_schema.properties[k].type */ - value: v, 
- }, - ]), - ); - }, [graph, schedule]); - - const runNow = useCallback( - () => - api - .executeGraph( - graph.id, - graph.version, - schedule.input_data, - schedule.input_credentials, - "library", - ) - .then((run) => onForcedRun(run.id)) - .catch(toastOnFail("execute agent")), - [api, graph, schedule, onForcedRun, toastOnFail], - ); - - const runActions: ButtonAction[] = useMemo( - () => [ - { - label: ( - <> - - Run now - - ), - callback: runNow, - }, - { - label: ( - <> - - Delete schedule - - ), - callback: () => doDeleteSchedule(schedule.id), - variant: "destructive", - }, - ], - [runNow], - ); - - return ( -
-
- - - Info - - - -
- {infoStats.map(({ label, value }) => ( -
-

{label}

-

{value}

-
- ))} -
-
-
- - - - Input - - - {agentRunInputs !== undefined ? ( - Object.entries(agentRunInputs).map(([key, { title, value }]) => ( -
- - -
- )) - ) : ( - - )} -
-
-
- - {/* Run / Agent Actions */} - -
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/create-preset-dialog.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/create-preset-dialog.tsx deleted file mode 100644 index 2ca64d5ec5..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/create-preset-dialog.tsx +++ /dev/null @@ -1,100 +0,0 @@ -"use client"; - -import React, { useState } from "react"; -import { Button } from "@/components/__legacy__/ui/button"; -import { - Dialog, - DialogContent, - DialogDescription, - DialogFooter, - DialogHeader, - DialogTitle, -} from "@/components/__legacy__/ui/dialog"; -import { Input } from "@/components/__legacy__/ui/input"; -import { Textarea } from "@/components/__legacy__/ui/textarea"; - -interface CreatePresetDialogProps { - open: boolean; - onOpenChange: (open: boolean) => void; - onConfirm: (name: string, description: string) => Promise | void; -} - -export function CreatePresetDialog({ - open, - onOpenChange, - onConfirm, -}: CreatePresetDialogProps) { - const [name, setName] = useState(""); - const [description, setDescription] = useState(""); - - const handleSubmit = async () => { - if (name.trim()) { - await onConfirm(name.trim(), description.trim()); - setName(""); - setDescription(""); - onOpenChange(false); - } - }; - - const handleCancel = () => { - setName(""); - setDescription(""); - onOpenChange(false); - }; - - const handleKeyDown = (e: React.KeyboardEvent) => { - if (e.key === "Enter" && (e.metaKey || e.ctrlKey)) { - e.preventDefault(); - handleSubmit(); - } - }; - - return ( - - - - Create Preset - - Give your preset a name and description to help identify it later. - - -
-
- - setName(e.target.value)} - onKeyDown={handleKeyDown} - autoFocus - /> -
-
- -