diff --git a/.github/workflows/docs-claude-review.yml b/.github/workflows/docs-claude-review.yml
index ca2788b387..19d5dd667b 100644
--- a/.github/workflows/docs-claude-review.yml
+++ b/.github/workflows/docs-claude-review.yml
@@ -7,6 +7,10 @@ on:
- "docs/integrations/**"
- "autogpt_platform/backend/backend/blocks/**"
+concurrency:
+ group: claude-docs-review-${{ github.event.pull_request.number }}
+ cancel-in-progress: true
+
jobs:
claude-review:
# Only run for PRs from members/collaborators
@@ -91,5 +95,35 @@ jobs:
3. Read corresponding documentation files to verify accuracy
4. Provide your feedback as a PR comment
+ ## IMPORTANT: Comment Marker
+ Start your PR comment with exactly this HTML comment marker on its own line:
+
+
+ This marker is used to identify and replace your comment on subsequent runs.
+
Be constructive and specific. If everything looks good, say so!
If there are issues, explain what's wrong and suggest how to fix it.
+
+ - name: Delete old Claude review comments
+ env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ run: |
+ # Get all comment IDs with our marker, sorted by creation date (oldest first)
+ COMMENT_IDS=$(gh api \
+ repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/comments \
+ --jq '[.[] | select(.body | contains(""))] | sort_by(.created_at) | .[].id')
+
+ # Count comments
+ COMMENT_COUNT=$(echo "$COMMENT_IDS" | grep -c . || true)
+
+ if [ "$COMMENT_COUNT" -gt 1 ]; then
+ # Delete all but the last (newest) comment
+ echo "$COMMENT_IDS" | head -n -1 | while read -r COMMENT_ID; do
+ if [ -n "$COMMENT_ID" ]; then
+ echo "Deleting old review comment: $COMMENT_ID"
+ gh api -X DELETE repos/${{ github.repository }}/issues/comments/$COMMENT_ID
+ fi
+ done
+ else
+ echo "No old review comments to clean up"
+ fi
diff --git a/autogpt_platform/backend/backend/blocks/data_manipulation.py b/autogpt_platform/backend/backend/blocks/data_manipulation.py
index a8f25ecb18..fe878acfa9 100644
--- a/autogpt_platform/backend/backend/blocks/data_manipulation.py
+++ b/autogpt_platform/backend/backend/blocks/data_manipulation.py
@@ -682,17 +682,219 @@ class ListIsEmptyBlock(Block):
yield "is_empty", len(input_data.list) == 0
+# =============================================================================
+# List Concatenation Helpers
+# =============================================================================
+
+
+def _validate_list_input(item: Any, index: int) -> str | None:
+ """Validate that an item is a list. Returns error message or None."""
+ if item is None:
+ return None # None is acceptable, will be skipped
+ if not isinstance(item, list):
+ return (
+ f"Invalid input at index {index}: expected a list, "
+ f"got {type(item).__name__}. "
+ f"All items in 'lists' must be lists (e.g., [[1, 2], [3, 4]])."
+ )
+ return None
+
+
+def _validate_all_lists(lists: List[Any]) -> str | None:
+ """Validate that all items in a sequence are lists. Returns first error or None."""
+ for idx, item in enumerate(lists):
+ error = _validate_list_input(item, idx)
+        if error is not None:
+ return error
+ return None
+
+
+def _concatenate_lists_simple(lists: List[List[Any]]) -> List[Any]:
+ """Concatenate a sequence of lists into a single list, skipping None values."""
+ result: List[Any] = []
+ for lst in lists:
+ if lst is None:
+ continue
+ result.extend(lst)
+ return result
+
+
+def _flatten_nested_list(nested: List[Any], max_depth: int = -1) -> List[Any]:
+ """
+ Recursively flatten a nested list structure.
+
+ Args:
+ nested: The list to flatten.
+ max_depth: Maximum recursion depth. -1 means unlimited.
+
+ Returns:
+ A flat list with all nested elements extracted.
+ """
+ result: List[Any] = []
+ _flatten_recursive(nested, result, current_depth=0, max_depth=max_depth)
+ return result
+
+
+_MAX_FLATTEN_DEPTH = 1000
+
+
+def _flatten_recursive(
+ items: List[Any],
+ result: List[Any],
+ current_depth: int,
+ max_depth: int,
+) -> None:
+ """Internal recursive helper for flattening nested lists."""
+ if current_depth > _MAX_FLATTEN_DEPTH:
+ raise RecursionError(
+ f"Flattening exceeded maximum depth of {_MAX_FLATTEN_DEPTH} levels. "
+ "Input may be too deeply nested."
+ )
+ for item in items:
+ if isinstance(item, list) and (max_depth == -1 or current_depth < max_depth):
+ _flatten_recursive(item, result, current_depth + 1, max_depth)
+ else:
+ result.append(item)
+
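# A minimal usage sketch, assuming the two flatten helpers above are in scope.
# It illustrates the max_depth convention: 0 leaves the input untouched,
# 1 unwraps one level, and -1 (the default) flattens completely.
_example_nested = [[1, [2, 3]], [4, [5, [6]]]]
assert _flatten_nested_list(_example_nested, max_depth=0) == [[1, [2, 3]], [4, [5, [6]]]]
assert _flatten_nested_list(_example_nested, max_depth=1) == [1, [2, 3], 4, [5, [6]]]
assert _flatten_nested_list(_example_nested, max_depth=-1) == [1, 2, 3, 4, 5, 6]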
+
+def _deduplicate_list(items: List[Any]) -> List[Any]:
+ """
+ Remove duplicate elements from a list, preserving order of first occurrences.
+
+ Args:
+ items: The list to deduplicate.
+
+ Returns:
+ A list with duplicates removed, maintaining original order.
+ """
+ seen: set = set()
+ result: List[Any] = []
+ for item in items:
+ item_id = _make_hashable(item)
+ if item_id not in seen:
+ seen.add(item_id)
+ result.append(item)
+ return result
+
+
+def _make_hashable(item: Any):
+ """
+ Create a hashable representation of any item for deduplication.
+    Converts unhashable types (dicts, lists, sets) into deterministic hashable structures.
+ """
+ if isinstance(item, dict):
+ return tuple(
+ sorted(
+ ((_make_hashable(k), _make_hashable(v)) for k, v in item.items()),
+ key=lambda x: (str(type(x[0])), str(x[0])),
+ )
+ )
+ if isinstance(item, (list, tuple)):
+ return tuple(_make_hashable(i) for i in item)
+ if isinstance(item, set):
+ return frozenset(_make_hashable(i) for i in item)
+ return item
+
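# A minimal sketch of how deduplication handles unhashable items, assuming the
# helpers above are in scope: dicts and nested lists are normalized into
# deterministic tuples, so structurally equal values collapse to one entry.
assert _make_hashable({"b": 2, "a": 1}) == _make_hashable({"a": 1, "b": 2})
assert _deduplicate_list([{"a": 1}, {"a": 1}, [1, 2], [1, 2]]) == [{"a": 1}, [1, 2]]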
+
+def _filter_none_values(items: List[Any]) -> List[Any]:
+ """Remove None values from a list."""
+ return [item for item in items if item is not None]
+
+
+def _compute_nesting_depth(
+ items: Any, current: int = 0, max_depth: int = _MAX_FLATTEN_DEPTH
+) -> int:
+ """
+    Compute the maximum nesting depth of a list structure without recursion.
+
+    Uses an explicit stack so deeply nested structures do not hit Python's
+    recursion limit (~1000 frames). Once the observed depth exceeds max_depth,
+    traversal stops early and that depth is returned.
+ """
+ if not isinstance(items, list):
+ return current
+
+ # Stack contains tuples of (item, depth)
+ stack = [(items, current)]
+ max_observed_depth = current
+
+ while stack:
+ item, depth = stack.pop()
+
+ if depth > max_depth:
+ return depth
+
+ if not isinstance(item, list):
+ max_observed_depth = max(max_observed_depth, depth)
+ continue
+
+ if len(item) == 0:
+ max_observed_depth = max(max_observed_depth, depth + 1)
+ continue
+
+ # Add all children to stack with incremented depth
+ for child in item:
+ stack.append((child, depth + 1))
+
+ return max_observed_depth
+
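# A minimal sketch of the depth convention used by _compute_nesting_depth:
# non-list values are depth 0, a flat list is depth 1, and each extra level of
# nesting adds one.
assert _compute_nesting_depth(42) == 0
assert _compute_nesting_depth([1, 2, 3]) == 1
assert _compute_nesting_depth([1, [2, [3]]]) == 3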
+
+def _interleave_lists(lists: List[List[Any]]) -> List[Any]:
+ """
+ Interleave elements from multiple lists in round-robin fashion.
+    Example: [[1, 2, 3], ["a", "b"], ["x", "y", "z"]] -> [1, "a", "x", 2, "b", "y", 3, "z"]
+ """
+ if not lists:
+ return []
+ filtered = [lst for lst in lists if lst is not None]
+ if not filtered:
+ return []
+ result: List[Any] = []
+ max_len = max(len(lst) for lst in filtered)
+ for i in range(max_len):
+ for lst in filtered:
+ if i < len(lst):
+ result.append(lst[i])
+ return result
+
+
+# =============================================================================
+# List Concatenation Blocks
+# =============================================================================
+
+
class ConcatenateListsBlock(Block):
+ """
+ Concatenates two or more lists into a single list.
+
+    This block accepts a list of lists and combines all their elements,
+    in order, into one output list (nested elements are preserved, not
+    flattened). Optional deduplication and None-filtering make it a
+    flexible list-merging step in workflow pipelines.
+ """
+
class Input(BlockSchemaInput):
lists: List[List[Any]] = SchemaField(
description="A list of lists to concatenate together. All lists will be combined in order into a single list.",
placeholder="e.g., [[1, 2], [3, 4], [5, 6]]",
)
+ deduplicate: bool = SchemaField(
+ description="If True, remove duplicate elements from the concatenated result while preserving order.",
+ default=False,
+ advanced=True,
+ )
+ remove_none: bool = SchemaField(
+ description="If True, remove None values from the concatenated result.",
+ default=False,
+ advanced=True,
+ )
class Output(BlockSchemaOutput):
concatenated_list: List[Any] = SchemaField(
description="The concatenated list containing all elements from all input lists in order."
)
+ length: int = SchemaField(
+ description="The total number of elements in the concatenated list."
+ )
error: str = SchemaField(
description="Error message if concatenation failed due to invalid input types."
)
@@ -700,7 +902,7 @@ class ConcatenateListsBlock(Block):
def __init__(self):
super().__init__(
id="3cf9298b-5817-4141-9d80-7c2cc5199c8e",
- description="Concatenates multiple lists into a single list. All elements from all input lists are combined in order.",
+ description="Concatenates multiple lists into a single list. All elements from all input lists are combined in order. Supports optional deduplication and None removal.",
categories={BlockCategory.BASIC},
input_schema=ConcatenateListsBlock.Input,
output_schema=ConcatenateListsBlock.Output,
@@ -709,29 +911,497 @@ class ConcatenateListsBlock(Block):
{"lists": [["a", "b"], ["c"], ["d", "e", "f"]]},
{"lists": [[1, 2], []]},
{"lists": []},
+ {"lists": [[1, 2, 2, 3], [3, 4]], "deduplicate": True},
+ {"lists": [[1, None, 2], [None, 3]], "remove_none": True},
],
test_output=[
("concatenated_list", [1, 2, 3, 4, 5, 6]),
+ ("length", 6),
("concatenated_list", ["a", "b", "c", "d", "e", "f"]),
+ ("length", 6),
("concatenated_list", [1, 2]),
+ ("length", 2),
("concatenated_list", []),
+ ("length", 0),
+ ("concatenated_list", [1, 2, 3, 4]),
+ ("length", 4),
+ ("concatenated_list", [1, 2, 3]),
+ ("length", 3),
],
)
+ def _validate_inputs(self, lists: List[Any]) -> str | None:
+ return _validate_all_lists(lists)
+
+ def _perform_concatenation(self, lists: List[List[Any]]) -> List[Any]:
+ return _concatenate_lists_simple(lists)
+
+ def _apply_deduplication(self, items: List[Any]) -> List[Any]:
+ return _deduplicate_list(items)
+
+ def _apply_none_removal(self, items: List[Any]) -> List[Any]:
+ return _filter_none_values(items)
+
+ def _post_process(
+ self, items: List[Any], deduplicate: bool, remove_none: bool
+ ) -> List[Any]:
+ """Apply all post-processing steps to the concatenated result."""
+ result = items
+ if remove_none:
+ result = self._apply_none_removal(result)
+ if deduplicate:
+ result = self._apply_deduplication(result)
+ return result
+
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
- concatenated = []
- for idx, lst in enumerate(input_data.lists):
- if lst is None:
- # Skip None values to avoid errors
- continue
- if not isinstance(lst, list):
- # Type validation: each item must be a list
- # Strings are iterable and would cause extend() to iterate character-by-character
- # Non-iterable types would raise TypeError
- yield "error", (
- f"Invalid input at index {idx}: expected a list, got {type(lst).__name__}. "
- f"All items in 'lists' must be lists (e.g., [[1, 2], [3, 4]])."
- )
- return
- concatenated.extend(lst)
- yield "concatenated_list", concatenated
+ # Validate all inputs are lists
+ validation_error = self._validate_inputs(input_data.lists)
+ if validation_error is not None:
+ yield "error", validation_error
+ return
+
+ # Perform concatenation
+ concatenated = self._perform_concatenation(input_data.lists)
+
+ # Apply post-processing
+ result = self._post_process(
+ concatenated, input_data.deduplicate, input_data.remove_none
+ )
+
+ yield "concatenated_list", result
+ yield "length", len(result)
+
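# A minimal sketch of driving this block directly (in the platform, blocks are
# normally invoked by the executor rather than called by hand); it mirrors the
# dedicated tests added elsewhere in this PR:
#
#     block = ConcatenateListsBlock()
#     outputs = {}
#     async for name, value in block.run(
#         ConcatenateListsBlock.Input(
#             lists=[[1, None, 2], [2, 3]], deduplicate=True, remove_none=True
#         )
#     ):
#         outputs[name] = value
#     assert outputs == {"concatenated_list": [1, 2, 3], "length": 3}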
+
+class FlattenListBlock(Block):
+ """
+ Flattens a nested list structure into a single flat list.
+
+ This block takes a list that may contain nested lists at any depth
+ and produces a single-level list with all leaf elements. Useful
+ for normalizing data structures from multiple sources that may
+ have varying levels of nesting.
+ """
+
+ class Input(BlockSchemaInput):
+ nested_list: List[Any] = SchemaField(
+ description="A potentially nested list to flatten into a single-level list.",
+ placeholder="e.g., [[1, [2, 3]], [4, [5, [6]]]]",
+ )
+ max_depth: int = SchemaField(
+ description="Maximum depth to flatten. -1 means flatten completely. 1 means flatten only one level.",
+ default=-1,
+ advanced=True,
+ )
+
+ class Output(BlockSchemaOutput):
+ flattened_list: List[Any] = SchemaField(
+ description="The flattened list with all nested elements extracted."
+ )
+ length: int = SchemaField(
+ description="The number of elements in the flattened list."
+ )
+ original_depth: int = SchemaField(
+ description="The maximum nesting depth of the original input list."
+ )
+ error: str = SchemaField(description="Error message if flattening failed.")
+
+ def __init__(self):
+ super().__init__(
+ id="cc45bb0f-d035-4756-96a7-fe3e36254b4d",
+ description="Flattens a nested list structure into a single flat list. Supports configurable maximum flattening depth.",
+ categories={BlockCategory.BASIC},
+ input_schema=FlattenListBlock.Input,
+ output_schema=FlattenListBlock.Output,
+ test_input=[
+ {"nested_list": [[1, 2], [3, [4, 5]]]},
+ {"nested_list": [1, [2, [3, [4]]]]},
+ {"nested_list": [1, [2, [3, [4]]], 5], "max_depth": 1},
+ {"nested_list": []},
+ {"nested_list": [1, 2, 3]},
+ ],
+ test_output=[
+ ("flattened_list", [1, 2, 3, 4, 5]),
+ ("length", 5),
+ ("original_depth", 3),
+ ("flattened_list", [1, 2, 3, 4]),
+ ("length", 4),
+ ("original_depth", 4),
+ ("flattened_list", [1, 2, [3, [4]], 5]),
+ ("length", 4),
+ ("original_depth", 4),
+ ("flattened_list", []),
+ ("length", 0),
+ ("original_depth", 1),
+ ("flattened_list", [1, 2, 3]),
+ ("length", 3),
+ ("original_depth", 1),
+ ],
+ )
+
+ def _compute_depth(self, items: List[Any]) -> int:
+ """Compute the nesting depth of the input list."""
+ return _compute_nesting_depth(items)
+
+ def _flatten(self, items: List[Any], max_depth: int) -> List[Any]:
+ """Flatten the list to the specified depth."""
+ return _flatten_nested_list(items, max_depth=max_depth)
+
+ def _validate_max_depth(self, max_depth: int) -> str | None:
+ """Validate the max_depth parameter."""
+ if max_depth < -1:
+ return f"max_depth must be -1 (unlimited) or a non-negative integer, got {max_depth}"
+ return None
+
+ async def run(self, input_data: Input, **kwargs) -> BlockOutput:
+ # Validate max_depth
+ depth_error = self._validate_max_depth(input_data.max_depth)
+ if depth_error is not None:
+ yield "error", depth_error
+ return
+
+ original_depth = self._compute_depth(input_data.nested_list)
+ flattened = self._flatten(input_data.nested_list, input_data.max_depth)
+
+ yield "flattened_list", flattened
+ yield "length", len(flattened)
+ yield "original_depth", original_depth
+
+
+class InterleaveListsBlock(Block):
+ """
+ Interleaves elements from multiple lists in round-robin fashion.
+
+ Given multiple input lists, this block takes one element from each
+ list in turn, producing an output where elements alternate between
+    sources. Lists of different lengths are handled gracefully: shorter
+    lists simply stop contributing once exhausted.
+ """
+
+ class Input(BlockSchemaInput):
+ lists: List[List[Any]] = SchemaField(
+ description="A list of lists to interleave. Elements will be taken in round-robin order.",
+ placeholder="e.g., [[1, 2, 3], ['a', 'b', 'c']]",
+ )
+
+ class Output(BlockSchemaOutput):
+ interleaved_list: List[Any] = SchemaField(
+ description="The interleaved list with elements alternating from each input list."
+ )
+ length: int = SchemaField(
+ description="The total number of elements in the interleaved list."
+ )
+ error: str = SchemaField(description="Error message if interleaving failed.")
+
+ def __init__(self):
+ super().__init__(
+ id="9f616084-1d9f-4f8e-bc00-5b9d2a75cd75",
+ description="Interleaves elements from multiple lists in round-robin fashion, alternating between sources.",
+ categories={BlockCategory.BASIC},
+ input_schema=InterleaveListsBlock.Input,
+ output_schema=InterleaveListsBlock.Output,
+ test_input=[
+ {"lists": [[1, 2, 3], ["a", "b", "c"]]},
+ {"lists": [[1, 2, 3], ["a", "b"], ["x", "y", "z"]]},
+ {"lists": [[1], [2], [3]]},
+ {"lists": []},
+ ],
+ test_output=[
+ ("interleaved_list", [1, "a", 2, "b", 3, "c"]),
+ ("length", 6),
+ ("interleaved_list", [1, "a", "x", 2, "b", "y", 3, "z"]),
+ ("length", 8),
+ ("interleaved_list", [1, 2, 3]),
+ ("length", 3),
+ ("interleaved_list", []),
+ ("length", 0),
+ ],
+ )
+
+ def _validate_inputs(self, lists: List[Any]) -> str | None:
+ return _validate_all_lists(lists)
+
+ def _interleave(self, lists: List[List[Any]]) -> List[Any]:
+ return _interleave_lists(lists)
+
+ async def run(self, input_data: Input, **kwargs) -> BlockOutput:
+ validation_error = self._validate_inputs(input_data.lists)
+ if validation_error is not None:
+ yield "error", validation_error
+ return
+
+ result = self._interleave(input_data.lists)
+ yield "interleaved_list", result
+ yield "length", len(result)
+
+
+class ZipListsBlock(Block):
+ """
+    Zips multiple lists together into a list of grouped sub-lists.
+
+ Takes two or more input lists and combines corresponding elements
+ into sub-lists. For example, zipping [1,2,3] and ['a','b','c']
+ produces [[1,'a'], [2,'b'], [3,'c']]. Supports both truncating
+ to shortest list and padding to longest list with a fill value.
+ """
+
+ class Input(BlockSchemaInput):
+ lists: List[List[Any]] = SchemaField(
+ description="A list of lists to zip together. Corresponding elements will be grouped.",
+ placeholder="e.g., [[1, 2, 3], ['a', 'b', 'c']]",
+ )
+ pad_to_longest: bool = SchemaField(
+ description="If True, pad shorter lists with fill_value to match the longest list. If False, truncate to shortest.",
+ default=False,
+ advanced=True,
+ )
+ fill_value: Any = SchemaField(
+ description="Value to use for padding when pad_to_longest is True.",
+ default=None,
+ advanced=True,
+ )
+
+ class Output(BlockSchemaOutput):
+ zipped_list: List[List[Any]] = SchemaField(
+ description="The zipped list of grouped elements."
+ )
+ length: int = SchemaField(
+ description="The number of groups in the zipped result."
+ )
+ error: str = SchemaField(description="Error message if zipping failed.")
+
+ def __init__(self):
+ super().__init__(
+ id="0d0e684f-5cb9-4c4b-b8d1-47a0860e0c07",
+ description="Zips multiple lists together into a list of grouped elements. Supports padding to longest or truncating to shortest.",
+ categories={BlockCategory.BASIC},
+ input_schema=ZipListsBlock.Input,
+ output_schema=ZipListsBlock.Output,
+ test_input=[
+ {"lists": [[1, 2, 3], ["a", "b", "c"]]},
+ {"lists": [[1, 2, 3], ["a", "b"]]},
+ {
+ "lists": [[1, 2], ["a", "b", "c"]],
+ "pad_to_longest": True,
+ "fill_value": 0,
+ },
+ {"lists": []},
+ ],
+ test_output=[
+ ("zipped_list", [[1, "a"], [2, "b"], [3, "c"]]),
+ ("length", 3),
+ ("zipped_list", [[1, "a"], [2, "b"]]),
+ ("length", 2),
+ ("zipped_list", [[1, "a"], [2, "b"], [0, "c"]]),
+ ("length", 3),
+ ("zipped_list", []),
+ ("length", 0),
+ ],
+ )
+
+ def _validate_inputs(self, lists: List[Any]) -> str | None:
+ return _validate_all_lists(lists)
+
+ def _zip_truncate(self, lists: List[List[Any]]) -> List[List[Any]]:
+ """Zip lists, truncating to shortest."""
+ filtered = [lst for lst in lists if lst is not None]
+ if not filtered:
+ return []
+ return [list(group) for group in zip(*filtered)]
+
+ def _zip_pad(self, lists: List[List[Any]], fill_value: Any) -> List[List[Any]]:
+ """Zip lists, padding shorter ones with fill_value."""
+ if not lists:
+ return []
+ lists = [lst for lst in lists if lst is not None]
+ if not lists:
+ return []
+ max_len = max(len(lst) for lst in lists)
+ result: List[List[Any]] = []
+ for i in range(max_len):
+ group: List[Any] = []
+ for lst in lists:
+ if i < len(lst):
+ group.append(lst[i])
+ else:
+ group.append(fill_value)
+ result.append(group)
+ return result
+
+ async def run(self, input_data: Input, **kwargs) -> BlockOutput:
+ validation_error = self._validate_inputs(input_data.lists)
+ if validation_error is not None:
+ yield "error", validation_error
+ return
+
+ if not input_data.lists:
+ yield "zipped_list", []
+ yield "length", 0
+ return
+
+ if input_data.pad_to_longest:
+ result = self._zip_pad(input_data.lists, input_data.fill_value)
+ else:
+ result = self._zip_truncate(input_data.lists)
+
+ yield "zipped_list", result
+ yield "length", len(result)
+
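# A minimal sketch of the two zip modes, calling the block's helpers directly:
# the default mode truncates like Python's zip(), while pad_to_longest fills
# missing slots with fill_value. The variable name is illustrative only.
_zip_example = ZipListsBlock()
assert _zip_example._zip_truncate([[1, 2, 3], ["a", "b"]]) == [[1, "a"], [2, "b"]]
assert _zip_example._zip_pad([[1, 2, 3], ["a", "b"]], fill_value=None) == [
    [1, "a"],
    [2, "b"],
    [3, None],
]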
+
+class ListDifferenceBlock(Block):
+ """
+ Computes the difference between two lists (elements in the first
+ list that are not in the second list).
+
+ This is useful for finding items that exist in one dataset but
+ not in another, such as finding new items, missing items, or
+ items that need to be processed.
+ """
+
+ class Input(BlockSchemaInput):
+ list_a: List[Any] = SchemaField(
+ description="The primary list to check elements from.",
+ placeholder="e.g., [1, 2, 3, 4, 5]",
+ )
+ list_b: List[Any] = SchemaField(
+ description="The list to subtract. Elements found here will be removed from list_a.",
+ placeholder="e.g., [3, 4, 5, 6]",
+ )
+ symmetric: bool = SchemaField(
+ description="If True, compute symmetric difference (elements in either list but not both).",
+ default=False,
+ advanced=True,
+ )
+
+ class Output(BlockSchemaOutput):
+ difference: List[Any] = SchemaField(
+ description="Elements from list_a not found in list_b (or symmetric difference if enabled)."
+ )
+ length: int = SchemaField(
+ description="The number of elements in the difference result."
+ )
+ error: str = SchemaField(description="Error message if the operation failed.")
+
+ def __init__(self):
+ super().__init__(
+ id="05309873-9d61-447e-96b5-b804e2511829",
+ description="Computes the difference between two lists. Returns elements in the first list not found in the second, or symmetric difference.",
+ categories={BlockCategory.BASIC},
+ input_schema=ListDifferenceBlock.Input,
+ output_schema=ListDifferenceBlock.Output,
+ test_input=[
+ {"list_a": [1, 2, 3, 4, 5], "list_b": [3, 4, 5, 6, 7]},
+ {
+ "list_a": [1, 2, 3, 4, 5],
+ "list_b": [3, 4, 5, 6, 7],
+ "symmetric": True,
+ },
+ {"list_a": ["a", "b", "c"], "list_b": ["b"]},
+ {"list_a": [], "list_b": [1, 2, 3]},
+ ],
+ test_output=[
+ ("difference", [1, 2]),
+ ("length", 2),
+ ("difference", [1, 2, 6, 7]),
+ ("length", 4),
+ ("difference", ["a", "c"]),
+ ("length", 2),
+ ("difference", []),
+ ("length", 0),
+ ],
+ )
+
+ def _compute_difference(self, list_a: List[Any], list_b: List[Any]) -> List[Any]:
+ """Compute elements in list_a not in list_b."""
+ b_hashes = {_make_hashable(item) for item in list_b}
+ return [item for item in list_a if _make_hashable(item) not in b_hashes]
+
+ def _compute_symmetric_difference(
+ self, list_a: List[Any], list_b: List[Any]
+ ) -> List[Any]:
+ """Compute elements in either list but not both."""
+ a_hashes = {_make_hashable(item) for item in list_a}
+ b_hashes = {_make_hashable(item) for item in list_b}
+ only_in_a = [item for item in list_a if _make_hashable(item) not in b_hashes]
+ only_in_b = [item for item in list_b if _make_hashable(item) not in a_hashes]
+ return only_in_a + only_in_b
+
+ async def run(self, input_data: Input, **kwargs) -> BlockOutput:
+ if input_data.symmetric:
+ result = self._compute_symmetric_difference(
+ input_data.list_a, input_data.list_b
+ )
+ else:
+ result = self._compute_difference(input_data.list_a, input_data.list_b)
+
+ yield "difference", result
+ yield "length", len(result)
+
+
+class ListIntersectionBlock(Block):
+ """
+ Computes the intersection of two lists (elements present in both lists).
+
+ This is useful for finding common items between two datasets,
+ such as shared tags, mutual connections, or overlapping categories.
+ """
+
+ class Input(BlockSchemaInput):
+ list_a: List[Any] = SchemaField(
+ description="The first list to intersect.",
+ placeholder="e.g., [1, 2, 3, 4, 5]",
+ )
+ list_b: List[Any] = SchemaField(
+ description="The second list to intersect.",
+ placeholder="e.g., [3, 4, 5, 6, 7]",
+ )
+
+ class Output(BlockSchemaOutput):
+ intersection: List[Any] = SchemaField(
+ description="Elements present in both list_a and list_b."
+ )
+ length: int = SchemaField(
+ description="The number of elements in the intersection."
+ )
+ error: str = SchemaField(description="Error message if the operation failed.")
+
+ def __init__(self):
+ super().__init__(
+ id="b6eb08b6-dbe3-411b-b9b4-2508cb311a1f",
+ description="Computes the intersection of two lists, returning only elements present in both.",
+ categories={BlockCategory.BASIC},
+ input_schema=ListIntersectionBlock.Input,
+ output_schema=ListIntersectionBlock.Output,
+ test_input=[
+ {"list_a": [1, 2, 3, 4, 5], "list_b": [3, 4, 5, 6, 7]},
+ {"list_a": ["a", "b", "c"], "list_b": ["c", "d", "e"]},
+ {"list_a": [1, 2], "list_b": [3, 4]},
+ {"list_a": [], "list_b": [1, 2, 3]},
+ ],
+ test_output=[
+ ("intersection", [3, 4, 5]),
+ ("length", 3),
+ ("intersection", ["c"]),
+ ("length", 1),
+ ("intersection", []),
+ ("length", 0),
+ ("intersection", []),
+ ("length", 0),
+ ],
+ )
+
+ def _compute_intersection(self, list_a: List[Any], list_b: List[Any]) -> List[Any]:
+ """Compute elements present in both lists, preserving order from list_a."""
+ b_hashes = {_make_hashable(item) for item in list_b}
+ seen: set = set()
+ result: List[Any] = []
+ for item in list_a:
+ h = _make_hashable(item)
+ if h in b_hashes and h not in seen:
+ result.append(item)
+ seen.add(h)
+ return result
+
+ async def run(self, input_data: Input, **kwargs) -> BlockOutput:
+ result = self._compute_intersection(input_data.list_a, input_data.list_b)
+ yield "intersection", result
+ yield "length", len(result)
diff --git a/autogpt_platform/backend/backend/blocks/jina/search.py b/autogpt_platform/backend/backend/blocks/jina/search.py
index 22a883fa03..5e58ddcab4 100644
--- a/autogpt_platform/backend/backend/blocks/jina/search.py
+++ b/autogpt_platform/backend/backend/blocks/jina/search.py
@@ -17,6 +17,7 @@ from backend.blocks.jina._auth import (
from backend.blocks.search import GetRequest
from backend.data.model import SchemaField
from backend.util.exceptions import BlockExecutionError
+from backend.util.request import HTTPClientError, HTTPServerError, validate_url
class SearchTheWebBlock(Block, GetRequest):
@@ -110,7 +111,12 @@ class ExtractWebsiteContentBlock(Block, GetRequest):
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
if input_data.raw_content:
- url = input_data.url
+ try:
+ parsed_url, _, _ = await validate_url(input_data.url, [])
+ url = parsed_url.geturl()
+ except ValueError as e:
+ yield "error", f"Invalid URL: {e}"
+ return
headers = {}
else:
url = f"https://r.jina.ai/{input_data.url}"
@@ -119,5 +125,20 @@ class ExtractWebsiteContentBlock(Block, GetRequest):
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
- content = await self.get_request(url, json=False, headers=headers)
+ try:
+ content = await self.get_request(url, json=False, headers=headers)
+ except HTTPClientError as e:
+ yield "error", f"Client error ({e.status_code}) fetching {input_data.url}: {e}"
+ return
+ except HTTPServerError as e:
+ yield "error", f"Server error ({e.status_code}) fetching {input_data.url}: {e}"
+ return
+ except Exception as e:
+ yield "error", f"Failed to fetch {input_data.url}: {e}"
+ return
+
+ if not content:
+ yield "error", f"No content returned for {input_data.url}"
+ return
+
yield "content", content
diff --git a/autogpt_platform/backend/backend/copilot/sdk/security_hooks.py b/autogpt_platform/backend/backend/copilot/sdk/security_hooks.py
index 5224400f96..7bae54e38d 100644
--- a/autogpt_platform/backend/backend/copilot/sdk/security_hooks.py
+++ b/autogpt_platform/backend/backend/copilot/sdk/security_hooks.py
@@ -11,45 +11,15 @@ import re
from collections.abc import Callable
from typing import Any, cast
-from .tool_adapter import MCP_TOOL_PREFIX
+from .tool_adapter import (
+ BLOCKED_TOOLS,
+ DANGEROUS_PATTERNS,
+ MCP_TOOL_PREFIX,
+ WORKSPACE_SCOPED_TOOLS,
+)
logger = logging.getLogger(__name__)
-# Tools that are blocked entirely (CLI/system access).
-# "Bash" (capital) is the SDK built-in — it's NOT in allowed_tools but blocked
-# here as defence-in-depth. The agent uses mcp__copilot__bash_exec instead,
-# which has kernel-level network isolation (unshare --net).
-BLOCKED_TOOLS = {
- "Bash",
- "bash",
- "shell",
- "exec",
- "terminal",
- "command",
-}
-
-# Tools allowed only when their path argument stays within the SDK workspace.
-# The SDK uses these to handle oversized tool results (writes to tool-results/
-# files, then reads them back) and for workspace file operations.
-WORKSPACE_SCOPED_TOOLS = {"Read", "Write", "Edit", "Glob", "Grep"}
-
-# Dangerous patterns in tool inputs
-DANGEROUS_PATTERNS = [
- r"sudo",
- r"rm\s+-rf",
- r"dd\s+if=",
- r"/etc/passwd",
- r"/etc/shadow",
- r"chmod\s+777",
- r"curl\s+.*\|.*sh",
- r"wget\s+.*\|.*sh",
- r"eval\s*\(",
- r"exec\s*\(",
- r"__import__",
- r"os\.system",
- r"subprocess",
-]
-
def _deny(reason: str) -> dict[str, Any]:
"""Return a hook denial response."""
diff --git a/autogpt_platform/backend/backend/copilot/sdk/service.py b/autogpt_platform/backend/backend/copilot/sdk/service.py
index dc91cac3f8..076e7b5743 100644
--- a/autogpt_platform/backend/backend/copilot/sdk/service.py
+++ b/autogpt_platform/backend/backend/copilot/sdk/service.py
@@ -41,6 +41,7 @@ from .response_adapter import SDKResponseAdapter
from .security_hooks import create_security_hooks
from .tool_adapter import (
COPILOT_TOOL_NAMES,
+ SDK_DISALLOWED_TOOLS,
LongRunningCallback,
create_copilot_mcp_server,
set_execution_context,
@@ -547,7 +548,7 @@ async def stream_chat_completion_sdk(
"system_prompt": system_prompt,
"mcp_servers": {"copilot": mcp_server},
"allowed_tools": COPILOT_TOOL_NAMES,
- "disallowed_tools": ["Bash"],
+ "disallowed_tools": SDK_DISALLOWED_TOOLS,
"hooks": security_hooks,
"cwd": sdk_cwd,
"max_buffer_size": config.claude_agent_max_buffer_size,
diff --git a/autogpt_platform/backend/backend/copilot/sdk/tool_adapter.py b/autogpt_platform/backend/backend/copilot/sdk/tool_adapter.py
index 4e64e77e14..68364e7797 100644
--- a/autogpt_platform/backend/backend/copilot/sdk/tool_adapter.py
+++ b/autogpt_platform/backend/backend/copilot/sdk/tool_adapter.py
@@ -310,7 +310,48 @@ def create_copilot_mcp_server():
# Bash is NOT included — use the sandboxed MCP bash_exec tool instead,
# which provides kernel-level network isolation via unshare --net.
# Task allows spawning sub-agents (rate-limited by security hooks).
-_SDK_BUILTIN_TOOLS = ["Read", "Write", "Edit", "Glob", "Grep", "Task"]
+# WebSearch uses Brave Search via Anthropic's API — safe, no SSRF risk.
+_SDK_BUILTIN_TOOLS = ["Read", "Write", "Edit", "Glob", "Grep", "Task", "WebSearch"]
+
+# SDK built-in tools that must be explicitly blocked.
+# Bash: dangerous — agent uses mcp__copilot__bash_exec with kernel-level
+# network isolation (unshare --net) instead.
+# WebFetch: SSRF risk — can reach internal network (localhost, 10.x, etc.).
+# Agent uses the SSRF-protected mcp__copilot__web_fetch tool instead.
+SDK_DISALLOWED_TOOLS = ["Bash", "WebFetch"]
+
+# Tools that are blocked entirely in security hooks (defence-in-depth).
+# Includes SDK_DISALLOWED_TOOLS plus common aliases/synonyms.
+BLOCKED_TOOLS = {
+ *SDK_DISALLOWED_TOOLS,
+ "bash",
+ "shell",
+ "exec",
+ "terminal",
+ "command",
+}
+
+# Tools allowed only when their path argument stays within the SDK workspace.
+# The SDK uses these to handle oversized tool results (writes to tool-results/
+# files, then reads them back) and for workspace file operations.
+WORKSPACE_SCOPED_TOOLS = {"Read", "Write", "Edit", "Glob", "Grep"}
+
+# Dangerous patterns in tool inputs
+DANGEROUS_PATTERNS = [
+ r"sudo",
+ r"rm\s+-rf",
+ r"dd\s+if=",
+ r"/etc/passwd",
+ r"/etc/shadow",
+ r"chmod\s+777",
+ r"curl\s+.*\|.*sh",
+ r"wget\s+.*\|.*sh",
+ r"eval\s*\(",
+ r"exec\s*\(",
+ r"__import__",
+ r"os\.system",
+ r"subprocess",
+]
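# A rough sketch (the function name and signature below are assumptions, not
# part of this module) of how a pre-tool-use hook can consume these constants;
# the real enforcement lives in security_hooks.py alongside its _deny() helper.
import json
import re


def _looks_dangerous(tool_name: str, tool_input: dict) -> bool:
    # Hard-block known CLI/system tools, then scan the serialized input
    # for any of the dangerous patterns defined above.
    if tool_name in BLOCKED_TOOLS:
        return True
    serialized = json.dumps(tool_input, default=str)
    return any(re.search(pattern, serialized) for pattern in DANGEROUS_PATTERNS)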
# List of tool names for allowed_tools configuration
# Include MCP tools, the MCP Read tool for oversized results,
diff --git a/autogpt_platform/backend/test/blocks/test_jina_extract_website.py b/autogpt_platform/backend/test/blocks/test_jina_extract_website.py
new file mode 100644
index 0000000000..335c43f966
--- /dev/null
+++ b/autogpt_platform/backend/test/blocks/test_jina_extract_website.py
@@ -0,0 +1,66 @@
+from typing import cast
+
+import pytest
+
+from backend.blocks.jina._auth import (
+ TEST_CREDENTIALS,
+ TEST_CREDENTIALS_INPUT,
+ JinaCredentialsInput,
+)
+from backend.blocks.jina.search import ExtractWebsiteContentBlock
+from backend.util.request import HTTPClientError
+
+
+@pytest.mark.asyncio
+async def test_extract_website_content_returns_content(monkeypatch):
+ block = ExtractWebsiteContentBlock()
+ input_data = block.Input(
+ url="https://example.com",
+ credentials=cast(JinaCredentialsInput, TEST_CREDENTIALS_INPUT),
+ raw_content=True,
+ )
+
+ async def fake_get_request(url, json=False, headers=None):
+ assert url == "https://example.com"
+ assert headers == {}
+ return "page content"
+
+ monkeypatch.setattr(block, "get_request", fake_get_request)
+
+ results = [
+ output
+ async for output in block.run(
+ input_data=input_data, credentials=TEST_CREDENTIALS
+ )
+ ]
+
+ assert ("content", "page content") in results
+ assert all(key != "error" for key, _ in results)
+
+
+@pytest.mark.asyncio
+async def test_extract_website_content_handles_http_error(monkeypatch):
+ block = ExtractWebsiteContentBlock()
+ input_data = block.Input(
+ url="https://example.com",
+ credentials=cast(JinaCredentialsInput, TEST_CREDENTIALS_INPUT),
+ raw_content=False,
+ )
+
+ async def fake_get_request(url, json=False, headers=None):
+ raise HTTPClientError("HTTP 400 Error: Bad Request", 400)
+
+ monkeypatch.setattr(block, "get_request", fake_get_request)
+
+ results = [
+ output
+ async for output in block.run(
+ input_data=input_data, credentials=TEST_CREDENTIALS
+ )
+ ]
+
+ assert ("content", "page content") not in results
+ error_messages = [value for key, value in results if key == "error"]
+ assert error_messages
+ assert "Client error (400)" in error_messages[0]
+ assert "https://example.com" in error_messages[0]
diff --git a/autogpt_platform/backend/test/blocks/test_list_concatenation.py b/autogpt_platform/backend/test/blocks/test_list_concatenation.py
new file mode 100644
index 0000000000..8cea3b60f7
--- /dev/null
+++ b/autogpt_platform/backend/test/blocks/test_list_concatenation.py
@@ -0,0 +1,1276 @@
+"""
+Comprehensive test suite for list concatenation and manipulation blocks.
+
+Tests cover:
+- ConcatenateListsBlock: basic concatenation, deduplication, None removal
+- FlattenListBlock: nested list flattening with depth control
+- InterleaveListsBlock: round-robin interleaving of multiple lists
+- ZipListsBlock: zipping lists with truncation and padding
+- ListDifferenceBlock: computing list differences (regular and symmetric)
+- ListIntersectionBlock: finding common elements between lists
+- Helper utility functions: validation, flattening, deduplication, etc.
+"""
+
+import pytest
+
+from backend.blocks.data_manipulation import (
+ _MAX_FLATTEN_DEPTH,
+ ConcatenateListsBlock,
+ FlattenListBlock,
+ InterleaveListsBlock,
+ ListDifferenceBlock,
+ ListIntersectionBlock,
+ ZipListsBlock,
+ _compute_nesting_depth,
+ _concatenate_lists_simple,
+ _deduplicate_list,
+ _filter_none_values,
+ _flatten_nested_list,
+ _interleave_lists,
+ _make_hashable,
+ _validate_all_lists,
+ _validate_list_input,
+)
+from backend.util.test import execute_block_test
+
+# =============================================================================
+# Helper Function Tests
+# =============================================================================
+
+
+class TestValidateListInput:
+ """Tests for the _validate_list_input helper."""
+
+ def test_valid_list_returns_none(self):
+ assert _validate_list_input([1, 2, 3], 0) is None
+
+ def test_empty_list_returns_none(self):
+ assert _validate_list_input([], 0) is None
+
+ def test_none_returns_none(self):
+ assert _validate_list_input(None, 0) is None
+
+ def test_string_returns_error(self):
+ result = _validate_list_input("hello", 0)
+ assert result is not None
+ assert "str" in result
+ assert "index 0" in result
+
+ def test_integer_returns_error(self):
+ result = _validate_list_input(42, 1)
+ assert result is not None
+ assert "int" in result
+ assert "index 1" in result
+
+ def test_dict_returns_error(self):
+ result = _validate_list_input({"a": 1}, 2)
+ assert result is not None
+ assert "dict" in result
+ assert "index 2" in result
+
+ def test_tuple_returns_error(self):
+ result = _validate_list_input((1, 2), 3)
+ assert result is not None
+ assert "tuple" in result
+
+ def test_boolean_returns_error(self):
+ result = _validate_list_input(True, 0)
+ assert result is not None
+ assert "bool" in result
+
+ def test_float_returns_error(self):
+ result = _validate_list_input(3.14, 0)
+ assert result is not None
+ assert "float" in result
+
+
+class TestValidateAllLists:
+ """Tests for the _validate_all_lists helper."""
+
+ def test_all_valid_lists(self):
+ assert _validate_all_lists([[1], [2], [3]]) is None
+
+ def test_empty_outer_list(self):
+ assert _validate_all_lists([]) is None
+
+ def test_mixed_valid_and_none(self):
+ # None is skipped, so this should pass
+ assert _validate_all_lists([[1], None, [3]]) is None
+
+ def test_invalid_item_returns_error(self):
+ result = _validate_all_lists([[1], "bad", [3]])
+ assert result is not None
+ assert "index 1" in result
+
+ def test_first_invalid_is_returned(self):
+ result = _validate_all_lists(["first_bad", "second_bad"])
+ assert result is not None
+ assert "index 0" in result
+
+ def test_all_none_passes(self):
+ assert _validate_all_lists([None, None, None]) is None
+
+
+class TestConcatenateListsSimple:
+ """Tests for the _concatenate_lists_simple helper."""
+
+ def test_basic_concatenation(self):
+ assert _concatenate_lists_simple([[1, 2], [3, 4]]) == [1, 2, 3, 4]
+
+ def test_empty_lists(self):
+ assert _concatenate_lists_simple([[], []]) == []
+
+ def test_single_list(self):
+ assert _concatenate_lists_simple([[1, 2, 3]]) == [1, 2, 3]
+
+ def test_no_lists(self):
+ assert _concatenate_lists_simple([]) == []
+
+ def test_skip_none_values(self):
+ assert _concatenate_lists_simple([[1, 2], None, [3, 4]]) == [1, 2, 3, 4] # type: ignore[arg-type]
+
+ def test_mixed_types(self):
+ result = _concatenate_lists_simple([[1, "a"], [True, 3.14]])
+ assert result == [1, "a", True, 3.14]
+
+ def test_nested_lists_preserved(self):
+ result = _concatenate_lists_simple([[[1, 2]], [[3, 4]]])
+ assert result == [[1, 2], [3, 4]]
+
+ def test_large_number_of_lists(self):
+ lists = [[i] for i in range(100)]
+ result = _concatenate_lists_simple(lists)
+ assert result == list(range(100))
+
+
+class TestFlattenNestedList:
+ """Tests for the _flatten_nested_list helper."""
+
+ def test_already_flat(self):
+ assert _flatten_nested_list([1, 2, 3]) == [1, 2, 3]
+
+ def test_one_level_nesting(self):
+ assert _flatten_nested_list([[1, 2], [3, 4]]) == [1, 2, 3, 4]
+
+ def test_deep_nesting(self):
+ assert _flatten_nested_list([1, [2, [3, [4, [5]]]]]) == [1, 2, 3, 4, 5]
+
+ def test_empty_list(self):
+ assert _flatten_nested_list([]) == []
+
+ def test_mixed_nesting(self):
+ assert _flatten_nested_list([1, [2, 3], 4, [5, [6]]]) == [1, 2, 3, 4, 5, 6]
+
+ def test_max_depth_zero(self):
+ # max_depth=0 means no flattening at all
+ result = _flatten_nested_list([[1, 2], [3, 4]], max_depth=0)
+ assert result == [[1, 2], [3, 4]]
+
+ def test_max_depth_one(self):
+ result = _flatten_nested_list([[1, [2, 3]], [4, [5]]], max_depth=1)
+ assert result == [1, [2, 3], 4, [5]]
+
+ def test_max_depth_two(self):
+ result = _flatten_nested_list([[[1, 2], [3]], [[4, [5]]]], max_depth=2)
+ assert result == [1, 2, 3, 4, [5]]
+
+ def test_unlimited_depth(self):
+ deeply_nested = [[[[[[[1]]]]]]]
+ assert _flatten_nested_list(deeply_nested, max_depth=-1) == [1]
+
+ def test_preserves_non_list_iterables(self):
+ result = _flatten_nested_list(["hello", [1, 2]])
+ assert result == ["hello", 1, 2]
+
+ def test_preserves_dicts(self):
+ result = _flatten_nested_list([{"a": 1}, [{"b": 2}]])
+ assert result == [{"a": 1}, {"b": 2}]
+
+ def test_excessive_depth_raises_recursion_error(self):
+ """Deeply nested lists beyond 1000 levels should raise RecursionError."""
+ # Build a list nested 1100 levels deep
+ nested = [42]
+ for _ in range(1100):
+ nested = [nested]
+ with pytest.raises(RecursionError, match="maximum.*depth"):
+ _flatten_nested_list(nested, max_depth=-1)
+
+
+class TestDeduplicateList:
+ """Tests for the _deduplicate_list helper."""
+
+ def test_no_duplicates(self):
+ assert _deduplicate_list([1, 2, 3]) == [1, 2, 3]
+
+ def test_with_duplicates(self):
+ assert _deduplicate_list([1, 2, 2, 3, 3, 3]) == [1, 2, 3]
+
+ def test_all_duplicates(self):
+ assert _deduplicate_list([1, 1, 1]) == [1]
+
+ def test_empty_list(self):
+ assert _deduplicate_list([]) == []
+
+ def test_preserves_order(self):
+ result = _deduplicate_list([3, 1, 2, 1, 3])
+ assert result == [3, 1, 2]
+
+ def test_string_duplicates(self):
+ assert _deduplicate_list(["a", "b", "a", "c"]) == ["a", "b", "c"]
+
+ def test_mixed_types(self):
+ result = _deduplicate_list([1, "1", 1, "1"])
+ assert result == [1, "1"]
+
+ def test_dict_duplicates(self):
+ result = _deduplicate_list([{"a": 1}, {"a": 1}, {"b": 2}])
+ assert result == [{"a": 1}, {"b": 2}]
+
+ def test_list_duplicates(self):
+ result = _deduplicate_list([[1, 2], [1, 2], [3, 4]])
+ assert result == [[1, 2], [3, 4]]
+
+ def test_none_duplicates(self):
+ result = _deduplicate_list([None, 1, None, 2])
+ assert result == [None, 1, 2]
+
+ def test_single_element(self):
+ assert _deduplicate_list([42]) == [42]
+
+
+class TestMakeHashable:
+ """Tests for the _make_hashable helper."""
+
+ def test_integer(self):
+ assert _make_hashable(42) == 42
+
+ def test_string(self):
+ assert _make_hashable("hello") == "hello"
+
+ def test_none(self):
+ assert _make_hashable(None) is None
+
+ def test_dict_returns_tuple(self):
+ result = _make_hashable({"a": 1})
+ assert isinstance(result, tuple)
+ # Should be hashable
+ hash(result)
+
+ def test_list_returns_tuple(self):
+ result = _make_hashable([1, 2, 3])
+ assert result == (1, 2, 3)
+
+ def test_same_dict_same_hash(self):
+ assert _make_hashable({"a": 1, "b": 2}) == _make_hashable({"a": 1, "b": 2})
+
+ def test_different_dict_different_hash(self):
+ assert _make_hashable({"a": 1}) != _make_hashable({"a": 2})
+
+ def test_dict_key_order_independent(self):
+ """Dicts with same keys in different insertion order produce same result."""
+ d1 = {"b": 2, "a": 1}
+ d2 = {"a": 1, "b": 2}
+ assert _make_hashable(d1) == _make_hashable(d2)
+
+ def test_tuple_hashable(self):
+ result = _make_hashable((1, 2, 3))
+ assert result == (1, 2, 3)
+ hash(result)
+
+ def test_boolean(self):
+ result = _make_hashable(True)
+ assert result is True
+
+ def test_float(self):
+ result = _make_hashable(3.14)
+ assert result == 3.14
+
+
+class TestFilterNoneValues:
+ """Tests for the _filter_none_values helper."""
+
+ def test_removes_none(self):
+ assert _filter_none_values([1, None, 2, None, 3]) == [1, 2, 3]
+
+ def test_no_none(self):
+ assert _filter_none_values([1, 2, 3]) == [1, 2, 3]
+
+ def test_all_none(self):
+ assert _filter_none_values([None, None, None]) == []
+
+ def test_empty_list(self):
+ assert _filter_none_values([]) == []
+
+ def test_preserves_falsy_values(self):
+ assert _filter_none_values([0, False, "", None, []]) == [0, False, "", []]
+
+
+class TestComputeNestingDepth:
+ """Tests for the _compute_nesting_depth helper."""
+
+ def test_flat_list(self):
+ assert _compute_nesting_depth([1, 2, 3]) == 1
+
+ def test_one_level(self):
+ assert _compute_nesting_depth([[1, 2], [3, 4]]) == 2
+
+ def test_deep_nesting(self):
+ assert _compute_nesting_depth([[[[]]]]) == 4
+
+ def test_mixed_depth(self):
+ depth = _compute_nesting_depth([1, [2, [3]]])
+ assert depth == 3
+
+ def test_empty_list(self):
+ assert _compute_nesting_depth([]) == 1
+
+ def test_non_list(self):
+ assert _compute_nesting_depth(42) == 0
+
+ def test_string_not_recursed(self):
+ # Strings should not be treated as nested lists
+ assert _compute_nesting_depth(["hello"]) == 1
+
+
+class TestInterleaveListsHelper:
+ """Tests for the _interleave_lists helper."""
+
+ def test_equal_length_lists(self):
+ result = _interleave_lists([[1, 2, 3], ["a", "b", "c"]])
+ assert result == [1, "a", 2, "b", 3, "c"]
+
+ def test_unequal_length_lists(self):
+ result = _interleave_lists([[1, 2, 3], ["a"]])
+ assert result == [1, "a", 2, 3]
+
+ def test_empty_input(self):
+ assert _interleave_lists([]) == []
+
+ def test_single_list(self):
+ assert _interleave_lists([[1, 2, 3]]) == [1, 2, 3]
+
+ def test_three_lists(self):
+ result = _interleave_lists([[1], [2], [3]])
+ assert result == [1, 2, 3]
+
+ def test_with_none_list(self):
+ result = _interleave_lists([[1, 2], None, [3, 4]]) # type: ignore[arg-type]
+ assert result == [1, 3, 2, 4]
+
+ def test_all_empty_lists(self):
+ assert _interleave_lists([[], [], []]) == []
+
+ def test_all_none_lists(self):
+ """All-None inputs should return empty list, not crash."""
+ assert _interleave_lists([None, None, None]) == [] # type: ignore[arg-type]
+
+
+class TestComputeNestingDepthEdgeCases:
+ """Tests for _compute_nesting_depth with deeply nested input."""
+
+ def test_deeply_nested_does_not_crash(self):
+ """Deeply nested lists beyond 1000 levels should not raise RecursionError."""
+ nested = [42]
+ for _ in range(1100):
+ nested = [nested]
+ # Should return a depth value without crashing
+ depth = _compute_nesting_depth(nested)
+ assert depth >= _MAX_FLATTEN_DEPTH
+
+
+class TestMakeHashableMixedKeys:
+ """Tests for _make_hashable with mixed-type dict keys."""
+
+ def test_mixed_type_dict_keys(self):
+ """Dicts with mixed-type keys (int and str) should not crash sorted()."""
+ d = {1: "one", "two": 2}
+ result = _make_hashable(d)
+ assert isinstance(result, tuple)
+ hash(result) # Should be hashable without error
+
+ def test_mixed_type_keys_deterministic(self):
+ """Same dict with mixed keys produces same result."""
+ d1 = {1: "a", "b": 2}
+ d2 = {1: "a", "b": 2}
+ assert _make_hashable(d1) == _make_hashable(d2)
+
+
+class TestZipListsNoneHandling:
+ """Tests for ZipListsBlock with None values in input."""
+
+ def setup_method(self):
+ self.block = ZipListsBlock()
+
+ def test_zip_truncate_with_none(self):
+ """_zip_truncate should handle None values in input lists."""
+ result = self.block._zip_truncate([[1, 2], None, [3, 4]]) # type: ignore[arg-type]
+ assert result == [[1, 3], [2, 4]]
+
+ def test_zip_pad_with_none(self):
+ """_zip_pad should handle None values in input lists."""
+ result = self.block._zip_pad([[1, 2, 3], None, ["a"]], fill_value="X") # type: ignore[arg-type]
+ assert result == [[1, "a"], [2, "X"], [3, "X"]]
+
+ def test_zip_truncate_all_none(self):
+ """All-None inputs should return empty list."""
+ result = self.block._zip_truncate([None, None]) # type: ignore[arg-type]
+ assert result == []
+
+ def test_zip_pad_all_none(self):
+ """All-None inputs should return empty list."""
+ result = self.block._zip_pad([None, None], fill_value=0) # type: ignore[arg-type]
+ assert result == []
+
+
+# =============================================================================
+# Block Built-in Tests (using test_input/test_output)
+# =============================================================================
+
+
+class TestConcatenateListsBlockBuiltin:
+ """Run the built-in test_input/test_output tests for ConcatenateListsBlock."""
+
+ @pytest.mark.asyncio
+ async def test_builtin_tests(self):
+ block = ConcatenateListsBlock()
+ await execute_block_test(block)
+
+
+class TestFlattenListBlockBuiltin:
+ """Run the built-in test_input/test_output tests for FlattenListBlock."""
+
+ @pytest.mark.asyncio
+ async def test_builtin_tests(self):
+ block = FlattenListBlock()
+ await execute_block_test(block)
+
+
+class TestInterleaveListsBlockBuiltin:
+ """Run the built-in test_input/test_output tests for InterleaveListsBlock."""
+
+ @pytest.mark.asyncio
+ async def test_builtin_tests(self):
+ block = InterleaveListsBlock()
+ await execute_block_test(block)
+
+
+class TestZipListsBlockBuiltin:
+ """Run the built-in test_input/test_output tests for ZipListsBlock."""
+
+ @pytest.mark.asyncio
+ async def test_builtin_tests(self):
+ block = ZipListsBlock()
+ await execute_block_test(block)
+
+
+class TestListDifferenceBlockBuiltin:
+ """Run the built-in test_input/test_output tests for ListDifferenceBlock."""
+
+ @pytest.mark.asyncio
+ async def test_builtin_tests(self):
+ block = ListDifferenceBlock()
+ await execute_block_test(block)
+
+
+class TestListIntersectionBlockBuiltin:
+ """Run the built-in test_input/test_output tests for ListIntersectionBlock."""
+
+ @pytest.mark.asyncio
+ async def test_builtin_tests(self):
+ block = ListIntersectionBlock()
+ await execute_block_test(block)
+
+
+# =============================================================================
+# ConcatenateListsBlock Manual Tests
+# =============================================================================
+
+
+class TestConcatenateListsBlockManual:
+ """Manual test cases for ConcatenateListsBlock edge cases."""
+
+ def setup_method(self):
+ self.block = ConcatenateListsBlock()
+
+ @pytest.mark.asyncio
+ async def test_two_lists(self):
+ """Test basic two-list concatenation."""
+ results = {}
+ async for name, value in self.block.run(
+ ConcatenateListsBlock.Input(lists=[[1, 2], [3, 4]])
+ ):
+ results[name] = value
+ assert results["concatenated_list"] == [1, 2, 3, 4]
+ assert results["length"] == 4
+
+ @pytest.mark.asyncio
+ async def test_three_lists(self):
+ """Test three-list concatenation."""
+ results = {}
+ async for name, value in self.block.run(
+ ConcatenateListsBlock.Input(lists=[[1], [2], [3]])
+ ):
+ results[name] = value
+ assert results["concatenated_list"] == [1, 2, 3]
+
+ @pytest.mark.asyncio
+ async def test_five_lists(self):
+ """Test concatenation of five lists."""
+ results = {}
+ async for name, value in self.block.run(
+ ConcatenateListsBlock.Input(lists=[[1], [2], [3], [4], [5]])
+ ):
+ results[name] = value
+ assert results["concatenated_list"] == [1, 2, 3, 4, 5]
+ assert results["length"] == 5
+
+ @pytest.mark.asyncio
+ async def test_empty_lists_only(self):
+ """Test concatenation of only empty lists."""
+ results = {}
+ async for name, value in self.block.run(
+ ConcatenateListsBlock.Input(lists=[[], [], []])
+ ):
+ results[name] = value
+ assert results["concatenated_list"] == []
+ assert results["length"] == 0
+
+ @pytest.mark.asyncio
+ async def test_mixed_types_in_lists(self):
+ """Test concatenation with mixed types."""
+ results = {}
+ async for name, value in self.block.run(
+ ConcatenateListsBlock.Input(
+ lists=[[1, "a"], [True, 3.14], [None, {"key": "val"}]]
+ )
+ ):
+ results[name] = value
+ assert results["concatenated_list"] == [
+ 1,
+ "a",
+ True,
+ 3.14,
+ None,
+ {"key": "val"},
+ ]
+
+ @pytest.mark.asyncio
+ async def test_deduplication_enabled(self):
+ """Test deduplication removes duplicates."""
+ results = {}
+ async for name, value in self.block.run(
+ ConcatenateListsBlock.Input(
+ lists=[[1, 2, 3], [2, 3, 4], [3, 4, 5]],
+ deduplicate=True,
+ )
+ ):
+ results[name] = value
+ assert results["concatenated_list"] == [1, 2, 3, 4, 5]
+
+ @pytest.mark.asyncio
+ async def test_deduplication_preserves_order(self):
+ """Test that deduplication preserves first-occurrence order."""
+ results = {}
+ async for name, value in self.block.run(
+ ConcatenateListsBlock.Input(
+ lists=[[3, 1, 2], [2, 4, 1]],
+ deduplicate=True,
+ )
+ ):
+ results[name] = value
+ assert results["concatenated_list"] == [3, 1, 2, 4]
+
+ @pytest.mark.asyncio
+ async def test_remove_none_enabled(self):
+ """Test None removal from concatenated results."""
+ results = {}
+ async for name, value in self.block.run(
+ ConcatenateListsBlock.Input(
+ lists=[[1, None], [None, 2], [3, None]],
+ remove_none=True,
+ )
+ ):
+ results[name] = value
+ assert results["concatenated_list"] == [1, 2, 3]
+
+ @pytest.mark.asyncio
+ async def test_dedup_and_remove_none_combined(self):
+ """Test both deduplication and None removal together."""
+ results = {}
+ async for name, value in self.block.run(
+ ConcatenateListsBlock.Input(
+ lists=[[1, None, 2], [2, None, 3]],
+ deduplicate=True,
+ remove_none=True,
+ )
+ ):
+ results[name] = value
+ assert results["concatenated_list"] == [1, 2, 3]
+
+ @pytest.mark.asyncio
+ async def test_nested_lists_preserved(self):
+ """Test that nested lists are not flattened during concatenation."""
+ results = {}
+ async for name, value in self.block.run(
+ ConcatenateListsBlock.Input(lists=[[[1, 2]], [[3, 4]]])
+ ):
+ results[name] = value
+ assert results["concatenated_list"] == [[1, 2], [3, 4]]
+
+ @pytest.mark.asyncio
+ async def test_large_lists(self):
+ """Test concatenation of large lists."""
+ list_a = list(range(1000))
+ list_b = list(range(1000, 2000))
+ results = {}
+ async for name, value in self.block.run(
+ ConcatenateListsBlock.Input(lists=[list_a, list_b])
+ ):
+ results[name] = value
+ assert results["concatenated_list"] == list(range(2000))
+ assert results["length"] == 2000
+
+ @pytest.mark.asyncio
+ async def test_single_list_input(self):
+ """Test concatenation with a single list."""
+ results = {}
+ async for name, value in self.block.run(
+ ConcatenateListsBlock.Input(lists=[[1, 2, 3]])
+ ):
+ results[name] = value
+ assert results["concatenated_list"] == [1, 2, 3]
+
+ @pytest.mark.asyncio
+ async def test_block_id_is_valid_uuid(self):
+ """Test that the block has a valid UUID4 ID."""
+ import uuid
+
+ parsed = uuid.UUID(self.block.id)
+ assert parsed.version == 4
+
+ @pytest.mark.asyncio
+ async def test_block_category(self):
+ """Test that the block has the correct category."""
+ from backend.blocks._base import BlockCategory
+
+ assert BlockCategory.BASIC in self.block.categories
+
+
+# =============================================================================
+# FlattenListBlock Manual Tests
+# =============================================================================
+
+
+class TestFlattenListBlockManual:
+ """Manual test cases for FlattenListBlock."""
+
+ def setup_method(self):
+ self.block = FlattenListBlock()
+
+ @pytest.mark.asyncio
+ async def test_simple_flatten(self):
+ """Test flattening a simple nested list."""
+ results = {}
+ async for name, value in self.block.run(
+ FlattenListBlock.Input(nested_list=[[1, 2], [3, 4]])
+ ):
+ results[name] = value
+ assert results["flattened_list"] == [1, 2, 3, 4]
+ assert results["length"] == 4
+
+ @pytest.mark.asyncio
+ async def test_deeply_nested(self):
+ """Test flattening a deeply nested structure."""
+ results = {}
+ async for name, value in self.block.run(
+ FlattenListBlock.Input(nested_list=[1, [2, [3, [4, [5]]]]])
+ ):
+ results[name] = value
+ assert results["flattened_list"] == [1, 2, 3, 4, 5]
+
+ @pytest.mark.asyncio
+ async def test_partial_flatten(self):
+ """Test flattening with max_depth=1."""
+ results = {}
+ async for name, value in self.block.run(
+ FlattenListBlock.Input(
+ nested_list=[[1, [2, 3]], [4, [5]]],
+ max_depth=1,
+ )
+ ):
+ results[name] = value
+ assert results["flattened_list"] == [1, [2, 3], 4, [5]]
+
+ @pytest.mark.asyncio
+ async def test_already_flat_list(self):
+ """Test flattening an already flat list."""
+ results = {}
+ async for name, value in self.block.run(
+ FlattenListBlock.Input(nested_list=[1, 2, 3, 4])
+ ):
+ results[name] = value
+ assert results["flattened_list"] == [1, 2, 3, 4]
+
+ @pytest.mark.asyncio
+ async def test_empty_nested_lists(self):
+ """Test flattening with empty nested lists."""
+ results = {}
+ async for name, value in self.block.run(
+ FlattenListBlock.Input(nested_list=[[], [1], [], [2], []])
+ ):
+ results[name] = value
+ assert results["flattened_list"] == [1, 2]
+
+ @pytest.mark.asyncio
+ async def test_mixed_types_preserved(self):
+ """Test that non-list types are preserved during flattening."""
+ results = {}
+ async for name, value in self.block.run(
+ FlattenListBlock.Input(nested_list=["hello", [1, {"a": 1}], [True]])
+ ):
+ results[name] = value
+ assert results["flattened_list"] == ["hello", 1, {"a": 1}, True]
+
+ @pytest.mark.asyncio
+ async def test_original_depth_reported(self):
+ """Test that original nesting depth is correctly reported."""
+ results = {}
+ async for name, value in self.block.run(
+ FlattenListBlock.Input(nested_list=[1, [2, [3]]])
+ ):
+ results[name] = value
+ assert results["original_depth"] == 3
+
+ @pytest.mark.asyncio
+ async def test_block_id_is_valid_uuid(self):
+ """Test that the block has a valid UUID4 ID."""
+ import uuid
+
+ parsed = uuid.UUID(self.block.id)
+ assert parsed.version == 4
+
+
+# =============================================================================
+# InterleaveListsBlock Manual Tests
+# =============================================================================
+
+
+class TestInterleaveListsBlockManual:
+ """Manual test cases for InterleaveListsBlock."""
+
+ def setup_method(self):
+ self.block = InterleaveListsBlock()
+
+ @pytest.mark.asyncio
+ async def test_equal_length_interleave(self):
+ """Test interleaving two equal-length lists."""
+ results = {}
+ async for name, value in self.block.run(
+ InterleaveListsBlock.Input(lists=[[1, 2, 3], ["a", "b", "c"]])
+ ):
+ results[name] = value
+ assert results["interleaved_list"] == [1, "a", 2, "b", 3, "c"]
+
+ @pytest.mark.asyncio
+ async def test_unequal_length_interleave(self):
+ """Test interleaving lists of different lengths."""
+ results = {}
+ async for name, value in self.block.run(
+ InterleaveListsBlock.Input(lists=[[1, 2, 3, 4], ["a", "b"]])
+ ):
+ results[name] = value
+ assert results["interleaved_list"] == [1, "a", 2, "b", 3, 4]
+
+ @pytest.mark.asyncio
+ async def test_three_lists_interleave(self):
+ """Test interleaving three lists."""
+ results = {}
+ async for name, value in self.block.run(
+ InterleaveListsBlock.Input(lists=[[1, 2], ["a", "b"], ["x", "y"]])
+ ):
+ results[name] = value
+ assert results["interleaved_list"] == [1, "a", "x", 2, "b", "y"]
+
+ @pytest.mark.asyncio
+ async def test_single_element_lists(self):
+ """Test interleaving single-element lists."""
+ results = {}
+ async for name, value in self.block.run(
+ InterleaveListsBlock.Input(lists=[[1], [2], [3], [4]])
+ ):
+ results[name] = value
+ assert results["interleaved_list"] == [1, 2, 3, 4]
+
+ @pytest.mark.asyncio
+ async def test_block_id_is_valid_uuid(self):
+ """Test that the block has a valid UUID4 ID."""
+ import uuid
+
+ parsed = uuid.UUID(self.block.id)
+ assert parsed.version == 4
+
+
+# =============================================================================
+# ZipListsBlock Manual Tests
+# =============================================================================
+
+
+class TestZipListsBlockManual:
+ """Manual test cases for ZipListsBlock."""
+
+ def setup_method(self):
+ self.block = ZipListsBlock()
+
+ @pytest.mark.asyncio
+ async def test_basic_zip(self):
+ """Test basic zipping of two lists."""
+ results = {}
+ async for name, value in self.block.run(
+ ZipListsBlock.Input(lists=[[1, 2, 3], ["a", "b", "c"]])
+ ):
+ results[name] = value
+ assert results["zipped_list"] == [[1, "a"], [2, "b"], [3, "c"]]
+
+ @pytest.mark.asyncio
+ async def test_truncate_to_shortest(self):
+ """Test that default behavior truncates to shortest list."""
+ results = {}
+ async for name, value in self.block.run(
+ ZipListsBlock.Input(lists=[[1, 2, 3], ["a", "b"]])
+ ):
+ results[name] = value
+ assert results["zipped_list"] == [[1, "a"], [2, "b"]]
+ assert results["length"] == 2
+
+ @pytest.mark.asyncio
+ async def test_pad_to_longest(self):
+ """Test padding shorter lists with fill value."""
+ results = {}
+ async for name, value in self.block.run(
+ ZipListsBlock.Input(
+ lists=[[1, 2, 3], ["a"]],
+ pad_to_longest=True,
+ fill_value="X",
+ )
+ ):
+ results[name] = value
+ assert results["zipped_list"] == [[1, "a"], [2, "X"], [3, "X"]]
+
+ @pytest.mark.asyncio
+ async def test_pad_with_none(self):
+ """Test padding with None (default fill value)."""
+ results = {}
+ async for name, value in self.block.run(
+ ZipListsBlock.Input(
+ lists=[[1, 2], ["a"]],
+ pad_to_longest=True,
+ )
+ ):
+ results[name] = value
+ assert results["zipped_list"] == [[1, "a"], [2, None]]
+
+ @pytest.mark.asyncio
+ async def test_three_lists_zip(self):
+ """Test zipping three lists."""
+ results = {}
+ async for name, value in self.block.run(
+ ZipListsBlock.Input(lists=[[1, 2], ["a", "b"], [True, False]])
+ ):
+ results[name] = value
+ assert results["zipped_list"] == [[1, "a", True], [2, "b", False]]
+
+ @pytest.mark.asyncio
+ async def test_empty_lists_zip(self):
+ """Test zipping empty input."""
+ results = {}
+ async for name, value in self.block.run(ZipListsBlock.Input(lists=[])):
+ results[name] = value
+ assert results["zipped_list"] == []
+ assert results["length"] == 0
+
+ @pytest.mark.asyncio
+ async def test_block_id_is_valid_uuid(self):
+ """Test that the block has a valid UUID4 ID."""
+ import uuid
+
+ parsed = uuid.UUID(self.block.id)
+ assert parsed.version == 4
+
+
+# =============================================================================
+# ListDifferenceBlock Manual Tests
+# =============================================================================
+
+
+class TestListDifferenceBlockManual:
+ """Manual test cases for ListDifferenceBlock."""
+
+ def setup_method(self):
+ self.block = ListDifferenceBlock()
+
+ @pytest.mark.asyncio
+ async def test_basic_difference(self):
+ """Test basic set difference."""
+ results = {}
+ async for name, value in self.block.run(
+ ListDifferenceBlock.Input(
+ list_a=[1, 2, 3, 4, 5],
+ list_b=[3, 4, 5, 6, 7],
+ )
+ ):
+ results[name] = value
+ assert results["difference"] == [1, 2]
+
+ @pytest.mark.asyncio
+ async def test_symmetric_difference(self):
+ """Test symmetric difference."""
+ results = {}
+ async for name, value in self.block.run(
+ ListDifferenceBlock.Input(
+ list_a=[1, 2, 3],
+ list_b=[2, 3, 4],
+ symmetric=True,
+ )
+ ):
+ results[name] = value
+ assert results["difference"] == [1, 4]
+
+ @pytest.mark.asyncio
+ async def test_no_difference(self):
+ """Test when lists are identical."""
+ results = {}
+ async for name, value in self.block.run(
+ ListDifferenceBlock.Input(
+ list_a=[1, 2, 3],
+ list_b=[1, 2, 3],
+ )
+ ):
+ results[name] = value
+ assert results["difference"] == []
+ assert results["length"] == 0
+
+ @pytest.mark.asyncio
+ async def test_complete_difference(self):
+ """Test when lists share no elements."""
+ results = {}
+ async for name, value in self.block.run(
+ ListDifferenceBlock.Input(
+ list_a=[1, 2, 3],
+ list_b=[4, 5, 6],
+ )
+ ):
+ results[name] = value
+ assert results["difference"] == [1, 2, 3]
+
+ @pytest.mark.asyncio
+ async def test_empty_list_a(self):
+ """Test with empty list_a."""
+ results = {}
+ async for name, value in self.block.run(
+ ListDifferenceBlock.Input(list_a=[], list_b=[1, 2, 3])
+ ):
+ results[name] = value
+ assert results["difference"] == []
+
+ @pytest.mark.asyncio
+ async def test_empty_list_b(self):
+ """Test with empty list_b."""
+ results = {}
+ async for name, value in self.block.run(
+ ListDifferenceBlock.Input(list_a=[1, 2, 3], list_b=[])
+ ):
+ results[name] = value
+ assert results["difference"] == [1, 2, 3]
+
+ @pytest.mark.asyncio
+ async def test_string_difference(self):
+ """Test difference with string elements."""
+ results = {}
+ async for name, value in self.block.run(
+ ListDifferenceBlock.Input(
+ list_a=["apple", "banana", "cherry"],
+ list_b=["banana", "date"],
+ )
+ ):
+ results[name] = value
+ assert results["difference"] == ["apple", "cherry"]
+
+ @pytest.mark.asyncio
+ async def test_dict_difference(self):
+ """Test difference with dictionary elements."""
+ results = {}
+ async for name, value in self.block.run(
+ ListDifferenceBlock.Input(
+ list_a=[{"a": 1}, {"b": 2}, {"c": 3}],
+ list_b=[{"b": 2}],
+ )
+ ):
+ results[name] = value
+ assert results["difference"] == [{"a": 1}, {"c": 3}]
+
+ @pytest.mark.asyncio
+ async def test_block_id_is_valid_uuid(self):
+ """Test that the block has a valid UUID4 ID."""
+ import uuid
+
+ parsed = uuid.UUID(self.block.id)
+ assert parsed.version == 4
+
+
+# =============================================================================
+# ListIntersectionBlock Manual Tests
+# =============================================================================
+
+
+class TestListIntersectionBlockManual:
+ """Manual test cases for ListIntersectionBlock."""
+
+ def setup_method(self):
+ self.block = ListIntersectionBlock()
+
+ @pytest.mark.asyncio
+ async def test_basic_intersection(self):
+ """Test basic intersection."""
+ results = {}
+ async for name, value in self.block.run(
+ ListIntersectionBlock.Input(
+ list_a=[1, 2, 3, 4, 5],
+ list_b=[3, 4, 5, 6, 7],
+ )
+ ):
+ results[name] = value
+ assert results["intersection"] == [3, 4, 5]
+ assert results["length"] == 3
+
+ @pytest.mark.asyncio
+ async def test_no_intersection(self):
+ """Test when lists share no elements."""
+ results = {}
+ async for name, value in self.block.run(
+ ListIntersectionBlock.Input(
+ list_a=[1, 2, 3],
+ list_b=[4, 5, 6],
+ )
+ ):
+ results[name] = value
+ assert results["intersection"] == []
+ assert results["length"] == 0
+
+ @pytest.mark.asyncio
+ async def test_identical_lists(self):
+ """Test intersection of identical lists."""
+ results = {}
+ async for name, value in self.block.run(
+ ListIntersectionBlock.Input(
+ list_a=[1, 2, 3],
+ list_b=[1, 2, 3],
+ )
+ ):
+ results[name] = value
+ assert results["intersection"] == [1, 2, 3]
+
+ @pytest.mark.asyncio
+ async def test_preserves_order_from_list_a(self):
+ """Test that intersection preserves order from list_a."""
+ results = {}
+ async for name, value in self.block.run(
+ ListIntersectionBlock.Input(
+ list_a=[5, 3, 1],
+ list_b=[1, 3, 5],
+ )
+ ):
+ results[name] = value
+ assert results["intersection"] == [5, 3, 1]
+
+ @pytest.mark.asyncio
+ async def test_empty_list_a(self):
+ """Test with empty list_a."""
+ results = {}
+ async for name, value in self.block.run(
+ ListIntersectionBlock.Input(list_a=[], list_b=[1, 2, 3])
+ ):
+ results[name] = value
+ assert results["intersection"] == []
+
+ @pytest.mark.asyncio
+ async def test_empty_list_b(self):
+ """Test with empty list_b."""
+ results = {}
+ async for name, value in self.block.run(
+ ListIntersectionBlock.Input(list_a=[1, 2, 3], list_b=[])
+ ):
+ results[name] = value
+ assert results["intersection"] == []
+
+ @pytest.mark.asyncio
+ async def test_string_intersection(self):
+ """Test intersection with string elements."""
+ results = {}
+ async for name, value in self.block.run(
+ ListIntersectionBlock.Input(
+ list_a=["apple", "banana", "cherry"],
+ list_b=["banana", "cherry", "date"],
+ )
+ ):
+ results[name] = value
+ assert results["intersection"] == ["banana", "cherry"]
+
+ @pytest.mark.asyncio
+ async def test_deduplication_in_intersection(self):
+ """Test that duplicates in input don't cause duplicate results."""
+ results = {}
+ async for name, value in self.block.run(
+ ListIntersectionBlock.Input(
+ list_a=[1, 1, 2, 2, 3],
+ list_b=[1, 2],
+ )
+ ):
+ results[name] = value
+ assert results["intersection"] == [1, 2]
+
+ @pytest.mark.asyncio
+ async def test_block_id_is_valid_uuid(self):
+ """Test that the block has a valid UUID4 ID."""
+ import uuid
+
+ parsed = uuid.UUID(self.block.id)
+ assert parsed.version == 4
+
+
+# =============================================================================
+# Block Method Tests
+# =============================================================================
+
+
+class TestConcatenateListsBlockMethods:
+ """Tests for internal methods of ConcatenateListsBlock."""
+
+ def setup_method(self):
+ self.block = ConcatenateListsBlock()
+
+ def test_validate_inputs_valid(self):
+ assert self.block._validate_inputs([[1], [2]]) is None
+
+ def test_validate_inputs_invalid(self):
+ result = self.block._validate_inputs([[1], "bad"])
+ assert result is not None
+
+ def test_perform_concatenation(self):
+ result = self.block._perform_concatenation([[1, 2], [3, 4]])
+ assert result == [1, 2, 3, 4]
+
+ def test_apply_deduplication(self):
+ result = self.block._apply_deduplication([1, 2, 2, 3])
+ assert result == [1, 2, 3]
+
+ def test_apply_none_removal(self):
+ result = self.block._apply_none_removal([1, None, 2])
+ assert result == [1, 2]
+
+ def test_post_process_all_options(self):
+ result = self.block._post_process(
+ [1, None, 2, None, 2], deduplicate=True, remove_none=True
+ )
+ assert result == [1, 2]
+
+ def test_post_process_no_options(self):
+ result = self.block._post_process(
+ [1, None, 2, None, 2], deduplicate=False, remove_none=False
+ )
+ assert result == [1, None, 2, None, 2]
+
+
+class TestFlattenListBlockMethods:
+ """Tests for internal methods of FlattenListBlock."""
+
+ def setup_method(self):
+ self.block = FlattenListBlock()
+
+ def test_compute_depth_flat(self):
+ assert self.block._compute_depth([1, 2, 3]) == 1
+
+ def test_compute_depth_nested(self):
+ assert self.block._compute_depth([[1, [2]]]) == 3
+
+ def test_flatten_unlimited(self):
+ result = self.block._flatten([1, [2, [3]]], max_depth=-1)
+ assert result == [1, 2, 3]
+
+ def test_flatten_limited(self):
+ result = self.block._flatten([1, [2, [3]]], max_depth=1)
+ assert result == [1, 2, [3]]
+
+ def test_validate_max_depth_valid(self):
+ assert self.block._validate_max_depth(-1) is None
+ assert self.block._validate_max_depth(0) is None
+ assert self.block._validate_max_depth(5) is None
+
+ def test_validate_max_depth_invalid(self):
+ result = self.block._validate_max_depth(-2)
+ assert result is not None
+
+
+class TestZipListsBlockMethods:
+ """Tests for internal methods of ZipListsBlock."""
+
+ def setup_method(self):
+ self.block = ZipListsBlock()
+
+ def test_zip_truncate(self):
+ result = self.block._zip_truncate([[1, 2, 3], ["a", "b"]])
+ assert result == [[1, "a"], [2, "b"]]
+
+ def test_zip_pad(self):
+ result = self.block._zip_pad([[1, 2, 3], ["a"]], fill_value="X")
+ assert result == [[1, "a"], [2, "X"], [3, "X"]]
+
+ def test_zip_pad_empty(self):
+ result = self.block._zip_pad([], fill_value=None)
+ assert result == []
+
+ def test_validate_inputs(self):
+ assert self.block._validate_inputs([[1], [2]]) is None
+ result = self.block._validate_inputs([[1], "bad"])
+ assert result is not None
+
+
+class TestListDifferenceBlockMethods:
+ """Tests for internal methods of ListDifferenceBlock."""
+
+ def setup_method(self):
+ self.block = ListDifferenceBlock()
+
+ def test_compute_difference(self):
+ result = self.block._compute_difference([1, 2, 3], [2, 3, 4])
+ assert result == [1]
+
+ def test_compute_symmetric_difference(self):
+ result = self.block._compute_symmetric_difference([1, 2, 3], [2, 3, 4])
+ assert result == [1, 4]
+
+ def test_compute_difference_empty(self):
+ result = self.block._compute_difference([], [1, 2])
+ assert result == []
+
+ def test_compute_symmetric_difference_identical(self):
+ result = self.block._compute_symmetric_difference([1, 2], [1, 2])
+ assert result == []
+
+
+class TestListIntersectionBlockMethods:
+ """Tests for internal methods of ListIntersectionBlock."""
+
+ def setup_method(self):
+ self.block = ListIntersectionBlock()
+
+ def test_compute_intersection(self):
+ result = self.block._compute_intersection([1, 2, 3], [2, 3, 4])
+ assert result == [2, 3]
+
+ def test_compute_intersection_empty(self):
+ result = self.block._compute_intersection([], [1, 2])
+ assert result == []
+
+ def test_compute_intersection_no_overlap(self):
+ result = self.block._compute_intersection([1, 2], [3, 4])
+ assert result == []
diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/useRunGraph.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/useRunGraph.ts
index 6980e95f11..51bb57057f 100644
--- a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/useRunGraph.ts
+++ b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/useRunGraph.ts
@@ -4,7 +4,7 @@ import {
} from "@/app/api/__generated__/endpoints/graphs/graphs";
import { useToast } from "@/components/molecules/Toast/use-toast";
import { parseAsInteger, parseAsString, useQueryStates } from "nuqs";
-import { GraphExecutionMeta } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/use-agent-runs";
+import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta";
import { useGraphStore } from "@/app/(platform)/build/stores/graphStore";
import { useShallow } from "zustand/react/shallow";
import { useEffect, useState } from "react";
diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/RunnerInputUI.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/RunnerInputUI.tsx
index cb06a79683..f7d59a5693 100644
--- a/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/RunnerInputUI.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/RunnerInputUI.tsx
@@ -1,6 +1,6 @@
import { useCallback } from "react";
-import { AgentRunDraftView } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view";
+import { AgentRunDraftView } from "@/app/(platform)/build/components/legacy-builder/agent-run-draft-view";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import type {
CredentialsMetaInput,
diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/SaveControl.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/SaveControl.tsx
index dcaa0f6264..3ee5217354 100644
--- a/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/SaveControl.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/SaveControl.tsx
@@ -18,7 +18,7 @@ import {
import { useToast } from "@/components/molecules/Toast/use-toast";
import { useQueryClient } from "@tanstack/react-query";
import { getGetV2ListMySubmissionsQueryKey } from "@/app/api/__generated__/endpoints/store/store";
-import { CronExpressionDialog } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/cron-scheduler-dialog";
+import { CronExpressionDialog } from "@/components/contextual/CronScheduler/cron-scheduler-dialog";
import { humanizeCronExpression } from "@/lib/cron-expression-utils";
import { CalendarClockIcon } from "lucide-react";
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/agent-run-draft-view.tsx
similarity index 99%
rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx
rename to autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/agent-run-draft-view.tsx
index b0c3a6ff7b..372d479299 100644
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/agent-run-draft-view.tsx
@@ -20,7 +20,7 @@ import {
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
import { RunAgentInputs } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs";
-import { ScheduleTaskDialog } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/cron-scheduler-dialog";
+import { ScheduleTaskDialog } from "@/components/contextual/CronScheduler/cron-scheduler-dialog";
import ActionButtonGroup from "@/components/__legacy__/action-button-group";
import type { ButtonAction } from "@/components/__legacy__/types";
import {
@@ -53,7 +53,10 @@ import { ClockIcon, CopyIcon, InfoIcon } from "@phosphor-icons/react";
import { CalendarClockIcon, Trash2Icon } from "lucide-react";
import { analytics } from "@/services/analytics";
-import { AgentStatus, AgentStatusChip } from "./agent-status-chip";
+import {
+ AgentStatus,
+ AgentStatusChip,
+} from "@/app/(platform)/build/components/legacy-builder/agent-status-chip";
export function AgentRunDraftView({
graph,
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-status-chip.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/agent-status-chip.tsx
similarity index 100%
rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-status-chip.tsx
rename to autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/agent-status-chip.tsx
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx
index 26977a207a..78ccdd88d9 100644
--- a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx
@@ -4,11 +4,11 @@ import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import {
BookOpenIcon,
- CheckFatIcon,
PencilSimpleIcon,
WarningDiamondIcon,
} from "@phosphor-icons/react";
import type { ToolUIPart } from "ai";
+import Image from "next/image";
import NextLink from "next/link";
import { useCopilotChatActions } from "../../components/CopilotChatActionsProvider/useCopilotChatActions";
import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
@@ -24,6 +24,7 @@ import {
ClarificationQuestionsCard,
ClarifyingQuestion,
} from "./components/ClarificationQuestionsCard";
+import sparklesImg from "./components/MiniGame/assets/sparkles.png";
import { MiniGame } from "./components/MiniGame/MiniGame";
import {
AccordionIcon,
@@ -83,7 +84,8 @@ function getAccordionMeta(output: CreateAgentToolOutput) {
) {
return {
icon,
- title: "Creating agent, this may take a few minutes. Sit back and relax.",
+ title:
+ "Creating agent, this may take a few minutes. Play while you wait.",
expanded: true,
};
}
@@ -167,16 +169,20 @@ export function CreateAgentTool({ part }: Props) {
{isAgentSavedOutput(output) && (
-
- {output.message}
+ Agent{" "}
+ {output.agent_name} {" "}
+ has been saved to your library!
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/MiniGame.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/MiniGame.tsx
index 53cfcf2731..281238a425 100644
--- a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/MiniGame.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/MiniGame.tsx
@@ -2,20 +2,65 @@
import { useMiniGame } from "./useMiniGame";
+function Key({ children }: { children: React.ReactNode }) {
+ return
[{children}] ;
+}
+
export function MiniGame() {
- const { canvasRef } = useMiniGame();
+ const { canvasRef, activeMode, showOverlay, score, highScore, onContinue } =
+ useMiniGame();
+
+ const isRunActive =
+ activeMode === "run" || activeMode === "idle" || activeMode === "over";
+
+ let overlayText: string | undefined;
+ let buttonLabel = "Continue";
+ if (activeMode === "idle") {
+ buttonLabel = "Start";
+ } else if (activeMode === "boss-intro") {
+ overlayText = "Face the bandit!";
+ } else if (activeMode === "boss-defeated") {
+ overlayText = "Great job, keep on going";
+ } else if (activeMode === "over") {
+ overlayText = `Score: ${score} / Record: ${highScore}`;
+ buttonLabel = "Retry";
+ }
return (
-
-
+
+
+ {isRunActive ? (
+ <>
+ Run mode: Space to jump
+ >
+ ) : (
+ <>
+ Duel mode: ←→ to move · Z to attack ·{" "}
+ X to block · Space to jump
+ >
+ )}
+
+
+
+ {showOverlay && (
+
+ {overlayText && (
+
{overlayText}
+ )}
+
+ {buttonLabel}
+
+
+ )}
+
);
}
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/archer-attack.png b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/archer-attack.png
new file mode 100644
index 0000000000..af199cfcb9
Binary files /dev/null and b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/archer-attack.png differ
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/archer-idle.png b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/archer-idle.png
new file mode 100644
index 0000000000..169ccb7d98
Binary files /dev/null and b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/archer-idle.png differ
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/archer-shoot.png b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/archer-shoot.png
new file mode 100644
index 0000000000..9119dcb778
Binary files /dev/null and b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/archer-shoot.png differ
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/attack.png b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/attack.png
new file mode 100644
index 0000000000..c5259f423b
Binary files /dev/null and b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/attack.png differ
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/guard.png b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/guard.png
new file mode 100644
index 0000000000..064c170add
Binary files /dev/null and b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/guard.png differ
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/idle.png b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/idle.png
new file mode 100644
index 0000000000..b8ebdc7294
Binary files /dev/null and b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/idle.png differ
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/run.png b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/run.png
new file mode 100644
index 0000000000..a6ba2f3452
Binary files /dev/null and b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/run.png differ
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/sparkles.png b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/sparkles.png
new file mode 100644
index 0000000000..befa6f253e
Binary files /dev/null and b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/sparkles.png differ
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/tree-1.png b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/tree-1.png
new file mode 100644
index 0000000000..655a141adf
Binary files /dev/null and b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/tree-1.png differ
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/tree-2.png b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/tree-2.png
new file mode 100644
index 0000000000..fe6d67bafd
Binary files /dev/null and b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/tree-2.png differ
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/tree-3.png b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/tree-3.png
new file mode 100644
index 0000000000..162140b90d
Binary files /dev/null and b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/assets/tree-3.png differ
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/useMiniGame.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/useMiniGame.ts
index e91f1766ca..55c4635d5e 100644
--- a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/useMiniGame.ts
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/useMiniGame.ts
@@ -1,4 +1,13 @@
-import { useEffect, useRef } from "react";
+import { useEffect, useRef, useState } from "react";
+import runSheet from "./assets/run.png";
+import idleSheet from "./assets/idle.png";
+import attackSheet from "./assets/attack.png";
+import tree1Sheet from "./assets/tree-1.png";
+import tree2Sheet from "./assets/tree-2.png";
+import tree3Sheet from "./assets/tree-3.png";
+import archerIdleSheet from "./assets/archer-idle.png";
+import archerAttackSheet from "./assets/archer-attack.png";
+import guardSheet from "./assets/guard.png";
/* ------------------------------------------------------------------ */
/* Constants */
@@ -12,24 +21,63 @@ const SPEED_INCREMENT = 0.0008;
const SPAWN_MIN = 70;
const SPAWN_MAX = 130;
const CHAR_SIZE = 18;
+const CHAR_SPRITE_SIZE = 67;
const CHAR_X = 50;
const GROUND_PAD = 20;
const STORAGE_KEY = "copilot-minigame-highscore";
+// Character sprite sheets (each frame is 192x192)
+const SPRITE_FRAME_SIZE = 192;
+const RUN_FRAMES = 6;
+const IDLE_FRAMES = 8;
+const ATTACK_FRAMES = 4;
+const ANIM_SPEED = 8;
+const ATTACK_ANIM_SPEED = 6;
+const ATTACK_RANGE = 40;
+const ATTACK_HIT_FRAME = 2;
+const GUARD_FRAMES = 6;
+const GUARD_ANIM_SPEED = 8;
+
+// Tree sprite sheets: 8 frames each, 192px wide per frame
+const TREE_FRAMES = 8;
+const TREE_ANIM_SPEED = 10;
+const TREE_CONFIGS = [
+ { frameW: 192, frameH: 256, renderW: 40, renderH: 61, hitW: 16, hitH: 50 },
+ { frameW: 192, frameH: 192, renderW: 38, renderH: 52, hitW: 16, hitH: 40 },
+ { frameW: 192, frameH: 192, renderW: 32, renderH: 40, hitW: 14, hitH: 30 },
+] as const;
+
// Colors
const COLOR_BG = "#E8EAF6";
const COLOR_CHAR = "#263238";
-const COLOR_BOSS = "#F50057";
// Boss
const BOSS_SIZE = 36;
+const BOSS_SPRITE_SIZE = 70;
const BOSS_ENTER_SPEED = 2;
-const BOSS_LEAVE_SPEED = 3;
-const BOSS_SHOOT_COOLDOWN = 90;
-const BOSS_SHOTS_TO_EVADE = 5;
-const BOSS_INTERVAL = 20; // every N score
-const PROJ_SPEED = 4.5;
-const PROJ_SIZE = 12;
+const BOSS_HP = 1;
+const MOVE_SPEED = 3;
+const BOSS_CHASE_SPEED = 2.2;
+const BOSS_RETREAT_SPEED = 2;
+const BOSS_ATTACK_RANGE = 50;
+const BOSS_IDLE_TIME = 166;
+const BOSS_RETREAT_TIME = 166;
+
+// Archer sprite sheets
+const ARCHER_IDLE_FRAMES = 6;
+const ARCHER_ATTACK_FRAMES = 4;
+const ARCHER_FRAME_SIZE = 192;
+const ARCHER_ANIM_SPEED = 8;
+const ARCHER_ATTACK_ANIM_SPEED = 6;
+const ARCHER_ATTACK_HIT_FRAME = 2;
+
+// Death animation
+const DEATH_PARTICLE_COUNT = 15;
+const DEATH_ANIM_DURATION = 40;
+
+// Attack effect
+const ATTACK_EFFECT_COUNT = 8;
+const ATTACK_EFFECT_DURATION = 15;
/* ------------------------------------------------------------------ */
/* Types */
@@ -40,27 +88,38 @@ interface Obstacle {
width: number;
height: number;
scored: boolean;
-}
-
-interface Projectile {
- x: number;
- y: number;
- speed: number;
- evaded: boolean;
- type: "low" | "high";
+ treeType: 0 | 1 | 2;
}
interface BossState {
- phase: "inactive" | "entering" | "fighting" | "leaving";
+ phase: "inactive" | "entering" | "fighting";
x: number;
+ y: number;
+ vy: number;
targetX: number;
- shotsEvaded: number;
- cooldown: number;
- projectiles: Projectile[];
- bob: number;
+ hp: number;
+ action: "idle" | "chase" | "retreat" | "attack";
+ actionTimer: number;
+ attackFrame: number;
+ attackHit: boolean;
+}
+
+interface Particle {
+ x: number;
+ y: number;
+ vx: number;
+ vy: number;
+ life: number;
+}
+
+interface DeathAnim {
+ particles: Particle[];
+ type: "boss" | "player";
+ timer: number;
}
interface GameState {
+ charX: number;
charY: number;
vy: number;
obstacles: Obstacle[];
@@ -74,6 +133,31 @@ interface GameState {
groundY: number;
boss: BossState;
bossThreshold: number;
+ bossesDefeated: number;
+ paused: boolean;
+ nextTreeType: 0 | 1 | 2;
+ attacking: boolean;
+ attackFrame: number;
+ attackHit: boolean;
+ guarding: boolean;
+ guardFrame: number;
+ deathAnim: DeathAnim | null;
+ attackEffects: Particle[];
+}
+
+interface KeyState {
+ left: boolean;
+ right: boolean;
+}
+
+interface Sprites {
+ run: HTMLImageElement;
+ idle: HTMLImageElement;
+ attack: HTMLImageElement;
+ guard: HTMLImageElement;
+ trees: HTMLImageElement[];
+ archerIdle: HTMLImageElement;
+ archerAttack: HTMLImageElement;
}
/* ------------------------------------------------------------------ */
@@ -100,20 +184,24 @@ function writeHighScore(score: number) {
}
}
-function makeBoss(): BossState {
+function makeBoss(groundY: number): BossState {
return {
phase: "inactive",
x: 0,
+ y: groundY - BOSS_SIZE,
+ vy: 0,
targetX: 0,
- shotsEvaded: 0,
- cooldown: 0,
- projectiles: [],
- bob: 0,
+ hp: BOSS_HP,
+ action: "idle",
+ actionTimer: BOSS_IDLE_TIME,
+ attackFrame: 0,
+ attackHit: false,
};
}
function makeState(groundY: number): GameState {
return {
+ charX: CHAR_X,
charY: groundY - CHAR_SIZE,
vy: 0,
obstacles: [],
@@ -125,62 +213,107 @@ function makeState(groundY: number): GameState {
running: false,
over: false,
groundY,
- boss: makeBoss(),
- bossThreshold: BOSS_INTERVAL,
+ boss: makeBoss(groundY),
+ bossThreshold: 10,
+ bossesDefeated: 0,
+ paused: false,
+ nextTreeType: 0,
+ attacking: false,
+ attackFrame: 0,
+ attackHit: false,
+ guarding: false,
+ guardFrame: 0,
+ deathAnim: null,
+ attackEffects: [],
};
}
-function gameOver(s: GameState) {
- s.running = false;
- s.over = true;
- if (s.score > s.highScore) {
- s.highScore = s.score;
- writeHighScore(s.score);
+function spawnParticles(x: number, y: number): Particle[] {
+ const particles: Particle[] = [];
+ for (let i = 0; i < DEATH_PARTICLE_COUNT; i++) {
+ const angle = Math.random() * Math.PI * 2;
+ const speed = 1 + Math.random() * 3;
+ particles.push({
+ x,
+ y,
+ vx: Math.cos(angle) * speed,
+ vy: Math.sin(angle) * speed - 2,
+ life: DEATH_ANIM_DURATION,
+ });
}
+ return particles;
}
-/* ------------------------------------------------------------------ */
-/* Projectile collision — shared between fighting & leaving phases */
-/* ------------------------------------------------------------------ */
+function startPlayerDeath(s: GameState) {
+ s.deathAnim = {
+ particles: spawnParticles(s.charX + CHAR_SIZE / 2, s.charY + CHAR_SIZE / 2),
+ type: "player",
+ timer: DEATH_ANIM_DURATION,
+ };
+}
-/** Returns true if the player died. */
-function tickProjectiles(s: GameState): boolean {
- const boss = s.boss;
-
- for (const p of boss.projectiles) {
- p.x -= p.speed;
-
- if (!p.evaded && p.x + PROJ_SIZE < CHAR_X) {
- p.evaded = true;
- boss.shotsEvaded++;
- }
-
- // Collision
- if (
- !p.evaded &&
- CHAR_X + CHAR_SIZE > p.x &&
- CHAR_X < p.x + PROJ_SIZE &&
- s.charY + CHAR_SIZE > p.y &&
- s.charY < p.y + PROJ_SIZE
- ) {
- gameOver(s);
- return true;
- }
- }
-
- boss.projectiles = boss.projectiles.filter((p) => p.x + PROJ_SIZE > -20);
- return false;
+function startBossDeath(s: GameState) {
+ s.deathAnim = {
+ particles: spawnParticles(
+ s.boss.x + BOSS_SIZE / 2,
+ s.boss.y + BOSS_SIZE / 2,
+ ),
+ type: "boss",
+ timer: DEATH_ANIM_DURATION,
+ };
}
/* ------------------------------------------------------------------ */
/* Update */
/* ------------------------------------------------------------------ */
-function update(s: GameState, canvasWidth: number) {
- if (!s.running) return;
+function update(s: GameState, canvasWidth: number, keys: KeyState) {
+ if (!s.running || s.paused) return;
s.frame++;
+ // ---- Attack effects ---- //
+ for (const p of s.attackEffects) {
+ p.x += p.vx;
+ p.y += p.vy;
+ p.vy += 0.08;
+ p.life--;
+ }
+ s.attackEffects = s.attackEffects.filter((p) => p.life > 0);
+
+ // ---- Death animation ---- //
+ if (s.deathAnim) {
+ s.deathAnim.timer--;
+ for (const p of s.deathAnim.particles) {
+ p.x += p.vx;
+ p.y += p.vy;
+ p.vy += 0.1;
+ p.life--;
+ }
+ if (s.deathAnim.timer <= 0) {
+ if (s.deathAnim.type === "player") {
+ s.deathAnim = null;
+ s.running = false;
+ s.over = true;
+ if (s.score > s.highScore) {
+ s.highScore = s.score;
+ writeHighScore(s.score);
+ }
+ } else {
+ s.deathAnim = null;
+ s.score += 10;
+ s.bossesDefeated++;
+ if (s.bossesDefeated === 1) {
+ s.bossThreshold = s.score + 15;
+ } else {
+ s.bossThreshold = s.score + 20;
+ }
+ s.paused = true;
+ }
+ }
+ return;
+ }
+
// Speed only ramps during regular play
if (s.boss.phase === "inactive") {
s.speed = BASE_SPEED + s.frame * SPEED_INCREMENT;
@@ -194,86 +327,207 @@ function update(s: GameState, canvasWidth: number) {
s.vy = 0;
}
+ // ---- Attack animation ---- //
+ if (s.attacking) {
+ s.attackFrame++;
+
+ if (
+ !s.attackHit &&
+ Math.floor(s.attackFrame / ATTACK_ANIM_SPEED) === ATTACK_HIT_FRAME &&
+ s.boss.phase === "fighting" &&
+ s.charX + CHAR_SIZE + ATTACK_RANGE >= s.boss.x
+ ) {
+ s.boss.hp--;
+ s.attackHit = true;
+ }
+
+ if (s.attackFrame >= ATTACK_FRAMES * ATTACK_ANIM_SPEED) {
+ s.attacking = false;
+ s.attackFrame = 0;
+ s.attackHit = false;
+ }
+ }
+
+ // ---- Guard animation ---- //
+ if (s.guarding) {
+ s.guardFrame++;
+ if (s.guardFrame >= GUARD_FRAMES * GUARD_ANIM_SPEED) {
+ s.guardFrame = GUARD_FRAMES * GUARD_ANIM_SPEED - 1;
+ }
+ }
+
+ // ---- Horizontal movement during boss fight ---- //
+ if (s.boss.phase !== "inactive") {
+ if (keys.left) {
+ s.charX = Math.max(10, s.charX - MOVE_SPEED);
+ }
+ if (keys.right) {
+ s.charX = Math.min(canvasWidth - CHAR_SIZE - 10, s.charX + MOVE_SPEED);
+ }
+ } else {
+ s.charX = CHAR_X;
+ }
+
// ---- Trigger boss ---- //
- if (s.boss.phase === "inactive" && s.score >= s.bossThreshold) {
+ const isOnGround = s.charY + CHAR_SIZE >= s.groundY;
+ if (
+ s.boss.phase === "inactive" &&
+ s.score >= s.bossThreshold &&
+ s.obstacles.length === 0 &&
+ isOnGround
+ ) {
s.boss.phase = "entering";
s.boss.x = canvasWidth + 10;
+ s.boss.y = s.groundY - BOSS_SIZE;
+ s.boss.vy = 0;
s.boss.targetX = canvasWidth - BOSS_SIZE - 40;
- s.boss.shotsEvaded = 0;
- s.boss.cooldown = BOSS_SHOOT_COOLDOWN;
- s.boss.projectiles = [];
- s.obstacles = [];
+ s.boss.hp = BOSS_HP;
+ s.boss.action = "idle";
+ s.boss.actionTimer = BOSS_IDLE_TIME;
+ s.boss.attackFrame = 0;
+ s.boss.attackHit = false;
+
+ if (s.bossesDefeated === 0) {
+ s.paused = true;
+ }
}
// ---- Boss: entering ---- //
if (s.boss.phase === "entering") {
- s.boss.bob = Math.sin(s.frame * 0.05) * 3;
s.boss.x -= BOSS_ENTER_SPEED;
if (s.boss.x <= s.boss.targetX) {
s.boss.x = s.boss.targetX;
s.boss.phase = "fighting";
}
- return; // no obstacles while entering
+ return;
}
// ---- Boss: fighting ---- //
if (s.boss.phase === "fighting") {
- s.boss.bob = Math.sin(s.frame * 0.05) * 3;
-
- // Shoot
- s.boss.cooldown--;
- if (s.boss.cooldown <= 0) {
- const isLow = Math.random() < 0.5;
- s.boss.projectiles.push({
- x: s.boss.x - PROJ_SIZE,
- y: isLow ? s.groundY - 14 : s.groundY - 70,
- speed: PROJ_SPEED,
- evaded: false,
- type: isLow ? "low" : "high",
- });
- s.boss.cooldown = BOSS_SHOOT_COOLDOWN;
+ // Boss physics
+ s.boss.vy += GRAVITY;
+ s.boss.y += s.boss.vy;
+ if (s.boss.y + BOSS_SIZE >= s.groundY) {
+ s.boss.y = s.groundY - BOSS_SIZE;
+ s.boss.vy = 0;
}
- if (tickProjectiles(s)) return;
-
// Boss defeated?
- if (s.boss.shotsEvaded >= BOSS_SHOTS_TO_EVADE) {
- s.boss.phase = "leaving";
- s.score += 5; // bonus
- s.bossThreshold = s.score + BOSS_INTERVAL;
+ if (s.boss.hp <= 0) {
+ startBossDeath(s);
+ return;
}
- return;
- }
- // ---- Boss: leaving ---- //
- if (s.boss.phase === "leaving") {
- s.boss.bob = Math.sin(s.frame * 0.05) * 3;
- s.boss.x += BOSS_LEAVE_SPEED;
+ // Boss AI
+ if (s.boss.action === "attack") {
+ s.boss.attackFrame++;
+ const hitFrame = Math.floor(
+ s.boss.attackFrame / ARCHER_ATTACK_ANIM_SPEED,
+ );
- // Still check in-flight projectiles
- if (tickProjectiles(s)) return;
+ // Spawn yellow attack effect at hit frame
+ if (
+ s.boss.attackFrame ===
+ ARCHER_ATTACK_HIT_FRAME * ARCHER_ATTACK_ANIM_SPEED
+ ) {
+ const effectX = s.boss.x - 5;
+ const effectY = s.boss.y + BOSS_SIZE / 2;
+ for (let i = 0; i < ATTACK_EFFECT_COUNT; i++) {
+ const angle = Math.PI + (Math.random() - 0.5) * 1.2;
+ const speed = 2 + Math.random() * 3;
+ s.attackEffects.push({
+ x: effectX,
+ y: effectY,
+ vx: Math.cos(angle) * speed,
+ vy: Math.sin(angle) * speed - 1,
+ life: ATTACK_EFFECT_DURATION,
+ });
+ }
+ }
- if (s.boss.x > canvasWidth + 50) {
- s.boss = makeBoss();
- s.nextSpawn = s.frame + randInt(SPAWN_MIN / 2, SPAWN_MAX / 2);
+ if (!s.boss.attackHit && hitFrame === ARCHER_ATTACK_HIT_FRAME) {
+ const dist = s.boss.x - (s.charX + CHAR_SIZE);
+ if (dist < BOSS_ATTACK_RANGE && dist > -BOSS_SIZE) {
+ s.boss.attackHit = true;
+ if (!s.guarding) {
+ startPlayerDeath(s);
+ return;
+ }
+ }
+ }
+
+ if (
+ s.boss.attackFrame >=
+ ARCHER_ATTACK_FRAMES * ARCHER_ATTACK_ANIM_SPEED
+ ) {
+ s.boss.action = "retreat";
+ s.boss.actionTimer = BOSS_RETREAT_TIME;
+ s.boss.attackFrame = 0;
+ s.boss.attackHit = false;
+ }
+ } else {
+ s.boss.actionTimer--;
+
+ if (s.boss.action === "chase") {
+ if (s.boss.x > s.charX + CHAR_SIZE) {
+ s.boss.x -= BOSS_CHASE_SPEED;
+ } else {
+ s.boss.x += BOSS_CHASE_SPEED;
+ }
+
+ // Occasional jump
+ if (s.boss.y + BOSS_SIZE >= s.groundY && Math.random() < 0.008) {
+ s.boss.vy = JUMP_FORCE * 0.7;
+ }
+
+ // Close enough to attack
+ const dist = Math.abs(s.boss.x - (s.charX + CHAR_SIZE));
+ if (dist < BOSS_ATTACK_RANGE) {
+ s.boss.action = "attack";
+ s.boss.attackFrame = 0;
+ s.boss.attackHit = false;
+ }
+ } else if (s.boss.action === "retreat") {
+ s.boss.x += BOSS_RETREAT_SPEED;
+ if (s.boss.x > canvasWidth - BOSS_SIZE - 10) {
+ s.boss.x = canvasWidth - BOSS_SIZE - 10;
+ }
+ }
+
+ // Timer expired → next action
+ if (s.boss.actionTimer <= 0) {
+ if (s.boss.action === "idle" || s.boss.action === "retreat") {
+ s.boss.action = "chase";
+ s.boss.actionTimer = 999;
+ } else {
+ s.boss.action = "idle";
+ s.boss.actionTimer = BOSS_IDLE_TIME;
+ }
+ }
}
return;
}
// ---- Regular obstacle play ---- //
- if (s.frame >= s.nextSpawn) {
+ // Stop spawning trees if enough are queued to reach boss threshold
+ const unscoredCount = s.obstacles.filter((o) => !o.scored).length;
+ if (s.score + unscoredCount < s.bossThreshold && s.frame >= s.nextSpawn) {
+ const tt = s.nextTreeType;
+ const cfg = TREE_CONFIGS[tt];
s.obstacles.push({
x: canvasWidth + 10,
- width: randInt(10, 16),
- height: randInt(20, 48),
+ width: cfg.hitW,
+ height: cfg.hitH,
scored: false,
+ treeType: tt,
});
+ s.nextTreeType = Math.floor(Math.random() * 3) as 0 | 1 | 2;
s.nextSpawn = s.frame + randInt(SPAWN_MIN, SPAWN_MAX);
}
for (const o of s.obstacles) {
o.x -= s.speed;
- if (!o.scored && o.x + o.width < CHAR_X) {
+ if (!o.scored && o.x + o.width < s.charX) {
o.scored = true;
s.score++;
}
@@ -284,11 +538,11 @@ function update(s: GameState, canvasWidth: number) {
for (const o of s.obstacles) {
const oY = s.groundY - o.height;
if (
- CHAR_X + CHAR_SIZE > o.x &&
- CHAR_X < o.x + o.width &&
+ s.charX + CHAR_SIZE > o.x &&
+ s.charX < o.x + o.width &&
s.charY + CHAR_SIZE > oY
) {
- gameOver(s);
+ startPlayerDeath(s);
return;
}
}
@@ -298,73 +552,79 @@ function update(s: GameState, canvasWidth: number) {
/* Drawing */
/* ------------------------------------------------------------------ */
-function drawBoss(ctx: CanvasRenderingContext2D, s: GameState, bg: string) {
- const bx = s.boss.x;
- const by = s.groundY - BOSS_SIZE + s.boss.bob;
+function drawBoss(
+ ctx: CanvasRenderingContext2D,
+ s: GameState,
+ sprites: Sprites,
+) {
+ const boss = s.boss;
+ const isAttacking = boss.action === "attack";
+ const sheet = isAttacking ? sprites.archerAttack : sprites.archerIdle;
+ const totalFrames = isAttacking ? ARCHER_ATTACK_FRAMES : ARCHER_IDLE_FRAMES;
+ const animSpeed = isAttacking ? ARCHER_ATTACK_ANIM_SPEED : ARCHER_ANIM_SPEED;
- // Body
- ctx.save();
- ctx.fillStyle = COLOR_BOSS;
- ctx.globalAlpha = 0.9;
- ctx.beginPath();
- ctx.roundRect(bx, by, BOSS_SIZE, BOSS_SIZE, 4);
- ctx.fill();
- ctx.restore();
+ let frameIndex: number;
+ if (isAttacking) {
+ frameIndex = Math.min(
+ Math.floor(boss.attackFrame / animSpeed),
+ totalFrames - 1,
+ );
+ } else {
+ frameIndex = Math.floor(s.frame / animSpeed) % totalFrames;
+ }
- // Eyes
- ctx.save();
- ctx.fillStyle = bg;
- const eyeY = by + 13;
- ctx.beginPath();
- ctx.arc(bx + 10, eyeY, 4, 0, Math.PI * 2);
- ctx.fill();
- ctx.beginPath();
- ctx.arc(bx + 26, eyeY, 4, 0, Math.PI * 2);
- ctx.fill();
- ctx.restore();
+ const srcX = frameIndex * ARCHER_FRAME_SIZE;
+ const spriteDrawX = boss.x + (BOSS_SIZE - BOSS_SPRITE_SIZE) / 2;
+ const spriteDrawY = boss.y + BOSS_SIZE - BOSS_SPRITE_SIZE + 12;
- // Angry eyebrows
- ctx.save();
- ctx.strokeStyle = bg;
- ctx.lineWidth = 2;
- ctx.beginPath();
- ctx.moveTo(bx + 5, eyeY - 7);
- ctx.lineTo(bx + 14, eyeY - 4);
- ctx.stroke();
- ctx.beginPath();
- ctx.moveTo(bx + 31, eyeY - 7);
- ctx.lineTo(bx + 22, eyeY - 4);
- ctx.stroke();
- ctx.restore();
+ if (sheet.complete && sheet.naturalWidth > 0) {
+ ctx.drawImage(
+ sheet,
+ srcX,
+ 0,
+ ARCHER_FRAME_SIZE,
+ ARCHER_FRAME_SIZE,
+ spriteDrawX,
+ spriteDrawY,
+ BOSS_SPRITE_SIZE,
+ BOSS_SPRITE_SIZE,
+ );
+ } else {
+ ctx.save();
+ ctx.fillStyle = "#F50057";
+ ctx.globalAlpha = 0.9;
+ ctx.beginPath();
+ ctx.roundRect(boss.x, boss.y, BOSS_SIZE, BOSS_SIZE, 4);
+ ctx.fill();
+ ctx.restore();
+ }
+}
- // Zigzag mouth
+function drawParticles(ctx: CanvasRenderingContext2D, anim: DeathAnim) {
ctx.save();
- ctx.strokeStyle = bg;
- ctx.lineWidth = 1.5;
- ctx.beginPath();
- ctx.moveTo(bx + 10, by + 27);
- ctx.lineTo(bx + 14, by + 24);
- ctx.lineTo(bx + 18, by + 27);
- ctx.lineTo(bx + 22, by + 24);
- ctx.lineTo(bx + 26, by + 27);
- ctx.stroke();
+ for (const p of anim.particles) {
+ if (p.life <= 0) continue;
+ const alpha = p.life / DEATH_ANIM_DURATION;
+ const size = 2 + alpha * 3;
+ ctx.globalAlpha = alpha;
+ ctx.fillStyle = "#a855f7";
+ ctx.beginPath();
+ ctx.arc(p.x, p.y, size, 0, Math.PI * 2);
+ ctx.fill();
+ }
ctx.restore();
}
-function drawProjectiles(ctx: CanvasRenderingContext2D, boss: BossState) {
+function drawAttackEffects(ctx: CanvasRenderingContext2D, effects: Particle[]) {
ctx.save();
- ctx.fillStyle = COLOR_BOSS;
- ctx.globalAlpha = 0.8;
- for (const p of boss.projectiles) {
- if (p.evaded) continue;
+ for (const p of effects) {
+ if (p.life <= 0) continue;
+ const alpha = p.life / ATTACK_EFFECT_DURATION;
+ const size = 1.5 + alpha * 2.5;
+ ctx.globalAlpha = alpha;
+ ctx.fillStyle = "#facc15";
ctx.beginPath();
- ctx.arc(
- p.x + PROJ_SIZE / 2,
- p.y + PROJ_SIZE / 2,
- PROJ_SIZE / 2,
- 0,
- Math.PI * 2,
- );
+ ctx.arc(p.x, p.y, size, 0, Math.PI * 2);
ctx.fill();
}
ctx.restore();
@@ -376,7 +636,7 @@ function draw(
w: number,
h: number,
fg: string,
- started: boolean,
+ sprites: Sprites,
) {
ctx.fillStyle = COLOR_BG;
ctx.fillRect(0, 0, w, h);
@@ -392,39 +652,109 @@ function draw(
ctx.stroke();
ctx.restore();
- // Character
- ctx.save();
- ctx.fillStyle = COLOR_CHAR;
- ctx.globalAlpha = 0.85;
- ctx.beginPath();
- ctx.roundRect(CHAR_X, s.charY, CHAR_SIZE, CHAR_SIZE, 3);
- ctx.fill();
- ctx.restore();
+ // Character sprite (hidden during player death)
+ if (!s.deathAnim || s.deathAnim.type !== "player") {
+ const isJumping = s.charY + CHAR_SIZE < s.groundY;
+ let sheet: HTMLImageElement;
+ let totalFrames: number;
+ let frameIndex: number;
- // Eyes
- ctx.save();
- ctx.fillStyle = COLOR_BG;
- ctx.beginPath();
- ctx.arc(CHAR_X + 6, s.charY + 7, 2.5, 0, Math.PI * 2);
- ctx.fill();
- ctx.beginPath();
- ctx.arc(CHAR_X + 12, s.charY + 7, 2.5, 0, Math.PI * 2);
- ctx.fill();
- ctx.restore();
+ if (s.guarding) {
+ sheet = sprites.guard;
+ totalFrames = GUARD_FRAMES;
+ frameIndex = Math.min(
+ Math.floor(s.guardFrame / GUARD_ANIM_SPEED),
+ totalFrames - 1,
+ );
+ } else if (s.attacking) {
+ sheet = sprites.attack;
+ totalFrames = ATTACK_FRAMES;
+ frameIndex = Math.min(
+ Math.floor(s.attackFrame / ATTACK_ANIM_SPEED),
+ totalFrames - 1,
+ );
+ } else if (isJumping) {
+ sheet = sprites.idle;
+ totalFrames = IDLE_FRAMES;
+ frameIndex = Math.floor(s.frame / ANIM_SPEED) % totalFrames;
+ } else {
+ sheet = sprites.run;
+ totalFrames = RUN_FRAMES;
+ frameIndex = Math.floor(s.frame / ANIM_SPEED) % totalFrames;
+ }
- // Obstacles
- ctx.save();
- ctx.fillStyle = fg;
- ctx.globalAlpha = 0.55;
- for (const o of s.obstacles) {
- ctx.fillRect(o.x, s.groundY - o.height, o.width, o.height);
+ const srcX = frameIndex * SPRITE_FRAME_SIZE;
+ const drawX = s.charX + (CHAR_SIZE - CHAR_SPRITE_SIZE) / 2;
+ const drawY = s.charY + CHAR_SIZE - CHAR_SPRITE_SIZE + 15;
+
+ if (sheet.complete && sheet.naturalWidth > 0) {
+ ctx.drawImage(
+ sheet,
+ srcX,
+ 0,
+ SPRITE_FRAME_SIZE,
+ SPRITE_FRAME_SIZE,
+ drawX,
+ drawY,
+ CHAR_SPRITE_SIZE,
+ CHAR_SPRITE_SIZE,
+ );
+ } else {
+ ctx.save();
+ ctx.fillStyle = COLOR_CHAR;
+ ctx.globalAlpha = 0.85;
+ ctx.beginPath();
+ ctx.roundRect(s.charX, s.charY, CHAR_SIZE, CHAR_SIZE, 3);
+ ctx.fill();
+ ctx.restore();
+ }
}
- ctx.restore();
- // Boss + projectiles
- if (s.boss.phase !== "inactive") {
- drawBoss(ctx, s, COLOR_BG);
- drawProjectiles(ctx, s.boss);
+ // Tree obstacles
+ const treeFrame = Math.floor(s.frame / TREE_ANIM_SPEED) % TREE_FRAMES;
+ for (const o of s.obstacles) {
+ const cfg = TREE_CONFIGS[o.treeType];
+ const treeImg = sprites.trees[o.treeType];
+ if (treeImg.complete && treeImg.naturalWidth > 0) {
+ const treeSrcX = treeFrame * cfg.frameW;
+ const treeDrawX = o.x + (o.width - cfg.renderW) / 2;
+ const treeDrawY = s.groundY - cfg.renderH;
+ ctx.drawImage(
+ treeImg,
+ treeSrcX,
+ 0,
+ cfg.frameW,
+ cfg.frameH,
+ treeDrawX,
+ treeDrawY,
+ cfg.renderW,
+ cfg.renderH,
+ );
+ } else {
+ ctx.save();
+ ctx.fillStyle = fg;
+ ctx.globalAlpha = 0.55;
+ ctx.fillRect(o.x, s.groundY - o.height, o.width, o.height);
+ ctx.restore();
+ }
+ }
+
+ // Boss (hidden during boss death)
+ if (
+ s.boss.phase !== "inactive" &&
+ (!s.deathAnim || s.deathAnim.type !== "boss")
+ ) {
+ drawBoss(ctx, s, sprites);
+ }
+
+ // Attack effects
+ if (s.attackEffects.length > 0) {
+ drawAttackEffects(ctx, s.attackEffects);
+ }
+
+ // Death particles
+ if (s.deathAnim) {
+ drawParticles(ctx, s.deathAnim);
}
// Score HUD
@@ -435,37 +765,7 @@ function draw(
ctx.textAlign = "right";
ctx.fillText(`Score: ${s.score}`, w - 12, 20);
ctx.fillText(`Best: ${s.highScore}`, w - 12, 34);
- if (s.boss.phase === "fighting") {
- ctx.fillText(
- `Evade: ${s.boss.shotsEvaded}/${BOSS_SHOTS_TO_EVADE}`,
- w - 12,
- 48,
- );
- }
ctx.restore();
-
- // Prompts
- if (!started && !s.running && !s.over) {
- ctx.save();
- ctx.fillStyle = fg;
- ctx.globalAlpha = 0.5;
- ctx.font = "12px sans-serif";
- ctx.textAlign = "center";
- ctx.fillText("Click or press Space to play while you wait", w / 2, h / 2);
- ctx.restore();
- }
-
- if (s.over) {
- ctx.save();
- ctx.fillStyle = fg;
- ctx.globalAlpha = 0.7;
- ctx.font = "bold 13px sans-serif";
- ctx.textAlign = "center";
- ctx.fillText("Game Over", w / 2, h / 2 - 8);
- ctx.font = "11px sans-serif";
- ctx.fillText("Click or Space to restart", w / 2, h / 2 + 10);
- ctx.restore();
- }
}
/* ------------------------------------------------------------------ */
@@ -477,6 +777,13 @@ export function useMiniGame() {
const stateRef = useRef<GameState | null>(null);
const rafRef = useRef(0);
const startedRef = useRef(false);
+ const keysRef = useRef({ left: false, right: false });
+ const [activeMode, setActiveMode] = useState<
+ "idle" | "run" | "boss" | "over" | "boss-intro" | "boss-defeated"
+ >("idle");
+ const [showOverlay, setShowOverlay] = useState(true);
+ const [score, setScore] = useState(0);
+ const [highScore, setHighScore] = useState(0);
useEffect(() => {
const canvas = canvasRef.current;
@@ -494,40 +801,91 @@ export function useMiniGame() {
const style = getComputedStyle(canvas);
let fg = style.color || "#71717a";
+ // Load sprite sheets
+ const sprites: Sprites = {
+ run: new Image(),
+ idle: new Image(),
+ attack: new Image(),
+ guard: new Image(),
+ trees: [new Image(), new Image(), new Image()],
+ archerIdle: new Image(),
+ archerAttack: new Image(),
+ };
+ sprites.run.src = runSheet.src;
+ sprites.idle.src = idleSheet.src;
+ sprites.attack.src = attackSheet.src;
+ sprites.guard.src = guardSheet.src;
+ sprites.trees[0].src = tree1Sheet.src;
+ sprites.trees[1].src = tree2Sheet.src;
+ sprites.trees[2].src = tree3Sheet.src;
+ sprites.archerIdle.src = archerIdleSheet.src;
+ sprites.archerAttack.src = archerAttackSheet.src;
+
+ let prevPhase = "";
+
// -------------------------------------------------------------- //
- // Jump //
+ // Input //
// -------------------------------------------------------------- //
function jump() {
const s = stateRef.current;
- if (!s) return;
+ if (!s || !s.running || s.paused || s.over || s.deathAnim) return;
- if (s.over) {
- const hs = s.highScore;
- const gy = s.groundY;
- stateRef.current = makeState(gy);
- stateRef.current.highScore = hs;
- stateRef.current.running = true;
- startedRef.current = true;
- return;
- }
-
- if (!s.running) {
- s.running = true;
- startedRef.current = true;
- return;
- }
-
- // Only jump when on the ground
if (s.charY + CHAR_SIZE >= s.groundY) {
s.vy = JUMP_FORCE;
}
}
- function onKey(e: KeyboardEvent) {
+ function attack() {
+ const s = stateRef.current;
+ if (!s || !s.running || s.attacking || s.guarding || s.deathAnim) return;
+ s.attacking = true;
+ s.attackFrame = 0;
+ s.attackHit = false;
+ }
+
+ function guardStart() {
+ const s = stateRef.current;
+ if (!s || !s.running || s.attacking || s.deathAnim) return;
+ if (!s.guarding) {
+ s.guarding = true;
+ s.guardFrame = 0;
+ }
+ }
+
+ function guardEnd() {
+ const s = stateRef.current;
+ if (!s) return;
+ s.guarding = false;
+ s.guardFrame = 0;
+ }
+
+ function onKeyDown(e: KeyboardEvent) {
if (e.code === "Space" || e.key === " ") {
e.preventDefault();
jump();
}
+ if (e.code === "KeyZ") {
+ e.preventDefault();
+ attack();
+ }
+ if (e.code === "KeyX") {
+ e.preventDefault();
+ guardStart();
+ }
+ if (e.code === "ArrowLeft") {
+ e.preventDefault();
+ keysRef.current.left = true;
+ }
+ if (e.code === "ArrowRight") {
+ e.preventDefault();
+ keysRef.current.right = true;
+ }
+ }
+
+ function onKeyUp(e: KeyboardEvent) {
+ if (e.code === "ArrowLeft") keysRef.current.left = false;
+ if (e.code === "ArrowRight") keysRef.current.right = false;
+ if (e.code === "KeyX") guardEnd();
}
function onClick() {
@@ -544,15 +902,58 @@ export function useMiniGame() {
const ctx = canvas.getContext("2d");
if (!ctx) return;
- update(s, canvas.width);
- draw(ctx, s, canvas.width, canvas.height, fg, startedRef.current);
+ update(s, canvas.width, keysRef.current);
+ draw(ctx, s, canvas.width, canvas.height, fg, sprites);
+
+ // Update active mode on phase change
+ let phase: string;
+ if (s.over) phase = "over";
+ else if (!startedRef.current) phase = "idle";
+ else if (s.paused && s.boss.hp <= 0) phase = "boss-defeated";
+ else if (s.paused) phase = "boss-intro";
+ else if (s.boss.phase !== "inactive") phase = "boss";
+ else phase = "running";
+
+ if (phase !== prevPhase) {
+ prevPhase = phase;
+ switch (phase) {
+ case "idle":
+ setActiveMode("idle");
+ setShowOverlay(true);
+ break;
+ case "running":
+ setActiveMode("run");
+ setShowOverlay(false);
+ break;
+ case "boss-intro":
+ setActiveMode("boss-intro");
+ setShowOverlay(true);
+ break;
+ case "boss":
+ setActiveMode("boss");
+ setShowOverlay(false);
+ break;
+ case "boss-defeated":
+ setActiveMode("boss-defeated");
+ setShowOverlay(true);
+ break;
+ case "over":
+ setActiveMode("over");
+ setScore(s.score);
+ setHighScore(s.highScore);
+ setShowOverlay(true);
+ break;
+ }
+ }
+
rafRef.current = requestAnimationFrame(loop);
}
rafRef.current = requestAnimationFrame(loop);
canvas.addEventListener("click", onClick);
- canvas.addEventListener("keydown", onKey);
+ canvas.addEventListener("keydown", onKeyDown);
+ canvas.addEventListener("keyup", onKeyUp);
const observer = new ResizeObserver((entries) => {
for (const entry of entries) {
@@ -570,10 +971,42 @@ export function useMiniGame() {
return () => {
cancelAnimationFrame(rafRef.current);
canvas.removeEventListener("click", onClick);
- canvas.removeEventListener("keydown", onKey);
+ canvas.removeEventListener("keydown", onKeyDown);
+ canvas.removeEventListener("keyup", onKeyUp);
observer.disconnect();
};
}, []);
- return { canvasRef };
+ function onContinue() {
+ const s = stateRef.current;
+ if (!s) return;
+
+ if (s.over) {
+ // Restart after game over
+ const hs = s.highScore;
+ const gy = s.groundY;
+ stateRef.current = makeState(gy);
+ stateRef.current.highScore = hs;
+ stateRef.current.running = true;
+ startedRef.current = true;
+ } else if (!s.running) {
+ // Start game from idle
+ s.running = true;
+ startedRef.current = true;
+ } else if (s.boss.hp <= 0) {
+ // Boss defeated — reset boss, resume running
+ s.boss = makeBoss(s.groundY);
+ s.charX = CHAR_X;
+ s.nextSpawn = s.frame + randInt(SPAWN_MIN / 2, SPAWN_MAX / 2);
+ s.paused = false;
+ } else {
+ // Boss intro — unpause
+ s.paused = false;
+ }
+
+ setShowOverlay(false);
+ canvasRef.current?.focus();
+ }
+
+ return { canvasRef, activeMode, showOverlay, score, highScore, onContinue };
}
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/helpers.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/helpers.tsx
index bd47eac051..03fdb8966f 100644
--- a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/helpers.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/helpers.tsx
@@ -136,7 +136,7 @@ export function getAnimationText(part: {
if (isOperationPendingOutput(output)) return "Agent creation in progress";
if (isOperationInProgressOutput(output))
return "Agent creation already in progress";
- if (isAgentSavedOutput(output)) return `Saved "${output.agent_name}"`;
+ if (isAgentSavedOutput(output)) return `Saved ${output.agent_name}`;
if (isAgentPreviewOutput(output)) return `Preview "${output.agent_name}"`;
if (isClarificationNeededOutput(output)) return "Needs clarification";
return "Error creating agent";
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/EditAgent/EditAgent.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/EditAgent/EditAgent.tsx
index 6766a5cb49..40bccd6c61 100644
--- a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/EditAgent/EditAgent.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/EditAgent/EditAgent.tsx
@@ -5,7 +5,6 @@ import type { ToolUIPart } from "ai";
import { useCopilotChatActions } from "../../components/CopilotChatActionsProvider/useCopilotChatActions";
import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";
-import { ProgressBar } from "../../components/ProgressBar/ProgressBar";
import {
ContentCardDescription,
ContentCodeBlock,
@@ -15,7 +14,7 @@ import {
ContentMessage,
} from "../../components/ToolAccordion/AccordionContent";
import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion";
-import { useAsymptoticProgress } from "../../hooks/useAsymptoticProgress";
+import { MiniGame } from "../CreateAgent/components/MiniGame/MiniGame";
import {
ClarificationQuestionsCard,
ClarifyingQuestion,
@@ -54,6 +53,7 @@ function getAccordionMeta(output: EditAgentToolOutput): {
title: string;
titleClassName?: string;
description?: string;
+ expanded?: boolean;
} {
const icon = ;
@@ -80,7 +80,11 @@ function getAccordionMeta(output: EditAgentToolOutput): {
isOperationPendingOutput(output) ||
isOperationInProgressOutput(output)
) {
- return { icon: , title: "Editing agent" };
+ return {
+ icon: ,
+ title: "Editing agent, this may take a few minutes. Play while you wait.",
+ expanded: true,
+ };
}
return {
icon: (
@@ -105,7 +109,6 @@ export function EditAgentTool({ part }: Props) {
(isOperationStartedOutput(output) ||
isOperationPendingOutput(output) ||
isOperationInProgressOutput(output));
- const progress = useAsymptoticProgress(isOperating);
const hasExpandableContent =
part.state === "output-available" &&
!!output &&
@@ -149,9 +152,9 @@ export function EditAgentTool({ part }: Props) {
{isOperating && (
-
+
- This could take a few minutes, grab a coffee ☕
+ This could take a few minutes — play while you wait!
)}
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/RunAgent.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/RunAgent.tsx
index f16b9d2b2f..835c04d5a0 100644
--- a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/RunAgent.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunAgent/RunAgent.tsx
@@ -2,8 +2,14 @@
import type { ToolUIPart } from "ai";
import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
+import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";
import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion";
-import { ContentMessage } from "../../components/ToolAccordion/AccordionContent";
+import {
+ ContentGrid,
+ ContentHint,
+ ContentMessage,
+} from "../../components/ToolAccordion/AccordionContent";
+import { MiniGame } from "../CreateAgent/components/MiniGame/MiniGame";
import {
getAccordionMeta,
getAnimationText,
@@ -60,6 +66,21 @@ export function RunAgentTool({ part }: Props) {
/>
+ {isStreaming && !output && (
+
}
+ title="Running agent, this may take a few minutes. Play while you wait."
+ expanded={true}
+ >
+
+
+
+ This could take a few minutes — play while you wait!
+
+
+
+ )}
+
{hasExpandableContent && output && (
{isRunAgentExecutionStartedOutput(output) && (
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/OldAgentLibraryView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/OldAgentLibraryView.tsx
deleted file mode 100644
index 54cc07878d..0000000000
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/OldAgentLibraryView.tsx
+++ /dev/null
@@ -1,631 +0,0 @@
-"use client";
-import { useParams, useRouter } from "next/navigation";
-import { useQueryState } from "nuqs";
-import React, {
- useCallback,
- useEffect,
- useMemo,
- useRef,
- useState,
-} from "react";
-
-import {
- Graph,
- GraphExecution,
- GraphExecutionID,
- GraphExecutionMeta,
- GraphID,
- LibraryAgent,
- LibraryAgentID,
- LibraryAgentPreset,
- LibraryAgentPresetID,
- Schedule,
- ScheduleID,
-} from "@/lib/autogpt-server-api";
-import { useBackendAPI } from "@/lib/autogpt-server-api/context";
-import { exportAsJSONFile } from "@/lib/utils";
-
-import DeleteConfirmDialog from "@/components/__legacy__/delete-confirm-dialog";
-import type { ButtonAction } from "@/components/__legacy__/types";
-import { Button } from "@/components/__legacy__/ui/button";
-import {
- Dialog,
- DialogContent,
- DialogDescription,
- DialogFooter,
- DialogHeader,
- DialogTitle,
-} from "@/components/__legacy__/ui/dialog";
-import LoadingBox, { LoadingSpinner } from "@/components/__legacy__/ui/loading";
-import {
- useToast,
- useToastOnFail,
-} from "@/components/molecules/Toast/use-toast";
-import { AgentRunDetailsView } from "./components/agent-run-details-view";
-import { AgentRunDraftView } from "./components/agent-run-draft-view";
-import { CreatePresetDialog } from "./components/create-preset-dialog";
-import { useAgentRunsInfinite } from "./use-agent-runs";
-import { AgentRunsSelectorList } from "./components/agent-runs-selector-list";
-import { AgentScheduleDetailsView } from "./components/agent-schedule-details-view";
-
-export function OldAgentLibraryView() {
- const { id: agentID }: { id: LibraryAgentID } = useParams();
- const [executionId, setExecutionId] = useQueryState("executionId");
- const toastOnFail = useToastOnFail();
- const { toast } = useToast();
- const router = useRouter();
- const api = useBackendAPI();
-
- // ============================ STATE =============================
-
- const [graph, setGraph] = useState(null); // Graph version corresponding to LibraryAgent
- const [agent, setAgent] = useState(null);
- const agentRunsQuery = useAgentRunsInfinite(graph?.id); // only runs once graph.id is known
- const agentRuns = agentRunsQuery.agentRuns;
- const [agentPresets, setAgentPresets] = useState([]);
- const [schedules, setSchedules] = useState([]);
- const [selectedView, selectView] = useState<
- | { type: "run"; id?: GraphExecutionID }
- | { type: "preset"; id: LibraryAgentPresetID }
- | { type: "schedule"; id: ScheduleID }
- >({ type: "run" });
- const [selectedRun, setSelectedRun] = useState<
- GraphExecution | GraphExecutionMeta | null
- >(null);
- const selectedSchedule =
- selectedView.type == "schedule"
- ? schedules.find((s) => s.id == selectedView.id)
- : null;
- const [isFirstLoad, setIsFirstLoad] = useState(true);
- const [agentDeleteDialogOpen, setAgentDeleteDialogOpen] =
- useState(false);
- const [confirmingDeleteAgentRun, setConfirmingDeleteAgentRun] =
- useState(null);
- const [confirmingDeleteAgentPreset, setConfirmingDeleteAgentPreset] =
- useState(null);
- const [copyAgentDialogOpen, setCopyAgentDialogOpen] = useState(false);
- const [creatingPresetFromExecutionID, setCreatingPresetFromExecutionID] =
- useState(null);
-
- // Set page title with agent name
- useEffect(() => {
- if (agent) {
- document.title = `${agent.name} - Library - AutoGPT Platform`;
- }
- }, [agent]);
-
- const openRunDraftView = useCallback(() => {
- selectView({ type: "run" });
- }, []);
-
- const selectRun = useCallback((id: GraphExecutionID) => {
- selectView({ type: "run", id });
- }, []);
-
- const selectPreset = useCallback((id: LibraryAgentPresetID) => {
- selectView({ type: "preset", id });
- }, []);
-
- const selectSchedule = useCallback((id: ScheduleID) => {
- selectView({ type: "schedule", id });
- }, []);
-
- const graphVersions = useRef>({});
- const loadingGraphVersions = useRef>>({});
- const getGraphVersion = useCallback(
- async (graphID: GraphID, version: number) => {
- if (version in graphVersions.current)
- return graphVersions.current[version];
- if (version in loadingGraphVersions.current)
- return loadingGraphVersions.current[version];
-
- const pendingGraph = api.getGraph(graphID, version).then((graph) => {
- graphVersions.current[version] = graph;
- return graph;
- });
- // Cache promise as well to avoid duplicate requests
- loadingGraphVersions.current[version] = pendingGraph;
- return pendingGraph;
- },
- [api, graphVersions, loadingGraphVersions],
- );
-
- const lastRefresh = useRef(0);
- const refreshPageData = useCallback(() => {
- if (Date.now() - lastRefresh.current < 2e3) return; // 2 second debounce
- lastRefresh.current = Date.now();
-
- api.getLibraryAgent(agentID).then((agent) => {
- setAgent(agent);
-
- getGraphVersion(agent.graph_id, agent.graph_version).then(
- (_graph) =>
- (graph && graph.version == _graph.version) || setGraph(_graph),
- );
- Promise.all([
- agentRunsQuery.refetchRuns(),
- api.listLibraryAgentPresets({
- graph_id: agent.graph_id,
- page_size: 100,
- }),
- ]).then(([runsQueryResult, presets]) => {
- setAgentPresets(presets.presets);
-
- const newestAgentRunsResponse = runsQueryResult.data?.pages[0];
- if (!newestAgentRunsResponse || newestAgentRunsResponse.status != 200)
- return;
- const newestAgentRuns = newestAgentRunsResponse.data.executions;
- // Preload the corresponding graph versions for the latest 10 runs
- new Set(
- newestAgentRuns.slice(0, 10).map((run) => run.graph_version),
- ).forEach((version) => getGraphVersion(agent.graph_id, version));
- });
- });
- }, [api, agentID, getGraphVersion, graph]);
-
- // On first load: select the latest run
- useEffect(() => {
- // Only for first load or first execution
- if (selectedView.id || !isFirstLoad) return;
- if (agentRuns.length == 0 && agentPresets.length == 0) return;
-
- setIsFirstLoad(false);
- if (agentRuns.length > 0) {
- // select latest run
- const latestRun = agentRuns.reduce((latest, current) => {
- if (!latest.started_at && !current.started_at) return latest;
- if (!latest.started_at) return current;
- if (!current.started_at) return latest;
- return latest.started_at > current.started_at ? latest : current;
- }, agentRuns[0]);
- selectRun(latestRun.id as GraphExecutionID);
- } else {
- // select top preset
- const latestPreset = agentPresets.toSorted(
- (a, b) => b.updated_at.getTime() - a.updated_at.getTime(),
- )[0];
- selectPreset(latestPreset.id);
- }
- }, [
- isFirstLoad,
- selectedView.id,
- agentRuns,
- agentPresets,
- selectRun,
- selectPreset,
- ]);
-
- useEffect(() => {
- if (executionId) {
- selectRun(executionId as GraphExecutionID);
- setExecutionId(null);
- }
- }, [executionId, selectRun, setExecutionId]);
-
- // Initial load
- useEffect(() => {
- refreshPageData();
-
- // Show a toast when the WebSocket connection disconnects
- let connectionToast: ReturnType | null = null;
- const cancelDisconnectHandler = api.onWebSocketDisconnect(() => {
- connectionToast ??= toast({
- title: "Connection to server was lost",
- variant: "destructive",
- description: (
-
- Trying to reconnect...
-
-
- ),
- duration: Infinity,
- dismissable: true,
- });
- });
- const cancelConnectHandler = api.onWebSocketConnect(() => {
- if (connectionToast)
- connectionToast.update({
- id: connectionToast.id,
- title: "✅ Connection re-established",
- variant: "default",
- description: (
-
- Refreshing data...
-
-
- ),
- duration: 2000,
- dismissable: true,
- });
- connectionToast = null;
- });
- return () => {
- cancelDisconnectHandler();
- cancelConnectHandler();
- };
- }, []);
-
- // Subscribe to WebSocket updates for agent runs
- useEffect(() => {
- if (!agent?.graph_id) return;
-
- return api.onWebSocketConnect(() => {
- refreshPageData(); // Sync up on (re)connect
-
- // Subscribe to all executions for this agent
- api.subscribeToGraphExecutions(agent.graph_id);
- });
- }, [api, agent?.graph_id, refreshPageData]);
-
- // Handle execution updates
- useEffect(() => {
- const detachExecUpdateHandler = api.onWebSocketMessage(
- "graph_execution_event",
- (data) => {
- if (data.graph_id != agent?.graph_id) return;
-
- agentRunsQuery.upsertAgentRun(data);
- if (data.id === selectedView.id) {
- // Update currently viewed run
- setSelectedRun(data);
- }
- },
- );
-
- return () => {
- detachExecUpdateHandler();
- };
- }, [api, agent?.graph_id, selectedView.id]);
-
- // Pre-load selectedRun based on selectedView
- useEffect(() => {
- if (selectedView.type != "run" || !selectedView.id) return;
-
- const newSelectedRun = agentRuns.find((run) => run.id == selectedView.id);
- if (selectedView.id !== selectedRun?.id) {
- // Pull partial data from "cache" while waiting for the rest to load
- setSelectedRun((newSelectedRun as GraphExecutionMeta) ?? null);
- }
- }, [api, selectedView, agentRuns, selectedRun?.id]);
-
- // Load selectedRun based on selectedView; refresh on agent refresh
- useEffect(() => {
- if (selectedView.type != "run" || !selectedView.id || !agent) return;
-
- api
- .getGraphExecutionInfo(agent.graph_id, selectedView.id)
- .then(async (run) => {
- // Ensure corresponding graph version is available before rendering I/O
- await getGraphVersion(run.graph_id, run.graph_version);
- setSelectedRun(run);
- });
- }, [api, selectedView, agent, getGraphVersion]);
-
- const fetchSchedules = useCallback(async () => {
- if (!agent) return;
-
- setSchedules(await api.listGraphExecutionSchedules(agent.graph_id));
- }, [api, agent?.graph_id]);
-
- useEffect(() => {
- fetchSchedules();
- }, [fetchSchedules]);
-
- // =========================== ACTIONS ============================
-
- const deleteRun = useCallback(
- async (run: GraphExecutionMeta) => {
- if (run.status == "RUNNING" || run.status == "QUEUED") {
- await api.stopGraphExecution(run.graph_id, run.id);
- }
- await api.deleteGraphExecution(run.id);
-
- setConfirmingDeleteAgentRun(null);
- if (selectedView.type == "run" && selectedView.id == run.id) {
- openRunDraftView();
- }
- agentRunsQuery.removeAgentRun(run.id);
- },
- [api, selectedView, openRunDraftView],
- );
-
- const deletePreset = useCallback(
- async (presetID: LibraryAgentPresetID) => {
- await api.deleteLibraryAgentPreset(presetID);
-
- setConfirmingDeleteAgentPreset(null);
- if (selectedView.type == "preset" && selectedView.id == presetID) {
- openRunDraftView();
- }
- setAgentPresets((presets) => presets.filter((p) => p.id !== presetID));
- },
- [api, selectedView, openRunDraftView],
- );
-
- const deleteSchedule = useCallback(
- async (scheduleID: ScheduleID) => {
- const removedSchedule =
- await api.deleteGraphExecutionSchedule(scheduleID);
-
- setSchedules((schedules) => {
- const newSchedules = schedules.filter(
- (s) => s.id !== removedSchedule.id,
- );
- if (
- selectedView.type == "schedule" &&
- selectedView.id == removedSchedule.id
- ) {
- if (newSchedules.length > 0) {
- // Select next schedule if available
- selectSchedule(newSchedules[0].id);
- } else {
- // Reset to draft view if current schedule was deleted
- openRunDraftView();
- }
- }
- return newSchedules;
- });
- openRunDraftView();
- },
- [schedules, api],
- );
-
- const handleCreatePresetFromRun = useCallback(
- async (name: string, description: string) => {
- if (!creatingPresetFromExecutionID) return;
-
- await api
- .createLibraryAgentPreset({
- name,
- description,
- graph_execution_id: creatingPresetFromExecutionID,
- })
- .then((preset) => {
- setAgentPresets((prev) => [...prev, preset]);
- selectPreset(preset.id);
- setCreatingPresetFromExecutionID(null);
- })
- .catch(toastOnFail("create a preset"));
- },
- [api, creatingPresetFromExecutionID, selectPreset, toast],
- );
-
- const downloadGraph = useCallback(
- async () =>
- agent &&
- // Export sanitized graph from backend
- api
- .getGraph(agent.graph_id, agent.graph_version, true)
- .then((graph) =>
- exportAsJSONFile(graph, `${graph.name}_v${graph.version}.json`),
- ),
- [api, agent],
- );
-
- const copyAgent = useCallback(async () => {
- setCopyAgentDialogOpen(false);
- api
- .forkLibraryAgent(agentID)
- .then((newAgent) => {
- router.push(`/library/agents/${newAgent.id}`);
- })
- .catch((error) => {
- console.error("Error copying agent:", error);
- toast({
- title: "Error copying agent",
- description: `An error occurred while copying the agent: ${error.message}`,
- variant: "destructive",
- });
- });
- }, [agentID, api, router, toast]);
-
- const agentActions: ButtonAction[] = useMemo(
- () => [
- {
- label: "Customize agent",
- href: `/build?flowID=${agent?.graph_id}&flowVersion=${agent?.graph_version}`,
- disabled: !agent?.can_access_graph,
- },
- { label: "Export agent to file", callback: downloadGraph },
- ...(!agent?.can_access_graph
- ? [
- {
- label: "Edit a copy",
- callback: () => setCopyAgentDialogOpen(true),
- },
- ]
- : []),
- {
- label: "Delete agent",
- callback: () => setAgentDeleteDialogOpen(true),
- },
- ],
- [agent, downloadGraph],
- );
-
- const runGraph =
- graphVersions.current[selectedRun?.graph_version ?? 0] ?? graph;
-
- const onCreateSchedule = useCallback(
- (schedule: Schedule) => {
- setSchedules((prev) => [...prev, schedule]);
- selectSchedule(schedule.id);
- },
- [selectView],
- );
-
- const onCreatePreset = useCallback(
- (preset: LibraryAgentPreset) => {
- setAgentPresets((prev) => [...prev, preset]);
- selectPreset(preset.id);
- },
- [selectPreset],
- );
-
- const onUpdatePreset = useCallback(
- (updated: LibraryAgentPreset) => {
- setAgentPresets((prev) =>
- prev.map((p) => (p.id === updated.id ? updated : p)),
- );
- selectPreset(updated.id);
- },
- [selectPreset],
- );
-
- if (!agent || !graph) {
- return ;
- }
-
- return (
-
- {/* Sidebar w/ list of runs */}
- {/* TODO: render this below header in sm and md layouts */}
-
-
-
- {/* Header */}
-
-
- {
- agent.name /* TODO: use dynamic/custom run title - https://github.com/Significant-Gravitas/AutoGPT/issues/9184 */
- }
-
-
-
- {/* Run / Schedule views */}
- {(selectedView.type == "run" && selectedView.id ? (
- selectedRun && runGraph ? (
-
setConfirmingDeleteAgentRun(selectedRun)}
- doCreatePresetFromRun={() =>
- setCreatingPresetFromExecutionID(selectedRun.id)
- }
- />
- ) : null
- ) : selectedView.type == "run" ? (
- /* Draft new runs / Create new presets */
-
- ) : selectedView.type == "preset" ? (
- /* Edit & update presets */
- preset.id == selectedView.id)!
- }
- onRun={selectRun}
- recommendedScheduleCron={agent?.recommended_schedule_cron || null}
- onCreateSchedule={onCreateSchedule}
- onUpdatePreset={onUpdatePreset}
- doDeletePreset={setConfirmingDeleteAgentPreset}
- agentActions={agentActions}
- />
- ) : selectedView.type == "schedule" ? (
- selectedSchedule &&
- graph && (
-
- )
- ) : null) || }
-
-
- agent &&
- api.deleteLibraryAgent(agent.id).then(() => router.push("/library"))
- }
- />
-
- !open && setConfirmingDeleteAgentRun(null)}
- onDoDelete={() =>
- confirmingDeleteAgentRun && deleteRun(confirmingDeleteAgentRun)
- }
- />
- !open && setConfirmingDeleteAgentPreset(null)}
- onDoDelete={() =>
- confirmingDeleteAgentPreset &&
- deletePreset(confirmingDeleteAgentPreset)
- }
- />
- {/* Copy agent confirmation dialog */}
-
-
-
- You're making an editable copy
-
- The original Marketplace agent stays the same and cannot be
- edited. We'll save a new version of this agent to your
- Library. From there, you can customize it however you'd
- like by clicking "Customize agent" — this will open
- the builder where you can see and modify the inner workings.
-
-
-
- setCopyAgentDialogOpen(false)}
- >
- Cancel
-
-
- Continue
-
-
-
-
- setCreatingPresetFromExecutionID(null)}
- onConfirm={handleCreatePresetFromRun}
- />
-
-
- );
-}
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-details-view.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-details-view.tsx
deleted file mode 100644
index eb5224c958..0000000000
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-details-view.tsx
+++ /dev/null
@@ -1,445 +0,0 @@
-"use client";
-import { format, formatDistanceToNow, formatDistanceStrict } from "date-fns";
-import React, { useCallback, useMemo, useEffect } from "react";
-
-import {
- Graph,
- GraphExecution,
- GraphExecutionID,
- GraphExecutionMeta,
- LibraryAgent,
-} from "@/lib/autogpt-server-api";
-import { useBackendAPI } from "@/lib/autogpt-server-api/context";
-
-import ActionButtonGroup from "@/components/__legacy__/action-button-group";
-import type { ButtonAction } from "@/components/__legacy__/types";
-import {
- Card,
- CardContent,
- CardHeader,
- CardTitle,
-} from "@/components/__legacy__/ui/card";
-import {
- IconRefresh,
- IconSquare,
- IconCircleAlert,
-} from "@/components/__legacy__/ui/icons";
-import { Input } from "@/components/__legacy__/ui/input";
-import LoadingBox from "@/components/__legacy__/ui/loading";
-import {
- Tooltip,
- TooltipContent,
- TooltipProvider,
- TooltipTrigger,
-} from "@/components/atoms/Tooltip/BaseTooltip";
-import { useToastOnFail } from "@/components/molecules/Toast/use-toast";
-
-import { AgentRunStatus, agentRunStatusMap } from "./agent-run-status-chip";
-import useCredits from "@/hooks/useCredits";
-import { AgentRunOutputView } from "./agent-run-output-view";
-import { analytics } from "@/services/analytics";
-import { PendingReviewsList } from "@/components/organisms/PendingReviewsList/PendingReviewsList";
-import { usePendingReviewsForExecution } from "@/hooks/usePendingReviews";
-
-export function AgentRunDetailsView({
- agent,
- graph,
- run,
- agentActions,
- onRun,
- doDeleteRun,
- doCreatePresetFromRun,
-}: {
- agent: LibraryAgent;
- graph: Graph;
- run: GraphExecution | GraphExecutionMeta;
- agentActions: ButtonAction[];
- onRun: (runID: GraphExecutionID) => void;
- doDeleteRun: () => void;
- doCreatePresetFromRun: () => void;
-}): React.ReactNode {
- const api = useBackendAPI();
- const { formatCredits } = useCredits();
-
- const runStatus: AgentRunStatus = useMemo(
- () => agentRunStatusMap[run.status],
- [run],
- );
-
- const {
- pendingReviews,
- isLoading: reviewsLoading,
- refetch: refetchReviews,
- } = usePendingReviewsForExecution(run.id);
-
- const toastOnFail = useToastOnFail();
-
- // Refetch pending reviews when execution status changes to REVIEW
- useEffect(() => {
- if (runStatus === "review" && run.id) {
- refetchReviews();
- }
- }, [runStatus, run.id, refetchReviews]);
-
- const infoStats: { label: string; value: React.ReactNode }[] = useMemo(() => {
- if (!run) return [];
- return [
- {
- label: "Status",
- value: runStatus.charAt(0).toUpperCase() + runStatus.slice(1),
- },
- {
- label: "Started",
- value: run.started_at
- ? `${formatDistanceToNow(run.started_at, { addSuffix: true })}, ${format(run.started_at, "HH:mm")}`
- : "—",
- },
- ...(run.stats
- ? [
- {
- label: "Duration",
- value: formatDistanceStrict(0, run.stats.duration * 1000),
- },
- { label: "Steps", value: run.stats.node_exec_count },
- { label: "Cost", value: formatCredits(run.stats.cost) },
- ]
- : []),
- ];
- }, [run, runStatus, formatCredits]);
-
- const agentRunInputs:
- | Record<
- string,
- {
- title?: string;
- /* type: BlockIOSubType; */
- value: string | number | undefined;
- }
- >
- | undefined = useMemo(() => {
- if (!run.inputs) return undefined;
- // TODO: show (link to) preset - https://github.com/Significant-Gravitas/AutoGPT/issues/9168
-
- // Add type info from agent input schema
- return Object.fromEntries(
- Object.entries(run.inputs).map(([k, v]) => [
- k,
- {
- title: graph.input_schema.properties[k]?.title,
- // type: graph.input_schema.properties[k].type, // TODO: implement typed graph inputs
- value: typeof v == "object" ? JSON.stringify(v, undefined, 2) : v,
- },
- ]),
- );
- }, [graph, run]);
-
- const runAgain = useCallback(() => {
- if (
- !run.inputs ||
- !(graph.credentials_input_schema?.required ?? []).every(
- (k) => k in (run.credential_inputs ?? {}),
- )
- )
- return;
-
- if (run.preset_id) {
- return api
- .executeLibraryAgentPreset(
- run.preset_id,
- run.inputs!,
- run.credential_inputs!,
- )
- .then(({ id }) => {
- analytics.sendDatafastEvent("run_agent", {
- name: graph.name,
- id: graph.id,
- });
- onRun(id);
- })
- .catch(toastOnFail("execute agent preset"));
- }
-
- return api
- .executeGraph(
- graph.id,
- graph.version,
- run.inputs!,
- run.credential_inputs!,
- "library",
- )
- .then(({ id }) => {
- analytics.sendDatafastEvent("run_agent", {
- name: graph.name,
- id: graph.id,
- });
- onRun(id);
- })
- .catch(toastOnFail("execute agent"));
- }, [api, graph, run, onRun, toastOnFail]);
-
- const stopRun = useCallback(
- () => api.stopGraphExecution(graph.id, run.id),
- [api, graph.id, run.id],
- );
-
- const agentRunOutputs:
- | Record<
- string,
- {
- title?: string;
- /* type: BlockIOSubType; */
- values: Array;
- }
- >
- | null
- | undefined = useMemo(() => {
- if (!("outputs" in run)) return undefined;
- if (!["running", "success", "failed", "stopped"].includes(runStatus))
- return null;
-
- // Add type info from agent input schema
- return Object.fromEntries(
- Object.entries(run.outputs).map(([k, vv]) => [
- k,
- {
- title: graph.output_schema.properties[k].title,
- /* type: agent.output_schema.properties[k].type */
- values: vv.map((v) =>
- typeof v == "object" ? JSON.stringify(v, undefined, 2) : v,
- ),
- },
- ]),
- );
- }, [graph, run, runStatus]);
-
- const runActions: ButtonAction[] = useMemo(
- () => [
- ...(["running", "queued"].includes(runStatus)
- ? ([
- {
- label: (
- <>
-
- Stop run
- >
- ),
- variant: "secondary",
- callback: stopRun,
- },
- ] satisfies ButtonAction[])
- : []),
- ...(["success", "failed", "stopped"].includes(runStatus) &&
- !graph.has_external_trigger &&
- (graph.credentials_input_schema?.required ?? []).every(
- (k) => k in (run.credential_inputs ?? {}),
- )
- ? [
- {
- label: (
- <>
-
- Run again
- >
- ),
- callback: runAgain,
- dataTestId: "run-again-button",
- },
- ]
- : []),
- ...(agent.can_access_graph
- ? [
- {
- label: "Open run in builder",
- href: `/build?flowID=${run.graph_id}&flowVersion=${run.graph_version}&flowExecutionID=${run.id}`,
- },
- ]
- : []),
- { label: "Create preset from run", callback: doCreatePresetFromRun },
- { label: "Delete run", variant: "secondary", callback: doDeleteRun },
- ],
- [
- runStatus,
- runAgain,
- stopRun,
- doDeleteRun,
- doCreatePresetFromRun,
- graph.has_external_trigger,
- graph.credentials_input_schema?.required,
- agent.can_access_graph,
- run.graph_id,
- run.graph_version,
- run.id,
- ],
- );
-
- return (
-
-
-
-
- Info
-
-
-
-
- {infoStats.map(({ label, value }) => (
-
- ))}
-
- {run.status === "FAILED" && (
-
-
- Error: {" "}
- {run.stats?.error ||
- "The execution failed due to an internal error. You can re-run the agent to retry."}
-
-
- )}
-
-
-
- {/* Smart Agent Execution Summary */}
- {run.stats?.activity_status && (
-
-
-
- Task Summary
-
-
-
-
-
-
-
- This AI-generated summary describes how the agent
- handled your task. It’s an experimental feature and may
- occasionally be inaccurate.
-
-
-
-
-
-
-
-
- {run.stats.activity_status}
-
-
- {/* Correctness Score */}
- {typeof run.stats.correctness_score === "number" && (
-
-
-
- Success Estimate:
-
-
-
-
= 0.8
- ? "bg-green-500"
- : run.stats.correctness_score >= 0.6
- ? "bg-yellow-500"
- : run.stats.correctness_score >= 0.4
- ? "bg-orange-500"
- : "bg-red-500"
- }`}
- style={{
- width: `${Math.round(run.stats.correctness_score * 100)}%`,
- }}
- />
-
-
- {Math.round(run.stats.correctness_score * 100)}%
-
-
-
-
-
-
-
-
-
-
- AI-generated estimate of how well this execution
- achieved its intended purpose. This score indicates
- {run.stats.correctness_score >= 0.8
- ? " the agent was highly successful."
- : run.stats.correctness_score >= 0.6
- ? " the agent was mostly successful with minor issues."
- : run.stats.correctness_score >= 0.4
- ? " the agent was partially successful with some gaps."
- : " the agent had limited success with significant issues."}
-
-
-
-
-
- )}
-
-
- )}
-
- {agentRunOutputs !== null && (
-
- )}
-
- {/* Pending Reviews Section */}
- {runStatus === "review" && (
-
-
-
- Pending Reviews ({pendingReviews.length})
-
-
-
- {reviewsLoading ? (
-
- ) : pendingReviews.length > 0 ? (
-
- ) : (
-
- No pending reviews for this execution
-
- )}
-
-
- )}
-
-
-
- Input
-
-
- {agentRunInputs !== undefined ? (
- Object.entries(agentRunInputs).map(([key, { title, value }]) => (
-
- {title || key}
-
-
- ))
- ) : (
-
- )}
-
-
-
-
- {/* Run / Agent Actions */}
-
-
- );
-}
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-output-view.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-output-view.tsx
deleted file mode 100644
index 668ac2e215..0000000000
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-output-view.tsx
+++ /dev/null
@@ -1,178 +0,0 @@
-"use client";
-
-import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
-import React, { useMemo } from "react";
-
-import {
- Card,
- CardContent,
- CardHeader,
- CardTitle,
-} from "@/components/__legacy__/ui/card";
-
-import LoadingBox from "@/components/__legacy__/ui/loading";
-import type { OutputMetadata } from "../../../../../../../../components/contextual/OutputRenderers";
-import {
- globalRegistry,
- OutputActions,
- OutputItem,
-} from "../../../../../../../../components/contextual/OutputRenderers";
-
-export function AgentRunOutputView({
- agentRunOutputs,
-}: {
- agentRunOutputs:
- | Record<
- string,
- {
- title?: string;
- /* type: BlockIOSubType; */
- values: Array
;
- }
- >
- | undefined;
-}) {
- const enableEnhancedOutputHandling = useGetFlag(
- Flag.ENABLE_ENHANCED_OUTPUT_HANDLING,
- );
-
- // Prepare items for the renderer system
- const outputItems = useMemo(() => {
- if (!agentRunOutputs) return [];
-
- const items: Array<{
- key: string;
- label: string;
- value: unknown;
- metadata?: OutputMetadata;
- renderer: any;
- }> = [];
-
- Object.entries(agentRunOutputs).forEach(([key, { title, values }]) => {
- values.forEach((value, index) => {
- // Enhanced metadata extraction
- const metadata: OutputMetadata = {};
-
- // Type guard to safely access properties
- if (
- typeof value === "object" &&
- value !== null &&
- !React.isValidElement(value)
- ) {
- const objValue = value as any;
- if (objValue.type) metadata.type = objValue.type;
- if (objValue.mimeType) metadata.mimeType = objValue.mimeType;
- if (objValue.filename) metadata.filename = objValue.filename;
- }
-
- const renderer = globalRegistry.getRenderer(value, metadata);
- if (renderer) {
- items.push({
- key: `${key}-${index}`,
- label: index === 0 ? title || key : "",
- value,
- metadata,
- renderer,
- });
- } else {
- const textRenderer = globalRegistry
- .getAllRenderers()
- .find((r) => r.name === "TextRenderer");
- if (textRenderer) {
- items.push({
- key: `${key}-${index}`,
- label: index === 0 ? title || key : "",
- value: JSON.stringify(value, null, 2),
- metadata,
- renderer: textRenderer,
- });
- }
- }
- });
- });
-
- return items;
- }, [agentRunOutputs]);
-
- return (
- <>
- {enableEnhancedOutputHandling ? (
-
-
-
- Output
- {outputItems.length > 0 && (
- ({
- value: item.value,
- metadata: item.metadata,
- renderer: item.renderer,
- }))}
- />
- )}
-
-
-
-
- {agentRunOutputs !== undefined ? (
- outputItems.length > 0 ? (
- outputItems.map((item) => (
-
- ))
- ) : (
-
- No outputs to display
-
- )
- ) : (
-
- )}
-
-
- ) : (
-
-
- Output
-
-
-
- {agentRunOutputs !== undefined ? (
- Object.entries(agentRunOutputs).map(
- ([key, { title, values }]) => (
-
-
- {title || key}
-
- {values.map((value, i) => (
-
- {value}
-
- ))}
- {/* TODO: pretty type-dependent rendering */}
-
- ),
- )
- ) : (
-
- )}
-
-
- )}
- >
- );
-}
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-status-chip.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-status-chip.tsx
deleted file mode 100644
index 58f1ee8381..0000000000
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-status-chip.tsx
+++ /dev/null
@@ -1,68 +0,0 @@
-import React from "react";
-
-import { Badge } from "@/components/__legacy__/ui/badge";
-
-import { GraphExecutionMeta } from "@/lib/autogpt-server-api/types";
-
-export type AgentRunStatus =
- | "success"
- | "failed"
- | "queued"
- | "running"
- | "stopped"
- | "scheduled"
- | "draft"
- | "review";
-
-export const agentRunStatusMap: Record<
- GraphExecutionMeta["status"],
- AgentRunStatus
-> = {
- INCOMPLETE: "draft",
- COMPLETED: "success",
- FAILED: "failed",
- QUEUED: "queued",
- RUNNING: "running",
- TERMINATED: "stopped",
- REVIEW: "review",
-};
-
-const statusData: Record<
- AgentRunStatus,
- { label: string; variant: keyof typeof statusStyles }
-> = {
- success: { label: "Success", variant: "success" },
- running: { label: "Running", variant: "info" },
- failed: { label: "Failed", variant: "destructive" },
- queued: { label: "Queued", variant: "warning" },
- draft: { label: "Draft", variant: "secondary" },
- stopped: { label: "Stopped", variant: "secondary" },
- scheduled: { label: "Scheduled", variant: "secondary" },
- review: { label: "In Review", variant: "warning" },
-};
-
-const statusStyles = {
- success:
- "bg-green-100 text-green-800 hover:bg-green-100 hover:text-green-800",
- destructive: "bg-red-100 text-red-800 hover:bg-red-100 hover:text-red-800",
- warning:
- "bg-yellow-100 text-yellow-800 hover:bg-yellow-100 hover:text-yellow-800",
- info: "bg-blue-100 text-blue-800 hover:bg-blue-100 hover:text-blue-800",
- secondary:
- "bg-slate-100 text-slate-800 hover:bg-slate-100 hover:text-slate-800",
-};
-
-export function AgentRunStatusChip({
- status,
-}: {
- status: AgentRunStatus;
-}): React.ReactElement {
- return (
-
- {statusData[status]?.label}
-
- );
-}
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-summary-card.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-summary-card.tsx
deleted file mode 100644
index 6f7d7865bc..0000000000
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-summary-card.tsx
+++ /dev/null
@@ -1,130 +0,0 @@
-import React from "react";
-import { formatDistanceToNow, isPast } from "date-fns";
-
-import { cn } from "@/lib/utils";
-
-import { Link2Icon, Link2OffIcon, MoreVertical } from "lucide-react";
-import { Card, CardContent } from "@/components/__legacy__/ui/card";
-import { Button } from "@/components/__legacy__/ui/button";
-import {
- DropdownMenu,
- DropdownMenuContent,
- DropdownMenuItem,
- DropdownMenuTrigger,
-} from "@/components/__legacy__/ui/dropdown-menu";
-
-import { AgentStatus, AgentStatusChip } from "./agent-status-chip";
-import { AgentRunStatus, AgentRunStatusChip } from "./agent-run-status-chip";
-import { PushPinSimpleIcon } from "@phosphor-icons/react";
-
-export type AgentRunSummaryProps = (
- | {
- type: "run";
- status: AgentRunStatus;
- }
- | {
- type: "preset";
- status?: undefined;
- }
- | {
- type: "preset.triggered";
- status: AgentStatus;
- }
- | {
- type: "schedule";
- status: "scheduled";
- }
-) & {
- title: string;
- timestamp?: number | Date;
- selected?: boolean;
- onClick?: () => void;
- // onRename: () => void;
- onDelete: () => void;
- onPinAsPreset?: () => void;
- className?: string;
-};
-
-export function AgentRunSummaryCard({
- type,
- status,
- title,
- timestamp,
- selected = false,
- onClick,
- // onRename,
- onDelete,
- onPinAsPreset,
- className,
-}: AgentRunSummaryProps): React.ReactElement {
- return (
-
-
- {(type == "run" || type == "schedule") && (
-
- )}
- {type == "preset" && (
-
- )}
- {type == "preset.triggered" && (
-
-
-
-
- {status == "inactive" ? (
-
- ) : (
-
- )}{" "}
- Trigger
-
-
- )}
-
-
-
- {title}
-
-
-
-
-
-
-
-
-
- {onPinAsPreset && (
-
- Pin as a preset
-
- )}
-
- {/* Rename */}
-
- Delete
-
-
-
-
- {timestamp && (
-
- {isPast(timestamp) ? "Ran" : "Runs in"}{" "}
- {formatDistanceToNow(timestamp, { addSuffix: true })}
-
- )}
-
-
- );
-}
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-runs-selector-list.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-runs-selector-list.tsx
deleted file mode 100644
index 49d93b4319..0000000000
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-runs-selector-list.tsx
+++ /dev/null
@@ -1,237 +0,0 @@
-"use client";
-import { Plus } from "lucide-react";
-import React, { useEffect, useState } from "react";
-
-import {
- GraphExecutionID,
- GraphExecutionMeta,
- LibraryAgent,
- LibraryAgentPreset,
- LibraryAgentPresetID,
- Schedule,
- ScheduleID,
-} from "@/lib/autogpt-server-api";
-import { cn } from "@/lib/utils";
-
-import { Badge } from "@/components/__legacy__/ui/badge";
-import { Button } from "@/components/atoms/Button/Button";
-import LoadingBox, { LoadingSpinner } from "@/components/__legacy__/ui/loading";
-import { Separator } from "@/components/__legacy__/ui/separator";
-import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
-import { InfiniteScroll } from "@/components/contextual/InfiniteScroll/InfiniteScroll";
-import { AgentRunsQuery } from "../use-agent-runs";
-import { agentRunStatusMap } from "./agent-run-status-chip";
-import { AgentRunSummaryCard } from "./agent-run-summary-card";
-
-interface AgentRunsSelectorListProps {
- agent: LibraryAgent;
- agentRunsQuery: AgentRunsQuery;
- agentPresets: LibraryAgentPreset[];
- schedules: Schedule[];
- selectedView: { type: "run" | "preset" | "schedule"; id?: string };
- allowDraftNewRun?: boolean;
- onSelectRun: (id: GraphExecutionID) => void;
- onSelectPreset: (preset: LibraryAgentPresetID) => void;
- onSelectSchedule: (id: ScheduleID) => void;
- onSelectDraftNewRun: () => void;
- doDeleteRun: (id: GraphExecutionMeta) => void;
- doDeletePreset: (id: LibraryAgentPresetID) => void;
- doDeleteSchedule: (id: ScheduleID) => void;
- doCreatePresetFromRun?: (id: GraphExecutionID) => void;
- className?: string;
-}
-
-export function AgentRunsSelectorList({
- agent,
- agentRunsQuery: {
- agentRuns,
- agentRunCount,
- agentRunsLoading,
- hasMoreRuns,
- fetchMoreRuns,
- isFetchingMoreRuns,
- },
- agentPresets,
- schedules,
- selectedView,
- allowDraftNewRun = true,
- onSelectRun,
- onSelectPreset,
- onSelectSchedule,
- onSelectDraftNewRun,
- doDeleteRun,
- doDeletePreset,
- doDeleteSchedule,
- doCreatePresetFromRun,
- className,
-}: AgentRunsSelectorListProps): React.ReactElement {
- const [activeListTab, setActiveListTab] = useState<"runs" | "scheduled">(
- "runs",
- );
-
- useEffect(() => {
- if (selectedView.type === "schedule") {
- setActiveListTab("scheduled");
- } else {
- setActiveListTab("runs");
- }
- }, [selectedView]);
-
- const listItemClasses = "h-28 w-72 lg:w-full lg:h-32";
-
- return (
-
- {allowDraftNewRun ? (
- }
- >
- New {agent.has_external_trigger ? "trigger" : "run"}
-
- ) : null}
-
-
- setActiveListTab("runs")}
- >
- Runs
-
- {agentRunCount ?? }
-
-
-
- setActiveListTab("scheduled")}
- >
- Scheduled
- {schedules.length}
-
-
-
- {/* Runs / Schedules list */}
- {agentRunsLoading && activeListTab === "runs" ? (
-
- ) : (
- = 1024 ? "vertical" : "horizontal"}
- >
- = 1024 ? "vertical" : "horizontal"}
- hasNextPage={hasMoreRuns}
- fetchNextPage={fetchMoreRuns}
- isFetchingNextPage={isFetchingMoreRuns}
- >
-
- {/* New Run button - only in small layouts */}
- {allowDraftNewRun && (
-
}
- >
- New {agent.has_external_trigger ? "trigger" : "run"}
-
- )}
-
- {activeListTab === "runs" ? (
- <>
- {agentPresets
- .filter((preset) => preset.webhook) // Triggers
- .toSorted(
- (a, b) => b.updated_at.getTime() - a.updated_at.getTime(),
- )
- .map((preset) => (
-
onSelectPreset(preset.id)}
- onDelete={() => doDeletePreset(preset.id)}
- />
- ))}
- {agentPresets
- .filter((preset) => !preset.webhook) // Presets
- .toSorted(
- (a, b) => b.updated_at.getTime() - a.updated_at.getTime(),
- )
- .map((preset) => (
- onSelectPreset(preset.id)}
- onDelete={() => doDeletePreset(preset.id)}
- />
- ))}
- {agentPresets.length > 0 && }
- {agentRuns
- .toSorted((a, b) => {
- const aTime = a.started_at?.getTime() ?? 0;
- const bTime = b.started_at?.getTime() ?? 0;
- return bTime - aTime;
- })
- .map((run) => (
- p.id == run.preset_id)
- ?.name
- : null) ?? agent.name
- }
- timestamp={run.started_at ?? undefined}
- selected={selectedView.id === run.id}
- onClick={() => onSelectRun(run.id)}
- onDelete={() => doDeleteRun(run as GraphExecutionMeta)}
- onPinAsPreset={
- doCreatePresetFromRun
- ? () => doCreatePresetFromRun(run.id)
- : undefined
- }
- />
- ))}
- >
- ) : (
- schedules.map((schedule) => (
- onSelectSchedule(schedule.id)}
- onDelete={() => doDeleteSchedule(schedule.id)}
- />
- ))
- )}
-
-
-
- )}
-
- );
-}
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-schedule-details-view.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-schedule-details-view.tsx
deleted file mode 100644
index 30b0a82e65..0000000000
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-schedule-details-view.tsx
+++ /dev/null
@@ -1,180 +0,0 @@
-"use client";
-import React, { useCallback, useMemo } from "react";
-
-import {
- Graph,
- GraphExecutionID,
- Schedule,
- ScheduleID,
-} from "@/lib/autogpt-server-api";
-import { useBackendAPI } from "@/lib/autogpt-server-api/context";
-
-import ActionButtonGroup from "@/components/__legacy__/action-button-group";
-import type { ButtonAction } from "@/components/__legacy__/types";
-import {
- Card,
- CardContent,
- CardHeader,
- CardTitle,
-} from "@/components/__legacy__/ui/card";
-import { IconCross } from "@/components/__legacy__/ui/icons";
-import { Input } from "@/components/__legacy__/ui/input";
-import LoadingBox from "@/components/__legacy__/ui/loading";
-import { useToastOnFail } from "@/components/molecules/Toast/use-toast";
-import { humanizeCronExpression } from "@/lib/cron-expression-utils";
-import { formatScheduleTime } from "@/lib/timezone-utils";
-import { useUserTimezone } from "@/lib/hooks/useUserTimezone";
-import { PlayIcon } from "lucide-react";
-
-import { AgentRunStatus } from "./agent-run-status-chip";
-
-export function AgentScheduleDetailsView({
- graph,
- schedule,
- agentActions,
- onForcedRun,
- doDeleteSchedule,
-}: {
- graph: Graph;
- schedule: Schedule;
- agentActions: ButtonAction[];
- onForcedRun: (runID: GraphExecutionID) => void;
- doDeleteSchedule: (scheduleID: ScheduleID) => void;
-}): React.ReactNode {
- const api = useBackendAPI();
-
- const selectedRunStatus: AgentRunStatus = "scheduled";
-
- const toastOnFail = useToastOnFail();
-
- // Get user's timezone for displaying schedule times
- const userTimezone = useUserTimezone();
-
- const infoStats: { label: string; value: React.ReactNode }[] = useMemo(() => {
- return [
- {
- label: "Status",
- value:
- selectedRunStatus.charAt(0).toUpperCase() +
- selectedRunStatus.slice(1),
- },
- {
- label: "Schedule",
- value: humanizeCronExpression(schedule.cron),
- },
- {
- label: "Next run",
- value: formatScheduleTime(schedule.next_run_time, userTimezone),
- },
- ];
- }, [schedule, selectedRunStatus, userTimezone]);
-
- const agentRunInputs: Record<
- string,
- { title?: string; /* type: BlockIOSubType; */ value: any }
- > = useMemo(() => {
- // TODO: show (link to) preset - https://github.com/Significant-Gravitas/AutoGPT/issues/9168
-
- // Add type info from agent input schema
- return Object.fromEntries(
- Object.entries(schedule.input_data).map(([k, v]) => [
- k,
- {
- title: graph.input_schema.properties[k].title,
- /* TODO: type: agent.input_schema.properties[k].type */
- value: v,
- },
- ]),
- );
- }, [graph, schedule]);
-
- const runNow = useCallback(
- () =>
- api
- .executeGraph(
- graph.id,
- graph.version,
- schedule.input_data,
- schedule.input_credentials,
- "library",
- )
- .then((run) => onForcedRun(run.id))
- .catch(toastOnFail("execute agent")),
- [api, graph, schedule, onForcedRun, toastOnFail],
- );
-
- const runActions: ButtonAction[] = useMemo(
- () => [
- {
- label: (
- <>
-
- Run now
- >
- ),
- callback: runNow,
- },
- {
- label: (
- <>
-
- Delete schedule
- >
- ),
- callback: () => doDeleteSchedule(schedule.id),
- variant: "destructive",
- },
- ],
- [runNow],
- );
-
- return (
-
-
-
-
- Info
-
-
-
-
- {infoStats.map(({ label, value }) => (
-
- ))}
-
-
-
-
-
-
- Input
-
-
- {agentRunInputs !== undefined ? (
- Object.entries(agentRunInputs).map(([key, { title, value }]) => (
-
- {title || key}
-
-
- ))
- ) : (
-
- )}
-
-
-
-
- {/* Run / Agent Actions */}
-
-
- );
-}
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/create-preset-dialog.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/create-preset-dialog.tsx
deleted file mode 100644
index 2ca64d5ec5..0000000000
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/create-preset-dialog.tsx
+++ /dev/null
@@ -1,100 +0,0 @@
-"use client";
-
-import React, { useState } from "react";
-import { Button } from "@/components/__legacy__/ui/button";
-import {
- Dialog,
- DialogContent,
- DialogDescription,
- DialogFooter,
- DialogHeader,
- DialogTitle,
-} from "@/components/__legacy__/ui/dialog";
-import { Input } from "@/components/__legacy__/ui/input";
-import { Textarea } from "@/components/__legacy__/ui/textarea";
-
-interface CreatePresetDialogProps {
- open: boolean;
- onOpenChange: (open: boolean) => void;
- onConfirm: (name: string, description: string) => Promise | void;
-}
-
-export function CreatePresetDialog({
- open,
- onOpenChange,
- onConfirm,
-}: CreatePresetDialogProps) {
- const [name, setName] = useState("");
- const [description, setDescription] = useState("");
-
- const handleSubmit = async () => {
- if (name.trim()) {
- await onConfirm(name.trim(), description.trim());
- setName("");
- setDescription("");
- onOpenChange(false);
- }
- };
-
- const handleCancel = () => {
- setName("");
- setDescription("");
- onOpenChange(false);
- };
-
- const handleKeyDown = (e: React.KeyboardEvent) => {
- if (e.key === "Enter" && (e.metaKey || e.ctrlKey)) {
- e.preventDefault();
- handleSubmit();
- }
- };
-
- return (
-
-
-
- Create Preset
-
- Give your preset a name and description to help identify it later.
-
-
-
-
-
- Name *
-
- setName(e.target.value)}
- onKeyDown={handleKeyDown}
- autoFocus
- />
-
-
-
- Description
-
-
-
-
-
- Cancel
-
-
- Create Preset
-
-
-
-
- );
-}
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/use-agent-runs.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/use-agent-runs.ts
deleted file mode 100644
index c74a37e6d0..0000000000
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/use-agent-runs.ts
+++ /dev/null
@@ -1,210 +0,0 @@
-import {
- GraphExecutionMeta as LegacyGraphExecutionMeta,
- GraphID,
- GraphExecutionID,
-} from "@/lib/autogpt-server-api";
-import { getQueryClient } from "@/lib/react-query/queryClient";
-import {
- getPaginatedTotalCount,
- getPaginationNextPageNumber,
- unpaginate,
-} from "@/app/api/helpers";
-import {
- getV1ListGraphExecutionsResponse,
- getV1ListGraphExecutionsResponse200,
- useGetV1ListGraphExecutionsInfinite,
-} from "@/app/api/__generated__/endpoints/graphs/graphs";
-import { GraphExecutionsPaginated } from "@/app/api/__generated__/models/graphExecutionsPaginated";
-import { GraphExecutionMeta as RawGraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta";
-
-export type GraphExecutionMeta = Omit<
- RawGraphExecutionMeta,
- "id" | "user_id" | "graph_id" | "preset_id" | "stats"
-> &
- Pick<
- LegacyGraphExecutionMeta,
- "id" | "user_id" | "graph_id" | "preset_id" | "stats"
- >;
-
-/** Hook to fetch runs for a specific graph, with support for infinite scroll.
- *
- * @param graphID - The ID of the graph to fetch agent runs for. This parameter is
- * optional in the sense that the hook doesn't run unless it is passed.
- * This way, it can be used in components where the graph ID is not
- * immediately available.
- */
-export const useAgentRunsInfinite = (graphID?: GraphID) => {
- const queryClient = getQueryClient();
- const {
- data: queryResults,
- refetch: refetchRuns,
- isPending: agentRunsLoading,
- isRefetching: agentRunsReloading,
- hasNextPage: hasMoreRuns,
- fetchNextPage: fetchMoreRuns,
- isFetchingNextPage: isFetchingMoreRuns,
- queryKey,
- } = useGetV1ListGraphExecutionsInfinite(
- graphID!,
- { page: 1, page_size: 20 },
- {
- query: {
- getNextPageParam: getPaginationNextPageNumber,
-
- // Prevent query from running if graphID is not available (yet)
- ...(!graphID
- ? {
- enabled: false,
- queryFn: () =>
- // Fake empty response if graphID is not available (yet)
- Promise.resolve({
- status: 200,
- data: {
- executions: [],
- pagination: {
- current_page: 1,
- page_size: 20,
- total_items: 0,
- total_pages: 0,
- },
- },
- headers: new Headers(),
- } satisfies getV1ListGraphExecutionsResponse),
- }
- : {}),
- },
- },
- queryClient,
- );
-
- const agentRuns = queryResults ? unpaginate(queryResults, "executions") : [];
- const agentRunCount = getPaginatedTotalCount(queryResults);
-
- const upsertAgentRun = (newAgentRun: GraphExecutionMeta) => {
- queryClient.setQueryData(
- queryKey,
- (currentQueryData: typeof queryResults) => {
- if (!currentQueryData?.pages || agentRunCount === undefined)
- return currentQueryData;
-
- const exists = currentQueryData.pages.some((page) => {
- if (page.status !== 200) return false;
-
- const response = page.data;
- return response.executions.some((run) => run.id === newAgentRun.id);
- });
- if (exists) {
- // If the run already exists, we update it
- return {
- ...currentQueryData,
- pages: currentQueryData.pages.map((page) => {
- if (page.status !== 200) return page;
- const response = page.data;
- const executions = response.executions;
-
- const index = executions.findIndex(
- (run) => run.id === newAgentRun.id,
- );
- if (index === -1) return page;
-
- const newExecutions = [...executions];
- newExecutions[index] = newAgentRun;
-
- return {
- ...page,
- data: {
- ...response,
- executions: newExecutions,
- },
- } satisfies getV1ListGraphExecutionsResponse;
- }),
- };
- }
-
- // If the run does not exist, we add it to the first page
- const page = currentQueryData
- .pages[0] as getV1ListGraphExecutionsResponse200 & {
- headers: Headers;
- };
- const updatedExecutions = [newAgentRun, ...page.data.executions];
- const updatedPage = {
- ...page,
- data: {
- ...page.data,
- executions: updatedExecutions,
- },
- } satisfies getV1ListGraphExecutionsResponse;
- const updatedPages = [updatedPage, ...currentQueryData.pages.slice(1)];
- return {
- ...currentQueryData,
- pages: updatedPages.map(
- // Increment the total runs count in the pagination info of all pages
- (page) =>
- page.status === 200
- ? {
- ...page,
- data: {
- ...page.data,
- pagination: {
- ...page.data.pagination,
- total_items: agentRunCount + 1,
- },
- },
- }
- : page,
- ),
- };
- },
- );
- };
-
- const removeAgentRun = (runID: GraphExecutionID) => {
- queryClient.setQueryData(
- [queryKey, { page: 1, page_size: 20 }],
- (currentQueryData: typeof queryResults) => {
- if (!currentQueryData?.pages) return currentQueryData;
-
- let found = false;
- return {
- ...currentQueryData,
- pages: currentQueryData.pages.map((page) => {
- const response = page.data as GraphExecutionsPaginated;
- const filteredExecutions = response.executions.filter(
- (run) => run.id !== runID,
- );
- if (filteredExecutions.length < response.executions.length) {
- found = true;
- }
-
- return {
- ...page,
- data: {
- ...response,
- executions: filteredExecutions,
- pagination: {
- ...response.pagination,
- total_items:
- response.pagination.total_items - (found ? 1 : 0),
- },
- },
- };
- }),
- };
- },
- );
- };
-
- return {
- agentRuns: agentRuns as GraphExecutionMeta[],
- refetchRuns,
- agentRunCount,
- agentRunsLoading: agentRunsLoading || agentRunsReloading,
- hasMoreRuns,
- fetchMoreRuns,
- isFetchingMoreRuns,
- upsertAgentRun,
- removeAgentRun,
- };
-};
-
-export type AgentRunsQuery = ReturnType;
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/legacy/[id]/page.tsx b/autogpt_platform/frontend/src/app/(platform)/library/legacy/[id]/page.tsx
deleted file mode 100644
index 6c2537725c..0000000000
--- a/autogpt_platform/frontend/src/app/(platform)/library/legacy/[id]/page.tsx
+++ /dev/null
@@ -1,7 +0,0 @@
-"use client";
-
-import { OldAgentLibraryView } from "../../agents/[id]/components/OldAgentLibraryView/OldAgentLibraryView";
-
-export default function OldAgentLibraryPage() {
- return ;
-}
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/cron-scheduler-dialog.tsx b/autogpt_platform/frontend/src/components/contextual/CronScheduler/cron-scheduler-dialog.tsx
similarity index 97%
rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/cron-scheduler-dialog.tsx
rename to autogpt_platform/frontend/src/components/contextual/CronScheduler/cron-scheduler-dialog.tsx
index 30c3e7d777..4da59b0358 100644
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/cron-scheduler-dialog.tsx
+++ b/autogpt_platform/frontend/src/components/contextual/CronScheduler/cron-scheduler-dialog.tsx
@@ -2,7 +2,7 @@ import { useEffect, useState } from "react";
import { Input } from "@/components/__legacy__/ui/input";
import { Button } from "@/components/__legacy__/ui/button";
import { useToast } from "@/components/molecules/Toast/use-toast";
-import { CronScheduler } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/cron-scheduler";
+import { CronScheduler } from "@/components/contextual/CronScheduler/cron-scheduler";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { getTimezoneDisplayName } from "@/lib/timezone-utils";
import { useUserTimezone } from "@/lib/hooks/useUserTimezone";
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/cron-scheduler.tsx b/autogpt_platform/frontend/src/components/contextual/CronScheduler/cron-scheduler.tsx
similarity index 100%
rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/cron-scheduler.tsx
rename to autogpt_platform/frontend/src/components/contextual/CronScheduler/cron-scheduler.tsx
diff --git a/autogpt_platform/frontend/src/components/contextual/PublishAgentModal/components/AgentInfoStep/AgentInfoStep.tsx b/autogpt_platform/frontend/src/components/contextual/PublishAgentModal/components/AgentInfoStep/AgentInfoStep.tsx
index 7cd6b25d91..199801aff4 100644
--- a/autogpt_platform/frontend/src/components/contextual/PublishAgentModal/components/AgentInfoStep/AgentInfoStep.tsx
+++ b/autogpt_platform/frontend/src/components/contextual/PublishAgentModal/components/AgentInfoStep/AgentInfoStep.tsx
@@ -1,6 +1,6 @@
"use client";
-import { CronExpressionDialog } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/cron-scheduler-dialog";
+import { CronExpressionDialog } from "@/components/contextual/CronScheduler/cron-scheduler-dialog";
import { Form, FormField } from "@/components/__legacy__/ui/form";
import { Button } from "@/components/atoms/Button/Button";
import { Input } from "@/components/atoms/Input/Input";
diff --git a/autogpt_platform/frontend/src/components/renderers/InputRenderer/base/object/WrapIfAdditionalTemplate.tsx b/autogpt_platform/frontend/src/components/renderers/InputRenderer/base/object/WrapIfAdditionalTemplate.tsx
index 97478e9eaf..a8b3514d41 100644
--- a/autogpt_platform/frontend/src/components/renderers/InputRenderer/base/object/WrapIfAdditionalTemplate.tsx
+++ b/autogpt_platform/frontend/src/components/renderers/InputRenderer/base/object/WrapIfAdditionalTemplate.tsx
@@ -80,7 +80,7 @@ export default function WrapIfAdditionalTemplate(
uiSchema={uiSchema}
/>
{!isHandleConnected && (
-
+
@@ -651,6 +651,8 @@ The block includes validation to ensure each item is actually a list. If a non-l
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| lists | A list of lists to concatenate together. All lists will be combined in order into a single list. | List[List[Any]] | Yes |
+| deduplicate | If True, remove duplicate elements from the concatenated result while preserving order. | bool | No |
+| remove_none | If True, remove None values from the concatenated result. | bool | No |
### Outputs
@@ -658,6 +660,7 @@ The block includes validation to ensure each item is actually a list. If a non-l
|--------|-------------|------|
| error | Error message if concatenation failed due to invalid input types. | str |
| concatenated_list | The concatenated list containing all elements from all input lists in order. | List[Any] |
+| length | The total number of elements in the concatenated list. | int |
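+
+A minimal sketch of how the deduplicate and remove_none flags could be applied to the combined result (illustrative only, not the block's actual implementation; deduplication here assumes hashable elements):
+
+```python
+from typing import Any, List
+
+
+def concatenate(
+    lists: List[List[Any]], deduplicate: bool = False, remove_none: bool = False
+) -> List[Any]:
+    """Combine the lists in order, then optionally drop None values and duplicates."""
+    result: List[Any] = []
+    for lst in lists:
+        result.extend(lst)
+    if remove_none:
+        result = [item for item in result if item is not None]
+    if deduplicate:
+        seen: set = set()
+        deduped: List[Any] = []
+        for item in result:
+            if item not in seen:  # keep only the first occurrence, preserving order
+                seen.add(item)
+                deduped.append(item)
+        result = deduped
+    return result
+
+
+# concatenate([[1, 2, None], [2, 3]], deduplicate=True, remove_none=True) -> [1, 2, 3]
+# len(...) of the result corresponds to the separate length output.
+```
+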
### Possible use case
@@ -820,6 +823,45 @@ This enables conditional logic based on list membership and helps locate items f
---
+## Flatten List
+
+### What it is
+Flattens a nested list structure into a single flat list, with a configurable maximum flattening depth.
+
+### How it works
+
+This block recursively traverses a nested list and extracts all leaf elements into a single flat list. You can control how deep the flattening goes with the max_depth parameter: set it to -1 to flatten completely, or to a positive integer to flatten only that many levels.
+
+The block also reports the original nesting depth of the input, which is useful for understanding the structure of data coming from sources with varying levels of nesting.
+
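+The core recursion can be sketched roughly as follows (a minimal illustration, not the block's actual code; the depth convention shown for original_depth is an assumption):
+
+```python
+from typing import Any, List
+
+
+def flatten(nested: List[Any], max_depth: int = -1) -> List[Any]:
+    """Flatten one level per recursive step until max_depth is exhausted (-1 = no limit)."""
+    result: List[Any] = []
+    for item in nested:
+        if isinstance(item, list) and max_depth != 0:
+            # Each recursive step consumes one level of allowed depth.
+            result.extend(flatten(item, max_depth - 1 if max_depth > 0 else -1))
+        else:
+            result.append(item)
+    return result
+
+
+def nesting_depth(value: Any) -> int:
+    """One possible convention for original_depth: a flat list has depth 1."""
+    if not isinstance(value, list):
+        return 0
+    return 1 + max((nesting_depth(v) for v in value), default=0)
+
+
+# flatten([[1, [2, 3]], [4]]) -> [1, 2, 3, 4]
+# flatten([[1, [2, 3]], [4]], max_depth=1) -> [1, [2, 3], 4]
+# nesting_depth([[1, [2, 3]], [4]]) -> 3
+```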
+
+### Inputs
+
+| Input | Description | Type | Required |
+|-------|-------------|------|----------|
+| nested_list | A potentially nested list to flatten into a single-level list. | List[Any] | Yes |
+| max_depth | Maximum depth to flatten. -1 means flatten completely. 1 means flatten only one level. | int | No |
+
+### Outputs
+
+| Output | Description | Type |
+|--------|-------------|------|
+| error | Error message if flattening failed. | str |
+| flattened_list | The flattened list with all nested elements extracted. | List[Any] |
+| length | The number of elements in the flattened list. | int |
+| original_depth | The maximum nesting depth of the original input list. | int |
+
+### Possible use case
+
+**Normalizing API Responses**: Flatten nested JSON arrays from different API endpoints into a uniform single-level list for consistent processing.
+
+**Aggregating Nested Results**: Combine results from recursive file searches or nested category trees into a flat list of items for display or export.
+
+**Data Pipeline Cleanup**: Simplify deeply nested data structures from multiple transformation steps into a clean flat list before final output.
+
+
+---
+
## Get All Memories
### What it is
@@ -1012,6 +1054,120 @@ This enables human oversight at critical points in automated workflows, ensuring
---
+## Interleave Lists
+
+### What it is
+Interleaves elements from multiple lists in round-robin fashion, alternating between sources.
+
+### How it works
+
+This block takes elements from each input list in round-robin order, picking one element from each list in turn. For example, given `[[1, 2, 3], ['a', 'b', 'c']]`, it produces `[1, 'a', 2, 'b', 3, 'c']`.
+
+When lists have different lengths, shorter lists stop contributing once exhausted, and remaining elements from longer lists continue to be added in order.
+
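+A minimal sketch of the round-robin pass (illustrative, not the block's actual code):
+
+```python
+from typing import Any, List
+
+
+def interleave(lists: List[List[Any]]) -> List[Any]:
+    """Take the i-th element of each list per round; exhausted lists simply drop out."""
+    result: List[Any] = []
+    longest = max((len(lst) for lst in lists), default=0)
+    for i in range(longest):
+        for lst in lists:
+            if i < len(lst):
+                result.append(lst[i])
+    return result
+
+
+# interleave([[1, 2, 3], ["a", "b"]]) -> [1, "a", 2, "b", 3]
+```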
+
+### Inputs
+
+| Input | Description | Type | Required |
+|-------|-------------|------|----------|
+| lists | A list of lists to interleave. Elements will be taken in round-robin order. | List[List[Any]] | Yes |
+
+### Outputs
+
+| Output | Description | Type |
+|--------|-------------|------|
+| error | Error message if interleaving failed. | str |
+| interleaved_list | The interleaved list with elements alternating from each input list. | List[Any] |
+| length | The total number of elements in the interleaved list. | int |
+
+### Possible use case
+
+**Balanced Content Mixing**: Alternate between content from different sources (e.g., mixing promotional and organic posts) for a balanced feed.
+
+**Round-Robin Scheduling**: Distribute tasks evenly across workers or queues by interleaving items from separate task lists.
+
+**Multi-Language Output**: Weave together translated text segments with their original counterparts for side-by-side comparison.
+
+
+---
+
+## List Difference
+
+### What it is
+Computes the difference between two lists: elements in the first list that are not in the second, or optionally the symmetric difference.
+
+### How it works
+
+This block compares two lists and returns elements from list_a that do not appear in list_b. It uses hash-based lookup for efficient comparison. When symmetric mode is enabled, it returns elements that are in either list but not in both.
+
+The order of elements from list_a is preserved in the output; when symmetric difference is enabled, elements unique to list_b are appended after them.
+
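+A minimal sketch of both modes (illustrative only; it assumes hashable elements for the set lookups):
+
+```python
+from typing import Any, List
+
+
+def list_difference(
+    list_a: List[Any], list_b: List[Any], symmetric: bool = False
+) -> List[Any]:
+    """Elements of list_a not in list_b; with symmetric=True, items unique to list_b follow."""
+    set_b = set(list_b)  # hash list_b once for fast membership checks
+    result = [item for item in list_a if item not in set_b]
+    if symmetric:
+        set_a = set(list_a)
+        result += [item for item in list_b if item not in set_a]
+    return result
+
+
+# list_difference([1, 2, 3], [2, 4]) -> [1, 3]
+# list_difference([1, 2, 3], [2, 4], symmetric=True) -> [1, 3, 4]
+```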
+
+### Inputs
+
+| Input | Description | Type | Required |
+|-------|-------------|------|----------|
+| list_a | The primary list to check elements from. | List[Any] | Yes |
+| list_b | The list to subtract. Elements found here will be removed from list_a. | List[Any] | Yes |
+| symmetric | If True, compute symmetric difference (elements in either list but not both). | bool | No |
+
+### Outputs
+
+| Output | Description | Type |
+|--------|-------------|------|
+| error | Error message if the operation failed. | str |
+| difference | Elements from list_a not found in list_b (or symmetric difference if enabled). | List[Any] |
+| length | The number of elements in the difference result. | int |
+
+### Possible use case
+
+**Change Detection**: Compare a current list of records against a previous snapshot to find newly added or removed items.
+
+**Exclusion Filtering**: Remove items from a list that appear in a blocklist or already-processed list to avoid duplicates.
+
+**Data Sync**: Identify which items exist in one system but not another to determine what needs to be synced.
+
+
+---
+
+## List Intersection
+
+### What it is
+Computes the intersection of two lists, returning only elements present in both.
+
+### How it works
+
+This block finds elements that appear in both input lists by hashing elements from list_b for efficient lookup, then checking each element of list_a against that set. The output preserves the order from list_a and removes duplicates.
+
+This is useful for finding common items between two datasets without needing to manually iterate or compare.
+
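+A minimal sketch, assuming hashable elements (names are illustrative, not the block's code):
+
+```python
+from typing import Any, List
+
+
+def list_intersection(list_a: List[Any], list_b: List[Any]) -> List[Any]:
+    """Elements present in both lists, in list_a order, without duplicates."""
+    set_b = set(list_b)  # hash list_b once for O(1) membership checks
+    seen: set = set()
+    result: List[Any] = []
+    for item in list_a:
+        if item in set_b and item not in seen:
+            seen.add(item)
+            result.append(item)
+    return result
+
+
+# list_intersection([3, 1, 2, 1], [1, 2, 5]) -> [1, 2]
+```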
+
+### Inputs
+
+| Input | Description | Type | Required |
+|-------|-------------|------|----------|
+| list_a | The first list to intersect. | List[Any] | Yes |
+| list_b | The second list to intersect. | List[Any] | Yes |
+
+### Outputs
+
+| Output | Description | Type |
+|--------|-------------|------|
+| error | Error message if the operation failed. | str |
+| intersection | Elements present in both list_a and list_b. | List[Any] |
+| length | The number of elements in the intersection. | int |
+
+### Possible use case
+
+**Finding Common Tags**: Identify shared tags or categories between two items for recommendation or grouping purposes.
+
+**Mutual Connections**: Find users or contacts that appear in both of two different lists, such as shared friends or overlapping team members.
+
+**Feature Comparison**: Determine which features or capabilities are supported by both of two systems or products.
+
+
+---
+
## List Is Empty
### What it is
@@ -1452,3 +1608,42 @@ This makes XML data accessible using standard dictionary operations, allowing yo
---
+
+## Zip Lists
+
+### What it is
+Zips multiple lists together into a list of grouped elements, either padding to the longest list or truncating to the shortest.
+
+### How it works
+
+This block pairs up corresponding elements from multiple input lists into sub-lists. For example, zipping `[[1, 2, 3], ['a', 'b', 'c']]` produces `[[1, 'a'], [2, 'b'], [3, 'c']]`.
+
+By default, the result is truncated to the length of the shortest input list. Enable pad_to_longest to instead pad shorter lists with a fill_value so no elements from longer lists are lost.
+
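+Both modes map directly onto the standard library, as in this minimal sketch (illustrative only; fill_value stands in for the block's Fill Value input):
+
+```python
+from itertools import zip_longest
+from typing import Any, List
+
+
+def zip_lists(
+    lists: List[List[Any]], pad_to_longest: bool = False, fill_value: Any = None
+) -> List[List[Any]]:
+    """Group corresponding elements; truncate to the shortest list or pad to the longest."""
+    groups = zip_longest(*lists, fillvalue=fill_value) if pad_to_longest else zip(*lists)
+    return [list(group) for group in groups]
+
+
+# zip_lists([[1, 2, 3], ["a", "b"]]) -> [[1, "a"], [2, "b"]]
+# zip_lists([[1, 2, 3], ["a", "b"]], pad_to_longest=True) -> [[1, "a"], [2, "b"], [3, None]]
+```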
+
+### Inputs
+
+| Input | Description | Type | Required |
+|-------|-------------|------|----------|
+| lists | A list of lists to zip together. Corresponding elements will be grouped. | List[List[Any]] | Yes |
+| pad_to_longest | If True, pad shorter lists with fill_value to match the longest list. If False, truncate to shortest. | bool | No |
+| fill_value | Value to use for padding when pad_to_longest is True. | Fill Value | No |
+
+### Outputs
+
+| Output | Description | Type |
+|--------|-------------|------|
+| error | Error message if zipping failed. | str |
+| zipped_list | The zipped list of grouped elements. | List[List[Any]] |
+| length | The number of groups in the zipped result. | int |
+
+### Possible use case
+
+**Creating Key-Value Pairs**: Combine a list of field names with a list of values to build structured records or dictionaries.
+
+**Parallel Data Alignment**: Pair up corresponding items from separate data sources (e.g., names and email addresses) for processing together.
+
+**Table Row Construction**: Group column data into rows by zipping each column's values together for CSV export or display.
+
+
+---