Merge branch 'dev' into fix/launch-darkly-card

Ubbe
2025-12-18 17:59:49 +01:00
committed by GitHub
39 changed files with 2125 additions and 578 deletions


@@ -1,5 +1,5 @@
import logging
-from typing import Any, Literal
+from typing import Any
from prisma.enums import ReviewStatus
@@ -45,11 +45,11 @@ class HumanInTheLoopBlock(Block):
)
class Output(BlockSchemaOutput):
-reviewed_data: Any = SchemaField(
-description="The data after human review (may be modified)"
+approved_data: Any = SchemaField(
+description="The data when approved (may be modified by reviewer)"
)
-status: Literal["approved", "rejected"] = SchemaField(
-description="Status of the review: 'approved' or 'rejected'"
+rejected_data: Any = SchemaField(
+description="The data when rejected (may be modified by reviewer)"
)
review_message: str = SchemaField(
description="Any message provided by the reviewer", default=""
@@ -69,8 +69,7 @@ class HumanInTheLoopBlock(Block):
"editable": True,
},
test_output=[
("status", "approved"),
("reviewed_data", {"name": "John Doe", "age": 30}),
("approved_data", {"name": "John Doe", "age": 30}),
],
test_mock={
"get_or_create_human_review": lambda *_args, **_kwargs: ReviewResult(
@@ -116,8 +115,7 @@ class HumanInTheLoopBlock(Block):
logger.info(
f"HITL block skipping review for node {node_exec_id} - safe mode disabled"
)
yield "status", "approved"
yield "reviewed_data", input_data.data
yield "approved_data", input_data.data
yield "review_message", "Auto-approved (safe mode disabled)"
return
@@ -158,12 +156,11 @@ class HumanInTheLoopBlock(Block):
)
if result.status == ReviewStatus.APPROVED:
yield "status", "approved"
yield "reviewed_data", result.data
yield "approved_data", result.data
if result.message:
yield "review_message", result.message
elif result.status == ReviewStatus.REJECTED:
yield "status", "rejected"
yield "rejected_data", result.data
if result.message:
yield "review_message", result.message


@@ -1,8 +1,11 @@
import logging
import re
from collections import Counter
from concurrent.futures import Future
from typing import TYPE_CHECKING, Any
from pydantic import BaseModel
import backend.blocks.llm as llm
from backend.blocks.agent import AgentExecutorBlock
from backend.data.block import (
@@ -20,16 +23,41 @@ from backend.data.dynamic_fields import (
is_dynamic_field,
is_tool_pin,
)
from backend.data.execution import ExecutionContext
from backend.data.model import NodeExecutionStats, SchemaField
from backend.util import json
from backend.util.clients import get_database_manager_async_client
from backend.util.prompt import MAIN_OBJECTIVE_PREFIX
if TYPE_CHECKING:
from backend.data.graph import Link, Node
from backend.executor.manager import ExecutionProcessor
logger = logging.getLogger(__name__)
class ToolInfo(BaseModel):
"""Processed tool call information."""
tool_call: Any # The original tool call object from LLM response
tool_name: str # The function name
tool_def: dict[str, Any] # The tool definition from tool_functions
input_data: dict[str, Any] # Processed input data ready for tool execution
field_mapping: dict[str, str] # Field name mapping for the tool
class ExecutionParams(BaseModel):
"""Tool execution parameters."""
user_id: str
graph_id: str
node_id: str
graph_version: int
graph_exec_id: str
node_exec_id: str
execution_context: "ExecutionContext"
def _get_tool_requests(entry: dict[str, Any]) -> list[str]:
"""
Return a list of tool_call_ids if the entry is a tool request.
@@ -105,6 +133,50 @@ def _create_tool_response(call_id: str, output: Any) -> dict[str, Any]:
return {"role": "tool", "tool_call_id": call_id, "content": content}
def _combine_tool_responses(tool_outputs: list[dict[str, Any]]) -> list[dict[str, Any]]:
"""
Combine multiple Anthropic tool responses into a single user message.
For non-Anthropic formats, returns the original list unchanged.
"""
if len(tool_outputs) <= 1:
return tool_outputs
# Anthropic responses have role="user", type="message", and content is a list with tool_result items
anthropic_responses = [
output
for output in tool_outputs
if (
output.get("role") == "user"
and output.get("type") == "message"
and isinstance(output.get("content"), list)
and any(
item.get("type") == "tool_result"
for item in output.get("content", [])
if isinstance(item, dict)
)
)
]
if len(anthropic_responses) > 1:
combined_content = [
item for response in anthropic_responses for item in response["content"]
]
combined_response = {
"role": "user",
"type": "message",
"content": combined_content,
}
non_anthropic_responses = [
output for output in tool_outputs if output not in anthropic_responses
]
return [combined_response] + non_anthropic_responses
return tool_outputs
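
For reference, a minimal sketch of how _combine_tool_responses behaves (the message shapes are illustrative examples, not fixtures from this commit):

# Two Anthropic-style tool results plus one OpenAI-style tool message (hypothetical data)
anthropic_a = {
    "role": "user",
    "type": "message",
    "content": [{"type": "tool_result", "tool_use_id": "a", "content": "1"}],
}
anthropic_b = {
    "role": "user",
    "type": "message",
    "content": [{"type": "tool_result", "tool_use_id": "b", "content": "2"}],
}
openai_tool = {"role": "tool", "tool_call_id": "c", "content": "3"}

combined = _combine_tool_responses([anthropic_a, anthropic_b, openai_tool])
assert len(combined) == 2  # both tool_result items merged into one user message
assert len(combined[0]["content"]) == 2  # the OpenAI-style entry passes through unchanged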
def _convert_raw_response_to_dict(raw_response: Any) -> dict[str, Any]:
"""
Safely convert raw_response to dictionary format for conversation history.
@@ -204,6 +276,17 @@ class SmartDecisionMakerBlock(Block):
default="localhost:11434",
description="Ollama host for local models",
)
agent_mode_max_iterations: int = SchemaField(
title="Agent Mode Max Iterations",
description="Maximum iterations for agent mode. 0 = traditional mode (single LLM call, yield tool calls for external execution), -1 = infinite agent mode (loop until finished), 1+ = agent mode with max iterations limit.",
advanced=True,
default=0,
)
conversation_compaction: bool = SchemaField(
default=True,
title="Context window auto-compaction",
description="Automatically compact the context window once it hits the limit",
)
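
As a reading aid, a small sketch of how the agent_mode_max_iterations values map to behavior (illustrative helper, not code from this commit; the actual dispatch is the != 0 check in run and the while condition in _execute_tools_agent_mode):

def describe_mode(agent_mode_max_iterations: int) -> str:
    # 0 -> traditional mode: one LLM call, tool calls are yielded for external execution
    if agent_mode_max_iterations == 0:
        return "traditional"
    # -1 -> agent mode without an iteration cap: loop until the LLM stops calling tools
    if agent_mode_max_iterations < 0:
        return "agent (unbounded)"
    # N > 0 -> agent mode with at most N LLM iterations
    return f"agent (max {agent_mode_max_iterations} iterations)"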
@classmethod
def get_missing_links(cls, data: BlockInput, links: list["Link"]) -> set[str]:
@@ -506,6 +589,7 @@ class SmartDecisionMakerBlock(Block):
Returns the response if successful, raises ValueError if validation fails.
"""
resp = await llm.llm_call(
compress_prompt_to_fit=input_data.conversation_compaction,
credentials=credentials,
llm_model=input_data.model,
prompt=current_prompt,
@@ -593,6 +677,291 @@ class SmartDecisionMakerBlock(Block):
return resp
def _process_tool_calls(
self, response, tool_functions: list[dict[str, Any]]
) -> list[ToolInfo]:
"""Process tool calls and extract tool definitions, arguments, and input data.
Returns a list of ToolInfo entries with:
- tool_call: The original tool call object
- tool_name: The function name
- tool_def: The tool definition from tool_functions
- input_data: Processed input data dict (includes None values)
- field_mapping: Field name mapping for the tool
"""
if not response.tool_calls:
return []
processed_tools = []
for tool_call in response.tool_calls:
tool_name = tool_call.function.name
tool_args = json.loads(tool_call.function.arguments)
tool_def = next(
(
tool
for tool in tool_functions
if tool["function"]["name"] == tool_name
),
None,
)
if not tool_def:
if len(tool_functions) == 1:
tool_def = tool_functions[0]
else:
continue
# Build input data for the tool
input_data = {}
field_mapping = tool_def["function"].get("_field_mapping", {})
if "function" in tool_def and "parameters" in tool_def["function"]:
expected_args = tool_def["function"]["parameters"].get("properties", {})
for clean_arg_name in expected_args:
original_field_name = field_mapping.get(
clean_arg_name, clean_arg_name
)
arg_value = tool_args.get(clean_arg_name)
# Include all expected parameters, even if None (for backward compatibility with tests)
input_data[original_field_name] = arg_value
processed_tools.append(
ToolInfo(
tool_call=tool_call,
tool_name=tool_name,
tool_def=tool_def,
input_data=input_data,
field_mapping=field_mapping,
)
)
return processed_tools
def _update_conversation(
self, prompt: list[dict], response, tool_outputs: list | None = None
):
"""Update conversation history with response and tool outputs."""
# Don't add separate reasoning message with tool calls (breaks Anthropic's tool_use->tool_result pairing)
assistant_message = _convert_raw_response_to_dict(response.raw_response)
has_tool_calls = isinstance(assistant_message.get("content"), list) and any(
item.get("type") == "tool_use"
for item in assistant_message.get("content", [])
)
if response.reasoning and not has_tool_calls:
prompt.append(
{"role": "assistant", "content": f"[Reasoning]: {response.reasoning}"}
)
prompt.append(assistant_message)
if tool_outputs:
prompt.extend(tool_outputs)
async def _execute_single_tool_with_manager(
self,
tool_info: ToolInfo,
execution_params: ExecutionParams,
execution_processor: "ExecutionProcessor",
) -> dict:
"""Execute a single tool using the execution manager for proper integration."""
# Lazy imports to avoid circular dependencies
from backend.data.execution import NodeExecutionEntry
tool_call = tool_info.tool_call
tool_def = tool_info.tool_def
raw_input_data = tool_info.input_data
# Get sink node and field mapping
sink_node_id = tool_def["function"]["_sink_node_id"]
# Use proper database operations for tool execution
db_client = get_database_manager_async_client()
# Get target node
target_node = await db_client.get_node(sink_node_id)
if not target_node:
raise ValueError(f"Target node {sink_node_id} not found")
# Create proper node execution using upsert_execution_input
node_exec_result = None
final_input_data = None
# Add all inputs to the execution
if not raw_input_data:
raise ValueError(f"Tool call has no input data: {tool_call}")
for input_name, input_value in raw_input_data.items():
node_exec_result, final_input_data = await db_client.upsert_execution_input(
node_id=sink_node_id,
graph_exec_id=execution_params.graph_exec_id,
input_name=input_name,
input_data=input_value,
)
assert node_exec_result is not None, "node_exec_result should not be None"
# Create NodeExecutionEntry for execution manager
node_exec_entry = NodeExecutionEntry(
user_id=execution_params.user_id,
graph_exec_id=execution_params.graph_exec_id,
graph_id=execution_params.graph_id,
graph_version=execution_params.graph_version,
node_exec_id=node_exec_result.node_exec_id,
node_id=sink_node_id,
block_id=target_node.block_id,
inputs=final_input_data or {},
execution_context=execution_params.execution_context,
)
# Use the execution manager to execute the tool node
try:
# Get NodeExecutionProgress from the execution manager's running nodes
node_exec_progress = execution_processor.running_node_execution[
sink_node_id
]
# Use the execution manager's own graph stats
graph_stats_pair = (
execution_processor.execution_stats,
execution_processor.execution_stats_lock,
)
# Create a completed future for the task tracking system
node_exec_future = Future()
node_exec_progress.add_task(
node_exec_id=node_exec_result.node_exec_id,
task=node_exec_future,
)
# Execute the node directly since we're in the SmartDecisionMaker context
node_exec_future.set_result(
await execution_processor.on_node_execution(
node_exec=node_exec_entry,
node_exec_progress=node_exec_progress,
nodes_input_masks=None,
graph_stats_pair=graph_stats_pair,
)
)
# Get outputs from database after execution completes using database manager client
node_outputs = await db_client.get_execution_outputs_by_node_exec_id(
node_exec_result.node_exec_id
)
# Create tool response
tool_response_content = (
json.dumps(node_outputs)
if node_outputs
else "Tool executed successfully"
)
return _create_tool_response(tool_call.id, tool_response_content)
except Exception as e:
logger.error(f"Tool execution with manager failed: {e}")
# Return error response
return _create_tool_response(
tool_call.id, f"Tool execution failed: {str(e)}"
)
async def _execute_tools_agent_mode(
self,
input_data,
credentials,
tool_functions: list[dict[str, Any]],
prompt: list[dict],
graph_exec_id: str,
node_id: str,
node_exec_id: str,
user_id: str,
graph_id: str,
graph_version: int,
execution_context: ExecutionContext,
execution_processor: "ExecutionProcessor",
):
"""Execute tools in agent mode with a loop until finished."""
max_iterations = input_data.agent_mode_max_iterations
iteration = 0
# Execution parameters for tool execution
execution_params = ExecutionParams(
user_id=user_id,
graph_id=graph_id,
node_id=node_id,
graph_version=graph_version,
graph_exec_id=graph_exec_id,
node_exec_id=node_exec_id,
execution_context=execution_context,
)
current_prompt = list(prompt)
while max_iterations < 0 or iteration < max_iterations:
iteration += 1
logger.debug(f"Agent mode iteration {iteration}")
# Prepare prompt for this iteration
iteration_prompt = list(current_prompt)
# On the last iteration, add a special system message to encourage completion
if max_iterations > 0 and iteration == max_iterations:
last_iteration_message = {
"role": "system",
"content": f"{MAIN_OBJECTIVE_PREFIX}This is your last iteration ({iteration}/{max_iterations}). "
"Try to complete the task with the information you have. If you cannot fully complete it, "
"provide a summary of what you've accomplished and what remains to be done. "
"Prefer finishing with a clear response rather than making additional tool calls.",
}
iteration_prompt.append(last_iteration_message)
# Get LLM response
try:
response = await self._attempt_llm_call_with_validation(
credentials, input_data, iteration_prompt, tool_functions
)
except Exception as e:
yield "error", f"LLM call failed in agent mode iteration {iteration}: {str(e)}"
return
# Process tool calls
processed_tools = self._process_tool_calls(response, tool_functions)
# If no tool calls, we're done
if not processed_tools:
yield "finished", response.response
self._update_conversation(current_prompt, response)
yield "conversations", current_prompt
return
# Execute tools and collect responses
tool_outputs = []
for tool_info in processed_tools:
try:
tool_response = await self._execute_single_tool_with_manager(
tool_info, execution_params, execution_processor
)
tool_outputs.append(tool_response)
except Exception as e:
logger.error(f"Tool execution failed: {e}")
# Create error response for the tool
error_response = _create_tool_response(
tool_info.tool_call.id, f"Error: {str(e)}"
)
tool_outputs.append(error_response)
tool_outputs = _combine_tool_responses(tool_outputs)
self._update_conversation(current_prompt, response, tool_outputs)
# Yield intermediate conversation state
yield "conversations", current_prompt
# If we reach max iterations, yield the current state
if max_iterations < 0:
yield "finished", f"Agent mode completed after {iteration} iterations"
else:
yield "finished", f"Agent mode completed after {max_iterations} iterations (limit reached)"
yield "conversations", current_prompt
async def run(
self,
input_data: Input,
@@ -603,8 +972,12 @@ class SmartDecisionMakerBlock(Block):
graph_exec_id: str,
node_exec_id: str,
user_id: str,
graph_version: int,
execution_context: ExecutionContext,
execution_processor: "ExecutionProcessor",
**kwargs,
) -> BlockOutput:
tool_functions = await self._create_tool_node_signatures(node_id)
yield "tool_functions", json.dumps(tool_functions)
@@ -648,24 +1021,52 @@ class SmartDecisionMakerBlock(Block):
input_data.prompt = llm.fmt.format_string(input_data.prompt, values)
input_data.sys_prompt = llm.fmt.format_string(input_data.sys_prompt, values)
prefix = "[Main Objective Prompt]: "
if input_data.sys_prompt and not any(
p["role"] == "system" and p["content"].startswith(prefix) for p in prompt
p["role"] == "system" and p["content"].startswith(MAIN_OBJECTIVE_PREFIX)
for p in prompt
):
prompt.append({"role": "system", "content": prefix + input_data.sys_prompt})
prompt.append(
{
"role": "system",
"content": MAIN_OBJECTIVE_PREFIX + input_data.sys_prompt,
}
)
if input_data.prompt and not any(
p["role"] == "user" and p["content"].startswith(prefix) for p in prompt
p["role"] == "user" and p["content"].startswith(MAIN_OBJECTIVE_PREFIX)
for p in prompt
):
prompt.append({"role": "user", "content": prefix + input_data.prompt})
prompt.append(
{"role": "user", "content": MAIN_OBJECTIVE_PREFIX + input_data.prompt}
)
+# Execute tools based on the selected mode
+if input_data.agent_mode_max_iterations != 0:
+# In agent mode, execute tools directly in a loop until finished
+async for result in self._execute_tools_agent_mode(
+input_data=input_data,
+credentials=credentials,
+tool_functions=tool_functions,
+prompt=prompt,
+graph_exec_id=graph_exec_id,
+node_id=node_id,
+node_exec_id=node_exec_id,
+user_id=user_id,
+graph_id=graph_id,
+graph_version=graph_version,
+execution_context=execution_context,
+execution_processor=execution_processor,
+):
+yield result
+return
+# One-off mode: single LLM call and yield tool calls for external execution
current_prompt = list(prompt)
max_attempts = max(1, int(input_data.retry))
response = None
last_error = None
-for attempt in range(max_attempts):
+for _ in range(max_attempts):
try:
response = await self._attempt_llm_call_with_validation(
credentials, input_data, current_prompt, tool_functions


@@ -1,7 +1,11 @@
import logging
import threading
from collections import defaultdict
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from backend.data.execution import ExecutionContext
from backend.data.model import ProviderName, User
from backend.server.model import CreateGraph
from backend.server.rest_api import AgentServer
@@ -17,10 +21,10 @@ async def create_graph(s: SpinTestServer, g, u: User):
async def create_credentials(s: SpinTestServer, u: User):
-import backend.blocks.llm as llm
+import backend.blocks.llm as llm_module
provider = ProviderName.OPENAI
-credentials = llm.TEST_CREDENTIALS
+credentials = llm_module.TEST_CREDENTIALS
return await s.agent_server.test_create_credentials(u.id, provider, credentials)
@@ -196,8 +200,6 @@ async def test_smart_decision_maker_function_signature(server: SpinTestServer):
@pytest.mark.asyncio
async def test_smart_decision_maker_tracks_llm_stats():
"""Test that SmartDecisionMakerBlock correctly tracks LLM usage stats."""
-from unittest.mock import MagicMock, patch
import backend.blocks.llm as llm_module
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
@@ -216,7 +218,6 @@ async def test_smart_decision_maker_tracks_llm_stats():
}
# Mock the _create_tool_node_signatures method to avoid database calls
-from unittest.mock import AsyncMock
with patch(
"backend.blocks.llm.llm_call",
@@ -234,10 +235,19 @@ async def test_smart_decision_maker_tracks_llm_stats():
prompt="Should I continue with this task?",
model=llm_module.LlmModel.GPT4O,
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
agent_mode_max_iterations=0,
)
# Execute the block
outputs = {}
# Create execution context
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
mock_execution_processor = MagicMock()
async for output_name, output_data in block.run(
input_data,
credentials=llm_module.TEST_CREDENTIALS,
@@ -246,6 +256,9 @@ async def test_smart_decision_maker_tracks_llm_stats():
graph_exec_id="test-exec-id",
node_exec_id="test-node-exec-id",
user_id="test-user-id",
graph_version=1,
execution_context=mock_execution_context,
execution_processor=mock_execution_processor,
):
outputs[output_name] = output_data
@@ -263,8 +276,6 @@ async def test_smart_decision_maker_tracks_llm_stats():
@pytest.mark.asyncio
async def test_smart_decision_maker_parameter_validation():
"""Test that SmartDecisionMakerBlock correctly validates tool call parameters."""
-from unittest.mock import MagicMock, patch
import backend.blocks.llm as llm_module
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
@@ -311,8 +322,6 @@ async def test_smart_decision_maker_parameter_validation():
mock_response_with_typo.reasoning = None
mock_response_with_typo.raw_response = {"role": "assistant", "content": None}
-from unittest.mock import AsyncMock
with patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
@@ -329,8 +338,17 @@ async def test_smart_decision_maker_parameter_validation():
model=llm_module.LlmModel.GPT4O,
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
retry=2, # Set retry to 2 for testing
agent_mode_max_iterations=0,
)
# Create execution context
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
mock_execution_processor = MagicMock()
# Should raise ValueError after retries due to typo'd parameter name
with pytest.raises(ValueError) as exc_info:
outputs = {}
@@ -342,6 +360,9 @@ async def test_smart_decision_maker_parameter_validation():
graph_exec_id="test-exec-id",
node_exec_id="test-node-exec-id",
user_id="test-user-id",
graph_version=1,
execution_context=mock_execution_context,
execution_processor=mock_execution_processor,
):
outputs[output_name] = output_data
@@ -368,8 +389,6 @@ async def test_smart_decision_maker_parameter_validation():
mock_response_missing_required.reasoning = None
mock_response_missing_required.raw_response = {"role": "assistant", "content": None}
-from unittest.mock import AsyncMock
with patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
@@ -385,8 +404,17 @@ async def test_smart_decision_maker_parameter_validation():
prompt="Search for keywords",
model=llm_module.LlmModel.GPT4O,
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
agent_mode_max_iterations=0,
)
# Create execution context
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
mock_execution_processor = MagicMock()
# Should raise ValueError due to missing required parameter
with pytest.raises(ValueError) as exc_info:
outputs = {}
@@ -398,6 +426,9 @@ async def test_smart_decision_maker_parameter_validation():
graph_exec_id="test-exec-id",
node_exec_id="test-node-exec-id",
user_id="test-user-id",
graph_version=1,
execution_context=mock_execution_context,
execution_processor=mock_execution_processor,
):
outputs[output_name] = output_data
@@ -418,8 +449,6 @@ async def test_smart_decision_maker_parameter_validation():
mock_response_valid.reasoning = None
mock_response_valid.raw_response = {"role": "assistant", "content": None}
-from unittest.mock import AsyncMock
with patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
@@ -435,10 +464,19 @@ async def test_smart_decision_maker_parameter_validation():
prompt="Search for keywords",
model=llm_module.LlmModel.GPT4O,
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
agent_mode_max_iterations=0,
)
# Should succeed - optional parameter missing is OK
outputs = {}
# Create execution context
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
mock_execution_processor = MagicMock()
async for output_name, output_data in block.run(
input_data,
credentials=llm_module.TEST_CREDENTIALS,
@@ -447,6 +485,9 @@ async def test_smart_decision_maker_parameter_validation():
graph_exec_id="test-exec-id",
node_exec_id="test-node-exec-id",
user_id="test-user-id",
graph_version=1,
execution_context=mock_execution_context,
execution_processor=mock_execution_processor,
):
outputs[output_name] = output_data
@@ -472,8 +513,6 @@ async def test_smart_decision_maker_parameter_validation():
mock_response_all_params.reasoning = None
mock_response_all_params.raw_response = {"role": "assistant", "content": None}
-from unittest.mock import AsyncMock
with patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
@@ -489,10 +528,19 @@ async def test_smart_decision_maker_parameter_validation():
prompt="Search for keywords",
model=llm_module.LlmModel.GPT4O,
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
agent_mode_max_iterations=0,
)
# Should succeed with all parameters
outputs = {}
# Create execution context
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
mock_execution_processor = MagicMock()
async for output_name, output_data in block.run(
input_data,
credentials=llm_module.TEST_CREDENTIALS,
@@ -501,6 +549,9 @@ async def test_smart_decision_maker_parameter_validation():
graph_exec_id="test-exec-id",
node_exec_id="test-node-exec-id",
user_id="test-user-id",
graph_version=1,
execution_context=mock_execution_context,
execution_processor=mock_execution_processor,
):
outputs[output_name] = output_data
@@ -513,8 +564,6 @@ async def test_smart_decision_maker_parameter_validation():
@pytest.mark.asyncio
async def test_smart_decision_maker_raw_response_conversion():
"""Test that SmartDecisionMaker correctly handles different raw_response types with retry mechanism."""
-from unittest.mock import MagicMock, patch
import backend.blocks.llm as llm_module
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
@@ -584,7 +633,6 @@ async def test_smart_decision_maker_raw_response_conversion():
)
# Mock llm_call to return different responses on different calls
-from unittest.mock import AsyncMock
with patch(
"backend.blocks.llm.llm_call", new_callable=AsyncMock
@@ -603,10 +651,19 @@ async def test_smart_decision_maker_raw_response_conversion():
model=llm_module.LlmModel.GPT4O,
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
retry=2,
agent_mode_max_iterations=0,
)
# Should succeed after retry, demonstrating our helper function works
outputs = {}
# Create execution context
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
mock_execution_processor = MagicMock()
async for output_name, output_data in block.run(
input_data,
credentials=llm_module.TEST_CREDENTIALS,
@@ -615,6 +672,9 @@ async def test_smart_decision_maker_raw_response_conversion():
graph_exec_id="test-exec-id",
node_exec_id="test-node-exec-id",
user_id="test-user-id",
graph_version=1,
execution_context=mock_execution_context,
execution_processor=mock_execution_processor,
):
outputs[output_name] = output_data
@@ -650,8 +710,6 @@ async def test_smart_decision_maker_raw_response_conversion():
"I'll help you with that." # Ollama returns string
)
-from unittest.mock import AsyncMock
with patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
@@ -666,9 +724,18 @@ async def test_smart_decision_maker_raw_response_conversion():
prompt="Simple prompt",
model=llm_module.LlmModel.GPT4O,
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
agent_mode_max_iterations=0,
)
outputs = {}
# Create execution context
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
mock_execution_processor = MagicMock()
async for output_name, output_data in block.run(
input_data,
credentials=llm_module.TEST_CREDENTIALS,
@@ -677,6 +744,9 @@ async def test_smart_decision_maker_raw_response_conversion():
graph_exec_id="test-exec-id",
node_exec_id="test-node-exec-id",
user_id="test-user-id",
graph_version=1,
execution_context=mock_execution_context,
execution_processor=mock_execution_processor,
):
outputs[output_name] = output_data
@@ -696,8 +766,6 @@ async def test_smart_decision_maker_raw_response_conversion():
"content": "Test response",
} # Dict format
-from unittest.mock import AsyncMock
with patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
@@ -712,6 +780,160 @@ async def test_smart_decision_maker_raw_response_conversion():
prompt="Another test",
model=llm_module.LlmModel.GPT4O,
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
agent_mode_max_iterations=0,
)
outputs = {}
# Create execution context
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
mock_execution_processor = MagicMock()
async for output_name, output_data in block.run(
input_data,
credentials=llm_module.TEST_CREDENTIALS,
graph_id="test-graph-id",
node_id="test-node-id",
graph_exec_id="test-exec-id",
node_exec_id="test-node-exec-id",
user_id="test-user-id",
graph_version=1,
execution_context=mock_execution_context,
execution_processor=mock_execution_processor,
):
outputs[output_name] = output_data
assert "finished" in outputs
assert outputs["finished"] == "Test response"
@pytest.mark.asyncio
async def test_smart_decision_maker_agent_mode():
"""Test that agent mode executes tools directly and loops until finished."""
import backend.blocks.llm as llm_module
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
block = SmartDecisionMakerBlock()
# Mock tool call that requires multiple iterations
mock_tool_call_1 = MagicMock()
mock_tool_call_1.id = "call_1"
mock_tool_call_1.function.name = "search_keywords"
mock_tool_call_1.function.arguments = (
'{"query": "test", "max_keyword_difficulty": 50}'
)
mock_response_1 = MagicMock()
mock_response_1.response = None
mock_response_1.tool_calls = [mock_tool_call_1]
mock_response_1.prompt_tokens = 50
mock_response_1.completion_tokens = 25
mock_response_1.reasoning = "Using search tool"
mock_response_1.raw_response = {
"role": "assistant",
"content": None,
"tool_calls": [{"id": "call_1", "type": "function"}],
}
# Final response with no tool calls (finished)
mock_response_2 = MagicMock()
mock_response_2.response = "Task completed successfully"
mock_response_2.tool_calls = []
mock_response_2.prompt_tokens = 30
mock_response_2.completion_tokens = 15
mock_response_2.reasoning = None
mock_response_2.raw_response = {
"role": "assistant",
"content": "Task completed successfully",
}
# Mock the LLM call to return different responses on each iteration
llm_call_mock = AsyncMock()
llm_call_mock.side_effect = [mock_response_1, mock_response_2]
# Mock tool node signatures
mock_tool_signatures = [
{
"type": "function",
"function": {
"name": "search_keywords",
"_sink_node_id": "test-sink-node-id",
"_field_mapping": {},
"parameters": {
"properties": {
"query": {"type": "string"},
"max_keyword_difficulty": {"type": "integer"},
},
"required": ["query", "max_keyword_difficulty"],
},
},
}
]
# Mock database and execution components
mock_db_client = AsyncMock()
mock_node = MagicMock()
mock_node.block_id = "test-block-id"
mock_db_client.get_node.return_value = mock_node
# Mock upsert_execution_input to return proper NodeExecutionResult and input data
mock_node_exec_result = MagicMock()
mock_node_exec_result.node_exec_id = "test-tool-exec-id"
mock_input_data = {"query": "test", "max_keyword_difficulty": 50}
mock_db_client.upsert_execution_input.return_value = (
mock_node_exec_result,
mock_input_data,
)
# No longer need mock_execute_node since we use execution_processor.on_node_execution
with patch("backend.blocks.llm.llm_call", llm_call_mock), patch.object(
block, "_create_tool_node_signatures", return_value=mock_tool_signatures
), patch(
"backend.blocks.smart_decision_maker.get_database_manager_async_client",
return_value=mock_db_client,
), patch(
"backend.executor.manager.async_update_node_execution_status",
new_callable=AsyncMock,
), patch(
"backend.integrations.creds_manager.IntegrationCredentialsManager"
):
# Create a mock execution context
mock_execution_context = ExecutionContext(
safe_mode=False,
)
# Create a mock execution processor for agent mode tests
mock_execution_processor = AsyncMock()
# Configure the execution processor mock with required attributes
mock_execution_processor.running_node_execution = defaultdict(MagicMock)
mock_execution_processor.execution_stats = MagicMock()
mock_execution_processor.execution_stats_lock = threading.Lock()
# Mock the on_node_execution method to return successful stats
mock_node_stats = MagicMock()
mock_node_stats.error = None # No error
mock_execution_processor.on_node_execution = AsyncMock(
return_value=mock_node_stats
)
# Mock the get_execution_outputs_by_node_exec_id method
mock_db_client.get_execution_outputs_by_node_exec_id.return_value = {
"result": {"status": "success", "data": "search completed"}
}
# Test agent mode with max_iterations = 3
input_data = SmartDecisionMakerBlock.Input(
prompt="Complete this task using tools",
model=llm_module.LlmModel.GPT4O,
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
agent_mode_max_iterations=3, # Enable agent mode with 3 max iterations
)
outputs = {}
@@ -723,8 +945,115 @@ async def test_smart_decision_maker_raw_response_conversion():
graph_exec_id="test-exec-id",
node_exec_id="test-node-exec-id",
user_id="test-user-id",
graph_version=1,
execution_context=mock_execution_context,
execution_processor=mock_execution_processor,
):
outputs[output_name] = output_data
+# Verify agent mode behavior
+assert "tool_functions" in outputs # tool_functions is yielded in both modes
assert "finished" in outputs
-assert outputs["finished"] == "Test response"
+assert outputs["finished"] == "Task completed successfully"
assert "conversations" in outputs
# Verify the conversation includes tool responses
conversations = outputs["conversations"]
assert len(conversations) > 2 # Should have multiple conversation entries
# Verify LLM was called twice (once for tool call, once for finish)
assert llm_call_mock.call_count == 2
# Verify tool was executed via execution processor
assert mock_execution_processor.on_node_execution.call_count == 1
@pytest.mark.asyncio
async def test_smart_decision_maker_traditional_mode_default():
"""Test that default behavior (agent_mode_max_iterations=0) works as traditional mode."""
import backend.blocks.llm as llm_module
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
block = SmartDecisionMakerBlock()
# Mock tool call
mock_tool_call = MagicMock()
mock_tool_call.function.name = "search_keywords"
mock_tool_call.function.arguments = (
'{"query": "test", "max_keyword_difficulty": 50}'
)
mock_response = MagicMock()
mock_response.response = None
mock_response.tool_calls = [mock_tool_call]
mock_response.prompt_tokens = 50
mock_response.completion_tokens = 25
mock_response.reasoning = None
mock_response.raw_response = {"role": "assistant", "content": None}
mock_tool_signatures = [
{
"type": "function",
"function": {
"name": "search_keywords",
"_sink_node_id": "test-sink-node-id",
"_field_mapping": {},
"parameters": {
"properties": {
"query": {"type": "string"},
"max_keyword_difficulty": {"type": "integer"},
},
"required": ["query", "max_keyword_difficulty"],
},
},
}
]
with patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
return_value=mock_response,
), patch.object(
block, "_create_tool_node_signatures", return_value=mock_tool_signatures
):
# Test default behavior (traditional mode)
input_data = SmartDecisionMakerBlock.Input(
prompt="Test prompt",
model=llm_module.LlmModel.GPT4O,
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
agent_mode_max_iterations=0, # Traditional mode
)
# Create execution context
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
mock_execution_processor = MagicMock()
outputs = {}
async for output_name, output_data in block.run(
input_data,
credentials=llm_module.TEST_CREDENTIALS,
graph_id="test-graph-id",
node_id="test-node-id",
graph_exec_id="test-exec-id",
node_exec_id="test-node-exec-id",
user_id="test-user-id",
graph_version=1,
execution_context=mock_execution_context,
execution_processor=mock_execution_processor,
):
outputs[output_name] = output_data
# Verify traditional mode behavior
assert (
"tool_functions" in outputs
) # Should yield tool_functions in traditional mode
assert (
"tools_^_test-sink-node-id_~_query" in outputs
) # Should yield individual tool parameters
assert "tools_^_test-sink-node-id_~_max_keyword_difficulty" in outputs
assert "conversations" in outputs


@@ -1,7 +1,7 @@
"""Comprehensive tests for SmartDecisionMakerBlock dynamic field handling."""
import json
-from unittest.mock import AsyncMock, Mock, patch
+from unittest.mock import AsyncMock, MagicMock, Mock, patch
import pytest
@@ -308,10 +308,47 @@ async def test_output_yielding_with_dynamic_fields():
) as mock_llm:
mock_llm.return_value = mock_response
-# Mock the function signature creation
-with patch.object(
+# Mock the database manager to avoid HTTP calls during tool execution
+with patch(
+"backend.blocks.smart_decision_maker.get_database_manager_async_client"
+) as mock_db_manager, patch.object(
block, "_create_tool_node_signatures", new_callable=AsyncMock
) as mock_sig:
# Set up the mock database manager
mock_db_client = AsyncMock()
mock_db_manager.return_value = mock_db_client
# Mock the node retrieval
mock_target_node = Mock()
mock_target_node.id = "test-sink-node-id"
mock_target_node.block_id = "CreateDictionaryBlock"
mock_target_node.block = Mock()
mock_target_node.block.name = "Create Dictionary"
mock_db_client.get_node.return_value = mock_target_node
# Mock the execution result creation
mock_node_exec_result = Mock()
mock_node_exec_result.node_exec_id = "mock-node-exec-id"
mock_final_input_data = {
"values_#_name": "Alice",
"values_#_age": 30,
"values_#_email": "alice@example.com",
}
mock_db_client.upsert_execution_input.return_value = (
mock_node_exec_result,
mock_final_input_data,
)
# Mock the output retrieval
mock_outputs = {
"values_#_name": "Alice",
"values_#_age": 30,
"values_#_email": "alice@example.com",
}
mock_db_client.get_execution_outputs_by_node_exec_id.return_value = (
mock_outputs
)
mock_sig.return_value = [
{
"type": "function",
@@ -337,10 +374,16 @@ async def test_output_yielding_with_dynamic_fields():
prompt="Create a user dictionary",
credentials=llm.TEST_CREDENTIALS_INPUT,
model=llm.LlmModel.GPT4O,
agent_mode_max_iterations=0, # Use traditional mode to test output yielding
)
# Run the block
outputs = {}
from backend.data.execution import ExecutionContext
mock_execution_context = ExecutionContext(safe_mode=False)
mock_execution_processor = MagicMock()
async for output_name, output_value in block.run(
input_data,
credentials=llm.TEST_CREDENTIALS,
@@ -349,6 +392,9 @@ async def test_output_yielding_with_dynamic_fields():
graph_exec_id="test_exec",
node_exec_id="test_node_exec",
user_id="test_user",
graph_version=1,
execution_context=mock_execution_context,
execution_processor=mock_execution_processor,
):
outputs[output_name] = output_value
@@ -511,45 +557,108 @@ async def test_validation_errors_dont_pollute_conversation():
}
]
-# Create input data
-from backend.blocks import llm
+# Mock the database manager to avoid HTTP calls during tool execution
+with patch(
+"backend.blocks.smart_decision_maker.get_database_manager_async_client"
+) as mock_db_manager:
+# Set up the mock database manager for agent mode
+mock_db_client = AsyncMock()
+mock_db_manager.return_value = mock_db_client
-input_data = block.input_schema(
-prompt="Test prompt",
-credentials=llm.TEST_CREDENTIALS_INPUT,
-model=llm.LlmModel.GPT4O,
-retry=3, # Allow retries
-)
+# Mock the node retrieval
+mock_target_node = Mock()
+mock_target_node.id = "test-sink-node-id"
+mock_target_node.block_id = "TestBlock"
+mock_target_node.block = Mock()
+mock_target_node.block.name = "Test Block"
+mock_db_client.get_node.return_value = mock_target_node
-# Run the block
-outputs = {}
-async for output_name, output_value in block.run(
-input_data,
-credentials=llm.TEST_CREDENTIALS,
-graph_id="test_graph",
-node_id="test_node",
-graph_exec_id="test_exec",
-node_exec_id="test_node_exec",
-user_id="test_user",
-):
-outputs[output_name] = output_value
+# Mock the execution result creation
+mock_node_exec_result = Mock()
+mock_node_exec_result.node_exec_id = "mock-node-exec-id"
+mock_final_input_data = {"correct_param": "value"}
+mock_db_client.upsert_execution_input.return_value = (
+mock_node_exec_result,
+mock_final_input_data,
+)
-# Verify we had 2 LLM calls (initial + retry)
-assert call_count == 2
+# Mock the output retrieval
+mock_outputs = {"correct_param": "value"}
+mock_db_client.get_execution_outputs_by_node_exec_id.return_value = (
+mock_outputs
+)
-# Check the final conversation output
-final_conversation = outputs.get("conversations", [])
+# Create input data
+from backend.blocks import llm
-# The final conversation should NOT contain the validation error message
-error_messages = [
-msg
-for msg in final_conversation
-if msg.get("role") == "user"
-and "parameter errors" in msg.get("content", "")
-]
-assert (
-len(error_messages) == 0
-), "Validation error leaked into final conversation"
+input_data = block.input_schema(
+prompt="Test prompt",
+credentials=llm.TEST_CREDENTIALS_INPUT,
+model=llm.LlmModel.GPT4O,
+retry=3, # Allow retries
+agent_mode_max_iterations=1,
+)
-# The final conversation should only have the successful response
-assert final_conversation[-1]["content"] == "valid"
+# Run the block
+outputs = {}
+from backend.data.execution import ExecutionContext
+mock_execution_context = ExecutionContext(safe_mode=False)
+# Create a proper mock execution processor for agent mode
+from collections import defaultdict
+mock_execution_processor = AsyncMock()
+mock_execution_processor.execution_stats = MagicMock()
+mock_execution_processor.execution_stats_lock = MagicMock()
+# Create a mock NodeExecutionProgress for the sink node
+mock_node_exec_progress = MagicMock()
+mock_node_exec_progress.add_task = MagicMock()
+mock_node_exec_progress.pop_output = MagicMock(
+return_value=None
+) # No outputs to process
+# Set up running_node_execution as a defaultdict that returns our mock for any key
+mock_execution_processor.running_node_execution = defaultdict(
+lambda: mock_node_exec_progress
+)
+# Mock the on_node_execution method that gets called during tool execution
+mock_node_stats = MagicMock()
+mock_node_stats.error = None
+mock_execution_processor.on_node_execution.return_value = (
+mock_node_stats
+)
+async for output_name, output_value in block.run(
+input_data,
+credentials=llm.TEST_CREDENTIALS,
+graph_id="test_graph",
+node_id="test_node",
+graph_exec_id="test_exec",
+node_exec_id="test_node_exec",
+user_id="test_user",
+graph_version=1,
+execution_context=mock_execution_context,
+execution_processor=mock_execution_processor,
+):
+outputs[output_name] = output_value
+# Verify we had at least 1 LLM call
+assert call_count >= 1
+# Check the final conversation output
+final_conversation = outputs.get("conversations", [])
+# The final conversation should NOT contain validation error messages
+# Even if retries don't happen in agent mode, we should not leak errors
+error_messages = [
+msg
+for msg in final_conversation
+if msg.get("role") == "user"
+and "parameter errors" in msg.get("content", "")
+]
+assert (
+len(error_messages) == 0
+), "Validation error leaked into final conversation"


@@ -5,6 +5,7 @@ from enum import Enum
from multiprocessing import Manager
from queue import Empty
from typing import (
TYPE_CHECKING,
Annotated,
Any,
AsyncGenerator,
@@ -65,6 +66,9 @@ from .includes import (
)
from .model import CredentialsMetaInput, GraphExecutionStats, NodeExecutionStats
if TYPE_CHECKING:
pass
T = TypeVar("T")
logger = logging.getLogger(__name__)
@@ -836,6 +840,30 @@ async def upsert_execution_output(
await AgentNodeExecutionInputOutput.prisma().create(data=data)
async def get_execution_outputs_by_node_exec_id(
node_exec_id: str,
) -> dict[str, Any]:
"""
Get all execution outputs for a specific node execution ID.
Args:
node_exec_id: The node execution ID to get outputs for
Returns:
Dictionary mapping output names to their data values
"""
outputs = await AgentNodeExecutionInputOutput.prisma().find_many(
where={"referencedByOutputExecId": node_exec_id}
)
result = {}
for output in outputs:
if output.data is not None:
result[output.name] = type_utils.convert(output.data, JsonValue)
return result
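
A hedged usage sketch of the new helper (the execution ID is a placeholder; assumes a connected Prisma client and an async context):

import asyncio

from backend.data.execution import get_execution_outputs_by_node_exec_id

async def demo() -> None:
    # Placeholder ID for illustration; an empty dict comes back when the node wrote no outputs
    outputs = await get_execution_outputs_by_node_exec_id("some-node-exec-id")
    for name, value in outputs.items():
        print(name, value)

asyncio.run(demo())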
async def update_graph_execution_start_time(
graph_exec_id: str,
) -> GraphExecution | None:


@@ -100,7 +100,7 @@ async def get_or_create_human_review(
return None
else:
return ReviewResult(
-data=review.payload if review.status == ReviewStatus.APPROVED else None,
+data=review.payload,
status=review.status,
message=review.reviewMessage or "",
processed=review.processed,


@@ -13,6 +13,7 @@ from backend.data.execution import (
get_block_error_stats,
get_child_graph_executions,
get_execution_kv_data,
get_execution_outputs_by_node_exec_id,
get_frequently_executed_graphs,
get_graph_execution_meta,
get_graph_executions,
@@ -147,6 +148,7 @@ class DatabaseManager(AppService):
update_graph_execution_stats = _(update_graph_execution_stats)
upsert_execution_input = _(upsert_execution_input)
upsert_execution_output = _(upsert_execution_output)
get_execution_outputs_by_node_exec_id = _(get_execution_outputs_by_node_exec_id)
get_execution_kv_data = _(get_execution_kv_data)
set_execution_kv_data = _(set_execution_kv_data)
get_block_error_stats = _(get_block_error_stats)
@@ -277,6 +279,7 @@ class DatabaseManagerAsyncClient(AppServiceClient):
get_user_integrations = d.get_user_integrations
upsert_execution_input = d.upsert_execution_input
upsert_execution_output = d.upsert_execution_output
get_execution_outputs_by_node_exec_id = d.get_execution_outputs_by_node_exec_id
update_graph_execution_stats = d.update_graph_execution_stats
update_node_execution_status = d.update_node_execution_status
update_node_execution_status_batch = d.update_node_execution_status_batch


@@ -133,9 +133,8 @@ def execute_graph(
cluster_lock: ClusterLock,
):
"""Execute graph using thread-local ExecutionProcessor instance"""
-return _tls.processor.on_graph_execution(
-graph_exec_entry, cancel_event, cluster_lock
-)
+processor: ExecutionProcessor = _tls.processor
+return processor.on_graph_execution(graph_exec_entry, cancel_event, cluster_lock)
T = TypeVar("T")
@@ -143,8 +142,8 @@ T = TypeVar("T")
async def execute_node(
node: Node,
-creds_manager: IntegrationCredentialsManager,
data: NodeExecutionEntry,
+execution_processor: "ExecutionProcessor",
execution_stats: NodeExecutionStats | None = None,
nodes_input_masks: Optional[NodesInputMasks] = None,
) -> BlockOutput:
@@ -169,6 +168,7 @@ async def execute_node(
node_id = data.node_id
node_block = node.block
execution_context = data.execution_context
creds_manager = execution_processor.creds_manager
log_metadata = LogMetadata(
logger=_logger,
@@ -212,6 +212,7 @@ async def execute_node(
"node_exec_id": node_exec_id,
"user_id": user_id,
"execution_context": execution_context,
"execution_processor": execution_processor,
}
# Last-minute fetch credentials + acquire a system-wide read-write lock to prevent
@@ -608,8 +609,8 @@ class ExecutionProcessor:
async for output_name, output_data in execute_node(
node=node,
-creds_manager=self.creds_manager,
data=node_exec,
+execution_processor=self,
execution_stats=stats,
nodes_input_masks=nodes_input_masks,
):
@@ -860,12 +861,17 @@ class ExecutionProcessor:
execution_stats_lock = threading.Lock()
# State holders ----------------------------------------------------
-running_node_execution: dict[str, NodeExecutionProgress] = defaultdict(
+self.running_node_execution: dict[str, NodeExecutionProgress] = defaultdict(
NodeExecutionProgress
)
-running_node_evaluation: dict[str, Future] = {}
+self.running_node_evaluation: dict[str, Future] = {}
+self.execution_stats = execution_stats
+self.execution_stats_lock = execution_stats_lock
execution_queue = ExecutionQueue[NodeExecutionEntry]()
+running_node_execution = self.running_node_execution
+running_node_evaluation = self.running_node_evaluation
try:
if db_client.get_credits(graph_exec.user_id) <= 0:
raise InsufficientBalanceError(


@@ -134,18 +134,14 @@ async def process_review_action(
# Build review decisions map
review_decisions = {}
for review in request.reviews:
-if review.approved:
-review_decisions[review.node_exec_id] = (
-ReviewStatus.APPROVED,
-review.reviewed_data,
-review.message,
-)
-else:
-review_decisions[review.node_exec_id] = (
-ReviewStatus.REJECTED,
-None,
-review.message,
-)
+review_status = (
+ReviewStatus.APPROVED if review.approved else ReviewStatus.REJECTED
+)
+review_decisions[review.node_exec_id] = (
+review_status,
+review.reviewed_data,
+review.message,
+)
# Process all reviews
updated_reviews = await process_all_reviews_for_execution(


@@ -5,6 +5,13 @@ from tiktoken import encoding_for_model
from backend.util import json
# ---------------------------------------------------------------------------#
# CONSTANTS #
# ---------------------------------------------------------------------------#
# Message prefixes for important system messages that should be protected during compression
MAIN_OBJECTIVE_PREFIX = "[Main Objective Prompt]: "
# ---------------------------------------------------------------------------#
# INTERNAL UTILITIES #
# ---------------------------------------------------------------------------#
@@ -63,6 +70,55 @@ def _msg_tokens(msg: dict, enc) -> int:
return WRAPPER + content_tokens + tool_call_tokens
def _is_tool_message(msg: dict) -> bool:
"""Check if a message contains tool calls or results that should be protected."""
content = msg.get("content")
# Check for Anthropic-style tool messages
if isinstance(content, list) and any(
isinstance(item, dict) and item.get("type") in ("tool_use", "tool_result")
for item in content
):
return True
# Check for OpenAI-style tool calls in the message
if "tool_calls" in msg or msg.get("role") == "tool":
return True
return False
def _is_objective_message(msg: dict) -> bool:
"""Check if a message contains objective/system prompts that should be absolutely protected."""
content = msg.get("content", "")
if isinstance(content, str):
# Protect any message with the main objective prefix
return content.startswith(MAIN_OBJECTIVE_PREFIX)
return False
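
A few illustrative checks of the two predicates above (example messages, not fixtures from this commit):

# OpenAI-style tool messages and Anthropic-style tool_result content are both protected
assert _is_tool_message({"role": "tool", "tool_call_id": "t1", "content": "ok"})
assert _is_tool_message(
    {"role": "user", "content": [{"type": "tool_result", "content": "ok"}]}
)
# Only messages carrying MAIN_OBJECTIVE_PREFIX count as objective messages
assert _is_objective_message(
    {"role": "system", "content": MAIN_OBJECTIVE_PREFIX + "Summarize the report"}
)
assert not _is_objective_message({"role": "user", "content": "hello"})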
def _truncate_tool_message_content(msg: dict, enc, max_tokens: int) -> None:
"""
Carefully truncate tool message content while preserving tool structure.
Only truncates tool_result content, leaves tool_use intact.
"""
content = msg.get("content")
if not isinstance(content, list):
return
for item in content:
# Only process tool_result items, leave tool_use blocks completely intact
if not (isinstance(item, dict) and item.get("type") == "tool_result"):
continue
result_content = item.get("content", "")
if (
isinstance(result_content, str)
and _tok_len(result_content, enc) > max_tokens
):
item["content"] = _truncate_middle_tokens(result_content, enc, max_tokens)
def _truncate_middle_tokens(text: str, enc, max_tok: int) -> str:
"""
Return *text* shortened to ≈max_tok tokens by keeping the head & tail
@@ -140,13 +196,21 @@ def compress_prompt(
return sum(_msg_tokens(m, enc) for m in msgs)
original_token_count = total_tokens()
if original_token_count + reserve <= target_tokens:
return msgs
# ---- STEP 0 : normalise content --------------------------------------
# Convert non-string payloads to strings so token counting is coherent.
-for m in msgs[1:-1]: # keep the first & last intact
+for i, m in enumerate(msgs):
if not isinstance(m.get("content"), str) and m.get("content") is not None:
+if _is_tool_message(m):
+continue
+# Keep first and last messages intact (unless they're tool messages)
+if i == 0 or i == len(msgs) - 1:
+continue
# Reasonable 20k-char ceiling prevents pathological blobs
content_str = json.dumps(m["content"], separators=(",", ":"))
if len(content_str) > 20_000:
@@ -157,34 +221,45 @@ def compress_prompt(
cap = start_cap
while total_tokens() + reserve > target_tokens and cap >= floor_cap:
for m in msgs[1:-1]: # keep first & last intact
if _tok_len(m.get("content") or "", enc) > cap:
m["content"] = _truncate_middle_tokens(m["content"], enc, cap)
if _is_tool_message(m):
# For tool messages, only truncate tool result content, preserve structure
_truncate_tool_message_content(m, enc, cap)
continue
if _is_objective_message(m):
# Never truncate objective messages - they contain the core task
continue
content = m.get("content") or ""
if _tok_len(content, enc) > cap:
m["content"] = _truncate_middle_tokens(content, enc, cap)
cap //= 2 # tighten the screw
# ---- STEP 2 : middle-out deletion -----------------------------------
while total_tokens() + reserve > target_tokens and len(msgs) > 2:
+# Identify all deletable messages (not first/last, not tool messages, not objective messages)
+deletable_indices = []
+for i in range(1, len(msgs) - 1): # Skip first and last
+if not _is_tool_message(msgs[i]) and not _is_objective_message(msgs[i]):
+deletable_indices.append(i)
+if not deletable_indices:
+break # nothing more we can drop
+# Delete from center outward - find the index closest to center
centre = len(msgs) // 2
-# Build a symmetrical centre-out index walk: centre, centre+1, centre-1, ...
-order = [centre] + [
-i
-for pair in zip(range(centre + 1, len(msgs) - 1), range(centre - 1, 0, -1))
-for i in pair
-]
-removed = False
-for i in order:
-msg = msgs[i]
-if "tool_calls" in msg or msg.get("role") == "tool":
-continue # protect tool shells
-del msgs[i]
-removed = True
-break
-if not removed: # nothing more we can drop
-break
+to_delete = min(deletable_indices, key=lambda i: abs(i - centre))
+del msgs[to_delete]
# ---- STEP 3 : final safety-net trim on first & last ------------------
cap = start_cap
while total_tokens() + reserve > target_tokens and cap >= floor_cap:
for idx in (0, -1): # first and last
+if _is_tool_message(msgs[idx]):
+# For tool messages at first/last position, truncate tool result content only
+_truncate_tool_message_content(msgs[idx], enc, cap)
+continue
text = msgs[idx].get("content") or ""
if _tok_len(text, enc) > cap:
msgs[idx]["content"] = _truncate_middle_tokens(text, enc, cap)


@@ -106,7 +106,11 @@ export const CustomNode: React.FC<NodeProps<CustomNode>> = React.memo(
/>
<NodeAdvancedToggle nodeId={nodeId} />
{data.uiType != BlockUIType.OUTPUT && (
-<OutputHandler outputSchema={outputSchema} nodeId={nodeId} />
+<OutputHandler
+uiType={data.uiType}
+outputSchema={outputSchema}
+nodeId={nodeId}
+/>
)}
<NodeDataRenderer nodeId={nodeId} />
</div>


@@ -20,17 +20,32 @@ export const FormCreator = React.memo(
className?: string;
}) => {
const updateNodeData = useNodeStore((state) => state.updateNodeData);
+const getHardCodedValues = useNodeStore(
+(state) => state.getHardCodedValues,
+);
const handleChange = ({ formData }: any) => {
if ("credentials" in formData && !formData.credentials?.id) {
delete formData.credentials;
}
-updateNodeData(nodeId, { hardcodedValues: formData });
+const updatedValues =
+uiType === BlockUIType.AGENT
+? {
+...getHardCodedValues(nodeId),
+inputs: formData,
+}
+: formData;
+updateNodeData(nodeId, { hardcodedValues: updatedValues });
};
-const initialValues = getHardCodedValues(nodeId);
+const hardcodedValues = getHardCodedValues(nodeId);
+const initialValues =
+uiType === BlockUIType.AGENT
+? (hardcodedValues.inputs ?? {})
+: hardcodedValues;
return (
<div className={className}>


@@ -14,13 +14,16 @@ import {
import { useEdgeStore } from "@/app/(platform)/build/stores/edgeStore";
import { getTypeDisplayInfo } from "./helpers";
import { generateHandleId } from "../handlers/helpers";
import { BlockUIType } from "../../types";
export const OutputHandler = ({
outputSchema,
nodeId,
uiType,
}: {
outputSchema: RJSFSchema;
nodeId: string;
uiType: BlockUIType;
}) => {
const { isOutputConnected } = useEdgeStore();
const properties = outputSchema?.properties || {};
@@ -79,7 +82,9 @@ export const OutputHandler = ({
</Text>
<NodeHandle
-handleId={generateHandleId(key)}
+handleId={
+uiType === BlockUIType.AGENT ? key : generateHandleId(key)
+}
isConnected={isConnected}
side="right"
/>


@@ -7,6 +7,7 @@ import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import { getV2GetSpecificAgent } from "@/app/api/__generated__/endpoints/store/store";
import {
getGetV2ListLibraryAgentsQueryKey,
getV2GetLibraryAgent,
usePostV2AddMarketplaceAgent,
} from "@/app/api/__generated__/endpoints/library/library";
import {
@@ -151,7 +152,12 @@ export const useBlockMenuSearch = () => {
});
const libraryAgent = response.data as LibraryAgent;
-addAgentToBuilder(libraryAgent);
+const { data: libraryAgentDetails } = await getV2GetLibraryAgent(
+libraryAgent.id,
+);
+addAgentToBuilder(libraryAgentDetails as LibraryAgent);
toast({
title: "Agent Added",


@@ -1,6 +1,7 @@
import { getGetV2GetBuilderItemCountsQueryKey } from "@/app/api/__generated__/endpoints/default/default";
import {
getGetV2ListLibraryAgentsQueryKey,
getV2GetLibraryAgent,
usePostV2AddMarketplaceAgent,
} from "@/app/api/__generated__/endpoints/library/library";
import {
@@ -105,8 +106,16 @@ export const useMarketplaceAgentsContent = () => {
},
});
// Here, libraryAgent has empty input and output schemas.
// Not updating the endpoint because this endpoint is used elsewhere.
// TODO: Create a new endpoint for builder specific to marketplace agents.
const libraryAgent = response.data as LibraryAgent;
-addAgentToBuilder(libraryAgent);
+const { data: libraryAgentDetails } = await getV2GetLibraryAgent(
+libraryAgent.id,
+);
+addAgentToBuilder(libraryAgentDetails as LibraryAgent);
toast({
title: "Agent Added",


@@ -1,16 +1,11 @@
"use client";
import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
-import type {
-BlockIOSubSchema,
-CredentialsMetaInput,
-} from "@/lib/autogpt-server-api/types";
+import { Text } from "@/components/atoms/Text/Text";
+import type { CredentialsMetaInput } from "@/lib/autogpt-server-api/types";
import { CredentialsInput } from "../CredentialsInputs/CredentialsInputs";
-import {
-getAgentCredentialsFields,
-getAgentInputFields,
-renderValue,
-} from "./helpers";
+import { RunAgentInputs } from "../RunAgentInputs/RunAgentInputs";
+import { getAgentCredentialsFields, getAgentInputFields } from "./helpers";
type Props = {
agent: LibraryAgent;
@@ -28,19 +23,23 @@ export function AgentInputsReadOnly({
getAgentCredentialsFields(agent),
);
  // Use the actual input entries as the source of truth; augment each with its schema from the input fields.
// TODO: ensure consistent ordering.
const inputEntries =
inputs &&
Object.entries(inputs).map<[string, [BlockIOSubSchema | undefined, any]]>(
([k, v]) => [k, [inputFields[k], v]],
);
Object.entries(inputs).map(([key, value]) => ({
key,
schema: inputFields[key],
value,
}));
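    // e.g. inputs = { name: "John" } yields
    // [{ key: "name", schema: inputFields["name"], value: "John" }]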
const hasInputs = inputEntries && inputEntries.length > 0;
const hasCredentials = credentialInputs && credentialFieldEntries.length > 0;
if (!hasInputs && !hasCredentials) {
return <div className="text-neutral-600">No input for this run.</div>;
return (
<Text variant="body" className="text-zinc-700">
No input for this run.
</Text>
);
}
return (
@@ -48,16 +47,20 @@ export function AgentInputsReadOnly({
{/* Regular inputs */}
{hasInputs && (
<div className="flex flex-col gap-4">
{inputEntries.map(([key, [schema, value]]) => (
<div key={key} className="flex flex-col gap-1.5">
<label className="text-sm font-medium">
{schema?.title || key}
</label>
<p className="whitespace-pre-wrap break-words text-sm text-neutral-700">
{renderValue(value)}
</p>
</div>
))}
{inputEntries.map(({ key, schema, value }) => {
if (!schema) return null;
return (
<RunAgentInputs
key={key}
schema={schema}
value={value}
placeholder={schema.description}
onChange={() => {}}
readOnly={true}
/>
);
})}
</div>
)}

View File

@@ -9,6 +9,7 @@ import { Button } from "@/components/atoms/Button/Button";
import { FileInput } from "@/components/atoms/FileInput/FileInput";
import { Switch } from "@/components/atoms/Switch/Switch";
import { GoogleDrivePickerInput } from "@/components/contextual/GoogleDrivePicker/GoogleDrivePickerInput";
import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip";
import { TimePicker } from "@/components/molecules/TimePicker/TimePicker";
import {
BlockIOObjectSubSchema,
@@ -32,6 +33,7 @@ interface Props {
value?: any;
placeholder?: string;
onChange: (value: any) => void;
readOnly?: boolean;
}
/**
@@ -44,6 +46,7 @@ export function RunAgentInputs({
value,
placeholder,
onChange,
readOnly = false,
...props
}: Props & React.HTMLAttributes<HTMLElement>) {
const { handleUploadFile, uploadProgress } = useRunAgentInputs();
@@ -62,7 +65,6 @@ export function RunAgentInputs({
id={`${baseId}-number`}
label={schema.title ?? placeholder ?? "Number"}
hideLabel
size="small"
type="number"
value={value ?? ""}
placeholder={placeholder || "Enter number"}
@@ -80,7 +82,6 @@ export function RunAgentInputs({
id={`${baseId}-textarea`}
label={schema.title ?? placeholder ?? "Text"}
hideLabel
size="small"
type="textarea"
rows={3}
value={value ?? ""}
@@ -130,7 +131,6 @@ export function RunAgentInputs({
id={`${baseId}-date`}
label={schema.title ?? placeholder ?? "Date"}
hideLabel
size="small"
type="date"
value={value ? format(value as Date, "yyyy-MM-dd") : ""}
onChange={(e) => {
@@ -159,7 +159,6 @@ export function RunAgentInputs({
id={`${baseId}-datetime`}
label={schema.title ?? placeholder ?? "Date time"}
hideLabel
size="small"
type="datetime-local"
value={value ?? ""}
onChange={(e) => onChange((e.target as HTMLInputElement).value)}
@@ -194,7 +193,6 @@ export function RunAgentInputs({
label={schema.title ?? placeholder ?? "Select"}
hideLabel
value={value ?? ""}
size="small"
onValueChange={(val: string) => onChange(val)}
placeholder={placeholder || "Select an option"}
options={schema.enum
@@ -217,7 +215,6 @@ export function RunAgentInputs({
items={allKeys.map((key) => ({
value: key,
label: _schema.properties[key]?.title ?? key,
size: "small",
}))}
selectedValues={selectedValues}
onChange={(values: string[]) =>
@@ -336,7 +333,6 @@ export function RunAgentInputs({
id={`${baseId}-text`}
label={schema.title ?? placeholder ?? "Text"}
hideLabel
size="small"
type="text"
value={value ?? ""}
onChange={(e) => onChange((e.target as HTMLInputElement).value)}
@@ -347,6 +343,17 @@ export function RunAgentInputs({
}
return (
<div className="no-drag relative flex w-full">{innerInputElement}</div>
<div className="flex w-full flex-col gap-0 space-y-2">
<label className="large-medium flex items-center gap-1 font-medium">
{schema.title || placeholder}
<InformationTooltip description={schema.description} />
</label>
<div
className="no-drag relative flex w-full"
style={readOnly ? { pointerEvents: "none", opacity: 0.7 } : undefined}
>
{innerInputElement}
</div>
</div>
);
}
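// Note on the readOnly handling above: disabling interaction via
// pointer-events/opacity on the wrapper avoids threading a disabled prop
// through every input variant, at the cost of the inputs staying
// keyboard-focusable (an assumed, acceptable limitation here).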

View File

@@ -73,22 +73,15 @@ export function ModalRunSection() {
title="Task Inputs"
subtitle="Enter the information you want to provide to the agent for this task"
>
{/* Regular inputs */}
{inputFields.map(([key, inputSubSchema]) => (
<div key={key} className="flex w-full flex-col gap-0 space-y-2">
<label className="flex items-center gap-1 text-sm font-medium">
{inputSubSchema.title || key}
<InformationTooltip description={inputSubSchema.description} />
</label>
<RunAgentInputs
schema={inputSubSchema}
value={inputValues[key] ?? inputSubSchema.default}
placeholder={inputSubSchema.description}
onChange={(value) => setInputValue(key, value)}
data-testid={`agent-input-${key}`}
/>
</div>
<RunAgentInputs
key={key}
schema={inputSubSchema}
value={inputValues[key] ?? inputSubSchema.default}
placeholder={inputSubSchema.description}
onChange={(value) => setInputValue(key, value)}
data-testid={`agent-input-${key}`}
/>
))}
</ModalSection>
) : null}

View File

@@ -4,20 +4,19 @@ import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecut
import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
import { Text } from "@/components/atoms/Text/Text";
import {
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip";
import {
ScrollableTabs,
ScrollableTabsContent,
ScrollableTabsList,
ScrollableTabsTrigger,
} from "@/components/molecules/ScrollableTabs/ScrollableTabs";
import { PendingReviewsList } from "@/components/organisms/PendingReviewsList/PendingReviewsList";
import { usePendingReviewsForExecution } from "@/hooks/usePendingReviews";
import { isLargeScreen, useBreakpoint } from "@/lib/hooks/useBreakpoint";
import { InfoIcon } from "@phosphor-icons/react";
import { useEffect } from "react";
import { AgentInputsReadOnly } from "../../modals/AgentInputsReadOnly/AgentInputsReadOnly";
import { AnchorLinksWrap } from "../AnchorLinksWrap";
import { LoadingSelectedContent } from "../LoadingSelectedContent";
import { RunDetailCard } from "../RunDetailCard/RunDetailCard";
import { RunDetailHeader } from "../RunDetailHeader/RunDetailHeader";
@@ -28,9 +27,6 @@ import { SelectedRunActions } from "./components/SelectedRunActions/SelectedRunA
import { WebhookTriggerSection } from "./components/WebhookTriggerSection";
import { useSelectedRunView } from "./useSelectedRunView";
const anchorStyles =
"border-b-2 border-transparent pb-1 text-sm font-medium text-slate-600 transition-colors hover:text-slate-900 hover:border-slate-900";
interface Props {
agent: LibraryAgent;
runId: string;
@@ -65,13 +61,6 @@ export function SelectedRunView({
const withSummary = run?.stats?.activity_status;
const withReviews = run?.status === AgentExecutionStatus.REVIEW;
function scrollToSection(id: string) {
const element = document.getElementById(id);
if (element) {
element.scrollIntoView({ behavior: "smooth", block: "start" });
}
}
if (responseError || httpError) {
return (
<ErrorCard
@@ -112,118 +101,116 @@ export function SelectedRunView({
/>
)}
{/* Navigation Links */}
<AnchorLinksWrap>
{withSummary && (
<button
onClick={() => scrollToSection("summary")}
className={anchorStyles}
>
Summary
</button>
)}
<button
onClick={() => scrollToSection("output")}
className={anchorStyles}
>
Output
</button>
<button
onClick={() => scrollToSection("input")}
className={anchorStyles}
>
Your input
</button>
{withReviews && (
<button
onClick={() => scrollToSection("reviews")}
className={anchorStyles}
>
Reviews ({pendingReviews.length})
</button>
)}
</AnchorLinksWrap>
{/* Summary Section */}
{withSummary && (
<div id="summary" className="scroll-mt-4">
<RunDetailCard
title={
<div className="flex items-center gap-2">
<Text variant="lead-semibold">Summary</Text>
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>
<InfoIcon
size={16}
className="cursor-help text-neutral-500 hover:text-neutral-700"
/>
</TooltipTrigger>
<TooltipContent>
<p className="max-w-xs">
This AI-generated summary describes how the agent
handled your task. It&apos;s an experimental
feature and may occasionally be inaccurate.
</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>
</div>
}
>
<RunSummary run={run} />
</RunDetailCard>
</div>
)}
{/* Output Section */}
<div id="output" className="scroll-mt-4">
<RunDetailCard title="Output">
{isLoading ? (
<div className="text-neutral-500">
<LoadingSpinner />
</div>
) : run && "outputs" in run ? (
<RunOutputs outputs={run.outputs as any} />
) : (
<Text variant="body" className="text-neutral-600">
No output from this run.
</Text>
<ScrollableTabs
defaultValue="output"
className="-mt-2 flex flex-col"
>
<ScrollableTabsList className="px-4">
{withSummary && (
<ScrollableTabsTrigger value="summary">
Summary
</ScrollableTabsTrigger>
)}
</RunDetailCard>
</div>
{/* Input Section */}
<div id="input" className="scroll-mt-4">
<RunDetailCard title="Your input">
<AgentInputsReadOnly
agent={agent}
inputs={run?.inputs}
credentialInputs={run?.credential_inputs}
/>
</RunDetailCard>
</div>
{/* Reviews Section */}
{withReviews && (
<div id="reviews" className="scroll-mt-4">
<RunDetailCard>
{reviewsLoading ? (
<div className="text-neutral-500">Loading reviews</div>
) : pendingReviews.length > 0 ? (
<PendingReviewsList
reviews={pendingReviews}
onReviewComplete={refetchReviews}
emptyMessage="No pending reviews for this execution"
/>
) : (
<div className="text-neutral-600">
No pending reviews for this execution
<ScrollableTabsTrigger value="output">
Output
</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="input">
Your input
</ScrollableTabsTrigger>
{withReviews && (
<ScrollableTabsTrigger value="reviews">
Reviews ({pendingReviews.length})
</ScrollableTabsTrigger>
)}
</ScrollableTabsList>
<div className="my-6 flex flex-col gap-6">
{/* Summary Section */}
{withSummary && (
<ScrollableTabsContent value="summary">
<div className="scroll-mt-4">
<RunDetailCard
title={
<div className="flex items-center gap-1">
<Text variant="lead-semibold">Summary</Text>
<InformationTooltip
iconSize={20}
description="This AI-generated summary describes how the agent handled your task. It's an experimental feature and may occasionally be inaccurate."
/>
</div>
}
>
<RunSummary run={run} />
</RunDetailCard>
</div>
)}
</RunDetailCard>
</ScrollableTabsContent>
)}
{/* Output Section */}
<ScrollableTabsContent value="output">
<div className="scroll-mt-4">
<RunDetailCard title="Output">
{isLoading ? (
<div className="text-neutral-500">
<LoadingSpinner />
</div>
) : run && "outputs" in run ? (
<RunOutputs outputs={run.outputs as any} />
) : (
<Text variant="body" className="text-neutral-600">
No output from this run.
</Text>
)}
</RunDetailCard>
</div>
</ScrollableTabsContent>
{/* Input Section */}
<ScrollableTabsContent value="input">
<div id="input" className="scroll-mt-4">
<RunDetailCard
title={
<div className="flex items-center gap-1">
<Text variant="lead-semibold">Your input</Text>
<InformationTooltip
iconSize={20}
description="This is the input that was provided to the agent for running this task."
/>
</div>
}
>
<AgentInputsReadOnly
agent={agent}
inputs={run?.inputs}
credentialInputs={run?.credential_inputs}
/>
</RunDetailCard>
</div>
</ScrollableTabsContent>
{/* Reviews Section */}
{withReviews && (
<ScrollableTabsContent value="reviews">
<div className="scroll-mt-4">
<RunDetailCard>
{reviewsLoading ? (
<LoadingSpinner size="small" />
) : pendingReviews.length > 0 ? (
<PendingReviewsList
reviews={pendingReviews}
onReviewComplete={refetchReviews}
emptyMessage="No pending reviews for this execution"
/>
) : (
<Text variant="body" className="text-zinc-700">
No pending reviews for this execution
</Text>
)}
</RunDetailCard>
</div>
</ScrollableTabsContent>
)}
</div>
)}
</ScrollableTabs>
</div>
</SelectedViewLayout>
</div>

View File

@@ -9,7 +9,6 @@ import { humanizeCronExpression } from "@/lib/cron-expression-utils";
import { isLargeScreen, useBreakpoint } from "@/lib/hooks/useBreakpoint";
import { formatInTimezone, getTimezoneDisplayName } from "@/lib/timezone-utils";
import { AgentInputsReadOnly } from "../../modals/AgentInputsReadOnly/AgentInputsReadOnly";
import { AnchorLinksWrap } from "../AnchorLinksWrap";
import { LoadingSelectedContent } from "../LoadingSelectedContent";
import { RunDetailCard } from "../RunDetailCard/RunDetailCard";
import { RunDetailHeader } from "../RunDetailHeader/RunDetailHeader";
@@ -17,9 +16,6 @@ import { SelectedViewLayout } from "../SelectedViewLayout";
import { SelectedScheduleActions } from "./components/SelectedScheduleActions";
import { useSelectedScheduleView } from "./useSelectedScheduleView";
const anchorStyles =
"border-b-2 border-transparent pb-1 text-sm font-medium text-slate-600 transition-colors hover:text-slate-900 hover:border-slate-900";
interface Props {
agent: LibraryAgent;
scheduleId: string;
@@ -45,13 +41,6 @@ export function SelectedScheduleView({
const breakpoint = useBreakpoint();
const isLgScreenUp = isLargeScreen(breakpoint);
function scrollToSection(id: string) {
const element = document.getElementById(id);
if (element) {
element.scrollIntoView({ behavior: "smooth", block: "start" });
}
}
if (error) {
return (
<ErrorCard
@@ -108,22 +97,6 @@ export function SelectedScheduleView({
) : null}
</div>
{/* Navigation Links */}
<AnchorLinksWrap>
<button
onClick={() => scrollToSection("schedule")}
className={anchorStyles}
>
Schedule
</button>
<button
onClick={() => scrollToSection("input")}
className={anchorStyles}
>
Your input
</button>
</AnchorLinksWrap>
{/* Schedule Section */}
<div id="schedule" className="scroll-mt-4">
<RunDetailCard title="Schedule">

View File

@@ -1,84 +0,0 @@
"use client";
import type { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo";
import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { PencilSimpleIcon } from "@phosphor-icons/react";
import { RunAgentInputs } from "../../../../modals/RunAgentInputs/RunAgentInputs";
import { useEditInputsModal } from "./useEditInputsModal";
type Props = {
agent: LibraryAgent;
schedule: GraphExecutionJobInfo;
};
export function EditInputsModal({ agent, schedule }: Props) {
const {
isOpen,
setIsOpen,
inputFields,
values,
setValues,
handleSave,
isSaving,
} = useEditInputsModal(agent, schedule);
return (
<Dialog
controlled={{ isOpen, set: setIsOpen }}
styling={{ maxWidth: "32rem" }}
>
<Dialog.Trigger>
<Button
variant="ghost"
size="small"
className="absolute -right-2 -top-2"
>
<PencilSimpleIcon className="size-4" /> Edit inputs
</Button>
</Dialog.Trigger>
<Dialog.Content>
<div className="flex flex-col gap-4">
<Text variant="h3">Edit inputs</Text>
<div className="flex flex-col gap-4">
{Object.entries(inputFields).map(([key, fieldSchema]) => (
<div key={key} className="flex flex-col gap-1.5">
<label className="text-sm font-medium">
{fieldSchema?.title || key}
</label>
<RunAgentInputs
schema={fieldSchema as any}
value={values[key]}
onChange={(v) => setValues((prev) => ({ ...prev, [key]: v }))}
/>
</div>
))}
</div>
</div>
<Dialog.Footer>
<div className="flex w-full justify-end gap-2">
<Button
variant="secondary"
size="small"
onClick={() => setIsOpen(false)}
className="min-w-32"
>
Cancel
</Button>
<Button
variant="primary"
size="small"
onClick={handleSave}
loading={isSaving}
className="min-w-32"
>
{isSaving ? "Saving…" : "Save"}
</Button>
</div>
</Dialog.Footer>
</Dialog.Content>
</Dialog>
);
}

View File

@@ -1,78 +0,0 @@
"use client";
import { useMemo, useState } from "react";
import { useQueryClient } from "@tanstack/react-query";
import { getGetV1ListExecutionSchedulesForAGraphQueryKey } from "@/app/api/__generated__/endpoints/schedules/schedules";
import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import type { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo";
import { useToast } from "@/components/molecules/Toast/use-toast";
function getAgentInputFields(agent: LibraryAgent): Record<string, any> {
const schema = agent.input_schema as unknown as {
properties?: Record<string, any>;
} | null;
if (!schema || !schema.properties) return {};
const properties = schema.properties as Record<string, any>;
const visibleEntries = Object.entries(properties).filter(
([, sub]) => !sub?.hidden,
);
return Object.fromEntries(visibleEntries);
}
export function useEditInputsModal(
agent: LibraryAgent,
schedule: GraphExecutionJobInfo,
) {
const queryClient = useQueryClient();
const { toast } = useToast();
const [isOpen, setIsOpen] = useState(false);
const [isSaving, setIsSaving] = useState(false);
const inputFields = useMemo(() => getAgentInputFields(agent), [agent]);
const [values, setValues] = useState<Record<string, any>>({
...(schedule.input_data as Record<string, any>),
});
async function handleSave() {
setIsSaving(true);
try {
const res = await fetch(`/api/schedules/${schedule.id}`, {
method: "PATCH",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ inputs: values }),
});
if (!res.ok) {
let message = "Failed to update schedule inputs";
const data = await res.json();
message = data?.message || data?.detail || message;
throw new Error(message);
}
await queryClient.invalidateQueries({
queryKey: getGetV1ListExecutionSchedulesForAGraphQueryKey(
schedule.graph_id,
),
});
toast({
title: "Schedule inputs updated",
});
setIsOpen(false);
} catch (error: any) {
toast({
title: "Failed to update schedule inputs",
description: error?.message || "An unexpected error occurred.",
variant: "destructive",
});
}
setIsSaving(false);
}
return {
isOpen,
setIsOpen,
inputFields,
values,
setValues,
handleSave,
isSaving,
} as const;
}

View File

@@ -25,9 +25,10 @@ export function SelectedScheduleActions({ agent, scheduleId }: Props) {
<Button
variant="icon"
size="icon"
aria-label="Open in builder"
as="NextLink"
href={openInBuilderHref}
target="_blank"
aria-label="View scheduled task details"
>
<EyeIcon weight="bold" size={18} className="text-zinc-700" />
</Button>

View File

@@ -4,7 +4,6 @@ import type { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExe
import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import { Input } from "@/components/atoms/Input/Input";
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip";
import {
getAgentCredentialsFields,
getAgentInputFields,
@@ -138,25 +137,13 @@ export function SelectedTemplateView({
<RunDetailCard title="Your Input">
<div className="flex flex-col gap-4">
{inputFields.map(([key, inputSubSchema]) => (
<div
<RunAgentInputs
key={key}
className="flex w-full flex-col gap-0 space-y-2"
>
<label className="flex items-center gap-1 text-sm font-medium">
{inputSubSchema.title || key}
{inputSubSchema.description && (
<InformationTooltip
description={inputSubSchema.description}
/>
)}
</label>
<RunAgentInputs
schema={inputSubSchema}
value={inputs[key] ?? inputSubSchema.default}
placeholder={inputSubSchema.description}
onChange={(value) => setInputValue(key, value)}
/>
</div>
schema={inputSubSchema}
value={inputs[key] ?? inputSubSchema.default}
placeholder={inputSubSchema.description}
onChange={(value) => setInputValue(key, value)}
/>
))}
</div>
</RunDetailCard>

View File

@@ -3,7 +3,6 @@
import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import { Input } from "@/components/atoms/Input/Input";
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip";
import {
getAgentCredentialsFields,
getAgentInputFields,
@@ -131,25 +130,13 @@ export function SelectedTriggerView({
<RunDetailCard title="Your Input">
<div className="flex flex-col gap-4">
{inputFields.map(([key, inputSubSchema]) => (
<div
<RunAgentInputs
key={key}
className="flex w-full flex-col gap-0 space-y-2"
>
<label className="flex items-center gap-1 text-sm font-medium">
{inputSubSchema.title || key}
{inputSubSchema.description && (
<InformationTooltip
description={inputSubSchema.description}
/>
)}
</label>
<RunAgentInputs
schema={inputSubSchema}
value={inputs[key] ?? inputSubSchema.default}
placeholder={inputSubSchema.description}
onChange={(value) => setInputValue(key, value)}
/>
</div>
schema={inputSubSchema}
value={inputs[key] ?? inputSubSchema.default}
placeholder={inputSubSchema.description}
onChange={(value) => setInputValue(key, value)}
/>
))}
</div>
</RunDetailCard>

View File

@@ -680,28 +680,20 @@ export function AgentRunDraftView({
{/* Regular inputs */}
{Object.entries(agentInputFields).map(([key, inputSubSchema]) => (
<div key={key} className="flex flex-col space-y-2">
<label className="flex items-center gap-1 text-sm font-medium">
{inputSubSchema.title || key}
<InformationTooltip
description={inputSubSchema.description}
/>
</label>
<RunAgentInputs
schema={inputSubSchema}
value={inputValues[key] ?? inputSubSchema.default}
placeholder={inputSubSchema.description}
onChange={(value) => {
setInputValues((obj) => ({
...obj,
[key]: value,
}));
setChangedPresetAttributes((prev) => prev.add("inputs"));
}}
data-testid={`agent-input-${key}`}
/>
</div>
<RunAgentInputs
key={key}
schema={inputSubSchema}
value={inputValues[key] ?? inputSubSchema.default}
placeholder={inputSubSchema.description}
onChange={(value) => {
setInputValues((obj) => ({
...obj,
[key]: value,
}));
setChangedPresetAttributes((prev) => prev.add("inputs"));
}}
data-testid={`agent-input-${key}`}
/>
))}
</CardContent>
</Card>

View File

@@ -1,36 +1,33 @@
"use client";
import { LaunchDarklyProvider } from "@/services/feature-flags/feature-flag-provider";
import OnboardingProvider from "@/providers/onboarding/onboarding-provider";
import { TooltipProvider } from "@/components/atoms/Tooltip/BaseTooltip";
import { SentryUserTracker } from "@/components/monitor/SentryUserTracker";
import { BackendAPIProvider } from "@/lib/autogpt-server-api/context";
import { getQueryClient } from "@/lib/react-query/queryClient";
import { QueryClientProvider } from "@tanstack/react-query";
import {
ThemeProvider as NextThemesProvider,
ThemeProviderProps,
} from "next-themes";
import { NuqsAdapter } from "nuqs/adapters/next/app";
import { TooltipProvider } from "@/components/atoms/Tooltip/BaseTooltip";
import CredentialsProvider from "@/providers/agent-credentials/credentials-provider";
import { SentryUserTracker } from "@/components/monitor/SentryUserTracker";
import OnboardingProvider from "@/providers/onboarding/onboarding-provider";
import { LaunchDarklyProvider } from "@/services/feature-flags/feature-flag-provider";
import { QueryClientProvider } from "@tanstack/react-query";
import { ThemeProvider, ThemeProviderProps } from "next-themes";
import { NuqsAdapter } from "nuqs/adapters/next/app";
export function Providers({ children, ...props }: ThemeProviderProps) {
const queryClient = getQueryClient();
return (
<QueryClientProvider client={queryClient}>
<NuqsAdapter>
<NextThemesProvider {...props}>
<BackendAPIProvider>
<SentryUserTracker />
<CredentialsProvider>
<LaunchDarklyProvider>
<OnboardingProvider>
<BackendAPIProvider>
<SentryUserTracker />
<CredentialsProvider>
<LaunchDarklyProvider>
<OnboardingProvider>
<ThemeProvider forcedTheme="light" {...props}>
<TooltipProvider>{children}</TooltipProvider>
</OnboardingProvider>
</LaunchDarklyProvider>
</CredentialsProvider>
</BackendAPIProvider>
</NextThemesProvider>
</ThemeProvider>
</OnboardingProvider>
</LaunchDarklyProvider>
</CredentialsProvider>
</BackendAPIProvider>
</NuqsAdapter>
</QueryClientProvider>
);

View File

@@ -9,16 +9,20 @@ import ReactMarkdown from "react-markdown";
type Props = {
description?: string;
iconSize?: number;
};
export function InformationTooltip({ description }: Props) {
export function InformationTooltip({ description, iconSize = 24 }: Props) {
if (!description) return null;
return (
<TooltipProvider delayDuration={400}>
<Tooltip>
<TooltipTrigger asChild>
<Info className="rounded-full p-1 hover:bg-slate-50" size={24} />
<Info
className="rounded-full p-1 hover:bg-slate-50"
size={iconSize}
/>
</TooltipTrigger>
<TooltipContent>
<ReactMarkdown

View File

@@ -0,0 +1,437 @@
import type { Meta, StoryObj } from "@storybook/nextjs";
import {
ScrollableTabs,
ScrollableTabsContent,
ScrollableTabsList,
ScrollableTabsTrigger,
} from "./ScrollableTabs";
const meta = {
title: "Molecules/ScrollableTabs",
component: ScrollableTabs,
parameters: {
layout: "fullscreen",
},
tags: ["autodocs"],
argTypes: {},
} satisfies Meta<typeof ScrollableTabs>;
export default meta;
type Story = StoryObj<typeof meta>;
function ScrollableTabsDemo() {
return (
<div className="flex flex-col gap-8 p-8">
<h2 className="text-2xl font-bold">ScrollableTabs Examples</h2>
<div className="space-y-6">
<div>
<h3 className="mb-4 text-lg font-semibold">
Short Content (Tabs Hidden)
</h3>
<div className="h-[300px] overflow-y-auto border border-zinc-200">
<ScrollableTabs defaultValue="tab1" className="h-full">
<ScrollableTabsList>
<ScrollableTabsTrigger value="tab1">
Account
</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="tab2">
Password
</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="tab3">
Settings
</ScrollableTabsTrigger>
</ScrollableTabsList>
<ScrollableTabsContent value="tab1">
<div className="p-4 text-sm">
Make changes to your account here. Click save when you&apos;re
done.
</div>
</ScrollableTabsContent>
<ScrollableTabsContent value="tab2">
<div className="p-4 text-sm">
Change your password here. After saving, you&apos;ll be logged
out.
</div>
</ScrollableTabsContent>
<ScrollableTabsContent value="tab3">
<div className="p-4 text-sm">
Update your preferences and settings here.
</div>
</ScrollableTabsContent>
</ScrollableTabs>
</div>
</div>
<div>
<h3 className="mb-4 text-lg font-semibold">
Long Content (Tabs Visible)
</h3>
<div className="h-[400px] overflow-y-auto border border-zinc-200">
<ScrollableTabs defaultValue="tab1" className="h-full">
<ScrollableTabsList>
<ScrollableTabsTrigger value="tab1">
Account
</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="tab2">
Password
</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="tab3">
Settings
</ScrollableTabsTrigger>
</ScrollableTabsList>
<ScrollableTabsContent value="tab1">
<div className="p-8 text-sm">
<h4 className="mb-4 text-lg font-semibold">
Account Settings
</h4>
<p className="mb-4">
Make changes to your account here. Click save when
you&apos;re done.
</p>
<p className="mb-4">
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed
do eiusmod tempor incididunt ut labore et dolore magna
aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris.
</p>
<p className="mb-4">
Duis aute irure dolor in reprehenderit in voluptate velit
esse cillum dolore eu fugiat nulla pariatur. Excepteur sint
occaecat cupidatat non proident.
</p>
<p>
Sed ut perspiciatis unde omnis iste natus error sit
voluptatem accusantium doloremque laudantium, totam rem
aperiam.
</p>
</div>
</ScrollableTabsContent>
<ScrollableTabsContent value="tab2">
<div className="p-8 text-sm">
<h4 className="mb-4 text-lg font-semibold">
Password Settings
</h4>
<p className="mb-4">
Change your password here. After saving, you&apos;ll be
logged out.
</p>
<p className="mb-4">
At vero eos et accusamus et iusto odio dignissimos ducimus
qui blanditiis praesentium voluptatum deleniti atque
corrupti quos dolores et quas molestias excepturi sint
occaecati cupiditate.
</p>
<p className="mb-4">
Et harum quidem rerum facilis est et expedita distinctio.
Nam libero tempore, cum soluta nobis est eligendi optio
cumque nihil impedit quo minus.
</p>
<p>
Temporibus autem quibusdam et aut officiis debitis aut rerum
necessitatibus saepe eveniet ut et voluptates repudiandae
sint.
</p>
</div>
</ScrollableTabsContent>
<ScrollableTabsContent value="tab3">
<div className="p-8 text-sm">
<h4 className="mb-4 text-lg font-semibold">
General Settings
</h4>
<p className="mb-4">
Update your preferences and settings here.
</p>
<p className="mb-4">
Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut
odit aut fugit, sed quia consequuntur magni dolores eos qui
ratione voluptatem sequi nesciunt.
</p>
<p className="mb-4">
Neque porro quisquam est, qui dolorem ipsum quia dolor sit
amet, consectetur, adipisci velit, sed quia non numquam eius
modi tempora incidunt ut labore et dolore magnam aliquam
quaerat voluptatem.
</p>
<p>
Ut enim ad minima veniam, quis nostrum exercitationem ullam
corporis suscipit laboriosam, nisi ut aliquid ex ea commodi
consequatur.
</p>
</div>
</ScrollableTabsContent>
</ScrollableTabs>
</div>
</div>
<div>
<h3 className="mb-4 text-lg font-semibold">Many Tabs</h3>
<div className="h-[500px] overflow-y-auto border border-zinc-200">
<ScrollableTabs defaultValue="overview" className="h-full">
<ScrollableTabsList>
<ScrollableTabsTrigger value="overview">
Overview
</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="analytics">
Analytics
</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="reports">
Reports
</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="notifications">
Notifications
</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="integrations">
Integrations
</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="billing">
Billing
</ScrollableTabsTrigger>
</ScrollableTabsList>
<ScrollableTabsContent value="overview">
<div className="p-8 text-sm">
<h4 className="mb-4 text-lg font-semibold">
Dashboard Overview
</h4>
<p className="mb-4">
Dashboard overview with key metrics and recent activity.
</p>
<p className="mb-4">
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed
do eiusmod tempor incididunt ut labore et dolore magna
aliqua.
</p>
<p>
Ut enim ad minim veniam, quis nostrud exercitation ullamco
laboris nisi ut aliquip ex ea commodo consequat.
</p>
</div>
</ScrollableTabsContent>
<ScrollableTabsContent value="analytics">
<div className="p-8 text-sm">
<h4 className="mb-4 text-lg font-semibold">Analytics</h4>
<p className="mb-4">
Detailed analytics and performance metrics.
</p>
<p className="mb-4">
Duis aute irure dolor in reprehenderit in voluptate velit
esse cillum dolore eu fugiat nulla pariatur.
</p>
<p>
Excepteur sint occaecat cupidatat non proident, sunt in
culpa qui officia deserunt mollit anim id est laborum.
</p>
</div>
</ScrollableTabsContent>
<ScrollableTabsContent value="reports">
<div className="p-8 text-sm">
<h4 className="mb-4 text-lg font-semibold">Reports</h4>
<p className="mb-4">
Generate and view reports for your account.
</p>
<p className="mb-4">
Sed ut perspiciatis unde omnis iste natus error sit
voluptatem accusantium doloremque laudantium.
</p>
<p>
Totam rem aperiam, eaque ipsa quae ab illo inventore
veritatis et quasi architecto beatae vitae dicta sunt
explicabo.
</p>
</div>
</ScrollableTabsContent>
<ScrollableTabsContent value="notifications">
<div className="p-8 text-sm">
<h4 className="mb-4 text-lg font-semibold">Notifications</h4>
<p className="mb-4">Manage your notification preferences.</p>
<p className="mb-4">
Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut
odit aut fugit.
</p>
<p>
Sed quia consequuntur magni dolores eos qui ratione
voluptatem sequi nesciunt.
</p>
</div>
</ScrollableTabsContent>
<ScrollableTabsContent value="integrations">
<div className="p-8 text-sm">
<h4 className="mb-4 text-lg font-semibold">Integrations</h4>
<p className="mb-4">
Connect and manage third-party integrations.
</p>
<p className="mb-4">
Neque porro quisquam est, qui dolorem ipsum quia dolor sit
amet.
</p>
<p>
Consectetur, adipisci velit, sed quia non numquam eius modi
tempora incidunt.
</p>
</div>
</ScrollableTabsContent>
<ScrollableTabsContent value="billing">
<div className="p-8 text-sm">
<h4 className="mb-4 text-lg font-semibold">Billing</h4>
<p className="mb-4">
View and manage your billing information.
</p>
<p className="mb-4">
Ut enim ad minima veniam, quis nostrum exercitationem ullam
corporis suscipit laboriosam.
</p>
<p>
Nisi ut aliquid ex ea commodi consequatur? Quis autem vel
eum iure reprehenderit qui in ea voluptate velit esse.
</p>
</div>
</ScrollableTabsContent>
</ScrollableTabs>
</div>
</div>
</div>
</div>
);
}
export const Default = {
render: () => <ScrollableTabsDemo />,
} satisfies Story;
export const ShortContent = {
render: () => (
<div className="p-8">
<div className="h-[200px] overflow-y-auto border border-zinc-200">
<ScrollableTabs defaultValue="account" className="h-full">
<ScrollableTabsList>
<ScrollableTabsTrigger value="account">
Account
</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="password">
Password
</ScrollableTabsTrigger>
</ScrollableTabsList>
<ScrollableTabsContent value="account">
<div className="p-4 text-sm">
Make changes to your account here. Click save when you&apos;re
done.
</div>
</ScrollableTabsContent>
<ScrollableTabsContent value="password">
<div className="p-4 text-sm">
Change your password here. After saving, you&apos;ll be logged
out.
</div>
</ScrollableTabsContent>
</ScrollableTabs>
</div>
</div>
),
} satisfies Story;
export const LongContent = {
render: () => (
<div className="p-8">
<div className="h-[600px] overflow-y-auto border border-zinc-200">
<ScrollableTabs defaultValue="tab1" className="h-full">
<ScrollableTabsList>
<ScrollableTabsTrigger value="tab1">Account</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="tab2">Password</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="tab3">Settings</ScrollableTabsTrigger>
</ScrollableTabsList>
<ScrollableTabsContent value="tab1">
<div className="p-8 text-sm">
<h4 className="mb-4 text-lg font-semibold">Account Settings</h4>
<p className="mb-4">
Make changes to your account here. Click save when you&apos;re
done.
</p>
<p className="mb-4">
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do
eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut
enim ad minim veniam, quis nostrud exercitation ullamco laboris
nisi ut aliquip ex ea commodo consequat.
</p>
<p className="mb-4">
Duis aute irure dolor in reprehenderit in voluptate velit esse
cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat
cupidatat non proident, sunt in culpa qui officia deserunt
mollit anim id est laborum.
</p>
<p className="mb-4">
Sed ut perspiciatis unde omnis iste natus error sit voluptatem
accusantium doloremque laudantium, totam rem aperiam, eaque ipsa
quae ab illo inventore veritatis et quasi architecto beatae
vitae dicta sunt explicabo.
</p>
<p>
Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit
aut fugit, sed quia consequuntur magni dolores eos qui ratione
voluptatem sequi nesciunt.
</p>
</div>
</ScrollableTabsContent>
<ScrollableTabsContent value="tab2">
<div className="p-8 text-sm">
<h4 className="mb-4 text-lg font-semibold">Password Settings</h4>
<p className="mb-4">
Change your password here. After saving, you&apos;ll be logged
out.
</p>
<p className="mb-4">
At vero eos et accusamus et iusto odio dignissimos ducimus qui
blanditiis praesentium voluptatum deleniti atque corrupti quos
dolores et quas molestias excepturi sint occaecati cupiditate
non provident.
</p>
<p className="mb-4">
Similique sunt in culpa qui officia deserunt mollitia animi, id
est laborum et dolorum fuga. Et harum quidem rerum facilis est
et expedita distinctio.
</p>
<p className="mb-4">
Nam libero tempore, cum soluta nobis est eligendi optio cumque
nihil impedit quo minus id quod maxime placeat facere possimus,
omnis voluptas assumenda est, omnis dolor repellendus.
</p>
<p>
Temporibus autem quibusdam et aut officiis debitis aut rerum
necessitatibus saepe eveniet ut et voluptates repudiandae sint
et molestiae non recusandae.
</p>
</div>
</ScrollableTabsContent>
<ScrollableTabsContent value="tab3">
<div className="p-8 text-sm">
<h4 className="mb-4 text-lg font-semibold">General Settings</h4>
<p className="mb-4">Update your preferences and settings here.</p>
<p className="mb-4">
Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet,
consectetur, adipisci velit, sed quia non numquam eius modi
tempora incidunt ut labore et dolore magnam aliquam quaerat
voluptatem.
</p>
<p className="mb-4">
Ut enim ad minima veniam, quis nostrum exercitationem ullam
corporis suscipit laboriosam, nisi ut aliquid ex ea commodi
consequatur? Quis autem vel eum iure reprehenderit qui in ea
voluptate velit esse quam nihil molestiae consequatur.
</p>
<p className="mb-4">
Vel illum qui dolorem eum fugiat quo voluptas nulla pariatur? At
vero eos et accusamus et iusto odio dignissimos ducimus qui
blanditiis praesentium voluptatum deleniti atque corrupti quos
dolores.
</p>
<p>
Et quas molestias excepturi sint occaecati cupiditate non
provident, similique sunt in culpa qui officia deserunt mollitia
animi, id est laborum et dolorum fuga.
</p>
</div>
</ScrollableTabsContent>
</ScrollableTabs>
</div>
</div>
),
} satisfies Story;

View File

@@ -0,0 +1,59 @@
"use client";
import { cn } from "@/lib/utils";
import { Children } from "react";
import { ScrollableTabsContent } from "./components/ScrollableTabsContent";
import { ScrollableTabsList } from "./components/ScrollableTabsList";
import { ScrollableTabsTrigger } from "./components/ScrollableTabsTrigger";
import { ScrollableTabsContext } from "./context";
import { findContentElements, findListElement } from "./helpers";
import { useScrollableTabsInternal } from "./useScrollableTabs";
interface Props {
children?: React.ReactNode;
className?: string;
defaultValue?: string;
}
export function ScrollableTabs({ children, className, defaultValue }: Props) {
const {
activeValue,
setActiveValue,
registerContent,
scrollToSection,
scrollContainer,
contentContainerRef,
} = useScrollableTabsInternal({ defaultValue });
const childrenArray = Children.toArray(children);
const listElement = findListElement(childrenArray);
const contentElements = findContentElements(childrenArray);
return (
<ScrollableTabsContext.Provider
value={{
activeValue,
setActiveValue,
registerContent,
scrollToSection,
scrollContainer,
}}
>
<div className={cn("relative flex flex-col", className)}>
{listElement}
<div
ref={(node) => {
if (contentContainerRef) {
contentContainerRef.current = node;
}
}}
className="max-h-[64rem] overflow-y-auto scrollbar-thin scrollbar-track-transparent scrollbar-thumb-zinc-300 dark:scrollbar-thumb-zinc-700"
>
<div className="min-h-full pb-[200px]">{contentElements}</div>
</div>
</div>
</ScrollableTabsContext.Provider>
);
}
export { ScrollableTabsContent, ScrollableTabsList, ScrollableTabsTrigger };

View File

@@ -0,0 +1,48 @@
"use client";
import { cn } from "@/lib/utils";
import * as React from "react";
import { useScrollableTabs } from "../context";
interface Props extends React.HTMLAttributes<HTMLDivElement> {
value: string;
}
export const ScrollableTabsContent = React.forwardRef<HTMLDivElement, Props>(
function ScrollableTabsContent(
{ className, value, children, ...props },
ref,
) {
const { registerContent } = useScrollableTabs();
const contentRef = React.useRef<HTMLDivElement>(null);
React.useEffect(() => {
if (contentRef.current) {
registerContent(value, contentRef.current);
}
return () => {
registerContent(value, null);
};
}, [value, registerContent]);
return (
<div
ref={(node) => {
if (typeof ref === "function") ref(node);
else if (ref) ref.current = node;
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-ignore
contentRef.current = node;
}}
data-scrollable-tab-content
data-value={value}
className={cn("focus-visible:outline-none", className)}
{...props}
>
{children}
</div>
);
},
);
ScrollableTabsContent.displayName = "ScrollableTabsContent";

View File

@@ -0,0 +1,52 @@
"use client";
import { cn } from "@/lib/utils";
import * as React from "react";
import { useScrollableTabs } from "../context";
export const ScrollableTabsList = React.forwardRef<
HTMLDivElement,
React.HTMLAttributes<HTMLDivElement>
>(function ScrollableTabsList({ className, children, ...props }, ref) {
const { activeValue } = useScrollableTabs();
const [activeTabElement, setActiveTabElement] =
React.useState<HTMLElement | null>(null);
React.useEffect(() => {
const activeButton = Array.from(
document.querySelectorAll<HTMLElement>(
'[data-scrollable-tab-trigger][data-value="' + activeValue + '"]',
),
)[0];
if (activeButton) {
setActiveTabElement(activeButton);
}
}, [activeValue]);
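  // Note: the query above spans the whole document, so this assumes a single
  // ScrollableTabs instance per page; duplicate tab values across instances
  // would resolve to the first matching trigger.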
return (
<div className="relative" ref={ref}>
<div
className={cn(
"inline-flex w-full items-center justify-start border-b border-zinc-100",
className,
)}
{...props}
>
{children}
</div>
{activeTabElement && (
<div
className="transition-left transition-right absolute bottom-0 h-0.5 bg-purple-600 duration-200 ease-in-out"
style={{
left: activeTabElement.offsetLeft,
width: activeTabElement.offsetWidth,
willChange: "left, width",
}}
/>
)}
</div>
);
});
ScrollableTabsList.displayName = "ScrollableTabsList";

View File

@@ -0,0 +1,53 @@
"use client";
import { cn } from "@/lib/utils";
import * as React from "react";
import { useScrollableTabs } from "../context";
interface Props extends React.ButtonHTMLAttributes<HTMLButtonElement> {
value: string;
}
export const ScrollableTabsTrigger = React.forwardRef<HTMLButtonElement, Props>(
function ScrollableTabsTrigger(
{ className, value, children, ...props },
ref,
) {
const { activeValue, scrollToSection } = useScrollableTabs();
const elementRef = React.useRef<HTMLButtonElement>(null);
const isActive = activeValue === value;
function handleClick(e: React.MouseEvent<HTMLButtonElement>) {
e.preventDefault();
e.stopPropagation();
scrollToSection(value);
props.onClick?.(e);
}
return (
<button
type="button"
ref={(node) => {
if (typeof ref === "function") ref(node);
else if (ref) ref.current = node;
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-ignore
elementRef.current = node;
}}
data-scrollable-tab-trigger
data-value={value}
onClick={handleClick}
className={cn(
"relative inline-flex items-center justify-center whitespace-nowrap px-3 py-3 font-sans text-[0.875rem] font-medium leading-[1.5rem] text-zinc-700 transition-all focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-neutral-400 focus-visible:ring-offset-2 disabled:pointer-events-none disabled:opacity-50",
isActive && "text-purple-600",
className,
)}
{...props}
>
{children}
</button>
);
},
);
ScrollableTabsTrigger.displayName = "ScrollableTabsTrigger";

View File

@@ -0,0 +1,22 @@
import * as React from "react";
import { createContext, useContext } from "react";
interface ScrollableTabsContextValue {
activeValue: string | null;
setActiveValue: React.Dispatch<React.SetStateAction<string | null>>;
registerContent: (value: string, element: HTMLElement | null) => void;
scrollToSection: (value: string) => void;
scrollContainer: HTMLElement | null;
}
export const ScrollableTabsContext = createContext<
ScrollableTabsContextValue | undefined
>(undefined);
export function useScrollableTabs() {
const context = useContext(ScrollableTabsContext);
if (!context) {
throw new Error("useScrollableTabs must be used within a ScrollableTabs");
}
return context;
}

View File

@@ -0,0 +1,48 @@
import * as React from "react";
const HEADER_OFFSET = 100;
export function calculateScrollPosition(
elementRect: DOMRect,
containerRect: DOMRect,
currentScrollTop: number,
): number {
const elementTopRelativeToContainer =
elementRect.top - containerRect.top + currentScrollTop - HEADER_OFFSET;
return Math.max(0, elementTopRelativeToContainer);
}
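// Worked example (assumed numbers): element top at viewport y=500, container
// top at y=100, container already scrolled 200px:
// 500 - 100 + 200 - 100 (HEADER_OFFSET) = 500, clamped below at 0.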
function hasDisplayName(
type: unknown,
displayName: string,
): type is { displayName: string } {
return (
typeof type === "object" &&
type !== null &&
"displayName" in type &&
(type as { displayName: unknown }).displayName === displayName
);
}
export function findListElement(
children: React.ReactNode[],
): React.ReactElement | undefined {
return children.find(
(child) =>
React.isValidElement(child) &&
hasDisplayName(child.type, "ScrollableTabsList"),
) as React.ReactElement | undefined;
}
export function findContentElements(
children: React.ReactNode[],
): React.ReactNode[] {
return children.filter(
(child) =>
!(
React.isValidElement(child) &&
hasDisplayName(child.type, "ScrollableTabsList")
),
);
}

View File

@@ -0,0 +1,60 @@
import { useCallback, useRef, useState } from "react";
import { calculateScrollPosition } from "./helpers";
interface Args {
defaultValue?: string;
}
export function useScrollableTabsInternal({ defaultValue }: Args) {
const [activeValue, setActiveValue] = useState<string | null>(
defaultValue || null,
);
const contentRefs = useRef<Map<string, HTMLElement>>(new Map());
const contentContainerRef = useRef<HTMLDivElement | null>(null);
function registerContent(value: string, element: HTMLElement | null) {
if (element) {
contentRefs.current.set(value, element);
} else {
contentRefs.current.delete(value);
}
}
function scrollToSection(value: string) {
const element = contentRefs.current.get(value);
const scrollContainer = contentContainerRef.current;
if (!element || !scrollContainer) return;
setActiveValue(value);
const containerRect = scrollContainer.getBoundingClientRect();
const elementRect = element.getBoundingClientRect();
const currentScrollTop = scrollContainer.scrollTop;
const scrollTop = calculateScrollPosition(
elementRect,
containerRect,
currentScrollTop,
);
const maxScrollTop =
scrollContainer.scrollHeight - scrollContainer.clientHeight;
const clampedScrollTop = Math.min(Math.max(0, scrollTop), maxScrollTop);
scrollContainer.scrollTo({
top: clampedScrollTop,
behavior: "smooth",
});
}
const memoizedRegisterContent = useCallback(registerContent, []);
const memoizedScrollToSection = useCallback(scrollToSection, []);
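  // Empty dependency arrays are safe here: both functions only touch refs and
  // the state setter, all of which are stable across renders.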
return {
activeValue,
setActiveValue,
registerContent: memoizedRegisterContent,
scrollToSection: memoizedScrollToSection,
scrollContainer: contentContainerRef.current,
contentContainerRef,
};
}

View File

@@ -23,6 +23,7 @@ import {
TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import { cn } from "@/lib/utils";
import { BlockUIType } from "@/app/(platform)/build/components/types";
type TypeOption = {
type: string;
@@ -47,7 +48,14 @@ export const AnyOfField = ({
onBlur,
onFocus,
}: FieldProps) => {
const handleId = generateHandleId(idSchema.$id ?? "");
const handleId =
formContext.uiType === BlockUIType.AGENT
? (idSchema.$id ?? "")
.split("_")
.filter((p) => p !== "root" && p !== "properties" && p.length > 0)
.join("_") || ""
: generateHandleId(idSchema.$id ?? "");
const updatedFormContext = { ...formContext, fromAnyOf: true };
const { nodeId, showHandles = true } = updatedFormContext;

View File

@@ -58,7 +58,15 @@ const FieldTemplate: React.FC<FieldTemplateProps> = ({
let handleId = null;
if (!isArrayItem) {
handleId = generateHandleId(fieldId);
if (uiType === BlockUIType.AGENT) {
const parts = fieldId.split("_");
const filtered = parts.filter(
(p) => p !== "root" && p !== "properties" && p.length > 0,
);
handleId = filtered.join("_") || "";
} else {
handleId = generateHandleId(fieldId);
}
} else {
handleId = arrayFieldHandleId;
}

View File

@@ -1,10 +1,10 @@
import scrollbar from "tailwind-scrollbar";
import type { Config } from "tailwindcss";
import tailwindcssAnimate from "tailwindcss-animate";
import scrollbar from "tailwind-scrollbar";
import { colors } from "./src/components/styles/colors";
const config = {
darkMode: ["class"],
darkMode: ["class", ".dark-mode"], // ignore dark: prefix classes for now until we fully support dark mode
content: ["./src/**/*.{ts,tsx}"],
prefix: "",
theme: {