Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-04-08 03:00:28 -04:00)
Merge branch 'dev' into swiftyos/caching-pt2
@@ -13,6 +13,11 @@ from backend.data.block import (
    BlockSchema,
    BlockType,
)
from backend.data.dynamic_fields import (
    extract_base_field_name,
    get_dynamic_field_description,
    is_dynamic_field,
)
from backend.data.model import NodeExecutionStats, SchemaField
from backend.util import json
from backend.util.clients import get_database_manager_async_client
@@ -277,6 +282,7 @@ class SmartDecisionMakerBlock(Block):

    @staticmethod
    def cleanup(s: str):
        """Clean up block names for use as tool function names."""
        return re.sub(r"[^a-zA-Z0-9_-]", "_", s).lower()
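For reference, a quick sketch of what this cleanup does (behavior read off the regex above; the example lines are illustrative, not part of the commit): every character outside [a-zA-Z0-9_-] becomes an underscore and the result is lowercased, which is how a dynamic pin like values_#_name turns into the API-safe key values___name.

    SmartDecisionMakerBlock.cleanup("values_#_name")     # → "values___name"
    SmartDecisionMakerBlock.cleanup("Create Dictionary")  # → "create_dictionary"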

    @staticmethod
@@ -304,41 +310,66 @@ class SmartDecisionMakerBlock(Block):
        }
        sink_block_input_schema = block.input_schema
        properties = {}
        field_mapping = {}  # clean_name -> original_name

        for link in links:
            sink_name = SmartDecisionMakerBlock.cleanup(link.sink_name)
            field_name = link.sink_name
            is_dynamic = is_dynamic_field(field_name)
            # Clean property key to ensure Anthropic API compatibility for ALL fields
            clean_field_name = SmartDecisionMakerBlock.cleanup(field_name)
            field_mapping[clean_field_name] = field_name

            # Handle dynamic fields (e.g., values_#_*, items_$_*, etc.)
            # These are fields that get merged by the executor into their base field
            if (
                "_#_" in link.sink_name
                or "_$_" in link.sink_name
                or "_@_" in link.sink_name
            ):
                # For dynamic fields, provide a generic string schema
                # The executor will handle merging these into the appropriate structure
                properties[sink_name] = {
            if is_dynamic:
                # For dynamic fields, use cleaned name but preserve original in description
                properties[clean_field_name] = {
                    "type": "string",
                    "description": f"Dynamic value for {link.sink_name}",
                    "description": get_dynamic_field_description(field_name),
                }
            else:
                # For regular fields, use the block's schema
                # For regular fields, use the block's schema directly
                try:
                    properties[sink_name] = sink_block_input_schema.get_field_schema(
                        link.sink_name
                    properties[clean_field_name] = (
                        sink_block_input_schema.get_field_schema(field_name)
                    )
                except (KeyError, AttributeError):
                    # If the field doesn't exist in the schema, provide a generic schema
                    properties[sink_name] = {
                    # If field doesn't exist in schema, provide a generic one
                    properties[clean_field_name] = {
                        "type": "string",
                        "description": f"Value for {link.sink_name}",
                        "description": f"Value for {field_name}",
                    }

        # Build the parameters schema using a single unified path
        base_schema = block.input_schema.jsonschema()
        base_required = set(base_schema.get("required", []))

        # Compute required fields at the leaf level:
        # - If a linked field is dynamic and its base is required in the block schema, require the leaf
        # - If a linked field is regular and is required in the block schema, require the leaf
        required_fields: set[str] = set()
        for link in links:
            field_name = link.sink_name
            is_dynamic = is_dynamic_field(field_name)
            # Always use cleaned field name for property key (Anthropic API compliance)
            clean_field_name = SmartDecisionMakerBlock.cleanup(field_name)

            if is_dynamic:
                base_name = extract_base_field_name(field_name)
                if base_name in base_required:
                    required_fields.add(clean_field_name)
            else:
                if field_name in base_required:
                    required_fields.add(clean_field_name)

        tool_function["parameters"] = {
            **block.input_schema.jsonschema(),
            "type": "object",
            "properties": properties,
            "additionalProperties": False,
            "required": sorted(required_fields),
        }

        # Store field mapping for later use in output processing
        tool_function["_field_mapping"] = field_mapping

        return {"type": "function", "function": tool_function}
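To make the result concrete, here is a sketch (assembled from this PR's tests, not an excerpt of the diff) of the parameters object the code above builds when two dynamic dictionary pins of a CreateDictionaryBlock are linked:

    {
        "type": "object",
        "properties": {
            "values___name": {
                "type": "string",
                "description": "Dictionary field 'name' for base field 'values' (values['name'])",
            },
            "values___age": {
                "type": "string",
                "description": "Dictionary field 'age' for base field 'values' (values['age'])",
            },
        },
        "additionalProperties": False,
        "required": [],  # leaves appear here only if the base field "values" is required
    }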

    @staticmethod
@@ -382,13 +413,12 @@ class SmartDecisionMakerBlock(Block):
            sink_block_properties = sink_block_input_schema.get("properties", {}).get(
                link.sink_name, {}
            )
            sink_name = SmartDecisionMakerBlock.cleanup(link.sink_name)
            description = (
                sink_block_properties["description"]
                if "description" in sink_block_properties
                else f"The {link.sink_name} of the tool"
            )
            properties[sink_name] = {
            properties[link.sink_name] = {
                "type": "string",
                "description": description,
                "default": json.dumps(sink_block_properties.get("default", None)),
@@ -404,24 +434,17 @@ class SmartDecisionMakerBlock(Block):
        return {"type": "function", "function": tool_function}

    @staticmethod
    async def _create_function_signature(node_id: str) -> list[dict[str, Any]]:
    async def _create_function_signature(
        node_id: str,
    ) -> list[dict[str, Any]]:
        """
        Creates function signatures for tools linked to a specified node within a graph.

        This method filters the graph links to identify those that are tools and are
        connected to the given node_id. It then constructs function signatures for each
        tool based on the metadata and input schema of the linked nodes.
        Creates function signatures for connected tools.

        Args:
            node_id: The node_id for which to create function signatures.

        Returns:
            list[dict[str, Any]]: A list of dictionaries, each representing a function signature
            for a tool, including its name, description, and parameters.

        Raises:
            ValueError: If no tool links are found for the specified node_id, or if a sink node
            or its metadata cannot be found.
            List of function signatures for tools
        """
        db_client = get_database_manager_async_client()
        tools = [
@@ -446,20 +469,116 @@ class SmartDecisionMakerBlock(Block):
                raise ValueError(f"Sink node not found: {links[0].sink_id}")

            if sink_node.block_id == AgentExecutorBlock().id:
                return_tool_functions.append(
                tool_func = (
                    await SmartDecisionMakerBlock._create_agent_function_signature(
                        sink_node, links
                    )
                )
                return_tool_functions.append(tool_func)
            else:
                return_tool_functions.append(
                tool_func = (
                    await SmartDecisionMakerBlock._create_block_function_signature(
                        sink_node, links
                    )
                )
                return_tool_functions.append(tool_func)

        return return_tool_functions

    async def _attempt_llm_call_with_validation(
        self,
        credentials: llm.APIKeyCredentials,
        input_data: Input,
        current_prompt: list[dict],
        tool_functions: list[dict[str, Any]],
    ):
        """
        Attempt a single LLM call with tool validation.

        Returns the response if successful, raises ValueError if validation fails.
        """
        resp = await llm.llm_call(
            credentials=credentials,
            llm_model=input_data.model,
            prompt=current_prompt,
            max_tokens=input_data.max_tokens,
            tools=tool_functions,
            ollama_host=input_data.ollama_host,
            parallel_tool_calls=input_data.multiple_tool_calls,
        )

        # Track LLM usage stats per call
        self.merge_stats(
            NodeExecutionStats(
                input_token_count=resp.prompt_tokens,
                output_token_count=resp.completion_tokens,
                llm_call_count=1,
            )
        )

        if not resp.tool_calls:
            return resp
        validation_errors_list: list[str] = []
        for tool_call in resp.tool_calls:
            tool_name = tool_call.function.name
            try:
                tool_args = json.loads(tool_call.function.arguments)
            except Exception as e:
                validation_errors_list.append(
                    f"Tool call '{tool_name}' has invalid JSON arguments: {e}"
                )
                continue

            # Find the tool definition to get the expected arguments
            tool_def = next(
                (
                    tool
                    for tool in tool_functions
                    if tool["function"]["name"] == tool_name
                ),
                None,
            )
            if tool_def is None and len(tool_functions) == 1:
                tool_def = tool_functions[0]

            # Get parameters schema from tool definition
            if (
                tool_def
                and "function" in tool_def
                and "parameters" in tool_def["function"]
            ):
                parameters = tool_def["function"]["parameters"]
                expected_args = parameters.get("properties", {})
                required_params = set(parameters.get("required", []))
            else:
                expected_args = {arg: {} for arg in tool_args.keys()}
                required_params = set()

            # Validate tool call arguments
            provided_args = set(tool_args.keys())
            expected_args_set = set(expected_args.keys())

            # Check for unexpected arguments (typos)
            unexpected_args = provided_args - expected_args_set
            # Only check for missing REQUIRED parameters
            missing_required_args = required_params - provided_args

            if unexpected_args or missing_required_args:
                error_msg = f"Tool call '{tool_name}' has parameter errors:"
                if unexpected_args:
                    error_msg += f" Unknown parameters: {sorted(unexpected_args)}."
                if missing_required_args:
                    error_msg += f" Missing required parameters: {sorted(missing_required_args)}."
                error_msg += f" Expected parameters: {sorted(expected_args_set)}."
                if required_params:
                    error_msg += f" Required parameters: {sorted(required_params)}."
                validation_errors_list.append(error_msg)

        if validation_errors_list:
            raise ValueError("; ".join(validation_errors_list))

        return resp
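A sketch of the failure path (message format taken from the string-building above; the tool and argument names are hypothetical): if a tool expects a required keyword parameter but the model sends keywrd, the method raises

    ValueError(
        "Tool call 'search_keywords' has parameter errors:"
        " Unknown parameters: ['keywrd']."
        " Missing required parameters: ['keyword']."
        " Expected parameters: ['keyword']."
        " Required parameters: ['keyword']."
    )

and the caller's retry loop in run() feeds that text back to the model as user feedback before the next attempt.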

    async def run(
        self,
        input_data: Input,
@@ -482,27 +601,19 @@ class SmartDecisionMakerBlock(Block):
        if pending_tool_calls and input_data.last_tool_output is None:
            raise ValueError(f"Tool call requires an output for {pending_tool_calls}")

        # Only assign the last tool output to the first pending tool call
        tool_output = []
        if pending_tool_calls and input_data.last_tool_output is not None:
            # Get the first pending tool call ID
            first_call_id = next(iter(pending_tool_calls.keys()))
            tool_output.append(
                _create_tool_response(first_call_id, input_data.last_tool_output)
            )

            # Add tool output to prompt right away
            prompt.extend(tool_output)

            # Check if there are still pending tool calls after handling the first one
            remaining_pending_calls = get_pending_tool_calls(prompt)

            # If there are still pending tool calls, yield the conversation and return early
            if remaining_pending_calls:
                yield "conversations", prompt
                return

        # Fallback on adding tool output in the conversation history as user prompt.
        elif input_data.last_tool_output:
            logger.error(
                f"[SmartDecisionMakerBlock-node_exec_id={node_exec_id}] "
@@ -535,121 +646,42 @@ class SmartDecisionMakerBlock(Block):
        ):
            prompt.append({"role": "user", "content": prefix + input_data.prompt})

        # Use retry decorator for LLM calls with validation
        from backend.util.retry import create_retry_decorator
        current_prompt = list(prompt)
        max_attempts = max(1, int(input_data.retry))
        response = None

        # Create retry decorator that excludes ValueError from retry (for non-LLM errors)
        llm_retry = create_retry_decorator(
            max_attempts=input_data.retry,
            exclude_exceptions=(),  # Don't exclude ValueError - we want to retry validation failures
            context="SmartDecisionMaker LLM call",
        )

        @llm_retry
        async def call_llm_with_validation():
            response = await llm.llm_call(
                credentials=credentials,
                llm_model=input_data.model,
                prompt=prompt,
                max_tokens=input_data.max_tokens,
                tools=tool_functions,
                ollama_host=input_data.ollama_host,
                parallel_tool_calls=input_data.multiple_tool_calls,
            )

            # Track LLM usage stats
            self.merge_stats(
                NodeExecutionStats(
                    input_token_count=response.prompt_tokens,
                    output_token_count=response.completion_tokens,
                    llm_call_count=1,
        last_error = None
        for attempt in range(max_attempts):
            try:
                response = await self._attempt_llm_call_with_validation(
                    credentials, input_data, current_prompt, tool_functions
                )
                )
                break

            if not response.tool_calls:
                return response, None  # No tool calls, return response

            # Validate all tool calls before proceeding
            validation_errors = []
            for tool_call in response.tool_calls:
                tool_name = tool_call.function.name
                tool_args = json.loads(tool_call.function.arguments)

                # Find the tool definition to get the expected arguments
                tool_def = next(
                    (
                        tool
                        for tool in tool_functions
                        if tool["function"]["name"] == tool_name
                    ),
                    None,
                )

                # Get parameters schema from tool definition
                if (
                    tool_def
                    and "function" in tool_def
                    and "parameters" in tool_def["function"]
                ):
                    parameters = tool_def["function"]["parameters"]
                    expected_args = parameters.get("properties", {})
                    required_params = set(parameters.get("required", []))
                else:
                    expected_args = {arg: {} for arg in tool_args.keys()}
                    required_params = set()

                # Validate tool call arguments
                provided_args = set(tool_args.keys())
                expected_args_set = set(expected_args.keys())

                # Check for unexpected arguments (typos)
                unexpected_args = provided_args - expected_args_set
                # Only check for missing REQUIRED parameters
                missing_required_args = required_params - provided_args

                if unexpected_args or missing_required_args:
                    error_msg = f"Tool call '{tool_name}' has parameter errors:"
                    if unexpected_args:
                        error_msg += f" Unknown parameters: {sorted(unexpected_args)}."
                    if missing_required_args:
                        error_msg += f" Missing required parameters: {sorted(missing_required_args)}."
                    error_msg += f" Expected parameters: {sorted(expected_args_set)}."
                    if required_params:
                        error_msg += f" Required parameters: {sorted(required_params)}."
                    validation_errors.append(error_msg)

            # If validation failed, add feedback and raise for retry
            if validation_errors:
                # Add the failed response to conversation
                prompt.append(_convert_raw_response_to_dict(response.raw_response))

                # Add error feedback for retry
            except ValueError as e:
                last_error = e
                error_feedback = (
                    "Your tool call had parameter errors. Please fix the following issues and try again:\n"
                    + "\n".join(f"- {error}" for error in validation_errors)
                    + "\n\nPlease make sure to use the exact parameter names as specified in the function schema."
                    + f"- {str(e)}\n"
                    + "\nPlease make sure to use the exact parameter names as specified in the function schema."
                )
                prompt.append({"role": "user", "content": error_feedback})
                current_prompt = list(current_prompt) + [
                    {"role": "user", "content": error_feedback}
                ]

                raise ValueError(
                    f"Tool call validation failed: {'; '.join(validation_errors)}"
                )

            return response, validation_errors

        # Call the LLM with retry logic
        response, validation_errors = await call_llm_with_validation()
        if response is None:
            raise last_error or ValueError(
                "Failed to get valid response after all retry attempts"
            )

        if not response.tool_calls:
            yield "finished", response.response
            return

        # If we get here, validation passed - yield tool outputs
        for tool_call in response.tool_calls:
            tool_name = tool_call.function.name
            tool_args = json.loads(tool_call.function.arguments)

            # Get expected arguments (already validated above)
            tool_def = next(
                (
                    tool
@@ -667,16 +699,36 @@ class SmartDecisionMakerBlock(Block):
            else:
                expected_args = {arg: {} for arg in tool_args.keys()}

            # Yield provided arguments, use .get() for optional parameters
            for arg_name in expected_args:
                yield f"tools_^_{tool_name}_~_{arg_name}", tool_args.get(arg_name)
            # Get field mapping from tool definition
            field_mapping = (
                tool_def.get("function", {}).get("_field_mapping", {})
                if tool_def
                else {}
            )

            for clean_arg_name in expected_args:
                # arg_name is now always the cleaned field name (for Anthropic API compliance)
                # Get the original field name from field mapping for proper emit key generation
                original_field_name = field_mapping.get(clean_arg_name, clean_arg_name)
                arg_value = tool_args.get(clean_arg_name)

                sanitized_tool_name = self.cleanup(tool_name)
                sanitized_arg_name = self.cleanup(original_field_name)
                emit_key = f"tools_^_{sanitized_tool_name}_~_{sanitized_arg_name}"

                logger.debug(
                    "[SmartDecisionMakerBlock|geid:%s|neid:%s] emit %s",
                    graph_exec_id,
                    node_exec_id,
                    emit_key,
                )
                yield emit_key, arg_value

        # Add reasoning to conversation history if available
        if response.reasoning:
            prompt.append(
                {"role": "assistant", "content": f"[Reasoning]: {response.reasoning}"}
            )

        # Add the successful response to conversation
        prompt.append(_convert_raw_response_to_dict(response.raw_response))

        yield "conversations", prompt

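Concretely (values taken from this PR's output-yielding test), a createdictionaryblock call whose cleaned argument values___name maps back to the original pin values_#_name is emitted as:

    yield "tools_^_createdictionaryblock_~_values___name", "Alice"

so the emit key always carries the cleaned tool and argument names on both sides of the _~_ separator.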
@@ -216,8 +216,17 @@ async def test_smart_decision_maker_tracks_llm_stats():
    }

    # Mock the _create_function_signature method to avoid database calls
    with patch("backend.blocks.llm.llm_call", return_value=mock_response), patch.object(
        SmartDecisionMakerBlock, "_create_function_signature", return_value=[]
    from unittest.mock import AsyncMock

    with patch(
        "backend.blocks.llm.llm_call",
        new_callable=AsyncMock,
        return_value=mock_response,
    ), patch.object(
        SmartDecisionMakerBlock,
        "_create_function_signature",
        new_callable=AsyncMock,
        return_value=[],
    ):

        # Create test input
@@ -301,11 +310,16 @@ async def test_smart_decision_maker_parameter_validation():
    mock_response_with_typo.reasoning = None
    mock_response_with_typo.raw_response = {"role": "assistant", "content": None}

    from unittest.mock import AsyncMock

    with patch(
        "backend.blocks.llm.llm_call", return_value=mock_response_with_typo
        "backend.blocks.llm.llm_call",
        new_callable=AsyncMock,
        return_value=mock_response_with_typo,
    ) as mock_llm_call, patch.object(
        SmartDecisionMakerBlock,
        "_create_function_signature",
        new_callable=AsyncMock,
        return_value=mock_tool_functions,
    ):

@@ -332,7 +346,7 @@ async def test_smart_decision_maker_parameter_validation():

    # Verify error message contains details about the typo
    error_msg = str(exc_info.value)
    assert "Tool call validation failed" in error_msg
    assert "Tool call 'search_keywords' has parameter errors" in error_msg
    assert "Unknown parameters: ['maximum_keyword_difficulty']" in error_msg

    # Verify that LLM was called the expected number of times (retries)
@@ -353,11 +367,16 @@ async def test_smart_decision_maker_parameter_validation():
    mock_response_missing_required.reasoning = None
    mock_response_missing_required.raw_response = {"role": "assistant", "content": None}

    from unittest.mock import AsyncMock

    with patch(
        "backend.blocks.llm.llm_call", return_value=mock_response_missing_required
        "backend.blocks.llm.llm_call",
        new_callable=AsyncMock,
        return_value=mock_response_missing_required,
    ), patch.object(
        SmartDecisionMakerBlock,
        "_create_function_signature",
        new_callable=AsyncMock,
        return_value=mock_tool_functions,
    ):

@@ -398,11 +417,16 @@ async def test_smart_decision_maker_parameter_validation():
    mock_response_valid.reasoning = None
    mock_response_valid.raw_response = {"role": "assistant", "content": None}

    from unittest.mock import AsyncMock

    with patch(
        "backend.blocks.llm.llm_call", return_value=mock_response_valid
        "backend.blocks.llm.llm_call",
        new_callable=AsyncMock,
        return_value=mock_response_valid,
    ), patch.object(
        SmartDecisionMakerBlock,
        "_create_function_signature",
        new_callable=AsyncMock,
        return_value=mock_tool_functions,
    ):

@@ -447,11 +471,16 @@ async def test_smart_decision_maker_parameter_validation():
    mock_response_all_params.reasoning = None
    mock_response_all_params.raw_response = {"role": "assistant", "content": None}

    from unittest.mock import AsyncMock

    with patch(
        "backend.blocks.llm.llm_call", return_value=mock_response_all_params
        "backend.blocks.llm.llm_call",
        new_callable=AsyncMock,
        return_value=mock_response_all_params,
    ), patch.object(
        SmartDecisionMakerBlock,
        "_create_function_signature",
        new_callable=AsyncMock,
        return_value=mock_tool_functions,
    ):

@@ -553,9 +582,14 @@ async def test_smart_decision_maker_raw_response_conversion():
    )

    # Mock llm_call to return different responses on different calls
    with patch("backend.blocks.llm.llm_call") as mock_llm_call, patch.object(
    from unittest.mock import AsyncMock

    with patch(
        "backend.blocks.llm.llm_call", new_callable=AsyncMock
    ) as mock_llm_call, patch.object(
        SmartDecisionMakerBlock,
        "_create_function_signature",
        new_callable=AsyncMock,
        return_value=mock_tool_functions,
    ):
        # First call returns response that will trigger retry due to validation error
@@ -614,11 +648,16 @@ async def test_smart_decision_maker_raw_response_conversion():
        "I'll help you with that."  # Ollama returns string
    )

    from unittest.mock import AsyncMock

    with patch(
        "backend.blocks.llm.llm_call", return_value=mock_response_ollama
        "backend.blocks.llm.llm_call",
        new_callable=AsyncMock,
        return_value=mock_response_ollama,
    ), patch.object(
        SmartDecisionMakerBlock,
        "_create_function_signature",
        new_callable=AsyncMock,
        return_value=[],  # No tools for this test
    ):
        input_data = SmartDecisionMakerBlock.Input(
@@ -655,11 +694,16 @@ async def test_smart_decision_maker_raw_response_conversion():
        "content": "Test response",
    }  # Dict format

    from unittest.mock import AsyncMock

    with patch(
        "backend.blocks.llm.llm_call", return_value=mock_response_dict
        "backend.blocks.llm.llm_call",
        new_callable=AsyncMock,
        return_value=mock_response_dict,
    ), patch.object(
        SmartDecisionMakerBlock,
        "_create_function_signature",
        new_callable=AsyncMock,
        return_value=[],
    ):
        input_data = SmartDecisionMakerBlock.Input(

@@ -48,16 +48,24 @@ async def test_smart_decision_maker_handles_dynamic_dict_fields():
    assert "parameters" in signature["function"]
    assert "properties" in signature["function"]["parameters"]

    # Check that dynamic fields are handled
    # Check that dynamic fields are handled with original names
    properties = signature["function"]["parameters"]["properties"]
    assert len(properties) == 3  # Should have all three fields

    # Each dynamic field should have proper schema
    for prop_value in properties.values():
    # Check that field names are cleaned (for Anthropic API compatibility)
    assert "values___name" in properties
    assert "values___age" in properties
    assert "values___city" in properties

    # Each dynamic field should have proper schema with descriptive text
    for field_name, prop_value in properties.items():
        assert "type" in prop_value
        assert prop_value["type"] == "string"  # Dynamic fields get string type
        assert "description" in prop_value
        assert "Dynamic value for" in prop_value["description"]
        # Check that descriptions properly explain the dynamic field
        if field_name == "values___name":
            assert "Dictionary field 'name'" in prop_value["description"]
            assert "values['name']" in prop_value["description"]


@pytest.mark.asyncio
@@ -96,10 +104,18 @@ async def test_smart_decision_maker_handles_dynamic_list_fields():
    properties = signature["function"]["parameters"]["properties"]
    assert len(properties) == 2  # Should have both list items

    # Each dynamic field should have proper schema
    for prop_value in properties.values():
    # Check that field names are cleaned (for Anthropic API compatibility)
    assert "entries___0" in properties
    assert "entries___1" in properties

    # Each dynamic field should have proper schema with descriptive text
    for field_name, prop_value in properties.items():
        assert prop_value["type"] == "string"
        assert "Dynamic value for" in prop_value["description"]
        assert "description" in prop_value
        # Check that descriptions properly explain the list field
        if field_name == "entries___0":
            assert "List item 0" in prop_value["description"]
            assert "entries[0]" in prop_value["description"]


@pytest.mark.asyncio

@@ -0,0 +1,553 @@
"""Comprehensive tests for SmartDecisionMakerBlock dynamic field handling."""

import json
from unittest.mock import AsyncMock, Mock, patch

import pytest

from backend.blocks.data_manipulation import AddToListBlock, CreateDictionaryBlock
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
from backend.blocks.text import MatchTextPatternBlock
from backend.data.dynamic_fields import get_dynamic_field_description


@pytest.mark.asyncio
async def test_dynamic_field_description_generation():
    """Test that dynamic field descriptions are generated correctly."""
    # Test dictionary field description
    desc = get_dynamic_field_description("values_#_name")
    assert "Dictionary field 'name' for base field 'values'" in desc
    assert "values['name']" in desc

    # Test list field description
    desc = get_dynamic_field_description("items_$_0")
    assert "List item 0 for base field 'items'" in desc
    assert "items[0]" in desc

    # Test object field description
    desc = get_dynamic_field_description("user_@_email")
    assert "Object attribute 'email' for base field 'user'" in desc
    assert "user.email" in desc

    # Test regular field fallback
    desc = get_dynamic_field_description("regular_field")
    assert desc == "Value for regular_field"


@pytest.mark.asyncio
async def test_create_block_function_signature_with_dict_fields():
    """Test that function signatures are created correctly for dictionary dynamic fields."""
    block = SmartDecisionMakerBlock()

    # Create a mock node for CreateDictionaryBlock
    mock_node = Mock()
    mock_node.block = CreateDictionaryBlock()
    mock_node.block_id = CreateDictionaryBlock().id
    mock_node.input_default = {}

    # Create mock links with dynamic dictionary fields (source sanitized, sink original)
    mock_links = [
        Mock(
            source_name="tools_^_create_dict_~_values___name",  # Sanitized source
            sink_name="values_#_name",  # Original sink
            sink_id="dict_node_id",
            source_id="smart_decision_node_id",
        ),
        Mock(
            source_name="tools_^_create_dict_~_values___age",  # Sanitized source
            sink_name="values_#_age",  # Original sink
            sink_id="dict_node_id",
            source_id="smart_decision_node_id",
        ),
        Mock(
            source_name="tools_^_create_dict_~_values___email",  # Sanitized source
            sink_name="values_#_email",  # Original sink
            sink_id="dict_node_id",
            source_id="smart_decision_node_id",
        ),
    ]

    # Generate function signature
    signature = await block._create_block_function_signature(mock_node, mock_links)  # type: ignore

    # Verify the signature structure
    assert signature["type"] == "function"
    assert "function" in signature
    assert "parameters" in signature["function"]
    assert "properties" in signature["function"]["parameters"]

    # Check that dynamic fields are handled with original names
    properties = signature["function"]["parameters"]["properties"]
    assert len(properties) == 3

    # Check cleaned field names (for Anthropic API compatibility)
    assert "values___name" in properties
    assert "values___age" in properties
    assert "values___email" in properties

    # Check descriptions mention they are dictionary fields
    assert "Dictionary field" in properties["values___name"]["description"]
    assert "values['name']" in properties["values___name"]["description"]

    assert "Dictionary field" in properties["values___age"]["description"]
    assert "values['age']" in properties["values___age"]["description"]

    assert "Dictionary field" in properties["values___email"]["description"]
    assert "values['email']" in properties["values___email"]["description"]


@pytest.mark.asyncio
async def test_create_block_function_signature_with_list_fields():
    """Test that function signatures are created correctly for list dynamic fields."""
    block = SmartDecisionMakerBlock()

    # Create a mock node for AddToListBlock
    mock_node = Mock()
    mock_node.block = AddToListBlock()
    mock_node.block_id = AddToListBlock().id
    mock_node.input_default = {}

    # Create mock links with dynamic list fields
    mock_links = [
        Mock(
            source_name="tools_^_add_list_~_0",
            sink_name="entries_$_0",  # Dynamic list field
            sink_id="list_node_id",
            source_id="smart_decision_node_id",
        ),
        Mock(
            source_name="tools_^_add_list_~_1",
            sink_name="entries_$_1",  # Dynamic list field
            sink_id="list_node_id",
            source_id="smart_decision_node_id",
        ),
        Mock(
            source_name="tools_^_add_list_~_2",
            sink_name="entries_$_2",  # Dynamic list field
            sink_id="list_node_id",
            source_id="smart_decision_node_id",
        ),
    ]

    # Generate function signature
    signature = await block._create_block_function_signature(mock_node, mock_links)  # type: ignore

    # Verify the signature structure
    assert signature["type"] == "function"
    properties = signature["function"]["parameters"]["properties"]

    # Check cleaned field names (for Anthropic API compatibility)
    assert "entries___0" in properties
    assert "entries___1" in properties
    assert "entries___2" in properties

    # Check descriptions mention they are list items
    assert "List item 0" in properties["entries___0"]["description"]
    assert "entries[0]" in properties["entries___0"]["description"]

    assert "List item 1" in properties["entries___1"]["description"]
    assert "entries[1]" in properties["entries___1"]["description"]


@pytest.mark.asyncio
async def test_create_block_function_signature_with_object_fields():
    """Test that function signatures are created correctly for object dynamic fields."""
    block = SmartDecisionMakerBlock()

    # Create a mock node for MatchTextPatternBlock (simulating object fields)
    mock_node = Mock()
    mock_node.block = MatchTextPatternBlock()
    mock_node.block_id = MatchTextPatternBlock().id
    mock_node.input_default = {}

    # Create mock links with dynamic object fields
    mock_links = [
        Mock(
            source_name="tools_^_extract_~_user_name",
            sink_name="data_@_user_name",  # Dynamic object field
            sink_id="extract_node_id",
            source_id="smart_decision_node_id",
        ),
        Mock(
            source_name="tools_^_extract_~_user_email",
            sink_name="data_@_user_email",  # Dynamic object field
            sink_id="extract_node_id",
            source_id="smart_decision_node_id",
        ),
    ]

    # Generate function signature
    signature = await block._create_block_function_signature(mock_node, mock_links)  # type: ignore

    # Verify the signature structure
    properties = signature["function"]["parameters"]["properties"]

    # Check cleaned field names (for Anthropic API compatibility)
    assert "data___user_name" in properties
    assert "data___user_email" in properties

    # Check descriptions mention they are object attributes
    assert "Object attribute" in properties["data___user_name"]["description"]
    assert "data.user_name" in properties["data___user_name"]["description"]


@pytest.mark.asyncio
async def test_create_function_signature():
    """Test that the mapping between sanitized and original field names is built correctly."""
    block = SmartDecisionMakerBlock()

    # Mock the database client and connected nodes
    with patch(
        "backend.blocks.smart_decision_maker.get_database_manager_async_client"
    ) as mock_db:
        mock_client = AsyncMock()
        mock_db.return_value = mock_client

        # Create mock nodes and links
        mock_dict_node = Mock()
        mock_dict_node.block = CreateDictionaryBlock()
        mock_dict_node.block_id = CreateDictionaryBlock().id
        mock_dict_node.input_default = {}

        mock_list_node = Mock()
        mock_list_node.block = AddToListBlock()
        mock_list_node.block_id = AddToListBlock().id
        mock_list_node.input_default = {}

        # Mock links with dynamic fields
        dict_link1 = Mock(
            source_name="tools_^_create_dictionary_~_name",
            sink_name="values_#_name",
            sink_id="dict_node_id",
            source_id="test_node_id",
        )
        dict_link2 = Mock(
            source_name="tools_^_create_dictionary_~_age",
            sink_name="values_#_age",
            sink_id="dict_node_id",
            source_id="test_node_id",
        )
        list_link = Mock(
            source_name="tools_^_add_to_list_~_0",
            sink_name="entries_$_0",
            sink_id="list_node_id",
            source_id="test_node_id",
        )

        mock_client.get_connected_output_nodes.return_value = [
            (dict_link1, mock_dict_node),
            (dict_link2, mock_dict_node),
            (list_link, mock_list_node),
        ]

        # Call the method that builds signatures
        tool_functions = await block._create_function_signature("test_node_id")

        # Verify we got 2 tool functions (one for dict, one for list)
        assert len(tool_functions) == 2

        # Verify the tool functions contain the dynamic field names
        dict_tool = next(
            (
                tool
                for tool in tool_functions
                if tool["function"]["name"] == "createdictionaryblock"
            ),
            None,
        )
        assert dict_tool is not None
        dict_properties = dict_tool["function"]["parameters"]["properties"]
        assert "values___name" in dict_properties
        assert "values___age" in dict_properties

        list_tool = next(
            (
                tool
                for tool in tool_functions
                if tool["function"]["name"] == "addtolistblock"
            ),
            None,
        )
        assert list_tool is not None
        list_properties = list_tool["function"]["parameters"]["properties"]
        assert "entries___0" in list_properties


@pytest.mark.asyncio
async def test_output_yielding_with_dynamic_fields():
    """Test that outputs are yielded correctly with dynamic field names mapped back."""
    block = SmartDecisionMakerBlock()

    # No more sanitized mapping needed since we removed sanitization

    # Mock LLM response with tool calls
    mock_response = Mock()
    mock_response.tool_calls = [
        Mock(
            function=Mock(
                arguments=json.dumps(
                    {
                        "values___name": "Alice",
                        "values___age": 30,
                        "values___email": "alice@example.com",
                    }
                ),
            )
        )
    ]
    # Ensure function name is a real string, not a Mock name
    mock_response.tool_calls[0].function.name = "createdictionaryblock"
    mock_response.reasoning = "Creating a dictionary with user information"
    mock_response.raw_response = {"role": "assistant", "content": "test"}
    mock_response.prompt_tokens = 100
    mock_response.completion_tokens = 50

    # Mock the LLM call
    with patch(
        "backend.blocks.smart_decision_maker.llm.llm_call", new_callable=AsyncMock
    ) as mock_llm:
        mock_llm.return_value = mock_response

        # Mock the function signature creation
        with patch.object(
            block, "_create_function_signature", new_callable=AsyncMock
        ) as mock_sig:
            mock_sig.return_value = [
                {
                    "type": "function",
                    "function": {
                        "name": "createdictionaryblock",
                        "parameters": {
                            "type": "object",
                            "properties": {
                                "values___name": {"type": "string"},
                                "values___age": {"type": "number"},
                                "values___email": {"type": "string"},
                            },
                        },
                    },
                }
            ]

            # Create input data
            from backend.blocks import llm

            input_data = block.input_schema(
                prompt="Create a user dictionary",
                credentials=llm.TEST_CREDENTIALS_INPUT,
                model=llm.LlmModel.GPT4O,
            )

            # Run the block
            outputs = {}
            async for output_name, output_value in block.run(
                input_data,
                credentials=llm.TEST_CREDENTIALS,
                graph_id="test_graph",
                node_id="test_node",
                graph_exec_id="test_exec",
                node_exec_id="test_node_exec",
                user_id="test_user",
            ):
                outputs[output_name] = output_value

            # Verify the outputs use sanitized field names (matching frontend normalizeToolName)
            assert "tools_^_createdictionaryblock_~_values___name" in outputs
            assert outputs["tools_^_createdictionaryblock_~_values___name"] == "Alice"

            assert "tools_^_createdictionaryblock_~_values___age" in outputs
            assert outputs["tools_^_createdictionaryblock_~_values___age"] == 30

            assert "tools_^_createdictionaryblock_~_values___email" in outputs
            assert (
                outputs["tools_^_createdictionaryblock_~_values___email"]
                == "alice@example.com"
            )


@pytest.mark.asyncio
async def test_mixed_regular_and_dynamic_fields():
    """Test handling of blocks with both regular and dynamic fields."""
    block = SmartDecisionMakerBlock()

    # Create a mock node
    mock_node = Mock()
    mock_node.block = Mock()
    mock_node.block.name = "TestBlock"
    mock_node.block.description = "A test block"
    mock_node.block.input_schema = Mock()

    # Mock the get_field_schema to return a proper schema for regular fields
    def get_field_schema(field_name):
        if field_name == "regular_field":
            return {"type": "string", "description": "A regular field"}
        elif field_name == "values":
            return {"type": "object", "description": "A dictionary field"}
        else:
            raise KeyError(f"Field {field_name} not found")

    mock_node.block.input_schema.get_field_schema = get_field_schema
    mock_node.block.input_schema.jsonschema = Mock(
        return_value={"properties": {}, "required": []}
    )

    # Create links with both regular and dynamic fields
    mock_links = [
        Mock(
            source_name="tools_^_test_~_regular",
            sink_name="regular_field",  # Regular field
            sink_id="test_node_id",
            source_id="smart_decision_node_id",
        ),
        Mock(
            source_name="tools_^_test_~_dict_key",
            sink_name="values_#_key1",  # Dynamic dict field
            sink_id="test_node_id",
            source_id="smart_decision_node_id",
        ),
        Mock(
            source_name="tools_^_test_~_dict_key2",
            sink_name="values_#_key2",  # Dynamic dict field
            sink_id="test_node_id",
            source_id="smart_decision_node_id",
        ),
    ]

    # Generate function signature
    signature = await block._create_block_function_signature(mock_node, mock_links)  # type: ignore

    # Check properties
    properties = signature["function"]["parameters"]["properties"]
    assert len(properties) == 3

    # Regular field should have its original schema
    assert "regular_field" in properties
    assert properties["regular_field"]["description"] == "A regular field"

    # Dynamic fields should have generated descriptions
    assert "values___key1" in properties
    assert "Dictionary field" in properties["values___key1"]["description"]

    assert "values___key2" in properties
    assert "Dictionary field" in properties["values___key2"]["description"]


@pytest.mark.asyncio
async def test_validation_errors_dont_pollute_conversation():
    """Test that validation errors are only used during retries and don't pollute the conversation."""
    block = SmartDecisionMakerBlock()

    # Track conversation history changes
    conversation_snapshots = []

    # Mock response with invalid tool call (missing required parameter)
    invalid_response = Mock()
    invalid_response.tool_calls = [
        Mock(
            function=Mock(
                arguments=json.dumps({"wrong_param": "value"}),  # Wrong parameter name
            )
        )
    ]
    # Ensure function name is a real string, not a Mock name
    invalid_response.tool_calls[0].function.name = "test_tool"
    invalid_response.reasoning = None
    invalid_response.raw_response = {"role": "assistant", "content": "invalid"}
    invalid_response.prompt_tokens = 100
    invalid_response.completion_tokens = 50

    # Mock valid response after retry
    valid_response = Mock()
    valid_response.tool_calls = [
        Mock(function=Mock(arguments=json.dumps({"correct_param": "value"})))
    ]
    # Ensure function name is a real string, not a Mock name
    valid_response.tool_calls[0].function.name = "test_tool"
    valid_response.reasoning = None
    valid_response.raw_response = {"role": "assistant", "content": "valid"}
    valid_response.prompt_tokens = 100
    valid_response.completion_tokens = 50

    call_count = 0

    async def mock_llm_call(**kwargs):
        nonlocal call_count
        # Capture conversation state
        conversation_snapshots.append(kwargs.get("prompt", []).copy())
        call_count += 1
        if call_count == 1:
            return invalid_response
        else:
            return valid_response

    # Mock the LLM call
    with patch(
        "backend.blocks.smart_decision_maker.llm.llm_call", new_callable=AsyncMock
    ) as mock_llm:
        mock_llm.side_effect = mock_llm_call

        # Mock the function signature creation
        with patch.object(
            block, "_create_function_signature", new_callable=AsyncMock
        ) as mock_sig:
            mock_sig.return_value = [
                {
                    "type": "function",
                    "function": {
                        "name": "test_tool",
                        "parameters": {
                            "type": "object",
                            "properties": {
                                "correct_param": {
                                    "type": "string",
                                    "description": "The correct parameter",
                                }
                            },
                            "required": ["correct_param"],
                        },
                    },
                }
            ]

            # Create input data
            from backend.blocks import llm

            input_data = block.input_schema(
                prompt="Test prompt",
                credentials=llm.TEST_CREDENTIALS_INPUT,
                model=llm.LlmModel.GPT4O,
                retry=3,  # Allow retries
            )

            # Run the block
            outputs = {}
            async for output_name, output_value in block.run(
                input_data,
                credentials=llm.TEST_CREDENTIALS,
                graph_id="test_graph",
                node_id="test_node",
                graph_exec_id="test_exec",
                node_exec_id="test_node_exec",
                user_id="test_user",
            ):
                outputs[output_name] = output_value

            # Verify we had 2 LLM calls (initial + retry)
            assert call_count == 2

            # Check the final conversation output
            final_conversation = outputs.get("conversations", [])

            # The final conversation should NOT contain the validation error message
            error_messages = [
                msg
                for msg in final_conversation
                if msg.get("role") == "user"
                and "parameter errors" in msg.get("content", "")
            ]
            assert (
                len(error_messages) == 0
            ), "Validation error leaked into final conversation"

            # The final conversation should only have the successful response
            assert final_conversation[-1]["content"] == "valid"
autogpt_platform/backend/backend/data/dynamic_fields.py (new file, 284 lines)
@@ -0,0 +1,284 @@
"""
Utilities for handling dynamic field names with special delimiters.

Dynamic fields allow graphs to connect complex data structures using special delimiters:
- _#_ for dictionary keys (e.g., "values_#_name" → values["name"])
- _$_ for list indices (e.g., "items_$_0" → items[0])
- _@_ for object attributes (e.g., "obj_@_attr" → obj.attr)
"""

from typing import Any

from backend.util.mock import MockObject

# Dynamic field delimiters
LIST_SPLIT = "_$_"
DICT_SPLIT = "_#_"
OBJC_SPLIT = "_@_"

DYNAMIC_DELIMITERS = (LIST_SPLIT, DICT_SPLIT, OBJC_SPLIT)


def extract_base_field_name(field_name: str) -> str:
    """
    Extract the base field name from a dynamic field name by removing all dynamic suffixes.

    Examples:
        extract_base_field_name("values_#_name") → "values"
        extract_base_field_name("items_$_0") → "items"
        extract_base_field_name("obj_@_attr") → "obj"
        extract_base_field_name("regular_field") → "regular_field"

    Args:
        field_name: The field name that may contain dynamic delimiters

    Returns:
        The base field name without any dynamic suffixes
    """
    base_name = field_name
    for delimiter in DYNAMIC_DELIMITERS:
        if delimiter in base_name:
            base_name = base_name.split(delimiter)[0]
    return base_name


def is_dynamic_field(field_name: str) -> bool:
    """
    Check if a field name contains dynamic delimiters.

    Args:
        field_name: The field name to check

    Returns:
        True if the field contains any dynamic delimiters, False otherwise
    """
    return any(delimiter in field_name for delimiter in DYNAMIC_DELIMITERS)


def get_dynamic_field_description(field_name: str) -> str:
    """
    Generate a description for a dynamic field based on its structure.

    Args:
        field_name: The full dynamic field name (e.g., "values_#_name")

    Returns:
        A descriptive string explaining what this dynamic field represents
    """
    base_name = extract_base_field_name(field_name)

    if DICT_SPLIT in field_name:
        # Extract the key part after _#_
        parts = field_name.split(DICT_SPLIT)
        if len(parts) > 1:
            key = parts[1].split("_")[0] if "_" in parts[1] else parts[1]
            return f"Dictionary field '{key}' for base field '{base_name}' ({base_name}['{key}'])"
    elif LIST_SPLIT in field_name:
        # Extract the index part after _$_
        parts = field_name.split(LIST_SPLIT)
        if len(parts) > 1:
            index = parts[1].split("_")[0] if "_" in parts[1] else parts[1]
            return (
                f"List item {index} for base field '{base_name}' ({base_name}[{index}])"
            )
    elif OBJC_SPLIT in field_name:
        # Extract the attribute part after _@_
        parts = field_name.split(OBJC_SPLIT)
        if len(parts) > 1:
            # Get the full attribute name (everything after _@_)
            attr = parts[1]
            return f"Object attribute '{attr}' for base field '{base_name}' ({base_name}.{attr})"

    return f"Value for {field_name}"
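# Illustrative outputs, mirrored from this PR's tests (not part of the file):
#   get_dynamic_field_description("values_#_name")
#       → "Dictionary field 'name' for base field 'values' (values['name'])"
#   get_dynamic_field_description("items_$_0")
#       → "List item 0 for base field 'items' (items[0])"
#   get_dynamic_field_description("user_@_email")
#       → "Object attribute 'email' for base field 'user' (user.email)"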


# --------------------------------------------------------------------------- #
# Dynamic field parsing and merging utilities
# --------------------------------------------------------------------------- #


def _next_delim(s: str) -> tuple[str | None, int]:
    """
    Return the *earliest* delimiter appearing in `s` and its index.

    If none present → (None, -1).
    """
    first: str | None = None
    pos = len(s)  # sentinel: larger than any real index
    for d in DYNAMIC_DELIMITERS:
        i = s.find(d)
        if 0 <= i < pos:
            first, pos = d, i
    return first, (pos if first else -1)


def _tokenise(path: str) -> list[tuple[str, str]] | None:
    """
    Convert the raw path string (starting with a delimiter) into
    [ (delimiter, identifier), … ] or None if the syntax is malformed.
    """
    tokens: list[tuple[str, str]] = []
    while path:
        # 1. Which delimiter starts this chunk?
        delim = next((d for d in DYNAMIC_DELIMITERS if path.startswith(d)), None)
        if delim is None:
            return None  # invalid syntax

        # 2. Slice off the delimiter, then up to the next delimiter (or EOS)
        path = path[len(delim) :]
        nxt_delim, pos = _next_delim(path)
        token, path = (
            path[: pos if pos != -1 else len(path)],
            path[pos if pos != -1 else len(path) :],
        )
        if token == "":
            return None  # empty identifier is invalid
        tokens.append((delim, token))
    return tokens
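# Worked example (illustration, not part of the file): a path that chains a
# dict key and a list index tokenises into (delimiter, identifier) pairs, and
# a path that does not begin with a delimiter is rejected:
#   _tokenise("_#_name_$_0")  → [("_#_", "name"), ("_$_", "0")]
#   _tokenise("name")         → None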


def parse_execution_output(output: tuple[str, Any], name: str) -> Any:
    """
    Retrieve a nested value out of `output` using the flattened *name*.

    On any failure (wrong name, wrong type, out-of-range, bad path)
    returns **None**.

    Args:
        output: Tuple of (base_name, data) representing a block output entry
        name: The flattened field name to extract from the output data

    Returns:
        The value at the specified path, or None if not found/invalid
    """
    base_name, data = output

    # Exact match → whole object
    if name == base_name:
        return data

    # Must start with the expected name
    if not name.startswith(base_name):
        return None
    path = name[len(base_name) :]
    if not path:
        return None  # nothing left to parse

    tokens = _tokenise(path)
    if tokens is None:
        return None

    cur: Any = data
    for delim, ident in tokens:
        if delim == LIST_SPLIT:
            # list[index]
            try:
                idx = int(ident)
            except ValueError:
                return None
            if not isinstance(cur, list) or idx >= len(cur):
                return None
            cur = cur[idx]

        elif delim == DICT_SPLIT:
            if not isinstance(cur, dict) or ident not in cur:
                return None
            cur = cur[ident]

        elif delim == OBJC_SPLIT:
            if not hasattr(cur, ident):
                return None
            cur = getattr(cur, ident)

        else:
            return None  # unreachable

    return cur
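# Worked examples (illustration, not part of the file):
#   parse_execution_output(("values", {"name": "Alice"}), "values_#_name")  → "Alice"
#   parse_execution_output(("values", {"name": "Alice"}), "values")         → {"name": "Alice"}
#   parse_execution_output(("items", [1, 2]), "items_$_5")                  → None  (index out of range)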


def _assign(container: Any, tokens: list[tuple[str, str]], value: Any) -> Any:
    """
    Recursive helper that *returns* the (possibly new) container with
    `value` assigned along the remaining `tokens` path.
    """
    if not tokens:
        return value  # leaf reached

    delim, ident = tokens[0]
    rest = tokens[1:]

    # ---------- list ----------
    if delim == LIST_SPLIT:
        try:
            idx = int(ident)
        except ValueError:
            raise ValueError("index must be an integer")

        if container is None:
            container = []
        elif not isinstance(container, list):
            container = list(container) if hasattr(container, "__iter__") else []

        while len(container) <= idx:
            container.append(None)
        container[idx] = _assign(container[idx], rest, value)
        return container

    # ---------- dict ----------
    if delim == DICT_SPLIT:
        if container is None:
            container = {}
        elif not isinstance(container, dict):
            container = dict(container) if hasattr(container, "items") else {}
        container[ident] = _assign(container.get(ident), rest, value)
        return container

    # ---------- object ----------
    if delim == OBJC_SPLIT:
        if container is None:
            container = MockObject()
        elif not hasattr(container, "__dict__"):
            # If it's not an object, create a new one
            container = MockObject()
        setattr(
            container,
            ident,
            _assign(getattr(container, ident, None), rest, value),
        )
        return container

    return value  # unreachable
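# Worked example (illustration, not part of the file): containers are created
# on demand as the token path is walked, so a dict-then-list path built from
# nothing yields a nested structure:
#   _assign(None, [("_#_", "a"), ("_$_", "0")], 5)  → {"a": [5]}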


def merge_execution_input(data: dict[str, Any]) -> dict[str, Any]:
    """
    Reconstruct nested objects from a *flattened* dict of key → value.

    Raises ValueError on syntactically invalid list indices.

    Args:
        data: Dictionary with potentially flattened dynamic field keys

    Returns:
        Dictionary with nested objects reconstructed from flattened keys
    """
    merged: dict[str, Any] = {}

    for key, value in data.items():
        # Split off the base name (before the first delimiter, if any)
        delim, pos = _next_delim(key)
        if delim is None:
            merged[key] = value
            continue

        base, path = key[:pos], key[pos:]
        tokens = _tokenise(path)
        if tokens is None:
            # Invalid key; treat as scalar under the raw name
            merged[key] = value
            continue

        merged[base] = _assign(merged.get(base), tokens, value)

    data.update(merged)
    return data
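# Worked example (illustration, not part of the file). The flattened keys are
# kept alongside the reconstructed containers because the function update()s
# the input dict in place:
#   merge_execution_input({"values_#_name": "Alice", "items_$_0": 1})
#   → {"values_#_name": "Alice", "items_$_0": 1,
#      "values": {"name": "Alice"}, "items": [1]}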
@@ -20,6 +20,7 @@ from backend.blocks.agent import AgentExecutorBlock
from backend.blocks.io import AgentInputBlock, AgentOutputBlock
from backend.blocks.llm import LlmModel
from backend.data.db import prisma as db
from backend.data.dynamic_fields import extract_base_field_name
from backend.data.includes import MAX_GRAPH_VERSIONS_FETCH
from backend.data.model import (
    CredentialsField,
@@ -741,7 +742,7 @@ def _is_tool_pin(name: str) -> bool:
|
||||
|
||||
|
||||
def _sanitize_pin_name(name: str) -> str:
|
||||
sanitized_name = name.split("_#_")[0].split("_@_")[0].split("_$_")[0]
|
||||
sanitized_name = extract_base_field_name(name)
|
||||
if _is_tool_pin(sanitized_name):
|
||||
return "tools"
|
||||
return sanitized_name
|
||||
|
||||
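# Illustrative sketch (hypothetical pin names): the behaviour preserved by the
# switch to extract_base_field_name. Assumes _is_tool_pin("tools") is True.
#
#     _sanitize_pin_name("values_#_name")  -> "values"  # dynamic suffix dropped
#     _sanitize_pin_name("items_$_0")      -> "items"
#     _sanitize_pin_name("tools")          -> "tools"   # tool pins collapse to "tools"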
@@ -25,6 +25,7 @@ from backend.data.block import (
    get_block,
)
from backend.data.credit import UsageTransactionMetadata
from backend.data.dynamic_fields import parse_execution_output
from backend.data.execution import (
    ExecutionQueue,
    ExecutionStatus,
@@ -59,7 +60,6 @@ from backend.executor.utils import (
    block_usage_cost,
    create_execution_queue_config,
    execution_usage_cost,
    parse_execution_output,
    validate_exec,
)
from backend.integrations.creds_manager import IntegrationCredentialsManager

@@ -4,7 +4,7 @@ import threading
import time
from collections import defaultdict
from concurrent.futures import Future
from typing import Any, Mapping, Optional, cast
from typing import Mapping, Optional, cast

from pydantic import BaseModel, JsonValue, ValidationError

@@ -20,6 +20,9 @@ from backend.data.block import (
)
from backend.data.block_cost_config import BLOCK_COSTS
from backend.data.db import prisma

# Import dynamic field utilities from centralized location
from backend.data.dynamic_fields import merge_execution_input
from backend.data.execution import (
    ExecutionStatus,
    GraphExecutionStats,
@@ -39,7 +42,6 @@ from backend.util.clients import (
)
from backend.util.exceptions import GraphValidationError, NotFoundError
from backend.util.logging import TruncatedLogger
from backend.util.mock import MockObject
from backend.util.settings import Config
from backend.util.type import convert

@@ -186,195 +188,7 @@ def _is_cost_filter_match(cost_filter: BlockInput, input_data: BlockInput) -> bool

# ============ Execution Input Helpers ============ #

# --------------------------------------------------------------------------- #
# Delimiters
# --------------------------------------------------------------------------- #

LIST_SPLIT = "_$_"
DICT_SPLIT = "_#_"
OBJC_SPLIT = "_@_"

_DELIMS = (LIST_SPLIT, DICT_SPLIT, OBJC_SPLIT)

# --------------------------------------------------------------------------- #
# Tokenisation utilities
# --------------------------------------------------------------------------- #


def _next_delim(s: str) -> tuple[str | None, int]:
    """
    Return the *earliest* delimiter appearing in `s` and its index.

    If none present → (None, -1).
    """
    first: str | None = None
    pos = len(s)  # sentinel: larger than any real index
    for d in _DELIMS:
        i = s.find(d)
        if 0 <= i < pos:
            first, pos = d, i
    return first, (pos if first else -1)


def _tokenise(path: str) -> list[tuple[str, str]] | None:
    """
    Convert the raw path string (starting with a delimiter) into
    [ (delimiter, identifier), … ] or None if the syntax is malformed.
    """
    tokens: list[tuple[str, str]] = []
    while path:
        # 1. Which delimiter starts this chunk?
        delim = next((d for d in _DELIMS if path.startswith(d)), None)
        if delim is None:
            return None  # invalid syntax

        # 2. Slice off the delimiter, then up to the next delimiter (or EOS)
        path = path[len(delim) :]
        nxt_delim, pos = _next_delim(path)
        token, path = (
            path[: pos if pos != -1 else len(path)],
            path[pos if pos != -1 else len(path) :],
        )
        if token == "":
            return None  # empty identifier is invalid
        tokens.append((delim, token))
    return tokens
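# Illustrative sketch (not part of the diff): what the tokeniser produces.
# The path must begin with a delimiter; identifiers run to the next delimiter.
assert _tokenise("_#_a_$_0") == [("_#_", "a"), ("_$_", "0")]
assert _tokenise("_@_attr") == [("_@_", "attr")]
assert _tokenise("oops") is None     # does not start with a delimiter
assert _tokenise("_#__$_0") is None  # empty identifier is invalid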
# --------------------------------------------------------------------------- #
# Public API – parsing (flattened ➜ concrete)
# --------------------------------------------------------------------------- #


def parse_execution_output(output: BlockOutputEntry, name: str) -> JsonValue | None:
    """
    Retrieve a nested value out of `output` using the flattened *name*.

    On any failure (wrong name, wrong type, out-of-range, bad path)
    returns **None**.
    """
    base_name, data = output

    # Exact match → whole object
    if name == base_name:
        return data

    # Must start with the expected name
    if not name.startswith(base_name):
        return None
    path = name[len(base_name) :]
    if not path:
        return None  # nothing left to parse

    tokens = _tokenise(path)
    if tokens is None:
        return None

    cur: JsonValue = data
    for delim, ident in tokens:
        if delim == LIST_SPLIT:
            # list[index]
            try:
                idx = int(ident)
            except ValueError:
                return None
            if not isinstance(cur, list) or idx >= len(cur):
                return None
            cur = cur[idx]

        elif delim == DICT_SPLIT:
            if not isinstance(cur, dict) or ident not in cur:
                return None
            cur = cur[ident]

        elif delim == OBJC_SPLIT:
            if not hasattr(cur, ident):
                return None
            cur = getattr(cur, ident)

        else:
            return None  # unreachable

    return cur
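# Illustrative sketch (hypothetical output entry): drilling into nested data.
output = ("result", {"user": {"name": "Alice"}, "tags": ["a", "b"]})

assert parse_execution_output(output, "result") == output[1]        # exact match
assert parse_execution_output(output, "result_#_user_#_name") == "Alice"
assert parse_execution_output(output, "result_#_tags_$_1") == "b"
assert parse_execution_output(output, "result_#_missing") is None   # bad path -> None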
def _assign(container: Any, tokens: list[tuple[str, str]], value: Any) -> Any:
    """
    Recursive helper that *returns* the (possibly new) container with
    `value` assigned along the remaining `tokens` path.
    """
    if not tokens:
        return value  # leaf reached

    delim, ident = tokens[0]
    rest = tokens[1:]

    # ---------- list ----------
    if delim == LIST_SPLIT:
        try:
            idx = int(ident)
        except ValueError:
            raise ValueError("index must be an integer")

        if container is None:
            container = []
        elif not isinstance(container, list):
            container = list(container) if hasattr(container, "__iter__") else []

        while len(container) <= idx:
            container.append(None)
        container[idx] = _assign(container[idx], rest, value)
        return container

    # ---------- dict ----------
    if delim == DICT_SPLIT:
        if container is None:
            container = {}
        elif not isinstance(container, dict):
            container = dict(container) if hasattr(container, "items") else {}
        container[ident] = _assign(container.get(ident), rest, value)
        return container

    # ---------- object ----------
    if delim == OBJC_SPLIT:
        if container is None or not isinstance(container, MockObject):
            container = MockObject()
        setattr(
            container,
            ident,
            _assign(getattr(container, ident, None), rest, value),
        )
        return container

    return value  # unreachable


def merge_execution_input(data: BlockInput) -> BlockInput:
    """
    Reconstruct nested objects from a *flattened* dict of key → value.

    Raises ValueError on syntactically invalid list indices.
    """
    merged: BlockInput = {}

    for key, value in data.items():
        # Split off the base name (before the first delimiter, if any)
        delim, pos = _next_delim(key)
        if delim is None:
            merged[key] = value
            continue

        base, path = key[:pos], key[pos:]
        tokens = _tokenise(path)
        if tokens is None:
            # Invalid key; treat as scalar under the raw name
            merged[key] = value
            continue

        merged[base] = _assign(merged.get(base), tokens, value)

    data.update(merged)
    return data
# Dynamic field utilities are now imported from backend.data.dynamic_fields


def validate_exec(

@@ -3,7 +3,7 @@ from typing import cast
import pytest
from pytest_mock import MockerFixture

from backend.executor.utils import merge_execution_input, parse_execution_output
from backend.data.dynamic_fields import merge_execution_input, parse_execution_output
from backend.util.mock import MockObject


@@ -180,7 +180,7 @@ async def callback(
    )


@router.get("/credentials")
@router.get("/credentials", summary="List Credentials")
async def list_credentials(
    user_id: Annotated[str, Security(get_user_id)],
) -> list[CredentialsMetaResponse]:
@@ -221,7 +221,9 @@ async def list_credentials_by_provider(
    ]


@router.get("/{provider}/credentials/{cred_id}")
@router.get(
    "/{provider}/credentials/{cred_id}", summary="Get Specific Credential By ID"
)
async def get_credential(
    provider: Annotated[
        ProviderName, Path(title="The provider to retrieve credentials for")
autogpt_platform/backend/backend/util/dynamic_fields.py (new file, 124 lines)
@@ -0,0 +1,124 @@
"""
|
||||
Utilities for handling dynamic field names and delimiters in the AutoGPT Platform.
|
||||
|
||||
Dynamic fields allow graphs to connect complex data structures using special delimiters:
|
||||
- _#_ for dictionary keys (e.g., "values_#_name" → values["name"])
|
||||
- _$_ for list indices (e.g., "items_$_0" → items[0])
|
||||
- _@_ for object attributes (e.g., "obj_@_attr" → obj.attr)
|
||||
|
||||
This module provides utilities for:
|
||||
- Extracting base field names from dynamic field names
|
||||
- Generating proper schemas for base fields
|
||||
- Creating helper functions for field sanitization
|
||||
"""
|
||||
|
||||
from backend.data.dynamic_fields import DICT_SPLIT, LIST_SPLIT, OBJC_SPLIT
|
||||
|
||||
# All dynamic field delimiters
|
||||
DYNAMIC_DELIMITERS = (DICT_SPLIT, LIST_SPLIT, OBJC_SPLIT)
|
||||
|
||||
|
||||
def extract_base_field_name(field_name: str) -> str:
|
||||
"""
|
||||
Extract the base field name from a dynamic field name.
|
||||
|
||||
Examples:
|
||||
extract_base_field_name("values_#_name") → "values"
|
||||
extract_base_field_name("items_$_0") → "items"
|
||||
extract_base_field_name("obj_@_attr") → "obj"
|
||||
extract_base_field_name("regular_field") → "regular_field"
|
||||
|
||||
Args:
|
||||
field_name: The field name that may contain dynamic delimiters
|
||||
|
||||
Returns:
|
||||
The base field name without any dynamic suffixes
|
||||
"""
|
||||
base_name = field_name
|
||||
for delimiter in DYNAMIC_DELIMITERS:
|
||||
if delimiter in base_name:
|
||||
base_name = base_name.split(delimiter)[0]
|
||||
return base_name
|
||||
|
||||
|
||||
def is_dynamic_field(field_name: str) -> bool:
|
||||
"""
|
||||
Check if a field name contains dynamic delimiters.
|
||||
|
||||
Args:
|
||||
field_name: The field name to check
|
||||
|
||||
Returns:
|
||||
True if the field contains any dynamic delimiters, False otherwise
|
||||
"""
|
||||
return any(delimiter in field_name for delimiter in DYNAMIC_DELIMITERS)
|
||||
|
||||
|
||||
def get_dynamic_field_description(
|
||||
base_field_name: str, original_field_name: str
|
||||
) -> str:
|
||||
"""
|
||||
Generate a description for a dynamic field based on its base field and structure.
|
||||
|
||||
Args:
|
||||
base_field_name: The base field name (e.g., "values")
|
||||
original_field_name: The full dynamic field name (e.g., "values_#_name")
|
||||
|
||||
Returns:
|
||||
A descriptive string explaining what this dynamic field represents
|
||||
"""
|
||||
if DICT_SPLIT in original_field_name:
|
||||
key_part = (
|
||||
original_field_name.split(DICT_SPLIT, 1)[1].split(DICT_SPLIT[0])[0]
|
||||
if DICT_SPLIT in original_field_name
|
||||
else "key"
|
||||
)
|
||||
return f"Dictionary value for {base_field_name}['{key_part}']"
|
||||
elif LIST_SPLIT in original_field_name:
|
||||
index_part = (
|
||||
original_field_name.split(LIST_SPLIT, 1)[1].split(LIST_SPLIT[0])[0]
|
||||
if LIST_SPLIT in original_field_name
|
||||
else "index"
|
||||
)
|
||||
return f"List item for {base_field_name}[{index_part}]"
|
||||
elif OBJC_SPLIT in original_field_name:
|
||||
attr_part = (
|
||||
original_field_name.split(OBJC_SPLIT, 1)[1].split(OBJC_SPLIT[0])[0]
|
||||
if OBJC_SPLIT in original_field_name
|
||||
else "attr"
|
||||
)
|
||||
return f"Object attribute for {base_field_name}.{attr_part}"
|
||||
else:
|
||||
return f"Dynamic value for {base_field_name}"
|
||||
|
||||
|
||||
def group_fields_by_base_name(field_names: list[str]) -> dict[str, list[str]]:
|
||||
"""
|
||||
Group a list of field names by their base field names.
|
||||
|
||||
Args:
|
||||
field_names: List of field names that may contain dynamic delimiters
|
||||
|
||||
Returns:
|
||||
Dictionary mapping base field names to lists of original field names
|
||||
|
||||
Example:
|
||||
group_fields_by_base_name([
|
||||
"values_#_name",
|
||||
"values_#_age",
|
||||
"items_$_0",
|
||||
"regular_field"
|
||||
])
|
||||
→ {
|
||||
"values": ["values_#_name", "values_#_age"],
|
||||
"items": ["items_$_0"],
|
||||
"regular_field": ["regular_field"]
|
||||
}
|
||||
"""
|
||||
grouped = {}
|
||||
for field_name in field_names:
|
||||
base_name = extract_base_field_name(field_name)
|
||||
if base_name not in grouped:
|
||||
grouped[base_name] = []
|
||||
grouped[base_name].append(field_name)
|
||||
return grouped
|
||||
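# Illustrative sketch (not part of the new file): the helpers above in one pass.
from backend.util.dynamic_fields import (
    extract_base_field_name,
    get_dynamic_field_description,
    group_fields_by_base_name,
    is_dynamic_field,
)

assert is_dynamic_field("values_#_name") and not is_dynamic_field("plain")
assert extract_base_field_name("values_#_name") == "values"
assert get_dynamic_field_description("values", "values_#_name") == (
    "Dictionary value for values['name']"
)
assert group_fields_by_base_name(["values_#_name", "items_$_0"]) == {
    "values": ["values_#_name"],
    "items": ["items_$_0"],
}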
autogpt_platform/backend/backend/util/dynamic_fields_test.py (new file, 175 lines)
@@ -0,0 +1,175 @@
"""Tests for dynamic field utilities."""
|
||||
|
||||
from backend.util.dynamic_fields import (
|
||||
extract_base_field_name,
|
||||
get_dynamic_field_description,
|
||||
group_fields_by_base_name,
|
||||
is_dynamic_field,
|
||||
)
|
||||
|
||||
|
||||
class TestExtractBaseFieldName:
|
||||
"""Test extracting base field names from dynamic field names."""
|
||||
|
||||
def test_extract_dict_field(self):
|
||||
"""Test extracting base name from dictionary fields."""
|
||||
assert extract_base_field_name("values_#_name") == "values"
|
||||
assert extract_base_field_name("data_#_key1_#_key2") == "data"
|
||||
assert extract_base_field_name("config_#_database_#_host") == "config"
|
||||
|
||||
def test_extract_list_field(self):
|
||||
"""Test extracting base name from list fields."""
|
||||
assert extract_base_field_name("items_$_0") == "items"
|
||||
assert extract_base_field_name("results_$_5_$_10") == "results"
|
||||
assert extract_base_field_name("nested_$_0_$_1_$_2") == "nested"
|
||||
|
||||
def test_extract_object_field(self):
|
||||
"""Test extracting base name from object fields."""
|
||||
assert extract_base_field_name("user_@_name") == "user"
|
||||
assert extract_base_field_name("response_@_data_@_items") == "response"
|
||||
assert extract_base_field_name("obj_@_attr1_@_attr2") == "obj"
|
||||
|
||||
def test_extract_mixed_fields(self):
|
||||
"""Test extracting base name from mixed dynamic fields."""
|
||||
assert extract_base_field_name("data_$_0_#_key") == "data"
|
||||
assert extract_base_field_name("items_#_user_@_name") == "items"
|
||||
assert extract_base_field_name("complex_$_0_@_attr_#_key") == "complex"
|
||||
|
||||
def test_extract_regular_field(self):
|
||||
"""Test extracting base name from regular (non-dynamic) fields."""
|
||||
assert extract_base_field_name("regular_field") == "regular_field"
|
||||
assert extract_base_field_name("simple") == "simple"
|
||||
assert extract_base_field_name("") == ""
|
||||
|
||||
def test_extract_field_with_underscores(self):
|
||||
"""Test fields with regular underscores (not dynamic delimiters)."""
|
||||
assert extract_base_field_name("field_name_here") == "field_name_here"
|
||||
assert extract_base_field_name("my_field_#_key") == "my_field"
|
||||
|
||||
|
||||
class TestIsDynamicField:
|
||||
"""Test identifying dynamic fields."""
|
||||
|
||||
def test_is_dynamic_dict_field(self):
|
||||
"""Test identifying dictionary dynamic fields."""
|
||||
assert is_dynamic_field("values_#_name") is True
|
||||
assert is_dynamic_field("data_#_key1_#_key2") is True
|
||||
|
||||
def test_is_dynamic_list_field(self):
|
||||
"""Test identifying list dynamic fields."""
|
||||
assert is_dynamic_field("items_$_0") is True
|
||||
assert is_dynamic_field("results_$_5_$_10") is True
|
||||
|
||||
def test_is_dynamic_object_field(self):
|
||||
"""Test identifying object dynamic fields."""
|
||||
assert is_dynamic_field("user_@_name") is True
|
||||
assert is_dynamic_field("response_@_data_@_items") is True
|
||||
|
||||
def test_is_dynamic_mixed_field(self):
|
||||
"""Test identifying mixed dynamic fields."""
|
||||
assert is_dynamic_field("data_$_0_#_key") is True
|
||||
assert is_dynamic_field("items_#_user_@_name") is True
|
||||
|
||||
def test_is_not_dynamic_field(self):
|
||||
"""Test identifying non-dynamic fields."""
|
||||
assert is_dynamic_field("regular_field") is False
|
||||
assert is_dynamic_field("field_name_here") is False
|
||||
assert is_dynamic_field("simple") is False
|
||||
assert is_dynamic_field("") is False
|
||||
|
||||
|
||||
class TestGetDynamicFieldDescription:
|
||||
"""Test generating descriptions for dynamic fields."""
|
||||
|
||||
def test_dict_field_description(self):
|
||||
"""Test descriptions for dictionary fields."""
|
||||
desc = get_dynamic_field_description("values", "values_#_name")
|
||||
assert "Dictionary value for values['name']" == desc
|
||||
|
||||
desc = get_dynamic_field_description("config", "config_#_database")
|
||||
assert "Dictionary value for config['database']" == desc
|
||||
|
||||
def test_list_field_description(self):
|
||||
"""Test descriptions for list fields."""
|
||||
desc = get_dynamic_field_description("items", "items_$_0")
|
||||
assert "List item for items[0]" == desc
|
||||
|
||||
desc = get_dynamic_field_description("results", "results_$_5")
|
||||
assert "List item for results[5]" == desc
|
||||
|
||||
def test_object_field_description(self):
|
||||
"""Test descriptions for object fields."""
|
||||
desc = get_dynamic_field_description("user", "user_@_name")
|
||||
assert "Object attribute for user.name" == desc
|
||||
|
||||
desc = get_dynamic_field_description("response", "response_@_data")
|
||||
assert "Object attribute for response.data" == desc
|
||||
|
||||
def test_fallback_description(self):
|
||||
"""Test fallback description for non-dynamic fields."""
|
||||
desc = get_dynamic_field_description("field", "field")
|
||||
assert "Dynamic value for field" == desc
|
||||
|
||||
|
||||
class TestGroupFieldsByBaseName:
|
||||
"""Test grouping fields by their base names."""
|
||||
|
||||
def test_group_mixed_fields(self):
|
||||
"""Test grouping a mix of dynamic and regular fields."""
|
||||
fields = [
|
||||
"values_#_name",
|
||||
"values_#_age",
|
||||
"items_$_0",
|
||||
"items_$_1",
|
||||
"user_@_email",
|
||||
"regular_field",
|
||||
"another_field",
|
||||
]
|
||||
|
||||
result = group_fields_by_base_name(fields)
|
||||
|
||||
expected = {
|
||||
"values": ["values_#_name", "values_#_age"],
|
||||
"items": ["items_$_0", "items_$_1"],
|
||||
"user": ["user_@_email"],
|
||||
"regular_field": ["regular_field"],
|
||||
"another_field": ["another_field"],
|
||||
}
|
||||
|
||||
assert result == expected
|
||||
|
||||
def test_group_empty_list(self):
|
||||
"""Test grouping an empty list."""
|
||||
result = group_fields_by_base_name([])
|
||||
assert result == {}
|
||||
|
||||
def test_group_single_field(self):
|
||||
"""Test grouping a single field."""
|
||||
result = group_fields_by_base_name(["values_#_name"])
|
||||
assert result == {"values": ["values_#_name"]}
|
||||
|
||||
def test_group_complex_dynamic_fields(self):
|
||||
"""Test grouping complex nested dynamic fields."""
|
||||
fields = [
|
||||
"data_$_0_#_key1",
|
||||
"data_$_0_#_key2",
|
||||
"data_$_1_#_key1",
|
||||
"other_@_attr",
|
||||
]
|
||||
|
||||
result = group_fields_by_base_name(fields)
|
||||
|
||||
expected = {
|
||||
"data": ["data_$_0_#_key1", "data_$_0_#_key2", "data_$_1_#_key1"],
|
||||
"other": ["other_@_attr"],
|
||||
}
|
||||
|
||||
assert result == expected
|
||||
|
||||
def test_preserve_order(self):
|
||||
"""Test that field order is preserved within groups."""
|
||||
fields = ["values_#_c", "values_#_a", "values_#_b"]
|
||||
result = group_fields_by_base_name(fields)
|
||||
|
||||
# Should preserve the original order
|
||||
assert result["values"] == ["values_#_c", "values_#_a", "values_#_b"]
|
||||
@@ -31,7 +31,7 @@ Sentry.init({
    Sentry.extraErrorDataIntegration(),
    Sentry.browserProfilingIntegration(),
    Sentry.httpClientIntegration(),
    // Sentry.launchDarklyIntegration(),
    Sentry.launchDarklyIntegration(),
    Sentry.replayIntegration({
      unmask: [".sentry-unmask, [data-sentry-unmask]"],
    }),

@@ -27,6 +27,7 @@
  "dependencies": {
    "@faker-js/faker": "10.0.0",
    "@hookform/resolvers": "5.2.1",
    "@marsidev/react-turnstile": "1.3.1",
    "@next/third-parties": "15.4.6",
    "@phosphor-icons/react": "2.1.10",
    "@radix-ui/react-alert-dialog": "1.1.15",

autogpt_platform/frontend/pnpm-lock.yaml (generated, 14 changed lines)
@@ -14,6 +14,9 @@ importers:
      '@hookform/resolvers':
        specifier: 5.2.1
        version: 5.2.1(react-hook-form@7.62.0(react@18.3.1))
      '@marsidev/react-turnstile':
        specifier: 1.3.1
        version: 1.3.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
      '@next/third-parties':
        specifier: 15.4.6
        version: 15.4.6(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.55.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)
@@ -1428,6 +1431,12 @@ packages:
    peerDependencies:
      jsep: ^0.4.0||^1.0.0

  '@marsidev/react-turnstile@1.3.1':
    resolution: {integrity: sha512-h2THG/75k4Y049hgjSGPIcajxXnh+IZAiXVbryQyVmagkboN7pJtBgR16g8akjwUBSfRrg6jw6KvPDjscQflog==}
    peerDependencies:
      react: ^17.0.2 || ^18.0.0 || ^19.0
      react-dom: ^17.0.2 || ^18.0.0 || ^19.0

  '@mdx-js/react@3.1.1':
    resolution: {integrity: sha512-f++rKLQgUVYDAtECQ6fn/is15GkEH9+nZPM3MS0RcxVqoTfawHvDlSCH7JbMhAM6uJ32v3eXLvLmLvjGu7PTQw==}
    peerDependencies:
@@ -8668,6 +8677,11 @@ snapshots:
    dependencies:
      jsep: 1.4.0

  '@marsidev/react-turnstile@1.3.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
    dependencies:
      react: 18.3.1
      react-dom: 18.3.1(react@18.3.1)

  '@mdx-js/react@3.1.1(@types/react@18.3.17)(react@18.3.1)':
    dependencies:
      '@types/mdx': 2.0.13

@@ -8,6 +8,7 @@ import { Switch } from "@/components/atoms/Switch/Switch";
import { preprocessInputSchema } from "../processors/input-schema-pre-processor";
import { OutputHandler } from "./OutputHandler";
import { useNodeStore } from "../../../stores/nodeStore";
import { cn } from "@/lib/utils";

export type CustomNodeData = {
  hardcodedValues: {
@@ -22,14 +23,19 @@ export type CustomNodeData = {
export type CustomNode = XYNode<CustomNodeData, "custom">;

export const CustomNode: React.FC<NodeProps<CustomNode>> = React.memo(
  ({ data, id }) => {
  ({ data, id, selected }) => {
    const showAdvanced = useNodeStore(
      (state) => state.nodeAdvancedStates[id] || false,
    );
    const setShowAdvanced = useNodeStore((state) => state.setShowAdvanced);

    return (
      <div className="rounded-xl border border-slate-200/60 bg-gradient-to-br from-white to-slate-50/30 shadow-lg shadow-slate-900/5 backdrop-blur-sm">
      <div
        className={cn(
          "rounded-xl border border-slate-200/60 bg-gradient-to-br from-white to-slate-50/30 shadow-lg shadow-slate-900/5 backdrop-blur-sm",
          selected && "border-2 border-slate-200 shadow-2xl",
        )}
      >
        {/* Header */}
        <div className="flex h-14 items-center justify-center rounded-xl border-b border-slate-200/50 bg-gradient-to-r from-slate-50/80 to-white/90">
          <Text

@@ -1,34 +0,0 @@
import React from "react";
import { FieldProps } from "@rjsf/utils";
import { Input } from "@/components/atoms/Input/Input";

// We need to add all the logic for the credential fields here
export const CredentialsField = (props: FieldProps) => {
  const { formData = {}, onChange, required: _required, schema } = props;

  const _credentialProvider = schema.credentials_provider;
  const _credentialType = schema.credentials_types;
  const _description = schema.description;
  const _title = schema.title;

  // Helper to update one property
  const setField = (key: string, value: any) =>
    onChange({ ...formData, [key]: value });

  return (
    <div className="flex flex-col gap-2">
      <Input
        hideLabel={true}
        label={""}
        id="credentials-id"
        type="text"
        value={formData.id || ""}
        onChange={(e) => setField("id", e.target.value)}
        placeholder="Enter your API Key"
        required
        size="small"
        wrapperClassName="mb-0"
      />
    </div>
  );
};
@@ -0,0 +1,49 @@
import React from "react";
import { FieldProps } from "@rjsf/utils";
import { useCredentialField } from "./useCredentialField";
import { filterCredentialsByProvider } from "./helpers";
import { PlusIcon } from "@phosphor-icons/react";
import { Button } from "@/components/atoms/Button/Button";
import { SelectCredential } from "./SelectCredential";
import { Skeleton } from "@/components/__legacy__/ui/skeleton";

export const CredentialsField = (props: FieldProps) => {
  const { formData = {}, onChange, required: _required, schema } = props;
  const { credentials, isCredentialListLoading } = useCredentialField();

  const credentialProviders = schema.credentials_provider;
  const { credentials: filteredCredentials, exists: credentialsExists } =
    filterCredentialsByProvider(credentials, credentialProviders);

  const setField = (key: string, value: any) =>
    onChange({ ...formData, [key]: value });

  if (isCredentialListLoading) {
    return (
      <div className="flex flex-col gap-2">
        <Skeleton className="h-8 w-full rounded-xlarge" />
        <Skeleton className="h-8 w-[30%] rounded-xlarge" />
      </div>
    );
  }

  return (
    <div className="flex flex-col gap-2">
      {credentialsExists && (
        <SelectCredential
          credentials={filteredCredentials}
          value={formData.id}
          onChange={(value) => setField("id", value)}
          disabled={false}
          label="Credential"
          placeholder="Select credential"
        />
      )}

      {/* TODO : We need to add a modal to add a new credential */}
      <Button type="button" className="w-fit" size="small">
        <PlusIcon /> Add API Key
      </Button>
    </div>
  );
};
@@ -0,0 +1,60 @@
import React from "react";
import { Select } from "@/components/atoms/Select/Select";
import { CredentialsMetaResponse } from "@/app/api/__generated__/models/credentialsMetaResponse";
import { KeyIcon } from "@phosphor-icons/react";

type SelectCredentialProps = {
  credentials: CredentialsMetaResponse[];
  value?: string;
  onChange: (credentialId: string) => void;
  disabled?: boolean;
  label?: string;
  placeholder?: string;
};

export const SelectCredential: React.FC<SelectCredentialProps> = ({
  credentials,
  value,
  onChange,
  disabled = false,
  label = "Credential",
  placeholder = "Select credential",
}) => {
  const options = credentials.map((cred) => {
    const details: string[] = [];
    if (cred.title && cred.title !== cred.provider) {
      details.push(cred.title);
    }
    if (cred.username) {
      details.push(cred.username);
    }
    if (cred.host) {
      details.push(cred.host);
    }
    const label =
      details.length > 0
        ? `${cred.provider} (${details.join(" - ")})`
        : cred.provider;

    return {
      value: cred.id,
      label,
      icon: <KeyIcon className="h-4 w-4" />,
    };
  });

  return (
    <Select
      label={label}
      id="select-credential"
      wrapperClassName="!mb-0"
      value={value}
      onValueChange={onChange}
      options={options}
      disabled={disabled}
      placeholder={placeholder}
      size="small"
      hideLabel
    />
  );
};
@@ -0,0 +1,47 @@
import { CredentialsMetaResponse } from "@/app/api/__generated__/models/credentialsMetaResponse";

export const filterCredentialsByProvider = (
  credentials: CredentialsMetaResponse[] | undefined,
  provider: string[],
) => {
  const filtered =
    credentials?.filter((credential) =>
      provider.includes(credential.provider),
    ) ?? [];
  return {
    credentials: filtered,
    exists: filtered.length > 0,
  };
};

export function toDisplayName(provider: string): string {
  console.log("provider", provider);
  // Special cases that need manual handling
  const specialCases: Record<string, string> = {
    aiml_api: "AI/ML",
    d_id: "D-ID",
    e2b: "E2B",
    llama_api: "Llama API",
    open_router: "Open Router",
    smtp: "SMTP",
    revid: "Rev.ID",
  };

  if (specialCases[provider]) {
    return specialCases[provider];
  }

  // General case: convert snake_case to Title Case
  return provider
    .split(/[_-]/)
    .map((word) => word.charAt(0).toUpperCase() + word.slice(1).toLowerCase())
    .join(" ");
}

export function isCredentialFieldSchema(schema: any): boolean {
  return (
    typeof schema === "object" &&
    schema !== null &&
    "credentials_provider" in schema
  );
}
@@ -0,0 +1,21 @@
import { useGetV1ListCredentials } from "@/app/api/__generated__/endpoints/integrations/integrations";
import { CredentialsMetaResponse } from "@/app/api/__generated__/models/credentialsMetaResponse";

export const useCredentialField = () => {
  // Fetch all the credentials from the backend.
  // We cache them for 10 minutes; if the user edits a credential, we invalidate the cache.
  // Whenever the user adds a block, we filter the credentials list and check whether this block's provider is in the list.
  const { data: credentials, isLoading: isCredentialListLoading } =
    useGetV1ListCredentials({
      query: {
        refetchInterval: 10 * 60 * 1000,
        select: (x) => {
          return x.data as CredentialsMetaResponse[];
        },
      },
    });
  return {
    credentials,
    isCredentialListLoading,
  };
};
@@ -1,5 +1,5 @@
import { RegistryFieldsType } from "@rjsf/utils";
import { CredentialsField } from "./CredentialField";
import { CredentialsField } from "./CredentialField/CredentialField";
import { AnyOfField } from "./AnyOfField/AnyOfField";
import { ObjectField } from "./ObjectField";

@@ -15,6 +15,11 @@ import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
import { generateHandleId } from "../../handlers/helpers";
import { getTypeDisplayInfo } from "../helpers";
import { ArrayEditorContext } from "../../components/ArrayEditor/ArrayEditorContext";
import {
  isCredentialFieldSchema,
  toDisplayName,
} from "../fields/CredentialField/helpers";
import { cn } from "@/lib/utils";

const FieldTemplate: React.FC<FieldTemplateProps> = ({
  id,
@@ -47,6 +52,7 @@ const FieldTemplate: React.FC<FieldTemplateProps> = ({
  }
  const isAnyOf = Array.isArray((schema as any)?.anyOf);
  const isOneOf = Array.isArray((schema as any)?.oneOf);
  const isCredential = isCredentialFieldSchema(schema);
  const suppressHandle = isAnyOf || isOneOf;

  if (!showAdvanced && schema.advanced === true && !isConnected) {
@@ -63,12 +69,17 @@ const FieldTemplate: React.FC<FieldTemplateProps> = ({
    <div className="mt-4 w-[400px] space-y-1">
      {label && schema.type && (
        <label htmlFor={id} className="flex items-center gap-1">
          {!suppressHandle && !fromAnyOf && (
          {!suppressHandle && !fromAnyOf && !isCredential && (
            <NodeHandle id={fieldKey} isConnected={isConnected} side="left" />
          )}
          {!fromAnyOf && (
            <Text variant="body" className="line-clamp-1">
              {label}
            <Text
              variant="body"
              className={cn("line-clamp-1", isCredential && "ml-3")}
            >
              {isCredential
                ? toDisplayName(schema.credentials_provider[0]) + " credentials"
                : label}
            </Text>
          )}
          {!fromAnyOf && (

@@ -4,6 +4,7 @@ import { Button } from "@/components/__legacy__/Button";
import {
  Dialog,
  DialogContent,
  DialogDescription,
  DialogHeader,
  DialogTitle,
  DialogTrigger,
@@ -64,6 +65,9 @@ export default function LibraryUploadAgentDialog(): React.ReactNode {
      <DialogContent>
        <DialogHeader>
          <DialogTitle className="mb-8 text-center">Upload Agent</DialogTitle>
          <DialogDescription>
            Upload your agent by providing a name, description, and JSON file.
          </DialogDescription>
        </DialogHeader>

        <Form {...form}>

@@ -85,18 +85,16 @@ export default function LoginPage() {
          />

          {/* Turnstile CAPTCHA Component */}
          {turnstile.shouldRender ? (
            <Turnstile
              key={captchaKey}
              siteKey={turnstile.siteKey}
              onVerify={turnstile.handleVerify}
              onExpire={turnstile.handleExpire}
              onError={turnstile.handleError}
              setWidgetId={turnstile.setWidgetId}
              action="login"
              shouldRender={turnstile.shouldRender}
            />
          ) : null}
          <Turnstile
            key={captchaKey}
            siteKey={turnstile.siteKey}
            onVerify={turnstile.handleVerify}
            onExpire={turnstile.handleExpire}
            onError={turnstile.handleError}
            setWidgetId={turnstile.setWidgetId}
            action="login"
            shouldRender={turnstile.shouldRender}
          />

          <Button
            variant="primary"

@@ -0,0 +1,113 @@
import { SearchBar } from "@/components/__legacy__/SearchBar";
import { useMainSearchResultPage } from "./useMainSearchResultPage";
import { SearchFilterChips } from "@/components/__legacy__/SearchFilterChips";
import { SortDropdown } from "@/components/__legacy__/SortDropdown";
import { AgentsSection } from "../AgentsSection/AgentsSection";
import { Separator } from "@/components/__legacy__/ui/separator";
import { FeaturedCreators } from "../FeaturedCreators/FeaturedCreators";
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { MainMarketplacePageLoading } from "../MainMarketplacePageLoading";

export const MainSearchResultPage = ({
  searchTerm,
  sort,
}: {
  searchTerm: string;
  sort: string;
}) => {
  const {
    agents,
    creators,
    totalCount,
    agentsCount,
    creatorsCount,
    handleFilterChange,
    handleSortChange,
    showAgents,
    showCreators,
    isAgentsLoading,
    isCreatorsLoading,
    isAgentsError,
    isCreatorsError,
  } = useMainSearchResultPage({ searchTerm, sort });

  const isLoading = isAgentsLoading || isCreatorsLoading;
  const hasError = isAgentsError || isCreatorsError;

  if (isLoading) {
    return <MainMarketplacePageLoading />;
  }

  if (hasError) {
    return (
      <div className="flex min-h-[500px] items-center justify-center">
        <ErrorCard
          isSuccess={false}
          responseError={{ message: "Failed to load marketplace data" }}
          context="marketplace page"
          onRetry={() => window.location.reload()}
        />
      </div>
    );
  }
  return (
    <div className="w-full">
      <div className="mx-auto min-h-screen max-w-[1440px] px-10 lg:min-w-[1440px]">
        <div className="mt-8 flex items-center">
          <div className="flex-1">
            <h2 className="text-base font-medium leading-normal text-neutral-800 dark:text-neutral-200">
              Results for:
            </h2>
            <h1 className="font-poppins text-2xl font-semibold leading-[32px] text-neutral-800 dark:text-neutral-100">
              {searchTerm}
            </h1>
          </div>
          <div className="flex-none">
            <SearchBar width="w-[439px]" height="h-[60px]" />
          </div>
        </div>

        {totalCount > 0 ? (
          <>
            <div className="mt-[36px] flex items-center justify-between">
              <SearchFilterChips
                totalCount={totalCount}
                agentsCount={agentsCount}
                creatorsCount={creatorsCount}
                onFilterChange={handleFilterChange}
              />
              <SortDropdown onSort={handleSortChange} />
            </div>
            {/* Content section */}
            <div className="min-h-[500px] max-w-[1440px] space-y-8 py-8">
              {showAgents && agentsCount > 0 && agents && (
                <div className="mt-[36px]">
                  <AgentsSection agents={agents} sectionTitle="Agents" />
                </div>
              )}

              {showAgents && agentsCount > 0 && creatorsCount > 0 && (
                <Separator />
              )}
              {showCreators && creatorsCount > 0 && creators && (
                <FeaturedCreators
                  featuredCreators={creators}
                  title="Creators"
                />
              )}
            </div>
          </>
        ) : (
          <div className="mt-20 flex flex-col items-center justify-center">
            <h3 className="mb-2 text-xl font-medium text-neutral-600 dark:text-neutral-300">
              No results found
            </h3>
            <p className="text-neutral-500 dark:text-neutral-400">
              Try adjusting your search terms or filters
            </p>
          </div>
        )}
      </div>
    </div>
  );
};
@@ -0,0 +1,123 @@
import {
  useGetV2ListStoreAgents,
  useGetV2ListStoreCreators,
} from "@/app/api/__generated__/endpoints/store/store";
import { CreatorsResponse } from "@/app/api/__generated__/models/creatorsResponse";
import { StoreAgentsResponse } from "@/app/api/__generated__/models/storeAgentsResponse";
import { useState, useMemo } from "react";

interface useMainSearchResultPageType {
  searchTerm: string;
  sort: string;
}

export const useMainSearchResultPage = ({
  searchTerm,
  sort,
}: useMainSearchResultPageType) => {
  const [showAgents, setShowAgents] = useState(true);
  const [showCreators, setShowCreators] = useState(true);
  const [clientSortBy, setClientSortBy] = useState<string>(sort);

  const {
    data: agentsData,
    isLoading: isAgentsLoading,
    isError: isAgentsError,
  } = useGetV2ListStoreAgents(
    {
      search_query: searchTerm,
      sorted_by: sort,
    },
    {
      query: {
        select: (x) => {
          return (x.data as StoreAgentsResponse).agents;
        },
      },
    },
  );

  const {
    data: creatorsData,
    isLoading: isCreatorsLoading,
    isError: isCreatorsError,
  } = useGetV2ListStoreCreators(
    { search_query: searchTerm, sorted_by: sort },
    {
      query: {
        select: (x) => {
          return (x.data as CreatorsResponse).creators;
        },
      },
    },
  );

  // This is the strategy we are using for sorting the agents and creators:
  // currently we sort client-side, but we may shift it to the server side.
  // We store the sortBy state in the URL params, then refetch the data with the new sortBy.

  const agents = useMemo(() => {
    if (!agentsData) return [];

    const sorted = [...agentsData];

    if (clientSortBy === "runs") {
      return sorted.sort((a, b) => b.runs - a.runs);
    } else if (clientSortBy === "rating") {
      return sorted.sort((a, b) => b.rating - a.rating);
    } else {
      return sorted;
    }
  }, [agentsData, clientSortBy]);

  const creators = useMemo(() => {
    if (!creatorsData) return [];

    const sorted = [...creatorsData];

    if (clientSortBy === "runs") {
      return sorted.sort((a, b) => b.agent_runs - a.agent_runs);
    } else if (clientSortBy === "rating") {
      return sorted.sort((a, b) => b.agent_rating - a.agent_rating);
    } else {
      return sorted.sort((a, b) => b.num_agents - a.num_agents);
    }
  }, [creatorsData, clientSortBy]);

  const agentsCount = agents?.length ?? 0;
  const creatorsCount = creators?.length ?? 0;
  const totalCount = agentsCount + creatorsCount;

  const handleFilterChange = (value: string) => {
    if (value === "agents") {
      setShowAgents(true);
      setShowCreators(false);
    } else if (value === "creators") {
      setShowAgents(false);
      setShowCreators(true);
    } else {
      setShowAgents(true);
      setShowCreators(true);
    }
  };

  const handleSortChange = (sortValue: string) => {
    setClientSortBy(sortValue);
  };

  return {
    agents,
    creators,
    handleFilterChange,
    handleSortChange,
    agentsCount,
    creatorsCount,
    totalCount,
    showAgents,
    showCreators,
    isAgentsLoading,
    isCreatorsLoading,
    isAgentsError,
    isCreatorsError,
  };
};
@@ -0,0 +1,27 @@
import { Skeleton } from "@/components/__legacy__/ui/skeleton";

export const MainSearchResultPageLoading = () => {
  return (
    <div className="w-full">
      <div className="mx-auto min-h-screen max-w-[1440px] px-10 lg:min-w-[1440px]">
        <div className="mt-8 flex items-center">
          <div className="flex-1">
            <Skeleton className="mb-2 h-5 w-32 bg-neutral-200 dark:bg-neutral-700" />
            <Skeleton className="h-8 w-64 bg-neutral-200 dark:bg-neutral-700" />
          </div>
          <div className="flex-none">
            <Skeleton className="h-[60px] w-[439px] bg-neutral-200 dark:bg-neutral-700" />
          </div>
        </div>
        <div className="mt-[36px] flex items-center justify-between">
          <Skeleton className="h-8 w-48 bg-neutral-200 dark:bg-neutral-700" />
          <Skeleton className="h-8 w-32 bg-neutral-200 dark:bg-neutral-700" />
        </div>
        <div className="mt-20 flex flex-col items-center justify-center">
          <Skeleton className="mb-4 h-6 w-40 bg-neutral-200 dark:bg-neutral-700" />
          <Skeleton className="h-6 w-80 bg-neutral-200 dark:bg-neutral-700" />
        </div>
      </div>
    </div>
  );
};
@@ -1,14 +1,7 @@
"use client";

import { use, useCallback, useEffect, useState } from "react";
import { AgentsSection } from "@/components/__legacy__/composite/AgentsSection";
import { SearchBar } from "@/components/__legacy__/SearchBar";
import { FeaturedCreators } from "@/components/__legacy__/composite/FeaturedCreators";
import { Separator } from "@/components/__legacy__/ui/separator";
import { SearchFilterChips } from "@/components/__legacy__/SearchFilterChips";
import { SortDropdown } from "@/components/__legacy__/SortDropdown";
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
import { Creator, StoreAgent } from "@/lib/autogpt-server-api";
import { use } from "react";
import { MainSearchResultPage } from "../components/MainSearchResultPage/MainSearchResultPage";

type MarketplaceSearchPageSearchParams = { searchTerm?: string; sort?: string };

@@ -18,171 +11,9 @@ export default function MarketplaceSearchPage({
  searchParams: Promise<MarketplaceSearchPageSearchParams>;
}) {
  return (
    <SearchResults
    <MainSearchResultPage
      searchTerm={use(searchParams).searchTerm || ""}
      sort={use(searchParams).sort || "trending"}
    />
  );
}

function SearchResults({
  searchTerm,
  sort,
}: {
  searchTerm: string;
  sort: string;
}): React.ReactElement {
  const [showAgents, setShowAgents] = useState(true);
  const [showCreators, setShowCreators] = useState(true);
  const [agents, setAgents] = useState<StoreAgent[]>([]);
  const [creators, setCreators] = useState<Creator[]>([]);
  const [isLoading, setIsLoading] = useState(true);
  const api = useBackendAPI();

  useEffect(() => {
    const fetchData = async () => {
      setIsLoading(true);

      try {
        const [agentsRes, creatorsRes] = await Promise.all([
          api.getStoreAgents({
            search_query: searchTerm,
            sorted_by: sort,
          }),
          api.getStoreCreators({
            search_query: searchTerm,
          }),
        ]);

        setAgents(agentsRes.agents || []);
        setCreators(creatorsRes.creators || []);
      } catch (error) {
        console.error("Error fetching data:", error);
      } finally {
        setIsLoading(false);
      }
    };

    fetchData();
  }, [api, searchTerm, sort]);

  const agentsCount = agents.length;
  const creatorsCount = creators.length;
  const totalCount = agentsCount + creatorsCount;

  const handleFilterChange = (value: string) => {
    if (value === "agents") {
      setShowAgents(true);
      setShowCreators(false);
    } else if (value === "creators") {
      setShowAgents(false);
      setShowCreators(true);
    } else {
      setShowAgents(true);
      setShowCreators(true);
    }
  };

  const handleSortChange = useCallback(
    (sortValue: string) => {
      let sortBy = "recent";
      if (sortValue === "runs") {
        sortBy = "runs";
      } else if (sortValue === "rating") {
        sortBy = "rating";
      }

      const sortedAgents = [...agents].sort((a, b) => {
        if (sortBy === "runs") {
          return b.runs - a.runs;
        } else if (sortBy === "rating") {
          return b.rating - a.rating;
        } else {
          return (
            new Date(b.updated_at).getTime() - new Date(a.updated_at).getTime()
          );
        }
      });

      const sortedCreators = [...creators].sort((a, b) => {
        if (sortBy === "runs") {
          return b.agent_runs - a.agent_runs;
        } else if (sortBy === "rating") {
          return b.agent_rating - a.agent_rating;
        } else {
          // Creators don't have updated_at, sort by number of agents as fallback
          return b.num_agents - a.num_agents;
        }
      });

      setAgents(sortedAgents);
      setCreators(sortedCreators);
    },
    [agents, creators],
  );

  return (
    <div className="w-full">
      <div className="mx-auto min-h-screen max-w-[1440px] px-10 lg:min-w-[1440px]">
        <div className="mt-8 flex items-center">
          <div className="flex-1">
            <h2 className="text-base font-medium leading-normal text-neutral-800 dark:text-neutral-200">
              Results for:
            </h2>
            <h1 className="font-poppins text-2xl font-semibold leading-[32px] text-neutral-800 dark:text-neutral-100">
              {searchTerm}
            </h1>
          </div>
          <div className="flex-none">
            <SearchBar width="w-[439px]" height="h-[60px]" />
          </div>
        </div>

        {isLoading ? (
          <div className="mt-20 flex flex-col items-center justify-center">
            <p className="text-neutral-500 dark:text-neutral-400">Loading...</p>
          </div>
        ) : totalCount > 0 ? (
          <>
            <div className="mt-[36px] flex items-center justify-between">
              <SearchFilterChips
                totalCount={totalCount}
                agentsCount={agentsCount}
                creatorsCount={creatorsCount}
                onFilterChange={handleFilterChange}
              />
              <SortDropdown onSort={handleSortChange} />
            </div>
            {/* Content section */}
            <div className="min-h-[500px] max-w-[1440px]">
              {showAgents && agentsCount > 0 && (
                <div className="mt-[36px]">
                  <AgentsSection agents={agents} sectionTitle="Agents" />
                </div>
              )}

              {showAgents && agentsCount > 0 && creatorsCount > 0 && (
                <Separator />
              )}
              {showCreators && creatorsCount > 0 && (
                <FeaturedCreators
                  featuredCreators={creators}
                  title="Creators"
                />
              )}
            </div>
          </>
        ) : (
          <div className="mt-20 flex flex-col items-center justify-center">
            <h3 className="mb-2 text-xl font-medium text-neutral-600 dark:text-neutral-300">
              No results found
            </h3>
            <p className="text-neutral-500 dark:text-neutral-400">
              Try adjusting your search terms or filters
            </p>
          </div>
        )}
      </div>
    </div>
  );
}

@@ -112,7 +112,7 @@
      "get": {
        "tags": ["v1", "integrations"],
        "summary": "List Credentials",
        "operationId": "getV1ListCredentials",
        "operationId": "getV1List credentials",
        "responses": {
          "200": {
            "description": "Successful Response",
@@ -123,7 +123,7 @@
                    "$ref": "#/components/schemas/CredentialsMetaResponse"
                  },
                  "type": "array",
                  "title": "Response Getv1Listcredentials"
                  "title": "Response Getv1List Credentials"
                }
              }
            }
@@ -268,8 +268,8 @@
    "/api/integrations/{provider}/credentials/{cred_id}": {
      "get": {
        "tags": ["v1", "integrations"],
        "summary": "Get Credential",
        "operationId": "getV1GetCredential",
        "summary": "Get Specific Credential By ID",
        "operationId": "getV1Get specific credential by id",
        "security": [{ "HTTPBearerJWT": [] }],
        "parameters": [
          {
@@ -315,7 +315,7 @@
                  "host_scoped": "#/components/schemas/HostScopedCredentials-Output"
                }
              },
              "title": "Response Getv1Getcredential"
              "title": "Response Getv1Get Specific Credential By Id"
            }
          }
        }

@@ -10,9 +10,9 @@ import {
import { ChevronDownIcon } from "@radix-ui/react-icons";

const sortOptions: SortOption[] = [
  { label: "Most Recent", value: "recent" },
  // { label: "Most Recent", value: "recent" }, // we are not using this for now because we don't have date data from the backend
  { label: "Most Runs", value: "runs" },
  { label: "Highest Rated", value: "rating" },
  // { label: "Highest Rated", value: "rating" }, // we are not using this for now because we don't have rating data from the backend
];

interface SortOption {

@@ -39,6 +39,7 @@ export interface TaskGroup {

export default function Wallet() {
  const { state, updateState } = useOnboarding();

  const groups = useMemo<TaskGroup[]>(() => {
    return [
      {
@@ -348,10 +349,11 @@ export default function Wallet() {
        </div>
      </PopoverTrigger>
      <PopoverContent
        className={cn(
          "absolute -right-[7.9rem] -top-[3.2rem] z-50 w-[28.5rem] px-[0.625rem] py-2",
          "rounded-xl border-zinc-100 bg-white shadow-[0_3px_3px] shadow-zinc-200",
        )}
        side="bottom"
        align="end"
        sideOffset={12}
        collisionPadding={16}
        className={cn("z-50 w-[28.5rem] px-[0.625rem] py-2")}
      >
        {/* Header */}
        <div className="mx-1 flex items-center justify-between border-b border-zinc-200 pb-3">

@@ -40,27 +40,57 @@ export function Turnstile({
      return;
    }

    // Create script element
    const script = document.createElement("script");
    script.src =
    const scriptSrc =
      "https://challenges.cloudflare.com/turnstile/v0/api.js?render=explicit";

    // If a script already exists, reuse it and attach listeners
    const existingScript = Array.from(document.scripts).find(
      (s) => s.src === scriptSrc,
    );

    if (existingScript) {
      if (window.turnstile) {
        setLoaded(true);
        return;
      }

      const handleLoad: EventListener = () => {
        setLoaded(true);
      };
      const handleError: EventListener = () => {
        onError?.(new Error("Failed to load Turnstile script"));
      };

      existingScript.addEventListener("load", handleLoad);
      existingScript.addEventListener("error", handleError);

      return () => {
        existingScript.removeEventListener("load", handleLoad);
        existingScript.removeEventListener("error", handleError);
      };
    }

    // Create a single script element if not present and keep it in the document
    const script = document.createElement("script");
    script.src = scriptSrc;
    script.async = true;
    script.defer = true;

    script.onload = () => {
    const handleLoad: EventListener = () => {
      setLoaded(true);
    };

    script.onerror = () => {
    const handleError: EventListener = () => {
      onError?.(new Error("Failed to load Turnstile script"));
    };

    script.addEventListener("load", handleLoad);
    script.addEventListener("error", handleError);

    document.head.appendChild(script);

    return () => {
      if (document.head.contains(script)) {
        document.head.removeChild(script);
      }
      script.removeEventListener("load", handleLoad);
      script.removeEventListener("error", handleError);
    };
  }, [onError, shouldRender]);

@@ -5,6 +5,7 @@ import type { ReactNode } from "react";
import { useMemo } from "react";
import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
import { BehaveAs, getBehaveAs } from "@/lib/utils";
import * as Sentry from "@sentry/nextjs";

const clientId = process.env.NEXT_PUBLIC_LAUNCHDARKLY_CLIENT_ID;
const envEnabled = process.env.NEXT_PUBLIC_LAUNCHDARKLY_ENABLED === "true";
@@ -45,7 +46,10 @@ export function LaunchDarklyProvider({ children }: { children: ReactNode }) {
      clientSideID={clientId}
      context={context}
      reactOptions={{ useCamelCaseFlagKeys: false }}
      options={{ bootstrap: "localStorage" }}
      options={{
        bootstrap: "localStorage",
        inspectors: [Sentry.buildLaunchDarklyFlagUsedHandler()],
      }}
    >
      {children}
    </LDProvider>

@@ -98,7 +98,7 @@ export class MarketplacePage extends BasePage {
  }

  async searchAndNavigate(query: string, page: Page) {
    const searchInput = await this.getSearchInput(page);
    const searchInput = (await this.getSearchInput(page)).first();
    await searchInput.fill(query);
    await searchInput.press("Enter");
  }