mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-04-08 03:00:28 -04:00
Merge branch 'dev' into swiftyos/caching-pt2
This commit is contained in:
@@ -1,6 +1,3 @@
|
||||
[pr_reviewer]
|
||||
num_code_suggestions=0
|
||||
|
||||
[pr_code_suggestions]
|
||||
commitable_code_suggestions=false
|
||||
num_code_suggestions=0
|
||||
|
||||
214
autogpt_platform/backend/backend/blocks/ai_condition.py
Normal file
214
autogpt_platform/backend/backend/blocks/ai_condition.py
Normal file
@@ -0,0 +1,214 @@
|
||||
import logging
import re
from typing import Any

from backend.blocks.llm import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    AIBlockBase,
    AICredentials,
    AICredentialsField,
    LlmModel,
    LLMResponse,
    llm_call,
)
from backend.data.block import BlockCategory, BlockOutput, BlockSchema
from backend.data.model import APIKeyCredentials, NodeExecutionStats, SchemaField
|
||||
|
||||
|
||||
class AIConditionBlock(AIBlockBase):
    """
    An AI-powered condition block that uses natural language to evaluate conditions.

    This block allows users to define conditions in plain English (e.g., "the input is an email address",
    "the input is a city in the USA") and uses AI to determine if the input satisfies the condition.
    It provides the same yes/no data pass-through functionality as the standard ConditionBlock.
    """

    # Compiled once at class-definition time instead of on every ambiguous
    # response; word boundaries avoid false positives like 'untrue' or '10'.
    _DECISION_TOKEN_RE = re.compile(r"\b(true|false|yes|no|1|0)\b")
    # The fallback only accepts a reply whose boolean-ish tokens are unanimous.
    _TRUE_TOKEN_SETS = ({"true"}, {"yes"}, {"1"})
    _FALSE_TOKEN_SETS = ({"false"}, {"no"}, {"0"})

    class Input(BlockSchema):
        input_value: Any = SchemaField(
            description="The input value to evaluate with the AI condition",
            placeholder="Enter the value to be evaluated (text, number, or any data)",
        )
        condition: str = SchemaField(
            description="A plaintext English description of the condition to evaluate",
            placeholder="E.g., 'the input is the body of an email', 'the input is a City in the USA', 'the input is an error or a refusal'",
        )
        yes_value: Any = SchemaField(
            description="(Optional) Value to output if the condition is true. If not provided, input_value will be used.",
            placeholder="Leave empty to use input_value, or enter a specific value",
            default=None,
        )
        no_value: Any = SchemaField(
            description="(Optional) Value to output if the condition is false. If not provided, input_value will be used.",
            placeholder="Leave empty to use input_value, or enter a specific value",
            default=None,
        )
        model: LlmModel = SchemaField(
            title="LLM Model",
            default=LlmModel.GPT4O,
            description="The language model to use for evaluating the condition.",
            advanced=False,
        )
        credentials: AICredentials = AICredentialsField()

    class Output(BlockSchema):
        result: bool = SchemaField(
            description="The result of the AI condition evaluation (True or False)"
        )
        yes_output: Any = SchemaField(
            description="The output value if the condition is true"
        )
        no_output: Any = SchemaField(
            description="The output value if the condition is false"
        )
        error: str = SchemaField(
            description="Error message if the AI evaluation is uncertain or fails"
        )

    def __init__(self):
        super().__init__(
            id="553ec5b8-6c45-4299-8d75-b394d05f72ff",
            input_schema=AIConditionBlock.Input,
            output_schema=AIConditionBlock.Output,
            description="Uses AI to evaluate natural language conditions and provide conditional outputs",
            categories={BlockCategory.AI, BlockCategory.LOGIC},
            test_input={
                "input_value": "john@example.com",
                "condition": "the input is an email address",
                "yes_value": "Valid email",
                "no_value": "Not an email",
                "model": LlmModel.GPT4O,
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("result", True),
                ("yes_output", "Valid email"),
            ],
            test_mock={
                "llm_call": lambda *args, **kwargs: LLMResponse(
                    raw_response="",
                    prompt=[],
                    response="true",
                    tool_calls=None,
                    prompt_tokens=50,
                    completion_tokens=10,
                    reasoning=None,
                )
            },
        )

    async def llm_call(
        self,
        credentials: APIKeyCredentials,
        llm_model: LlmModel,
        prompt: list,
        max_tokens: int,
    ) -> LLMResponse:
        """Wrapper method for llm_call to enable mocking in tests."""
        return await llm_call(
            credentials=credentials,
            llm_model=llm_model,
            prompt=prompt,
            force_json_output=False,
            max_tokens=max_tokens,
        )

    def _interpret_response(self, raw: str) -> bool | None:
        """
        Map the model's reply to True/False, or None when it is ambiguous.

        Exact 'true'/'false' replies are handled first; otherwise the reply is
        scanned for boolean-ish tokens and accepted only when they agree.
        """
        text = raw.strip().lower()
        if text == "true":
            return True
        if text == "false":
            return False
        tokens = set(self._DECISION_TOKEN_RE.findall(text))
        if tokens in self._TRUE_TOKEN_SETS:
            return True
        if tokens in self._FALSE_TOKEN_SETS:
            return False
        return None

    async def run(
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        """
        Evaluate the AI condition and yield the matching outputs.

        Yields "result" always; "yes_output" or "no_output" depending on the
        verdict; and "error" when the model's reply is unclear or the call
        fails (in which case the result defaults to False).
        """
        # Prepare the yes and no values, using input_value as default.
        yes_value = (
            input_data.yes_value
            if input_data.yes_value is not None
            else input_data.input_value
        )
        no_value = (
            input_data.no_value
            if input_data.no_value is not None
            else input_data.input_value
        )

        # Convert input_value to string for AI evaluation.
        input_str = str(input_data.input_value)

        # Create the prompt for AI evaluation.
        prompt = [
            {
                "role": "system",
                "content": (
                    "You are an AI assistant that evaluates conditions based on input data. "
                    "You must respond with only 'true' or 'false' (lowercase) to indicate whether "
                    "the given condition is met by the input value. Be accurate and consider the "
                    "context and meaning of both the input and the condition."
                ),
            },
            {
                "role": "user",
                "content": (
                    f"Input value: {input_str}\n"
                    f"Condition to evaluate: {input_data.condition}\n\n"
                    "Does the input value satisfy the condition? Respond with only 'true' or 'false'."
                ),
            },
        ]

        try:
            response = await self.llm_call(
                credentials=credentials,
                llm_model=input_data.model,
                prompt=prompt,
                max_tokens=10,  # We only expect a true/false response
            )

            parsed = self._interpret_response(response.response)
            if parsed is None:
                # Unclear or conflicting response - default to False and surface it.
                result = False
                yield "error", f"Unclear AI response: '{response.response}'"
            else:
                result = parsed

            # Update internal stats so token usage is accounted for.
            self.merge_stats(
                NodeExecutionStats(
                    input_token_count=response.prompt_tokens,
                    output_token_count=response.completion_tokens,
                )
            )
            self.prompt = response.prompt

        except Exception as e:
            # In case of any error, default to False to be safe; log but don't
            # fail the block execution. Lazy %-args avoid building the message
            # when the logger is disabled.
            result = False
            logging.getLogger(__name__).error(
                "AI condition evaluation failed: %s", e
            )
            yield "error", f"AI evaluation failed: {str(e)}"

        # Yield results: the boolean verdict plus the matching branch value.
        yield "result", result

        if result:
            yield "yes_output", yes_value
        else:
            yield "no_output", no_value
|
||||
@@ -554,6 +554,89 @@ class AgentToggleInputBlock(AgentInputBlock):
|
||||
)
|
||||
|
||||
|
||||
class AgentTableInputBlock(AgentInputBlock):
    """
    Table-style input block.

    Column headers are configured when the agent is built; at runtime the user
    fills in rows, and each row is emitted as a dictionary keyed by the
    configured column names.
    """

    class Input(AgentInputBlock.Input):
        value: Optional[list[dict[str, Any]]] = SchemaField(
            description="The table data as a list of dictionaries.",
            default=None,
            advanced=False,
            title="Default Value",
        )
        column_headers: list[str] = SchemaField(
            description="Column headers for the table.",
            default_factory=lambda: ["Column 1", "Column 2", "Column 3"],
            advanced=False,
            title="Column Headers",
        )

        def generate_schema(self):
            """Build the JSON schema for the value field, rendered as a table."""
            schema = super().generate_schema()
            # Fall back to the stock headers if the configured list is empty.
            headers = self.column_headers or ["Column 1", "Column 2", "Column 3"]
            row_properties = {column: {"type": "string"} for column in headers}
            schema["type"] = "array"
            schema["format"] = "table"
            schema["items"] = {
                "type": "object",
                "properties": row_properties,
            }
            if self.value is not None:
                schema["default"] = self.value
            return schema

    class Output(AgentInputBlock.Output):
        result: list[dict[str, Any]] = SchemaField(
            description="The table data as a list of dictionaries with headers as keys."
        )

    def __init__(self):
        super().__init__(
            id="5603b273-f41e-4020-af7d-fbc9c6a8d928",
            description="Block for table data input with customizable headers.",
            disabled=not config.enable_agent_input_subtype_blocks,
            input_schema=AgentTableInputBlock.Input,
            output_schema=AgentTableInputBlock.Output,
            test_input=[
                {
                    "name": "test_table",
                    "column_headers": ["Name", "Age", "City"],
                    "value": [
                        {"Name": "John", "Age": "30", "City": "New York"},
                        {"Name": "Jane", "Age": "25", "City": "London"},
                    ],
                    "description": "Example table input",
                }
            ],
            test_output=[
                (
                    "result",
                    [
                        {"Name": "John", "Age": "30", "City": "New York"},
                        {"Name": "Jane", "Age": "25", "City": "London"},
                    ],
                )
            ],
        )

    async def run(self, input_data: Input, *args, **kwargs) -> BlockOutput:
        """Yield the table rows; a missing (None) value becomes an empty list."""
        rows = input_data.value
        yield "result", [] if rows is None else rows
|
||||
|
||||
|
||||
IO_BLOCK_IDs = [
|
||||
AgentInputBlock().id,
|
||||
AgentOutputBlock().id,
|
||||
@@ -565,4 +648,5 @@ IO_BLOCK_IDs = [
|
||||
AgentFileInputBlock().id,
|
||||
AgentDropdownInputBlock().id,
|
||||
AgentToggleInputBlock().id,
|
||||
AgentTableInputBlock().id,
|
||||
]
|
||||
|
||||
@@ -101,6 +101,7 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
|
||||
CLAUDE_4_1_OPUS = "claude-opus-4-1-20250805"
|
||||
CLAUDE_4_OPUS = "claude-opus-4-20250514"
|
||||
CLAUDE_4_SONNET = "claude-sonnet-4-20250514"
|
||||
CLAUDE_4_5_SONNET = "claude-sonnet-4-5-20250929"
|
||||
CLAUDE_3_7_SONNET = "claude-3-7-sonnet-20250219"
|
||||
CLAUDE_3_5_SONNET = "claude-3-5-sonnet-latest"
|
||||
CLAUDE_3_5_HAIKU = "claude-3-5-haiku-latest"
|
||||
@@ -213,6 +214,9 @@ MODEL_METADATA = {
|
||||
LlmModel.CLAUDE_4_SONNET: ModelMetadata(
|
||||
"anthropic", 200000, 64000
|
||||
), # claude-4-sonnet-20250514
|
||||
LlmModel.CLAUDE_4_5_SONNET: ModelMetadata(
|
||||
"anthropic", 200000, 64000
|
||||
), # claude-sonnet-4-5-20250929
|
||||
LlmModel.CLAUDE_3_7_SONNET: ModelMetadata(
|
||||
"anthropic", 200000, 64000
|
||||
), # claude-3-7-sonnet-20250219
|
||||
|
||||
@@ -519,34 +519,121 @@ class SmartDecisionMakerBlock(Block):
|
||||
):
|
||||
prompt.append({"role": "user", "content": prefix + input_data.prompt})
|
||||
|
||||
response = await llm.llm_call(
|
||||
credentials=credentials,
|
||||
llm_model=input_data.model,
|
||||
prompt=prompt,
|
||||
max_tokens=input_data.max_tokens,
|
||||
tools=tool_functions,
|
||||
ollama_host=input_data.ollama_host,
|
||||
parallel_tool_calls=input_data.multiple_tool_calls,
|
||||
# Use retry decorator for LLM calls with validation
|
||||
from backend.util.retry import create_retry_decorator
|
||||
|
||||
# Create retry decorator that excludes ValueError from retry (for non-LLM errors)
|
||||
llm_retry = create_retry_decorator(
|
||||
max_attempts=input_data.retry,
|
||||
exclude_exceptions=(), # Don't exclude ValueError - we want to retry validation failures
|
||||
context="SmartDecisionMaker LLM call",
|
||||
)
|
||||
|
||||
# Track LLM usage stats
|
||||
self.merge_stats(
|
||||
NodeExecutionStats(
|
||||
input_token_count=response.prompt_tokens,
|
||||
output_token_count=response.completion_tokens,
|
||||
llm_call_count=1,
|
||||
@llm_retry
|
||||
async def call_llm_with_validation():
|
||||
response = await llm.llm_call(
|
||||
credentials=credentials,
|
||||
llm_model=input_data.model,
|
||||
prompt=prompt,
|
||||
max_tokens=input_data.max_tokens,
|
||||
tools=tool_functions,
|
||||
ollama_host=input_data.ollama_host,
|
||||
parallel_tool_calls=input_data.multiple_tool_calls,
|
||||
)
|
||||
)
|
||||
|
||||
# Track LLM usage stats
|
||||
self.merge_stats(
|
||||
NodeExecutionStats(
|
||||
input_token_count=response.prompt_tokens,
|
||||
output_token_count=response.completion_tokens,
|
||||
llm_call_count=1,
|
||||
)
|
||||
)
|
||||
|
||||
if not response.tool_calls:
|
||||
return response, None # No tool calls, return response
|
||||
|
||||
# Validate all tool calls before proceeding
|
||||
validation_errors = []
|
||||
for tool_call in response.tool_calls:
|
||||
tool_name = tool_call.function.name
|
||||
tool_args = json.loads(tool_call.function.arguments)
|
||||
|
||||
# Find the tool definition to get the expected arguments
|
||||
tool_def = next(
|
||||
(
|
||||
tool
|
||||
for tool in tool_functions
|
||||
if tool["function"]["name"] == tool_name
|
||||
),
|
||||
None,
|
||||
)
|
||||
|
||||
# Get parameters schema from tool definition
|
||||
if (
|
||||
tool_def
|
||||
and "function" in tool_def
|
||||
and "parameters" in tool_def["function"]
|
||||
):
|
||||
parameters = tool_def["function"]["parameters"]
|
||||
expected_args = parameters.get("properties", {})
|
||||
required_params = set(parameters.get("required", []))
|
||||
else:
|
||||
expected_args = {arg: {} for arg in tool_args.keys()}
|
||||
required_params = set()
|
||||
|
||||
# Validate tool call arguments
|
||||
provided_args = set(tool_args.keys())
|
||||
expected_args_set = set(expected_args.keys())
|
||||
|
||||
# Check for unexpected arguments (typos)
|
||||
unexpected_args = provided_args - expected_args_set
|
||||
# Only check for missing REQUIRED parameters
|
||||
missing_required_args = required_params - provided_args
|
||||
|
||||
if unexpected_args or missing_required_args:
|
||||
error_msg = f"Tool call '{tool_name}' has parameter errors:"
|
||||
if unexpected_args:
|
||||
error_msg += f" Unknown parameters: {sorted(unexpected_args)}."
|
||||
if missing_required_args:
|
||||
error_msg += f" Missing required parameters: {sorted(missing_required_args)}."
|
||||
error_msg += f" Expected parameters: {sorted(expected_args_set)}."
|
||||
if required_params:
|
||||
error_msg += f" Required parameters: {sorted(required_params)}."
|
||||
validation_errors.append(error_msg)
|
||||
|
||||
# If validation failed, add feedback and raise for retry
|
||||
if validation_errors:
|
||||
# Add the failed response to conversation
|
||||
prompt.append(response.raw_response)
|
||||
|
||||
# Add error feedback for retry
|
||||
error_feedback = (
|
||||
"Your tool call had parameter errors. Please fix the following issues and try again:\n"
|
||||
+ "\n".join(f"- {error}" for error in validation_errors)
|
||||
+ "\n\nPlease make sure to use the exact parameter names as specified in the function schema."
|
||||
)
|
||||
prompt.append({"role": "user", "content": error_feedback})
|
||||
|
||||
raise ValueError(
|
||||
f"Tool call validation failed: {'; '.join(validation_errors)}"
|
||||
)
|
||||
|
||||
return response, validation_errors
|
||||
|
||||
# Call the LLM with retry logic
|
||||
response, validation_errors = await call_llm_with_validation()
|
||||
|
||||
if not response.tool_calls:
|
||||
yield "finished", response.response
|
||||
return
|
||||
|
||||
# If we get here, validation passed - yield tool outputs
|
||||
for tool_call in response.tool_calls:
|
||||
tool_name = tool_call.function.name
|
||||
tool_args = json.loads(tool_call.function.arguments)
|
||||
|
||||
# Find the tool definition to get the expected arguments
|
||||
# Get expected arguments (already validated above)
|
||||
tool_def = next(
|
||||
(
|
||||
tool
|
||||
@@ -555,7 +642,6 @@ class SmartDecisionMakerBlock(Block):
|
||||
),
|
||||
None,
|
||||
)
|
||||
|
||||
if (
|
||||
tool_def
|
||||
and "function" in tool_def
|
||||
@@ -563,14 +649,11 @@ class SmartDecisionMakerBlock(Block):
|
||||
):
|
||||
expected_args = tool_def["function"]["parameters"].get("properties", {})
|
||||
else:
|
||||
expected_args = tool_args.keys()
|
||||
expected_args = {arg: {} for arg in tool_args.keys()}
|
||||
|
||||
# Yield provided arguments and None for missing ones
|
||||
# Yield provided arguments, use .get() for optional parameters
|
||||
for arg_name in expected_args:
|
||||
if arg_name in tool_args:
|
||||
yield f"tools_^_{tool_name}_~_{arg_name}", tool_args[arg_name]
|
||||
else:
|
||||
yield f"tools_^_{tool_name}_~_{arg_name}", None
|
||||
yield f"tools_^_{tool_name}_~_{arg_name}", tool_args.get(arg_name)
|
||||
|
||||
# Add reasoning to conversation history if available
|
||||
if response.reasoning:
|
||||
|
||||
@@ -249,3 +249,232 @@ async def test_smart_decision_maker_tracks_llm_stats():
|
||||
# Verify outputs
|
||||
assert "finished" in outputs # Should have finished since no tool calls
|
||||
assert outputs["finished"] == "I need to think about this."
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_smart_decision_maker_parameter_validation():
    """Test that SmartDecisionMakerBlock correctly validates tool call parameters.

    Covers four scenarios against one mocked tool schema:
      1. a typo'd parameter name -> retried, then ValueError;
      2. a missing required parameter -> ValueError;
      3. a missing optional parameter -> succeeds, yields None for it;
      4. all parameters supplied -> succeeds, yields every value.
    """
    from unittest.mock import MagicMock, patch

    import backend.blocks.llm as llm_module
    from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock

    block = SmartDecisionMakerBlock()

    # Mock tool functions with specific parameter schema:
    # 'query' and 'max_keyword_difficulty' are required, 'optional_param' is not.
    mock_tool_functions = [
        {
            "type": "function",
            "function": {
                "name": "search_keywords",
                "description": "Search for keywords with difficulty filtering",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "query": {"type": "string", "description": "Search query"},
                        "max_keyword_difficulty": {
                            "type": "integer",
                            "description": "Maximum keyword difficulty (required)",
                        },
                        "optional_param": {
                            "type": "string",
                            "description": "Optional parameter with default",
                            "default": "default_value",
                        },
                    },
                    "required": ["query", "max_keyword_difficulty"],
                },
            },
        }
    ]

    # Test case 1: Tool call with TYPO in parameter name (should retry and eventually fail)
    mock_tool_call_with_typo = MagicMock()
    mock_tool_call_with_typo.function.name = "search_keywords"
    mock_tool_call_with_typo.function.arguments = '{"query": "test", "maximum_keyword_difficulty": 50}'  # TYPO: maximum instead of max

    mock_response_with_typo = MagicMock()
    mock_response_with_typo.response = None
    mock_response_with_typo.tool_calls = [mock_tool_call_with_typo]
    mock_response_with_typo.prompt_tokens = 50
    mock_response_with_typo.completion_tokens = 25
    mock_response_with_typo.reasoning = None
    mock_response_with_typo.raw_response = {"role": "assistant", "content": None}

    with patch(
        "backend.blocks.llm.llm_call", return_value=mock_response_with_typo
    ) as mock_llm_call, patch.object(
        SmartDecisionMakerBlock,
        "_create_function_signature",
        return_value=mock_tool_functions,
    ):

        input_data = SmartDecisionMakerBlock.Input(
            prompt="Search for keywords",
            model=llm_module.LlmModel.GPT4O,
            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
            retry=2,  # Set retry to 2 for testing
        )

        # Should raise ValueError after retries due to typo'd parameter name
        with pytest.raises(ValueError) as exc_info:
            outputs = {}
            async for output_name, output_data in block.run(
                input_data,
                credentials=llm_module.TEST_CREDENTIALS,
                graph_id="test-graph-id",
                node_id="test-node-id",
                graph_exec_id="test-exec-id",
                node_exec_id="test-node-exec-id",
                user_id="test-user-id",
            ):
                outputs[output_name] = output_data

        # Verify error message contains details about the typo
        error_msg = str(exc_info.value)
        assert "Tool call validation failed" in error_msg
        assert "Unknown parameters: ['maximum_keyword_difficulty']" in error_msg

        # Verify that LLM was called the expected number of times (retries)
        assert mock_llm_call.call_count == 2  # Should retry based on input_data.retry

    # Test case 2: Tool call missing REQUIRED parameter (should raise ValueError)
    mock_tool_call_missing_required = MagicMock()
    mock_tool_call_missing_required.function.name = "search_keywords"
    mock_tool_call_missing_required.function.arguments = (
        '{"query": "test"}'  # Missing required max_keyword_difficulty
    )

    mock_response_missing_required = MagicMock()
    mock_response_missing_required.response = None
    mock_response_missing_required.tool_calls = [mock_tool_call_missing_required]
    mock_response_missing_required.prompt_tokens = 50
    mock_response_missing_required.completion_tokens = 25
    mock_response_missing_required.reasoning = None
    mock_response_missing_required.raw_response = {"role": "assistant", "content": None}

    with patch(
        "backend.blocks.llm.llm_call", return_value=mock_response_missing_required
    ), patch.object(
        SmartDecisionMakerBlock,
        "_create_function_signature",
        return_value=mock_tool_functions,
    ):

        input_data = SmartDecisionMakerBlock.Input(
            prompt="Search for keywords",
            model=llm_module.LlmModel.GPT4O,
            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
        )

        # Should raise ValueError due to missing required parameter
        with pytest.raises(ValueError) as exc_info:
            outputs = {}
            async for output_name, output_data in block.run(
                input_data,
                credentials=llm_module.TEST_CREDENTIALS,
                graph_id="test-graph-id",
                node_id="test-node-id",
                graph_exec_id="test-exec-id",
                node_exec_id="test-node-exec-id",
                user_id="test-user-id",
            ):
                outputs[output_name] = output_data

        error_msg = str(exc_info.value)
        assert "Tool call 'search_keywords' has parameter errors" in error_msg
        assert "Missing required parameters: ['max_keyword_difficulty']" in error_msg

    # Test case 3: Valid tool call with OPTIONAL parameter missing (should succeed)
    mock_tool_call_valid = MagicMock()
    mock_tool_call_valid.function.name = "search_keywords"
    mock_tool_call_valid.function.arguments = '{"query": "test", "max_keyword_difficulty": 50}'  # optional_param missing, but that's OK

    mock_response_valid = MagicMock()
    mock_response_valid.response = None
    mock_response_valid.tool_calls = [mock_tool_call_valid]
    mock_response_valid.prompt_tokens = 50
    mock_response_valid.completion_tokens = 25
    mock_response_valid.reasoning = None
    mock_response_valid.raw_response = {"role": "assistant", "content": None}

    with patch(
        "backend.blocks.llm.llm_call", return_value=mock_response_valid
    ), patch.object(
        SmartDecisionMakerBlock,
        "_create_function_signature",
        return_value=mock_tool_functions,
    ):

        input_data = SmartDecisionMakerBlock.Input(
            prompt="Search for keywords",
            model=llm_module.LlmModel.GPT4O,
            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
        )

        # Should succeed - optional parameter missing is OK
        outputs = {}
        async for output_name, output_data in block.run(
            input_data,
            credentials=llm_module.TEST_CREDENTIALS,
            graph_id="test-graph-id",
            node_id="test-node-id",
            graph_exec_id="test-exec-id",
            node_exec_id="test-node-exec-id",
            user_id="test-user-id",
        ):
            outputs[output_name] = output_data

        # Verify tool outputs were generated correctly
        assert "tools_^_search_keywords_~_query" in outputs
        assert outputs["tools_^_search_keywords_~_query"] == "test"
        assert "tools_^_search_keywords_~_max_keyword_difficulty" in outputs
        assert outputs["tools_^_search_keywords_~_max_keyword_difficulty"] == 50
        # Optional parameter should be None when not provided
        assert "tools_^_search_keywords_~_optional_param" in outputs
        assert outputs["tools_^_search_keywords_~_optional_param"] is None

    # Test case 4: Valid tool call with ALL parameters (should succeed)
    mock_tool_call_all_params = MagicMock()
    mock_tool_call_all_params.function.name = "search_keywords"
    mock_tool_call_all_params.function.arguments = '{"query": "test", "max_keyword_difficulty": 50, "optional_param": "custom_value"}'

    mock_response_all_params = MagicMock()
    mock_response_all_params.response = None
    mock_response_all_params.tool_calls = [mock_tool_call_all_params]
    mock_response_all_params.prompt_tokens = 50
    mock_response_all_params.completion_tokens = 25
    mock_response_all_params.reasoning = None
    mock_response_all_params.raw_response = {"role": "assistant", "content": None}

    with patch(
        "backend.blocks.llm.llm_call", return_value=mock_response_all_params
    ), patch.object(
        SmartDecisionMakerBlock,
        "_create_function_signature",
        return_value=mock_tool_functions,
    ):

        input_data = SmartDecisionMakerBlock.Input(
            prompt="Search for keywords",
            model=llm_module.LlmModel.GPT4O,
            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
        )

        # Should succeed with all parameters
        outputs = {}
        async for output_name, output_data in block.run(
            input_data,
            credentials=llm_module.TEST_CREDENTIALS,
            graph_id="test-graph-id",
            node_id="test-node-id",
            graph_exec_id="test-exec-id",
            node_exec_id="test-node-exec-id",
            user_id="test-user-id",
        ):
            outputs[output_name] = output_data

        # Verify all tool outputs were generated correctly
        assert outputs["tools_^_search_keywords_~_query"] == "test"
        assert outputs["tools_^_search_keywords_~_max_keyword_difficulty"] == 50
        assert outputs["tools_^_search_keywords_~_optional_param"] == "custom_value"
|
||||
|
||||
131
autogpt_platform/backend/backend/blocks/test/test_table_input.py
Normal file
131
autogpt_platform/backend/backend/blocks/test/test_table_input.py
Normal file
@@ -0,0 +1,131 @@
|
||||
import pytest
|
||||
|
||||
from backend.blocks.io import AgentTableInputBlock
|
||||
from backend.util.test import execute_block_test
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_table_input_block():
    """Run the stock block test harness against AgentTableInputBlock."""
    await execute_block_test(AgentTableInputBlock())
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_table_input_with_data():
    """Populated table rows should pass through unchanged as a single 'result'."""
    block = AgentTableInputBlock()

    rows = [
        {"Name": "John", "Age": "30", "City": "New York"},
        {"Name": "Jane", "Age": "25", "City": "London"},
        {"Name": "Bob", "Age": "35", "City": "Paris"},
    ]
    input_data = block.Input(
        name="test_table",
        column_headers=["Name", "Age", "City"],
        value=rows,
    )

    output_data = [pair async for pair in block.run(input_data)]

    assert len(output_data) == 1
    output_name, result = output_data[0]
    assert output_name == "result"

    assert len(result) == 3
    assert result[0]["Name"] == "John"
    assert result[1]["Age"] == "25"
    assert result[2]["City"] == "Paris"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_table_input_empty_data():
    """An explicitly empty table should yield an empty 'result' list."""
    block = AgentTableInputBlock()

    input_data = block.Input(
        name="empty_table", column_headers=["Col1", "Col2"], value=[]
    )

    output_data = [pair async for pair in block.run(input_data)]

    assert output_data == [("result", [])]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_table_input_with_missing_columns():
    """Rows with missing columns are passed through as-is, not normalized."""
    block = AgentTableInputBlock()

    input_data = block.Input(
        name="partial_table",
        column_headers=["Name", "Age", "City"],
        value=[
            {"Name": "John", "Age": "30"},  # Missing City
            {"Name": "Jane", "City": "London"},  # Missing Age
            {"Age": "35", "City": "Paris"},  # Missing Name
        ],
    )

    output_data = [pair async for pair in block.run(input_data)]

    _, result = output_data[0]
    assert len(result) == 3

    # Check data is passed through as-is
    assert result[0] == {"Name": "John", "Age": "30"}
    assert result[1] == {"Name": "Jane", "City": "London"}
    assert result[2] == {"Age": "35", "City": "Paris"}
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_table_input_none_value():
    """A None value should be coerced to an empty 'result' list."""
    block = AgentTableInputBlock()

    input_data = block.Input(
        name="none_table", column_headers=["Name", "Age"], value=None
    )

    output_data = [pair async for pair in block.run(input_data)]

    assert output_data == [("result", [])]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_table_input_with_default_headers():
    """Omitting column_headers should fall back to the default column names."""
    block = AgentTableInputBlock()

    # Don't specify column_headers, should use defaults
    input_data = block.Input(
        name="default_headers_table",
        value=[
            {"Column 1": "A", "Column 2": "B", "Column 3": "C"},
            {"Column 1": "D", "Column 2": "E", "Column 3": "F"},
        ],
    )

    output_data = [pair async for pair in block.run(input_data)]

    assert len(output_data) == 1
    output_name, result = output_data[0]
    assert output_name == "result"

    assert len(result) == 2
    assert result[0]["Column 1"] == "A"
    assert result[1]["Column 3"] == "F"
||||
@@ -69,6 +69,7 @@ MODEL_COST: dict[LlmModel, int] = {
|
||||
LlmModel.CLAUDE_4_1_OPUS: 21,
|
||||
LlmModel.CLAUDE_4_OPUS: 21,
|
||||
LlmModel.CLAUDE_4_SONNET: 5,
|
||||
LlmModel.CLAUDE_4_5_SONNET: 9,
|
||||
LlmModel.CLAUDE_3_7_SONNET: 5,
|
||||
LlmModel.CLAUDE_3_5_SONNET: 4,
|
||||
LlmModel.CLAUDE_3_5_HAIKU: 1, # $0.80 / $4.00
|
||||
|
||||
@@ -270,6 +270,7 @@ def SchemaField(
|
||||
min_length: Optional[int] = None,
|
||||
max_length: Optional[int] = None,
|
||||
discriminator: Optional[str] = None,
|
||||
format: Optional[str] = None,
|
||||
json_schema_extra: Optional[dict[str, Any]] = None,
|
||||
) -> T:
|
||||
if default is PydanticUndefined and default_factory is None:
|
||||
@@ -285,6 +286,7 @@ def SchemaField(
|
||||
"advanced": advanced,
|
||||
"hidden": hidden,
|
||||
"depends_on": depends_on,
|
||||
"format": format,
|
||||
**(json_schema_extra or {}),
|
||||
}.items()
|
||||
if v is not None
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import re
|
||||
from datetime import datetime
|
||||
from typing import Any, Optional
|
||||
|
||||
import prisma
|
||||
@@ -30,7 +31,7 @@ user_credit = get_user_credit_model()
|
||||
|
||||
class UserOnboardingUpdate(pydantic.BaseModel):
|
||||
completedSteps: Optional[list[OnboardingStep]] = None
|
||||
notificationDot: Optional[bool] = None
|
||||
walletShown: Optional[bool] = None
|
||||
notified: Optional[list[OnboardingStep]] = None
|
||||
usageReason: Optional[str] = None
|
||||
integrations: Optional[list[str]] = None
|
||||
@@ -39,6 +40,8 @@ class UserOnboardingUpdate(pydantic.BaseModel):
|
||||
agentInput: Optional[dict[str, Any]] = None
|
||||
onboardingAgentExecutionId: Optional[str] = None
|
||||
agentRuns: Optional[int] = None
|
||||
lastRunAt: Optional[datetime] = None
|
||||
consecutiveRunDays: Optional[int] = None
|
||||
|
||||
|
||||
async def get_user_onboarding(user_id: str):
|
||||
@@ -57,16 +60,22 @@ async def update_user_onboarding(user_id: str, data: UserOnboardingUpdate):
|
||||
update["completedSteps"] = list(set(data.completedSteps))
|
||||
for step in (
|
||||
OnboardingStep.AGENT_NEW_RUN,
|
||||
OnboardingStep.RUN_AGENTS,
|
||||
OnboardingStep.MARKETPLACE_VISIT,
|
||||
OnboardingStep.MARKETPLACE_ADD_AGENT,
|
||||
OnboardingStep.MARKETPLACE_RUN_AGENT,
|
||||
OnboardingStep.BUILDER_SAVE_AGENT,
|
||||
OnboardingStep.BUILDER_RUN_AGENT,
|
||||
OnboardingStep.RE_RUN_AGENT,
|
||||
OnboardingStep.SCHEDULE_AGENT,
|
||||
OnboardingStep.RUN_AGENTS,
|
||||
OnboardingStep.RUN_3_DAYS,
|
||||
OnboardingStep.TRIGGER_WEBHOOK,
|
||||
OnboardingStep.RUN_14_DAYS,
|
||||
OnboardingStep.RUN_AGENTS_100,
|
||||
):
|
||||
if step in data.completedSteps:
|
||||
await reward_user(user_id, step)
|
||||
if data.notificationDot is not None:
|
||||
update["notificationDot"] = data.notificationDot
|
||||
if data.walletShown is not None:
|
||||
update["walletShown"] = data.walletShown
|
||||
if data.notified is not None:
|
||||
update["notified"] = list(set(data.notified))
|
||||
if data.usageReason is not None:
|
||||
@@ -83,6 +92,10 @@ async def update_user_onboarding(user_id: str, data: UserOnboardingUpdate):
|
||||
update["onboardingAgentExecutionId"] = data.onboardingAgentExecutionId
|
||||
if data.agentRuns is not None:
|
||||
update["agentRuns"] = data.agentRuns
|
||||
if data.lastRunAt is not None:
|
||||
update["lastRunAt"] = data.lastRunAt
|
||||
if data.consecutiveRunDays is not None:
|
||||
update["consecutiveRunDays"] = data.consecutiveRunDays
|
||||
|
||||
return await UserOnboarding.prisma().upsert(
|
||||
where={"userId": user_id},
|
||||
@@ -101,16 +114,28 @@ async def reward_user(user_id: str, step: OnboardingStep):
|
||||
# This is seen as a reward for the GET_RESULTS step in the wallet
|
||||
case OnboardingStep.AGENT_NEW_RUN:
|
||||
reward = 300
|
||||
case OnboardingStep.RUN_AGENTS:
|
||||
reward = 300
|
||||
case OnboardingStep.MARKETPLACE_VISIT:
|
||||
reward = 100
|
||||
case OnboardingStep.MARKETPLACE_ADD_AGENT:
|
||||
reward = 100
|
||||
case OnboardingStep.MARKETPLACE_RUN_AGENT:
|
||||
reward = 100
|
||||
case OnboardingStep.BUILDER_SAVE_AGENT:
|
||||
reward = 100
|
||||
case OnboardingStep.BUILDER_RUN_AGENT:
|
||||
case OnboardingStep.RE_RUN_AGENT:
|
||||
reward = 100
|
||||
case OnboardingStep.SCHEDULE_AGENT:
|
||||
reward = 100
|
||||
case OnboardingStep.RUN_AGENTS:
|
||||
reward = 300
|
||||
case OnboardingStep.RUN_3_DAYS:
|
||||
reward = 100
|
||||
case OnboardingStep.TRIGGER_WEBHOOK:
|
||||
reward = 100
|
||||
case OnboardingStep.RUN_14_DAYS:
|
||||
reward = 300
|
||||
case OnboardingStep.RUN_AGENTS_100:
|
||||
reward = 300
|
||||
|
||||
if reward == 0:
|
||||
return
|
||||
@@ -132,6 +157,22 @@ async def reward_user(user_id: str, step: OnboardingStep):
|
||||
)
|
||||
|
||||
|
||||
async def complete_webhook_trigger_step(user_id: str):
|
||||
"""
|
||||
Completes the TRIGGER_WEBHOOK onboarding step for the user if not already completed.
|
||||
"""
|
||||
|
||||
onboarding = await get_user_onboarding(user_id)
|
||||
if OnboardingStep.TRIGGER_WEBHOOK not in onboarding.completedSteps:
|
||||
await update_user_onboarding(
|
||||
user_id,
|
||||
UserOnboardingUpdate(
|
||||
completedSteps=onboarding.completedSteps
|
||||
+ [OnboardingStep.TRIGGER_WEBHOOK]
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def clean_and_split(text: str) -> list[str]:
|
||||
"""
|
||||
Removes all special characters from a string, truncates it to 100 characters,
|
||||
|
||||
@@ -1735,7 +1735,10 @@ async def synchronized(key: str, timeout: int = settings.config.cluster_lock_tim
|
||||
yield
|
||||
finally:
|
||||
if await lock.locked() and await lock.owned():
|
||||
await lock.release()
|
||||
try:
|
||||
await lock.release()
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to release lock for key {key}: {e}")
|
||||
|
||||
|
||||
def increment_execution_count(user_id: str) -> int:
|
||||
|
||||
@@ -151,7 +151,10 @@ class IntegrationCredentialsManager:
|
||||
fresh_credentials = await oauth_handler.refresh_tokens(credentials)
|
||||
await self.store.update_creds(user_id, fresh_credentials)
|
||||
if _lock and (await _lock.locked()) and (await _lock.owned()):
|
||||
await _lock.release()
|
||||
try:
|
||||
await _lock.release()
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to release OAuth refresh lock: {e}")
|
||||
|
||||
credentials = fresh_credentials
|
||||
return credentials
|
||||
@@ -184,7 +187,10 @@ class IntegrationCredentialsManager:
|
||||
yield
|
||||
finally:
|
||||
if (await lock.locked()) and (await lock.owned()):
|
||||
await lock.release()
|
||||
try:
|
||||
await lock.release()
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to release credentials lock: {e}")
|
||||
|
||||
async def release_all_locks(self):
|
||||
"""Call this on process termination to ensure all locks are released"""
|
||||
|
||||
@@ -32,6 +32,7 @@ from backend.data.model import (
|
||||
OAuth2Credentials,
|
||||
UserIntegrations,
|
||||
)
|
||||
from backend.data.onboarding import complete_webhook_trigger_step
|
||||
from backend.data.user import get_user_integrations
|
||||
from backend.executor.utils import add_graph_execution
|
||||
from backend.integrations.ayrshare import AyrshareClient, SocialPlatform
|
||||
@@ -367,6 +368,8 @@ async def webhook_ingress_generic(
|
||||
return
|
||||
|
||||
executions: list[Awaitable] = []
|
||||
await complete_webhook_trigger_step(user_id)
|
||||
|
||||
for node in webhook.triggered_nodes:
|
||||
logger.debug(f"Webhook-attached node: {node}")
|
||||
if not node.is_triggered_by_event_type(event_type):
|
||||
|
||||
@@ -4,6 +4,7 @@ from enum import Enum
|
||||
import sentry_sdk
|
||||
from pydantic import SecretStr
|
||||
from sentry_sdk.integrations.anthropic import AnthropicIntegration
|
||||
from sentry_sdk.integrations.asyncio import AsyncioIntegration
|
||||
from sentry_sdk.integrations.logging import LoggingIntegration
|
||||
|
||||
from backend.util.settings import Settings
|
||||
@@ -25,6 +26,7 @@ def sentry_init():
|
||||
environment=f"app:{settings.config.app_env.value}-behave:{settings.config.behave_as.value}",
|
||||
_experiments={"enable_logs": True},
|
||||
integrations=[
|
||||
AsyncioIntegration(),
|
||||
LoggingIntegration(sentry_logs_level=logging.INFO),
|
||||
AnthropicIntegration(
|
||||
include_prompts=False,
|
||||
|
||||
@@ -15,19 +15,16 @@ node generate-tokens.js --count=160
|
||||
export K6_CLOUD_TOKEN="your-k6-cloud-token"
|
||||
export K6_CLOUD_PROJECT_ID="4254406"
|
||||
|
||||
# 4. Verify setup and run quick test
|
||||
node run-tests.js verify
|
||||
# 4. Run orchestrated load tests locally
|
||||
node orchestrator/orchestrator.js DEV local
|
||||
|
||||
# 5. Run tests locally (development/debugging)
|
||||
node run-tests.js run all DEV
|
||||
|
||||
# 6. Run tests in k6 cloud (performance testing)
|
||||
node run-tests.js cloud all DEV
|
||||
# 5. Run orchestrated load tests in k6 cloud (recommended)
|
||||
node orchestrator/orchestrator.js DEV cloud
|
||||
```
|
||||
|
||||
## 📋 Unified Test Runner
|
||||
## 📋 Load Test Orchestrator
|
||||
|
||||
The AutoGPT Platform uses a single unified test runner (`run-tests.js`) for both local and cloud execution:
|
||||
The AutoGPT Platform uses a comprehensive load test orchestrator (`orchestrator/orchestrator.js`) that runs 12 optimized tests with maximum VU counts:
|
||||
|
||||
### Available Tests
|
||||
|
||||
@@ -60,38 +57,26 @@ The AutoGPT Platform uses a single unified test runner (`run-tests.js`) for both
|
||||
### Basic Commands
|
||||
|
||||
```bash
|
||||
# List available tests and show cloud credentials status
|
||||
node run-tests.js list
|
||||
# Run 12 optimized tests locally (for debugging)
|
||||
node orchestrator/orchestrator.js DEV local
|
||||
|
||||
# Quick setup verification
|
||||
node run-tests.js verify
|
||||
# Run 12 optimized tests in k6 cloud (recommended for performance testing)
|
||||
node orchestrator/orchestrator.js DEV cloud
|
||||
|
||||
# Run specific test locally
|
||||
node run-tests.js run core-api-test DEV
|
||||
# Run against production (coordinate with team!)
|
||||
node orchestrator/orchestrator.js PROD cloud
|
||||
|
||||
# Run multiple tests sequentially (comma-separated)
|
||||
node run-tests.js run connectivity-test,core-api-test,marketplace-public-test DEV
|
||||
|
||||
# Run all tests locally
|
||||
node run-tests.js run all DEV
|
||||
|
||||
# Run specific test in k6 cloud
|
||||
node run-tests.js cloud core-api-test DEV
|
||||
|
||||
# Run all tests in k6 cloud
|
||||
node run-tests.js cloud all DEV
|
||||
# Run individual test directly with k6
|
||||
K6_ENVIRONMENT=DEV VUS=100 DURATION=3m k6 run tests/api/core-api-test.js
|
||||
```
|
||||
|
||||
### NPM Scripts
|
||||
|
||||
```bash
|
||||
# Quick verification
|
||||
npm run verify
|
||||
# Run orchestrator locally
|
||||
npm run local
|
||||
|
||||
# Run all tests locally
|
||||
npm test
|
||||
|
||||
# Run all tests in k6 cloud
|
||||
# Run orchestrator in k6 cloud
|
||||
npm run cloud
|
||||
```
|
||||
|
||||
@@ -230,8 +215,8 @@ node generate-tokens.js --count=160
|
||||
export K6_CLOUD_TOKEN="your-k6-cloud-token"
|
||||
export K6_CLOUD_PROJECT_ID="4254406" # AutoGPT Platform project ID
|
||||
|
||||
# Verify credentials work
|
||||
node run-tests.js list # Shows ✅ k6 cloud credentials configured
|
||||
# Verify credentials work by running orchestrator
|
||||
node orchestrator/orchestrator.js DEV cloud
|
||||
```
|
||||
|
||||
## 📂 File Structure
|
||||
@@ -239,9 +224,10 @@ node run-tests.js list # Shows ✅ k6 cloud credentials configured
|
||||
```
|
||||
load-tests/
|
||||
├── README.md # This documentation
|
||||
├── run-tests.js # Unified test runner (MAIN ENTRY POINT)
|
||||
├── generate-tokens.js # Generate pre-auth tokens
|
||||
├── generate-tokens.js # Generate pre-auth tokens (MAIN TOKEN SETUP)
|
||||
├── package.json # Node.js dependencies and scripts
|
||||
├── orchestrator/
|
||||
│ └── orchestrator.js # Main test orchestrator (MAIN ENTRY POINT)
|
||||
├── configs/
|
||||
│ ├── environment.js # Environment URLs and configuration
|
||||
│ └── pre-authenticated-tokens.js # Generated tokens (gitignored)
|
||||
@@ -257,21 +243,19 @@ load-tests/
|
||||
│ │ └── library-access-test.js # Authenticated marketplace/library
|
||||
│ └── comprehensive/
|
||||
│ └── platform-journey-test.js # Complete user journey simulation
|
||||
├── orchestrator/
|
||||
│ └── comprehensive-orchestrator.js # Full 25-test orchestration suite
|
||||
├── results/ # Local test results (auto-created)
|
||||
├── k6-cloud-results.txt # Cloud test URLs (auto-created)
|
||||
└── *.json # Test output files (auto-created)
|
||||
├── unified-results-*.json # Orchestrator results (auto-created)
|
||||
└── *.log # Test execution logs (auto-created)
|
||||
```
|
||||
|
||||
## 🎯 Best Practices
|
||||
|
||||
1. **Start with Verification**: Always run `node run-tests.js verify` first
|
||||
2. **Local for Development**: Use `run` command for debugging and development
|
||||
3. **Cloud for Performance**: Use `cloud` command for actual performance testing
|
||||
1. **Generate Tokens First**: Always run `node generate-tokens.js --count=160` before testing
|
||||
2. **Local for Development**: Use `DEV local` for debugging and development
|
||||
3. **Cloud for Performance**: Use `DEV cloud` for actual performance testing
|
||||
4. **Monitor Real-Time**: Check k6 cloud dashboards during test execution
|
||||
5. **Regenerate Tokens**: Refresh tokens every 24 hours when they expire
|
||||
6. **Sequential Testing**: Use comma-separated tests for organized execution
|
||||
6. **Unified Testing**: Orchestrator runs 12 optimized tests automatically
|
||||
|
||||
## 🚀 Advanced Usage
|
||||
|
||||
|
||||
@@ -1,611 +0,0 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
// AutoGPT Platform Load Test Orchestrator
|
||||
// Runs comprehensive test suite locally or in k6 cloud
|
||||
// Collects URLs, statistics, and generates reports
|
||||
|
||||
const { spawn } = require("child_process");
|
||||
const fs = require("fs");
|
||||
const path = require("path");
|
||||
|
||||
console.log("🎯 AUTOGPT PLATFORM LOAD TEST ORCHESTRATOR\n");
|
||||
console.log("===========================================\n");
|
||||
|
||||
// Parse command line arguments
|
||||
const args = process.argv.slice(2);
|
||||
const environment = args[0] || "DEV"; // LOCAL, DEV, PROD
|
||||
const executionMode = args[1] || "cloud"; // local, cloud
|
||||
const testScale = args[2] || "full"; // small, full
|
||||
|
||||
console.log(`🌍 Target Environment: ${environment}`);
|
||||
console.log(`🚀 Execution Mode: ${executionMode}`);
|
||||
console.log(`📏 Test Scale: ${testScale}`);
|
||||
|
||||
// Test scenario definitions
|
||||
const testScenarios = {
|
||||
// Small scale for validation (3 tests, ~5 minutes)
|
||||
small: [
|
||||
{
|
||||
name: "Basic_Connectivity_Test",
|
||||
file: "tests/basic/connectivity-test.js",
|
||||
vus: 5,
|
||||
duration: "30s",
|
||||
},
|
||||
{
|
||||
name: "Core_API_Quick_Test",
|
||||
file: "tests/api/core-api-test.js",
|
||||
vus: 10,
|
||||
duration: "1m",
|
||||
},
|
||||
{
|
||||
name: "Marketplace_Quick_Test",
|
||||
file: "tests/marketplace/public-access-test.js",
|
||||
vus: 15,
|
||||
duration: "1m",
|
||||
},
|
||||
],
|
||||
|
||||
// Full comprehensive test suite (25 tests, ~2 hours)
|
||||
full: [
|
||||
// Marketplace Viewing Tests
|
||||
{
|
||||
name: "Viewing_Marketplace_Logged_Out_Day1",
|
||||
file: "tests/marketplace/public-access-test.js",
|
||||
vus: 106,
|
||||
duration: "3m",
|
||||
},
|
||||
{
|
||||
name: "Viewing_Marketplace_Logged_Out_VeryHigh",
|
||||
file: "tests/marketplace/public-access-test.js",
|
||||
vus: 314,
|
||||
duration: "3m",
|
||||
},
|
||||
{
|
||||
name: "Viewing_Marketplace_Logged_In_Day1",
|
||||
file: "tests/marketplace/library-access-test.js",
|
||||
vus: 53,
|
||||
duration: "3m",
|
||||
},
|
||||
{
|
||||
name: "Viewing_Marketplace_Logged_In_VeryHigh",
|
||||
file: "tests/marketplace/library-access-test.js",
|
||||
vus: 157,
|
||||
duration: "3m",
|
||||
},
|
||||
|
||||
// Library Management Tests
|
||||
{
|
||||
name: "Adding_Agent_to_Library_Day1",
|
||||
file: "tests/marketplace/library-access-test.js",
|
||||
vus: 32,
|
||||
duration: "3m",
|
||||
},
|
||||
{
|
||||
name: "Adding_Agent_to_Library_VeryHigh",
|
||||
file: "tests/marketplace/library-access-test.js",
|
||||
vus: 95,
|
||||
duration: "3m",
|
||||
},
|
||||
{
|
||||
name: "Viewing_Library_Home_0_Agents_Day1",
|
||||
file: "tests/marketplace/library-access-test.js",
|
||||
vus: 53,
|
||||
duration: "3m",
|
||||
},
|
||||
{
|
||||
name: "Viewing_Library_Home_0_Agents_VeryHigh",
|
||||
file: "tests/marketplace/library-access-test.js",
|
||||
vus: 157,
|
||||
duration: "3m",
|
||||
},
|
||||
|
||||
// Core API Tests
|
||||
{
|
||||
name: "Core_API_Load_Test",
|
||||
file: "tests/api/core-api-test.js",
|
||||
vus: 100,
|
||||
duration: "3m",
|
||||
},
|
||||
{
|
||||
name: "Graph_Execution_Load_Test",
|
||||
file: "tests/api/graph-execution-test.js",
|
||||
vus: 100,
|
||||
duration: "3m",
|
||||
},
|
||||
|
||||
// Single API Endpoint Tests
|
||||
{
|
||||
name: "Credits_API_Single_Endpoint",
|
||||
file: "tests/basic/single-endpoint-test.js",
|
||||
vus: 50,
|
||||
duration: "3m",
|
||||
env: { ENDPOINT: "credits", CONCURRENT_REQUESTS: 10 },
|
||||
},
|
||||
{
|
||||
name: "Graphs_API_Single_Endpoint",
|
||||
file: "tests/basic/single-endpoint-test.js",
|
||||
vus: 50,
|
||||
duration: "3m",
|
||||
env: { ENDPOINT: "graphs", CONCURRENT_REQUESTS: 10 },
|
||||
},
|
||||
{
|
||||
name: "Blocks_API_Single_Endpoint",
|
||||
file: "tests/basic/single-endpoint-test.js",
|
||||
vus: 50,
|
||||
duration: "3m",
|
||||
env: { ENDPOINT: "blocks", CONCURRENT_REQUESTS: 10 },
|
||||
},
|
||||
{
|
||||
name: "Executions_API_Single_Endpoint",
|
||||
file: "tests/basic/single-endpoint-test.js",
|
||||
vus: 50,
|
||||
duration: "3m",
|
||||
env: { ENDPOINT: "executions", CONCURRENT_REQUESTS: 10 },
|
||||
},
|
||||
|
||||
// Comprehensive Platform Tests
|
||||
{
|
||||
name: "Comprehensive_Platform_Low",
|
||||
file: "tests/comprehensive/platform-journey-test.js",
|
||||
vus: 25,
|
||||
duration: "3m",
|
||||
},
|
||||
{
|
||||
name: "Comprehensive_Platform_Medium",
|
||||
file: "tests/comprehensive/platform-journey-test.js",
|
||||
vus: 50,
|
||||
duration: "3m",
|
||||
},
|
||||
{
|
||||
name: "Comprehensive_Platform_High",
|
||||
file: "tests/comprehensive/platform-journey-test.js",
|
||||
vus: 100,
|
||||
duration: "3m",
|
||||
},
|
||||
|
||||
// User Authentication Workflows
|
||||
{
|
||||
name: "User_Auth_Workflows_Day1",
|
||||
file: "tests/basic/connectivity-test.js",
|
||||
vus: 50,
|
||||
duration: "3m",
|
||||
},
|
||||
{
|
||||
name: "User_Auth_Workflows_VeryHigh",
|
||||
file: "tests/basic/connectivity-test.js",
|
||||
vus: 100,
|
||||
duration: "3m",
|
||||
},
|
||||
|
||||
// Mixed Load Tests
|
||||
{
|
||||
name: "Mixed_Load_Light",
|
||||
file: "tests/api/core-api-test.js",
|
||||
vus: 75,
|
||||
duration: "5m",
|
||||
},
|
||||
{
|
||||
name: "Mixed_Load_Heavy",
|
||||
file: "tests/marketplace/public-access-test.js",
|
||||
vus: 200,
|
||||
duration: "5m",
|
||||
},
|
||||
|
||||
// Stress Tests
|
||||
{
|
||||
name: "Marketplace_Stress_Test",
|
||||
file: "tests/marketplace/public-access-test.js",
|
||||
vus: 500,
|
||||
duration: "3m",
|
||||
},
|
||||
{
|
||||
name: "Core_API_Stress_Test",
|
||||
file: "tests/api/core-api-test.js",
|
||||
vus: 300,
|
||||
duration: "3m",
|
||||
},
|
||||
|
||||
// Extended Duration Tests
|
||||
{
|
||||
name: "Long_Duration_Marketplace",
|
||||
file: "tests/marketplace/library-access-test.js",
|
||||
vus: 100,
|
||||
duration: "10m",
|
||||
},
|
||||
{
|
||||
name: "Long_Duration_Core_API",
|
||||
file: "tests/api/core-api-test.js",
|
||||
vus: 100,
|
||||
duration: "10m",
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
const scenarios = testScenarios[testScale];
|
||||
console.log(`📊 Running ${scenarios.length} test scenarios`);
|
||||
|
||||
// Results collection
|
||||
const results = [];
|
||||
const cloudUrls = [];
|
||||
const detailedMetrics = [];
|
||||
|
||||
// Create results directory
|
||||
const timestamp = new Date()
|
||||
.toISOString()
|
||||
.replace(/[:.]/g, "-")
|
||||
.substring(0, 16);
|
||||
const resultsDir = `results-${environment.toLowerCase()}-${executionMode}-${testScale}-${timestamp}`;
|
||||
if (!fs.existsSync(resultsDir)) {
|
||||
fs.mkdirSync(resultsDir);
|
||||
}
|
||||
|
||||
// Function to run a single test
|
||||
function runTest(scenario, testIndex) {
|
||||
return new Promise((resolve, reject) => {
|
||||
console.log(`\n🚀 Test ${testIndex}/${scenarios.length}: ${scenario.name}`);
|
||||
console.log(
|
||||
`📊 Config: ${scenario.vus} VUs × ${scenario.duration} (${executionMode} mode)`,
|
||||
);
|
||||
console.log(`📁 Script: ${scenario.file}`);
|
||||
|
||||
// Build k6 command
|
||||
let k6Command, k6Args;
|
||||
|
||||
// Determine k6 binary location
|
||||
const isInPod = fs.existsSync("/app/k6-v0.54.0-linux-amd64/k6");
|
||||
const k6Binary = isInPod ? "/app/k6-v0.54.0-linux-amd64/k6" : "k6";
|
||||
|
||||
// Build environment variables
|
||||
const envVars = [
|
||||
`K6_ENVIRONMENT=${environment}`,
|
||||
`VUS=${scenario.vus}`,
|
||||
`DURATION=${scenario.duration}`,
|
||||
`RAMP_UP=30s`,
|
||||
`RAMP_DOWN=30s`,
|
||||
`THRESHOLD_P95=60000`,
|
||||
`THRESHOLD_P99=60000`,
|
||||
];
|
||||
|
||||
// Add scenario-specific environment variables
|
||||
if (scenario.env) {
|
||||
Object.keys(scenario.env).forEach((key) => {
|
||||
envVars.push(`${key}=${scenario.env[key]}`);
|
||||
});
|
||||
}
|
||||
|
||||
// Configure command based on execution mode
|
||||
if (executionMode === "cloud") {
|
||||
k6Command = k6Binary;
|
||||
k6Args = ["cloud", "run", scenario.file];
|
||||
// Add environment variables as --env flags
|
||||
envVars.forEach((env) => {
|
||||
k6Args.push("--env", env);
|
||||
});
|
||||
} else {
|
||||
k6Command = k6Binary;
|
||||
k6Args = ["run", scenario.file];
|
||||
|
||||
// Add local output files
|
||||
const outputFile = path.join(resultsDir, `${scenario.name}.json`);
|
||||
const summaryFile = path.join(
|
||||
resultsDir,
|
||||
`${scenario.name}_summary.json`,
|
||||
);
|
||||
k6Args.push("--out", `json=${outputFile}`);
|
||||
k6Args.push("--summary-export", summaryFile);
|
||||
}
|
||||
|
||||
const startTime = Date.now();
|
||||
let testUrl = "";
|
||||
let stdout = "";
|
||||
let stderr = "";
|
||||
|
||||
console.log(`⏱️ Test started: ${new Date().toISOString()}`);
|
||||
|
||||
// Set environment variables for spawned process
|
||||
const processEnv = { ...process.env };
|
||||
envVars.forEach((env) => {
|
||||
const [key, value] = env.split("=");
|
||||
processEnv[key] = value;
|
||||
});
|
||||
|
||||
const childProcess = spawn(k6Command, k6Args, {
|
||||
env: processEnv,
|
||||
stdio: ["ignore", "pipe", "pipe"],
|
||||
});
|
||||
|
||||
// Handle stdout
|
||||
childProcess.stdout.on("data", (data) => {
|
||||
const output = data.toString();
|
||||
stdout += output;
|
||||
|
||||
// Extract k6 cloud URL
|
||||
if (executionMode === "cloud") {
|
||||
const urlMatch = output.match(/output:\s*(https:\/\/[^\s]+)/);
|
||||
if (urlMatch) {
|
||||
testUrl = urlMatch[1];
|
||||
console.log(`🔗 Test URL: ${testUrl}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Show progress indicators
|
||||
if (output.includes("Run [")) {
|
||||
const progressMatch = output.match(/Run\s+\[\s*(\d+)%\s*\]/);
|
||||
if (progressMatch) {
|
||||
process.stdout.write(`\r⏳ Progress: ${progressMatch[1]}%`);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Handle stderr
|
||||
childProcess.stderr.on("data", (data) => {
|
||||
stderr += data.toString();
|
||||
});
|
||||
|
||||
// Handle process completion
|
||||
childProcess.on("close", (code) => {
|
||||
const endTime = Date.now();
|
||||
const duration = Math.round((endTime - startTime) / 1000);
|
||||
|
||||
console.log(`\n⏱️ Completed in ${duration}s`);
|
||||
|
||||
if (code === 0) {
|
||||
console.log(`✅ ${scenario.name} SUCCESS`);
|
||||
|
||||
const result = {
|
||||
test: scenario.name,
|
||||
status: "SUCCESS",
|
||||
duration: `${duration}s`,
|
||||
vus: scenario.vus,
|
||||
target_duration: scenario.duration,
|
||||
url: testUrl || "N/A",
|
||||
execution_mode: executionMode,
|
||||
environment: environment,
|
||||
completed_at: new Date().toISOString(),
|
||||
};
|
||||
|
||||
results.push(result);
|
||||
|
||||
if (testUrl) {
|
||||
cloudUrls.push(`${scenario.name}: ${testUrl}`);
|
||||
}
|
||||
|
||||
// Store detailed output for analysis
|
||||
detailedMetrics.push({
|
||||
test: scenario.name,
|
||||
stdout_lines: stdout.split("\n").length,
|
||||
stderr_lines: stderr.split("\n").length,
|
||||
has_url: !!testUrl,
|
||||
});
|
||||
|
||||
resolve(result);
|
||||
} else {
|
||||
console.error(`❌ ${scenario.name} FAILED (exit code ${code})`);
|
||||
|
||||
const result = {
|
||||
test: scenario.name,
|
||||
status: "FAILED",
|
||||
error: `Exit code ${code}`,
|
||||
duration: `${duration}s`,
|
||||
vus: scenario.vus,
|
||||
execution_mode: executionMode,
|
||||
environment: environment,
|
||||
completed_at: new Date().toISOString(),
|
||||
};
|
||||
|
||||
results.push(result);
|
||||
reject(new Error(`Test failed with exit code ${code}`));
|
||||
}
|
||||
});
|
||||
|
||||
// Handle spawn errors
|
||||
childProcess.on("error", (error) => {
|
||||
console.error(`❌ ${scenario.name} ERROR:`, error.message);
|
||||
|
||||
results.push({
|
||||
test: scenario.name,
|
||||
status: "ERROR",
|
||||
error: error.message,
|
||||
execution_mode: executionMode,
|
||||
environment: environment,
|
||||
});
|
||||
|
||||
reject(error);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
// Main orchestration function
|
||||
async function runOrchestrator() {
|
||||
const estimatedMinutes = scenarios.length * (testScale === "small" ? 2 : 5);
|
||||
console.log(`\n🎯 Starting ${testScale} test suite on ${environment}`);
|
||||
console.log(`📈 Estimated time: ~${estimatedMinutes} minutes`);
|
||||
console.log(`🌩️ Execution: ${executionMode} mode\n`);
|
||||
|
||||
const startTime = Date.now();
|
||||
let successCount = 0;
|
||||
let failureCount = 0;
|
||||
|
||||
// Run tests sequentially
|
||||
for (let i = 0; i < scenarios.length; i++) {
|
||||
try {
|
||||
await runTest(scenarios[i], i + 1);
|
||||
successCount++;
|
||||
|
||||
// Pause between tests (avoid overwhelming k6 cloud API)
|
||||
if (i < scenarios.length - 1) {
|
||||
const pauseSeconds = testScale === "small" ? 10 : 30;
|
||||
console.log(`\n⏸️ Pausing ${pauseSeconds}s before next test...\n`);
|
||||
await new Promise((resolve) =>
|
||||
setTimeout(resolve, pauseSeconds * 1000),
|
||||
);
|
||||
}
|
||||
} catch (error) {
|
||||
failureCount++;
|
||||
console.log(`💥 Continuing after failure...\n`);
|
||||
|
||||
// Brief pause before continuing
|
||||
if (i < scenarios.length - 1) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 15000));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const totalTime = Math.round((Date.now() - startTime) / 1000);
|
||||
await generateReports(successCount, failureCount, totalTime);
|
||||
}
|
||||
|
||||
// Generate comprehensive reports
|
||||
async function generateReports(successCount, failureCount, totalTime) {
|
||||
console.log("\n🎉 LOAD TEST ORCHESTRATOR COMPLETE\n");
|
||||
console.log("===================================\n");
|
||||
|
||||
// Summary statistics
|
||||
const successRate = Math.round((successCount / scenarios.length) * 100);
|
||||
console.log("📊 EXECUTION SUMMARY:");
|
||||
console.log(
|
||||
`✅ Successful tests: ${successCount}/${scenarios.length} (${successRate}%)`,
|
||||
);
|
||||
console.log(`❌ Failed tests: ${failureCount}/${scenarios.length}`);
|
||||
console.log(`⏱️ Total execution time: ${Math.round(totalTime / 60)} minutes`);
|
||||
console.log(`🌍 Environment: ${environment}`);
|
||||
console.log(`🚀 Mode: ${executionMode}`);
|
||||
|
||||
// Generate CSV report
|
||||
const csvHeaders =
|
||||
"Test Name,Status,VUs,Target Duration,Actual Duration,Environment,Mode,Test URL,Error,Completed At";
|
||||
const csvRows = results.map(
|
||||
(r) =>
|
||||
`"${r.test}","${r.status}",${r.vus},"${r.target_duration || "N/A"}","${r.duration || "N/A"}","${r.environment}","${r.execution_mode}","${r.url || "N/A"}","${r.error || "None"}","${r.completed_at || "N/A"}"`,
|
||||
);
|
||||
|
||||
const csvContent = [csvHeaders, ...csvRows].join("\n");
|
||||
const csvFile = path.join(resultsDir, "orchestrator_results.csv");
|
||||
fs.writeFileSync(csvFile, csvContent);
|
||||
console.log(`\n📁 CSV Report: ${csvFile}`);
|
||||
|
||||
// Generate cloud URLs file
|
||||
if (executionMode === "cloud" && cloudUrls.length > 0) {
|
||||
const urlsContent = [
|
||||
`# AutoGPT Platform Load Test URLs`,
|
||||
`# Environment: ${environment}`,
|
||||
`# Generated: ${new Date().toISOString()}`,
|
||||
`# Dashboard: https://significantgravitas.grafana.net/a/k6-app/`,
|
||||
"",
|
||||
...cloudUrls,
|
||||
"",
|
||||
"# Direct Dashboard Access:",
|
||||
"https://significantgravitas.grafana.net/a/k6-app/",
|
||||
].join("\n");
|
||||
|
||||
const urlsFile = path.join(resultsDir, "cloud_test_urls.txt");
|
||||
fs.writeFileSync(urlsFile, urlsContent);
|
||||
console.log(`📁 Cloud URLs: ${urlsFile}`);
|
||||
}
|
||||
|
||||
// Generate detailed JSON report
|
||||
const jsonReport = {
|
||||
meta: {
|
||||
orchestrator_version: "1.0",
|
||||
environment: environment,
|
||||
execution_mode: executionMode,
|
||||
test_scale: testScale,
|
||||
total_scenarios: scenarios.length,
|
||||
generated_at: new Date().toISOString(),
|
||||
results_directory: resultsDir,
|
||||
},
|
||||
summary: {
|
||||
successful_tests: successCount,
|
||||
failed_tests: failureCount,
|
||||
success_rate: `${successRate}%`,
|
||||
total_execution_time_seconds: totalTime,
|
||||
total_execution_time_minutes: Math.round(totalTime / 60),
|
||||
},
|
||||
test_results: results,
|
||||
detailed_metrics: detailedMetrics,
|
||||
cloud_urls: cloudUrls,
|
||||
};
|
||||
|
||||
const jsonFile = path.join(resultsDir, "orchestrator_results.json");
|
||||
fs.writeFileSync(jsonFile, JSON.stringify(jsonReport, null, 2));
|
||||
console.log(`📁 JSON Report: ${jsonFile}`);
|
||||
|
||||
// Display immediate results
|
||||
if (executionMode === "cloud" && cloudUrls.length > 0) {
|
||||
console.log("\n🔗 K6 CLOUD TEST DASHBOARD URLS:");
|
||||
console.log("================================");
|
||||
cloudUrls.slice(0, 5).forEach((url) => console.log(url));
|
||||
if (cloudUrls.length > 5) {
|
||||
console.log(`... and ${cloudUrls.length - 5} more URLs in ${urlsFile}`);
|
||||
}
|
||||
console.log(
|
||||
"\n📈 Main Dashboard: https://significantgravitas.grafana.net/a/k6-app/",
|
||||
);
|
||||
}
|
||||
|
||||
console.log(`\n📂 All results saved in: ${resultsDir}/`);
|
||||
console.log("🏁 Load Test Orchestrator finished successfully!");
|
||||
}
|
||||
|
||||
// Show usage help
|
||||
function showUsage() {
|
||||
console.log("🎯 AutoGPT Platform Load Test Orchestrator\n");
|
||||
console.log(
|
||||
"Usage: node load-test-orchestrator.js [ENVIRONMENT] [MODE] [SCALE]\n",
|
||||
);
|
||||
console.log("ENVIRONMENT:");
|
||||
console.log(" LOCAL - http://localhost:8006 (local development)");
|
||||
console.log(" DEV - https://dev-api.agpt.co (development server)");
|
||||
console.log(
|
||||
" PROD - https://api.agpt.co (production - coordinate with team!)\n",
|
||||
);
|
||||
console.log("MODE:");
|
||||
console.log(" local - Run locally with JSON output files");
|
||||
console.log(" cloud - Run in k6 cloud with dashboard monitoring\n");
|
||||
console.log("SCALE:");
|
||||
console.log(" small - 3 validation tests (~5 minutes)");
|
||||
console.log(" full - 25 comprehensive tests (~2 hours)\n");
|
||||
console.log("Examples:");
|
||||
console.log(" node load-test-orchestrator.js DEV cloud small");
|
||||
console.log(" node load-test-orchestrator.js LOCAL local small");
|
||||
console.log(" node load-test-orchestrator.js DEV cloud full");
|
||||
console.log(
|
||||
" node load-test-orchestrator.js PROD cloud full # Coordinate with team!\n",
|
||||
);
|
||||
console.log("Requirements:");
|
||||
console.log(
|
||||
" - Pre-authenticated tokens generated (node generate-tokens.js)",
|
||||
);
|
||||
console.log(" - k6 installed locally or run from Kubernetes pod");
|
||||
console.log(" - For cloud mode: K6_CLOUD_TOKEN and K6_CLOUD_PROJECT_ID set");
|
||||
}
|
||||
|
||||
// Handle command line help
|
||||
if (args.includes("--help") || args.includes("-h")) {
|
||||
showUsage();
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
// Handle graceful shutdown
|
||||
process.on("SIGINT", () => {
|
||||
console.log("\n🛑 Orchestrator interrupted by user");
|
||||
console.log("📊 Generating partial results...");
|
||||
generateReports(
|
||||
results.filter((r) => r.status === "SUCCESS").length,
|
||||
results.filter((r) => r.status === "FAILED").length,
|
||||
0,
|
||||
).then(() => {
|
||||
console.log("🏃♂️ Partial results saved");
|
||||
process.exit(0);
|
||||
});
|
||||
});
|
||||
|
||||
// Start orchestrator
|
||||
if (require.main === module) {
|
||||
runOrchestrator().catch((error) => {
|
||||
console.error("💥 Orchestrator failed:", error);
|
||||
process.exit(1);
|
||||
});
|
||||
}
|
||||
|
||||
module.exports = { runOrchestrator, testScenarios };
|
||||
362
autogpt_platform/backend/load-tests/orchestrator/orchestrator.js
Normal file
362
autogpt_platform/backend/load-tests/orchestrator/orchestrator.js
Normal file
@@ -0,0 +1,362 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
/**
|
||||
* AutoGPT Platform Load Test Orchestrator
|
||||
*
|
||||
* Optimized test suite with only the highest VU count for each unique test type.
|
||||
* Eliminates duplicate tests and focuses on maximum load testing.
|
||||
*/
|
||||
|
||||
import { spawn } from 'child_process';
|
||||
import fs from 'fs';
|
||||
|
||||
console.log("🎯 AUTOGPT PLATFORM LOAD TEST ORCHESTRATOR\n");
|
||||
console.log("===========================================\n");
|
||||
|
||||
// Parse command line arguments
|
||||
const args = process.argv.slice(2);
|
||||
const environment = args[0] || "DEV"; // LOCAL, DEV, PROD
|
||||
const executionMode = args[1] || "cloud"; // local, cloud
|
||||
|
||||
console.log(`🌍 Target Environment: ${environment}`);
|
||||
console.log(`🚀 Execution Mode: ${executionMode}`);
|
||||
|
||||
// Unified test scenarios - only highest VUs for each unique test
|
||||
const unifiedTestScenarios = [
|
||||
// 1. Marketplace Public Access (highest VUs: 314)
|
||||
{
|
||||
name: "Marketplace_Public_Access_Max_Load",
|
||||
file: "tests/marketplace/public-access-test.js",
|
||||
vus: 314,
|
||||
duration: "3m",
|
||||
rampUp: "30s",
|
||||
rampDown: "30s",
|
||||
description: "Public marketplace browsing at maximum load"
|
||||
},
|
||||
|
||||
// 2. Marketplace Authenticated Access (highest VUs: 157)
|
||||
{
|
||||
name: "Marketplace_Authenticated_Access_Max_Load",
|
||||
file: "tests/marketplace/library-access-test.js",
|
||||
vus: 157,
|
||||
duration: "3m",
|
||||
rampUp: "30s",
|
||||
rampDown: "30s",
|
||||
description: "Authenticated marketplace/library operations at maximum load"
|
||||
},
|
||||
|
||||
// 3. Core API Load Test (highest VUs: 100)
|
||||
{
|
||||
name: "Core_API_Max_Load",
|
||||
file: "tests/api/core-api-test.js",
|
||||
vus: 100,
|
||||
duration: "5m",
|
||||
rampUp: "1m",
|
||||
rampDown: "1m",
|
||||
description: "Core authenticated API endpoints at maximum load"
|
||||
},
|
||||
|
||||
// 4. Graph Execution Load Test (highest VUs: 100)
|
||||
{
|
||||
name: "Graph_Execution_Max_Load",
|
||||
file: "tests/api/graph-execution-test.js",
|
||||
vus: 100,
|
||||
duration: "5m",
|
||||
rampUp: "1m",
|
||||
rampDown: "1m",
|
||||
description: "Graph workflow execution pipeline at maximum load"
|
||||
},
|
||||
|
||||
// 5. Credits API Single Endpoint (upgraded to 100 VUs)
|
||||
{
|
||||
name: "Credits_API_Max_Load",
|
||||
file: "tests/basic/single-endpoint-test.js",
|
||||
vus: 100,
|
||||
duration: "3m",
|
||||
rampUp: "30s",
|
||||
rampDown: "30s",
|
||||
env: { ENDPOINT: "credits", CONCURRENT_REQUESTS: "1" },
|
||||
description: "Credits API endpoint at maximum load"
|
||||
},
|
||||
|
||||
// 6. Graphs API Single Endpoint (upgraded to 100 VUs)
|
||||
{
|
||||
name: "Graphs_API_Max_Load",
|
||||
file: "tests/basic/single-endpoint-test.js",
|
||||
vus: 100,
|
||||
duration: "3m",
|
||||
rampUp: "30s",
|
||||
rampDown: "30s",
|
||||
env: { ENDPOINT: "graphs", CONCURRENT_REQUESTS: "1" },
|
||||
description: "Graphs API endpoint at maximum load"
|
||||
},
|
||||
|
||||
// 7. Blocks API Single Endpoint (upgraded to 100 VUs)
|
||||
{
|
||||
name: "Blocks_API_Max_Load",
|
||||
file: "tests/basic/single-endpoint-test.js",
|
||||
vus: 100,
|
||||
duration: "3m",
|
||||
rampUp: "30s",
|
||||
rampDown: "30s",
|
||||
env: { ENDPOINT: "blocks", CONCURRENT_REQUESTS: "1" },
|
||||
description: "Blocks API endpoint at maximum load"
|
||||
},
|
||||
|
||||
// 8. Executions API Single Endpoint (upgraded to 100 VUs)
|
||||
{
|
||||
name: "Executions_API_Max_Load",
|
||||
file: "tests/basic/single-endpoint-test.js",
|
||||
vus: 100,
|
||||
duration: "3m",
|
||||
rampUp: "30s",
|
||||
rampDown: "30s",
|
||||
env: { ENDPOINT: "executions", CONCURRENT_REQUESTS: "1" },
|
||||
description: "Executions API endpoint at maximum load"
|
||||
},
|
||||
|
||||
// 9. Comprehensive Platform Journey (highest VUs: 100)
|
||||
{
|
||||
name: "Comprehensive_Platform_Max_Load",
|
||||
file: "tests/comprehensive/platform-journey-test.js",
|
||||
vus: 100,
|
||||
duration: "3m",
|
||||
rampUp: "30s",
|
||||
rampDown: "30s",
|
||||
description: "End-to-end user journey simulation at maximum load"
|
||||
},
|
||||
|
||||
// 10. Marketplace Stress Test (highest VUs: 500)
|
||||
{
|
||||
name: "Marketplace_Stress_Test",
|
||||
file: "tests/marketplace/public-access-test.js",
|
||||
vus: 500,
|
||||
duration: "2m",
|
||||
rampUp: "1m",
|
||||
rampDown: "1m",
|
||||
description: "Ultimate marketplace stress test"
|
||||
},
|
||||
|
||||
// 11. Core API Stress Test (highest VUs: 500)
|
||||
{
|
||||
name: "Core_API_Stress_Test",
|
||||
file: "tests/api/core-api-test.js",
|
||||
vus: 500,
|
||||
duration: "2m",
|
||||
rampUp: "1m",
|
||||
rampDown: "1m",
|
||||
description: "Ultimate core API stress test"
|
||||
},
|
||||
|
||||
// 12. Long Duration Core API Test (highest VUs: 100, longest duration)
|
||||
{
|
||||
name: "Long_Duration_Core_API_Test",
|
||||
file: "tests/api/core-api-test.js",
|
||||
vus: 100,
|
||||
duration: "10m",
|
||||
rampUp: "1m",
|
||||
rampDown: "1m",
|
||||
description: "Extended duration core API endurance test"
|
||||
}
|
||||
];
|
||||
|
||||
// Configuration
|
||||
const K6_CLOUD_TOKEN = process.env.K6_CLOUD_TOKEN || '9347b8bd716cadc243e92f7d2f89107febfb81b49f2340d17da515d7b0513b51';
|
||||
const K6_CLOUD_PROJECT_ID = process.env.K6_CLOUD_PROJECT_ID || '4254406';
|
||||
const PAUSE_BETWEEN_TESTS = 30; // seconds
|
||||
|
||||
/**
|
||||
* Sleep for specified milliseconds
|
||||
*/
|
||||
function sleep(ms) {
|
||||
return new Promise(resolve => setTimeout(resolve, ms));
|
||||
}
|
||||
|
||||
/**
|
||||
* Run a single k6 test
|
||||
*/
|
||||
async function runTest(test, index) {
|
||||
return new Promise((resolve, reject) => {
|
||||
console.log(`\n🚀 Test ${index + 1}/${unifiedTestScenarios.length}: ${test.name}`);
|
||||
console.log(`📊 Config: ${test.vus} VUs × ${test.duration} (${executionMode} mode)`);
|
||||
console.log(`📁 Script: ${test.file}`);
|
||||
console.log(`📋 Description: ${test.description}`);
|
||||
console.log(`⏱️ Test started: ${new Date().toISOString()}`);
|
||||
|
||||
const env = {
|
||||
K6_CLOUD_TOKEN,
|
||||
K6_CLOUD_PROJECT_ID,
|
||||
K6_ENVIRONMENT: environment,
|
||||
VUS: test.vus.toString(),
|
||||
DURATION: test.duration,
|
||||
RAMP_UP: test.rampUp,
|
||||
RAMP_DOWN: test.rampDown,
|
||||
...test.env
|
||||
};
|
||||
|
||||
let args;
|
||||
if (executionMode === 'cloud') {
|
||||
args = [
|
||||
'cloud', 'run',
|
||||
...Object.entries(env).map(([key, value]) => ['--env', `${key}=${value}`]).flat(),
|
||||
test.file
|
||||
];
|
||||
} else {
|
||||
args = [
|
||||
'run',
|
||||
...Object.entries(env).map(([key, value]) => ['--env', `${key}=${value}`]).flat(),
|
||||
test.file
|
||||
];
|
||||
}
|
||||
|
||||
const k6Process = spawn('k6', args, {
|
||||
stdio: ['ignore', 'pipe', 'pipe'],
|
||||
env: { ...process.env, ...env }
|
||||
});
|
||||
|
||||
let output = '';
|
||||
let testId = null;
|
||||
|
||||
k6Process.stdout.on('data', (data) => {
|
||||
const str = data.toString();
|
||||
output += str;
|
||||
|
||||
// Extract test ID from k6 cloud output
|
||||
const testIdMatch = str.match(/Test created: .*\/(\d+)/);
|
||||
if (testIdMatch) {
|
||||
testId = testIdMatch[1];
|
||||
console.log(`🔗 Test URL: https://significantgravitas.grafana.net/a/k6-app/runs/${testId}`);
|
||||
}
|
||||
|
||||
// Show progress updates
|
||||
const progressMatch = str.match(/(\d+)%/);
|
||||
if (progressMatch) {
|
||||
process.stdout.write(`\r⏳ Progress: ${progressMatch[1]}%`);
|
||||
}
|
||||
});
|
||||
|
||||
k6Process.stderr.on('data', (data) => {
|
||||
output += data.toString();
|
||||
});
|
||||
|
||||
k6Process.on('close', (code) => {
|
||||
process.stdout.write('\n'); // Clear progress line
|
||||
|
||||
if (code === 0) {
|
||||
console.log(`✅ ${test.name} SUCCESS`);
|
||||
resolve({
|
||||
success: true,
|
||||
testId,
|
||||
url: testId ? `https://significantgravitas.grafana.net/a/k6-app/runs/${testId}` : 'unknown',
|
||||
vus: test.vus,
|
||||
duration: test.duration
|
||||
});
|
||||
} else {
|
||||
console.log(`❌ ${test.name} FAILED (exit code ${code})`);
|
||||
resolve({
|
||||
success: false,
|
||||
testId,
|
||||
url: testId ? `https://significantgravitas.grafana.net/a/k6-app/runs/${testId}` : 'unknown',
|
||||
exitCode: code,
|
||||
vus: test.vus,
|
||||
duration: test.duration
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
k6Process.on('error', (error) => {
|
||||
console.log(`❌ ${test.name} ERROR: ${error.message}`);
|
||||
reject(error);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Main execution
|
||||
*/
|
||||
async function main() {
|
||||
console.log(`\n📋 UNIFIED TEST PLAN`);
|
||||
console.log(`📊 Total tests: ${unifiedTestScenarios.length} (reduced from 25 original tests)`);
|
||||
console.log(`⏱️ Estimated duration: ~60 minutes\n`);
|
||||
|
||||
console.log(`📋 Test Summary:`);
|
||||
unifiedTestScenarios.forEach((test, i) => {
|
||||
console.log(` ${i + 1}. ${test.name} (${test.vus} VUs × ${test.duration})`);
|
||||
});
|
||||
console.log('');
|
||||
|
||||
const results = [];
|
||||
|
||||
for (let i = 0; i < unifiedTestScenarios.length; i++) {
|
||||
const test = unifiedTestScenarios[i];
|
||||
|
||||
try {
|
||||
const result = await runTest(test, i);
|
||||
results.push({ ...test, ...result });
|
||||
|
||||
// Pause between tests (except after the last one)
|
||||
if (i < unifiedTestScenarios.length - 1) {
|
||||
console.log(`\n⏸️ Pausing ${PAUSE_BETWEEN_TESTS}s before next test...`);
|
||||
await sleep(PAUSE_BETWEEN_TESTS * 1000);
|
||||
}
|
||||
} catch (error) {
|
||||
console.error(`💥 Fatal error running ${test.name}:`, error.message);
|
||||
results.push({ ...test, success: false, error: error.message });
|
||||
}
|
||||
}
|
||||
|
||||
// Summary
|
||||
console.log('\n' + '='.repeat(60));
|
||||
console.log('🏁 UNIFIED LOAD TEST RESULTS SUMMARY');
|
||||
console.log('='.repeat(60));
|
||||
|
||||
const successful = results.filter(r => r.success);
|
||||
const failed = results.filter(r => !r.success);
|
||||
|
||||
console.log(`✅ Successful tests: ${successful.length}/${results.length} (${Math.round(successful.length / results.length * 100)}%)`);
|
||||
console.log(`❌ Failed tests: ${failed.length}/${results.length}`);
|
||||
|
||||
if (successful.length > 0) {
|
||||
console.log('\n✅ SUCCESSFUL TESTS:');
|
||||
successful.forEach(test => {
|
||||
console.log(` • ${test.name} (${test.vus} VUs) - ${test.url}`);
|
||||
});
|
||||
}
|
||||
|
||||
if (failed.length > 0) {
|
||||
console.log('\n❌ FAILED TESTS:');
|
||||
failed.forEach(test => {
|
||||
console.log(` • ${test.name} (${test.vus} VUs) - ${test.url || 'no URL'} (exit: ${test.exitCode || 'unknown'})`);
|
||||
});
|
||||
}
|
||||
|
||||
// Calculate total VU-minutes tested
|
||||
const totalVuMinutes = results.reduce((sum, test) => {
|
||||
const minutes = parseFloat(test.duration.replace(/[ms]/g, ''));
|
||||
const multiplier = test.duration.includes('m') ? 1 : (1/60); // convert seconds to minutes
|
||||
return sum + (test.vus * minutes * multiplier);
|
||||
}, 0);
|
||||
|
||||
console.log(`\n📊 LOAD TESTING SUMMARY:`);
|
||||
console.log(` • Total VU-minutes tested: ${Math.round(totalVuMinutes)}`);
|
||||
console.log(` • Peak concurrent VUs: ${Math.max(...results.map(r => r.vus))}`);
|
||||
console.log(` • Average test duration: ${(results.reduce((sum, r) => sum + parseFloat(r.duration.replace(/[ms]/g, '')), 0) / results.length).toFixed(1)}${results[0].duration.includes('m') ? 'm' : 's'}`);
|
||||
|
||||
// Write results to file
|
||||
const timestamp = Math.floor(Date.now() / 1000);
|
||||
const resultsFile = `unified-results-${timestamp}.json`;
|
||||
fs.writeFileSync(resultsFile, JSON.stringify(results, null, 2));
|
||||
console.log(`\n📄 Detailed results saved to: ${resultsFile}`);
|
||||
|
||||
console.log(`\n🎉 UNIFIED LOAD TEST ORCHESTRATOR COMPLETE\n`);
|
||||
|
||||
process.exit(failed.length === 0 ? 0 : 1);
|
||||
}
|
||||
|
||||
// Run if called directly
|
||||
if (process.argv[1] === new URL(import.meta.url).pathname) {
|
||||
main().catch(error => {
|
||||
console.error('💥 Fatal error:', error);
|
||||
process.exit(1);
|
||||
});
|
||||
}
|
||||
@@ -1,268 +0,0 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Unified Load Test Runner
|
||||
*
|
||||
* Supports both local execution and k6 cloud execution with the same interface.
|
||||
* Automatically detects cloud credentials and provides seamless switching.
|
||||
*
|
||||
* Usage:
|
||||
* node run-tests.js verify # Quick verification (1 VU, 10s)
|
||||
* node run-tests.js run core-api-test DEV # Run specific test locally
|
||||
* node run-tests.js run all DEV # Run all tests locally
|
||||
* node run-tests.js cloud core-api DEV # Run specific test in k6 cloud
|
||||
* node run-tests.js cloud all DEV # Run all tests in k6 cloud
|
||||
*/
|
||||
|
||||
import { execSync } from "child_process";
|
||||
import fs from "fs";
|
||||
|
||||
const TESTS = {
|
||||
"connectivity-test": {
|
||||
script: "tests/basic/connectivity-test.js",
|
||||
description: "Basic connectivity validation",
|
||||
cloudConfig: { vus: 10, duration: "2m" },
|
||||
},
|
||||
"single-endpoint-test": {
|
||||
script: "tests/basic/single-endpoint-test.js",
|
||||
description: "Individual API endpoint testing",
|
||||
cloudConfig: { vus: 25, duration: "3m" },
|
||||
},
|
||||
"core-api-test": {
|
||||
script: "tests/api/core-api-test.js",
|
||||
description: "Core API endpoints performance test",
|
||||
cloudConfig: { vus: 100, duration: "5m" },
|
||||
},
|
||||
"graph-execution-test": {
|
||||
script: "tests/api/graph-execution-test.js",
|
||||
description: "Graph creation and execution pipeline test",
|
||||
cloudConfig: { vus: 80, duration: "5m" },
|
||||
},
|
||||
"marketplace-public-test": {
|
||||
script: "tests/marketplace/public-access-test.js",
|
||||
description: "Public marketplace browsing test",
|
||||
cloudConfig: { vus: 150, duration: "3m" },
|
||||
},
|
||||
"marketplace-library-test": {
|
||||
script: "tests/marketplace/library-access-test.js",
|
||||
description: "Authenticated marketplace/library test",
|
||||
cloudConfig: { vus: 100, duration: "4m" },
|
||||
},
|
||||
"comprehensive-test": {
|
||||
script: "tests/comprehensive/platform-journey-test.js",
|
||||
description: "Complete user journey simulation",
|
||||
cloudConfig: { vus: 50, duration: "6m" },
|
||||
},
|
||||
};
|
||||
|
||||
function checkCloudCredentials() {
|
||||
const token = process.env.K6_CLOUD_TOKEN;
|
||||
const projectId = process.env.K6_CLOUD_PROJECT_ID;
|
||||
|
||||
if (!token || !projectId) {
|
||||
console.log("❌ Missing k6 cloud credentials");
|
||||
console.log("Set: K6_CLOUD_TOKEN and K6_CLOUD_PROJECT_ID");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
function verifySetup() {
|
||||
console.log("🔍 Quick Setup Verification");
|
||||
|
||||
// Check tokens
|
||||
if (!fs.existsSync("configs/pre-authenticated-tokens.js")) {
|
||||
console.log("❌ No tokens found. Run: node generate-tokens.js");
|
||||
return false;
|
||||
}
|
||||
|
||||
// Quick test
|
||||
try {
|
||||
execSync(
|
||||
"K6_ENVIRONMENT=DEV VUS=1 DURATION=10s k6 run tests/basic/connectivity-test.js --quiet",
|
||||
{ stdio: "inherit", cwd: process.cwd() },
|
||||
);
|
||||
console.log("✅ Verification successful");
|
||||
return true;
|
||||
} catch (error) {
|
||||
console.log("❌ Verification failed");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
function runLocalTest(testName, environment) {
|
||||
const test = TESTS[testName];
|
||||
if (!test) {
|
||||
console.log(`❌ Unknown test: ${testName}`);
|
||||
console.log("Available tests:", Object.keys(TESTS).join(", "));
|
||||
return;
|
||||
}
|
||||
|
||||
console.log(`🚀 Running ${test.description} locally on ${environment}`);
|
||||
|
||||
try {
|
||||
const cmd = `K6_ENVIRONMENT=${environment} VUS=5 DURATION=30s k6 run ${test.script}`;
|
||||
execSync(cmd, { stdio: "inherit", cwd: process.cwd() });
|
||||
console.log("✅ Test completed");
|
||||
} catch (error) {
|
||||
console.log("❌ Test failed");
|
||||
}
|
||||
}
|
||||
|
||||
function runCloudTest(testName, environment) {
|
||||
const test = TESTS[testName];
|
||||
if (!test) {
|
||||
console.log(`❌ Unknown test: ${testName}`);
|
||||
console.log("Available tests:", Object.keys(TESTS).join(", "));
|
||||
return;
|
||||
}
|
||||
|
||||
const { vus, duration } = test.cloudConfig;
|
||||
console.log(`☁️ Running ${test.description} in k6 cloud`);
|
||||
console.log(` Environment: ${environment}`);
|
||||
console.log(` Config: ${vus} VUs × ${duration}`);
|
||||
|
||||
try {
|
||||
const cmd = `k6 cloud run --env K6_ENVIRONMENT=${environment} --env VUS=${vus} --env DURATION=${duration} --env RAMP_UP=30s --env RAMP_DOWN=30s ${test.script}`;
|
||||
const output = execSync(cmd, {
|
||||
stdio: "pipe",
|
||||
cwd: process.cwd(),
|
||||
encoding: "utf8",
|
||||
});
|
||||
|
||||
// Extract and display URL
|
||||
const urlMatch = output.match(/https:\/\/[^\s]*grafana[^\s]*/);
|
||||
if (urlMatch) {
|
||||
const url = urlMatch[0];
|
||||
console.log(`🔗 Test URL: ${url}`);
|
||||
|
||||
// Save to results file
|
||||
const timestamp = new Date().toISOString();
|
||||
const result = `${timestamp} - ${testName}: ${url}\n`;
|
||||
fs.appendFileSync("k6-cloud-results.txt", result);
|
||||
}
|
||||
|
||||
console.log("✅ Cloud test started successfully");
|
||||
} catch (error) {
|
||||
console.log("❌ Cloud test failed to start");
|
||||
console.log(error.message);
|
||||
}
|
||||
}
|
||||
|
||||
function runAllLocalTests(environment) {
|
||||
console.log(`🚀 Running all tests locally on ${environment}`);
|
||||
|
||||
for (const [testName, test] of Object.entries(TESTS)) {
|
||||
console.log(`\n📊 ${test.description}`);
|
||||
runLocalTest(testName, environment);
|
||||
}
|
||||
}
|
||||
|
||||
function runAllCloudTests(environment) {
|
||||
console.log(`☁️ Running all tests in k6 cloud on ${environment}`);
|
||||
|
||||
const testNames = Object.keys(TESTS);
|
||||
for (let i = 0; i < testNames.length; i++) {
|
||||
const testName = testNames[i];
|
||||
console.log(`\n📊 Test ${i + 1}/${testNames.length}: ${testName}`);
|
||||
|
||||
runCloudTest(testName, environment);
|
||||
|
||||
// Brief pause between cloud tests (except last one)
|
||||
if (i < testNames.length - 1) {
|
||||
console.log("⏸️ Waiting 2 minutes before next cloud test...");
|
||||
execSync("sleep 120");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function listTests() {
|
||||
console.log("📋 Available Tests:");
|
||||
console.log("==================");
|
||||
|
||||
Object.entries(TESTS).forEach(([name, test]) => {
|
||||
const { vus, duration } = test.cloudConfig;
|
||||
console.log(` ${name.padEnd(20)} - ${test.description}`);
|
||||
console.log(` ${" ".repeat(20)} Cloud: ${vus} VUs × ${duration}`);
|
||||
});
|
||||
|
||||
console.log("\n🌍 Available Environments: LOCAL, DEV, PROD");
|
||||
console.log("\n💡 Examples:");
|
||||
console.log(" # Local execution (5 VUs, 30s)");
|
||||
console.log(" node run-tests.js verify");
|
||||
console.log(" node run-tests.js run core-api-test DEV");
|
||||
console.log(" node run-tests.js run core-api-test,marketplace-test DEV");
|
||||
console.log(" node run-tests.js run all DEV");
|
||||
console.log("");
|
||||
console.log(" # Cloud execution (high VUs, longer duration)");
|
||||
console.log(" node run-tests.js cloud core-api DEV");
|
||||
console.log(" node run-tests.js cloud all DEV");
|
||||
|
||||
const hasCloudCreds = checkCloudCredentials();
|
||||
console.log(
|
||||
`\n☁️ Cloud Status: ${hasCloudCreds ? "✅ Configured" : "❌ Missing credentials"}`,
|
||||
);
|
||||
}
|
||||
|
||||
function runSequentialTests(testNames, environment, isCloud = false) {
|
||||
const tests = testNames.split(",").map((t) => t.trim());
|
||||
const mode = isCloud ? "cloud" : "local";
|
||||
console.log(
|
||||
`🚀 Running ${tests.length} tests sequentially in ${mode} mode on ${environment}`,
|
||||
);
|
||||
|
||||
for (let i = 0; i < tests.length; i++) {
|
||||
const testName = tests[i];
|
||||
console.log(`\n📊 Test ${i + 1}/${tests.length}: ${testName}`);
|
||||
|
||||
if (isCloud) {
|
||||
runCloudTest(testName, environment);
|
||||
} else {
|
||||
runLocalTest(testName, environment);
|
||||
}
|
||||
|
||||
// Brief pause between tests (except last one)
|
||||
if (i < tests.length - 1) {
|
||||
const pauseTime = isCloud ? "2 minutes" : "10 seconds";
|
||||
const pauseCmd = isCloud ? "sleep 120" : "sleep 10";
|
||||
console.log(`⏸️ Waiting ${pauseTime} before next test...`);
|
||||
if (!isCloud) {
|
||||
// Note: In real implementation, would use setTimeout/sleep for local tests
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Main CLI
|
||||
const [, , command, testOrEnv, environment] = process.argv;
|
||||
|
||||
switch (command) {
|
||||
case "verify":
|
||||
verifySetup();
|
||||
break;
|
||||
case "list":
|
||||
listTests();
|
||||
break;
|
||||
case "run":
|
||||
if (testOrEnv === "all") {
|
||||
runAllLocalTests(environment || "DEV");
|
||||
} else if (testOrEnv?.includes(",")) {
|
||||
runSequentialTests(testOrEnv, environment || "DEV", false);
|
||||
} else {
|
||||
runLocalTest(testOrEnv, environment || "DEV");
|
||||
}
|
||||
break;
|
||||
case "cloud":
|
||||
if (!checkCloudCredentials()) {
|
||||
process.exit(1);
|
||||
}
|
||||
if (testOrEnv === "all") {
|
||||
runAllCloudTests(environment || "DEV");
|
||||
} else if (testOrEnv?.includes(",")) {
|
||||
runSequentialTests(testOrEnv, environment || "DEV", true);
|
||||
} else {
|
||||
runCloudTest(testOrEnv, environment || "DEV");
|
||||
}
|
||||
break;
|
||||
default:
|
||||
listTests();
|
||||
}
|
||||
@@ -0,0 +1,26 @@
|
||||
/*
|
||||
Warnings:
|
||||
|
||||
- You are about to drop the column `notificationDot` on the `UserOnboarding` table. All the data in the column will be lost.
|
||||
|
||||
*/
|
||||
-- AlterEnum
|
||||
-- This migration adds more than one value to an enum.
|
||||
-- With PostgreSQL versions 11 and earlier, this is not possible
|
||||
-- in a single migration. This can be worked around by creating
|
||||
-- multiple migrations, each migration adding only one value to
|
||||
-- the enum.
|
||||
|
||||
|
||||
ALTER TYPE "OnboardingStep" ADD VALUE 'RE_RUN_AGENT';
|
||||
ALTER TYPE "OnboardingStep" ADD VALUE 'SCHEDULE_AGENT';
|
||||
ALTER TYPE "OnboardingStep" ADD VALUE 'RUN_3_DAYS';
|
||||
ALTER TYPE "OnboardingStep" ADD VALUE 'TRIGGER_WEBHOOK';
|
||||
ALTER TYPE "OnboardingStep" ADD VALUE 'RUN_14_DAYS';
|
||||
ALTER TYPE "OnboardingStep" ADD VALUE 'RUN_AGENTS_100';
|
||||
|
||||
-- AlterTable
|
||||
ALTER TABLE "UserOnboarding" DROP COLUMN "notificationDot",
|
||||
ADD COLUMN "consecutiveRunDays" INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN "lastRunAt" TIMESTAMP(3),
|
||||
ADD COLUMN "walletShown" BOOLEAN NOT NULL DEFAULT false;
|
||||
@@ -68,15 +68,23 @@ enum OnboardingStep {
|
||||
AGENT_NEW_RUN
|
||||
AGENT_INPUT
|
||||
CONGRATS
|
||||
// First Wins
|
||||
GET_RESULTS
|
||||
RUN_AGENTS
|
||||
// Marketplace
|
||||
MARKETPLACE_VISIT
|
||||
MARKETPLACE_ADD_AGENT
|
||||
MARKETPLACE_RUN_AGENT
|
||||
// Builder
|
||||
BUILDER_OPEN
|
||||
BUILDER_SAVE_AGENT
|
||||
// Consistency Challenge
|
||||
RE_RUN_AGENT
|
||||
SCHEDULE_AGENT
|
||||
RUN_AGENTS
|
||||
RUN_3_DAYS
|
||||
// The Pro Playground
|
||||
TRIGGER_WEBHOOK
|
||||
RUN_14_DAYS
|
||||
RUN_AGENTS_100
|
||||
// No longer rewarded but exist for analytical purposes
|
||||
BUILDER_OPEN
|
||||
BUILDER_RUN_AGENT
|
||||
}
|
||||
|
||||
@@ -86,7 +94,7 @@ model UserOnboarding {
|
||||
updatedAt DateTime? @updatedAt
|
||||
|
||||
completedSteps OnboardingStep[] @default([])
|
||||
notificationDot Boolean @default(true)
|
||||
walletShown Boolean @default(false)
|
||||
notified OnboardingStep[] @default([])
|
||||
rewardedFor OnboardingStep[] @default([])
|
||||
usageReason String?
|
||||
@@ -96,6 +104,8 @@ model UserOnboarding {
|
||||
agentInput Json?
|
||||
onboardingAgentExecutionId String?
|
||||
agentRuns Int @default(0)
|
||||
lastRunAt DateTime?
|
||||
consecutiveRunDays Int @default(0)
|
||||
|
||||
userId String @unique
|
||||
User User @relation(fields: [userId], references: [id], onDelete: Cascade)
|
||||
|
||||
@@ -471,7 +471,7 @@ async def main():
|
||||
data={
|
||||
"userId": user.id,
|
||||
"completedSteps": completed_steps,
|
||||
"notificationDot": random.choice([True, False]),
|
||||
"walletShown": random.choice([True, False]),
|
||||
"notified": (
|
||||
random.sample(completed_steps, k=min(3, len(completed_steps)))
|
||||
if completed_steps
|
||||
|
||||
@@ -27,11 +27,16 @@ Sentry.init({
|
||||
|
||||
// Add optional integrations for additional features
|
||||
integrations: [
|
||||
Sentry.replayIntegration(),
|
||||
Sentry.captureConsoleIntegration(),
|
||||
Sentry.extraErrorDataIntegration(),
|
||||
Sentry.browserProfilingIntegration(),
|
||||
Sentry.httpClientIntegration(),
|
||||
// Sentry.launchDarklyIntegration(),
|
||||
Sentry.replayIntegration({
|
||||
unmask: [".sentry-unmask, [data-sentry-unmask]"],
|
||||
}),
|
||||
Sentry.replayCanvasIntegration(),
|
||||
Sentry.reportingObserverIntegration(),
|
||||
Sentry.browserProfilingIntegration(),
|
||||
// Sentry.feedbackIntegration({
|
||||
// // Additional SDK configuration goes in here, for example:
|
||||
// colorScheme: "system",
|
||||
|
||||
@@ -25,7 +25,7 @@
|
||||
"defaults"
|
||||
],
|
||||
"dependencies": {
|
||||
"@faker-js/faker": "9.9.0",
|
||||
"@faker-js/faker": "10.0.0",
|
||||
"@hookform/resolvers": "5.2.1",
|
||||
"@next/third-parties": "15.4.6",
|
||||
"@phosphor-icons/react": "2.1.10",
|
||||
@@ -51,7 +51,7 @@
|
||||
"@rjsf/core": "5.24.13",
|
||||
"@rjsf/utils": "5.24.13",
|
||||
"@rjsf/validator-ajv8": "5.24.13",
|
||||
"@sentry/nextjs": "9.42.0",
|
||||
"@sentry/nextjs": "10.15.0",
|
||||
"@supabase/ssr": "0.6.1",
|
||||
"@supabase/supabase-js": "2.55.0",
|
||||
"@tanstack/react-query": "5.85.3",
|
||||
|
||||
991
autogpt_platform/frontend/pnpm-lock.yaml
generated
991
autogpt_platform/frontend/pnpm-lock.yaml
generated
File diff suppressed because it is too large
Load Diff
@@ -39,4 +39,8 @@ Sentry.init({
|
||||
debug: false,
|
||||
|
||||
enableLogs: true,
|
||||
integrations: [
|
||||
Sentry.captureConsoleIntegration(),
|
||||
Sentry.extraErrorDataIntegration(),
|
||||
],
|
||||
});
|
||||
|
||||
@@ -42,6 +42,7 @@ Sentry.init({
|
||||
integrations: [
|
||||
Sentry.anrIntegration(),
|
||||
// NodeProfilingIntegration,
|
||||
Sentry.extraErrorDataIntegration(),
|
||||
// Sentry.fsIntegration(),
|
||||
],
|
||||
|
||||
|
||||
@@ -1,111 +1,31 @@
|
||||
import { useCallback, useEffect, useRef, useState } from "react";
|
||||
import { ChevronDown, Check } from "lucide-react";
|
||||
import { OnboardingStep } from "@/lib/autogpt-server-api";
|
||||
import { useOnboarding } from "../../../../providers/onboarding/onboarding-provider";
|
||||
import { ChevronDown, Check, BadgeQuestionMark } from "lucide-react";
|
||||
import { cn } from "@/lib/utils";
|
||||
import * as party from "party-js";
|
||||
import { useOnboarding } from "@/providers/onboarding/onboarding-provider";
|
||||
import { Task, TaskGroup } from "@/components/__legacy__/Wallet";
|
||||
|
||||
interface Task {
|
||||
id: OnboardingStep;
|
||||
name: string;
|
||||
amount: number;
|
||||
details: string;
|
||||
video?: string;
|
||||
interface Props {
|
||||
groups: TaskGroup[];
|
||||
}
|
||||
|
||||
interface TaskGroup {
|
||||
name: string;
|
||||
tasks: Task[];
|
||||
isOpen: boolean;
|
||||
}
|
||||
|
||||
export function TaskGroups() {
|
||||
const [groups, setGroups] = useState<TaskGroup[]>([
|
||||
{
|
||||
name: "Run your first agents",
|
||||
isOpen: true,
|
||||
tasks: [
|
||||
{
|
||||
id: "GET_RESULTS",
|
||||
name: "Complete onboarding and see your first agent's results",
|
||||
amount: 3,
|
||||
details: "",
|
||||
},
|
||||
{
|
||||
id: "RUN_AGENTS",
|
||||
name: "Run 10 agents",
|
||||
amount: 3,
|
||||
details: "Run agents from Library or Builder 10 times",
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
name: "Explore the Marketplace",
|
||||
isOpen: true,
|
||||
tasks: [
|
||||
{
|
||||
id: "MARKETPLACE_VISIT",
|
||||
name: "Go to Marketplace",
|
||||
amount: 0,
|
||||
details: "Click Marketplace in the top navigation",
|
||||
video: "/onboarding/marketplace-visit.mp4",
|
||||
},
|
||||
{
|
||||
id: "MARKETPLACE_ADD_AGENT",
|
||||
name: "Find an agent",
|
||||
amount: 1,
|
||||
details:
|
||||
"Search for an agent in the Marketplace, like a code generator or research assistant and add it to your Library",
|
||||
video: "/onboarding/marketplace-add.mp4",
|
||||
},
|
||||
{
|
||||
id: "MARKETPLACE_RUN_AGENT",
|
||||
name: "Try out your agent",
|
||||
amount: 1,
|
||||
details:
|
||||
"Run the agent you found in the Marketplace from the Library - whether it's a writing assistant, data analyzer, or something else",
|
||||
video: "/onboarding/marketplace-run.mp4",
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
name: "Build your own agent",
|
||||
isOpen: true,
|
||||
tasks: [
|
||||
{
|
||||
id: "BUILDER_OPEN",
|
||||
name: "Open the Builder",
|
||||
amount: 0,
|
||||
details: "Click Builder in the top navigation",
|
||||
video: "/onboarding/builder-open.mp4",
|
||||
},
|
||||
{
|
||||
id: "BUILDER_SAVE_AGENT",
|
||||
name: "Place your first blocks and save your agent",
|
||||
amount: 1,
|
||||
details:
|
||||
"Open block library on the left and add a block to the canvas then save your agent",
|
||||
video: "/onboarding/builder-save.mp4",
|
||||
},
|
||||
{
|
||||
id: "BUILDER_RUN_AGENT",
|
||||
name: "Run your agent",
|
||||
amount: 1,
|
||||
details: "Run your agent from the Builder",
|
||||
video: "/onboarding/builder-run.mp4",
|
||||
},
|
||||
],
|
||||
},
|
||||
]);
|
||||
export function TaskGroups({ groups }: Props) {
|
||||
const { state, updateState } = useOnboarding();
|
||||
const refs = useRef<Record<string, HTMLDivElement | null>>({});
|
||||
|
||||
const [openGroups, setOpenGroups] = useState<Record<string, boolean>>(() => {
|
||||
const initialState: Record<string, boolean> = {};
|
||||
groups.forEach((group) => {
|
||||
initialState[group.name] = true;
|
||||
});
|
||||
return initialState;
|
||||
});
|
||||
|
||||
const toggleGroup = useCallback((name: string) => {
|
||||
setGroups((prevGroups) =>
|
||||
prevGroups.map((group) =>
|
||||
group.name === name ? { ...group, isOpen: !group.isOpen } : group,
|
||||
),
|
||||
);
|
||||
setOpenGroups((prev) => ({
|
||||
...prev,
|
||||
[name]: !prev[name],
|
||||
}));
|
||||
}, []);
|
||||
|
||||
const isTaskCompleted = useCallback(
|
||||
@@ -129,6 +49,21 @@ export function TaskGroups() {
|
||||
[isTaskCompleted],
|
||||
);
|
||||
|
||||
useEffect(() => {
|
||||
// Close completed groups
|
||||
setOpenGroups((prevGroups) =>
|
||||
groups.reduce(
|
||||
(acc, group) => {
|
||||
acc[group.name] = isGroupCompleted(group)
|
||||
? false
|
||||
: prevGroups[group.name];
|
||||
return acc;
|
||||
},
|
||||
{} as Record<string, boolean>,
|
||||
),
|
||||
);
|
||||
}, [state?.completedSteps, isGroupCompleted]);
|
||||
|
||||
const setRef = (name: string) => (el: HTMLDivElement | null) => {
|
||||
if (el) {
|
||||
refs.current[name] = el;
|
||||
@@ -201,19 +136,21 @@ export function TaskGroups() {
|
||||
<div
|
||||
key={group.name}
|
||||
ref={setRef(group.name)}
|
||||
className="mt-3 overflow-hidden rounded-lg border border-zinc-200 bg-zinc-100"
|
||||
className="mt-3 overflow-hidden rounded-lg border border-zinc-100 bg-zinc-50"
|
||||
>
|
||||
{/* Group Header */}
|
||||
<div
|
||||
className="flex cursor-pointer items-center justify-between p-3"
|
||||
onClick={() => toggleGroup(group.name)}
|
||||
>
|
||||
{/* Name and completed count */}
|
||||
{/* Name, details and completed count */}
|
||||
<div className="flex-1">
|
||||
<div className="text-sm font-medium text-zinc-900">
|
||||
{group.name}
|
||||
</div>
|
||||
<div className="mt-1 text-xs font-normal leading-tight text-zinc-500">
|
||||
{group.details}
|
||||
<br />
|
||||
{getCompletedCount(group.tasks)} of {group.tasks.length}{" "}
|
||||
completed
|
||||
</div>
|
||||
@@ -233,7 +170,7 @@ export function TaskGroups() {
|
||||
</div>
|
||||
)}
|
||||
<ChevronDown
|
||||
className={`h-5 w-5 text-slate-950 transition-transform duration-300 ease-in-out ${group.isOpen ? "rotate-180" : ""}`}
|
||||
className={`h-5 w-5 text-slate-950 transition-transform duration-300 ease-in-out ${openGroups[group.name] ? "rotate-180" : ""}`}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
@@ -242,8 +179,8 @@ export function TaskGroups() {
|
||||
<div
|
||||
className={cn(
|
||||
"overflow-hidden transition-all duration-300 ease-in-out",
|
||||
group.isOpen || !isGroupCompleted(group)
|
||||
? "max-h-[1000px] opacity-100"
|
||||
openGroups[group.name] || !isGroupCompleted(group)
|
||||
? "max-h-[1200px] opacity-100"
|
||||
: "max-h-0 opacity-0",
|
||||
)}
|
||||
>
|
||||
@@ -251,7 +188,7 @@ export function TaskGroups() {
|
||||
<div
|
||||
key={task.id}
|
||||
ref={setRef(task.id)}
|
||||
className="mx-3 border-t border-zinc-300 px-1 pb-1 pt-3"
|
||||
className="mx-3 border-t border-zinc-200 px-1 pb-0.5 pt-3"
|
||||
>
|
||||
<div className="mb-2 flex items-center justify-between">
|
||||
{/* Checkmark and name */}
|
||||
@@ -291,15 +228,38 @@ export function TaskGroups() {
|
||||
</span>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Progress bar and counter text */}
|
||||
{task.progress && !isTaskCompleted(task) && (
|
||||
<div className="mb-1 flex w-full items-center justify-between pl-6 pr-3">
|
||||
<div className="h-2 flex-1 overflow-hidden rounded-full bg-zinc-100">
|
||||
<div
|
||||
className="h-full bg-violet-400 transition-all duration-500 ease-in-out"
|
||||
style={{
|
||||
width: `${Math.min(
|
||||
100,
|
||||
(task.progress.current / task.progress.target) *
|
||||
100,
|
||||
)}%`,
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
<span className="mx-1 w-8 text-right text-xs font-normal text-zinc-500">
|
||||
{(
|
||||
(task.progress.current / task.progress.target) *
|
||||
100
|
||||
).toFixed(0)}
|
||||
%
|
||||
</span>
|
||||
</div>
|
||||
)}
|
||||
{/* Details section */}
|
||||
{!isGroupCompleted(group) && (
|
||||
<>
|
||||
<div
|
||||
className={cn(
|
||||
"overflow-hidden pl-6 text-xs font-normal text-zinc-500 transition-all duration-300 ease-in-out",
|
||||
"mt-0 overflow-hidden pl-6 pt-0 text-xs font-normal text-zinc-500 transition-all duration-300 ease-in-out",
|
||||
isTaskCompleted(task) && "line-through",
|
||||
group.isOpen
|
||||
openGroups[group.name]
|
||||
? "max-h-[100px] opacity-100"
|
||||
: "max-h-0 opacity-0",
|
||||
)}
|
||||
@@ -310,7 +270,7 @@ export function TaskGroups() {
|
||||
<div
|
||||
className={cn(
|
||||
"relative mx-6 aspect-video overflow-hidden rounded-lg transition-all duration-300 ease-in-out",
|
||||
group.isOpen
|
||||
openGroups[group.name]
|
||||
? "my-2 max-h-[200px] opacity-100"
|
||||
: "max-h-0 opacity-0",
|
||||
)}
|
||||
@@ -337,6 +297,46 @@ export function TaskGroups() {
|
||||
</div>
|
||||
</div>
|
||||
))}
|
||||
{/* Hidden Tasks group */}
|
||||
<div className="mt-3 overflow-hidden rounded-lg border border-zinc-100 bg-zinc-50">
|
||||
{/* Group Header */}
|
||||
<div className="flex items-center justify-between p-3">
|
||||
{/* Name and details */}
|
||||
<div className="flex-1">
|
||||
<div className="text-sm font-medium text-zinc-900">
|
||||
Hidden Tasks
|
||||
</div>
|
||||
<div className="mt-1 text-xs font-normal leading-tight text-zinc-500">
|
||||
Check back later — new tasks are on the way
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{/* Tasks */}
|
||||
<div>
|
||||
<div className="mx-3 border-t border-zinc-200 px-1 pb-1 pt-3">
|
||||
<div className="mb-2 flex items-center justify-between">
|
||||
{/* Question mark and rectangle */}
|
||||
<div className="flex items-center gap-2">
|
||||
<div className="flex h-4 w-4 items-center justify-center">
|
||||
<BadgeQuestionMark />
|
||||
</div>
|
||||
<div className="h-4 w-64 rounded-full bg-zinc-100" />
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div className="mx-3 border-t border-zinc-200 px-1 pb-1 pt-3">
|
||||
<div className="mb-2 flex items-center justify-between">
|
||||
{/* Question mark and rectangle */}
|
||||
<div className="flex items-center gap-2">
|
||||
<div className="flex h-4 w-4 items-center justify-center">
|
||||
<BadgeQuestionMark />
|
||||
</div>
|
||||
<div className="h-4 w-64 rounded-full bg-zinc-100" />
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ export default async function OnboardingResetPage() {
|
||||
const api = new BackendAPI();
|
||||
await api.updateUserOnboarding({
|
||||
completedSteps: [],
|
||||
notificationDot: true,
|
||||
walletShown: false,
|
||||
notified: [],
|
||||
usageReason: null,
|
||||
integrations: [],
|
||||
|
||||
@@ -828,7 +828,7 @@ export const CustomNode = React.memo(
|
||||
) : (
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<h3 className="font-roboto cursor-default text-lg">
|
||||
<h3 className="font-roboto sentry-unmask cursor-default text-lg">
|
||||
<TextRenderer
|
||||
value={displayTitle}
|
||||
truncateLengthLimit={80}
|
||||
|
||||
@@ -25,6 +25,7 @@ import {
|
||||
BlockIOSimpleTypeSubSchema,
|
||||
BlockIOStringSubSchema,
|
||||
BlockIOSubSchema,
|
||||
BlockIOTableSubSchema,
|
||||
DataType,
|
||||
determineDataType,
|
||||
} from "@/lib/autogpt-server-api/types";
|
||||
@@ -56,6 +57,7 @@ import { LocalValuedInput } from "../../../../../components/__legacy__/ui/input"
|
||||
import NodeHandle from "./NodeHandle";
|
||||
import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/CredentialsInputs/CredentialsInputs";
|
||||
import { Switch } from "../../../../../components/atoms/Switch/Switch";
|
||||
import { NodeTableInput } from "../../../../../components/node-table-input";
|
||||
|
||||
type NodeObjectInputTreeProps = {
|
||||
nodeId: string;
|
||||
@@ -106,6 +108,7 @@ const NodeObjectInputTree: FC<NodeObjectInputTreeProps> = ({
|
||||
handleInputChange={handleInputChange}
|
||||
handleInputClick={handleInputClick}
|
||||
displayName={propSchema.title || beautifyString(propKey)}
|
||||
parentContext={object}
|
||||
/>
|
||||
</div>
|
||||
);
|
||||
@@ -315,6 +318,7 @@ export const NodeGenericInputField: FC<{
|
||||
handleInputClick: NodeObjectInputTreeProps["handleInputClick"];
|
||||
className?: string;
|
||||
displayName?: string;
|
||||
parentContext?: { [key: string]: any };
|
||||
}> = ({
|
||||
nodeId,
|
||||
propKey,
|
||||
@@ -326,6 +330,7 @@ export const NodeGenericInputField: FC<{
|
||||
handleInputClick,
|
||||
className,
|
||||
displayName,
|
||||
parentContext,
|
||||
}) => {
|
||||
className = cn(className);
|
||||
displayName ||= propSchema.title || beautifyString(propKey);
|
||||
@@ -467,6 +472,28 @@ export const NodeGenericInputField: FC<{
|
||||
/>
|
||||
);
|
||||
|
||||
case DataType.TABLE:
|
||||
const tableSchema = propSchema as BlockIOTableSubSchema;
|
||||
// Extract headers from the schema's items properties
|
||||
const headers = tableSchema.items?.properties
|
||||
? Object.keys(tableSchema.items.properties)
|
||||
: ["Column 1", "Column 2", "Column 3"];
|
||||
return (
|
||||
<NodeTableInput
|
||||
nodeId={nodeId}
|
||||
selfKey={propKey}
|
||||
schema={tableSchema}
|
||||
headers={headers}
|
||||
rows={currentValue}
|
||||
errors={errors}
|
||||
connections={connections}
|
||||
handleInputChange={handleInputChange}
|
||||
handleInputClick={handleInputClick}
|
||||
className={className}
|
||||
displayName={displayName}
|
||||
/>
|
||||
);
|
||||
|
||||
case DataType.ARRAY:
|
||||
return (
|
||||
<NodeArrayInput
|
||||
@@ -480,6 +507,7 @@ export const NodeGenericInputField: FC<{
|
||||
connections={connections}
|
||||
handleInputChange={handleInputChange}
|
||||
handleInputClick={handleInputClick}
|
||||
parentContext={parentContext}
|
||||
/>
|
||||
);
|
||||
|
||||
@@ -894,6 +922,7 @@ const NodeArrayInput: FC<{
|
||||
handleInputClick: NodeObjectInputTreeProps["handleInputClick"];
|
||||
className?: string;
|
||||
displayName?: string;
|
||||
parentContext?: { [key: string]: any };
|
||||
}> = ({
|
||||
nodeId,
|
||||
selfKey,
|
||||
@@ -905,6 +934,7 @@ const NodeArrayInput: FC<{
|
||||
handleInputClick,
|
||||
className,
|
||||
displayName,
|
||||
parentContext: _parentContext,
|
||||
}) => {
|
||||
entries ??= schema.default;
|
||||
if (!entries || !Array.isArray(entries)) entries = [];
|
||||
|
||||
@@ -83,6 +83,7 @@ export function RunnerInputDialog({
|
||||
onRun={doRun ? undefined : doClose}
|
||||
doCreateSchedule={doCreateSchedule ? handleSchedule : undefined}
|
||||
onCreateSchedule={doCreateSchedule ? undefined : doClose}
|
||||
runCount={0}
|
||||
/>
|
||||
</DialogContent>
|
||||
</Dialog>
|
||||
|
||||
@@ -8,13 +8,17 @@ import { MultiToggle } from "@/components/molecules/MultiToggle/MultiToggle";
|
||||
import {
|
||||
BlockIOObjectSubSchema,
|
||||
BlockIOSubSchema,
|
||||
BlockIOTableSubSchema,
|
||||
DataType,
|
||||
determineDataType,
|
||||
TableRow,
|
||||
} from "@/lib/autogpt-server-api/types";
|
||||
import { TimePicker } from "@/components/molecules/TimePicker/TimePicker";
|
||||
import { FileInput } from "@/components/atoms/FileInput/FileInput";
|
||||
import { useRunAgentInputs } from "./useRunAgentInputs";
|
||||
import { Switch } from "@/components/atoms/Switch/Switch";
|
||||
import { PlusIcon, XIcon } from "@phosphor-icons/react";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
|
||||
/**
|
||||
* A generic prop structure for the TypeBasedInput.
|
||||
@@ -44,6 +48,7 @@ export function RunAgentInputs({
|
||||
const { handleUploadFile, uploadProgress } = useRunAgentInputs();
|
||||
|
||||
const dataType = determineDataType(schema);
|
||||
|
||||
const baseId = String(schema.title ?? "input")
|
||||
.replace(/\s+/g, "-")
|
||||
.toLowerCase();
|
||||
@@ -211,6 +216,101 @@ export function RunAgentInputs({
|
||||
break;
|
||||
}
|
||||
|
||||
case DataType.TABLE: {
|
||||
// Render a simple table UI for the run modal
|
||||
const tableSchema = schema as BlockIOTableSubSchema;
|
||||
const headers = tableSchema.items?.properties
|
||||
? Object.keys(tableSchema.items.properties)
|
||||
: ["Column 1", "Column 2", "Column 3"];
|
||||
|
||||
const tableData: TableRow[] = Array.isArray(value) ? value : [];
|
||||
|
||||
const updateRow = (index: number, header: string, newValue: string) => {
|
||||
const newData = [...tableData];
|
||||
if (!newData[index]) {
|
||||
newData[index] = {};
|
||||
}
|
||||
newData[index][header] = newValue;
|
||||
onChange(newData);
|
||||
};
|
||||
|
||||
const addRow = () => {
|
||||
const newRow: TableRow = {};
|
||||
headers.forEach((header) => {
|
||||
newRow[header] = "";
|
||||
});
|
||||
onChange([...tableData, newRow]);
|
||||
};
|
||||
|
||||
const removeRow = (index: number) => {
|
||||
const newData = tableData.filter((_, i) => i !== index);
|
||||
onChange(newData);
|
||||
};
|
||||
|
||||
innerInputElement = (
|
||||
<div className="w-full space-y-2">
|
||||
<div className="overflow-hidden rounded-md border">
|
||||
<table className="w-full text-sm">
|
||||
<thead>
|
||||
<tr className="bg-gray-50 dark:bg-gray-800">
|
||||
{headers.map((header) => (
|
||||
<th
|
||||
key={header}
|
||||
className="px-3 py-2 text-left font-medium"
|
||||
>
|
||||
{header}
|
||||
</th>
|
||||
))}
|
||||
<th className="w-10 px-3 py-2"></th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{tableData.map((row, rowIndex) => (
|
||||
<tr key={rowIndex} className="border-t dark:border-gray-700">
|
||||
{headers.map((header) => (
|
||||
<td key={header} className="px-3 py-1">
|
||||
<input
|
||||
type="text"
|
||||
value={String(row[header] || "")}
|
||||
onChange={(e) =>
|
||||
updateRow(rowIndex, header, e.target.value)
|
||||
}
|
||||
className="w-full rounded border px-2 py-1 dark:border-gray-700 dark:bg-gray-900"
|
||||
placeholder={`Enter ${header}`}
|
||||
/>
|
||||
</td>
|
||||
))}
|
||||
<td className="px-3 py-1">
|
||||
<Button
|
||||
type="button"
|
||||
variant="ghost"
|
||||
size="small"
|
||||
onClick={() => removeRow(rowIndex)}
|
||||
className="h-8 w-8 p-0"
|
||||
>
|
||||
<XIcon className="h-4 w-4" weight="bold" />
|
||||
</Button>
|
||||
</td>
|
||||
</tr>
|
||||
))}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
<Button
|
||||
type="button"
|
||||
variant="outline"
|
||||
size="small"
|
||||
onClick={addRow}
|
||||
className="w-full"
|
||||
>
|
||||
<PlusIcon className="mr-2 h-4 w-4" weight="bold" />
|
||||
Add Row
|
||||
</Button>
|
||||
</div>
|
||||
);
|
||||
break;
|
||||
}
|
||||
|
||||
case DataType.SHORT_TEXT:
|
||||
default:
|
||||
innerInputElement = (
|
||||
|
||||
@@ -16,6 +16,7 @@ import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutio
|
||||
import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo";
|
||||
import { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset";
|
||||
import { useGetV1GetUserTimezone } from "@/app/api/__generated__/endpoints/auth/auth";
|
||||
import { useOnboarding } from "@/providers/onboarding/onboarding-provider";
|
||||
|
||||
export type RunVariant =
|
||||
| "manual"
|
||||
@@ -48,6 +49,7 @@ export function useAgentRunModal(
|
||||
const [cronExpression, setCronExpression] = useState(
|
||||
agent.recommended_schedule_cron || "0 9 * * 1",
|
||||
);
|
||||
const { completeStep: completeOnboardingStep } = useOnboarding();
|
||||
|
||||
// Get user timezone for scheduling
|
||||
const { data: userTimezone } = useGetV1GetUserTimezone({
|
||||
@@ -319,6 +321,8 @@ export function useAgentRunModal(
|
||||
userTimezone && userTimezone !== "not-set" ? userTimezone : undefined,
|
||||
},
|
||||
});
|
||||
|
||||
completeOnboardingStep("SCHEDULE_AGENT");
|
||||
}, [
|
||||
allRequiredInputsAreSet,
|
||||
scheduleName,
|
||||
|
||||
@@ -558,6 +558,7 @@ export function OldAgentLibraryView() {
|
||||
onCreateSchedule={onCreateSchedule}
|
||||
onCreatePreset={onCreatePreset}
|
||||
agentActions={agentActions}
|
||||
runCount={agentRuns.length}
|
||||
recommendedScheduleCron={agent?.recommended_schedule_cron || null}
|
||||
/>
|
||||
) : selectedView.type == "preset" ? (
|
||||
@@ -573,6 +574,7 @@ export function OldAgentLibraryView() {
|
||||
onUpdatePreset={onUpdatePreset}
|
||||
doDeletePreset={setConfirmingDeleteAgentPreset}
|
||||
agentActions={agentActions}
|
||||
runCount={agentRuns.length}
|
||||
/>
|
||||
) : selectedView.type == "schedule" ? (
|
||||
selectedSchedule &&
|
||||
|
||||
@@ -55,6 +55,7 @@ export function AgentRunDraftView({
|
||||
doCreateSchedule: _doCreateSchedule,
|
||||
onCreateSchedule,
|
||||
agentActions,
|
||||
runCount,
|
||||
className,
|
||||
recommendedScheduleCron,
|
||||
}: {
|
||||
@@ -73,6 +74,7 @@ export function AgentRunDraftView({
|
||||
credentialsInputs: Record<string, CredentialsMetaInput>,
|
||||
) => Promise<void>;
|
||||
onCreateSchedule?: (schedule: Schedule) => void;
|
||||
runCount: number;
|
||||
className?: string;
|
||||
} & (
|
||||
| {
|
||||
@@ -198,6 +200,9 @@ export function AgentRunDraftView({
|
||||
if (onboardingState?.completedSteps.includes("MARKETPLACE_ADD_AGENT")) {
|
||||
completeOnboardingStep("MARKETPLACE_RUN_AGENT");
|
||||
}
|
||||
if (runCount > 0) {
|
||||
completeOnboardingStep("RE_RUN_AGENT");
|
||||
}
|
||||
}, [
|
||||
api,
|
||||
graph,
|
||||
@@ -319,11 +324,6 @@ export function AgentRunDraftView({
|
||||
setChangedPresetAttributes(new Set()); // reset change tracker
|
||||
})
|
||||
.catch(toastOnFail("set up agent trigger"));
|
||||
|
||||
// Mark run agent onboarding step as completed(?)
|
||||
if (onboardingState?.completedSteps.includes("MARKETPLACE_ADD_AGENT")) {
|
||||
completeOnboardingStep("MARKETPLACE_RUN_AGENT");
|
||||
}
|
||||
}, [
|
||||
api,
|
||||
graph,
|
||||
|
||||
@@ -7,6 +7,7 @@ import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import { useGetV1GetUserTimezone } from "@/app/api/__generated__/endpoints/auth/auth";
|
||||
import { getTimezoneDisplayName } from "@/lib/timezone-utils";
|
||||
import { InfoIcon } from "lucide-react";
|
||||
import { useOnboarding } from "@/providers/onboarding/onboarding-provider";
|
||||
|
||||
// Base type for cron expression only
|
||||
type CronOnlyCallback = (cronExpression: string) => void;
|
||||
@@ -48,6 +49,7 @@ export function CronSchedulerDialog(props: CronSchedulerDialogProps) {
|
||||
const [scheduleName, setScheduleName] = useState<string>(
|
||||
props.mode === "with-name" ? props.defaultScheduleName || "" : "",
|
||||
);
|
||||
const { completeStep } = useOnboarding();
|
||||
|
||||
// Get user's timezone
|
||||
const { data: userTimezone } = useGetV1GetUserTimezone({
|
||||
@@ -92,6 +94,7 @@ export function CronSchedulerDialog(props: CronSchedulerDialogProps) {
|
||||
props.onSubmit(cronExpression);
|
||||
}
|
||||
setOpen(false);
|
||||
completeStep("SCHEDULE_AGENT");
|
||||
};
|
||||
|
||||
return (
|
||||
|
||||
@@ -6779,12 +6779,18 @@
|
||||
"AGENT_INPUT",
|
||||
"CONGRATS",
|
||||
"GET_RESULTS",
|
||||
"RUN_AGENTS",
|
||||
"MARKETPLACE_VISIT",
|
||||
"MARKETPLACE_ADD_AGENT",
|
||||
"MARKETPLACE_RUN_AGENT",
|
||||
"BUILDER_OPEN",
|
||||
"BUILDER_SAVE_AGENT",
|
||||
"RE_RUN_AGENT",
|
||||
"SCHEDULE_AGENT",
|
||||
"RUN_AGENTS",
|
||||
"RUN_3_DAYS",
|
||||
"TRIGGER_WEBHOOK",
|
||||
"RUN_14_DAYS",
|
||||
"RUN_AGENTS_100",
|
||||
"BUILDER_OPEN",
|
||||
"BUILDER_RUN_AGENT"
|
||||
],
|
||||
"title": "OnboardingStep"
|
||||
@@ -9335,9 +9341,9 @@
|
||||
],
|
||||
"title": "Completedsteps"
|
||||
},
|
||||
"notificationDot": {
|
||||
"walletShown": {
|
||||
"anyOf": [{ "type": "boolean" }, { "type": "null" }],
|
||||
"title": "Notificationdot"
|
||||
"title": "Walletshown"
|
||||
},
|
||||
"notified": {
|
||||
"anyOf": [
|
||||
@@ -9382,6 +9388,17 @@
|
||||
"agentRuns": {
|
||||
"anyOf": [{ "type": "integer" }, { "type": "null" }],
|
||||
"title": "Agentruns"
|
||||
},
|
||||
"lastRunAt": {
|
||||
"anyOf": [
|
||||
{ "type": "string", "format": "date-time" },
|
||||
{ "type": "null" }
|
||||
],
|
||||
"title": "Lastrunat"
|
||||
},
|
||||
"consecutiveRunDays": {
|
||||
"anyOf": [{ "type": "integer" }, { "type": "null" }],
|
||||
"title": "Consecutiverundays"
|
||||
}
|
||||
},
|
||||
"type": "object",
|
||||
|
||||
@@ -7,6 +7,7 @@ import {
|
||||
PopoverTrigger,
|
||||
} from "@/components/__legacy__/ui/popover";
|
||||
import { X } from "lucide-react";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { PopoverClose } from "@radix-ui/react-popover";
|
||||
import { TaskGroups } from "@/app/(no-navbar)/onboarding/components/WalletTaskGroups";
|
||||
import { ScrollArea } from "./ui/scroll-area";
|
||||
@@ -15,29 +16,183 @@ import { useCallback, useEffect, useMemo, useRef, useState } from "react";
|
||||
import { cn } from "@/lib/utils";
|
||||
import * as party from "party-js";
|
||||
import WalletRefill from "./WalletRefill";
|
||||
import { OnboardingStep } from "@/lib/autogpt-server-api";
|
||||
|
||||
export interface Task {
|
||||
id: OnboardingStep;
|
||||
name: string;
|
||||
amount: number;
|
||||
details: string;
|
||||
video?: string;
|
||||
progress?: {
|
||||
current: number;
|
||||
target: number;
|
||||
};
|
||||
}
|
||||
|
||||
export interface TaskGroup {
|
||||
name: string;
|
||||
details: string;
|
||||
tasks: Task[];
|
||||
}
|
||||
|
||||
export default function Wallet() {
|
||||
const { state, updateState } = useOnboarding();
|
||||
const groups = useMemo<TaskGroup[]>(() => {
|
||||
return [
|
||||
{
|
||||
name: "First Wins",
|
||||
details: "Kickstart your journey with quick wins.",
|
||||
tasks: [
|
||||
{
|
||||
id: "GET_RESULTS",
|
||||
name: "Complete onboarding and see your first agent's results",
|
||||
amount: 3,
|
||||
details: "",
|
||||
},
|
||||
{
|
||||
id: "MARKETPLACE_VISIT",
|
||||
name: "Go to Marketplace",
|
||||
amount: 1,
|
||||
details: "Click Marketplace in the top navigation",
|
||||
video: "/onboarding/marketplace-visit.mp4",
|
||||
},
|
||||
{
|
||||
id: "MARKETPLACE_ADD_AGENT",
|
||||
name: "Find and add an agent",
|
||||
amount: 1,
|
||||
details:
|
||||
"Search for an agent in the Marketplace and add it to your Library",
|
||||
video: "/onboarding/marketplace-add.mp4",
|
||||
},
|
||||
{
|
||||
id: "MARKETPLACE_RUN_AGENT",
|
||||
name: "Open the Library page and run an agent",
|
||||
amount: 1,
|
||||
details: "Go to the Library, open an agent you want, and run it",
|
||||
video: "/onboarding/marketplace-run.mp4",
|
||||
},
|
||||
{
|
||||
id: "BUILDER_SAVE_AGENT",
|
||||
name: "Place your first blocks and save your agent",
|
||||
amount: 1,
|
||||
details:
|
||||
"Open block library on the left and add a block to the canvas then save your agent",
|
||||
video: "/onboarding/builder-save.mp4",
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
name: "Consistency Challenge",
|
||||
details: "Build your rhythm and make agents part of your routine.",
|
||||
tasks: [
|
||||
{
|
||||
id: "RE_RUN_AGENT",
|
||||
name: "Re-run an agent",
|
||||
amount: 1,
|
||||
details: "Re-run an agent from the Library",
|
||||
},
|
||||
{
|
||||
id: "SCHEDULE_AGENT",
|
||||
name: "Schedule your first agent",
|
||||
amount: 1,
|
||||
details: "Schedule an agent to run on a recurring basis",
|
||||
},
|
||||
{
|
||||
id: "RUN_AGENTS",
|
||||
name: "Run 10 agents",
|
||||
amount: 3,
|
||||
details: "Run agents from Library or Builder 10 times",
|
||||
progress: {
|
||||
current: state?.agentRuns || 0,
|
||||
target: 10,
|
||||
},
|
||||
},
|
||||
{
|
||||
id: "RUN_3_DAYS",
|
||||
name: "Run agents 3 days in a row",
|
||||
amount: 1,
|
||||
details:
|
||||
"Run any agents from the Library or Builder for 3 days in a row",
|
||||
progress: {
|
||||
current: state?.consecutiveRunDays || 0,
|
||||
target: 3,
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
name: "The Pro Playground",
|
||||
details: "Master powerful features to supercharge your workflow.",
|
||||
tasks: [
|
||||
{
|
||||
id: "TRIGGER_WEBHOOK",
|
||||
name: "Trigger an agent via webhook",
|
||||
amount: 1,
|
||||
details:
|
||||
"In the Builder, go to Settings and copy the Webhook URL. Use it to trigger your agent from another app.",
|
||||
},
|
||||
{
|
||||
id: "RUN_14_DAYS",
|
||||
name: "Run agents 14 days in a row",
|
||||
amount: 3,
|
||||
details:
|
||||
"Run any agents from the Library or Builder for 10 days in a row",
|
||||
progress: {
|
||||
current: state?.consecutiveRunDays || 0,
|
||||
target: 14,
|
||||
},
|
||||
},
|
||||
{
|
||||
id: "RUN_AGENTS_100",
|
||||
name: "Complete 100 agent runs",
|
||||
amount: 3,
|
||||
details: "Let your agents run and complete 100 tasks in total",
|
||||
progress: {
|
||||
current: state?.agentRuns || 0,
|
||||
target: 100,
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
];
|
||||
}, [state]);
|
||||
|
||||
const { credits, formatCredits, fetchCredits } = useCredits({
|
||||
fetchInitialCredits: true,
|
||||
});
|
||||
|
||||
const { state, updateState } = useOnboarding();
|
||||
const [prevCredits, setPrevCredits] = useState<number | null>(credits);
|
||||
const [flash, setFlash] = useState(false);
|
||||
const [walletOpen, setWalletOpen] = useState(state?.walletShown || false);
|
||||
|
||||
const [stepsLength, setStepsLength] = useState<number | null>(
|
||||
state?.completedSteps?.length || null,
|
||||
);
|
||||
const totalCount = useMemo(() => {
|
||||
return groups.reduce((acc, group) => acc + group.tasks.length, 0);
|
||||
}, [groups]);
|
||||
|
||||
// Get total completed count for all groups
|
||||
const completedCount = useMemo(() => {
|
||||
return groups.reduce(
|
||||
(acc, group) =>
|
||||
acc +
|
||||
group.tasks.filter((task) => state?.completedSteps?.includes(task.id))
|
||||
.length,
|
||||
0,
|
||||
);
|
||||
}, [groups, state?.completedSteps]);
|
||||
|
||||
// Needed to show confetti when a new step is completed
|
||||
const [stepsLength, setStepsLength] = useState(completedCount);
|
||||
|
||||
const walletRef = useRef<HTMLButtonElement | null>(null);
|
||||
|
||||
const onWalletOpen = useCallback(async () => {
|
||||
if (state?.notificationDot) {
|
||||
updateState({ notificationDot: false });
|
||||
if (!state?.walletShown) {
|
||||
updateState({ walletShown: true });
|
||||
}
|
||||
// Refresh credits when the wallet is opened
|
||||
fetchCredits();
|
||||
}, [state?.notificationDot, updateState, fetchCredits]);
|
||||
}, [state?.walletShown, updateState, fetchCredits]);
|
||||
|
||||
const fadeOut = useMemo(
|
||||
() =>
|
||||
@@ -54,20 +209,15 @@ export default function Wallet() {
|
||||
if (!state?.completedSteps) {
|
||||
return;
|
||||
}
|
||||
// If we haven't set the length yet, just set it and return
|
||||
if (stepsLength === null) {
|
||||
setStepsLength(state?.completedSteps?.length);
|
||||
return;
|
||||
}
|
||||
// It's enough to compare array lengths,
|
||||
// It's enough to check completed count,
|
||||
// because the order of completed steps is not important
|
||||
// If the length is the same, we don't need to do anything
|
||||
if (state?.completedSteps?.length === stepsLength) {
|
||||
// If the count is the same, we don't need to do anything
|
||||
if (completedCount === stepsLength) {
|
||||
return;
|
||||
}
|
||||
// Otherwise, we need to set the new length
|
||||
setStepsLength(state?.completedSteps?.length);
|
||||
// And make confetti
|
||||
setStepsLength(completedCount);
|
||||
// And emit confetti
|
||||
if (walletRef.current) {
|
||||
setTimeout(() => {
|
||||
fetchCredits();
|
||||
@@ -106,7 +256,7 @@ export default function Wallet() {
|
||||
}, [credits, prevCredits]);
|
||||
|
||||
return (
|
||||
<Popover>
|
||||
<Popover open={walletOpen} onOpenChange={setWalletOpen}>
|
||||
<PopoverTrigger asChild>
|
||||
<div className="relative inline-block">
|
||||
<button
|
||||
@@ -116,13 +266,18 @@ export default function Wallet() {
|
||||
)}
|
||||
onClick={onWalletOpen}
|
||||
>
|
||||
Wallet{" "}
|
||||
Earn credits{" "}
|
||||
<span className="text-sm font-semibold">
|
||||
{formatCredits(credits)}
|
||||
</span>
|
||||
{state?.notificationDot && (
|
||||
{completedCount < totalCount && (
|
||||
<span className="absolute right-1 top-1 h-2 w-2 rounded-full bg-violet-600"></span>
|
||||
)}
|
||||
<div className="absolute bottom-[-2.5rem] left-1/2 z-50 hidden -translate-x-1/2 transform whitespace-nowrap rounded-small bg-white px-4 py-2 shadow-md group-hover:block">
|
||||
<Text variant="body-medium">
|
||||
{completedCount} of {totalCount} rewards claimed
|
||||
</Text>
|
||||
</div>
|
||||
</button>
|
||||
<div
|
||||
className={cn(
|
||||
@@ -135,21 +290,21 @@ export default function Wallet() {
|
||||
<PopoverContent
|
||||
className={cn(
|
||||
"absolute -right-[7.9rem] -top-[3.2rem] z-50 w-[28.5rem] px-[0.625rem] py-2",
|
||||
"rounded-xl border-zinc-200 bg-zinc-50 shadow-[0_3px_3px] shadow-zinc-300",
|
||||
"rounded-xl border-zinc-100 bg-white shadow-[0_3px_3px] shadow-zinc-200",
|
||||
)}
|
||||
>
|
||||
{/* Header */}
|
||||
<div className="mx-1 flex items-center justify-between border-b border-zinc-300 pb-2">
|
||||
<div className="mx-1 flex items-center justify-between border-b border-zinc-200 pb-3">
|
||||
<span className="font-poppins font-medium text-zinc-900">
|
||||
Your wallet
|
||||
Your credits
|
||||
</span>
|
||||
<div className="flex items-center text-sm font-semibold text-violet-700">
|
||||
<div className="flex items-center text-sm text-violet-700">
|
||||
<div className="rounded-lg bg-violet-100 px-3 py-2">
|
||||
Wallet{" "}
|
||||
Earn credits{" "}
|
||||
<span className="font-semibold">{formatCredits(credits)}</span>
|
||||
</div>
|
||||
<PopoverClose>
|
||||
<X className="ml-[2.8rem] h-5 w-5 text-zinc-800 hover:text-foreground" />
|
||||
<X className="ml-2 h-5 w-5 text-zinc-800 hover:text-foreground" />
|
||||
</PopoverClose>
|
||||
</div>
|
||||
</div>
|
||||
@@ -159,13 +314,10 @@ export default function Wallet() {
|
||||
<WalletRefill />
|
||||
)}
|
||||
{/* Tasks */}
|
||||
<p className="mx-1 mt-4 font-sans text-xs font-medium text-violet-700">
|
||||
Onboarding tasks
|
||||
</p>
|
||||
<p className="mx-1 my-1 font-sans text-xs font-normal text-zinc-500">
|
||||
<p className="mx-1 my-3 font-sans text-xs font-normal text-zinc-400">
|
||||
Complete the following tasks to earn more credits!
|
||||
</p>
|
||||
<TaskGroups />
|
||||
<TaskGroups groups={groups} />
|
||||
</ScrollArea>
|
||||
</PopoverContent>
|
||||
</Popover>
|
||||
|
||||
@@ -4,9 +4,18 @@ import React, { useEffect, useState } from "react";
|
||||
import { Button } from "../../__legacy__/ui/button";
|
||||
import { QuestionMarkCircledIcon } from "@radix-ui/react-icons";
|
||||
import { useRouter, usePathname } from "next/navigation";
|
||||
import * as Sentry from "@sentry/nextjs";
|
||||
import { getCurrentUser } from "@/lib/supabase/actions";
|
||||
|
||||
const TallyPopupSimple = () => {
|
||||
const [isFormVisible, setIsFormVisible] = useState(false);
|
||||
const [sentryReplayId, setSentryReplayId] = useState("");
|
||||
const [replayUrl, setReplayUrl] = useState("");
|
||||
const [pageUrl, setPageUrl] = useState("");
|
||||
const [userAgent, setUserAgent] = useState("");
|
||||
const [isAuthenticated, setIsAuthenticated] = useState<boolean | null>(null);
|
||||
// const [userId, setUserId] = useState<string>("");
|
||||
const [userEmail, setUserEmail] = useState<string>("");
|
||||
const router = useRouter();
|
||||
const pathname = usePathname();
|
||||
|
||||
@@ -16,6 +25,35 @@ const TallyPopupSimple = () => {
|
||||
setShowTutorial(pathname.includes("build"));
|
||||
}, [pathname]);
|
||||
|
||||
useEffect(() => {
|
||||
// Set client-side values
|
||||
if (typeof window !== "undefined") {
|
||||
setPageUrl(window.location.href);
|
||||
setUserAgent(window.navigator.userAgent);
|
||||
|
||||
const replay = Sentry.getReplay();
|
||||
|
||||
if (replay) {
|
||||
const replayId = replay.getReplayId();
|
||||
|
||||
if (replayId) {
|
||||
setSentryReplayId(replayId);
|
||||
const orgSlug = "significant-gravitas";
|
||||
setReplayUrl(`https://${orgSlug}.sentry.io/replays/${replayId}/`);
|
||||
}
|
||||
}
|
||||
}
|
||||
}, [pathname]);
|
||||
|
||||
useEffect(() => {
|
||||
// Check authentication status using server action (works with httpOnly cookies)
|
||||
getCurrentUser().then(({ user }) => {
|
||||
setIsAuthenticated(user != null);
|
||||
// setUserId(user?.id || "");
|
||||
setUserEmail(user?.email || "");
|
||||
});
|
||||
}, [pathname]);
|
||||
|
||||
useEffect(() => {
|
||||
// Load Tally script
|
||||
const script = document.createElement("script");
|
||||
@@ -26,15 +64,40 @@ const TallyPopupSimple = () => {
|
||||
// Setup event listeners for Tally events
|
||||
const handleTallyMessage = (event: MessageEvent) => {
|
||||
if (typeof event.data === "string") {
|
||||
// Ignore iframe-resizer messages
|
||||
if (
|
||||
event.data.startsWith("[iFrameSize") ||
|
||||
event.data.startsWith("[iFrameResizer")
|
||||
) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const data = JSON.parse(event.data);
|
||||
|
||||
// Only process Tally events
|
||||
if (!data.event?.startsWith("Tally.")) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (data.event === "Tally.FormLoaded") {
|
||||
setIsFormVisible(true);
|
||||
|
||||
// Flush Sentry replay when form opens
|
||||
if (typeof window !== "undefined") {
|
||||
const replay = Sentry.getReplay();
|
||||
if (replay) {
|
||||
replay.flush();
|
||||
}
|
||||
}
|
||||
} else if (data.event === "Tally.PopupClosed") {
|
||||
setIsFormVisible(false);
|
||||
}
|
||||
} catch (error) {
|
||||
console.error("Error parsing Tally message:", error);
|
||||
// Only log errors for messages we care about
|
||||
if (event.data.includes("Tally")) {
|
||||
console.error("Error parsing Tally message:", error);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -48,7 +111,7 @@ const TallyPopupSimple = () => {
|
||||
}, []);
|
||||
|
||||
if (isFormVisible) {
|
||||
return null; // Hide the button when the form is visible
|
||||
return null;
|
||||
}
|
||||
|
||||
const resetTutorial = () => {
|
||||
@@ -72,6 +135,15 @@ const TallyPopupSimple = () => {
|
||||
data-tally-open="3yx2L0"
|
||||
data-tally-emoji-text="👋"
|
||||
data-tally-emoji-animation="wave"
|
||||
data-sentry-replay-id={sentryReplayId || "not-initialized"}
|
||||
data-sentry-replay-url={replayUrl || "not-initialized"}
|
||||
data-user-agent={userAgent}
|
||||
data-page-url={pageUrl}
|
||||
data-is-authenticated={
|
||||
isAuthenticated === null ? "unknown" : String(isAuthenticated)
|
||||
}
|
||||
data-email={userEmail || "not-authenticated"}
|
||||
// data-user-id={userId || "not-authenticated"}
|
||||
>
|
||||
<QuestionMarkCircledIcon className="h-14 w-14" />
|
||||
<span className="sr-only">Reach Out</span>
|
||||
|
||||
210
autogpt_platform/frontend/src/components/node-table-input.tsx
Normal file
210
autogpt_platform/frontend/src/components/node-table-input.tsx
Normal file
@@ -0,0 +1,210 @@
|
||||
import React, { FC, useCallback, useEffect, useState } from "react";
|
||||
|
||||
import { PlusIcon, XIcon } from "@phosphor-icons/react";
|
||||
import { cn } from "@/lib/utils";
|
||||
import NodeHandle from "@/app/(platform)/build/components/legacy-builder/NodeHandle";
|
||||
import {
|
||||
BlockIOTableSubSchema,
|
||||
TableRow,
|
||||
TableCellValue,
|
||||
} from "@/lib/autogpt-server-api/types";
|
||||
import { Input } from "./atoms/Input/Input";
|
||||
import { Button } from "./atoms/Button/Button";
|
||||
|
||||
interface NodeTableInputProps {
|
||||
/** Unique identifier for the node in the builder graph */
|
||||
nodeId: string;
|
||||
/** Key identifier for this specific input field within the node */
|
||||
selfKey: string;
|
||||
/** Schema definition for the table structure */
|
||||
schema: BlockIOTableSubSchema;
|
||||
/** Column headers for the table */
|
||||
headers: string[];
|
||||
/** Initial row data for the table */
|
||||
rows?: TableRow[];
|
||||
/** Validation errors mapped by field key */
|
||||
errors: { [key: string]: string | undefined };
|
||||
/** Graph connections between nodes in the builder */
|
||||
connections: {
|
||||
edge_id: string;
|
||||
source: string;
|
||||
sourceHandle: string;
|
||||
target: string;
|
||||
targetHandle: string;
|
||||
}[];
|
||||
/** Callback when table data changes */
|
||||
handleInputChange: (key: string, value: TableRow[]) => void;
|
||||
/** Callback when input field is clicked (for builder selection) */
|
||||
handleInputClick: (key: string) => void;
|
||||
/** Additional CSS classes */
|
||||
className?: string;
|
||||
/** Display name for the input field */
|
||||
displayName?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Table input component for the workflow builder interface.
|
||||
*
|
||||
* This component is specifically designed for use in the agent builder where users
|
||||
* design workflows with connected nodes. It includes graph connection capabilities
|
||||
* via NodeHandle and is tightly integrated with the builder's state management.
|
||||
*
|
||||
* @warning Do NOT use this component in runtime/execution contexts (like RunAgentInputs).
|
||||
* For runtime table inputs, use a simpler implementation without builder-specific features.
|
||||
*
|
||||
* @example
|
||||
* ```tsx
|
||||
* <NodeTableInput
|
||||
* nodeId="node-123"
|
||||
* selfKey="table_data"
|
||||
* schema={tableSchema}
|
||||
* headers={["Name", "Value"]}
|
||||
* rows={existingData}
|
||||
* connections={graphConnections}
|
||||
* handleInputChange={handleChange}
|
||||
* handleInputClick={handleClick}
|
||||
* errors={{}}
|
||||
* />
|
||||
* ```
|
||||
*
|
||||
* @see Used exclusively in: `/app/(platform)/build/components/legacy-builder/NodeInputs.tsx`
|
||||
*/
|
||||
export const NodeTableInput: FC<NodeTableInputProps> = ({
|
||||
nodeId,
|
||||
selfKey,
|
||||
schema,
|
||||
headers,
|
||||
rows = [],
|
||||
errors,
|
||||
connections,
|
||||
handleInputChange,
|
||||
handleInputClick: _handleInputClick,
|
||||
className,
|
||||
displayName,
|
||||
}) => {
|
||||
const [tableData, setTableData] = useState<TableRow[]>(rows);
|
||||
|
||||
// Sync with parent state when rows change
|
||||
useEffect(() => {
|
||||
setTableData(rows);
|
||||
}, [rows]);
|
||||
|
||||
const isConnected = (key: string) =>
|
||||
connections.some((c) => c.targetHandle === key && c.target === nodeId);
|
||||
|
||||
const updateTableData = useCallback(
|
||||
(newData: TableRow[]) => {
|
||||
setTableData(newData);
|
||||
handleInputChange(selfKey, newData);
|
||||
},
|
||||
[selfKey, handleInputChange],
|
||||
);
|
||||
|
||||
const updateCell = (
|
||||
rowIndex: number,
|
||||
header: string,
|
||||
value: TableCellValue,
|
||||
) => {
|
||||
const newData = [...tableData];
|
||||
if (!newData[rowIndex]) {
|
||||
newData[rowIndex] = {};
|
||||
}
|
||||
newData[rowIndex][header] = value;
|
||||
updateTableData(newData);
|
||||
};
|
||||
|
||||
const addRow = () => {
|
||||
if (!headers || headers.length === 0) {
|
||||
return;
|
||||
}
|
||||
const newRow: TableRow = {};
|
||||
headers.forEach((header) => {
|
||||
newRow[header] = "";
|
||||
});
|
||||
updateTableData([...tableData, newRow]);
|
||||
};
|
||||
|
||||
const removeRow = (index: number) => {
|
||||
const newData = tableData.filter((_, i) => i !== index);
|
||||
updateTableData(newData);
|
||||
};
|
||||
|
||||
return (
|
||||
<div className={cn("w-full space-y-2", className)}>
|
||||
<NodeHandle
|
||||
title={displayName || selfKey}
|
||||
keyName={selfKey}
|
||||
schema={schema}
|
||||
isConnected={isConnected(selfKey)}
|
||||
isRequired={false}
|
||||
side="left"
|
||||
/>
|
||||
|
||||
{!isConnected(selfKey) && (
|
||||
<div className="nodrag overflow-x-auto">
|
||||
<table className="w-full border-collapse">
|
||||
<thead>
|
||||
<tr>
|
||||
{headers.map((header, index) => (
|
||||
<th
|
||||
key={index}
|
||||
className="border border-gray-300 bg-gray-100 px-2 py-1 text-left text-sm font-medium dark:border-gray-600 dark:bg-gray-800"
|
||||
>
|
||||
{header}
|
||||
</th>
|
||||
))}
|
||||
<th className="w-10"></th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{tableData.map((row, rowIndex) => (
|
||||
<tr key={rowIndex}>
|
||||
{headers.map((header, colIndex) => (
|
||||
<td
|
||||
key={colIndex}
|
||||
className="border border-gray-300 p-1 dark:border-gray-600"
|
||||
>
|
||||
<Input
|
||||
id={`${selfKey}-${rowIndex}-${header}`}
|
||||
label={header}
|
||||
type="text"
|
||||
value={String(row[header] || "")}
|
||||
onChange={(e) =>
|
||||
updateCell(rowIndex, header, e.target.value)
|
||||
}
|
||||
className="h-8 w-full"
|
||||
placeholder={`Enter ${header}`}
|
||||
/>
|
||||
</td>
|
||||
))}
|
||||
<td className="p-1">
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="small"
|
||||
onClick={() => removeRow(rowIndex)}
|
||||
className="h-8 w-8 p-0"
|
||||
>
|
||||
<XIcon />
|
||||
</Button>
|
||||
</td>
|
||||
</tr>
|
||||
))}
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
<Button
|
||||
className="mt-2 bg-gray-200 font-normal text-black hover:text-white dark:bg-gray-700 dark:text-white dark:hover:bg-gray-600"
|
||||
onClick={addRow}
|
||||
size="small"
|
||||
>
|
||||
<PlusIcon className="mr-2" /> Add Row
|
||||
</Button>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{errors[selfKey] && (
|
||||
<span className="text-sm text-red-500">{errors[selfKey]}</span>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
};
|
||||
@@ -58,6 +58,7 @@ export type BlockIOSimpleTypeSubSchema =
|
||||
| BlockIOCredentialsSubSchema
|
||||
| BlockIOKVSubSchema
|
||||
| BlockIOArraySubSchema
|
||||
| BlockIOTableSubSchema
|
||||
| BlockIOStringSubSchema
|
||||
| BlockIONumberSubSchema
|
||||
| BlockIOBooleanSubSchema
|
||||
@@ -78,6 +79,7 @@ export enum DataType {
|
||||
OBJECT = "object",
|
||||
KEY_VALUE = "key-value",
|
||||
ARRAY = "array",
|
||||
TABLE = "table",
|
||||
}
|
||||
|
||||
export type BlockIOSubSchemaMeta = {
|
||||
@@ -114,6 +116,20 @@ export type BlockIOArraySubSchema = BlockIOSubSchemaMeta & {
|
||||
secret?: boolean;
|
||||
};
|
||||
|
||||
// Table cell values are typically primitives
|
||||
export type TableCellValue = string | number | boolean | null;
|
||||
|
||||
export type TableRow = Record<string, TableCellValue>;
|
||||
|
||||
export type BlockIOTableSubSchema = BlockIOSubSchemaMeta & {
|
||||
type: "array";
|
||||
format: "table";
|
||||
items: BlockIOObjectSubSchema;
|
||||
const?: TableRow[];
|
||||
default?: TableRow[];
|
||||
secret?: boolean;
|
||||
};
|
||||
|
||||
export type BlockIOStringSubSchema = BlockIOSubSchemaMeta & {
|
||||
type: "string";
|
||||
enum?: string[];
|
||||
@@ -192,6 +208,7 @@ type BlockIOCombinedTypeSubSchema = BlockIOSubSchemaMeta & {
|
||||
anyOf: BlockIOSimpleTypeSubSchema[];
|
||||
default?: string | number | boolean | null;
|
||||
secret?: boolean;
|
||||
format?: string; // For table format and other formats on anyOf schemas
|
||||
}
|
||||
| BlockIOOneOfSubSchema
|
||||
| BlockIODiscriminatedOneOfSubSchema
|
||||
@@ -916,6 +933,7 @@ export interface RefundRequest {
|
||||
}
|
||||
|
||||
export type OnboardingStep =
|
||||
// Introductory onboarding (Library)
|
||||
| "WELCOME"
|
||||
| "USAGE_REASON"
|
||||
| "INTEGRATIONS"
|
||||
@@ -923,18 +941,28 @@ export type OnboardingStep =
|
||||
| "AGENT_NEW_RUN"
|
||||
| "AGENT_INPUT"
|
||||
| "CONGRATS"
|
||||
// First Wins
|
||||
| "GET_RESULTS"
|
||||
| "RUN_AGENTS"
|
||||
| "MARKETPLACE_VISIT"
|
||||
| "MARKETPLACE_ADD_AGENT"
|
||||
| "MARKETPLACE_RUN_AGENT"
|
||||
| "BUILDER_OPEN"
|
||||
| "BUILDER_SAVE_AGENT"
|
||||
// Consistency Challenge
|
||||
| "RE_RUN_AGENT"
|
||||
| "SCHEDULE_AGENT"
|
||||
| "RUN_AGENTS"
|
||||
| "RUN_3_DAYS"
|
||||
// The Pro Playground
|
||||
| "TRIGGER_WEBHOOK"
|
||||
| "RUN_14_DAYS"
|
||||
| "RUN_AGENTS_100"
|
||||
// No longer used but tracked
|
||||
| "BUILDER_OPEN"
|
||||
| "BUILDER_RUN_AGENT";
|
||||
|
||||
export interface UserOnboarding {
|
||||
completedSteps: OnboardingStep[];
|
||||
notificationDot: boolean;
|
||||
walletShown: boolean;
|
||||
notified: OnboardingStep[];
|
||||
rewardedFor: OnboardingStep[];
|
||||
usageReason: string | null;
|
||||
@@ -943,6 +971,8 @@ export interface UserOnboarding {
|
||||
selectedStoreListingVersionId: string | null;
|
||||
agentInput: Record<string, string | number> | null;
|
||||
onboardingAgentExecutionId: GraphExecutionID | null;
|
||||
lastRunAt: Date | null;
|
||||
consecutiveRunDays: number;
|
||||
agentRuns: number;
|
||||
}
|
||||
|
||||
@@ -1061,6 +1091,10 @@ function _handleSingleTypeSchema(subSchema: BlockIOSubSchema): DataType {
|
||||
return DataType.NUMBER;
|
||||
}
|
||||
if (subSchema.type === "array") {
|
||||
// Check for table format first
|
||||
if ("format" in subSchema && subSchema.format === "table") {
|
||||
return DataType.TABLE;
|
||||
}
|
||||
/** Commented code below since we haven't yet support rendering of a multi-select with array { items: enum } type */
|
||||
// if ("items" in subSchema && subSchema.items && "enum" in subSchema.items) {
|
||||
// return DataType.MULTI_SELECT; // array + enum => multi-select
|
||||
@@ -1140,6 +1174,11 @@ export function determineDataType(schema: BlockIOSubSchema): DataType {
|
||||
|
||||
// (array | null)
|
||||
if (types.includes("array") && types.includes("null")) {
|
||||
// Check for table format on the parent schema (where anyOf is)
|
||||
if ("format" in schema && schema.format === "table") {
|
||||
return DataType.TABLE;
|
||||
}
|
||||
|
||||
const arrSchema = schema.anyOf.find((s) => s.type === "array");
|
||||
if (arrSchema) return _handleSingleTypeSchema(arrSchema);
|
||||
return DataType.ARRAY;
|
||||
|
||||
@@ -94,7 +94,19 @@ export default function OnboardingProvider({
|
||||
|
||||
// Only update state if onboarding data is valid
|
||||
if (onboarding) {
|
||||
setState((prev) => ({ ...onboarding, ...prev }));
|
||||
//todo kcze this is a patch because only TRIGGER_WEBHOOK is set on the backend and then overwritten by the frontend
|
||||
const completeWebhook =
|
||||
onboarding.rewardedFor.includes("TRIGGER_WEBHOOK") &&
|
||||
!onboarding.completedSteps.includes("TRIGGER_WEBHOOK")
|
||||
? (["TRIGGER_WEBHOOK"] as OnboardingStep[])
|
||||
: [];
|
||||
|
||||
setState((prev) => ({
|
||||
...onboarding,
|
||||
completedSteps: [...completeWebhook, ...onboarding.completedSteps],
|
||||
lastRunAt: new Date(onboarding.lastRunAt || ""),
|
||||
...prev,
|
||||
}));
|
||||
|
||||
// Redirect outside onboarding if completed
|
||||
// If user did CONGRATS step, that means they completed introductory onboarding
|
||||
@@ -125,7 +137,7 @@ export default function OnboardingProvider({
|
||||
// Handle initial state
|
||||
return {
|
||||
completedSteps: [],
|
||||
notificationDot: false,
|
||||
walletShown: true,
|
||||
notified: [],
|
||||
rewardedFor: [],
|
||||
usageReason: null,
|
||||
@@ -135,12 +147,13 @@ export default function OnboardingProvider({
|
||||
agentInput: null,
|
||||
onboardingAgentExecutionId: null,
|
||||
agentRuns: 0,
|
||||
lastRunAt: null,
|
||||
consecutiveRunDays: 0,
|
||||
...newState,
|
||||
};
|
||||
}
|
||||
return { ...prev, ...newState };
|
||||
});
|
||||
|
||||
// Make the API call asynchronously to not block render
|
||||
setTimeout(() => {
|
||||
api.updateUserOnboarding(newState).catch((error) => {
|
||||
@@ -167,21 +180,61 @@ export default function OnboardingProvider({
|
||||
[state, updateState],
|
||||
);
|
||||
|
||||
const incrementRuns = useCallback(() => {
|
||||
if (
|
||||
!state ||
|
||||
!state.completedSteps ||
|
||||
state.completedSteps.includes("RUN_AGENTS")
|
||||
)
|
||||
return;
|
||||
const isToday = useCallback((date: Date) => {
|
||||
const today = new Date();
|
||||
|
||||
const finished = state.agentRuns + 1 >= 10;
|
||||
setNpsDialogOpen(finished);
|
||||
return (
|
||||
date.getDate() === today.getDate() &&
|
||||
date.getMonth() === today.getMonth() &&
|
||||
date.getFullYear() === today.getFullYear()
|
||||
);
|
||||
}, []);
|
||||
|
||||
const isYesterday = useCallback((date: Date): boolean => {
|
||||
const yesterday = new Date();
|
||||
yesterday.setDate(yesterday.getDate() - 1);
|
||||
|
||||
return (
|
||||
date.getDate() === yesterday.getDate() &&
|
||||
date.getMonth() === yesterday.getMonth() &&
|
||||
date.getFullYear() === yesterday.getFullYear()
|
||||
);
|
||||
}, []);
|
||||
|
||||
const incrementRuns = useCallback(() => {
|
||||
if (!state || !state.completedSteps) return;
|
||||
|
||||
const tenRuns = state.agentRuns + 1 === 10;
|
||||
const hundredRuns = state.agentRuns + 1 === 100;
|
||||
// Calculate if it's a run on a consecutive day
|
||||
// If the last run was yesterday, increment days
|
||||
// Otherwise, if the last run was *not* today reset it (already checked that it wasn't yesterday at this point)
|
||||
// Otherwise, don't do anything (the last run was today)
|
||||
const consecutive =
|
||||
state.lastRunAt === null || isYesterday(state.lastRunAt)
|
||||
? {
|
||||
lastRunAt: new Date(),
|
||||
consecutiveRunDays: state.consecutiveRunDays + 1,
|
||||
}
|
||||
: !isToday(state.lastRunAt)
|
||||
? { lastRunAt: new Date(), consecutiveRunDays: 1 }
|
||||
: {};
|
||||
|
||||
setNpsDialogOpen(tenRuns);
|
||||
updateState({
|
||||
agentRuns: state.agentRuns + 1,
|
||||
...(finished && {
|
||||
completedSteps: [...state.completedSteps, "RUN_AGENTS"],
|
||||
}),
|
||||
completedSteps: [
|
||||
...state.completedSteps,
|
||||
...(tenRuns ? (["RUN_AGENTS"] as OnboardingStep[]) : []),
|
||||
...(hundredRuns ? (["RUN_AGENTS_100"] as OnboardingStep[]) : []),
|
||||
...(consecutive.consecutiveRunDays === 3
|
||||
? (["RUN_3_DAYS"] as OnboardingStep[])
|
||||
: []),
|
||||
...(consecutive.consecutiveRunDays === 14
|
||||
? (["RUN_14_DAYS"] as OnboardingStep[])
|
||||
: []),
|
||||
],
|
||||
...consecutive,
|
||||
});
|
||||
}, [state, updateState]);
|
||||
|
||||
|
||||
@@ -60,7 +60,7 @@ test("user can signup with custom credentials", async ({ page }) => {
|
||||
|
||||
try {
|
||||
const customEmail = generateTestEmail();
|
||||
const customPassword = generateTestPassword();
|
||||
const customPassword = await generateTestPassword();
|
||||
|
||||
const testUser = await signupTestUser(page, customEmail, customPassword);
|
||||
|
||||
@@ -82,7 +82,7 @@ test("user can signup with existing email handling", async ({
|
||||
}) => {
|
||||
try {
|
||||
const testEmail = generateTestEmail();
|
||||
const testPassword = generateTestPassword();
|
||||
const testPassword = await generateTestPassword();
|
||||
|
||||
// First signup
|
||||
const firstUser = await signupTestUser(page, testEmail, testPassword);
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import { faker } from "@faker-js/faker";
|
||||
import fs from "fs";
|
||||
import path from "path";
|
||||
import { signupTestUser } from "./signup";
|
||||
@@ -22,6 +21,7 @@ export async function createTestUser(
|
||||
password?: string,
|
||||
ignoreOnboarding: boolean = true,
|
||||
): Promise<TestUser> {
|
||||
const { faker } = await import("@faker-js/faker");
|
||||
const userEmail = email || faker.internet.email();
|
||||
const userPassword = password || faker.internet.password({ length: 12 });
|
||||
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import { faker } from "@faker-js/faker";
|
||||
import { TestUser } from "./auth";
|
||||
import { getSelectors } from "./selectors";
|
||||
import { isVisible } from "./assertion";
|
||||
@@ -11,6 +10,7 @@ export async function signupTestUser(
|
||||
ignoreOnboarding: boolean = true,
|
||||
withAgent: boolean = false,
|
||||
): Promise<TestUser> {
|
||||
const { faker } = await import("@faker-js/faker");
|
||||
const userEmail = email || faker.internet.email();
|
||||
const userPassword = password || faker.internet.password({ length: 12 });
|
||||
|
||||
@@ -152,6 +152,7 @@ export function generateTestEmail(): string {
|
||||
return `test.${Date.now()}.${Math.random().toString(36).substring(7)}@example.com`;
|
||||
}
|
||||
|
||||
export function generateTestPassword(): string {
|
||||
export async function generateTestPassword(): Promise<string> {
|
||||
const { faker } = await import("@faker-js/faker");
|
||||
return faker.internet.password({ length: 12 });
|
||||
}
|
||||
|
||||
79
docs/content/platform/blocks/ai_condition.md
Normal file
79
docs/content/platform/blocks/ai_condition.md
Normal file
@@ -0,0 +1,79 @@
|
||||
# AI Condition Block
|
||||
|
||||
## What it is
|
||||
The AI Condition Block is a logical component that uses artificial intelligence to evaluate natural language conditions and produces outputs based on the result. This block allows you to define conditions in plain English rather than using traditional comparison operators.
|
||||
|
||||
## What it does
|
||||
This block takes an input value and a natural language condition, then uses AI to determine whether the input satisfies the condition. Based on the result, it provides conditional outputs similar to a traditional if/else statement but with the flexibility of natural language evaluation.
|
||||
|
||||
## How it works
|
||||
The block uses a Large Language Model (LLM) to evaluate the condition by:
|
||||
1. Converting the input value to a string representation
|
||||
2. Sending a carefully crafted prompt to the AI asking it to evaluate whether the input meets the specified condition
|
||||
3. Parsing the AI's response to determine a true/false result
|
||||
4. Outputting the appropriate value based on the result
|
||||
|
||||
## Inputs
|
||||
| Input | Description |
|
||||
|-------|-------------|
|
||||
| Input Value | The value to be evaluated (can be text, number, or any data type) |
|
||||
| Condition | A plaintext English description of the condition to evaluate |
|
||||
| Yes Value | (Optional) The value to output if the condition is true. If not provided, Input Value will be used |
|
||||
| No Value | (Optional) The value to output if the condition is false. If not provided, Input Value will be used |
|
||||
| Model | The LLM model to use for evaluation (defaults to GPT-4o) |
|
||||
| Credentials | API credentials for the LLM provider |
|
||||
|
||||
## Outputs
|
||||
| Output | Description |
|
||||
|--------|-------------|
|
||||
| Result | A boolean value (true or false) indicating whether the condition was met |
|
||||
| Yes Output | The output value if the condition is true. This will be the Yes Value if provided, or Input Value if not |
|
||||
| No Output | The output value if the condition is false. This will be the No Value if provided, or Input Value if not |
|
||||
| Error Message | Error message if the AI evaluation is uncertain or fails (empty string if successful) |
|
||||
|
||||
## Examples
|
||||
|
||||
### Email Address Validation
|
||||
- **Input Value**: `"john@example.com"`
|
||||
- **Condition**: `"the input is an email address"`
|
||||
- **Result**: `true`
|
||||
- **Yes Output**: `"john@example.com"` (or custom Yes Value)
|
||||
|
||||
### Geographic Location Check
|
||||
- **Input Value**: `"San Francisco"`
|
||||
- **Condition**: `"the input is a city in the USA"`
|
||||
- **Result**: `true`
|
||||
- **Yes Output**: `"San Francisco"` (or custom Yes Value)
|
||||
|
||||
### Error Detection
|
||||
- **Input Value**: `"Error: Connection timeout"`
|
||||
- **Condition**: `"the input is an error message or refusal"`
|
||||
- **Result**: `true`
|
||||
- **Yes Output**: `"Error: Connection timeout"` (or custom Yes Value)
|
||||
|
||||
### Content Classification
|
||||
- **Input Value**: `"This is a detailed explanation of how machine learning works..."`
|
||||
- **Condition**: `"the input is the body of an email"`
|
||||
- **Result**: `false` (it's more like article content)
|
||||
- **No Output**: Custom No Value or the input value
|
||||
|
||||
## Possible Use Cases
|
||||
- **Content Classification**: Automatically classify text content (emails, articles, comments, etc.)
|
||||
- **Data Validation**: Validate input data using natural language rules
|
||||
- **Smart Routing**: Route data through different paths based on AI-evaluated conditions
|
||||
- **Quality Control**: Check if content meets certain quality or format standards
|
||||
- **Language Detection**: Determine if text is in a specific language or style
|
||||
- **Sentiment Analysis**: Evaluate if content has positive, negative, or neutral sentiment
|
||||
- **Error Handling**: Detect and route error messages or problematic inputs
|
||||
|
||||
## Advantages over Traditional Condition Blocks
|
||||
- **Flexibility**: Can handle complex, nuanced conditions that would be difficult to express with simple comparisons
|
||||
- **Natural Language**: Uses everyday language instead of programming logic
|
||||
- **Context Awareness**: AI can understand context and meaning, not just exact matches
|
||||
- **Adaptability**: Can handle variations in input format and wording
|
||||
|
||||
## Considerations
|
||||
- **Performance**: Requires an API call to an LLM, which adds latency compared to traditional conditions
|
||||
- **Cost**: Each evaluation consumes LLM tokens, which has associated costs
|
||||
- **Reliability**: AI responses may occasionally be inconsistent, so critical logic should include fallback handling
|
||||
- **Network Dependency**: Requires internet connectivity to access the LLM API
|
||||
Reference in New Issue
Block a user