Merge branch 'dev' into swiftyos/caching-pt2
@@ -4,6 +4,7 @@ import logging
 import os
 import socket
 import sys
+from logging.handlers import RotatingFileHandler
 from pathlib import Path
 
 from pydantic import Field, field_validator
@@ -139,8 +140,13 @@ def configure_logging(force_cloud_logging: bool = False) -> None:
     print(f"Log directory: {config.log_dir}")
 
     # Activity log handler (INFO and above)
-    activity_log_handler = logging.FileHandler(
-        config.log_dir / LOG_FILE, "a", "utf-8"
+    # Security fix: Use RotatingFileHandler with size limits to prevent disk exhaustion
+    activity_log_handler = RotatingFileHandler(
+        config.log_dir / LOG_FILE,
+        mode="a",
+        encoding="utf-8",
+        maxBytes=10 * 1024 * 1024,  # 10MB per file
+        backupCount=3,  # Keep 3 backup files (40MB total)
     )
     activity_log_handler.setLevel(config.level)
     activity_log_handler.setFormatter(
@@ -150,8 +156,13 @@ def configure_logging(force_cloud_logging: bool = False) -> None:
 
     if config.level == logging.DEBUG:
         # Debug log handler (all levels)
-        debug_log_handler = logging.FileHandler(
-            config.log_dir / DEBUG_LOG_FILE, "a", "utf-8"
+        # Security fix: Use RotatingFileHandler with size limits
+        debug_log_handler = RotatingFileHandler(
+            config.log_dir / DEBUG_LOG_FILE,
+            mode="a",
+            encoding="utf-8",
+            maxBytes=10 * 1024 * 1024,  # 10MB per file
+            backupCount=3,  # Keep 3 backup files (40MB total)
         )
         debug_log_handler.setLevel(logging.DEBUG)
         debug_log_handler.setFormatter(
@@ -160,8 +171,13 @@ def configure_logging(force_cloud_logging: bool = False) -> None:
         log_handlers.append(debug_log_handler)
 
     # Error log handler (ERROR and above)
-    error_log_handler = logging.FileHandler(
-        config.log_dir / ERROR_LOG_FILE, "a", "utf-8"
+    # Security fix: Use RotatingFileHandler with size limits
+    error_log_handler = RotatingFileHandler(
+        config.log_dir / ERROR_LOG_FILE,
+        mode="a",
+        encoding="utf-8",
+        maxBytes=10 * 1024 * 1024,  # 10MB per file
+        backupCount=3,  # Keep 3 backup files (40MB total)
    )
    error_log_handler.setLevel(logging.ERROR)
    error_log_handler.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT, no_color=True))
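Switching from FileHandler to RotatingFileHandler bounds disk usage at (backupCount + 1) * maxBytes per log, i.e. 40MB for 10MB files with 3 backups. A minimal standalone sketch of the rotation behavior, using small illustrative values rather than the config above:

import logging
from logging.handlers import RotatingFileHandler

handler = RotatingFileHandler(
    "activity.log",
    mode="a",
    encoding="utf-8",
    maxBytes=1024,  # demo value: rotate once the file exceeds ~1KB
    backupCount=3,  # keep activity.log.1 .. activity.log.3; older data is dropped
)
logger = logging.getLogger("rotation_demo")
logger.addHandler(handler)
logger.setLevel(logging.INFO)

for i in range(200):
    logger.info("message %d: %s", i, "x" * 50)

# Disk usage is now bounded by (backupCount + 1) * maxBytes = 4KB here,
# instead of growing without limit as with a plain FileHandler.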
@@ -1,7 +1,8 @@
 from enum import Enum
-from typing import Literal, Optional
+from typing import Any, Literal, Optional
+
 from e2b_code_interpreter import AsyncSandbox
 from e2b_code_interpreter import Result as E2BExecutionResult
 from e2b_code_interpreter.charts import Chart as E2BExecutionResultChart
 from pydantic import BaseModel, JsonValue, SecretStr
 
@@ -37,7 +38,7 @@ class ProgrammingLanguage(Enum):
     JAVA = "java"
 
 
-class CodeExecutionResult(BaseModel):
+class MainCodeExecutionResult(BaseModel):
     """
     *Pydantic model mirroring `e2b_code_interpreter.Result`*
 
@@ -47,7 +48,7 @@ class CodeExecutionResult(BaseModel):
     The result can contain multiple types of data, such as text, images, plots, etc. Each type of data is represented
     as a string, and the result can contain multiple types of data. The display calls don't have to have text representation,
     for the actual result the representation is always present for the result, the other representations are always optional.
-    """
+    """  # noqa
 
     class Chart(BaseModel, E2BExecutionResultChart):
         pass
@@ -68,14 +69,104 @@ class CodeExecutionResult(BaseModel):
     """Extra data that can be included. Not part of the standard types."""
 
 
-class CodeExecutionBlock(Block):
+class CodeExecutionResult(MainCodeExecutionResult):
+    __doc__ = MainCodeExecutionResult.__doc__
+
+    is_main_result: bool = False
+    """Whether this data is the main result of the cell. Data can be produced by display calls of which can be multiple in a cell."""  # noqa
+
+
+class BaseE2BExecutorMixin:
+    """Shared implementation methods for E2B executor blocks."""
+
+    async def execute_code(
+        self,
+        api_key: str,
+        code: str,
+        language: ProgrammingLanguage,
+        template_id: str = "",
+        setup_commands: Optional[list[str]] = None,
+        timeout: Optional[int] = None,
+        sandbox_id: Optional[str] = None,
+        dispose_sandbox: bool = False,
+    ):
+        """
+        Unified code execution method that handles all three use cases:
+        1. Create new sandbox and execute (ExecuteCodeBlock)
+        2. Create new sandbox, execute, and return sandbox_id (InstantiateCodeSandboxBlock)
+        3. Connect to existing sandbox and execute (ExecuteCodeStepBlock)
+        """  # noqa
+        sandbox = None
+        try:
+            if sandbox_id:
+                # Connect to existing sandbox (ExecuteCodeStepBlock case)
+                sandbox = await AsyncSandbox.connect(
+                    sandbox_id=sandbox_id, api_key=api_key
+                )
+            else:
+                # Create new sandbox (ExecuteCodeBlock/InstantiateCodeSandboxBlock case)
+                sandbox = await AsyncSandbox.create(
+                    api_key=api_key, template=template_id, timeout=timeout
+                )
+                if setup_commands:
+                    for cmd in setup_commands:
+                        await sandbox.commands.run(cmd)
+
+            # Execute the code
+            execution = await sandbox.run_code(
+                code,
+                language=language.value,
+                on_error=lambda e: sandbox.kill(),  # Kill the sandbox on error
+            )
+
+            if execution.error:
+                raise Exception(execution.error)
+
+            results = execution.results
+            text_output = execution.text
+            stdout_logs = "".join(execution.logs.stdout)
+            stderr_logs = "".join(execution.logs.stderr)
+
+            return results, text_output, stdout_logs, stderr_logs, sandbox.sandbox_id
+        finally:
+            # Dispose of sandbox if requested to reduce usage costs
+            if dispose_sandbox and sandbox:
+                await sandbox.kill()
+
+    def process_execution_results(
+        self, results: list[E2BExecutionResult]
+    ) -> tuple[dict[str, Any] | None, list[dict[str, Any]]]:
+        """Process and filter execution results."""
+        # Filter out empty formats and convert to dicts
+        processed_results = [
+            {
+                f: value
+                for f in [*r.formats(), "extra", "is_main_result"]
+                if (value := getattr(r, f, None)) is not None
+            }
+            for r in results
+        ]
+        if main_result := next(
+            (r for r in processed_results if r.get("is_main_result")), None
+        ):
+            # Make main_result a copy we can modify & remove is_main_result
+            (main_result := {**main_result}).pop("is_main_result")
+
+        return main_result, processed_results
+
+
+class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
     # TODO : Add support to upload and download files
-    # Currently, You can customized the CPU and Memory, only by creating a pre customized sandbox template
+    # NOTE: Currently, you can only customize the CPU and Memory
+    # by creating a pre customized sandbox template
     class Input(BlockSchema):
         credentials: CredentialsMetaInput[
             Literal[ProviderName.E2B], Literal["api_key"]
         ] = CredentialsField(
-            description="Enter your api key for the E2B Sandbox. You can get it in here - https://e2b.dev/docs",
+            description=(
+                "Enter your API key for the E2B platform. "
+                "You can get it in here - https://e2b.dev/docs"
+            ),
        )
 
        # Todo : Option to run commond in background
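The process_execution_results helper added above normalizes e2b results into plain dicts, keeping only the representations that are actually populated. A standalone sketch of the same filtering, using a stand-in result class instead of the real e2b Result (illustrative only):

from typing import Any, Optional

class FakeResult:
    """Stand-in for e2b_code_interpreter.Result, for illustration only."""

    def __init__(self, text: Optional[str] = None, png: Optional[str] = None,
                 is_main_result: bool = False):
        self.text = text
        self.png = png
        self.extra = None
        self.is_main_result = is_main_result

    def formats(self) -> list[str]:
        # The real Result reports which representations it carries.
        return ["text", "png"]

def to_dicts(results: list[FakeResult]) -> list[dict[str, Any]]:
    # Same shape as the comprehension above: keep a format only if the
    # attribute is populated, binding it once via the walrus operator.
    return [
        {
            f: value
            for f in [*r.formats(), "extra", "is_main_result"]
            if (value := getattr(r, f, None)) is not None
        }
        for r in results
    ]

print(to_dicts([FakeResult(text="42", is_main_result=True), FakeResult(png="iVBOR...")]))
# [{'text': '42', 'is_main_result': True}, {'png': 'iVBOR...', 'is_main_result': False}]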
@@ -108,6 +199,14 @@ class CodeExecutionBlock(Block):
             description="Execution timeout in seconds", default=300
         )
 
+        dispose_sandbox: bool = SchemaField(
+            description=(
+                "Whether to dispose of the sandbox immediately after execution. "
+                "If disabled, the sandbox will run until its timeout expires."
+            ),
+            default=True,
+        )
+
         template_id: str = SchemaField(
             description=(
                 "You can use an E2B sandbox template by entering its ID here. "
@@ -119,7 +218,7 @@ class CodeExecutionBlock(Block):
         )
 
     class Output(BlockSchema):
-        main_result: CodeExecutionResult = SchemaField(
+        main_result: MainCodeExecutionResult = SchemaField(
             title="Main Result", description="The main result from the code execution"
         )
         results: list[CodeExecutionResult] = SchemaField(
@@ -138,10 +237,10 @@ class CodeExecutionBlock(Block):
     def __init__(self):
         super().__init__(
             id="0b02b072-abe7-11ef-8372-fb5d162dd712",
-            description="Executes code in an isolated sandbox environment with internet access.",
+            description="Executes code in a sandbox environment with internet access.",
             categories={BlockCategory.DEVELOPER_TOOLS},
-            input_schema=CodeExecutionBlock.Input,
-            output_schema=CodeExecutionBlock.Output,
+            input_schema=ExecuteCodeBlock.Input,
+            output_schema=ExecuteCodeBlock.Output,
            test_credentials=TEST_CREDENTIALS,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
@@ -157,102 +256,54 @@ class CodeExecutionBlock(Block):
             ("stdout_logs", "Hello World\n"),
         ],
         test_mock={
-            "execute_code": lambda code, language, setup_commands, timeout, api_key, template_id: (
+            "execute_code": lambda api_key, code, language, template_id, setup_commands, timeout, dispose_sandbox: (  # noqa
                 [],  # results
                 "Hello World",  # text_output
                 "Hello World\n",  # stdout_logs
                 "",  # stderr_logs
+                "sandbox_id",  # sandbox_id
             ),
         },
     )
 
-    async def execute_code(
-        self,
-        code: str,
-        language: ProgrammingLanguage,
-        setup_commands: list[str],
-        timeout: int,
-        api_key: str,
-        template_id: str,
-    ):
-        try:
-            sandbox = None
-            if template_id:
-                sandbox = await AsyncSandbox.create(
-                    template=template_id, api_key=api_key, timeout=timeout
-                )
-            else:
-                sandbox = await AsyncSandbox.create(api_key=api_key, timeout=timeout)
-
-            if not sandbox:
-                raise Exception("Sandbox not created")
-
-            # Running setup commands
-            for cmd in setup_commands:
-                await sandbox.commands.run(cmd)
-
-            # Executing the code
-            execution = await sandbox.run_code(
-                code,
-                language=language.value,
-                on_error=lambda e: sandbox.kill(),  # Kill the sandbox if there is an error
-            )
-
-            if execution.error:
-                raise Exception(execution.error)
-
-            results = execution.results
-            text_output = execution.text
-            stdout_logs = "".join(execution.logs.stdout)
-            stderr_logs = "".join(execution.logs.stderr)
-
-            return results, text_output, stdout_logs, stderr_logs
-
-        except Exception as e:
-            raise e
-
     async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            results, text_output, stdout_logs, stderr_logs = await self.execute_code(
-                input_data.code,
-                input_data.language,
-                input_data.setup_commands,
-                input_data.timeout,
-                credentials.api_key.get_secret_value(),
-                input_data.template_id,
+            results, text_output, stdout, stderr, _ = await self.execute_code(
+                api_key=credentials.api_key.get_secret_value(),
+                code=input_data.code,
+                language=input_data.language,
+                template_id=input_data.template_id,
+                setup_commands=input_data.setup_commands,
+                timeout=input_data.timeout,
+                dispose_sandbox=input_data.dispose_sandbox,
             )
 
-            # Determine result object shape & filter out empty formats
-            results = [
-                {
-                    f: r[f]
-                    for f in [*r.formats(), "extra", "is_main_result"]
-                    if getattr(r, f, None) is not None
-                }
-                for r in results
-            ]
+            main_result, results = self.process_execution_results(results)
+            if main_result:
+                yield "main_result", main_result
             yield "results", results
-            for r in results:
-                if r.pop("is_main_result", False):
-                    yield "main_result", r
             if text_output:
                 yield "response", text_output
-            if stdout_logs:
-                yield "stdout_logs", stdout_logs
-            if stderr_logs:
-                yield "stderr_logs", stderr_logs
+            if stdout:
+                yield "stdout_logs", stdout
+            if stderr:
+                yield "stderr_logs", stderr
         except Exception as e:
             yield "error", str(e)
 
 
-class InstantiationBlock(Block):
+class InstantiateCodeSandboxBlock(Block, BaseE2BExecutorMixin):
     class Input(BlockSchema):
         credentials: CredentialsMetaInput[
             Literal[ProviderName.E2B], Literal["api_key"]
         ] = CredentialsField(
-            description="Enter your api key for the E2B Sandbox. You can get it in here - https://e2b.dev/docs",
+            description=(
+                "Enter your API key for the E2B platform. "
+                "You can get it in here - https://e2b.dev/docs"
+            )
        )
 
        # Todo : Option to run commond in background
@@ -310,10 +361,13 @@ class InstantiationBlock(Block):
     def __init__(self):
         super().__init__(
             id="ff0861c9-1726-4aec-9e5b-bf53f3622112",
-            description="Instantiate an isolated sandbox environment with internet access where to execute code in.",
+            description=(
+                "Instantiate a sandbox environment with internet access "
+                "in which you can execute code with the Execute Code Step block."
+            ),
             categories={BlockCategory.DEVELOPER_TOOLS},
-            input_schema=InstantiationBlock.Input,
-            output_schema=InstantiationBlock.Output,
+            input_schema=InstantiateCodeSandboxBlock.Input,
+            output_schema=InstantiateCodeSandboxBlock.Output,
             test_credentials=TEST_CREDENTIALS,
             test_input={
                 "credentials": TEST_CREDENTIALS_INPUT,
@@ -329,11 +383,12 @@ class InstantiationBlock(Block):
             ("stdout_logs", "Hello World\n"),
         ],
         test_mock={
-            "execute_code": lambda setup_code, language, setup_commands, timeout, api_key, template_id: (
-                "sandbox_id",  # sandbox_id
+            "execute_code": lambda api_key, code, language, template_id, setup_commands, timeout: (  # noqa
+                [],  # results
                 "Hello World",  # text_output
                 "Hello World\n",  # stdout_logs
                 "",  # stderr_logs
+                "sandbox_id",  # sandbox_id
             ),
         },
     )
@@ -342,13 +397,13 @@ class InstantiationBlock(Block):
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            sandbox_id, text_output, stdout_logs, stderr_logs = await self.execute_code(
-                input_data.setup_code,
-                input_data.language,
-                input_data.setup_commands,
-                input_data.timeout,
-                credentials.api_key.get_secret_value(),
-                input_data.template_id,
+            _, text_output, stdout, stderr, sandbox_id = await self.execute_code(
+                api_key=credentials.api_key.get_secret_value(),
+                code=input_data.setup_code,
+                language=input_data.language,
+                template_id=input_data.template_id,
+                setup_commands=input_data.setup_commands,
+                timeout=input_data.timeout,
             )
             if sandbox_id:
                 yield "sandbox_id", sandbox_id
@@ -357,64 +412,23 @@ class InstantiationBlock(Block):
 
             if text_output:
                 yield "response", text_output
-            if stdout_logs:
-                yield "stdout_logs", stdout_logs
-            if stderr_logs:
-                yield "stderr_logs", stderr_logs
+            if stdout:
+                yield "stdout_logs", stdout
+            if stderr:
+                yield "stderr_logs", stderr
         except Exception as e:
             yield "error", str(e)
 
-    async def execute_code(
-        self,
-        code: str,
-        language: ProgrammingLanguage,
-        setup_commands: list[str],
-        timeout: int,
-        api_key: str,
-        template_id: str,
-    ):
-        try:
-            sandbox = None
-            if template_id:
-                sandbox = await AsyncSandbox.create(
-                    template=template_id, api_key=api_key, timeout=timeout
-                )
-            else:
-                sandbox = await AsyncSandbox.create(api_key=api_key, timeout=timeout)
-
-            if not sandbox:
-                raise Exception("Sandbox not created")
-
-            # Running setup commands
-            for cmd in setup_commands:
-                await sandbox.commands.run(cmd)
-
-            # Executing the code
-            execution = await sandbox.run_code(
-                code,
-                language=language.value,
-                on_error=lambda e: sandbox.kill(),  # Kill the sandbox if there is an error
-            )
-
-            if execution.error:
-                raise Exception(execution.error)
-
-            text_output = execution.text
-            stdout_logs = "".join(execution.logs.stdout)
-            stderr_logs = "".join(execution.logs.stderr)
-
-            return sandbox.sandbox_id, text_output, stdout_logs, stderr_logs
-
-        except Exception as e:
-            raise e
-
 
-class StepExecutionBlock(Block):
+class ExecuteCodeStepBlock(Block, BaseE2BExecutorMixin):
     class Input(BlockSchema):
         credentials: CredentialsMetaInput[
             Literal[ProviderName.E2B], Literal["api_key"]
         ] = CredentialsField(
-            description="Enter your api key for the E2B Sandbox. You can get it in here - https://e2b.dev/docs",
+            description=(
+                "Enter your API key for the E2B platform. "
+                "You can get it in here - https://e2b.dev/docs"
+            ),
        )
 
        sandbox_id: str = SchemaField(
@@ -435,8 +449,13 @@ class StepExecutionBlock(Block):
             advanced=False,
         )
 
+        dispose_sandbox: bool = SchemaField(
+            description="Whether to dispose of the sandbox after executing this code.",
+            default=False,
+        )
+
     class Output(BlockSchema):
-        main_result: CodeExecutionResult = SchemaField(
+        main_result: MainCodeExecutionResult = SchemaField(
             title="Main Result", description="The main result from the code execution"
         )
         results: list[CodeExecutionResult] = SchemaField(
@@ -455,10 +474,10 @@ class StepExecutionBlock(Block):
     def __init__(self):
         super().__init__(
             id="82b59b8e-ea10-4d57-9161-8b169b0adba6",
-            description="Execute code in a previously instantiated sandbox environment.",
+            description="Execute code in a previously instantiated sandbox.",
             categories={BlockCategory.DEVELOPER_TOOLS},
-            input_schema=StepExecutionBlock.Input,
-            output_schema=StepExecutionBlock.Output,
+            input_schema=ExecuteCodeStepBlock.Input,
+            output_schema=ExecuteCodeStepBlock.Output,
             test_credentials=TEST_CREDENTIALS,
             test_input={
                 "credentials": TEST_CREDENTIALS_INPUT,
@@ -472,74 +491,38 @@ class StepExecutionBlock(Block):
             ("stdout_logs", "Hello World\n"),
         ],
         test_mock={
-            "execute_step_code": lambda sandbox_id, step_code, language, api_key: (
+            "execute_code": lambda api_key, code, language, sandbox_id, dispose_sandbox: (  # noqa
                 [],  # results
                 "Hello World",  # text_output
                 "Hello World\n",  # stdout_logs
                 "",  # stderr_logs
+                sandbox_id,  # sandbox_id
             ),
         },
     )
 
-    async def execute_step_code(
-        self,
-        sandbox_id: str,
-        code: str,
-        language: ProgrammingLanguage,
-        api_key: str,
-    ):
-        try:
-            sandbox = await AsyncSandbox.connect(sandbox_id=sandbox_id, api_key=api_key)
-            if not sandbox:
-                raise Exception("Sandbox not found")
-
-            # Executing the code
-            execution = await sandbox.run_code(code, language=language.value)
-
-            if execution.error:
-                raise Exception(execution.error)
-
-            results = execution.results
-            text_output = execution.text
-            stdout_logs = "".join(execution.logs.stdout)
-            stderr_logs = "".join(execution.logs.stderr)
-
-            return results, text_output, stdout_logs, stderr_logs
-
-        except Exception as e:
-            raise e
-
     async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            results, text_output, stdout_logs, stderr_logs = (
-                await self.execute_step_code(
-                    input_data.sandbox_id,
-                    input_data.step_code,
-                    input_data.language,
-                    credentials.api_key.get_secret_value(),
-                )
+            results, text_output, stdout, stderr, _ = await self.execute_code(
+                api_key=credentials.api_key.get_secret_value(),
+                code=input_data.step_code,
+                language=input_data.language,
+                sandbox_id=input_data.sandbox_id,
+                dispose_sandbox=input_data.dispose_sandbox,
             )
 
-            # Determine result object shape & filter out empty formats
-            results = [
-                {
-                    f: r[f]
-                    for f in [*r.formats(), "extra", "is_main_result"]
-                    if getattr(r, f, None) is not None
-                }
-                for r in results
-            ]
+            main_result, results = self.process_execution_results(results)
+            if main_result:
+                yield "main_result", main_result
             yield "results", results
             if text_output:
                 yield "response", text_output
-            if stdout_logs:
-                yield "stdout_logs", stdout_logs
-            if stderr_logs:
-                yield "stderr_logs", stderr_logs
+            if stdout:
+                yield "stdout_logs", stdout
+            if stderr:
+                yield "stderr_logs", stderr
         except Exception as e:
             yield "error", str(e)
 
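Together the three renamed blocks map onto one sandbox lifecycle: create and set up, reconnect by sandbox_id for each step, then kill when done. A rough standalone sketch of that lifecycle against the e2b AsyncSandbox calls used above (the API key and setup command are placeholders, and error handling is omitted):

import asyncio
from e2b_code_interpreter import AsyncSandbox

async def main() -> None:
    # InstantiateCodeSandboxBlock: create a sandbox, run setup, keep it alive
    sandbox = await AsyncSandbox.create(api_key="E2B_API_KEY", timeout=300)
    await sandbox.commands.run("pip install requests")  # hypothetical setup command
    sandbox_id = sandbox.sandbox_id

    # ExecuteCodeStepBlock: reconnect by id and run one step
    step = await AsyncSandbox.connect(sandbox_id=sandbox_id, api_key="E2B_API_KEY")
    execution = await step.run_code("print('hello')", language="python")
    print("".join(execution.logs.stdout))

    # dispose_sandbox=True: kill the sandbox to stop usage costs early
    await step.kill()

asyncio.run(main())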
@@ -90,7 +90,7 @@ class CodeExtractionBlock(Block):
                 for aliases in language_aliases.values()
                 for alias in aliases
             )
-            + r")\s+[\s\S]*?```"
+            + r")[ \t]*\n[\s\S]*?```"
         )
 
         remaining_text = re.sub(pattern, "", input_data.text).strip()
@@ -103,7 +103,9 @@ class CodeExtractionBlock(Block):
         # Escape special regex characters in the language string
         language = re.escape(language)
         # Extract all code blocks enclosed in ```language``` blocks
-        pattern = re.compile(rf"```{language}\s+(.*?)```", re.DOTALL | re.IGNORECASE)
+        pattern = re.compile(
+            rf"```{language}[ \t]*\n(.*?)\n```", re.DOTALL | re.IGNORECASE
+        )
        matches = pattern.finditer(text)
        # Combine all code blocks for this language with newlines between them
        code_blocks = [match.group(1).strip() for match in matches]
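The change from \s+ to [ \t]*\n matters for performance as well as correctness: on an unterminated fence like ```python followed by thousands of spaces, \s+ and the following lazy [\s\S]*? compete for the same whitespace and the engine backtracks quadratically before failing. A rough timing sketch of the difference (timings are illustrative and machine dependent):

import re
import time

malicious = "```python" + " " * 10_000  # opening fence, no closing fence

old_pattern = re.compile(r"```(python)\s+[\s\S]*?```")
new_pattern = re.compile(r"```(python)[ \t]*\n[\s\S]*?```")

for name, pattern in [("old", old_pattern), ("new", new_pattern)]:
    start = time.perf_counter()
    pattern.search(malicious)  # both fail to match; the cost is in *how* they fail
    print(name, f"{time.perf_counter() - start:.4f}s")
# The old pattern retries [\s\S]*? from every position \s+ can end at,
# while the new one bails out immediately because there is no newline.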
@@ -0,0 +1,12 @@
+from enum import Enum
+
+
+class ScrapeFormat(Enum):
+    MARKDOWN = "markdown"
+    HTML = "html"
+    RAW_HTML = "rawHtml"
+    LINKS = "links"
+    SCREENSHOT = "screenshot"
+    SCREENSHOT_FULL_PAGE = "screenshot@fullPage"
+    JSON = "json"
+    CHANGE_TRACKING = "changeTracking"
 
@@ -0,0 +1,28 @@
+"""Utility functions for converting between our ScrapeFormat enum and firecrawl FormatOption types."""
+
+from typing import List
+
+from firecrawl.v2.types import FormatOption, ScreenshotFormat
+
+from backend.blocks.firecrawl._api import ScrapeFormat
+
+
+def convert_to_format_options(
+    formats: List[ScrapeFormat],
+) -> List[FormatOption]:
+    """Convert our ScrapeFormat enum values to firecrawl FormatOption types.
+
+    Handles special cases like screenshot@fullPage which needs to be converted
+    to a ScreenshotFormat object.
+    """
+    result: List[FormatOption] = []
+
+    for format_enum in formats:
+        if format_enum.value == "screenshot@fullPage":
+            # Special case: convert to ScreenshotFormat with full_page=True
+            result.append(ScreenshotFormat(type="screenshot", full_page=True))
+        else:
+            # Regular string literals
+            result.append(format_enum.value)
+
+    return result
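Used on a mixed formats list, the helper passes plain string formats through and expands only the full-page sentinel. A quick usage sketch (assuming the helper lives at backend.blocks.firecrawl._format_utils next to _api):

from backend.blocks.firecrawl._api import ScrapeFormat
from backend.blocks.firecrawl._format_utils import convert_to_format_options

options = convert_to_format_options(
    [ScrapeFormat.MARKDOWN, ScrapeFormat.SCREENSHOT_FULL_PAGE]
)
print(options)
# ['markdown', ScreenshotFormat(type='screenshot', full_page=True)]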
@@ -1,8 +1,9 @@
-from enum import Enum
 from typing import Any
 
-from firecrawl import FirecrawlApp, ScrapeOptions
+from firecrawl import FirecrawlApp
+from firecrawl.v2.types import ScrapeOptions
 
+from backend.blocks.firecrawl._api import ScrapeFormat
 from backend.sdk import (
     APIKeyCredentials,
     Block,
@@ -14,21 +15,10 @@ from backend.sdk import (
 )
 
 from ._config import firecrawl
-
-
-class ScrapeFormat(Enum):
-    MARKDOWN = "markdown"
-    HTML = "html"
-    RAW_HTML = "rawHtml"
-    LINKS = "links"
-    SCREENSHOT = "screenshot"
-    SCREENSHOT_FULL_PAGE = "screenshot@fullPage"
-    JSON = "json"
-    CHANGE_TRACKING = "changeTracking"
+from ._format_utils import convert_to_format_options
 
 
 class FirecrawlCrawlBlock(Block):
-
     class Input(BlockSchema):
         credentials: CredentialsMetaInput = firecrawl.credentials_field()
         url: str = SchemaField(description="The URL to crawl")
@@ -78,18 +68,17 @@ class FirecrawlCrawlBlock(Block):
     async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
-
         app = FirecrawlApp(api_key=credentials.api_key.get_secret_value())
 
         # Sync call
-        crawl_result = app.crawl_url(
+        crawl_result = app.crawl(
             input_data.url,
             limit=input_data.limit,
             scrape_options=ScrapeOptions(
-                formats=[format.value for format in input_data.formats],
-                onlyMainContent=input_data.only_main_content,
-                maxAge=input_data.max_age,
-                waitFor=input_data.wait_for,
+                formats=convert_to_format_options(input_data.formats),
+                only_main_content=input_data.only_main_content,
+                max_age=input_data.max_age,
+                wait_for=input_data.wait_for,
             ),
         )
         yield "data", crawl_result.data
@@ -101,7 +90,7 @@ class FirecrawlCrawlBlock(Block):
             elif f == ScrapeFormat.HTML:
                 yield "html", data.html
             elif f == ScrapeFormat.RAW_HTML:
-                yield "raw_html", data.rawHtml
+                yield "raw_html", data.raw_html
             elif f == ScrapeFormat.LINKS:
                 yield "links", data.links
             elif f == ScrapeFormat.SCREENSHOT:
@@ -109,6 +98,6 @@ class FirecrawlCrawlBlock(Block):
             elif f == ScrapeFormat.SCREENSHOT_FULL_PAGE:
                 yield "screenshot_full_page", data.screenshot
             elif f == ScrapeFormat.CHANGE_TRACKING:
-                yield "change_tracking", data.changeTracking
+                yield "change_tracking", data.change_tracking
             elif f == ScrapeFormat.JSON:
                 yield "json", data.json
 
@@ -20,7 +20,6 @@ from ._config import firecrawl
 
 @cost(BlockCost(2, BlockCostType.RUN))
 class FirecrawlExtractBlock(Block):
-
     class Input(BlockSchema):
         credentials: CredentialsMetaInput = firecrawl.credentials_field()
         urls: list[str] = SchemaField(
@@ -53,7 +52,6 @@ class FirecrawlExtractBlock(Block):
     async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
-
        app = FirecrawlApp(api_key=credentials.api_key.get_secret_value())
 
        extract_result = app.extract(
@@ -1,3 +1,5 @@
+from typing import Any
+
 from firecrawl import FirecrawlApp
 
 from backend.sdk import (
@@ -14,14 +16,16 @@ from ._config import firecrawl
 
 class FirecrawlMapWebsiteBlock(Block):
-
     class Input(BlockSchema):
         credentials: CredentialsMetaInput = firecrawl.credentials_field()
 
         url: str = SchemaField(description="The website url to map")
 
     class Output(BlockSchema):
-        links: list[str] = SchemaField(description="The links of the website")
+        links: list[str] = SchemaField(description="List of URLs found on the website")
+        results: list[dict[str, Any]] = SchemaField(
+            description="List of search results with url, title, and description"
+        )
 
     def __init__(self):
         super().__init__(
@@ -35,12 +39,22 @@ class FirecrawlMapWebsiteBlock(Block):
     async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
-
         app = FirecrawlApp(api_key=credentials.api_key.get_secret_value())
 
         # Sync call
-        map_result = app.map_url(
+        map_result = app.map(
             url=input_data.url,
         )
 
-        yield "links", map_result.links
+        # Convert SearchResult objects to dicts
+        results_data = [
+            {
+                "url": link.url,
+                "title": link.title,
+                "description": link.description,
+            }
+            for link in map_result.links
+        ]
+
+        yield "links", [link.url for link in map_result.links]
+        yield "results", results_data
 
@@ -1,8 +1,8 @@
-from enum import Enum
 from typing import Any
 
 from firecrawl import FirecrawlApp
 
+from backend.blocks.firecrawl._api import ScrapeFormat
 from backend.sdk import (
     APIKeyCredentials,
     Block,
@@ -14,21 +14,10 @@ from backend.sdk import (
 )
 
 from ._config import firecrawl
-
-
-class ScrapeFormat(Enum):
-    MARKDOWN = "markdown"
-    HTML = "html"
-    RAW_HTML = "rawHtml"
-    LINKS = "links"
-    SCREENSHOT = "screenshot"
-    SCREENSHOT_FULL_PAGE = "screenshot@fullPage"
-    JSON = "json"
-    CHANGE_TRACKING = "changeTracking"
+from ._format_utils import convert_to_format_options
 
 
 class FirecrawlScrapeBlock(Block):
-
     class Input(BlockSchema):
         credentials: CredentialsMetaInput = firecrawl.credentials_field()
         url: str = SchemaField(description="The URL to crawl")
@@ -78,12 +67,11 @@ class FirecrawlScrapeBlock(Block):
     async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
-
         app = FirecrawlApp(api_key=credentials.api_key.get_secret_value())
 
-        scrape_result = app.scrape_url(
+        scrape_result = app.scrape(
             input_data.url,
-            formats=[format.value for format in input_data.formats],
+            formats=convert_to_format_options(input_data.formats),
             only_main_content=input_data.only_main_content,
             max_age=input_data.max_age,
             wait_for=input_data.wait_for,
@@ -96,7 +84,7 @@ class FirecrawlScrapeBlock(Block):
             elif f == ScrapeFormat.HTML:
                 yield "html", scrape_result.html
             elif f == ScrapeFormat.RAW_HTML:
-                yield "raw_html", scrape_result.rawHtml
+                yield "raw_html", scrape_result.raw_html
             elif f == ScrapeFormat.LINKS:
                 yield "links", scrape_result.links
             elif f == ScrapeFormat.SCREENSHOT:
@@ -104,6 +92,6 @@ class FirecrawlScrapeBlock(Block):
             elif f == ScrapeFormat.SCREENSHOT_FULL_PAGE:
                 yield "screenshot_full_page", scrape_result.screenshot
             elif f == ScrapeFormat.CHANGE_TRACKING:
-                yield "change_tracking", scrape_result.changeTracking
+                yield "change_tracking", scrape_result.change_tracking
             elif f == ScrapeFormat.JSON:
                 yield "json", scrape_result.json
 
@@ -1,8 +1,9 @@
-from enum import Enum
 from typing import Any
 
-from firecrawl import FirecrawlApp, ScrapeOptions
+from firecrawl import FirecrawlApp
+from firecrawl.v2.types import ScrapeOptions
 
+from backend.blocks.firecrawl._api import ScrapeFormat
 from backend.sdk import (
     APIKeyCredentials,
     Block,
@@ -14,21 +15,10 @@ from backend.sdk import (
 )
 
 from ._config import firecrawl
-
-
-class ScrapeFormat(Enum):
-    MARKDOWN = "markdown"
-    HTML = "html"
-    RAW_HTML = "rawHtml"
-    LINKS = "links"
-    SCREENSHOT = "screenshot"
-    SCREENSHOT_FULL_PAGE = "screenshot@fullPage"
-    JSON = "json"
-    CHANGE_TRACKING = "changeTracking"
+from ._format_utils import convert_to_format_options
 
 
 class FirecrawlSearchBlock(Block):
-
     class Input(BlockSchema):
         credentials: CredentialsMetaInput = firecrawl.credentials_field()
         query: str = SchemaField(description="The query to search for")
@@ -61,7 +51,6 @@ class FirecrawlSearchBlock(Block):
     async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
-
         app = FirecrawlApp(api_key=credentials.api_key.get_secret_value())
 
         # Sync call
@@ -69,11 +58,12 @@ class FirecrawlSearchBlock(Block):
             input_data.query,
             limit=input_data.limit,
             scrape_options=ScrapeOptions(
-                formats=[format.value for format in input_data.formats],
-                maxAge=input_data.max_age,
-                waitFor=input_data.wait_for,
+                formats=convert_to_format_options(input_data.formats) or None,
+                max_age=input_data.max_age,
+                wait_for=input_data.wait_for,
             ),
         )
         yield "data", scrape_result
-        for site in scrape_result.data:
-            yield "site", site
+        if hasattr(scrape_result, "web") and scrape_result.web:
+            for site in scrape_result.web:
+                yield "site", site
 
@@ -54,20 +54,43 @@ class StepThroughItemsBlock(Block):
         )
 
     async def run(self, input_data: Input, **kwargs) -> BlockOutput:
+        # Security fix: Add limits to prevent DoS from large iterations
+        MAX_ITEMS = 10000  # Maximum items to iterate
+        MAX_ITEM_SIZE = 1024 * 1024  # 1MB per item
+
         for data in [input_data.items, input_data.items_object, input_data.items_str]:
             if not data:
                 continue
+
+            # Limit string size before parsing
             if isinstance(data, str):
+                if len(data) > MAX_ITEM_SIZE:
+                    raise ValueError(
+                        f"Input too large: {len(data)} bytes > {MAX_ITEM_SIZE} bytes"
+                    )
                 items = json.loads(data)
             else:
                 items = data
+
+            # Check total item count
+            if isinstance(items, (list, dict)):
+                if len(items) > MAX_ITEMS:
+                    raise ValueError(f"Too many items: {len(items)} > {MAX_ITEMS}")
+
+            iteration_count = 0
             if isinstance(items, dict):
                 # If items is a dictionary, iterate over its values
-                for item in items.values():
-                    yield "item", item
-                    yield "key", item
+                for key, value in items.items():
+                    if iteration_count >= MAX_ITEMS:
+                        break
+                    yield "item", value
+                    yield "key", key  # Fixed: should yield key, not item
+                    iteration_count += 1
             else:
                 # If items is a list, iterate over the list
                 for index, item in enumerate(items):
+                    if iteration_count >= MAX_ITEMS:
+                        break
                     yield "item", item
                     yield "key", index
+                    iteration_count += 1
 
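The old dict branch yielded the value on both the item and key pins; the rewritten loop emits real key/value pairs and counts iterations. A quick sketch of the corrected pairing, as a plain generator without the block machinery:

def step_through(items: dict):
    # Mirrors the fixed branch: emit ("item", value) and ("key", key) per entry.
    for key, value in items.items():
        yield "item", value
        yield "key", key

print(list(step_through({"a": 1, "b": 2})))
# [('item', 1), ('key', 'a'), ('item', 2), ('key', 'b')]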
@@ -1404,11 +1404,27 @@ class AITextSummarizerBlock(AIBlockBase):
 
     @staticmethod
     def _split_text(text: str, max_tokens: int, overlap: int) -> list[str]:
+        # Security fix: Add validation to prevent DoS attacks
+        # Limit text size to prevent memory exhaustion
+        MAX_TEXT_LENGTH = 1_000_000  # 1MB character limit
+        MAX_CHUNKS = 100  # Maximum number of chunks to prevent excessive memory use
+
+        if len(text) > MAX_TEXT_LENGTH:
+            text = text[:MAX_TEXT_LENGTH]
+
+        # Ensure chunk_size is at least 1 to prevent infinite loops
+        chunk_size = max(1, max_tokens - overlap)
+
+        # Ensure overlap is less than max_tokens to prevent invalid configurations
+        if overlap >= max_tokens:
+            overlap = max(0, max_tokens - 1)
+
         words = text.split()
         chunks = []
-        chunk_size = max_tokens - overlap
 
         for i in range(0, len(words), chunk_size):
+            if len(chunks) >= MAX_CHUNKS:
+                break  # Limit the number of chunks to prevent memory exhaustion
             chunk = " ".join(words[i : i + max_tokens])
             chunks.append(chunk)
 
 
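Worked through with typical values: max_tokens=1000 and overlap=100 give chunk_size=900, so chunks start at word 0, 900, 1800, and so on; each 1000-word chunk shares its last 100 words with the next one, and at most 100 chunks are emitted. A standalone sketch of the same chunking (a stand-in for the real method):

def split_text(text: str, max_tokens: int, overlap: int,
               max_chunks: int = 100) -> list[str]:
    chunk_size = max(1, max_tokens - overlap)  # step between chunk starts
    words = text.split()
    chunks = []
    for i in range(0, len(words), chunk_size):
        if len(chunks) >= max_chunks:
            break
        chunks.append(" ".join(words[i : i + max_tokens]))
    return chunks

sample = " ".join(str(n) for n in range(2000))
pieces = split_text(sample, max_tokens=1000, overlap=100)
print(len(pieces))                                  # 3 chunks: starts at 0, 900, 1800
print(pieces[0].split()[-1], pieces[1].split()[0])  # "999" vs "900": 100 words of overlap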
@@ -1,4 +1,7 @@
 import asyncio
+import logging
+import urllib.parse
+import urllib.request
 from datetime import datetime, timedelta, timezone
 from typing import Any
 
@@ -101,7 +104,38 @@ class ReadRSSFeedBlock(Block):
 
     @staticmethod
     def parse_feed(url: str) -> dict[str, Any]:
-        return feedparser.parse(url)  # type: ignore
+        # Security fix: Add protection against memory exhaustion attacks
+        MAX_FEED_SIZE = 10 * 1024 * 1024  # 10MB limit for RSS feeds
+
+        # Validate URL
+        parsed_url = urllib.parse.urlparse(url)
+        if parsed_url.scheme not in ("http", "https"):
+            raise ValueError(f"Invalid URL scheme: {parsed_url.scheme}")
+
+        # Download with size limit
+        try:
+            with urllib.request.urlopen(url, timeout=30) as response:
+                # Check content length if available
+                content_length = response.headers.get("Content-Length")
+                if content_length and int(content_length) > MAX_FEED_SIZE:
+                    raise ValueError(
+                        f"Feed too large: {content_length} bytes exceeds {MAX_FEED_SIZE} limit"
+                    )
+
+                # Read with size limit
+                content = response.read(MAX_FEED_SIZE + 1)
+                if len(content) > MAX_FEED_SIZE:
+                    raise ValueError(
+                        f"Feed too large: exceeds {MAX_FEED_SIZE} byte limit"
+                    )
+
+                # Parse with feedparser using the validated content
+                # feedparser has built-in protection against XML attacks
+                return feedparser.parse(content)  # type: ignore
+        except Exception as e:
+            # Log error and return empty feed
+            logging.warning(f"Failed to parse RSS feed from {url}: {e}")
+            return {"entries": []}
 
     async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         keep_going = True
 
@@ -98,6 +98,22 @@ def _create_tool_response(call_id: str, output: Any) -> dict[str, Any]:
     return {"role": "tool", "tool_call_id": call_id, "content": content}
 
 
+def _convert_raw_response_to_dict(raw_response: Any) -> dict[str, Any]:
+    """
+    Safely convert raw_response to dictionary format for conversation history.
+    Handles different response types from different LLM providers.
+    """
+    if isinstance(raw_response, str):
+        # Ollama returns a string, convert to dict format
+        return {"role": "assistant", "content": raw_response}
+    elif isinstance(raw_response, dict):
+        # Already a dict (from tests or some providers)
+        return raw_response
+    else:
+        # OpenAI/Anthropic return objects, convert with json.to_dict
+        return json.to_dict(raw_response)
+
+
 def get_pending_tool_calls(conversation_history: list[Any]) -> dict[str, int]:
    """
    All the tool calls entry in the conversation history requires a response.
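In effect the helper normalizes three provider shapes into plain dicts before they are appended to the conversation. A quick sketch of the three cases, with a stand-in class instead of OpenAI's ChatCompletionMessage and a plain vars() where the real code uses json.to_dict:

from dataclasses import dataclass

@dataclass
class FakeMessage:  # stand-in for an OpenAI/Anthropic message object
    role: str
    content: str

def convert(raw):
    if isinstance(raw, str):      # Ollama: bare string
        return {"role": "assistant", "content": raw}
    if isinstance(raw, dict):     # already a dict
        return raw
    return vars(raw)              # object: convert to a dict

print(convert("hi"))                            # {'role': 'assistant', 'content': 'hi'}
print(convert({"role": "assistant", "content": "hi"}))
print(convert(FakeMessage("assistant", "hi")))  # {'role': 'assistant', 'content': 'hi'}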
@@ -605,7 +621,7 @@ class SmartDecisionMakerBlock(Block):
             # If validation failed, add feedback and raise for retry
             if validation_errors:
                 # Add the failed response to conversation
-                prompt.append(response.raw_response)
+                prompt.append(_convert_raw_response_to_dict(response.raw_response))
 
                 # Add error feedback for retry
                 error_feedback = (
@@ -661,5 +677,6 @@ class SmartDecisionMakerBlock(Block):
                 {"role": "assistant", "content": f"[Reasoning]: {response.reasoning}"}
             )
 
-        prompt.append(response.raw_response)
+        # Add the successful response to conversation
+        prompt.append(_convert_raw_response_to_dict(response.raw_response))
         yield "conversations", prompt
 
@@ -19,7 +19,7 @@ async def test_block_ids_valid(block: Type[Block]):
     # Skip list for blocks with known invalid UUIDs
     skip_blocks = {
         "GetWeatherInformationBlock",
-        "CodeExecutionBlock",
+        "ExecuteCodeBlock",
         "CountdownTimerBlock",
         "TwitterGetListTweetsBlock",
         "TwitterRemoveListMemberBlock",
 
@@ -0,0 +1,269 @@
+"""
+Test security fixes for various DoS vulnerabilities.
+"""
+
+import asyncio
+from unittest.mock import patch
+
+import pytest
+
+from backend.blocks.code_extraction_block import CodeExtractionBlock
+from backend.blocks.iteration import StepThroughItemsBlock
+from backend.blocks.llm import AITextSummarizerBlock
+from backend.blocks.text import ExtractTextInformationBlock
+from backend.blocks.xml_parser import XMLParserBlock
+from backend.util.file import store_media_file
+from backend.util.type import MediaFileType
+
+
+class TestCodeExtractionBlockSecurity:
+    """Test ReDoS fixes in CodeExtractionBlock."""
+
+    async def test_redos_protection(self):
+        """Test that the regex patterns don't cause ReDoS."""
+        block = CodeExtractionBlock()
+
+        # Test with input that would previously cause ReDoS
+        malicious_input = "```python" + " " * 10000  # Large spaces
+
+        result = []
+        async for output_name, output_data in block.run(
+            CodeExtractionBlock.Input(text=malicious_input)
+        ):
+            result.append((output_name, output_data))
+
+        # Should complete without hanging
+        assert len(result) >= 1
+        assert any(name == "remaining_text" for name, _ in result)
+
+
+class TestAITextSummarizerBlockSecurity:
+    """Test memory exhaustion fixes in AITextSummarizerBlock."""
+
+    def test_split_text_limits(self):
+        """Test that _split_text has proper limits."""
+        # Test text size limit
+        large_text = "a" * 2_000_000  # 2MB text
+        result = AITextSummarizerBlock._split_text(large_text, 1000, 100)
+
+        # Should be truncated to 1MB
+        total_chars = sum(len(chunk) for chunk in result)
+        assert total_chars <= 1_000_000 + 1000  # Allow for chunk boundary
+
+        # Test chunk count limit
+        result = AITextSummarizerBlock._split_text("word " * 10000, 10, 9)
+        assert len(result) <= 100  # MAX_CHUNKS limit
+
+        # Test parameter validation
+        result = AITextSummarizerBlock._split_text(
+            "test", 10, 15
+        )  # overlap > max_tokens
+        assert len(result) >= 1  # Should still work
+
+
+class TestExtractTextInformationBlockSecurity:
+    """Test ReDoS and memory exhaustion fixes in ExtractTextInformationBlock."""
+
+    async def test_text_size_limits(self):
+        """Test text size limits."""
+        block = ExtractTextInformationBlock()
+
+        # Test with large input
+        large_text = "a" * 2_000_000  # 2MB
+
+        results = []
+        async for output_name, output_data in block.run(
+            ExtractTextInformationBlock.Input(
+                text=large_text, pattern=r"a+", find_all=True, group=0
+            )
+        ):
+            results.append((output_name, output_data))
+
+        # Should complete and have limits applied
+        matched_results = [r for name, r in results if name == "matched_results"]
+        if matched_results:
+            assert len(matched_results[0]) <= 1000  # MAX_MATCHES limit
+
+    async def test_dangerous_pattern_timeout(self):
+        """Test timeout protection for dangerous patterns."""
+        block = ExtractTextInformationBlock()
+
+        # Test with potentially dangerous lookahead pattern
+        test_input = "a" * 1000
+
+        # This should complete quickly due to timeout protection
+        start_time = asyncio.get_event_loop().time()
+        results = []
+        async for output_name, output_data in block.run(
+            ExtractTextInformationBlock.Input(
+                text=test_input, pattern=r"(?=.+)", find_all=True, group=0
+            )
+        ):
+            results.append((output_name, output_data))
+
+        end_time = asyncio.get_event_loop().time()
+        # Should complete within reasonable time (much less than 5s timeout)
+        assert (end_time - start_time) < 10
+
+    async def test_redos_catastrophic_backtracking(self):
+        """Test that ReDoS patterns with catastrophic backtracking are handled."""
+        block = ExtractTextInformationBlock()
+
+        # Pattern that causes catastrophic backtracking: (a+)+b
+        # With input "aaaaaaaaaaaaaaaaaaaaaaaaaaaa" (no 'b'), this causes exponential time
+        dangerous_pattern = r"(a+)+b"
+        test_input = "a" * 30  # 30 'a's without a 'b' at the end
+
+        # This should be handled by timeout protection or pattern detection
+        start_time = asyncio.get_event_loop().time()
+        results = []
+
+        async for output_name, output_data in block.run(
+            ExtractTextInformationBlock.Input(
+                text=test_input, pattern=dangerous_pattern, find_all=True, group=0
+            )
+        ):
+            results.append((output_name, output_data))
+
+        end_time = asyncio.get_event_loop().time()
+        elapsed = end_time - start_time
+
+        # Should complete within timeout (6 seconds to be safe)
+        # The current threading.Timer approach doesn't work, so this will likely fail
+        # demonstrating the need for a fix
+        assert elapsed < 6, f"Regex took {elapsed}s, timeout mechanism failed"
+
+        # Should return empty results on timeout or no match
+        matched_results = [r for name, r in results if name == "matched_results"]
+        assert matched_results[0] == []  # No matches expected
+
+
+class TestStepThroughItemsBlockSecurity:
+    """Test iteration limits in StepThroughItemsBlock."""
+
+    async def test_item_count_limits(self):
+        """Test maximum item count limits."""
+        block = StepThroughItemsBlock()
+
+        # Test with too many items
+        large_list = list(range(20000))  # Exceeds MAX_ITEMS (10000)
+
+        with pytest.raises(ValueError, match="Too many items"):
+            async for _ in block.run(StepThroughItemsBlock.Input(items=large_list)):
+                pass
+
+    async def test_string_size_limits(self):
+        """Test string input size limits."""
+        block = StepThroughItemsBlock()
+
+        # Test with large JSON string
+        large_string = '["item"]' * 200000  # Large JSON string
+
+        with pytest.raises(ValueError, match="Input too large"):
+            async for _ in block.run(
+                StepThroughItemsBlock.Input(items_str=large_string)
+            ):
+                pass
+
+    async def test_normal_iteration_works(self):
+        """Test that normal iteration still works."""
+        block = StepThroughItemsBlock()
+
+        results = []
+        async for output_name, output_data in block.run(
+            StepThroughItemsBlock.Input(items=[1, 2, 3])
+        ):
+            results.append((output_name, output_data))
+
+        # Should have 6 outputs (item, key for each of 3 items)
+        assert len(results) == 6
+        items = [data for name, data in results if name == "item"]
+        assert items == [1, 2, 3]
+
+
+class TestXMLParserBlockSecurity:
+    """Test XML size limits in XMLParserBlock."""
+
+    async def test_xml_size_limits(self):
+        """Test XML input size limits."""
+        block = XMLParserBlock()
+
+        # Test with large XML - need to exceed 10MB limit
+        # Each "<item>data</item>" is 17 chars, need ~620K items for >10MB
+        large_xml = "<root>" + "<item>data</item>" * 620000 + "</root>"
+
+        with pytest.raises(ValueError, match="XML too large"):
+            async for _ in block.run(XMLParserBlock.Input(input_xml=large_xml)):
+                pass
+
+
+class TestStoreMediaFileSecurity:
+    """Test file storage security limits."""
+
+    @patch("backend.util.file.scan_content_safe")
+    @patch("backend.util.file.get_cloud_storage_handler")
+    async def test_file_size_limits(self, mock_cloud_storage, mock_scan):
+        """Test file size limits."""
+        # Mock cloud storage handler - get_cloud_storage_handler is async
+        # but is_cloud_path and parse_cloud_path are sync methods
+        from unittest.mock import MagicMock
+
+        mock_handler = MagicMock()
+        mock_handler.is_cloud_path.return_value = False
+
+        # Make get_cloud_storage_handler an async function that returns the mock handler
+        async def async_get_handler():
+            return mock_handler
+
+        mock_cloud_storage.side_effect = async_get_handler
+        mock_scan.return_value = None
+
+        # Test with large base64 content
+        large_content = "a" * (200 * 1024 * 1024)  # 200MB
+        large_data_uri = f"data:text/plain;base64,{large_content}"
+
+        with pytest.raises(ValueError, match="File too large"):
+            await store_media_file(
+                graph_exec_id="test",
+                file=MediaFileType(large_data_uri),
+                user_id="test_user",
+            )
+
+    @patch("backend.util.file.Path")
+    @patch("backend.util.file.scan_content_safe")
+    @patch("backend.util.file.get_cloud_storage_handler")
+    async def test_directory_size_limits(self, mock_cloud_storage, mock_scan, MockPath):
+        """Test directory size limits."""
+        from unittest.mock import MagicMock
+
+        mock_handler = MagicMock()
+        mock_handler.is_cloud_path.return_value = False
+
+        async def async_get_handler():
+            return mock_handler
+
+        mock_cloud_storage.side_effect = async_get_handler
+        mock_scan.return_value = None
+
+        # Create mock path instance for the execution directory
+        mock_path_instance = MagicMock()
+        mock_path_instance.exists.return_value = True
+
+        # Mock glob to return files that total > 1GB
+        mock_file = MagicMock()
+        mock_file.is_file.return_value = True
+        mock_file.stat.return_value.st_size = 2 * 1024 * 1024 * 1024  # 2GB
+        mock_path_instance.glob.return_value = [mock_file]
+
+        # Make Path() return our mock
+        MockPath.return_value = mock_path_instance
+
+        # Should raise an error when directory size exceeds limit
+        with pytest.raises(ValueError, match="Disk usage limit exceeded"):
+            await store_media_file(
+                graph_exec_id="test",
+                file=MediaFileType(
+                    "data:text/plain;base64,dGVzdA=="
+                ),  # Small test file
+                user_id="test_user",
+            )
 
@@ -478,3 +478,207 @@ async def test_smart_decision_maker_parameter_validation():
     assert outputs["tools_^_search_keywords_~_query"] == "test"
     assert outputs["tools_^_search_keywords_~_max_keyword_difficulty"] == 50
     assert outputs["tools_^_search_keywords_~_optional_param"] == "custom_value"
+
+
+@pytest.mark.asyncio
+async def test_smart_decision_maker_raw_response_conversion():
+    """Test that SmartDecisionMaker correctly handles different raw_response types with retry mechanism."""
+    from unittest.mock import MagicMock, patch
+
+    import backend.blocks.llm as llm_module
+    from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
+
+    block = SmartDecisionMakerBlock()
+
+    # Mock tool functions
+    mock_tool_functions = [
+        {
+            "type": "function",
+            "function": {
+                "name": "test_tool",
+                "parameters": {
+                    "type": "object",
+                    "properties": {"param": {"type": "string"}},
+                    "required": ["param"],
+                },
+            },
+        }
+    ]
+
+    # Test case 1: Simulate ChatCompletionMessage raw_response that caused the original error
+    class MockChatCompletionMessage:
+        """Simulate OpenAI's ChatCompletionMessage object that lacks .get() method"""
+
+        def __init__(self, role, content, tool_calls=None):
+            self.role = role
+            self.content = content
+            self.tool_calls = tool_calls or []
+
+        # This is what caused the error - no .get() method
+        # def get(self, key, default=None):  # Intentionally missing
+
+    # First response: has invalid parameter name (triggers retry)
+    mock_tool_call_invalid = MagicMock()
+    mock_tool_call_invalid.function.name = "test_tool"
+    mock_tool_call_invalid.function.arguments = (
+        '{"wrong_param": "test_value"}'  # Invalid parameter name
+    )
+
+    mock_response_retry = MagicMock()
+    mock_response_retry.response = None
+    mock_response_retry.tool_calls = [mock_tool_call_invalid]
+    mock_response_retry.prompt_tokens = 50
+    mock_response_retry.completion_tokens = 25
+    mock_response_retry.reasoning = None
+    # This would cause the original error without our fix
+    mock_response_retry.raw_response = MockChatCompletionMessage(
+        role="assistant", content=None, tool_calls=[mock_tool_call_invalid]
+    )
+
+    # Second response: successful (correct parameter name)
+    mock_tool_call_valid = MagicMock()
+    mock_tool_call_valid.function.name = "test_tool"
+    mock_tool_call_valid.function.arguments = (
+        '{"param": "test_value"}'  # Correct parameter name
+    )
+
+    mock_response_success = MagicMock()
+    mock_response_success.response = None
+    mock_response_success.tool_calls = [mock_tool_call_valid]
+    mock_response_success.prompt_tokens = 50
+    mock_response_success.completion_tokens = 25
+    mock_response_success.reasoning = None
+    mock_response_success.raw_response = MockChatCompletionMessage(
+        role="assistant", content=None, tool_calls=[mock_tool_call_valid]
+    )
+
+    # Mock llm_call to return different responses on different calls
+    with patch("backend.blocks.llm.llm_call") as mock_llm_call, patch.object(
+        SmartDecisionMakerBlock,
+        "_create_function_signature",
+        return_value=mock_tool_functions,
+    ):
+        # First call returns response that will trigger retry due to validation error
+        # Second call returns successful response
+        mock_llm_call.side_effect = [mock_response_retry, mock_response_success]
+
+        input_data = SmartDecisionMakerBlock.Input(
+            prompt="Test prompt",
+            model=llm_module.LlmModel.GPT4O,
+            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
+            retry=2,
+        )
+
+        # Should succeed after retry, demonstrating our helper function works
+        outputs = {}
+        async for output_name, output_data in block.run(
+            input_data,
+            credentials=llm_module.TEST_CREDENTIALS,
+            graph_id="test-graph-id",
+            node_id="test-node-id",
+            graph_exec_id="test-exec-id",
+            node_exec_id="test-node-exec-id",
+            user_id="test-user-id",
+        ):
+            outputs[output_name] = output_data
+
+        # Verify the tool output was generated successfully
+        assert "tools_^_test_tool_~_param" in outputs
+        assert outputs["tools_^_test_tool_~_param"] == "test_value"
+
+        # Verify conversation history was properly maintained
+        assert "conversations" in outputs
+        conversations = outputs["conversations"]
+        assert len(conversations) > 0
+
+        # The conversations should contain properly converted raw_response objects as dicts
+        # This would have failed with the original bug due to ChatCompletionMessage.get() error
+        for msg in conversations:
+            assert isinstance(msg, dict), f"Expected dict, got {type(msg)}"
+            if msg.get("role") == "assistant":
+                # Should have been converted from ChatCompletionMessage to dict
+                assert "role" in msg
+
+        # Verify LLM was called twice (initial + 1 retry)
+        assert mock_llm_call.call_count == 2
+
+    # Test case 2: Test with different raw_response types (Ollama string, dict)
+    # Test Ollama string response
+    mock_response_ollama = MagicMock()
+    mock_response_ollama.response = "I'll help you with that."
+    mock_response_ollama.tool_calls = None
+    mock_response_ollama.prompt_tokens = 30
+    mock_response_ollama.completion_tokens = 15
+    mock_response_ollama.reasoning = None
+    mock_response_ollama.raw_response = (
+        "I'll help you with that."  # Ollama returns string
+    )
+
+    with patch(
+        "backend.blocks.llm.llm_call", return_value=mock_response_ollama
+    ), patch.object(
+        SmartDecisionMakerBlock,
+        "_create_function_signature",
+        return_value=[],  # No tools for this test
+    ):
+        input_data = SmartDecisionMakerBlock.Input(
+            prompt="Simple prompt",
+            model=llm_module.LlmModel.GPT4O,
+            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
+        )
+
+        outputs = {}
+        async for output_name, output_data in block.run(
+            input_data,
+            credentials=llm_module.TEST_CREDENTIALS,
+            graph_id="test-graph-id",
+            node_id="test-node-id",
+            graph_exec_id="test-exec-id",
+            node_exec_id="test-node-exec-id",
+            user_id="test-user-id",
+        ):
+            outputs[output_name] = output_data
+
+        # Should finish since no tool calls
+        assert "finished" in outputs
+        assert outputs["finished"] == "I'll help you with that."
+
+    # Test case 3: Test with dict raw_response (some providers/tests)
+    mock_response_dict = MagicMock()
+    mock_response_dict.response = "Test response"
+    mock_response_dict.tool_calls = None
+    mock_response_dict.prompt_tokens = 25
+    mock_response_dict.completion_tokens = 10
+    mock_response_dict.reasoning = None
+    mock_response_dict.raw_response = {
+        "role": "assistant",
+        "content": "Test response",
+    }  # Dict format
+
+    with patch(
+        "backend.blocks.llm.llm_call", return_value=mock_response_dict
+    ), patch.object(
+        SmartDecisionMakerBlock,
+        "_create_function_signature",
+        return_value=[],
+    ):
+        input_data = SmartDecisionMakerBlock.Input(
+            prompt="Another test",
+            model=llm_module.LlmModel.GPT4O,
+            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
+        )
+
+        outputs = {}
+        async for output_name, output_data in block.run(
+            input_data,
+            credentials=llm_module.TEST_CREDENTIALS,
+            graph_id="test-graph-id",
+            node_id="test-node-id",
+            graph_exec_id="test-exec-id",
+            node_exec_id="test-node-exec-id",
+            user_id="test-user-id",
+        ):
+            outputs[output_name] = output_data
+
+        assert "finished" in outputs
+        assert outputs["finished"] == "Test response"
 
@@ -2,6 +2,8 @@ import re
from pathlib import Path
from typing import Any

import regex  # Has built-in timeout support

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util import json, text
@@ -137,6 +139,11 @@ class ExtractTextInformationBlock(Block):
    )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        # Security fix: Add limits to prevent ReDoS and memory exhaustion
        MAX_TEXT_LENGTH = 1_000_000  # 1MB character limit
        MAX_MATCHES = 1000  # Maximum number of matches, to prevent memory exhaustion
        MAX_MATCH_LENGTH = 10_000  # Maximum length per match

        flags = 0
        if not input_data.case_sensitive:
            flags = flags | re.IGNORECASE
@@ -148,20 +155,85 @@
        else:
            txt = json.dumps(input_data.text)

        matches = [
            match.group(input_data.group)
            for match in re.finditer(input_data.pattern, txt, flags)
            if input_data.group <= len(match.groups())
        ]
        if not input_data.find_all:
            matches = matches[:1]
        for match in matches:
            yield "positive", match
        if not matches:
            yield "negative", input_data.text
        # Limit text size to prevent DoS
        if len(txt) > MAX_TEXT_LENGTH:
            txt = txt[:MAX_TEXT_LENGTH]

        yield "matched_results", matches
        yield "matched_count", len(matches)
        # Validate the regex pattern and flag dangerous constructs
        dangerous_patterns = [
            r".*\+.*\+",  # Nested quantifiers
            r".*\*.*\*",  # Nested quantifiers
            r"(?=.*\+)",  # Lookahead with quantifier
            r"(?=.*\*)",  # Lookahead with quantifier
            r"\(.+\)\+",  # Group with nested quantifier
            r"\(.+\)\*",  # Group with nested quantifier
            r"\([^)]+\+\)\+",  # Nested quantifiers like (a+)+
            r"\([^)]+\*\)\*",  # Nested quantifiers like (a*)*
        ]

        # Check if the pattern is potentially dangerous
        is_dangerous = any(
            re.search(dangerous, input_data.pattern) for dangerous in dangerous_patterns
        )

        # Use the regex module with a timeout for dangerous patterns.
        # For safe patterns, use the standard re module for compatibility.
        try:
            matches = []
            match_count = 0

            if is_dangerous:
                # Use the regex module with a timeout (5 seconds) for dangerous patterns.
                # The regex module supports a timeout parameter in finditer.
                try:
                    for match in regex.finditer(
                        input_data.pattern, txt, flags=flags, timeout=5.0
                    ):
                        if match_count >= MAX_MATCHES:
                            break
                        if input_data.group <= len(match.groups()):
                            match_text = match.group(input_data.group)
                            # Limit match length to prevent memory exhaustion
                            if len(match_text) > MAX_MATCH_LENGTH:
                                match_text = match_text[:MAX_MATCH_LENGTH]
                            matches.append(match_text)
                            match_count += 1
                except regex.error as e:
                    # Timeout occurred, or a regex error
                    if "timeout" in str(e).lower():
                        # Timeout - return empty results
                        pass
                    else:
                        # Other regex error
                        raise
            else:
                # Use the standard re module for non-dangerous patterns
                for match in re.finditer(input_data.pattern, txt, flags):
                    if match_count >= MAX_MATCHES:
                        break
                    if input_data.group <= len(match.groups()):
                        match_text = match.group(input_data.group)
                        # Limit match length to prevent memory exhaustion
                        if len(match_text) > MAX_MATCH_LENGTH:
                            match_text = match_text[:MAX_MATCH_LENGTH]
                        matches.append(match_text)
                        match_count += 1

            if not input_data.find_all:
                matches = matches[:1]

            for match in matches:
                yield "positive", match
            if not matches:
                yield "negative", input_data.text

            yield "matched_results", matches
            yield "matched_count", len(matches)
        except Exception:
            # Return empty results on any regex error
            yield "negative", input_data.text
            yield "matched_results", []
            yield "matched_count", 0


class FillTextTemplateBlock(Block):

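For readers unfamiliar with the third-party `regex` module used above, here is a minimal standalone sketch of the same two-layer guard: a heuristic screen for nested quantifiers, then a time-bounded search for patterns that fail the screen. The helper name `find_with_guard`, the 1-second budget, and the reduced pattern list are illustrative, not part of the commit; per the `regex` documentation, the module raises the built-in TimeoutError when the budget is exceeded.

import re

import regex  # third-party module with timeout support

# Heuristic subset of the screens used above, e.g. (a+)+ or (a*)*
NESTED_QUANTIFIERS = [r"\([^)]+\+\)\+", r"\([^)]+\*\)\*"]


def find_with_guard(pattern: str, text: str, budget_s: float = 1.0) -> list[str]:
    """Run a regex search, bounding the runtime for suspicious patterns."""
    if any(re.search(p, pattern) for p in NESTED_QUANTIFIERS):
        try:
            # `timeout` caps total search time, so catastrophic backtracking
            # is cut off instead of hanging the worker.
            return [m.group(0) for m in regex.finditer(pattern, text, timeout=budget_s)]
        except TimeoutError:
            return []  # treat a timed-out search as "no matches"
    return [m.group(0) for m in re.finditer(pattern, text)]


# "(a+)+b" against a long run of "a" with no "b" is the classic ReDoS input;
# the guarded path returns [] either way, instead of backtracking for minutes.
print(find_with_guard(r"(a+)+b", "a" * 40))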
@@ -270,13 +270,17 @@ class GetCurrentDateBlock(Block):
    test_output=[
        (
            "date",
            lambda t: abs(datetime.now() - datetime.strptime(t, "%Y-%m-%d"))
            < timedelta(days=8),  # 7 days difference + 1 day error margin.
            lambda t: abs(
                datetime.now().date() - datetime.strptime(t, "%Y-%m-%d").date()
            )
            <= timedelta(days=8),  # 7 days difference + 1 day error margin.
        ),
        (
            "date",
            lambda t: abs(datetime.now() - datetime.strptime(t, "%m/%d/%Y"))
            < timedelta(days=8),
            lambda t: abs(
                datetime.now().date() - datetime.strptime(t, "%m/%d/%Y").date()
            )
            <= timedelta(days=8),
            # 7 days difference + 1 day error margin.
        ),
        (
@@ -382,7 +386,7 @@ class GetCurrentDateAndTimeBlock(Block):
            lambda t: abs(
                datetime.now().date() - datetime.strptime(t, "%Y/%m/%d").date()
            )
            < timedelta(days=1),  # Date format only, no time component
            <= timedelta(days=1),  # Date format only, no time component
        ),
        (
            "date_time",

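The switch above from comparing datetimes to comparing dates fixes a subtle flakiness: `strptime` on a date-only string yields midnight, so subtracting it from `datetime.now()` silently adds the current time of day to the difference. A small standalone illustration (not from the commit):

from datetime import datetime, timedelta

parsed = datetime.strptime("2024-01-01", "%Y-%m-%d")  # midnight on Jan 1

# Includes the hours elapsed since midnight, so a "7 days ago" value can
# exceed timedelta(days=7) late in the day and trip a strict `<` check.
datetime_drift = abs(datetime.now() - parsed)

# Whole days only: stable regardless of what time the test runs.
date_drift = abs(datetime.now().date() - parsed.date())

print(datetime_drift, date_drift <= timedelta(days=8))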
@@ -26,6 +26,14 @@ class XMLParserBlock(Block):
    )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        # Security fix: Add size limits to prevent XML bomb attacks
        MAX_XML_SIZE = 10 * 1024 * 1024  # 10MB limit for XML input

        if len(input_data.input_xml) > MAX_XML_SIZE:
            raise ValueError(
                f"XML too large: {len(input_data.input_xml)} bytes > {MAX_XML_SIZE} bytes"
            )

        try:
            tokens = tokenize(input_data.input_xml)
            parser = Parser(tokens)

@@ -32,7 +32,15 @@ from backend.util import type as type_utils
from backend.util.json import SafeJson
from backend.util.models import Pagination

from .block import Block, BlockInput, BlockSchema, BlockType, get_block, get_blocks
from .block import (
    Block,
    BlockInput,
    BlockSchema,
    BlockType,
    EmptySchema,
    get_block,
    get_blocks,
)
from .db import BaseDbModel, query_raw_with_schema, transaction
from .includes import AGENT_GRAPH_INCLUDE, AGENT_NODE_INCLUDE

@@ -73,12 +81,15 @@ class Node(BaseDbModel):
    output_links: list[Link] = []

    @property
    def block(self) -> Block[BlockSchema, BlockSchema]:
    def block(self) -> "Block[BlockSchema, BlockSchema] | _UnknownBlockBase":
        """Get the block for this node. Returns UnknownBlock if the block is deleted/missing."""
        block = get_block(self.block_id)
        if not block:
            raise ValueError(
                f"Block #{self.block_id} does not exist -> Node #{self.id} is invalid"
            # Log a warning but don't raise an exception - return a placeholder block for deleted blocks
            logger.warning(
                f"Block #{self.block_id} does not exist for Node #{self.id} (deleted/missing block), using UnknownBlock"
            )
            return _UnknownBlockBase(self.block_id)
        return block


@@ -1316,3 +1327,34 @@ async def migrate_llm_models(migrate_to: LlmModel):
        id,
        path,
    )


# Simple placeholder class for deleted/missing blocks
class _UnknownBlockBase(Block):
    """
    Placeholder for deleted/missing blocks. It inherits from Block
    but uses a name that doesn't end with 'Block' to avoid auto-discovery.
    """

    def __init__(self, block_id: str = "00000000-0000-0000-0000-000000000000"):
        # Initialize with minimal valid Block parameters
        super().__init__(
            id=block_id,
            description=f"Unknown or deleted block (original ID: {block_id})",
            disabled=True,
            input_schema=EmptySchema,
            output_schema=EmptySchema,
            categories=set(),
            contributors=[],
            static_output=False,
            block_type=BlockType.STANDARD,
            webhook_config=None,
        )

    @property
    def name(self):
        return "UnknownBlock"

    async def run(self, input_data, **kwargs):
        """Always yield an error for missing blocks."""
        yield "error", f"Block {self.id} no longer exists"

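A quick illustrative check of the placeholder's contract (the UUID is made up, and the assertion on `disabled` assumes Block exposes the constructor argument as an attribute; run in an async context):

import asyncio

# Hypothetical id for a block that was deleted from the registry.
blk = _UnknownBlockBase("deadbeef-0000-0000-0000-000000000000")
assert blk.name == "UnknownBlock"

async def main() -> None:
    # Instead of Node.block raising ValueError and breaking graph loading,
    # executing the placeholder surfaces the problem as a normal error output.
    async for port, value in blk.run(None):
        print(port, value)  # -> error Block deadbeef-... no longer exists

asyncio.run(main())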
@@ -4,6 +4,7 @@ from typing import Any, Optional

import prisma
import pydantic
from autogpt_libs.utils.cache import cached
from prisma.enums import OnboardingStep
from prisma.models import UserOnboarding
from prisma.types import UserOnboardingCreateInput, UserOnboardingUpdateInput
@@ -374,8 +375,13 @@ async def get_recommended_agents(user_id: str) -> list[StoreAgentDetails]:
    ]


@cached(maxsize=1, ttl_seconds=300)  # Cache for 5 minutes since this rarely changes
async def onboarding_enabled() -> bool:
    """
    Check if onboarding should be enabled based on store agent count.
    Cached to prevent repeated slow database queries.
    """
    # Use a more efficient query that stops counting after finding enough agents
    count = await prisma.models.StoreAgent.prisma().count(take=MIN_AGENT_COUNT + 1)

    # Onboading is enabled if there are at least 2 agents in the store
    # Onboarding is enabled if there are at least 2 agents in the store
    return count >= MIN_AGENT_COUNT

@@ -4,7 +4,12 @@ Module for generating AI-based activity status for graph executions.

import json
import logging
from typing import TYPE_CHECKING, Any, NotRequired, TypedDict
from typing import TYPE_CHECKING, Any, TypedDict

try:
    from typing import NotRequired
except ImportError:
    from typing_extensions import NotRequired

from pydantic import SecretStr

@@ -146,17 +151,35 @@ async def generate_activity_status_for_execution(
            "Focus on the ACTUAL TASK the user wanted done, not the internal workflow steps. "
            "Avoid technical terms like 'workflow', 'execution', 'components', 'nodes', 'processing', etc. "
            "Keep it to 3 sentences maximum. Be conversational and human-friendly.\n\n"
            "UNDERSTAND THE INTENDED PURPOSE:\n"
            "- FIRST: Read the graph description carefully to understand what the user wanted to accomplish\n"
            "- The graph name and description tell you the main goal/intention of this automation\n"
            "- Use this intended purpose as your PRIMARY criteria for success/failure evaluation\n"
            "- Ask yourself: 'Did this execution actually accomplish what the graph was designed to do?'\n\n"
            "CRITICAL OUTPUT ANALYSIS:\n"
            "- Check if blocks that should produce user-facing results actually produced outputs\n"
            "- Blocks with names containing 'Output', 'Post', 'Create', 'Send', 'Publish', 'Generate' are usually meant to produce final results\n"
            "- If these critical blocks have NO outputs (empty recent_outputs), the task likely FAILED even if status shows 'completed'\n"
            "- Sub-agents (AgentExecutorBlock) that produce no outputs usually indicate failed sub-tasks\n"
            "- Most importantly: Does the execution result match what the graph description promised to deliver?\n\n"
            "SUCCESS EVALUATION BASED ON INTENTION:\n"
            "- If the graph is meant to 'create blog posts' → check if blog content was actually created\n"
            "- If the graph is meant to 'send emails' → check if emails were actually sent\n"
            "- If the graph is meant to 'analyze data' → check if analysis results were produced\n"
            "- If the graph is meant to 'generate reports' → check if reports were generated\n"
            "- Technical completion ≠ goal achievement. Focus on whether the USER'S INTENDED OUTCOME was delivered\n\n"
            "IMPORTANT: Be HONEST about what actually happened:\n"
            "- If the input was invalid/nonsensical, say so directly\n"
            "- If the task failed, explain what went wrong in simple terms\n"
            "- If errors occurred, focus on what the user needs to know\n"
            "- Only claim success if the task was genuinely completed\n"
            "- Don't sugar-coat failures or present them as helpful feedback\n\n"
            "- Only claim success if the INTENDED PURPOSE was genuinely accomplished AND produced expected outputs\n"
            "- Don't sugar-coat failures or present them as helpful feedback\n"
            "- ESPECIALLY: If the graph's main purpose wasn't achieved, this is a failure regardless of 'completed' status\n\n"
            "Understanding Errors:\n"
            "- Node errors: Individual steps may fail but the overall task might still complete (e.g., one data source fails but others work)\n"
            "- Graph error (in overall_status.graph_error): This means the entire execution failed and nothing was accomplished\n"
            "- Even if execution shows 'completed', check if critical nodes failed that would prevent the desired outcome\n"
            "- Focus on the end result the user wanted, not whether technical steps completed"
            "- Missing outputs from critical blocks: Even if no errors, this means the task failed to produce expected results\n"
            "- Focus on whether the graph's intended purpose was fulfilled, not whether technical steps completed"
        ),
    },
    {
@@ -165,15 +188,28 @@
            f"A user ran '{graph_name}' to accomplish something. Based on this execution data, "
            f"write what they achieved in simple, user-friendly terms:\n\n"
            f"{json.dumps(execution_data, indent=2)}\n\n"
            "CRITICAL: Check overall_status.graph_error FIRST - if present, the entire execution failed.\n"
            "Then check individual node errors to understand partial failures.\n\n"
            "ANALYSIS CHECKLIST:\n"
            "1. READ graph_info.description FIRST - this tells you what the user intended to accomplish\n"
            "2. Check overall_status.graph_error - if present, the entire execution failed\n"
            "3. Look for nodes with 'Output', 'Post', 'Create', 'Send', 'Publish', 'Generate' in their block_name\n"
            "4. Check if these critical blocks have empty recent_outputs arrays - this indicates failure\n"
            "5. Look for AgentExecutorBlock (sub-agents) with no outputs - this suggests sub-task failures\n"
            "6. Count how many nodes produced outputs vs total nodes - low ratio suggests problems\n"
            "7. MOST IMPORTANT: Does the execution outcome match what graph_info.description promised?\n\n"
            "INTENTION-BASED EVALUATION:\n"
            "- If description mentions 'blog writing' → did it create blog content?\n"
            "- If description mentions 'email automation' → were emails actually sent?\n"
            "- If description mentions 'data analysis' → were analysis results produced?\n"
            "- If description mentions 'content generation' → was content actually generated?\n"
            "- If description mentions 'social media posting' → were posts actually made?\n"
            "- Match the outputs to the stated intention, not just technical completion\n\n"
            "Write 1-3 sentences about what the user accomplished, such as:\n"
            "- 'I analyzed your resume and provided detailed feedback for the IT industry.'\n"
            "- 'I couldn't analyze your resume because the input was just nonsensical text.'\n"
            "- 'I failed to complete the task due to missing API access.'\n"
            "- 'I couldn't complete the task because critical steps failed to produce any results.'\n"
            "- 'I failed to generate the content you requested due to missing API access.'\n"
            "- 'I extracted key information from your documents and organized it into a summary.'\n"
            "- 'The task failed to run due to system configuration issues.'\n\n"
            "Focus on what ACTUALLY happened, not what was attempted."
            "- 'The task failed because the blog post creation step didn't produce any output.'\n\n"
            "BE CRITICAL: If the graph's intended purpose (from description) wasn't achieved, report this as a failure even if status is 'completed'."
        ),
    },
]
@@ -197,6 +233,7 @@
        logger.debug(
            f"Generated activity status for {graph_exec_id}: {activity_status}"
        )

        return activity_status

    except Exception as e:

@@ -66,6 +66,18 @@ async def store_media_file(
    base_path = Path(get_exec_file_path(graph_exec_id, ""))
    base_path.mkdir(parents=True, exist_ok=True)

    # Security fix: Add disk space limits to prevent DoS
    MAX_FILE_SIZE = 100 * 1024 * 1024  # 100MB per file
    MAX_TOTAL_DISK_USAGE = 1024 * 1024 * 1024  # 1GB total per execution directory

    # Check total disk usage in base_path
    if base_path.exists():
        current_usage = get_dir_size(base_path)
        if current_usage > MAX_TOTAL_DISK_USAGE:
            raise ValueError(
                f"Disk usage limit exceeded: {current_usage} bytes > {MAX_TOTAL_DISK_USAGE} bytes"
            )

    # Helper functions
    def _extension_from_mime(mime: str) -> str:
        ext = mimetypes.guess_extension(mime, strict=False)
@@ -108,6 +120,12 @@ async def store_media_file(
        filename = Path(path_part).name or f"{uuid.uuid4()}.bin"
        target_path = _ensure_inside_base(base_path / filename, base_path)

        # Check the file size limit
        if len(cloud_content) > MAX_FILE_SIZE:
            raise ValueError(
                f"File too large: {len(cloud_content)} bytes > {MAX_FILE_SIZE} bytes"
            )

        # Virus scan the cloud content before writing locally
        await scan_content_safe(cloud_content, filename=filename)
        target_path.write_bytes(cloud_content)
@@ -129,6 +147,12 @@ async def store_media_file(
        target_path = _ensure_inside_base(base_path / filename, base_path)
        content = base64.b64decode(b64_content)

        # Check the file size limit
        if len(content) > MAX_FILE_SIZE:
            raise ValueError(
                f"File too large: {len(content)} bytes > {MAX_FILE_SIZE} bytes"
            )

        # Virus scan the base64 content before writing
        await scan_content_safe(content, filename=filename)
        target_path.write_bytes(content)
@@ -142,6 +166,12 @@ async def store_media_file(
        # Download and save
        resp = await Requests().get(file)

        # Check the file size limit
        if len(resp.content) > MAX_FILE_SIZE:
            raise ValueError(
                f"File too large: {len(resp.content)} bytes > {MAX_FILE_SIZE} bytes"
            )

        # Virus scan the downloaded content before writing
        await scan_content_safe(resp.content, filename=filename)
        target_path.write_bytes(resp.content)
@@ -159,6 +189,18 @@ async def store_media_file(
    return MediaFileType(_strip_base_prefix(target_path, base_path))


def get_dir_size(path: Path) -> int:
    """Get the total size of a directory in bytes."""
    total = 0
    try:
        for entry in path.glob("**/*"):
            if entry.is_file():
                total += entry.stat().st_size
    except Exception:
        pass
    return total

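As a usage sketch (not part of the commit), the budget check above composes with get_dir_size like this; the helper name `has_room_for` and the `incoming` payload are illustrative:

from pathlib import Path

MAX_TOTAL_DISK_USAGE = 1024 * 1024 * 1024  # 1GB, mirroring the limit above

def has_room_for(base_path: Path, incoming: bytes) -> bool:
    # Reject the write if the directory is already over budget or the new
    # payload would push it over. get_dir_size walks the tree on each call,
    # so this is a simple O(files) guard rather than a real quota system.
    return get_dir_size(base_path) + len(incoming) <= MAX_TOTAL_DISK_USAGE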
def get_mime_type(file: str) -> str:
    """
    Get the MIME type of a file, whether it's a data URI, URL, or local path.

@@ -19,9 +19,48 @@ def _msg_tokens(msg: dict, enc) -> int:
    """
    OpenAI counts ≈3 wrapper tokens per chat message, plus 1 if "name"
    is present, plus the tokenised content length.
    For tool calls, we need to count tokens in tool_calls and content fields.
    """
    WRAPPER = 3 + (1 if "name" in msg else 0)
    return WRAPPER + _tok_len(msg.get("content") or "", enc)

    # Count content tokens
    content_tokens = _tok_len(msg.get("content") or "", enc)

    # Count tool call tokens for both OpenAI and Anthropic formats
    tool_call_tokens = 0

    # OpenAI format: tool_calls array at message level
    if "tool_calls" in msg and isinstance(msg["tool_calls"], list):
        for tool_call in msg["tool_calls"]:
            # Count the tool call structure tokens
            tool_call_tokens += _tok_len(tool_call.get("id", ""), enc)
            tool_call_tokens += _tok_len(tool_call.get("type", ""), enc)
            if "function" in tool_call:
                tool_call_tokens += _tok_len(tool_call["function"].get("name", ""), enc)
                tool_call_tokens += _tok_len(
                    tool_call["function"].get("arguments", ""), enc
                )

    # Anthropic format: tool_use within content array
    content = msg.get("content")
    if isinstance(content, list):
        for item in content:
            if isinstance(item, dict) and item.get("type") == "tool_use":
                # Count the tool use structure tokens
                tool_call_tokens += _tok_len(item.get("id", ""), enc)
                tool_call_tokens += _tok_len(item.get("name", ""), enc)
                tool_call_tokens += _tok_len(json.dumps(item.get("input", {})), enc)
            elif isinstance(item, dict) and item.get("type") == "tool_result":
                # Count tool result tokens
                tool_call_tokens += _tok_len(item.get("tool_use_id", ""), enc)
                tool_call_tokens += _tok_len(item.get("content", ""), enc)
            elif isinstance(item, dict) and "content" in item:
                # Other content types with a content field
                tool_call_tokens += _tok_len(item.get("content", ""), enc)
        # For list content, override content_tokens since we counted everything above
        content_tokens = 0

    return WRAPPER + content_tokens + tool_call_tokens


def _truncate_middle_tokens(text: str, enc, max_tok: int) -> str:

autogpt_platform/backend/backend/util/prompt_test.py (new file, 278 lines)
@@ -0,0 +1,278 @@
"""Tests for prompt utility functions, especially tool call token counting."""

import pytest
from tiktoken import encoding_for_model

from backend.util import json
from backend.util.prompt import _msg_tokens, estimate_token_count


class TestMsgTokens:
    """Test the _msg_tokens function with various message types."""

    @pytest.fixture
    def enc(self):
        """Get the encoding for the gpt-4o model."""
        return encoding_for_model("gpt-4o")

    def test_regular_message_token_counting(self, enc):
        """Test that regular messages are counted correctly (backward compatibility)."""
        msg = {"role": "user", "content": "What's the weather like in San Francisco?"}

        tokens = _msg_tokens(msg, enc)

        # Should be wrapper (3) + content tokens
        expected = 3 + len(enc.encode(msg["content"]))
        assert tokens == expected
        assert tokens > 3  # Has content

    def test_regular_message_with_name(self, enc):
        """Test that messages with a name field get an extra wrapper token."""
        msg = {"role": "user", "name": "test_user", "content": "Hello!"}

        tokens = _msg_tokens(msg, enc)

        # Should be wrapper (3 + 1 for name) + content tokens
        expected = 4 + len(enc.encode(msg["content"]))
        assert tokens == expected

    def test_openai_tool_call_token_counting(self, enc):
        """Test OpenAI-format tool call token counting."""
        msg = {
            "role": "assistant",
            "content": None,
            "tool_calls": [
                {
                    "id": "call_abc123",
                    "type": "function",
                    "function": {
                        "name": "get_weather",
                        "arguments": '{"location": "San Francisco", "unit": "celsius"}',
                    },
                }
            ],
        }

        tokens = _msg_tokens(msg, enc)

        # Should count wrapper + all tool call components
        expected_tool_tokens = (
            len(enc.encode("call_abc123"))
            + len(enc.encode("function"))
            + len(enc.encode("get_weather"))
            + len(enc.encode('{"location": "San Francisco", "unit": "celsius"}'))
        )
        expected = 3 + expected_tool_tokens  # wrapper + tool tokens

        assert tokens == expected
        assert tokens > 8  # Should be significantly more than just the wrapper

    def test_openai_multiple_tool_calls(self, enc):
        """Test OpenAI format with multiple tool calls."""
        msg = {
            "role": "assistant",
            "content": None,
            "tool_calls": [
                {
                    "id": "call_1",
                    "type": "function",
                    "function": {"name": "func1", "arguments": '{"arg": "value1"}'},
                },
                {
                    "id": "call_2",
                    "type": "function",
                    "function": {"name": "func2", "arguments": '{"arg": "value2"}'},
                },
            ],
        }

        tokens = _msg_tokens(msg, enc)

        # Should count all tool calls
        assert tokens > 20  # Should be more than a single tool call

    def test_anthropic_tool_use_token_counting(self, enc):
        """Test Anthropic-format tool use token counting."""
        msg = {
            "role": "assistant",
            "content": [
                {
                    "type": "tool_use",
                    "id": "toolu_xyz456",
                    "name": "get_weather",
                    "input": {"location": "San Francisco", "unit": "celsius"},
                }
            ],
        }

        tokens = _msg_tokens(msg, enc)

        # Should count wrapper + tool use components
        expected_tool_tokens = (
            len(enc.encode("toolu_xyz456"))
            + len(enc.encode("get_weather"))
            + len(
                enc.encode(json.dumps({"location": "San Francisco", "unit": "celsius"}))
            )
        )
        expected = 3 + expected_tool_tokens  # wrapper + tool tokens

        assert tokens == expected
        assert tokens > 8  # Should be significantly more than just the wrapper

    def test_anthropic_tool_result_token_counting(self, enc):
        """Test Anthropic-format tool result token counting."""
        msg = {
            "role": "user",
            "content": [
                {
                    "type": "tool_result",
                    "tool_use_id": "toolu_xyz456",
                    "content": "The weather in San Francisco is 22°C and sunny.",
                }
            ],
        }

        tokens = _msg_tokens(msg, enc)

        # Should count wrapper + tool result components
        expected_tool_tokens = len(enc.encode("toolu_xyz456")) + len(
            enc.encode("The weather in San Francisco is 22°C and sunny.")
        )
        expected = 3 + expected_tool_tokens  # wrapper + tool tokens

        assert tokens == expected
        assert tokens > 8  # Should be significantly more than just the wrapper

    def test_anthropic_mixed_content(self, enc):
        """Test Anthropic format with mixed content types."""
        msg = {
            "role": "assistant",
            "content": [
                {"type": "text", "content": "I'll check the weather for you."},
                {
                    "type": "tool_use",
                    "id": "toolu_123",
                    "name": "get_weather",
                    "input": {"location": "SF"},
                },
            ],
        }

        tokens = _msg_tokens(msg, enc)

        # Should count all content items
        assert tokens > 15  # Should count both the text and the tool use

    def test_empty_content(self, enc):
        """Test a message with empty or None content."""
        msg = {"role": "assistant", "content": None}

        tokens = _msg_tokens(msg, enc)
        assert tokens == 3  # Just wrapper tokens

        msg["content"] = ""
        tokens = _msg_tokens(msg, enc)
        assert tokens == 3  # Just wrapper tokens

    def test_string_content_with_tool_calls(self, enc):
        """Test the OpenAI format where content is a string but tool_calls exist."""
        msg = {
            "role": "assistant",
            "content": "Let me check that for you.",
            "tool_calls": [
                {
                    "id": "call_123",
                    "type": "function",
                    "function": {"name": "test_func", "arguments": "{}"},
                }
            ],
        }

        tokens = _msg_tokens(msg, enc)

        # Should count both the content and the tool calls
        content_tokens = len(enc.encode("Let me check that for you."))
        tool_tokens = (
            len(enc.encode("call_123"))
            + len(enc.encode("function"))
            + len(enc.encode("test_func"))
            + len(enc.encode("{}"))
        )
        expected = 3 + content_tokens + tool_tokens

        assert tokens == expected


class TestEstimateTokenCount:
    """Test the estimate_token_count function with conversations containing tool calls."""

    def test_conversation_with_tool_calls(self):
        """Test token counting for a complete conversation with tool calls."""
        conversation = [
            {"role": "user", "content": "What's the weather like in San Francisco?"},
            {
                "role": "assistant",
                "content": [
                    {
                        "type": "tool_use",
                        "id": "toolu_123",
                        "name": "get_weather",
                        "input": {"location": "San Francisco"},
                    }
                ],
            },
            {
                "role": "user",
                "content": [
                    {
                        "type": "tool_result",
                        "tool_use_id": "toolu_123",
                        "content": "22°C and sunny",
                    }
                ],
            },
            {
                "role": "assistant",
                "content": "The weather in San Francisco is 22°C and sunny.",
            },
        ]

        total_tokens = estimate_token_count(conversation)

        # Verify that the total equals the sum of the individual messages
        enc = encoding_for_model("gpt-4o")
        expected_total = sum(_msg_tokens(msg, enc) for msg in conversation)

        assert total_tokens == expected_total
        assert total_tokens > 40  # Should be substantial for this conversation

    def test_openai_conversation(self):
        """Test token counting for an OpenAI-format conversation."""
        conversation = [
            {"role": "user", "content": "Calculate 2 + 2"},
            {
                "role": "assistant",
                "tool_calls": [
                    {
                        "id": "call_calc",
                        "type": "function",
                        "function": {
                            "name": "calculate",
                            "arguments": '{"expression": "2 + 2"}',
                        },
                    }
                ],
            },
            {"role": "tool", "tool_call_id": "call_calc", "content": "4"},
            {"role": "assistant", "content": "The result is 4."},
        ]

        total_tokens = estimate_token_count(conversation)

        # Verify that the total equals the sum of the individual messages
        enc = encoding_for_model("gpt-4o")
        expected_total = sum(_msg_tokens(msg, enc) for msg in conversation)

        assert total_tokens == expected_total
        assert total_tokens > 20  # Should be substantial

autogpt_platform/backend/poetry.lock (generated, 284 lines changed)
@@ -1240,14 +1240,14 @@ tests = ["coverage", "coveralls", "dill", "mock", "nose"]

[[package]]
name = "faker"
version = "37.6.0"
version = "37.8.0"
description = "Faker is a Python package that generates fake data for you."
optional = false
python-versions = ">=3.9"
groups = ["dev"]
files = [
    {file = "faker-37.6.0-py3-none-any.whl", hash = "sha256:3c5209b23d7049d596a51db5d76403a0ccfea6fc294ffa2ecfef6a8843b1e6a7"},
    {file = "faker-37.6.0.tar.gz", hash = "sha256:0f8cc34f30095184adf87c3c24c45b38b33ad81c35ef6eb0a3118f301143012c"},
    {file = "faker-37.8.0-py3-none-any.whl", hash = "sha256:b08233118824423b5fc239f7dd51f145e7018082b4164f8da6a9994e1f1ae793"},
    {file = "faker-37.8.0.tar.gz", hash = "sha256:090bb5abbec2b30949a95ce1ba6b20d1d0ed222883d63483a0d4be4a970d6fb8"},
]

[package.dependencies]
@@ -1339,20 +1339,21 @@ packaging = ">=20"

[[package]]
name = "firecrawl-py"
version = "2.16.3"
version = "4.3.6"
description = "Python SDK for Firecrawl API"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
    {file = "firecrawl_py-2.16.3-py3-none-any.whl", hash = "sha256:94bb46af5e0df6c8ec414ac999a5355c0f5a46f15fd1cf5a02a3b31062db0aa8"},
    {file = "firecrawl_py-2.16.3.tar.gz", hash = "sha256:5fd063ef4acc4c4be62648f1e11467336bc127780b3afc28d39078a012e6a14c"},
    {file = "firecrawl_py-4.3.6-py3-none-any.whl", hash = "sha256:9b5dffdf5ed08fdbf0966f17e18c1a034d59f42a20b2bf9a6291a83190d7eb0f"},
    {file = "firecrawl_py-4.3.6.tar.gz", hash = "sha256:303827a86d0f6237a8ddcaa0bcdaa4c5ee11d9a4880b0685302b8d9a0e191ee0"},
]

[package.dependencies]
aiohttp = "*"
httpx = "*"
nest-asyncio = "*"
pydantic = "*"
pydantic = ">=2.0"
python-dotenv = "*"
requests = "*"
websockets = "*"
@@ -4912,14 +4913,14 @@ files = [

[[package]]
name = "pyright"
version = "1.1.404"
version = "1.1.406"
description = "Command line wrapper for pyright"
optional = false
python-versions = ">=3.7"
groups = ["dev"]
files = [
    {file = "pyright-1.1.404-py3-none-any.whl", hash = "sha256:c7b7ff1fdb7219c643079e4c3e7d4125f0dafcc19d253b47e898d130ea426419"},
    {file = "pyright-1.1.404.tar.gz", hash = "sha256:455e881a558ca6be9ecca0b30ce08aa78343ecc031d37a198ffa9a7a1abeb63e"},
    {file = "pyright-1.1.406-py3-none-any.whl", hash = "sha256:1d81fb43c2407bf566e97e57abb01c811973fdb21b2df8df59f870f688bdca71"},
    {file = "pyright-1.1.406.tar.gz", hash = "sha256:c4872bc58c9643dac09e8a2e74d472c62036910b3bd37a32813989ef7576ea2c"},
]

[package.dependencies]
@@ -4977,14 +4978,14 @@ testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"]

[[package]]
name = "pytest-mock"
version = "3.14.1"
version = "3.15.1"
description = "Thin-wrapper around the mock package for easier use with pytest"
optional = false
python-versions = ">=3.8"
python-versions = ">=3.9"
groups = ["dev"]
files = [
    {file = "pytest_mock-3.14.1-py3-none-any.whl", hash = "sha256:178aefcd11307d874b4cd3100344e7e2d888d9791a6a1d9bfe90fbc1b74fd1d0"},
    {file = "pytest_mock-3.14.1.tar.gz", hash = "sha256:159e9edac4c451ce77a5cdb9fc5d1100708d2dd4ba3c3df572f14097351af80e"},
    {file = "pytest_mock-3.15.1-py3-none-any.whl", hash = "sha256:0a25e2eb88fe5168d535041d09a4529a188176ae608a6d249ee65abc0949630d"},
    {file = "pytest_mock-3.15.1.tar.gz", hash = "sha256:1849a238f6f396da19762269de72cb1814ab44416fa73a8686deac10b0d87a0f"},
]

[package.dependencies]
@@ -5377,106 +5378,127 @@ typing-extensions = {version = ">=4.4.0", markers = "python_version < \"3.13\""}

[[package]]
name = "regex"
version = "2024.11.6"
version = "2025.9.18"
description = "Alternative regular expression module, to replace re."
optional = false
python-versions = ">=3.8"
python-versions = ">=3.9"
groups = ["main"]
files = [
    {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"},
    {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"},
    {file = "regex-2024.11.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:164d8b7b3b4bcb2068b97428060b2a53be050085ef94eca7f240e7947f1b080e"},
    {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3660c82f209655a06b587d55e723f0b813d3a7db2e32e5e7dc64ac2a9e86fde"},
    {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d22326fcdef5e08c154280b71163ced384b428343ae16a5ab2b3354aed12436e"},
    {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1ac758ef6aebfc8943560194e9fd0fa18bcb34d89fd8bd2af18183afd8da3a2"},
    {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:997d6a487ff00807ba810e0f8332c18b4eb8d29463cfb7c820dc4b6e7562d0cf"},
    {file = "regex-2024.11.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:02a02d2bb04fec86ad61f3ea7f49c015a0681bf76abb9857f945d26159d2968c"},
    {file = "regex-2024.11.6-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f02f93b92358ee3f78660e43b4b0091229260c5d5c408d17d60bf26b6c900e86"},
    {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:06eb1be98df10e81ebaded73fcd51989dcf534e3c753466e4b60c4697a003b67"},
    {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:040df6fe1a5504eb0f04f048e6d09cd7c7110fef851d7c567a6b6e09942feb7d"},
    {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabbfc59f2c6edba2a6622c647b716e34e8e3867e0ab975412c5c2f79b82da2"},
    {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:8447d2d39b5abe381419319f942de20b7ecd60ce86f16a23b0698f22e1b70008"},
    {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:da8f5fc57d1933de22a9e23eec290a0d8a5927a5370d24bda9a6abe50683fe62"},
    {file = "regex-2024.11.6-cp310-cp310-win32.whl", hash = "sha256:b489578720afb782f6ccf2840920f3a32e31ba28a4b162e13900c3e6bd3f930e"},
    {file = "regex-2024.11.6-cp310-cp310-win_amd64.whl", hash = "sha256:5071b2093e793357c9d8b2929dfc13ac5f0a6c650559503bb81189d0a3814519"},
    {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5478c6962ad548b54a591778e93cd7c456a7a29f8eca9c49e4f9a806dcc5d638"},
    {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2c89a8cc122b25ce6945f0423dc1352cb9593c68abd19223eebbd4e56612c5b7"},
    {file = "regex-2024.11.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:94d87b689cdd831934fa3ce16cc15cd65748e6d689f5d2b8f4f4df2065c9fa20"},
    {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1062b39a0a2b75a9c694f7a08e7183a80c63c0d62b301418ffd9c35f55aaa114"},
    {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:167ed4852351d8a750da48712c3930b031f6efdaa0f22fa1933716bfcd6bf4a3"},
    {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d548dafee61f06ebdb584080621f3e0c23fff312f0de1afc776e2a2ba99a74f"},
    {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a19f302cd1ce5dd01a9099aaa19cae6173306d1302a43b627f62e21cf18ac0"},
    {file = "regex-2024.11.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bec9931dfb61ddd8ef2ebc05646293812cb6b16b60cf7c9511a832b6f1854b55"},
    {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9714398225f299aa85267fd222f7142fcb5c769e73d7733344efc46f2ef5cf89"},
    {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:202eb32e89f60fc147a41e55cb086db2a3f8cb82f9a9a88440dcfc5d37faae8d"},
    {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:4181b814e56078e9b00427ca358ec44333765f5ca1b45597ec7446d3a1ef6e34"},
    {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:068376da5a7e4da51968ce4c122a7cd31afaaec4fccc7856c92f63876e57b51d"},
    {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f2c4184420d881a3475fb2c6f4d95d53a8d50209a2500723d831036f7c45"},
    {file = "regex-2024.11.6-cp311-cp311-win32.whl", hash = "sha256:c36f9b6f5f8649bb251a5f3f66564438977b7ef8386a52460ae77e6070d309d9"},
    {file = "regex-2024.11.6-cp311-cp311-win_amd64.whl", hash = "sha256:02e28184be537f0e75c1f9b2f8847dc51e08e6e171c6bde130b2687e0c33cf60"},
    {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:52fb28f528778f184f870b7cf8f225f5eef0a8f6e3778529bdd40c7b3920796a"},
    {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdd6028445d2460f33136c55eeb1f601ab06d74cb3347132e1c24250187500d9"},
    {file = "regex-2024.11.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:805e6b60c54bf766b251e94526ebad60b7de0c70f70a4e6210ee2891acb70bf2"},
    {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b85c2530be953a890eaffde05485238f07029600e8f098cdf1848d414a8b45e4"},
    {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb26437975da7dc36b7efad18aa9dd4ea569d2357ae6b783bf1118dabd9ea577"},
    {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:abfa5080c374a76a251ba60683242bc17eeb2c9818d0d30117b4486be10c59d3"},
    {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b7fa6606c2881c1db9479b0eaa11ed5dfa11c8d60a474ff0e095099f39d98e"},
    {file = "regex-2024.11.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c32f75920cf99fe6b6c539c399a4a128452eaf1af27f39bce8909c9a3fd8cbe"},
    {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:982e6d21414e78e1f51cf595d7f321dcd14de1f2881c5dc6a6e23bbbbd68435e"},
    {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a7c2155f790e2fb448faed6dd241386719802296ec588a8b9051c1f5c481bc29"},
    {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149f5008d286636e48cd0b1dd65018548944e495b0265b45e1bffecce1ef7f39"},
    {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:e5364a4502efca094731680e80009632ad6624084aff9a23ce8c8c6820de3e51"},
    {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0a86e7eeca091c09e021db8eb72d54751e527fa47b8d5787caf96d9831bd02ad"},
    {file = "regex-2024.11.6-cp312-cp312-win32.whl", hash = "sha256:32f9a4c643baad4efa81d549c2aadefaeba12249b2adc5af541759237eee1c54"},
    {file = "regex-2024.11.6-cp312-cp312-win_amd64.whl", hash = "sha256:a93c194e2df18f7d264092dc8539b8ffb86b45b899ab976aa15d48214138e81b"},
    {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a6ba92c0bcdf96cbf43a12c717eae4bc98325ca3730f6b130ffa2e3c3c723d84"},
    {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:525eab0b789891ac3be914d36893bdf972d483fe66551f79d3e27146191a37d4"},
    {file = "regex-2024.11.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:086a27a0b4ca227941700e0b31425e7a28ef1ae8e5e05a33826e17e47fbfdba0"},
    {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bde01f35767c4a7899b7eb6e823b125a64de314a8ee9791367c9a34d56af18d0"},
    {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b583904576650166b3d920d2bcce13971f6f9e9a396c673187f49811b2769dc7"},
    {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c4de13f06a0d54fa0d5ab1b7138bfa0d883220965a29616e3ea61b35d5f5fc7"},
    {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cde6e9f2580eb1665965ce9bf17ff4952f34f5b126beb509fee8f4e994f143c"},
    {file = "regex-2024.11.6-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d7f453dca13f40a02b79636a339c5b62b670141e63efd511d3f8f73fba162b3"},
    {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:59dfe1ed21aea057a65c6b586afd2a945de04fc7db3de0a6e3ed5397ad491b07"},
    {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b97c1e0bd37c5cd7902e65f410779d39eeda155800b65fc4d04cc432efa9bc6e"},
    {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f9d1e379028e0fc2ae3654bac3cbbef81bf3fd571272a42d56c24007979bafb6"},
    {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:13291b39131e2d002a7940fb176e120bec5145f3aeb7621be6534e46251912c4"},
    {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f51f88c126370dcec4908576c5a627220da6c09d0bff31cfa89f2523843316d"},
    {file = "regex-2024.11.6-cp313-cp313-win32.whl", hash = "sha256:63b13cfd72e9601125027202cad74995ab26921d8cd935c25f09c630436348ff"},
    {file = "regex-2024.11.6-cp313-cp313-win_amd64.whl", hash = "sha256:2b3361af3198667e99927da8b84c1b010752fa4b1115ee30beaa332cabc3ef1a"},
    {file = "regex-2024.11.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3a51ccc315653ba012774efca4f23d1d2a8a8f278a6072e29c7147eee7da446b"},
    {file = "regex-2024.11.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ad182d02e40de7459b73155deb8996bbd8e96852267879396fb274e8700190e3"},
    {file = "regex-2024.11.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba9b72e5643641b7d41fa1f6d5abda2c9a263ae835b917348fc3c928182ad467"},
    {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40291b1b89ca6ad8d3f2b82782cc33807f1406cf68c8d440861da6304d8ffbbd"},
    {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cdf58d0e516ee426a48f7b2c03a332a4114420716d55769ff7108c37a09951bf"},
    {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a36fdf2af13c2b14738f6e973aba563623cb77d753bbbd8d414d18bfaa3105dd"},
    {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1cee317bfc014c2419a76bcc87f071405e3966da434e03e13beb45f8aced1a6"},
    {file = "regex-2024.11.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50153825ee016b91549962f970d6a4442fa106832e14c918acd1c8e479916c4f"},
    {file = "regex-2024.11.6-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ea1bfda2f7162605f6e8178223576856b3d791109f15ea99a9f95c16a7636fb5"},
    {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:df951c5f4a1b1910f1a99ff42c473ff60f8225baa1cdd3539fe2819d9543e9df"},
    {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:072623554418a9911446278f16ecb398fb3b540147a7828c06e2011fa531e773"},
    {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f654882311409afb1d780b940234208a252322c24a93b442ca714d119e68086c"},
    {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:89d75e7293d2b3e674db7d4d9b1bee7f8f3d1609428e293771d1a962617150cc"},
    {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:f65557897fc977a44ab205ea871b690adaef6b9da6afda4790a2484b04293a5f"},
    {file = "regex-2024.11.6-cp38-cp38-win32.whl", hash = "sha256:6f44ec28b1f858c98d3036ad5d7d0bfc568bdd7a74f9c24e25f41ef1ebfd81a4"},
    {file = "regex-2024.11.6-cp38-cp38-win_amd64.whl", hash = "sha256:bb8f74f2f10dbf13a0be8de623ba4f9491faf58c24064f32b65679b021ed0001"},
    {file = "regex-2024.11.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5704e174f8ccab2026bd2f1ab6c510345ae8eac818b613d7d73e785f1310f839"},
    {file = "regex-2024.11.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:220902c3c5cc6af55d4fe19ead504de80eb91f786dc102fbd74894b1551f095e"},
    {file = "regex-2024.11.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e7e351589da0850c125f1600a4c4ba3c722efefe16b297de54300f08d734fbf"},
    {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5056b185ca113c88e18223183aa1a50e66507769c9640a6ff75859619d73957b"},
    {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e34b51b650b23ed3354b5a07aab37034d9f923db2a40519139af34f485f77d0"},
    {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5670bce7b200273eee1840ef307bfa07cda90b38ae56e9a6ebcc9f50da9c469b"},
    {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08986dce1339bc932923e7d1232ce9881499a0e02925f7402fb7c982515419ef"},
    {file = "regex-2024.11.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93c0b12d3d3bc25af4ebbf38f9ee780a487e8bf6954c115b9f015822d3bb8e48"},
    {file = "regex-2024.11.6-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:764e71f22ab3b305e7f4c21f1a97e1526a25ebdd22513e251cf376760213da13"},
    {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f056bf21105c2515c32372bbc057f43eb02aae2fda61052e2f7622c801f0b4e2"},
    {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:69ab78f848845569401469da20df3e081e6b5a11cb086de3eed1d48f5ed57c95"},
    {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:86fddba590aad9208e2fa8b43b4c098bb0ec74f15718bb6a704e3c63e2cef3e9"},
    {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:684d7a212682996d21ca12ef3c17353c021fe9de6049e19ac8481ec35574a70f"},
    {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a03e02f48cd1abbd9f3b7e3586d97c8f7a9721c436f51a5245b3b9483044480b"},
    {file = "regex-2024.11.6-cp39-cp39-win32.whl", hash = "sha256:41758407fc32d5c3c5de163888068cfee69cb4c2be844e7ac517a52770f9af57"},
    {file = "regex-2024.11.6-cp39-cp39-win_amd64.whl", hash = "sha256:b2837718570f95dd41675328e111345f9b7095d821bac435aac173ac80b19983"},
    {file = "regex-2024.11.6.tar.gz", hash = "sha256:7ab159b063c52a0333c884e4679f8d7a85112ee3078fe3d9004b2dd875585519"},
    {file = "regex-2025.9.18-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:12296202480c201c98a84aecc4d210592b2f55e200a1d193235c4db92b9f6788"},
    {file = "regex-2025.9.18-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:220381f1464a581f2ea988f2220cf2a67927adcef107d47d6897ba5a2f6d51a4"},
    {file = "regex-2025.9.18-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:87f681bfca84ebd265278b5daa1dcb57f4db315da3b5d044add7c30c10442e61"},
    {file = "regex-2025.9.18-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:34d674cbba70c9398074c8a1fcc1a79739d65d1105de2a3c695e2b05ea728251"},
    {file = "regex-2025.9.18-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:385c9b769655cb65ea40b6eea6ff763cbb6d69b3ffef0b0db8208e1833d4e746"},
    {file = "regex-2025.9.18-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8900b3208e022570ae34328712bef6696de0804c122933414014bae791437ab2"},
    {file = "regex-2025.9.18-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c204e93bf32cd7a77151d44b05eb36f469d0898e3fba141c026a26b79d9914a0"},
    {file = "regex-2025.9.18-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3acc471d1dd7e5ff82e6cacb3b286750decd949ecd4ae258696d04f019817ef8"},
    {file = "regex-2025.9.18-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6479d5555122433728760e5f29edb4c2b79655a8deb681a141beb5c8a025baea"},
    {file = "regex-2025.9.18-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:431bd2a8726b000eb6f12429c9b438a24062a535d06783a93d2bcbad3698f8a8"},
    {file = "regex-2025.9.18-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:0cc3521060162d02bd36927e20690129200e5ac9d2c6d32b70368870b122db25"},
    {file = "regex-2025.9.18-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a021217b01be2d51632ce056d7a837d3fa37c543ede36e39d14063176a26ae29"},
    {file = "regex-2025.9.18-cp310-cp310-win32.whl", hash = "sha256:4a12a06c268a629cb67cc1d009b7bb0be43e289d00d5111f86a2efd3b1949444"},
    {file = "regex-2025.9.18-cp310-cp310-win_amd64.whl", hash = "sha256:47acd811589301298c49db2c56bde4f9308d6396da92daf99cba781fa74aa450"},
    {file = "regex-2025.9.18-cp310-cp310-win_arm64.whl", hash = "sha256:16bd2944e77522275e5ee36f867e19995bcaa533dcb516753a26726ac7285442"},
    {file = "regex-2025.9.18-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:51076980cd08cd13c88eb7365427ae27f0d94e7cebe9ceb2bb9ffdae8fc4d82a"},
    {file = "regex-2025.9.18-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:828446870bd7dee4e0cbeed767f07961aa07f0ea3129f38b3ccecebc9742e0b8"},
    {file = "regex-2025.9.18-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c28821d5637866479ec4cc23b8c990f5bc6dd24e5e4384ba4a11d38a526e1414"},
    {file = "regex-2025.9.18-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:726177ade8e481db669e76bf99de0b278783be8acd11cef71165327abd1f170a"},
    {file = "regex-2025.9.18-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f5cca697da89b9f8ea44115ce3130f6c54c22f541943ac8e9900461edc2b8bd4"},
    {file = "regex-2025.9.18-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:dfbde38f38004703c35666a1e1c088b778e35d55348da2b7b278914491698d6a"},
    {file = "regex-2025.9.18-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f2f422214a03fab16bfa495cfec72bee4aaa5731843b771860a471282f1bf74f"},
    {file = "regex-2025.9.18-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a295916890f4df0902e4286bc7223ee7f9e925daa6dcdec4192364255b70561a"},
    {file = "regex-2025.9.18-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:5db95ff632dbabc8c38c4e82bf545ab78d902e81160e6e455598014f0abe66b9"},
    {file = "regex-2025.9.18-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:fb967eb441b0f15ae610b7069bdb760b929f267efbf522e814bbbfffdf125ce2"},
    {file = "regex-2025.9.18-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f04d2f20da4053d96c08f7fde6e1419b7ec9dbcee89c96e3d731fca77f411b95"},
    {file = "regex-2025.9.18-cp311-cp311-win32.whl", hash = "sha256:895197241fccf18c0cea7550c80e75f185b8bd55b6924fcae269a1a92c614a07"},
    {file = "regex-2025.9.18-cp311-cp311-win_amd64.whl", hash = "sha256:7e2b414deae99166e22c005e154a5513ac31493db178d8aec92b3269c9cce8c9"},
    {file = "regex-2025.9.18-cp311-cp311-win_arm64.whl", hash = "sha256:fb137ec7c5c54f34a25ff9b31f6b7b0c2757be80176435bf367111e3f71d72df"},
    {file = "regex-2025.9.18-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:436e1b31d7efd4dcd52091d076482031c611dde58bf9c46ca6d0a26e33053a7e"},
    {file = "regex-2025.9.18-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c190af81e5576b9c5fdc708f781a52ff20f8b96386c6e2e0557a78402b029f4a"},
    {file = "regex-2025.9.18-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e4121f1ce2b2b5eec4b397cc1b277686e577e658d8f5870b7eb2d726bd2300ab"},
    {file = "regex-2025.9.18-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:300e25dbbf8299d87205e821a201057f2ef9aa3deb29caa01cd2cac669e508d5"},
    {file = "regex-2025.9.18-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7b47fcf9f5316c0bdaf449e879407e1b9937a23c3b369135ca94ebc8d74b1742"},
    {file = "regex-2025.9.18-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:57a161bd3acaa4b513220b49949b07e252165e6b6dc910ee7617a37ff4f5b425"},
    {file = "regex-2025.9.18-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f130c3a7845ba42de42f380fff3c8aebe89a810747d91bcf56d40a069f15352"},
    {file = "regex-2025.9.18-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5f96fa342b6f54dcba928dd452e8d8cb9f0d63e711d1721cd765bb9f73bb048d"},
    {file = "regex-2025.9.18-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:0f0d676522d68c207828dcd01fb6f214f63f238c283d9f01d85fc664c7c85b56"},
    {file = "regex-2025.9.18-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:40532bff8a1a0621e7903ae57fce88feb2e8a9a9116d341701302c9302aef06e"},
    {file = "regex-2025.9.18-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:039f11b618ce8d71a1c364fdee37da1012f5a3e79b1b2819a9f389cd82fd6282"},
    {file = "regex-2025.9.18-cp312-cp312-win32.whl", hash = "sha256:e1dd06f981eb226edf87c55d523131ade7285137fbde837c34dc9d1bf309f459"},
    {file = "regex-2025.9.18-cp312-cp312-win_amd64.whl", hash = "sha256:3d86b5247bf25fa3715e385aa9ff272c307e0636ce0c9595f64568b41f0a9c77"},
    {file = "regex-2025.9.18-cp312-cp312-win_arm64.whl", hash = "sha256:032720248cbeeae6444c269b78cb15664458b7bb9ed02401d3da59fe4d68c3a5"},
    {file = "regex-2025.9.18-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2a40f929cd907c7e8ac7566ac76225a77701a6221bca937bdb70d56cb61f57b2"},
    {file = "regex-2025.9.18-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c90471671c2cdf914e58b6af62420ea9ecd06d1554d7474d50133ff26ae88feb"},
    {file = "regex-2025.9.18-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1a351aff9e07a2dabb5022ead6380cff17a4f10e4feb15f9100ee56c4d6d06af"},
    {file = "regex-2025.9.18-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bc4b8e9d16e20ddfe16430c23468a8707ccad3365b06d4536142e71823f3ca29"},
    {file = "regex-2025.9.18-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4b8cdbddf2db1c5e80338ba2daa3cfa3dec73a46fff2a7dda087c8efbf12d62f"},
{file = "regex-2025.9.18-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4b8cdbddf2db1c5e80338ba2daa3cfa3dec73a46fff2a7dda087c8efbf12d62f"},
|
||||
{file = "regex-2025.9.18-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a276937d9d75085b2c91fb48244349c6954f05ee97bba0963ce24a9d915b8b68"},
|
||||
{file = "regex-2025.9.18-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:92a8e375ccdc1256401c90e9dc02b8642894443d549ff5e25e36d7cf8a80c783"},
|
||||
{file = "regex-2025.9.18-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0dc6893b1f502d73037cf807a321cdc9be29ef3d6219f7970f842475873712ac"},
|
||||
{file = "regex-2025.9.18-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:a61e85bfc63d232ac14b015af1261f826260c8deb19401c0597dbb87a864361e"},
|
||||
{file = "regex-2025.9.18-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:1ef86a9ebc53f379d921fb9a7e42b92059ad3ee800fcd9e0fe6181090e9f6c23"},
|
||||
{file = "regex-2025.9.18-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d3bc882119764ba3a119fbf2bd4f1b47bc56c1da5d42df4ed54ae1e8e66fdf8f"},
|
||||
{file = "regex-2025.9.18-cp313-cp313-win32.whl", hash = "sha256:3810a65675845c3bdfa58c3c7d88624356dd6ee2fc186628295e0969005f928d"},
|
||||
{file = "regex-2025.9.18-cp313-cp313-win_amd64.whl", hash = "sha256:16eaf74b3c4180ede88f620f299e474913ab6924d5c4b89b3833bc2345d83b3d"},
|
||||
{file = "regex-2025.9.18-cp313-cp313-win_arm64.whl", hash = "sha256:4dc98ba7dd66bd1261927a9f49bd5ee2bcb3660f7962f1ec02617280fc00f5eb"},
|
||||
{file = "regex-2025.9.18-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:fe5d50572bc885a0a799410a717c42b1a6b50e2f45872e2b40f4f288f9bce8a2"},
|
||||
{file = "regex-2025.9.18-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:1b9d9a2d6cda6621551ca8cf7a06f103adf72831153f3c0d982386110870c4d3"},
|
||||
{file = "regex-2025.9.18-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:13202e4c4ac0ef9a317fff817674b293c8f7e8c68d3190377d8d8b749f566e12"},
|
||||
{file = "regex-2025.9.18-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:874ff523b0fecffb090f80ae53dc93538f8db954c8bb5505f05b7787ab3402a0"},
|
||||
{file = "regex-2025.9.18-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:d13ab0490128f2bb45d596f754148cd750411afc97e813e4b3a61cf278a23bb6"},
|
||||
{file = "regex-2025.9.18-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:05440bc172bc4b4b37fb9667e796597419404dbba62e171e1f826d7d2a9ebcef"},
|
||||
{file = "regex-2025.9.18-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5514b8e4031fdfaa3d27e92c75719cbe7f379e28cacd939807289bce76d0e35a"},
|
||||
{file = "regex-2025.9.18-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:65d3c38c39efce73e0d9dc019697b39903ba25b1ad45ebbd730d2cf32741f40d"},
|
||||
{file = "regex-2025.9.18-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:ae77e447ebc144d5a26d50055c6ddba1d6ad4a865a560ec7200b8b06bc529368"},
|
||||
{file = "regex-2025.9.18-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:e3ef8cf53dc8df49d7e28a356cf824e3623764e9833348b655cfed4524ab8a90"},
|
||||
{file = "regex-2025.9.18-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:9feb29817df349c976da9a0debf775c5c33fc1c8ad7b9f025825da99374770b7"},
|
||||
{file = "regex-2025.9.18-cp313-cp313t-win32.whl", hash = "sha256:168be0d2f9b9d13076940b1ed774f98595b4e3c7fc54584bba81b3cc4181742e"},
|
||||
{file = "regex-2025.9.18-cp313-cp313t-win_amd64.whl", hash = "sha256:d59ecf3bb549e491c8104fea7313f3563c7b048e01287db0a90485734a70a730"},
|
||||
{file = "regex-2025.9.18-cp313-cp313t-win_arm64.whl", hash = "sha256:dbef80defe9fb21310948a2595420b36c6d641d9bea4c991175829b2cc4bc06a"},
|
||||
{file = "regex-2025.9.18-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:c6db75b51acf277997f3adcd0ad89045d856190d13359f15ab5dda21581d9129"},
|
||||
{file = "regex-2025.9.18-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8f9698b6f6895d6db810e0bda5364f9ceb9e5b11328700a90cae573574f61eea"},
|
||||
{file = "regex-2025.9.18-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:29cd86aa7cb13a37d0f0d7c21d8d949fe402ffa0ea697e635afedd97ab4b69f1"},
|
||||
{file = "regex-2025.9.18-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7c9f285a071ee55cd9583ba24dde006e53e17780bb309baa8e4289cd472bcc47"},
|
||||
{file = "regex-2025.9.18-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5adf266f730431e3be9021d3e5b8d5ee65e563fec2883ea8093944d21863b379"},
|
||||
{file = "regex-2025.9.18-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:1137cabc0f38807de79e28d3f6e3e3f2cc8cfb26bead754d02e6d1de5f679203"},
|
||||
{file = "regex-2025.9.18-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7cc9e5525cada99699ca9223cce2d52e88c52a3d2a0e842bd53de5497c604164"},
|
||||
{file = "regex-2025.9.18-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:bbb9246568f72dce29bcd433517c2be22c7791784b223a810225af3b50d1aafb"},
|
||||
{file = "regex-2025.9.18-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:6a52219a93dd3d92c675383efff6ae18c982e2d7651c792b1e6d121055808743"},
|
||||
{file = "regex-2025.9.18-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:ae9b3840c5bd456780e3ddf2f737ab55a79b790f6409182012718a35c6d43282"},
|
||||
{file = "regex-2025.9.18-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d488c236ac497c46a5ac2005a952c1a0e22a07be9f10c3e735bc7d1209a34773"},
|
||||
{file = "regex-2025.9.18-cp314-cp314-win32.whl", hash = "sha256:0c3506682ea19beefe627a38872d8da65cc01ffa25ed3f2e422dffa1474f0788"},
|
||||
{file = "regex-2025.9.18-cp314-cp314-win_amd64.whl", hash = "sha256:57929d0f92bebb2d1a83af372cd0ffba2263f13f376e19b1e4fa32aec4efddc3"},
|
||||
{file = "regex-2025.9.18-cp314-cp314-win_arm64.whl", hash = "sha256:6a4b44df31d34fa51aa5c995d3aa3c999cec4d69b9bd414a8be51984d859f06d"},
|
||||
{file = "regex-2025.9.18-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:b176326bcd544b5e9b17d6943f807697c0cb7351f6cfb45bf5637c95ff7e6306"},
|
||||
{file = "regex-2025.9.18-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:0ffd9e230b826b15b369391bec167baed57c7ce39efc35835448618860995946"},
|
||||
{file = "regex-2025.9.18-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:ec46332c41add73f2b57e2f5b642f991f6b15e50e9f86285e08ffe3a512ac39f"},
|
||||
{file = "regex-2025.9.18-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b80fa342ed1ea095168a3f116637bd1030d39c9ff38dc04e54ef7c521e01fc95"},
|
||||
{file = "regex-2025.9.18-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f4d97071c0ba40f0cf2a93ed76e660654c399a0a04ab7d85472239460f3da84b"},
|
||||
{file = "regex-2025.9.18-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0ac936537ad87cef9e0e66c5144484206c1354224ee811ab1519a32373e411f3"},
|
||||
{file = "regex-2025.9.18-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dec57f96d4def58c422d212d414efe28218d58537b5445cf0c33afb1b4768571"},
|
||||
{file = "regex-2025.9.18-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:48317233294648bf7cd068857f248e3a57222259a5304d32c7552e2284a1b2ad"},
|
||||
{file = "regex-2025.9.18-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:274687e62ea3cf54846a9b25fc48a04459de50af30a7bd0b61a9e38015983494"},
|
||||
{file = "regex-2025.9.18-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:a78722c86a3e7e6aadf9579e3b0ad78d955f2d1f1a8ca4f67d7ca258e8719d4b"},
|
||||
{file = "regex-2025.9.18-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:06104cd203cdef3ade989a1c45b6215bf42f8b9dd705ecc220c173233f7cba41"},
|
||||
{file = "regex-2025.9.18-cp314-cp314t-win32.whl", hash = "sha256:2e1eddc06eeaffd249c0adb6fafc19e2118e6308c60df9db27919e96b5656096"},
|
||||
{file = "regex-2025.9.18-cp314-cp314t-win_amd64.whl", hash = "sha256:8620d247fb8c0683ade51217b459cb4a1081c0405a3072235ba43a40d355c09a"},
|
||||
{file = "regex-2025.9.18-cp314-cp314t-win_arm64.whl", hash = "sha256:b7531a8ef61de2c647cdf68b3229b071e46ec326b3138b2180acb4275f470b01"},
|
||||
{file = "regex-2025.9.18-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3dbcfcaa18e9480669030d07371713c10b4f1a41f791ffa5cb1a99f24e777f40"},
|
||||
{file = "regex-2025.9.18-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1e85f73ef7095f0380208269055ae20524bfde3f27c5384126ddccf20382a638"},
|
||||
{file = "regex-2025.9.18-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9098e29b3ea4ffffeade423f6779665e2a4f8db64e699c0ed737ef0db6ba7b12"},
|
||||
{file = "regex-2025.9.18-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90b6b7a2d0f45b7ecaaee1aec6b362184d6596ba2092dd583ffba1b78dd0231c"},
|
||||
{file = "regex-2025.9.18-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c81b892af4a38286101502eae7aec69f7cd749a893d9987a92776954f3943408"},
|
||||
{file = "regex-2025.9.18-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3b524d010973f2e1929aeb635418d468d869a5f77b52084d9f74c272189c251d"},
|
||||
{file = "regex-2025.9.18-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6b498437c026a3d5d0be0020023ff76d70ae4d77118e92f6f26c9d0423452446"},
|
||||
{file = "regex-2025.9.18-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0716e4d6e58853d83f6563f3cf25c281ff46cf7107e5f11879e32cb0b59797d9"},
|
||||
{file = "regex-2025.9.18-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:065b6956749379d41db2625f880b637d4acc14c0a4de0d25d609a62850e96d36"},
|
||||
{file = "regex-2025.9.18-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:d4a691494439287c08ddb9b5793da605ee80299dd31e95fa3f323fac3c33d9d4"},
|
||||
{file = "regex-2025.9.18-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ef8d10cc0989565bcbe45fb4439f044594d5c2b8919d3d229ea2c4238f1d55b0"},
|
||||
{file = "regex-2025.9.18-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:4baeb1b16735ac969a7eeecc216f1f8b7caf60431f38a2671ae601f716a32d25"},
|
||||
{file = "regex-2025.9.18-cp39-cp39-win32.whl", hash = "sha256:8e5f41ad24a1e0b5dfcf4c4e5d9f5bd54c895feb5708dd0c1d0d35693b24d478"},
|
||||
{file = "regex-2025.9.18-cp39-cp39-win_amd64.whl", hash = "sha256:50e8290707f2fb8e314ab3831e594da71e062f1d623b05266f8cfe4db4949afd"},
|
||||
{file = "regex-2025.9.18-cp39-cp39-win_arm64.whl", hash = "sha256:039a9d7195fd88c943d7c777d4941e8ef736731947becce773c31a1009cb3c35"},
|
||||
{file = "regex-2025.9.18.tar.gz", hash = "sha256:c5ba23274c61c6fef447ba6a39333297d0c247f53059dba0bca415cac511edc4"},
|
||||
]
|
||||
|
||||
[[package]]
@@ -5743,31 +5765,31 @@ pyasn1 = ">=0.1.3"

[[package]]
name = "ruff"
version = "0.12.11"
version = "0.13.3"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
groups = ["dev"]
files = [
    {file = "ruff-0.12.11-py3-none-linux_armv6l.whl", hash = "sha256:93fce71e1cac3a8bf9200e63a38ac5c078f3b6baebffb74ba5274fb2ab276065"},
    {file = "ruff-0.12.11-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b8e33ac7b28c772440afa80cebb972ffd823621ded90404f29e5ab6d1e2d4b93"},
    {file = "ruff-0.12.11-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d69fb9d4937aa19adb2e9f058bc4fbfe986c2040acb1a4a9747734834eaa0bfd"},
    {file = "ruff-0.12.11-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:411954eca8464595077a93e580e2918d0a01a19317af0a72132283e28ae21bee"},
    {file = "ruff-0.12.11-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6a2c0a2e1a450f387bf2c6237c727dd22191ae8c00e448e0672d624b2bbd7fb0"},
    {file = "ruff-0.12.11-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ca4c3a7f937725fd2413c0e884b5248a19369ab9bdd850b5781348ba283f644"},
    {file = "ruff-0.12.11-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:4d1df0098124006f6a66ecf3581a7f7e754c4df7644b2e6704cd7ca80ff95211"},
    {file = "ruff-0.12.11-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5a8dd5f230efc99a24ace3b77e3555d3fbc0343aeed3fc84c8d89e75ab2ff793"},
    {file = "ruff-0.12.11-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4dc75533039d0ed04cd33fb8ca9ac9620b99672fe7ff1533b6402206901c34ee"},
    {file = "ruff-0.12.11-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4fc58f9266d62c6eccc75261a665f26b4ef64840887fc6cbc552ce5b29f96cc8"},
    {file = "ruff-0.12.11-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:5a0113bd6eafd545146440225fe60b4e9489f59eb5f5f107acd715ba5f0b3d2f"},
    {file = "ruff-0.12.11-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:0d737b4059d66295c3ea5720e6efc152623bb83fde5444209b69cd33a53e2000"},
    {file = "ruff-0.12.11-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:916fc5defee32dbc1fc1650b576a8fed68f5e8256e2180d4d9855aea43d6aab2"},
    {file = "ruff-0.12.11-py3-none-musllinux_1_2_i686.whl", hash = "sha256:c984f07d7adb42d3ded5be894fb4007f30f82c87559438b4879fe7aa08c62b39"},
    {file = "ruff-0.12.11-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:e07fbb89f2e9249f219d88331c833860489b49cdf4b032b8e4432e9b13e8a4b9"},
    {file = "ruff-0.12.11-py3-none-win32.whl", hash = "sha256:c792e8f597c9c756e9bcd4d87cf407a00b60af77078c96f7b6366ea2ce9ba9d3"},
    {file = "ruff-0.12.11-py3-none-win_amd64.whl", hash = "sha256:a3283325960307915b6deb3576b96919ee89432ebd9c48771ca12ee8afe4a0fd"},
    {file = "ruff-0.12.11-py3-none-win_arm64.whl", hash = "sha256:bae4d6e6a2676f8fb0f98b74594a048bae1b944aab17e9f5d504062303c6dbea"},
    {file = "ruff-0.12.11.tar.gz", hash = "sha256:c6b09ae8426a65bbee5425b9d0b82796dbb07cb1af045743c79bfb163001165d"},
    {file = "ruff-0.13.3-py3-none-linux_armv6l.whl", hash = "sha256:311860a4c5e19189c89d035638f500c1e191d283d0cc2f1600c8c80d6dcd430c"},
    {file = "ruff-0.13.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:2bdad6512fb666b40fcadb65e33add2b040fc18a24997d2e47fee7d66f7fcae2"},
    {file = "ruff-0.13.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:fc6fa4637284708d6ed4e5e970d52fc3b76a557d7b4e85a53013d9d201d93286"},
    {file = "ruff-0.13.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c9e6469864f94a98f412f20ea143d547e4c652f45e44f369d7b74ee78185838"},
    {file = "ruff-0.13.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5bf62b705f319476c78891e0e97e965b21db468b3c999086de8ffb0d40fd2822"},
    {file = "ruff-0.13.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78cc1abed87ce40cb07ee0667ce99dbc766c9f519eabfd948ed87295d8737c60"},
    {file = "ruff-0.13.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:4fb75e7c402d504f7a9a259e0442b96403fa4a7310ffe3588d11d7e170d2b1e3"},
    {file = "ruff-0.13.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:17b951f9d9afb39330b2bdd2dd144ce1c1335881c277837ac1b50bfd99985ed3"},
    {file = "ruff-0.13.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6052f8088728898e0a449f0dde8fafc7ed47e4d878168b211977e3e7e854f662"},
    {file = "ruff-0.13.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc742c50f4ba72ce2a3be362bd359aef7d0d302bf7637a6f942eaa763bd292af"},
    {file = "ruff-0.13.3-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:8e5640349493b378431637019366bbd73c927e515c9c1babfea3e932f5e68e1d"},
    {file = "ruff-0.13.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:6b139f638a80eae7073c691a5dd8d581e0ba319540be97c343d60fb12949c8d0"},
    {file = "ruff-0.13.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:6b547def0a40054825de7cfa341039ebdfa51f3d4bfa6a0772940ed351d2746c"},
    {file = "ruff-0.13.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9cc48a3564423915c93573f1981d57d101e617839bef38504f85f3677b3a0a3e"},
    {file = "ruff-0.13.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:1a993b17ec03719c502881cb2d5f91771e8742f2ca6de740034433a97c561989"},
    {file = "ruff-0.13.3-py3-none-win32.whl", hash = "sha256:f14e0d1fe6460f07814d03c6e32e815bff411505178a1f539a38f6097d3e8ee3"},
    {file = "ruff-0.13.3-py3-none-win_amd64.whl", hash = "sha256:621e2e5812b691d4f244638d693e640f188bacbb9bc793ddd46837cea0503dd2"},
    {file = "ruff-0.13.3-py3-none-win_arm64.whl", hash = "sha256:9e9e9d699841eaf4c2c798fa783df2fabc680b72059a02ca0ed81c460bc58330"},
    {file = "ruff-0.13.3.tar.gz", hash = "sha256:5b0ba0db740eefdfbcce4299f49e9eaefc643d4d007749d77d047c2bab19908e"},
]

[[package]]
@@ -7252,4 +7274,4 @@ cffi = ["cffi (>=1.11)"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.14"
content-hash = "b2363edeebb91f410039c8d4b563f683c1edb0cf4bda4f3e6c287040e93639bc"
content-hash = "ff0f6f8d90793ea95f1f7008f7c845432ff46fca0937d5068b4f7cfec0ee7674"

@@ -56,6 +56,7 @@ pytest-asyncio = "^1.1.0"
python-dotenv = "^1.1.1"
python-multipart = "^0.0.20"
redis = "^6.2.0"
regex = "^2025.9.18"
replicate = "^1.0.6"
sentry-sdk = {extras = ["anthropic", "fastapi", "launchdarkly", "openai", "sqlalchemy"], version = "^2.33.2"}
sqlalchemy = "^2.0.40"
@@ -77,7 +78,7 @@ aioclamd = "^1.0.0"
setuptools = "^80.9.0"
gcloud-aio-storage = "^9.5.0"
pandas = "^2.3.1"
firecrawl-py = "^2.16.3"
firecrawl-py = "^4.3.6"
exa-py = "^1.14.20"
croniter = "^6.0.0"
stagehand = "^0.5.1"
@@ -85,16 +86,16 @@ stagehand = "^0.5.1"
[tool.poetry.group.dev.dependencies]
aiohappyeyeballs = "^2.6.1"
black = "^24.10.0"
faker = "^37.6.0"
faker = "^37.8.0"
httpx = "^0.28.1"
isort = "^5.13.2"
poethepoet = "^0.37.0"
pre-commit = "^4.3.0"
pyright = "^1.1.404"
pytest-mock = "^3.14.0"
pyright = "^1.1.406"
pytest-mock = "^3.15.1"
pytest-watcher = "^0.4.2"
requests = "^2.32.5"
ruff = "^0.12.11"
ruff = "^0.13.3"
# NOTE: please insert new dependencies in their alphabetical location

[build-system]

@@ -117,6 +117,11 @@ services:
      - "8006:8006"
    networks:
      - app-network
    logging:
      driver: json-file
      options:
        max-size: "10m"
        max-file: "3"

  executor:
    build:
@@ -147,6 +152,11 @@ services:
      - "8002:8002"
    networks:
      - app-network
    logging:
      driver: json-file
      options:
        max-size: "10m"
        max-file: "3"

  websocket_server:
    build:
@@ -175,6 +185,11 @@ services:
      - "8001:8001"
    networks:
      - app-network
    logging:
      driver: json-file
      options:
        max-size: "10m"
        max-file: "3"

  database_manager:
    build:
@@ -199,6 +214,11 @@ services:
      - "8005:8005"
    networks:
      - app-network
    logging:
      driver: json-file
      options:
        max-size: "10m"
        max-file: "3"

  scheduler_server:
    build:
@@ -242,6 +262,11 @@ services:
      - "8003:8003"
    networks:
      - app-network
    logging:
      driver: json-file
      options:
        max-size: "10m"
        max-file: "3"

  notification_server:
    build:
@@ -270,6 +295,11 @@ services:
      - "8007:8007"
    networks:
      - app-network
    logging:
      driver: json-file
      options:
        max-size: "10m"
        max-file: "3"
  frontend:
    build:
      context: ../
@@ -286,6 +316,11 @@ services:
      - "3000:3000"
    networks:
      - app-network
    logging:
      driver: json-file
      options:
        max-size: "10m"
        max-file: "3"
    # Load environment variables in order (later overrides earlier)
    env_file:
      - path: ./frontend/.env.default # Base defaults (always exists)

@@ -31,7 +31,7 @@ export default function AdminLayout({
  children: React.ReactNode;
}) {
  return (
    <div className="flex min-h-screen w-screen flex-col lg:flex-row">
    <div className="flex min-h-screen w-full flex-col lg:flex-row">
      <Sidebar linkGroups={sidebarLinkGroups} />
      <div className="flex-1 pl-4">{children}</div>
    </div>

@@ -11,25 +11,12 @@ async function shouldShowOnboarding() {
  );
}

// Validate redirect URL to prevent open redirect attacks
function validateRedirectUrl(url: string): string {
  // Only allow relative URLs that start with /
  if (url.startsWith("/") && !url.startsWith("//")) {
    return url;
  }
  // Default to home page for any invalid URLs
  return "/";
}

// Handle the callback to complete the user session login
export async function GET(request: Request) {
  const { searchParams, origin } = new URL(request.url);
  const code = searchParams.get("code");

  // if "next" is in param, use it as the redirect URL
  const nextParam = searchParams.get("next") ?? "/";
  // Validate redirect URL to prevent open redirect attacks
  let next = validateRedirectUrl(nextParam);
  let next = "/marketplace";

  if (code) {
    const supabase = await getServerSupabase();
@@ -39,7 +26,7 @@ export async function GET(request: Request) {
    }

    const { error } = await supabase.auth.exchangeCodeForSession(code);
    // data.session?.refresh_token is available if you need to store it for later use

    if (!error) {
      try {
        const api = new BackendAPI();
@@ -53,7 +40,45 @@ export async function GET(request: Request) {
        }
      } catch (createUserError) {
        console.error("Error creating user:", createUserError);
        // Continue with redirect even if createUser fails

        // Handle ApiError from the backend API client
        if (
          createUserError &&
          typeof createUserError === "object" &&
          "status" in createUserError
        ) {
          const apiError = createUserError as any;

          if (apiError.status === 401) {
            // Authentication issues - token missing/invalid
            return NextResponse.redirect(
              `${origin}/error?message=auth-token-invalid`,
            );
          } else if (apiError.status >= 500) {
            // Server/database errors
            return NextResponse.redirect(
              `${origin}/error?message=server-error`,
            );
          } else if (apiError.status === 429) {
            // Rate limiting
            return NextResponse.redirect(
              `${origin}/error?message=rate-limited`,
            );
          }
        }

        // Handle network/fetch errors
        if (
          createUserError instanceof TypeError &&
          createUserError.message.includes("fetch")
        ) {
          return NextResponse.redirect(`${origin}/error?message=network-error`);
        }

        // Generic user creation failure
        return NextResponse.redirect(
          `${origin}/error?message=user-creation-failed`,
        );
      }

      const forwardedHost = request.headers.get("x-forwarded-host"); // original origin before load balancer

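Context for the validation rule being dropped above: a bare startsWith("/") check is not enough on its own, because protocol-relative URLs such as //evil.example also begin with a slash yet navigate off-origin. A minimal standalone sketch of that rule, with hypothetical inputs that are not part of the commit:

// Sketch of the open-redirect guard the removed helper implemented.
function isSafeRedirect(url: string): boolean {
  return url.startsWith("/") && !url.startsWith("//");
}

isSafeRedirect("/settings"); // true  - same-origin path
isSafeRedirect("//evil.example"); // false - protocol-relative, leaves the origin
isSafeRedirect("https://evil.example"); // false - absolute URL
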
autogpt_platform/frontend/src/app/(platform)/error.tsx (new file, 55 lines)
@@ -0,0 +1,55 @@
"use client";

import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { getErrorDetails } from "./error/helpers";
import { useSearchParams } from "next/navigation";
import { Suspense } from "react";

function ErrorPageContent() {
  const searchParams = useSearchParams();
  const errorMessage = searchParams.get("message");

  const errorDetails = getErrorDetails(errorMessage);

  function handleRetry() {
    if (
      errorMessage === "user-creation-failed" ||
      errorMessage === "auth-failed"
    ) {
      window.location.href = "/login";
    } else {
      window.location.href = "/marketplace";
    }
  }

  return (
    <div className="flex min-h-screen items-center justify-center bg-gray-50 px-4 py-12 sm:px-6 lg:px-8">
      <div className="w-full max-w-md">
        <ErrorCard
          responseError={errorDetails.responseError}
          context={errorDetails.context}
          onRetry={handleRetry}
        />
      </div>
    </div>
  );
}

export default function ErrorPage() {
  return (
    <Suspense
      fallback={
        <div className="flex min-h-screen items-center justify-center bg-gray-50 px-4 py-12 sm:px-6 lg:px-8">
          <div className="w-full max-w-md">
            <ErrorCard
              responseError={{ message: "Loading..." }}
              context="application"
            />
          </div>
        </div>
      }
    >
      <ErrorPageContent />
    </Suspense>
  );
}
@@ -0,0 +1,67 @@
export function getErrorDetails(errorType: string | null) {
  switch (errorType) {
    case "user-creation-failed":
      return {
        responseError: {
          message:
            "Failed to create your user account in our system. This could be due to a temporary server issue or a problem with your account setup.",
        },
        context: "user account creation",
      };
    case "auth-token-invalid":
      return {
        responseError: {
          message:
            "Your authentication token is missing or invalid. Please try signing in again.",
        },
        context: "authentication token",
      };
    case "server-error":
      return {
        responseError: {
          message:
            "Our servers are experiencing issues. Please try again in a few minutes, or contact support if the problem persists.",
        },
        context: "server error",
      };
    case "rate-limited":
      return {
        responseError: {
          message:
            "Too many requests have been made. Please wait a moment before trying again.",
        },
        context: "rate limiting",
      };
    case "network-error":
      return {
        responseError: {
          message:
            "Unable to connect to our servers. Please check your internet connection and try again.",
        },
        context: "network connectivity",
      };
    case "auth-failed":
      return {
        responseError: {
          message: "Authentication failed. Please try signing in again.",
        },
        context: "authentication",
      };
    case "session-expired":
      return {
        responseError: {
          message:
            "Your session has expired. Please sign in again to continue.",
        },
        context: "session",
      };
    default:
      return {
        responseError: {
          message:
            "An unexpected error occurred. Please try again or contact support if the problem persists.",
        },
        context: "application",
      };
  }
}
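A quick sketch of how the helper above is consumed: each redirect in the callback route lands on /error?message=<key>, and the page resolves the key to card content. Sample values below are hypothetical, but the keys match the cases above.

// Known key -> specific copy and context:
const details = getErrorDetails("rate-limited");
// details.context === "rate limiting"
// details.responseError.message starts with "Too many requests have been made."

// Missing or unknown keys fall through to the generic default:
const fallback = getErrorDetails(null);
// fallback.context === "application"
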
autogpt_platform/frontend/src/app/(platform)/error/page.tsx (new file, 63 lines)
@@ -0,0 +1,63 @@
"use client";

import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { useSearchParams } from "next/navigation";
import { Suspense } from "react";
import { getErrorDetails } from "./helpers";

function ErrorPageContent() {
  const searchParams = useSearchParams();
  const errorMessage = searchParams.get("message");
  const errorDetails = getErrorDetails(errorMessage);

  function handleRetry() {
    // Auth-related errors should redirect to login
    if (
      errorMessage === "user-creation-failed" ||
      errorMessage === "auth-failed" ||
      errorMessage === "auth-token-invalid" ||
      errorMessage === "session-expired"
    ) {
      window.location.href = "/login";
    } else if (errorMessage === "rate-limited") {
      // For rate limiting, wait a moment then try again
      setTimeout(() => {
        window.location.reload();
      }, 2000);
    } else {
      // For server/network errors, go to marketplace
      window.location.href = "/marketplace";
    }
  }

  return (
    <div className="flex min-h-screen items-center justify-center bg-gray-50 px-4 py-12 sm:px-6 lg:px-8">
      <div className="relative w-full max-w-xl lg:bottom-[4rem]">
        <ErrorCard
          responseError={errorDetails.responseError}
          context={errorDetails.context}
          onRetry={handleRetry}
        />
      </div>
    </div>
  );
}

export default function ErrorPage() {
  return (
    <Suspense
      fallback={
        <div className="flex min-h-screen items-center justify-center bg-gray-50 px-4 py-12 sm:px-6 lg:px-8">
          <div className="relative w-full max-w-xl lg:-top-[4rem]">
            <ErrorCard
              responseError={{ message: "Loading..." }}
              context="application"
            />
          </div>
        </div>
      }
    >
      <ErrorPageContent />
    </Suspense>
  );
}
@@ -3,7 +3,7 @@ import { ReactNode } from "react";
|
||||
|
||||
export default function PlatformLayout({ children }: { children: ReactNode }) {
|
||||
return (
|
||||
<main className="flex h-screen w-screen flex-col">
|
||||
<main className="flex h-screen w-full flex-col">
|
||||
<Navbar />
|
||||
<section className="flex-1">{children}</section>
|
||||
</main>
|
||||
|
||||
@@ -144,7 +144,7 @@ export function RunDetailHeader({
          <>
            <span className="mx-1 inline-block text-zinc-200">|</span>
            <Text variant="small" className="!text-zinc-600">
              Cost: ${run.stats.cost}
              Cost: ${(run.stats.cost / 100).toFixed(2)}
            </Text>
          </>
        )}

@@ -103,8 +103,7 @@ export function AgentRunDraftView({
  const [changedPresetAttributes, setChangedPresetAttributes] = useState<
    Set<keyof LibraryAgentPresetUpdatable>
  >(new Set());
  const { state: onboardingState, completeStep: completeOnboardingStep } =
    useOnboarding();
  const { completeStep: completeOnboardingStep } = useOnboarding();
  const [cronScheduleDialogOpen, setCronScheduleDialogOpen] = useState(false);

  // Update values if agentPreset parameter is changed
@@ -197,9 +196,7 @@ export function AgentRunDraftView({
        .catch(toastOnFail("execute agent preset"));
    }
    // Mark run agent onboarding step as completed
    if (onboardingState?.completedSteps.includes("MARKETPLACE_ADD_AGENT")) {
      completeOnboardingStep("MARKETPLACE_RUN_AGENT");
    }
    completeOnboardingStep("MARKETPLACE_RUN_AGENT");
    if (runCount > 0) {
      completeOnboardingStep("RE_RUN_AGENT");
    }
@@ -210,7 +207,6 @@ export function AgentRunDraftView({
    inputCredentials,
    onRun,
    toastOnFail,
    onboardingState,
    completeOnboardingStep,
  ]);

@@ -246,7 +242,6 @@ export function AgentRunDraftView({
    onCreatePreset,
    toast,
    toastOnFail,
    onboardingState,
    completeOnboardingStep,
  ]);

@@ -286,7 +281,6 @@ export function AgentRunDraftView({
    onUpdatePreset,
    toast,
    toastOnFail,
    onboardingState,
    completeOnboardingStep,
  ]);

@@ -334,7 +328,6 @@ export function AgentRunDraftView({
    onCreatePreset,
    toast,
    toastOnFail,
    onboardingState,
    completeOnboardingStep,
  ]);

@@ -85,7 +85,7 @@ export default function LoginPage() {
        />

        {/* Turnstile CAPTCHA Component */}
        {isCloudEnv && !turnstile.verified ? (
        {turnstile.shouldRender ? (
          <Turnstile
            key={captchaKey}
            siteKey={turnstile.siteKey}

@@ -2,7 +2,7 @@ import { Skeleton } from "@/components/__legacy__/ui/skeleton";
|
||||
|
||||
export const AgentPageLoading = () => {
|
||||
return (
|
||||
<div className="mx-auto w-screen max-w-[1360px]">
|
||||
<div className="mx-auto w-full max-w-[1360px]">
|
||||
<main className="mt-5 px-4">
|
||||
<div className="flex items-center space-x-2">
|
||||
<Skeleton className="h-4 w-24" />
|
||||
|
||||
@@ -2,7 +2,7 @@ import { Skeleton } from "@/components/__legacy__/ui/skeleton";
|
||||
|
||||
export const CreatorPageLoading = () => {
|
||||
return (
|
||||
<div className="mx-auto w-screen max-w-[1360px]">
|
||||
<div className="mx-auto w-full max-w-[1360px]">
|
||||
<main className="mt-5 px-4">
|
||||
<Skeleton className="mb-4 h-6 w-40" />
|
||||
|
||||
|
||||
@@ -30,7 +30,7 @@ export const MainAgentPage = ({ params }: MainAgentPageProps) => {
|
||||
}
|
||||
if (hasError) {
|
||||
return (
|
||||
<div className="mx-auto w-screen max-w-[1360px]">
|
||||
<div className="mx-auto w-full max-w-[1360px]">
|
||||
<main className="px-4">
|
||||
<div className="flex min-h-[400px] items-center justify-center">
|
||||
<ErrorCard
|
||||
@@ -48,7 +48,7 @@ export const MainAgentPage = ({ params }: MainAgentPageProps) => {
|
||||
|
||||
if (!agent) {
|
||||
return (
|
||||
<div className="mx-auto w-screen max-w-[1360px]">
|
||||
<div className="mx-auto w-full max-w-[1360px]">
|
||||
<main className="px-4">
|
||||
<div className="flex min-h-[400px] items-center justify-center">
|
||||
<ErrorCard
|
||||
@@ -74,7 +74,7 @@ export const MainAgentPage = ({ params }: MainAgentPageProps) => {
|
||||
];
|
||||
|
||||
return (
|
||||
<div className="mx-auto w-screen max-w-[1360px]">
|
||||
<div className="mx-auto w-full max-w-[1360px]">
|
||||
<main className="mt-5 px-4">
|
||||
<Breadcrumbs items={breadcrumbs} />
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@ export const MainCreatorPage = ({ params }: MainCreatorPageProps) => {
|
||||
|
||||
if (hasError) {
|
||||
return (
|
||||
<div className="mx-auto w-screen max-w-[1360px]">
|
||||
<div className="mx-auto w-full max-w-[1360px]">
|
||||
<div className="flex min-h-[60vh] items-center justify-center">
|
||||
<ErrorCard
|
||||
isSuccess={false}
|
||||
@@ -39,7 +39,7 @@ export const MainCreatorPage = ({ params }: MainCreatorPageProps) => {
|
||||
|
||||
if (creator)
|
||||
return (
|
||||
<div className="mx-auto w-screen max-w-[1360px]">
|
||||
<div className="mx-auto w-full max-w-[1360px]">
|
||||
<main className="mt-5 px-4">
|
||||
<Breadcrumbs
|
||||
items={[
|
||||
|
||||
@@ -37,7 +37,7 @@ export const MainMarkeplacePage = () => {
|
||||
|
||||
return (
|
||||
// FRONTEND-TODO : Need better state location, need to fetch creators and agents in their respective file, Can't do it right now because these files are used in some other pages of marketplace, will fix it when encounter with those pages
|
||||
<div className="mx-auto w-screen max-w-[1360px]">
|
||||
<div className="mx-auto w-full max-w-[1360px]">
|
||||
<main className="px-4">
|
||||
<HeroSection />
|
||||
{featuredAgents && (
|
||||
|
||||
@@ -2,7 +2,7 @@ import { Skeleton } from "@/components/__legacy__/ui/skeleton";
|
||||
|
||||
export const MainMarketplacePageLoading = () => {
|
||||
return (
|
||||
<div className="mx-auto w-screen max-w-[1360px]">
|
||||
<div className="mx-auto w-full max-w-[1360px]">
|
||||
<main className="px-4">
|
||||
<div className="flex flex-col gap-2 pt-16">
|
||||
<div className="flex flex-col items-center justify-center gap-8">
|
||||
|
||||
@@ -112,12 +112,13 @@ export const FlowRunsStatus: React.FC<{
        </p>
        {filteredFlowRuns.some((r) => r.stats) && (
          <p>
            <strong>Total cost:</strong>{" "}
            {filteredFlowRuns.reduce(
              (total, run) => total + (run.stats?.cost ?? 0),
              0,
            )}{" "}
            seconds
            <strong>Total cost:</strong> $
            {(
              filteredFlowRuns.reduce(
                (total, run) => total + (run.stats?.cost ?? 0),
                0,
              ) / 100
            ).toFixed(2)}
          </p>
        )}
      </div>

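Both this hunk and the RunDetailHeader change above divide cost by 100 before formatting with two decimals, which suggests stats.cost is stored in integer cents (an inference from the arithmetic, not stated in the diff). A worked example with hypothetical values:

// Three runs costing 120, 45, and 1085 cents:
const costs = [120, 45, 1085];
const totalCents = costs.reduce((total, c) => total + c, 0); // 1250
console.log(`$${(totalCents / 100).toFixed(2)}`); // "$12.50", where the old code rendered "1250 seconds"
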
@@ -52,7 +52,7 @@ export default function Layout({ children }: { children: React.ReactNode }) {
  ];

  return (
    <div className="flex min-h-screen w-screen max-w-[1360px] flex-col lg:flex-row">
    <div className="flex min-h-screen w-full max-w-[1360px] flex-col lg:flex-row">
      <Sidebar linkGroups={sidebarLinkGroups} />
      <div className="flex-1 pl-4">{children}</div>
    </div>

@@ -1,13 +1,15 @@
"use client";

import * as Sentry from "@sentry/nextjs";
import NextError from "next/error";
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { useEffect } from "react";

export default function GlobalError({
  error,
  reset,
}: {
  error: Error & { digest?: string };
  reset: () => void;
}) {
  useEffect(() => {
    Sentry.captureException(error);
@@ -16,11 +18,19 @@ export default function GlobalError({
  return (
    <html>
      <body>
        {/* `NextError` is the default Next.js error page component. Its type
        definition requires a `statusCode` prop. However, since the App Router
        does not expose status codes for errors, we simply pass 0 to render a
        generic error message. */}
        <NextError statusCode={0} />
        <div className="flex min-h-screen items-center justify-center bg-gray-50 px-4 py-12 sm:px-6 lg:px-8">
          <div className="relative w-full max-w-xl lg:bottom-[4rem]">
            <ErrorCard
              responseError={{
                message:
                  error.message ||
                  "An unexpected error occurred. Our team has been notified and is working to resolve the issue.",
              }}
              context="application"
              onRetry={reset}
            />
          </div>
        </div>
      </body>
    </html>
  );

@@ -17,6 +17,7 @@ import { cn } from "@/lib/utils";
|
||||
import * as party from "party-js";
|
||||
import WalletRefill from "./WalletRefill";
|
||||
import { OnboardingStep } from "@/lib/autogpt-server-api";
|
||||
import { storage, Key as StorageKey } from "@/services/storage/local-storage";
|
||||
|
||||
export interface Task {
|
||||
id: OnboardingStep;
|
||||
@@ -164,27 +165,67 @@ export default function Wallet() {
|
||||
|
||||
const [prevCredits, setPrevCredits] = useState<number | null>(credits);
|
||||
const [flash, setFlash] = useState(false);
|
||||
const [walletOpen, setWalletOpen] = useState(state?.walletShown || false);
|
||||
const [walletOpen, setWalletOpen] = useState(false);
|
||||
const [lastSeenCredits, setLastSeenCredits] = useState<number | null>(null);
|
||||
|
||||
const totalCount = useMemo(() => {
|
||||
return groups.reduce((acc, group) => acc + group.tasks.length, 0);
|
||||
}, [groups]);
|
||||
|
||||
// Get total completed count for all groups
|
||||
const completedCount = useMemo(() => {
|
||||
return groups.reduce(
|
||||
const [completedCount, setCompletedCount] = useState<number | null>(null);
|
||||
// Needed to show confetti when a new step is completed
|
||||
const [prevCompletedCount, setPrevCompletedCount] = useState<number | null>(
|
||||
null,
|
||||
);
|
||||
|
||||
const walletRef = useRef<HTMLButtonElement | null>(null);
|
||||
|
||||
useEffect(() => {
|
||||
if (!state) {
|
||||
return;
|
||||
}
|
||||
const completed = groups.reduce(
|
||||
(acc, group) =>
|
||||
acc +
|
||||
group.tasks.filter((task) => state?.completedSteps?.includes(task.id))
|
||||
.length,
|
||||
0,
|
||||
);
|
||||
setCompletedCount(completed);
|
||||
}, [groups, state?.completedSteps]);
|
||||
|
||||
// Needed to show confetti when a new step is completed
|
||||
const [stepsLength, setStepsLength] = useState(completedCount);
|
||||
// Load last seen credits from localStorage once on mount
|
||||
useEffect(() => {
|
||||
const stored = storage.get(StorageKey.WALLET_LAST_SEEN_CREDITS);
|
||||
if (stored !== undefined && stored !== null) {
|
||||
const parsed = parseFloat(stored);
|
||||
if (!Number.isNaN(parsed)) setLastSeenCredits(parsed);
|
||||
else setLastSeenCredits(0);
|
||||
} else {
|
||||
setLastSeenCredits(0);
|
||||
}
|
||||
}, []);
|
||||
|
||||
const walletRef = useRef<HTMLButtonElement | null>(null);
|
||||
// Auto-open once if never shown, otherwise open only when credits increase beyond last seen
|
||||
useEffect(() => {
|
||||
if (typeof credits !== "number") return;
|
||||
// Open once for first-time users
|
||||
if (state && state.walletShown === false) {
|
||||
setWalletOpen(true);
|
||||
// Mark as shown so it won't reopen on every reload
|
||||
updateState({ walletShown: true });
|
||||
return;
|
||||
}
|
||||
// Open if user gained more credits than last acknowledged
|
||||
if (
|
||||
lastSeenCredits !== null &&
|
||||
credits > lastSeenCredits &&
|
||||
walletOpen === false
|
||||
) {
|
||||
setWalletOpen(true);
|
||||
}
|
||||
}, [credits, lastSeenCredits, state?.walletShown, updateState, walletOpen]);
|
||||
|
||||
const onWalletOpen = useCallback(async () => {
|
||||
if (!state?.walletShown) {
|
||||
@@ -206,19 +247,25 @@ export default function Wallet() {
|
||||
|
||||
// Confetti effect on the wallet button
|
||||
useEffect(() => {
|
||||
if (!state?.completedSteps) {
|
||||
return;
|
||||
}
|
||||
// It's enough to check completed count,
|
||||
// because the order of completed steps is not important
|
||||
// If the count is the same, we don't need to do anything
|
||||
if (completedCount === stepsLength) {
|
||||
if (completedCount === null || completedCount === prevCompletedCount) {
|
||||
return;
|
||||
}
|
||||
// Otherwise, we need to set the new prevCompletedCount
|
||||
setPrevCompletedCount(completedCount);
|
||||
// If there was no previous count, we don't show confetti
|
||||
if (prevCompletedCount === null) {
|
||||
return;
|
||||
}
|
||||
// Otherwise, we need to set the new length
|
||||
setStepsLength(completedCount);
|
||||
// And emit confetti
|
||||
if (walletRef.current) {
|
||||
// Fix confetti appearing in the top left corner
|
||||
const rect = walletRef.current.getBoundingClientRect();
|
||||
if (rect.width === 0 || rect.height === 0) {
|
||||
return;
|
||||
}
|
||||
setTimeout(() => {
|
||||
fetchCredits();
|
||||
party.confetti(walletRef.current!, {
|
||||
@@ -236,7 +283,8 @@ export default function Wallet() {
|
||||
state?.notified,
|
||||
fadeOut,
|
||||
fetchCredits,
|
||||
stepsLength,
|
||||
completedCount,
|
||||
prevCompletedCount,
|
||||
walletRef,
|
||||
]);
|
||||
|
||||
@@ -256,7 +304,19 @@ export default function Wallet() {
|
||||
}, [credits, prevCredits]);
|
||||
|
||||
return (
|
||||
<Popover open={walletOpen} onOpenChange={setWalletOpen}>
|
||||
<Popover
|
||||
open={walletOpen}
|
||||
onOpenChange={(open) => {
|
||||
setWalletOpen(open);
|
||||
if (!open) {
|
||||
// Persist the latest acknowledged credits so we only auto-open on future gains
|
||||
if (typeof credits === "number") {
|
||||
storage.set(StorageKey.WALLET_LAST_SEEN_CREDITS, String(credits));
|
||||
setLastSeenCredits(credits);
|
||||
}
|
||||
}
|
||||
}}
|
||||
>
|
||||
<PopoverTrigger asChild>
|
||||
<div className="relative inline-block">
|
||||
<button
|
||||
@@ -270,7 +330,7 @@ export default function Wallet() {
|
||||
<span className="text-sm font-semibold">
|
||||
{formatCredits(credits)}
|
||||
</span>
|
||||
{completedCount < totalCount && (
|
||||
{completedCount && completedCount < totalCount && (
|
||||
<span className="absolute right-1 top-1 h-2 w-2 rounded-full bg-violet-600"></span>
|
||||
)}
|
||||
<div className="absolute bottom-[-2.5rem] left-1/2 z-50 hidden -translate-x-1/2 transform whitespace-nowrap rounded-small bg-white px-4 py-2 shadow-md group-hover:block">
|
||||
@@ -303,7 +363,7 @@ export default function Wallet() {
|
||||
Earn credits{" "}
|
||||
<span className="font-semibold">{formatCredits(credits)}</span>
|
||||
</div>
|
||||
<PopoverClose>
|
||||
<PopoverClose aria-label="Close wallet">
|
||||
<X className="ml-2 h-5 w-5 text-zinc-800 hover:text-foreground" />
|
||||
</PopoverClose>
|
||||
</div>
|
||||
|
||||
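The wallet's auto-open behavior above reduces to a small persistence contract: store the balance the user last acknowledged when the popover closes, and reopen only when the live balance exceeds it. A standalone sketch of that contract, using raw localStorage and a hypothetical key in place of the storage wrapper and StorageKey enum above:

const KEY = "wallet-last-seen-credits"; // hypothetical key name for illustration

function shouldAutoOpen(credits: number): boolean {
  const stored = localStorage.getItem(KEY);
  const lastSeen = stored === null ? 0 : parseFloat(stored);
  // Reopen only when the user has gained credits since last acknowledging them.
  return !Number.isNaN(lastSeen) && credits > lastSeen;
}

function acknowledge(credits: number): void {
  // Mirrors the onOpenChange(false) branch above: persist the seen balance.
  localStorage.setItem(KEY, String(credits));
}
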
@@ -10,6 +10,7 @@ import React, {
import BoringAvatar from "boring-avatars";

import Image, { ImageProps } from "next/image";
import { cn } from "@/lib/utils";

type AvatarContextValue = {
  isLoaded: boolean;
@@ -44,10 +45,10 @@ export function Avatar({
  return (
    <AvatarContext.Provider value={value}>
      <div
        className={[
        className={cn(
          "relative flex h-10 w-10 shrink-0 overflow-hidden rounded-full",
          className || "",
        ].join(" ")}
          className,
        )}
        {...props}
      >
        {children}
@@ -126,7 +127,7 @@ export function AvatarImage({
      <img
        src={normalizedSrc}
        alt={alt || "Avatar image"}
        className={["h-full w-full object-cover", className || ""].join(" ")}
        className={cn("h-full w-full object-cover", className)}
        width={computedWidth}
        height={computedHeight}
        onLoad={handleLoad}
@@ -148,7 +149,7 @@ export function AvatarImage({
      <Image
        src={normalizedSrc}
        alt={alt || "Avatar image"}
        className={["h-full w-full object-cover", className || ""].join(" ")}
        className={cn("h-full w-full object-cover", className)}
        width={fill ? undefined : computedWidth}
        height={fill ? undefined : computedHeight}
        fill={Boolean(fill)}
@@ -179,10 +180,10 @@ export function AvatarFallback({
    typeof children === "string" && children.trim() ? children : "User";
  return (
    <span
      className={[
        "flex h-full w-full items-center justify-center rounded-full bg-neutral-200 text-lg text-neutral-600",
        className || "",
      ].join(" ")}
      className={cn(
        "flex h-full w-full items-center justify-center rounded-full bg-transparent text-lg text-neutral-600",
        className,
      )}
      {...props}
    >
      <BoringAvatar

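The Avatar changes above swap [..., className || ""].join(" ") for cn(...). Assuming cn is the usual clsx-plus-tailwind-merge helper (a common convention; the diff itself does not show its definition), the difference is more than cosmetic: falsy inputs are dropped instead of leaving stray spaces, and conflicting Tailwind classes are resolved rather than duplicated.

import { clsx, type ClassValue } from "clsx";
import { twMerge } from "tailwind-merge";

// Assumed definition of cn, matching the common shadcn-style utility.
function cn(...inputs: ClassValue[]): string {
  return twMerge(clsx(inputs));
}

const className: string | undefined = undefined;
["h-10 w-10", className || ""].join(" "); // "h-10 w-10 " - stray trailing space
cn("h-10 w-10", className); // "h-10 w-10" - falsy inputs dropped
cn("h-10 w-10", "h-12"); // "w-10 h-12" - later conflicting class wins
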
@@ -40,7 +40,7 @@ export function AccountMenu({
        aria-haspopup="true"
        data-testid="profile-popout-menu-trigger"
      >
        <Avatar className="h-10 w-10">
        <Avatar>
          <AvatarImage src={avatarSrc} alt="" aria-hidden="true" />
          <AvatarFallback aria-hidden="true">
            {userName?.charAt(0) || "U"}

@@ -19,7 +19,6 @@ export function LoginButton() {
    <Button
      onClick={handleLogin}
      size="small"
      className="flex items-center justify-end space-x-2"
      leftIcon={<SignInIcon className="h-5 w-5" />}
      variant="secondary"
    >

@@ -66,7 +66,7 @@ export const NavbarView = ({ isLoggedIn }: NavbarViewProps) => {
      {/* Mobile Navbar - Adjust positioning */}
      <>
        {isLoggedIn ? (
          <div className="fixed -right-4 top-2 z-50 flex items-center gap-0 md:hidden">
          <div className="fixed right-0 top-2 z-50 flex items-center gap-0 md:hidden">
            <Wallet />
            <MobileNavBar
              userName={profile?.username}

@@ -1,17 +1,34 @@
import { useEffect, useRef } from "react";
import { usePathname } from "next/navigation";
import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
import { usePostV1UpdateUserTimezone } from "@/app/api/__generated__/endpoints/auth/auth";

/**
 * Hook to silently detect and set user's timezone during onboarding
 * This version doesn't show any toast notifications
 * Hook to silently detect and set user's timezone ONLY during actual onboarding flow
 * This prevents unnecessary timezone API calls during authentication and platform usage
 * @returns void
 */
export const useOnboardingTimezoneDetection = () => {
  const updateTimezone = usePostV1UpdateUserTimezone();
  const hasAttemptedDetection = useRef(false);
  const pathname = usePathname();
  const { user, isUserLoading } = useSupabase();

  // Check if we're on onboarding route (computed outside useEffect to avoid re-computing)
  const isOnOnboardingRoute = pathname.startsWith("/onboarding");

  useEffect(() => {
    // Only attempt once
    // Only run during actual onboarding routes - prevents running on every auth
    if (!isOnOnboardingRoute) {
      return;
    }

    // Wait for proper authentication state instead of using arbitrary timeout
    if (isUserLoading || !user) {
      return;
    }

    // Only attempt once per session
    if (hasAttemptedDetection.current) {
      return;
    }
@@ -30,13 +47,13 @@ export const useOnboardingTimezoneDetection = () => {
        return;
      }

      // Silently update the timezone in the backend
      await updateTimezone.mutateAsync({
      // Fire-and-forget timezone update - we don't need to wait for response
      updateTimezone.mutate({
        data: { timezone: browserTimezone } as any,
      });

      console.log(
        `Timezone automatically set to ${browserTimezone} during onboarding`,
      console.info(
        `Timezone automatically set to ${browserTimezone} during onboarding flow`,
      );
    } catch (error) {
      console.error(
@@ -47,11 +64,6 @@ export const useOnboardingTimezoneDetection = () => {
    }
  };

  // Small delay to ensure user is created
  const timer = setTimeout(() => {
    detectAndSetTimezone();
  }, 1000);

  return () => clearTimeout(timer);
}, []); // Run once on mount
  detectAndSetTimezone();
}, [isOnOnboardingRoute, updateTimezone, user, isUserLoading]); // Use computed boolean to reduce re-renders
};

@@ -27,8 +27,8 @@ export const useTimezoneDetection = (currentTimezone?: string) => {
    return;
  }

  // Update the timezone in the backend
  await updateTimezone.mutateAsync({
  // Fire-and-forget timezone update - we don't need to wait for response
  updateTimezone.mutate({
    data: { timezone: browserTimezone } as any,
  });

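The change from await mutateAsync(...) to mutate(...) in both hooks makes the update fire-and-forget: the effect no longer blocks on the network round-trip, and failures are handled through the mutation's callbacks rather than the surrounding try/catch. A minimal sketch of the distinction; the mutation shape mirrors TanStack Query, and the names are stand-ins for the generated hook above:

// Sketch only: stand-in for the generated usePostV1UpdateUserTimezone mutation.
declare const updateTimezone: {
  mutate: (vars: unknown, opts?: { onError?: (e: unknown) => void }) => void;
  mutateAsync: (vars: unknown) => Promise<unknown>;
};
const timezone = Intl.DateTimeFormat().resolvedOptions().timeZone;

// mutateAsync hands the caller a promise that must be awaited and caught:
await updateTimezone.mutateAsync({ data: { timezone } });

// mutate returns immediately; errors surface via callbacks instead:
updateTimezone.mutate(
  { data: { timezone } },
  { onError: (e) => console.error("Timezone update failed", e) },
);
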
autogpt_platform/frontend/src/providers/onboarding/helpers.ts (new file, 109 lines)
@@ -0,0 +1,109 @@
|
||||
import { OnboardingStep, UserOnboarding } from "@/lib/autogpt-server-api";
|
||||
|
||||
export function isToday(date: Date): boolean {
|
||||
const today = new Date();
|
||||
return (
|
||||
date.getDate() === today.getDate() &&
|
||||
date.getMonth() === today.getMonth() &&
|
||||
date.getFullYear() === today.getFullYear()
|
||||
);
|
||||
}
|
||||
|
||||
export function isYesterday(date: Date): boolean {
|
||||
const yesterday = new Date();
|
||||
yesterday.setDate(yesterday.getDate() - 1);
|
||||
|
||||
return (
|
||||
date.getDate() === yesterday.getDate() &&
|
||||
date.getMonth() === yesterday.getMonth() &&
|
||||
date.getFullYear() === yesterday.getFullYear()
|
||||
);
|
||||
}
|
||||
|
||||
export function calculateConsecutiveDays(
|
||||
lastRunAt: Date | null,
|
||||
currentConsecutiveDays: number,
|
||||
): { lastRunAt: Date; consecutiveRunDays: number } {
|
||||
const now = new Date();
|
||||
|
||||
if (lastRunAt === null || isYesterday(lastRunAt)) {
|
||||
return {
|
||||
lastRunAt: now,
|
||||
consecutiveRunDays: currentConsecutiveDays + 1,
|
||||
};
|
||||
}
|
||||
|
||||
if (!isToday(lastRunAt)) {
|
||||
return {
|
||||
lastRunAt: now,
|
||||
consecutiveRunDays: 1,
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
lastRunAt: now,
|
||||
consecutiveRunDays: currentConsecutiveDays,
|
||||
};
|
||||
}
|
||||
|
||||
export function getRunMilestoneSteps(
  newRunCount: number,
  consecutiveDays: number,
): OnboardingStep[] {
  const steps: OnboardingStep[] = [];

  if (newRunCount === 10) steps.push("RUN_AGENTS");
  if (newRunCount === 100) steps.push("RUN_AGENTS_100");
  if (consecutiveDays === 3) steps.push("RUN_3_DAYS");
  if (consecutiveDays === 14) steps.push("RUN_14_DAYS");

  return steps;
}

export function processOnboardingData(
  onboarding: UserOnboarding,
): UserOnboarding {
  // Patch for TRIGGER_WEBHOOK - only set on backend then overwritten by frontend
  const completeWebhook =
    onboarding.rewardedFor.includes("TRIGGER_WEBHOOK") &&
    !onboarding.completedSteps.includes("TRIGGER_WEBHOOK")
      ? (["TRIGGER_WEBHOOK"] as OnboardingStep[])
      : [];

  return {
    ...onboarding,
    completedSteps: [...completeWebhook, ...onboarding.completedSteps],
    lastRunAt: onboarding.lastRunAt ? new Date(onboarding.lastRunAt) : null,
  };
}

export function shouldRedirectFromOnboarding(
  completedSteps: OnboardingStep[],
  pathname: string,
): boolean {
  return (
    completedSteps.includes("CONGRATS") &&
    !pathname.startsWith("/onboarding/reset")
  );
}

export function createInitialOnboardingState(
  newState: Omit<Partial<UserOnboarding>, "rewardedFor">,
): UserOnboarding {
  return {
    completedSteps: [],
    walletShown: true,
    notified: [],
    rewardedFor: [],
    usageReason: null,
    integrations: [],
    otherIntegrations: null,
    selectedStoreListingVersionId: null,
    agentInput: null,
    onboardingAgentExecutionId: null,
    agentRuns: 0,
    lastRunAt: null,
    consecutiveRunDays: 0,
    ...newState,
  };
}

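These helpers compose; a minimal illustration (values invented for the demo, assuming rewardedFor holds OnboardingStep values) of building a default state and running it through the TRIGGER_WEBHOOK patch:

// Illustrative only: a backend reward without the matching completed step
// gets patched into completedSteps by processOnboardingData.
const base: UserOnboarding = {
  ...createInitialOnboardingState({ agentRuns: 2 }),
  rewardedFor: ["TRIGGER_WEBHOOK"],
};

const patched = processOnboardingData(base);
// patched.completedSteps => ["TRIGGER_WEBHOOK"]
// patched.lastRunAt => null (nothing to convert to a Date)
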
@@ -20,8 +20,16 @@ import {
  useCallback,
  useContext,
  useEffect,
  useRef,
  useState,
} from "react";
import {
  calculateConsecutiveDays,
  createInitialOnboardingState,
  getRunMilestoneSteps,
  processOnboardingData,
  shouldRedirectFromOnboarding,
} from "./helpers";

const OnboardingContext = createContext<
  | {
@@ -39,6 +47,7 @@ const OnboardingContext = createContext<

export function useOnboarding(step?: number, completeStep?: OnboardingStep) {
  const context = useContext(OnboardingContext);

  if (!context)
    throw new Error("useOnboarding must be used within an OnboardingProvider");

@@ -71,90 +80,73 @@ export default function OnboardingProvider({
  children: ReactNode;
}) {
  const [state, setState] = useState<UserOnboarding | null>(null);
  // Step is used to control the progress bar, it's frontend only
  const [step, setStep] = useState(1);
  const [npsDialogOpen, setNpsDialogOpen] = useState(false);
  const hasInitialized = useRef(false);

  const api = useBackendAPI();
  const pathname = usePathname();
  const router = useRouter();
  const { user, isUserLoading } = useSupabase();

  // Automatically detect and set timezone for new users during onboarding
  useOnboardingTimezoneDetection();

  const isOnOnboardingRoute = pathname.startsWith("/onboarding");

  useEffect(() => {
    const fetchOnboarding = async () => {
      try {
        const enabled = await api.isOnboardingEnabled();
        if (!enabled && pathname.startsWith("/onboarding")) {
          router.push("/marketplace");
          return;
        }
        const onboarding = await api.getUserOnboarding();

        // Only update state if onboarding data is valid
        if (onboarding) {
          //todo kcze this is a patch because only TRIGGER_WEBHOOK is set on the backend and then overwritten by the frontend
          const completeWebhook =
            onboarding.rewardedFor.includes("TRIGGER_WEBHOOK") &&
            !onboarding.completedSteps.includes("TRIGGER_WEBHOOK")
              ? (["TRIGGER_WEBHOOK"] as OnboardingStep[])
              : [];

          setState((prev) => ({
            ...onboarding,
            completedSteps: [...completeWebhook, ...onboarding.completedSteps],
            lastRunAt: new Date(onboarding.lastRunAt || ""),
            ...prev,
          }));

          // Redirect outside onboarding if completed
          // If user did CONGRATS step, that means they completed introductory onboarding
          if (
            onboarding.completedSteps &&
            onboarding.completedSteps.includes("CONGRATS") &&
            pathname.startsWith("/onboarding") &&
            !pathname.startsWith("/onboarding/reset")
          ) {
            router.push("/marketplace");
          }
        }
      } catch (error) {
        console.error("Failed to fetch onboarding data:", error);
        // Don't update state on error to prevent null access issues
      }
    };
    if (isUserLoading || !user) {
    // Prevent multiple initializations
    if (hasInitialized.current || isUserLoading || !user) {
      return;
    }
    fetchOnboarding();
  }, [api, pathname, router, user, isUserLoading]);

    hasInitialized.current = true;

    async function initializeOnboarding() {
      try {
        // Check onboarding enabled only for onboarding routes
        if (isOnOnboardingRoute) {
          const enabled = await api.isOnboardingEnabled();
          if (!enabled) {
            router.push("/marketplace");
            return;
          }
        }

        const onboarding = await api.getUserOnboarding();
        if (!onboarding) return;

        const processedOnboarding = processOnboardingData(onboarding);
        setState(processedOnboarding);

        // Handle redirects for completed onboarding
        if (
          isOnOnboardingRoute &&
          shouldRedirectFromOnboarding(
            processedOnboarding.completedSteps,
            pathname,
          )
        ) {
          router.push("/marketplace");
        }
      } catch (error) {
        console.error("Failed to initialize onboarding:", error);
        hasInitialized.current = false; // Allow retry on next render
      }
    }

    initializeOnboarding();
  }, [api, isOnOnboardingRoute, router, user, isUserLoading, pathname]);

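The rewritten effect above leans on a standard ref-guard pattern: hasInitialized blocks re-entry across re-renders (and, incidentally, Strict Mode's double effect invocation), while clearing the flag in the catch path lets the next render retry a failed initialization. A minimal sketch of the pattern in isolation (hypothetical hook name and init callback, not the provider's real API):

// Sketch only: run an async initializer at most once, with retry on failure.
import { useEffect, useRef } from "react";

export function useRunOnceWithRetry(init: () => Promise<void>) {
  const hasInitialized = useRef(false);

  useEffect(() => {
    if (hasInitialized.current) return;
    hasInitialized.current = true; // set before awaiting, so re-renders are blocked

    init().catch(() => {
      hasInitialized.current = false; // failed: allow a retry on the next render
    });
  });
}
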
  const updateState = useCallback(
    (newState: Omit<Partial<UserOnboarding>, "rewardedFor">) => {
      setState((prev) => {
        if (!prev) {
          // Handle initial state
          return {
            completedSteps: [],
            walletShown: true,
            notified: [],
            rewardedFor: [],
            usageReason: null,
            integrations: [],
            otherIntegrations: null,
            selectedStoreListingVersionId: null,
            agentInput: null,
            onboardingAgentExecutionId: null,
            agentRuns: 0,
            lastRunAt: null,
            consecutiveRunDays: 0,
            ...newState,
          };
          return createInitialOnboardingState(newState);
        }
        return { ...prev, ...newState };
      });
      // Make the API call asynchronously to not block render

      // Async API update without blocking render
      setTimeout(() => {
        api.updateUserOnboarding(newState).catch((error) => {
          console.error("Failed to update user onboarding:", error);
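
The hunk above (truncated by the diff) pairs an optimistic local update with a deferred server write: setState applies the patch synchronously so the UI reflects it at once, while setTimeout defers even the synchronous setup of the network call until after the current render commits. A generic sketch of that split, with an invented saveToServer in place of api.updateUserOnboarding:

// Sketch only: local-first update, server sync deferred off the render path.
function applyAndSync<T extends object>(
  setState: (updater: (prev: T) => T) => void,
  saveToServer: (patch: Partial<T>) => Promise<void>,
  patch: Partial<T>,
) {
  setState((prev) => ({ ...prev, ...patch })); // optimistic local change
  setTimeout(() => {
    // Failures are logged rather than thrown; the local state keeps the patch.
    saveToServer(patch).catch((error) =>
      console.error("Failed to sync state:", error),
    );
  }, 0);
}
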
@@ -166,75 +158,38 @@ export default function OnboardingProvider({

  const completeStep = useCallback(
    (step: OnboardingStep) => {
      if (
        !state ||
        !state.completedSteps ||
        state.completedSteps.includes(step)
      )
        return;

      updateState({
        completedSteps: [...state.completedSteps, step],
      });
      if (!state?.completedSteps?.includes(step)) {
        updateState({
          completedSteps: [...(state?.completedSteps || []), step],
        });
      }
    },
    [state, updateState],
    [state?.completedSteps, updateState],
  );

  const isToday = useCallback((date: Date) => {
    const today = new Date();

    return (
      date.getDate() === today.getDate() &&
      date.getMonth() === today.getMonth() &&
      date.getFullYear() === today.getFullYear()
    );
  }, []);

  const isYesterday = useCallback((date: Date): boolean => {
    const yesterday = new Date();
    yesterday.setDate(yesterday.getDate() - 1);

    return (
      date.getDate() === yesterday.getDate() &&
      date.getMonth() === yesterday.getMonth() &&
      date.getFullYear() === yesterday.getFullYear()
    );
  }, []);

  const incrementRuns = useCallback(() => {
    if (!state || !state.completedSteps) return;
    if (!state?.completedSteps) return;

    const tenRuns = state.agentRuns + 1 === 10;
    const hundredRuns = state.agentRuns + 1 === 100;
    // Calculate if it's a run on a consecutive day
    // If the last run was yesterday, increment days
    // Otherwise, if the last run was *not* today reset it (already checked that it wasn't yesterday at this point)
    // Otherwise, don't do anything (the last run was today)
    const consecutive =
      state.lastRunAt === null || isYesterday(state.lastRunAt)
        ? {
            lastRunAt: new Date(),
            consecutiveRunDays: state.consecutiveRunDays + 1,
          }
        : !isToday(state.lastRunAt)
          ? { lastRunAt: new Date(), consecutiveRunDays: 1 }
          : {};
    const newRunCount = state.agentRuns + 1;
    const consecutiveData = calculateConsecutiveDays(
      state.lastRunAt,
      state.consecutiveRunDays,
    );

    const milestoneSteps = getRunMilestoneSteps(
      newRunCount,
      consecutiveData.consecutiveRunDays,
    );

    // Show NPS dialog at 10 runs
    if (newRunCount === 10) {
      setNpsDialogOpen(true);
    }

    setNpsDialogOpen(tenRuns);
    updateState({
      agentRuns: state.agentRuns + 1,
      completedSteps: [
        ...state.completedSteps,
        ...(tenRuns ? (["RUN_AGENTS"] as OnboardingStep[]) : []),
        ...(hundredRuns ? (["RUN_AGENTS_100"] as OnboardingStep[]) : []),
        ...(consecutive.consecutiveRunDays === 3
          ? (["RUN_3_DAYS"] as OnboardingStep[])
          : []),
        ...(consecutive.consecutiveRunDays === 14
          ? (["RUN_14_DAYS"] as OnboardingStep[])
          : []),
      ],
      ...consecutive,
      agentRuns: newRunCount,
      completedSteps: [...state.completedSteps, ...milestoneSteps],
      ...consecutiveData,
    });
  }, [state, updateState]);

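For reference, incrementRuns now derives everything from the two extracted helpers; a quick check of the milestone logic with illustrative inputs:

// Illustrative values only.
getRunMilestoneSteps(10, 3); // => ["RUN_AGENTS", "RUN_3_DAYS"]
getRunMilestoneSteps(100, 1); // => ["RUN_AGENTS_100"]
getRunMilestoneSteps(11, 2); // => []
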
@@ -6,6 +6,7 @@ export enum Key {
  WEBSOCKET_DISCONNECT_INTENT = "websocket-disconnect-intent",
  COPIED_FLOW_DATA = "copied-flow-data",
  SHEPHERD_TOUR = "shepherd-tour",
  WALLET_LAST_SEEN_CREDITS = "wallet-last-seen-credits",
}

function get(key: Key) {

@@ -63,5 +63,32 @@ export class LoginPage {
    console.log("➡️ Navigating to /marketplace ...");
    await this.page.goto("/marketplace", { timeout: 10_000 });
    console.log("✅ Login process complete");

    // If Wallet popover auto-opens, close it to avoid blocking account menu interactions
    try {
      const walletPanel = this.page.getByText("Your credits").first();
      // Wait briefly for wallet to appear after navigation (it may open asynchronously)
      const appeared = await walletPanel
        .waitFor({ state: "visible", timeout: 2500 })
        .then(() => true)
        .catch(() => false);
      if (appeared) {
        const closeWalletButton = this.page.getByRole("button", {
          name: /Close wallet/i,
        });
        await closeWalletButton.click({ timeout: 3000 }).catch(async () => {
          // Fallbacks: try Escape, then click outside
          await this.page.keyboard.press("Escape").catch(() => {});
        });
        await walletPanel
          .waitFor({ state: "hidden", timeout: 3000 })
          .catch(async () => {
            await this.page.mouse.click(5, 5).catch(() => {});
          });
      }
    } catch (_e) {
      // Non-fatal in tests; continue
      console.log("(info) Wallet popover not present or already closed");
    }
  }
}