Compare commits

..

5 Commits

Author SHA1 Message Date
Nicholas Tindle
2a1ece7b65 Merge branch 'master' into copilot/fix-10840 2025-12-18 10:50:57 -06:00
Nicholas Tindle
4d3e87a3ea Merge branch 'master' into copilot/fix-10840 2025-09-30 11:23:50 -05:00
copilot-swe-agent[bot]
e7c8c875b7 fix(ci): make workflow_dispatch functional and prevent runtime errors
- Add github.event_name == 'workflow_dispatch' to allow manual testing
- Add null safety check for github.event.pull_request to prevent runtime errors
- Maintains all existing Dependabot detection while fixing manual trigger capability

Co-authored-by: ntindle <8845353+ntindle@users.noreply.github.com>
2025-09-18 21:53:16 +00:00
copilot-swe-agent[bot]
67dab25ec7 fix(ci): correct Dependabot PR detection in Claude workflow
- Fix workflow condition to use github.event.pull_request.user.login
- Add fallback condition with github.actor for security
- Add workflow_dispatch trigger for manual testing
- Implements the "belt and suspenders" approach from issue analysis

Co-authored-by: ntindle <8845353+ntindle@users.noreply.github.com>
2025-09-18 19:28:10 +00:00
copilot-swe-agent[bot]
3d17911477 Initial plan 2025-09-18 19:20:02 +00:00
74 changed files with 794 additions and 4370 deletions

View File

@@ -14,11 +14,15 @@ name: Claude Dependabot PR Review
 on:
   pull_request:
     types: [opened, synchronize]
+  workflow_dispatch: # Allow manual testing

 jobs:
   dependabot-review:
-    # Only run on Dependabot PRs
-    if: github.actor == 'dependabot[bot]'
+    # Only run on Dependabot PRs or manual dispatch
+    if: |
+      github.event_name == 'workflow_dispatch' ||
+      github.actor == 'dependabot[bot]' ||
+      (github.event.pull_request && github.event.pull_request.user.login == 'dependabot[bot]')
     runs-on: ubuntu-latest
     timeout-minutes: 30
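
The new guard is three OR'd checks. Read as a plain predicate, it behaves like the following sketch (hedged Python illustration only; the real evaluation happens in the Actions expression engine, and the argument names are stand-ins for fields of the event context):

```python
# Hypothetical model of the workflow's `if:` expression, not GitHub's engine.
def should_run(event_name: str, actor: str, pull_request: dict | None) -> bool:
    # Manual runs are always allowed.
    if event_name == "workflow_dispatch":
        return True
    # "Belt and suspenders": check the triggering actor...
    if actor == "dependabot[bot]":
        return True
    # ...and the PR author, guarding against a missing pull_request payload
    # (the null check that prevents the runtime error).
    return bool(pull_request) and pull_request.get("user", {}).get("login") == "dependabot[bot]"


assert should_run("workflow_dispatch", "octocat", None)
assert should_run("pull_request", "dependabot[bot]", {"user": {"login": "dependabot[bot]"}})
assert not should_run("pull_request", "octocat", None)  # no payload, not Dependabot
```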

View File

@@ -32,9 +32,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        # Use Python 3.13 to match Docker image (see backend/Dockerfile)
-        # ClamAV tests moved to platform-backend-security-ci.yml (runs on merge to master)
-        python-version: ["3.13"]
+        python-version: ["3.11", "3.12", "3.13"]
     runs-on: ubuntu-latest
     services:
@@ -50,6 +48,23 @@ jobs:
         env:
           RABBITMQ_DEFAULT_USER: ${{ env.RABBITMQ_DEFAULT_USER }}
           RABBITMQ_DEFAULT_PASS: ${{ env.RABBITMQ_DEFAULT_PASS }}
+      clamav:
+        image: clamav/clamav-debian:latest
+        ports:
+          - 3310:3310
+        env:
+          CLAMAV_NO_FRESHCLAMD: false
+          CLAMD_CONF_StreamMaxLength: 50M
+          CLAMD_CONF_MaxFileSize: 100M
+          CLAMD_CONF_MaxScanSize: 100M
+          CLAMD_CONF_MaxThreads: 4
+          CLAMD_CONF_ReadTimeout: 300
+        options: >-
+          --health-cmd "clamdscan --version || exit 1"
+          --health-interval 30s
+          --health-timeout 10s
+          --health-retries 5
+          --health-start-period 180s

     steps:
       - name: Checkout repository
@@ -131,6 +146,35 @@ jobs:
       #   outputs:
       #     DB_URL, API_URL, GRAPHQL_URL, ANON_KEY, SERVICE_ROLE_KEY, JWT_SECRET

+      - name: Wait for ClamAV to be ready
+        run: |
+          echo "Waiting for ClamAV daemon to start..."
+          max_attempts=60
+          attempt=0
+          until nc -z localhost 3310 || [ $attempt -eq $max_attempts ]; do
+            echo "ClamAV is unavailable - sleeping (attempt $((attempt+1))/$max_attempts)"
+            sleep 5
+            attempt=$((attempt+1))
+          done
+          if [ $attempt -eq $max_attempts ]; then
+            echo "ClamAV failed to start after $((max_attempts*5)) seconds"
+            echo "Checking ClamAV service logs..."
+            docker logs $(docker ps -q --filter "ancestor=clamav/clamav-debian:latest") 2>&1 | tail -50 || echo "No ClamAV container found"
+            exit 1
+          fi
+          echo "ClamAV is ready!"
+
+          # Verify ClamAV is responsive
+          echo "Testing ClamAV connection..."
+          timeout 10 bash -c 'echo "PING" | nc localhost 3310' || {
+            echo "ClamAV is not responding to PING"
+            docker logs $(docker ps -q --filter "ancestor=clamav/clamav-debian:latest") 2>&1 | tail -50 || echo "No ClamAV container found"
+            exit 1
+          }
+
       - name: Run Database Migrations
         run: poetry run prisma migrate dev --name updates
         env:
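
The added wait step is plain shell around `nc`. The same probe, sketched in Python (host, port, and the PING exchange taken from the step above; clamd answering a newline-terminated `PING` with `PONG` is an assumption about its textual protocol):

```python
import socket
import time


def wait_for_clamd(host: str = "localhost", port: int = 3310,
                   max_attempts: int = 60, delay: float = 5.0) -> None:
    """Poll the TCP port like `nc -z`, then issue a PING like the workflow does."""
    for attempt in range(1, max_attempts + 1):
        try:
            with socket.create_connection((host, port), timeout=2):
                break  # port is open
        except OSError:
            print(f"ClamAV is unavailable - sleeping (attempt {attempt}/{max_attempts})")
            time.sleep(delay)
    else:
        raise TimeoutError(f"ClamAV failed to start after {max_attempts * delay:.0f} seconds")

    # Equivalent of `echo "PING" | nc localhost 3310`; clamd replies "PONG".
    with socket.create_connection((host, port), timeout=10) as conn:
        conn.sendall(b"PING\n")
        reply = conn.recv(32)
        if b"PONG" not in reply:
            raise RuntimeError(f"ClamAV is not responding to PING: {reply!r}")
```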

View File

@@ -1,145 +0,0 @@
-name: AutoGPT Platform - Backend Security CI
-
-# This workflow runs ClamAV-dependent security tests.
-# It only runs on merge to master to avoid the 3-5 minute ClamAV startup time on every PR.
-
-on:
-  push:
-    branches: [master]
-    paths:
-      - "autogpt_platform/backend/**/file*.py"
-      - "autogpt_platform/backend/**/scan*.py"
-      - "autogpt_platform/backend/**/virus*.py"
-      - "autogpt_platform/backend/**/media*.py"
-      - ".github/workflows/platform-backend-security-ci.yml"
-
-concurrency:
-  group: ${{ format('backend-security-ci-{0}', github.sha) }}
-  cancel-in-progress: false
-
-defaults:
-  run:
-    shell: bash
-    working-directory: autogpt_platform/backend
-
-jobs:
-  security-tests:
-    runs-on: ubuntu-latest
-    timeout-minutes: 15
-
-    services:
-      redis:
-        image: redis:latest
-        ports:
-          - 6379:6379
-      clamav:
-        image: clamav/clamav-debian:latest
-        ports:
-          - 3310:3310
-        env:
-          CLAMAV_NO_FRESHCLAMD: false
-          CLAMD_CONF_StreamMaxLength: 50M
-          CLAMD_CONF_MaxFileSize: 100M
-          CLAMD_CONF_MaxScanSize: 100M
-          CLAMD_CONF_MaxThreads: 4
-          CLAMD_CONF_ReadTimeout: 300
-        options: >-
-          --health-cmd "clamdscan --version || exit 1"
-          --health-interval 30s
-          --health-timeout 10s
-          --health-retries 5
-          --health-start-period 180s
-
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-          submodules: true
-
-      - name: Set up Python 3.13
-        uses: actions/setup-python@v5
-        with:
-          python-version: "3.13"
-
-      - name: Setup Supabase
-        uses: supabase/setup-cli@v1
-        with:
-          version: 1.178.1
-
-      - name: Set up Python dependency cache
-        uses: actions/cache@v4
-        with:
-          path: ~/.cache/pypoetry
-          key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
-
-      - name: Install Poetry
-        run: |
-          HEAD_POETRY_VERSION=$(python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
-          echo "Using Poetry version ${HEAD_POETRY_VERSION}"
-          curl -sSL https://install.python-poetry.org | POETRY_VERSION=$HEAD_POETRY_VERSION python3 -
-
-      - name: Install Python dependencies
-        run: poetry install
-
-      - name: Generate Prisma Client
-        run: poetry run prisma generate
-
-      - id: supabase
-        name: Start Supabase
-        working-directory: .
-        run: |
-          supabase init
-          supabase start --exclude postgres-meta,realtime,storage-api,imgproxy,inbucket,studio,edge-runtime,logflare,vector,supavisor
-          supabase status -o env | sed 's/="/=/; s/"$//' >> $GITHUB_OUTPUT
-
-      - name: Wait for ClamAV to be ready
-        run: |
-          echo "Waiting for ClamAV daemon to start..."
-          max_attempts=60
-          attempt=0
-          until nc -z localhost 3310 || [ $attempt -eq $max_attempts ]; do
-            echo "ClamAV is unavailable - sleeping (attempt $((attempt+1))/$max_attempts)"
-            sleep 5
-            attempt=$((attempt+1))
-          done
-          if [ $attempt -eq $max_attempts ]; then
-            echo "ClamAV failed to start after $((max_attempts*5)) seconds"
-            exit 1
-          fi
-          echo "ClamAV is ready!"
-
-      - name: Run Database Migrations
-        run: poetry run prisma migrate dev --name updates
-        env:
-          DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
-          DIRECT_URL: ${{ steps.supabase.outputs.DB_URL }}
-
-      - name: Run security-related tests
-        run: |
-          poetry run pytest -v \
-            backend/util/virus_scanner_test.py \
-            backend/util/file_test.py \
-            backend/server/v2/store/media_test.py \
-            -x
-        env:
-          DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
-          DIRECT_URL: ${{ steps.supabase.outputs.DB_URL }}
-          SUPABASE_URL: ${{ steps.supabase.outputs.API_URL }}
-          SUPABASE_SERVICE_ROLE_KEY: ${{ steps.supabase.outputs.SERVICE_ROLE_KEY }}
-          JWT_VERIFY_KEY: ${{ steps.supabase.outputs.JWT_SECRET }}
-          REDIS_HOST: "localhost"
-          REDIS_PORT: "6379"
-          ENCRYPTION_KEY: "dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw="
-          CLAMAV_SERVICE_HOST: "localhost"
-          CLAMAV_SERVICE_PORT: "3310"
-          CLAMAV_SERVICE_ENABLED: "true"
-
-env:
-  CI: true
-  PLAIN_OUTPUT: True
-  RUN_ENV: local
-  PORT: 8080

View File

@@ -154,78 +154,35 @@ jobs:
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3

-      # Docker image tar caching - loads images from cache in parallel for faster startup
-      - name: Set up Docker image cache
-        id: docker-cache
+      - name: Cache Docker layers
         uses: actions/cache@v4
         with:
-          path: ~/docker-cache
-          key: docker-images-frontend-${{ runner.os }}-${{ hashFiles('autogpt_platform/docker-compose.yml') }}
+          path: /tmp/.buildx-cache
+          key: ${{ runner.os }}-buildx-frontend-test-${{ hashFiles('autogpt_platform/docker-compose.yml', 'autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/pyproject.toml', 'autogpt_platform/backend/poetry.lock') }}
           restore-keys: |
-            docker-images-frontend-${{ runner.os }}-
-      - name: Load or pull Docker images
-        working-directory: autogpt_platform
-        run: |
-          mkdir -p ~/docker-cache
-
-          # Define image list for easy maintenance
-          IMAGES=(
-            "redis:latest"
-            "rabbitmq:management"
-            "kong:2.8.1"
-            "supabase/gotrue:v2.170.0"
-            "supabase/postgres:15.8.1.049"
-          )
-
-          # Check if any cached tar files exist
-          if ls ~/docker-cache/*.tar 1> /dev/null 2>&1; then
-            echo "Docker cache found, loading images in parallel..."
-            for image in "${IMAGES[@]}"; do
-              filename=$(echo "$image" | tr ':/' '--')
-              if [ -f ~/docker-cache/${filename}.tar ]; then
-                echo "Loading $image..."
-                docker load -i ~/docker-cache/${filename}.tar || echo "Warning: Failed to load $image from cache" &
-              fi
-            done
-            wait
-            echo "All cached images loaded"
-          else
-            echo "No Docker cache found, pulling images in parallel..."
-            for image in "${IMAGES[@]}"; do
-              docker pull "$image" &
-            done
-            wait
-
-            # Only save cache on main branches (not PRs) to avoid cache pollution
-            if [[ "${{ github.ref }}" == "refs/heads/master" ]] || [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then
-              echo "Saving Docker images to cache in parallel..."
-              for image in "${IMAGES[@]}"; do
-                filename=$(echo "$image" | tr ':/' '--')
-                echo "Saving $image..."
-                docker save -o ~/docker-cache/${filename}.tar "$image" || echo "Warning: Failed to save $image" &
-              done
-              wait
-              echo "Docker image cache saved"
-            else
-              echo "Skipping cache save for PR/feature branch"
-            fi
-          fi
-
-          echo "Docker images ready for use"
+            ${{ runner.os }}-buildx-frontend-test-
       - name: Run docker compose
         run: |
           NEXT_PUBLIC_PW_TEST=true docker compose -f ../docker-compose.yml up -d
+        env:
+          DOCKER_BUILDKIT: 1
+          BUILDX_CACHE_FROM: type=local,src=/tmp/.buildx-cache
+          BUILDX_CACHE_TO: type=local,dest=/tmp/.buildx-cache-new,mode=max
+      - name: Move cache
+        run: |
+          rm -rf /tmp/.buildx-cache
+          if [ -d "/tmp/.buildx-cache-new" ]; then
+            mv /tmp/.buildx-cache-new /tmp/.buildx-cache
+          fi
       - name: Wait for services to be ready
         run: |
           echo "Waiting for rest_server to be ready..."
-          timeout 30 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
+          timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
           echo "Waiting for database to be ready..."
-          timeout 30 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."
+          timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."
       - name: Create E2E test data
         run: |
@@ -264,27 +221,9 @@ jobs:
       - name: Install dependencies
         run: pnpm install --frozen-lockfile
-      # Playwright browser caching - saves 30-60s when cache hits
-      - name: Get Playwright version
-        id: playwright-version
-        run: |
-          echo "version=$(pnpm list @playwright/test --json | jq -r '.[0].dependencies["@playwright/test"].version')" >> $GITHUB_OUTPUT
-      - name: Cache Playwright browsers
-        uses: actions/cache@v4
-        id: playwright-cache
-        with:
-          path: ~/.cache/ms-playwright
-          key: playwright-${{ runner.os }}-${{ steps.playwright-version.outputs.version }}
-      - name: Install Playwright browsers
-        if: steps.playwright-cache.outputs.cache-hit != 'true'
+      - name: Install Browser 'chromium'
         run: pnpm playwright install --with-deps chromium
-      - name: Install Playwright deps only (when cache hit)
-        if: steps.playwright-cache.outputs.cache-hit == 'true'
-        run: pnpm playwright install-deps chromium
       - name: Run Playwright tests
         run: pnpm test:no-build
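
Both readiness checks above are best-effort curl loops whose hard timeout is raised from 30 s to 60 s. A hedged Python sketch of the same pattern (endpoint URL taken from the step; everything else illustrative):

```python
import time
import urllib.request


def wait_for_health(url: str = "http://localhost:8006/health",
                    deadline_s: float = 60.0, interval_s: float = 2.0) -> bool:
    """Poll an HTTP health endpoint until it answers 2xx or the deadline passes.

    Mirrors `timeout 60 sh -c 'until curl -f ...; do sleep 2; done'`: best-effort,
    so callers may choose to continue on timeout just as the workflow does.
    """
    end = time.monotonic() + deadline_s
    while time.monotonic() < end:
        try:
            with urllib.request.urlopen(url, timeout=5) as resp:
                if 200 <= resp.status < 300:  # curl -f fails on >= 400
                    return True
        except OSError:
            pass  # connection refused / HTTP error / not ready yet
        time.sleep(interval_s)
    return False


if not wait_for_health():
    print("Rest server health check timeout, continuing...")
```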

View File

@@ -83,66 +83,6 @@ jobs:
         run: |
           cp ../backend/.env.default ../backend/.env

-      # Docker image tar caching - loads images from cache in parallel for faster startup
-      - name: Set up Docker image cache
-        id: docker-cache
-        uses: actions/cache@v4
-        with:
-          path: ~/docker-cache
-          key: docker-images-fullstack-${{ runner.os }}-${{ hashFiles('autogpt_platform/docker-compose.yml') }}
-          restore-keys: |
-            docker-images-fullstack-${{ runner.os }}-
-      - name: Load or pull Docker images
-        working-directory: autogpt_platform
-        run: |
-          mkdir -p ~/docker-cache
-
-          # Define image list for easy maintenance
-          IMAGES=(
-            "redis:latest"
-            "rabbitmq:management"
-            "kong:2.8.1"
-            "supabase/gotrue:v2.170.0"
-            "supabase/postgres:15.8.1.049"
-          )
-
-          # Check if any cached tar files exist
-          if ls ~/docker-cache/*.tar 1> /dev/null 2>&1; then
-            echo "Docker cache found, loading images in parallel..."
-            for image in "${IMAGES[@]}"; do
-              filename=$(echo "$image" | tr ':/' '--')
-              if [ -f ~/docker-cache/${filename}.tar ]; then
-                echo "Loading $image..."
-                docker load -i ~/docker-cache/${filename}.tar || echo "Warning: Failed to load $image from cache" &
-              fi
-            done
-            wait
-            echo "All cached images loaded"
-          else
-            echo "No Docker cache found, pulling images in parallel..."
-            for image in "${IMAGES[@]}"; do
-              docker pull "$image" &
-            done
-            wait
-
-            # Only save cache on main branches (not PRs) to avoid cache pollution
-            if [[ "${{ github.ref }}" == "refs/heads/master" ]] || [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then
-              echo "Saving Docker images to cache in parallel..."
-              for image in "${IMAGES[@]}"; do
-                filename=$(echo "$image" | tr ':/' '--')
-                echo "Saving $image..."
-                docker save -o ~/docker-cache/${filename}.tar "$image" || echo "Warning: Failed to save $image" &
-              done
-              wait
-              echo "Docker image cache saved"
-            else
-              echo "Skipping cache save for PR/feature branch"
-            fi
-          fi
-
-          echo "Docker images ready for use"
       - name: Run docker compose
         run: |
           docker compose -f ../docker-compose.yml --profile local --profile deps_backend up -d
@@ -164,9 +104,9 @@ jobs:
       - name: Wait for services to be ready
         run: |
           echo "Waiting for rest_server to be ready..."
-          timeout 30 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
+          timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
           echo "Waiting for database to be ready..."
-          timeout 30 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."
+          timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."
       - name: Generate API queries
         run: pnpm generate:api:force

View File

@@ -11,7 +11,7 @@ jobs:
   stale:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@v10
+      - uses: actions/stale@v9
         with:
           # operations-per-run: 5000
           stale-issue-message: >

View File

@@ -61,6 +61,6 @@ jobs:
       pull-requests: write
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/labeler@v6
+      - uses: actions/labeler@v5
         with:
           sync-labels: true

View File

@@ -1,5 +1,5 @@
 import logging
-from typing import Any
+from typing import Any, Literal

 from prisma.enums import ReviewStatus
@@ -45,11 +45,11 @@ class HumanInTheLoopBlock(Block):
         )

     class Output(BlockSchemaOutput):
-        approved_data: Any = SchemaField(
-            description="The data when approved (may be modified by reviewer)"
+        reviewed_data: Any = SchemaField(
+            description="The data after human review (may be modified)"
         )
-        rejected_data: Any = SchemaField(
-            description="The data when rejected (may be modified by reviewer)"
+        status: Literal["approved", "rejected"] = SchemaField(
+            description="Status of the review: 'approved' or 'rejected'"
         )
         review_message: str = SchemaField(
             description="Any message provided by the reviewer", default=""
@@ -69,7 +69,8 @@ class HumanInTheLoopBlock(Block):
"editable": True,
},
test_output=[
("approved_data", {"name": "John Doe", "age": 30}),
("status", "approved"),
("reviewed_data", {"name": "John Doe", "age": 30}),
],
test_mock={
"get_or_create_human_review": lambda *_args, **_kwargs: ReviewResult(
@@ -115,7 +116,8 @@ class HumanInTheLoopBlock(Block):
             logger.info(
                 f"HITL block skipping review for node {node_exec_id} - safe mode disabled"
             )
-            yield "approved_data", input_data.data
+            yield "status", "approved"
+            yield "reviewed_data", input_data.data
             yield "review_message", "Auto-approved (safe mode disabled)"
             return
@@ -156,11 +158,12 @@ class HumanInTheLoopBlock(Block):
         )

         if result.status == ReviewStatus.APPROVED:
-            yield "approved_data", result.data
+            yield "status", "approved"
+            yield "reviewed_data", result.data
             if result.message:
                 yield "review_message", result.message
         elif result.status == ReviewStatus.REJECTED:
-            yield "rejected_data", result.data
+            yield "status", "rejected"
             if result.message:
                 yield "review_message", result.message

View File

@@ -1,11 +1,8 @@
import logging
import re
from collections import Counter
from concurrent.futures import Future
from typing import TYPE_CHECKING, Any
from pydantic import BaseModel
import backend.blocks.llm as llm
from backend.blocks.agent import AgentExecutorBlock
from backend.data.block import (
@@ -23,41 +20,16 @@ from backend.data.dynamic_fields import (
is_dynamic_field,
is_tool_pin,
)
from backend.data.execution import ExecutionContext
from backend.data.model import NodeExecutionStats, SchemaField
from backend.util import json
from backend.util.clients import get_database_manager_async_client
from backend.util.prompt import MAIN_OBJECTIVE_PREFIX
if TYPE_CHECKING:
from backend.data.graph import Link, Node
from backend.executor.manager import ExecutionProcessor
logger = logging.getLogger(__name__)
class ToolInfo(BaseModel):
"""Processed tool call information."""
tool_call: Any # The original tool call object from LLM response
tool_name: str # The function name
tool_def: dict[str, Any] # The tool definition from tool_functions
input_data: dict[str, Any] # Processed input data ready for tool execution
field_mapping: dict[str, str] # Field name mapping for the tool
class ExecutionParams(BaseModel):
"""Tool execution parameters."""
user_id: str
graph_id: str
node_id: str
graph_version: int
graph_exec_id: str
node_exec_id: str
execution_context: "ExecutionContext"
def _get_tool_requests(entry: dict[str, Any]) -> list[str]:
"""
Return a list of tool_call_ids if the entry is a tool request.
@@ -133,50 +105,6 @@ def _create_tool_response(call_id: str, output: Any) -> dict[str, Any]:
return {"role": "tool", "tool_call_id": call_id, "content": content}
def _combine_tool_responses(tool_outputs: list[dict[str, Any]]) -> list[dict[str, Any]]:
"""
Combine multiple Anthropic tool responses into a single user message.
For non-Anthropic formats, returns the original list unchanged.
"""
if len(tool_outputs) <= 1:
return tool_outputs
# Anthropic responses have role="user", type="message", and content is a list with tool_result items
anthropic_responses = [
output
for output in tool_outputs
if (
output.get("role") == "user"
and output.get("type") == "message"
and isinstance(output.get("content"), list)
and any(
item.get("type") == "tool_result"
for item in output.get("content", [])
if isinstance(item, dict)
)
)
]
if len(anthropic_responses) > 1:
combined_content = [
item for response in anthropic_responses for item in response["content"]
]
combined_response = {
"role": "user",
"type": "message",
"content": combined_content,
}
non_anthropic_responses = [
output for output in tool_outputs if output not in anthropic_responses
]
return [combined_response] + non_anthropic_responses
return tool_outputs
def _convert_raw_response_to_dict(raw_response: Any) -> dict[str, Any]:
"""
Safely convert raw_response to dictionary format for conversation history.
@@ -276,17 +204,6 @@ class SmartDecisionMakerBlock(Block):
default="localhost:11434",
description="Ollama host for local models",
)
agent_mode_max_iterations: int = SchemaField(
title="Agent Mode Max Iterations",
description="Maximum iterations for agent mode. 0 = traditional mode (single LLM call, yield tool calls for external execution), -1 = infinite agent mode (loop until finished), 1+ = agent mode with max iterations limit.",
advanced=True,
default=0,
)
conversation_compaction: bool = SchemaField(
default=True,
title="Context window auto-compaction",
description="Automatically compact the context window once it hits the limit",
)
@classmethod
def get_missing_links(cls, data: BlockInput, links: list["Link"]) -> set[str]:
@@ -589,7 +506,6 @@ class SmartDecisionMakerBlock(Block):
         Returns the response if successful, raises ValueError if validation fails.
         """
         resp = await llm.llm_call(
-            compress_prompt_to_fit=input_data.conversation_compaction,
             credentials=credentials,
             llm_model=input_data.model,
             prompt=current_prompt,
@@ -677,291 +593,6 @@ class SmartDecisionMakerBlock(Block):
return resp
def _process_tool_calls(
self, response, tool_functions: list[dict[str, Any]]
) -> list[ToolInfo]:
"""Process tool calls and extract tool definitions, arguments, and input data.
Returns a list of tool info dicts with:
- tool_call: The original tool call object
- tool_name: The function name
- tool_def: The tool definition from tool_functions
- input_data: Processed input data dict (includes None values)
- field_mapping: Field name mapping for the tool
"""
if not response.tool_calls:
return []
processed_tools = []
for tool_call in response.tool_calls:
tool_name = tool_call.function.name
tool_args = json.loads(tool_call.function.arguments)
tool_def = next(
(
tool
for tool in tool_functions
if tool["function"]["name"] == tool_name
),
None,
)
if not tool_def:
if len(tool_functions) == 1:
tool_def = tool_functions[0]
else:
continue
# Build input data for the tool
input_data = {}
field_mapping = tool_def["function"].get("_field_mapping", {})
if "function" in tool_def and "parameters" in tool_def["function"]:
expected_args = tool_def["function"]["parameters"].get("properties", {})
for clean_arg_name in expected_args:
original_field_name = field_mapping.get(
clean_arg_name, clean_arg_name
)
arg_value = tool_args.get(clean_arg_name)
# Include all expected parameters, even if None (for backward compatibility with tests)
input_data[original_field_name] = arg_value
processed_tools.append(
ToolInfo(
tool_call=tool_call,
tool_name=tool_name,
tool_def=tool_def,
input_data=input_data,
field_mapping=field_mapping,
)
)
return processed_tools
def _update_conversation(
self, prompt: list[dict], response, tool_outputs: list | None = None
):
"""Update conversation history with response and tool outputs."""
# Don't add separate reasoning message with tool calls (breaks Anthropic's tool_use->tool_result pairing)
assistant_message = _convert_raw_response_to_dict(response.raw_response)
has_tool_calls = isinstance(assistant_message.get("content"), list) and any(
item.get("type") == "tool_use"
for item in assistant_message.get("content", [])
)
if response.reasoning and not has_tool_calls:
prompt.append(
{"role": "assistant", "content": f"[Reasoning]: {response.reasoning}"}
)
prompt.append(assistant_message)
if tool_outputs:
prompt.extend(tool_outputs)
async def _execute_single_tool_with_manager(
self,
tool_info: ToolInfo,
execution_params: ExecutionParams,
execution_processor: "ExecutionProcessor",
) -> dict:
"""Execute a single tool using the execution manager for proper integration."""
# Lazy imports to avoid circular dependencies
from backend.data.execution import NodeExecutionEntry
tool_call = tool_info.tool_call
tool_def = tool_info.tool_def
raw_input_data = tool_info.input_data
# Get sink node and field mapping
sink_node_id = tool_def["function"]["_sink_node_id"]
# Use proper database operations for tool execution
db_client = get_database_manager_async_client()
# Get target node
target_node = await db_client.get_node(sink_node_id)
if not target_node:
raise ValueError(f"Target node {sink_node_id} not found")
# Create proper node execution using upsert_execution_input
node_exec_result = None
final_input_data = None
# Add all inputs to the execution
if not raw_input_data:
raise ValueError(f"Tool call has no input data: {tool_call}")
for input_name, input_value in raw_input_data.items():
node_exec_result, final_input_data = await db_client.upsert_execution_input(
node_id=sink_node_id,
graph_exec_id=execution_params.graph_exec_id,
input_name=input_name,
input_data=input_value,
)
assert node_exec_result is not None, "node_exec_result should not be None"
# Create NodeExecutionEntry for execution manager
node_exec_entry = NodeExecutionEntry(
user_id=execution_params.user_id,
graph_exec_id=execution_params.graph_exec_id,
graph_id=execution_params.graph_id,
graph_version=execution_params.graph_version,
node_exec_id=node_exec_result.node_exec_id,
node_id=sink_node_id,
block_id=target_node.block_id,
inputs=final_input_data or {},
execution_context=execution_params.execution_context,
)
# Use the execution manager to execute the tool node
try:
# Get NodeExecutionProgress from the execution manager's running nodes
node_exec_progress = execution_processor.running_node_execution[
sink_node_id
]
# Use the execution manager's own graph stats
graph_stats_pair = (
execution_processor.execution_stats,
execution_processor.execution_stats_lock,
)
# Create a completed future for the task tracking system
node_exec_future = Future()
node_exec_progress.add_task(
node_exec_id=node_exec_result.node_exec_id,
task=node_exec_future,
)
# Execute the node directly since we're in the SmartDecisionMaker context
node_exec_future.set_result(
await execution_processor.on_node_execution(
node_exec=node_exec_entry,
node_exec_progress=node_exec_progress,
nodes_input_masks=None,
graph_stats_pair=graph_stats_pair,
)
)
# Get outputs from database after execution completes using database manager client
node_outputs = await db_client.get_execution_outputs_by_node_exec_id(
node_exec_result.node_exec_id
)
# Create tool response
tool_response_content = (
json.dumps(node_outputs)
if node_outputs
else "Tool executed successfully"
)
return _create_tool_response(tool_call.id, tool_response_content)
except Exception as e:
logger.error(f"Tool execution with manager failed: {e}")
# Return error response
return _create_tool_response(
tool_call.id, f"Tool execution failed: {str(e)}"
)
async def _execute_tools_agent_mode(
self,
input_data,
credentials,
tool_functions: list[dict[str, Any]],
prompt: list[dict],
graph_exec_id: str,
node_id: str,
node_exec_id: str,
user_id: str,
graph_id: str,
graph_version: int,
execution_context: ExecutionContext,
execution_processor: "ExecutionProcessor",
):
"""Execute tools in agent mode with a loop until finished."""
max_iterations = input_data.agent_mode_max_iterations
iteration = 0
# Execution parameters for tool execution
execution_params = ExecutionParams(
user_id=user_id,
graph_id=graph_id,
node_id=node_id,
graph_version=graph_version,
graph_exec_id=graph_exec_id,
node_exec_id=node_exec_id,
execution_context=execution_context,
)
current_prompt = list(prompt)
while max_iterations < 0 or iteration < max_iterations:
iteration += 1
logger.debug(f"Agent mode iteration {iteration}")
# Prepare prompt for this iteration
iteration_prompt = list(current_prompt)
# On the last iteration, add a special system message to encourage completion
if max_iterations > 0 and iteration == max_iterations:
last_iteration_message = {
"role": "system",
"content": f"{MAIN_OBJECTIVE_PREFIX}This is your last iteration ({iteration}/{max_iterations}). "
"Try to complete the task with the information you have. If you cannot fully complete it, "
"provide a summary of what you've accomplished and what remains to be done. "
"Prefer finishing with a clear response rather than making additional tool calls.",
}
iteration_prompt.append(last_iteration_message)
# Get LLM response
try:
response = await self._attempt_llm_call_with_validation(
credentials, input_data, iteration_prompt, tool_functions
)
except Exception as e:
yield "error", f"LLM call failed in agent mode iteration {iteration}: {str(e)}"
return
# Process tool calls
processed_tools = self._process_tool_calls(response, tool_functions)
# If no tool calls, we're done
if not processed_tools:
yield "finished", response.response
self._update_conversation(current_prompt, response)
yield "conversations", current_prompt
return
# Execute tools and collect responses
tool_outputs = []
for tool_info in processed_tools:
try:
tool_response = await self._execute_single_tool_with_manager(
tool_info, execution_params, execution_processor
)
tool_outputs.append(tool_response)
except Exception as e:
logger.error(f"Tool execution failed: {e}")
# Create error response for the tool
error_response = _create_tool_response(
tool_info.tool_call.id, f"Error: {str(e)}"
)
tool_outputs.append(error_response)
tool_outputs = _combine_tool_responses(tool_outputs)
self._update_conversation(current_prompt, response, tool_outputs)
# Yield intermediate conversation state
yield "conversations", current_prompt
# If we reach max iterations, yield the current state
if max_iterations < 0:
yield "finished", f"Agent mode completed after {iteration} iterations"
else:
yield "finished", f"Agent mode completed after {max_iterations} iterations (limit reached)"
yield "conversations", current_prompt
async def run(
self,
input_data: Input,
@@ -972,12 +603,8 @@ class SmartDecisionMakerBlock(Block):
graph_exec_id: str,
node_exec_id: str,
user_id: str,
graph_version: int,
execution_context: ExecutionContext,
execution_processor: "ExecutionProcessor",
**kwargs,
) -> BlockOutput:
tool_functions = await self._create_tool_node_signatures(node_id)
yield "tool_functions", json.dumps(tool_functions)
@@ -1021,52 +648,24 @@ class SmartDecisionMakerBlock(Block):
input_data.prompt = llm.fmt.format_string(input_data.prompt, values)
input_data.sys_prompt = llm.fmt.format_string(input_data.sys_prompt, values)
prefix = "[Main Objective Prompt]: "
if input_data.sys_prompt and not any(
p["role"] == "system" and p["content"].startswith(MAIN_OBJECTIVE_PREFIX)
for p in prompt
p["role"] == "system" and p["content"].startswith(prefix) for p in prompt
):
prompt.append(
{
"role": "system",
"content": MAIN_OBJECTIVE_PREFIX + input_data.sys_prompt,
}
)
prompt.append({"role": "system", "content": prefix + input_data.sys_prompt})
if input_data.prompt and not any(
p["role"] == "user" and p["content"].startswith(MAIN_OBJECTIVE_PREFIX)
for p in prompt
p["role"] == "user" and p["content"].startswith(prefix) for p in prompt
):
prompt.append(
{"role": "user", "content": MAIN_OBJECTIVE_PREFIX + input_data.prompt}
)
prompt.append({"role": "user", "content": prefix + input_data.prompt})
# Execute tools based on the selected mode
if input_data.agent_mode_max_iterations != 0:
# In agent mode, execute tools directly in a loop until finished
async for result in self._execute_tools_agent_mode(
input_data=input_data,
credentials=credentials,
tool_functions=tool_functions,
prompt=prompt,
graph_exec_id=graph_exec_id,
node_id=node_id,
node_exec_id=node_exec_id,
user_id=user_id,
graph_id=graph_id,
graph_version=graph_version,
execution_context=execution_context,
execution_processor=execution_processor,
):
yield result
return
# One-off mode: single LLM call and yield tool calls for external execution
current_prompt = list(prompt)
max_attempts = max(1, int(input_data.retry))
response = None
last_error = None
for _ in range(max_attempts):
for attempt in range(max_attempts):
try:
response = await self._attempt_llm_call_with_validation(
credentials, input_data, current_prompt, tool_functions
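
Among the helpers removed above, `_combine_tool_responses` merged parallel Anthropic tool results into a single user message (Anthropic's API expects the turn's `tool_result` blocks together in one message). A self-contained sketch of the behavior the removed helper implemented:

```python
from typing import Any


def combine_anthropic_tool_responses(tool_outputs: list[dict[str, Any]]) -> list[dict[str, Any]]:
    """Merge Anthropic-style tool results (role="user", type="message",
    content=[{"type": "tool_result", ...}]) into one user message; any
    other format passes through untouched, as in the removed helper."""
    if len(tool_outputs) <= 1:
        return tool_outputs

    def is_anthropic(out: dict[str, Any]) -> bool:
        return (out.get("role") == "user"
                and out.get("type") == "message"
                and isinstance(out.get("content"), list)
                and any(isinstance(i, dict) and i.get("type") == "tool_result"
                        for i in out["content"]))

    anthropic = [o for o in tool_outputs if is_anthropic(o)]
    if len(anthropic) <= 1:
        return tool_outputs
    merged = {"role": "user", "type": "message",
              "content": [item for o in anthropic for item in o["content"]]}
    return [merged] + [o for o in tool_outputs if o not in anthropic]


resp = combine_anthropic_tool_responses([
    {"role": "user", "type": "message", "content": [{"type": "tool_result", "tool_use_id": "a"}]},
    {"role": "user", "type": "message", "content": [{"type": "tool_result", "tool_use_id": "b"}]},
])
assert len(resp) == 1 and len(resp[0]["content"]) == 2
```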

View File

@@ -1,11 +1,7 @@
import logging
import threading
from collections import defaultdict
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from backend.data.execution import ExecutionContext
from backend.data.model import ProviderName, User
from backend.server.model import CreateGraph
from backend.server.rest_api import AgentServer
@@ -21,10 +17,10 @@ async def create_graph(s: SpinTestServer, g, u: User):
async def create_credentials(s: SpinTestServer, u: User):
import backend.blocks.llm as llm_module
import backend.blocks.llm as llm
provider = ProviderName.OPENAI
credentials = llm_module.TEST_CREDENTIALS
credentials = llm.TEST_CREDENTIALS
return await s.agent_server.test_create_credentials(u.id, provider, credentials)
@@ -200,6 +196,8 @@ async def test_smart_decision_maker_function_signature(server: SpinTestServer):
@pytest.mark.asyncio
async def test_smart_decision_maker_tracks_llm_stats():
"""Test that SmartDecisionMakerBlock correctly tracks LLM usage stats."""
from unittest.mock import MagicMock, patch
import backend.blocks.llm as llm_module
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
@@ -218,6 +216,7 @@ async def test_smart_decision_maker_tracks_llm_stats():
}
# Mock the _create_tool_node_signatures method to avoid database calls
from unittest.mock import AsyncMock
with patch(
"backend.blocks.llm.llm_call",
@@ -235,19 +234,10 @@ async def test_smart_decision_maker_tracks_llm_stats():
prompt="Should I continue with this task?",
model=llm_module.LlmModel.GPT4O,
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
agent_mode_max_iterations=0,
)
# Execute the block
outputs = {}
# Create execution context
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
mock_execution_processor = MagicMock()
async for output_name, output_data in block.run(
input_data,
credentials=llm_module.TEST_CREDENTIALS,
@@ -256,9 +246,6 @@ async def test_smart_decision_maker_tracks_llm_stats():
graph_exec_id="test-exec-id",
node_exec_id="test-node-exec-id",
user_id="test-user-id",
graph_version=1,
execution_context=mock_execution_context,
execution_processor=mock_execution_processor,
):
outputs[output_name] = output_data
@@ -276,6 +263,8 @@ async def test_smart_decision_maker_tracks_llm_stats():
@pytest.mark.asyncio
async def test_smart_decision_maker_parameter_validation():
"""Test that SmartDecisionMakerBlock correctly validates tool call parameters."""
from unittest.mock import MagicMock, patch
import backend.blocks.llm as llm_module
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
@@ -322,6 +311,8 @@ async def test_smart_decision_maker_parameter_validation():
mock_response_with_typo.reasoning = None
mock_response_with_typo.raw_response = {"role": "assistant", "content": None}
from unittest.mock import AsyncMock
with patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
@@ -338,17 +329,8 @@ async def test_smart_decision_maker_parameter_validation():
model=llm_module.LlmModel.GPT4O,
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
retry=2, # Set retry to 2 for testing
agent_mode_max_iterations=0,
)
# Create execution context
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
mock_execution_processor = MagicMock()
# Should raise ValueError after retries due to typo'd parameter name
with pytest.raises(ValueError) as exc_info:
outputs = {}
@@ -360,9 +342,6 @@ async def test_smart_decision_maker_parameter_validation():
graph_exec_id="test-exec-id",
node_exec_id="test-node-exec-id",
user_id="test-user-id",
graph_version=1,
execution_context=mock_execution_context,
execution_processor=mock_execution_processor,
):
outputs[output_name] = output_data
@@ -389,6 +368,8 @@ async def test_smart_decision_maker_parameter_validation():
mock_response_missing_required.reasoning = None
mock_response_missing_required.raw_response = {"role": "assistant", "content": None}
from unittest.mock import AsyncMock
with patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
@@ -404,17 +385,8 @@ async def test_smart_decision_maker_parameter_validation():
prompt="Search for keywords",
model=llm_module.LlmModel.GPT4O,
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
agent_mode_max_iterations=0,
)
# Create execution context
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
mock_execution_processor = MagicMock()
# Should raise ValueError due to missing required parameter
with pytest.raises(ValueError) as exc_info:
outputs = {}
@@ -426,9 +398,6 @@ async def test_smart_decision_maker_parameter_validation():
graph_exec_id="test-exec-id",
node_exec_id="test-node-exec-id",
user_id="test-user-id",
graph_version=1,
execution_context=mock_execution_context,
execution_processor=mock_execution_processor,
):
outputs[output_name] = output_data
@@ -449,6 +418,8 @@ async def test_smart_decision_maker_parameter_validation():
mock_response_valid.reasoning = None
mock_response_valid.raw_response = {"role": "assistant", "content": None}
from unittest.mock import AsyncMock
with patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
@@ -464,19 +435,10 @@ async def test_smart_decision_maker_parameter_validation():
prompt="Search for keywords",
model=llm_module.LlmModel.GPT4O,
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
agent_mode_max_iterations=0,
)
# Should succeed - optional parameter missing is OK
outputs = {}
# Create execution context
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
mock_execution_processor = MagicMock()
async for output_name, output_data in block.run(
input_data,
credentials=llm_module.TEST_CREDENTIALS,
@@ -485,9 +447,6 @@ async def test_smart_decision_maker_parameter_validation():
graph_exec_id="test-exec-id",
node_exec_id="test-node-exec-id",
user_id="test-user-id",
graph_version=1,
execution_context=mock_execution_context,
execution_processor=mock_execution_processor,
):
outputs[output_name] = output_data
@@ -513,6 +472,8 @@ async def test_smart_decision_maker_parameter_validation():
mock_response_all_params.reasoning = None
mock_response_all_params.raw_response = {"role": "assistant", "content": None}
from unittest.mock import AsyncMock
with patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
@@ -528,19 +489,10 @@ async def test_smart_decision_maker_parameter_validation():
prompt="Search for keywords",
model=llm_module.LlmModel.GPT4O,
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
agent_mode_max_iterations=0,
)
# Should succeed with all parameters
outputs = {}
# Create execution context
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
mock_execution_processor = MagicMock()
async for output_name, output_data in block.run(
input_data,
credentials=llm_module.TEST_CREDENTIALS,
@@ -549,9 +501,6 @@ async def test_smart_decision_maker_parameter_validation():
graph_exec_id="test-exec-id",
node_exec_id="test-node-exec-id",
user_id="test-user-id",
graph_version=1,
execution_context=mock_execution_context,
execution_processor=mock_execution_processor,
):
outputs[output_name] = output_data
@@ -564,6 +513,8 @@ async def test_smart_decision_maker_parameter_validation():
@pytest.mark.asyncio
async def test_smart_decision_maker_raw_response_conversion():
"""Test that SmartDecisionMaker correctly handles different raw_response types with retry mechanism."""
from unittest.mock import MagicMock, patch
import backend.blocks.llm as llm_module
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
@@ -633,6 +584,7 @@ async def test_smart_decision_maker_raw_response_conversion():
)
# Mock llm_call to return different responses on different calls
from unittest.mock import AsyncMock
with patch(
"backend.blocks.llm.llm_call", new_callable=AsyncMock
@@ -651,19 +603,10 @@ async def test_smart_decision_maker_raw_response_conversion():
model=llm_module.LlmModel.GPT4O,
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
retry=2,
agent_mode_max_iterations=0,
)
# Should succeed after retry, demonstrating our helper function works
outputs = {}
# Create execution context
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
mock_execution_processor = MagicMock()
async for output_name, output_data in block.run(
input_data,
credentials=llm_module.TEST_CREDENTIALS,
@@ -672,9 +615,6 @@ async def test_smart_decision_maker_raw_response_conversion():
graph_exec_id="test-exec-id",
node_exec_id="test-node-exec-id",
user_id="test-user-id",
graph_version=1,
execution_context=mock_execution_context,
execution_processor=mock_execution_processor,
):
outputs[output_name] = output_data
@@ -710,6 +650,8 @@ async def test_smart_decision_maker_raw_response_conversion():
"I'll help you with that." # Ollama returns string
)
from unittest.mock import AsyncMock
with patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
@@ -724,18 +666,9 @@ async def test_smart_decision_maker_raw_response_conversion():
prompt="Simple prompt",
model=llm_module.LlmModel.GPT4O,
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
agent_mode_max_iterations=0,
)
outputs = {}
# Create execution context
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
mock_execution_processor = MagicMock()
async for output_name, output_data in block.run(
input_data,
credentials=llm_module.TEST_CREDENTIALS,
@@ -744,9 +677,6 @@ async def test_smart_decision_maker_raw_response_conversion():
graph_exec_id="test-exec-id",
node_exec_id="test-node-exec-id",
user_id="test-user-id",
graph_version=1,
execution_context=mock_execution_context,
execution_processor=mock_execution_processor,
):
outputs[output_name] = output_data
@@ -766,6 +696,8 @@ async def test_smart_decision_maker_raw_response_conversion():
"content": "Test response",
} # Dict format
from unittest.mock import AsyncMock
with patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
@@ -780,18 +712,9 @@ async def test_smart_decision_maker_raw_response_conversion():
prompt="Another test",
model=llm_module.LlmModel.GPT4O,
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
agent_mode_max_iterations=0,
)
outputs = {}
# Create execution context
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
mock_execution_processor = MagicMock()
async for output_name, output_data in block.run(
input_data,
credentials=llm_module.TEST_CREDENTIALS,
@@ -800,260 +723,8 @@ async def test_smart_decision_maker_raw_response_conversion():
graph_exec_id="test-exec-id",
node_exec_id="test-node-exec-id",
user_id="test-user-id",
graph_version=1,
execution_context=mock_execution_context,
execution_processor=mock_execution_processor,
):
outputs[output_name] = output_data
assert "finished" in outputs
assert outputs["finished"] == "Test response"
@pytest.mark.asyncio
async def test_smart_decision_maker_agent_mode():
"""Test that agent mode executes tools directly and loops until finished."""
import backend.blocks.llm as llm_module
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
block = SmartDecisionMakerBlock()
# Mock tool call that requires multiple iterations
mock_tool_call_1 = MagicMock()
mock_tool_call_1.id = "call_1"
mock_tool_call_1.function.name = "search_keywords"
mock_tool_call_1.function.arguments = (
'{"query": "test", "max_keyword_difficulty": 50}'
)
mock_response_1 = MagicMock()
mock_response_1.response = None
mock_response_1.tool_calls = [mock_tool_call_1]
mock_response_1.prompt_tokens = 50
mock_response_1.completion_tokens = 25
mock_response_1.reasoning = "Using search tool"
mock_response_1.raw_response = {
"role": "assistant",
"content": None,
"tool_calls": [{"id": "call_1", "type": "function"}],
}
# Final response with no tool calls (finished)
mock_response_2 = MagicMock()
mock_response_2.response = "Task completed successfully"
mock_response_2.tool_calls = []
mock_response_2.prompt_tokens = 30
mock_response_2.completion_tokens = 15
mock_response_2.reasoning = None
mock_response_2.raw_response = {
"role": "assistant",
"content": "Task completed successfully",
}
# Mock the LLM call to return different responses on each iteration
llm_call_mock = AsyncMock()
llm_call_mock.side_effect = [mock_response_1, mock_response_2]
# Mock tool node signatures
mock_tool_signatures = [
{
"type": "function",
"function": {
"name": "search_keywords",
"_sink_node_id": "test-sink-node-id",
"_field_mapping": {},
"parameters": {
"properties": {
"query": {"type": "string"},
"max_keyword_difficulty": {"type": "integer"},
},
"required": ["query", "max_keyword_difficulty"],
},
},
}
]
# Mock database and execution components
mock_db_client = AsyncMock()
mock_node = MagicMock()
mock_node.block_id = "test-block-id"
mock_db_client.get_node.return_value = mock_node
# Mock upsert_execution_input to return proper NodeExecutionResult and input data
mock_node_exec_result = MagicMock()
mock_node_exec_result.node_exec_id = "test-tool-exec-id"
mock_input_data = {"query": "test", "max_keyword_difficulty": 50}
mock_db_client.upsert_execution_input.return_value = (
mock_node_exec_result,
mock_input_data,
)
# No longer need mock_execute_node since we use execution_processor.on_node_execution
with patch("backend.blocks.llm.llm_call", llm_call_mock), patch.object(
block, "_create_tool_node_signatures", return_value=mock_tool_signatures
), patch(
"backend.blocks.smart_decision_maker.get_database_manager_async_client",
return_value=mock_db_client,
), patch(
"backend.executor.manager.async_update_node_execution_status",
new_callable=AsyncMock,
), patch(
"backend.integrations.creds_manager.IntegrationCredentialsManager"
):
# Create a mock execution context
mock_execution_context = ExecutionContext(
safe_mode=False,
)
# Create a mock execution processor for agent mode tests
mock_execution_processor = AsyncMock()
# Configure the execution processor mock with required attributes
mock_execution_processor.running_node_execution = defaultdict(MagicMock)
mock_execution_processor.execution_stats = MagicMock()
mock_execution_processor.execution_stats_lock = threading.Lock()
# Mock the on_node_execution method to return successful stats
mock_node_stats = MagicMock()
mock_node_stats.error = None # No error
mock_execution_processor.on_node_execution = AsyncMock(
return_value=mock_node_stats
)
# Mock the get_execution_outputs_by_node_exec_id method
mock_db_client.get_execution_outputs_by_node_exec_id.return_value = {
"result": {"status": "success", "data": "search completed"}
}
# Test agent mode with max_iterations = 3
input_data = SmartDecisionMakerBlock.Input(
prompt="Complete this task using tools",
model=llm_module.LlmModel.GPT4O,
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
agent_mode_max_iterations=3, # Enable agent mode with 3 max iterations
)
outputs = {}
async for output_name, output_data in block.run(
input_data,
credentials=llm_module.TEST_CREDENTIALS,
graph_id="test-graph-id",
node_id="test-node-id",
graph_exec_id="test-exec-id",
node_exec_id="test-node-exec-id",
user_id="test-user-id",
graph_version=1,
execution_context=mock_execution_context,
execution_processor=mock_execution_processor,
):
outputs[output_name] = output_data
# Verify agent mode behavior
assert "tool_functions" in outputs # tool_functions is yielded in both modes
assert "finished" in outputs
assert outputs["finished"] == "Task completed successfully"
assert "conversations" in outputs
# Verify the conversation includes tool responses
conversations = outputs["conversations"]
assert len(conversations) > 2 # Should have multiple conversation entries
# Verify LLM was called twice (once for tool call, once for finish)
assert llm_call_mock.call_count == 2
# Verify tool was executed via execution processor
assert mock_execution_processor.on_node_execution.call_count == 1
@pytest.mark.asyncio
async def test_smart_decision_maker_traditional_mode_default():
"""Test that default behavior (agent_mode_max_iterations=0) works as traditional mode."""
import backend.blocks.llm as llm_module
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
block = SmartDecisionMakerBlock()
# Mock tool call
mock_tool_call = MagicMock()
mock_tool_call.function.name = "search_keywords"
mock_tool_call.function.arguments = (
'{"query": "test", "max_keyword_difficulty": 50}'
)
mock_response = MagicMock()
mock_response.response = None
mock_response.tool_calls = [mock_tool_call]
mock_response.prompt_tokens = 50
mock_response.completion_tokens = 25
mock_response.reasoning = None
mock_response.raw_response = {"role": "assistant", "content": None}
mock_tool_signatures = [
{
"type": "function",
"function": {
"name": "search_keywords",
"_sink_node_id": "test-sink-node-id",
"_field_mapping": {},
"parameters": {
"properties": {
"query": {"type": "string"},
"max_keyword_difficulty": {"type": "integer"},
},
"required": ["query", "max_keyword_difficulty"],
},
},
}
]
with patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
return_value=mock_response,
), patch.object(
block, "_create_tool_node_signatures", return_value=mock_tool_signatures
):
# Test default behavior (traditional mode)
input_data = SmartDecisionMakerBlock.Input(
prompt="Test prompt",
model=llm_module.LlmModel.GPT4O,
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
agent_mode_max_iterations=0, # Traditional mode
)
# Create execution context
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
mock_execution_processor = MagicMock()
outputs = {}
async for output_name, output_data in block.run(
input_data,
credentials=llm_module.TEST_CREDENTIALS,
graph_id="test-graph-id",
node_id="test-node-id",
graph_exec_id="test-exec-id",
node_exec_id="test-node-exec-id",
user_id="test-user-id",
graph_version=1,
execution_context=mock_execution_context,
execution_processor=mock_execution_processor,
):
outputs[output_name] = output_data
# Verify traditional mode behavior
assert (
"tool_functions" in outputs
) # Should yield tool_functions in traditional mode
assert (
"tools_^_test-sink-node-id_~_query" in outputs
) # Should yield individual tool parameters
assert "tools_^_test-sink-node-id_~_max_keyword_difficulty" in outputs
assert "conversations" in outputs

View File

@@ -1,7 +1,7 @@
"""Comprehensive tests for SmartDecisionMakerBlock dynamic field handling."""
import json
from unittest.mock import AsyncMock, MagicMock, Mock, patch
from unittest.mock import AsyncMock, Mock, patch
import pytest
@@ -308,47 +308,10 @@ async def test_output_yielding_with_dynamic_fields():
) as mock_llm:
mock_llm.return_value = mock_response
# Mock the database manager to avoid HTTP calls during tool execution
with patch(
"backend.blocks.smart_decision_maker.get_database_manager_async_client"
) as mock_db_manager, patch.object(
# Mock the function signature creation
with patch.object(
block, "_create_tool_node_signatures", new_callable=AsyncMock
) as mock_sig:
# Set up the mock database manager
mock_db_client = AsyncMock()
mock_db_manager.return_value = mock_db_client
# Mock the node retrieval
mock_target_node = Mock()
mock_target_node.id = "test-sink-node-id"
mock_target_node.block_id = "CreateDictionaryBlock"
mock_target_node.block = Mock()
mock_target_node.block.name = "Create Dictionary"
mock_db_client.get_node.return_value = mock_target_node
# Mock the execution result creation
mock_node_exec_result = Mock()
mock_node_exec_result.node_exec_id = "mock-node-exec-id"
mock_final_input_data = {
"values_#_name": "Alice",
"values_#_age": 30,
"values_#_email": "alice@example.com",
}
mock_db_client.upsert_execution_input.return_value = (
mock_node_exec_result,
mock_final_input_data,
)
# Mock the output retrieval
mock_outputs = {
"values_#_name": "Alice",
"values_#_age": 30,
"values_#_email": "alice@example.com",
}
mock_db_client.get_execution_outputs_by_node_exec_id.return_value = (
mock_outputs
)
mock_sig.return_value = [
{
"type": "function",
@@ -374,16 +337,10 @@ async def test_output_yielding_with_dynamic_fields():
prompt="Create a user dictionary",
credentials=llm.TEST_CREDENTIALS_INPUT,
model=llm.LlmModel.GPT4O,
agent_mode_max_iterations=0, # Use traditional mode to test output yielding
)
# Run the block
outputs = {}
from backend.data.execution import ExecutionContext
mock_execution_context = ExecutionContext(safe_mode=False)
mock_execution_processor = MagicMock()
async for output_name, output_value in block.run(
input_data,
credentials=llm.TEST_CREDENTIALS,
@@ -392,9 +349,6 @@ async def test_output_yielding_with_dynamic_fields():
graph_exec_id="test_exec",
node_exec_id="test_node_exec",
user_id="test_user",
graph_version=1,
execution_context=mock_execution_context,
execution_processor=mock_execution_processor,
):
outputs[output_name] = output_value
@@ -557,108 +511,45 @@ async def test_validation_errors_dont_pollute_conversation():
}
]
# Mock the database manager to avoid HTTP calls during tool execution
with patch(
"backend.blocks.smart_decision_maker.get_database_manager_async_client"
) as mock_db_manager:
# Set up the mock database manager for agent mode
mock_db_client = AsyncMock()
mock_db_manager.return_value = mock_db_client
# Create input data
from backend.blocks import llm
# Mock the node retrieval
mock_target_node = Mock()
mock_target_node.id = "test-sink-node-id"
mock_target_node.block_id = "TestBlock"
mock_target_node.block = Mock()
mock_target_node.block.name = "Test Block"
mock_db_client.get_node.return_value = mock_target_node
input_data = block.input_schema(
prompt="Test prompt",
credentials=llm.TEST_CREDENTIALS_INPUT,
model=llm.LlmModel.GPT4O,
retry=3, # Allow retries
)
# Mock the execution result creation
mock_node_exec_result = Mock()
mock_node_exec_result.node_exec_id = "mock-node-exec-id"
mock_final_input_data = {"correct_param": "value"}
mock_db_client.upsert_execution_input.return_value = (
mock_node_exec_result,
mock_final_input_data,
)
# Run the block
outputs = {}
async for output_name, output_value in block.run(
input_data,
credentials=llm.TEST_CREDENTIALS,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_exec",
node_exec_id="test_node_exec",
user_id="test_user",
):
outputs[output_name] = output_value
# Mock the output retrieval
mock_outputs = {"correct_param": "value"}
mock_db_client.get_execution_outputs_by_node_exec_id.return_value = (
mock_outputs
)
# Verify we had 2 LLM calls (initial + retry)
assert call_count == 2
# Create input data
from backend.blocks import llm
# Check the final conversation output
final_conversation = outputs.get("conversations", [])
input_data = block.input_schema(
prompt="Test prompt",
credentials=llm.TEST_CREDENTIALS_INPUT,
model=llm.LlmModel.GPT4O,
retry=3, # Allow retries
agent_mode_max_iterations=1,
)
# The final conversation should NOT contain the validation error message
error_messages = [
msg
for msg in final_conversation
if msg.get("role") == "user"
and "parameter errors" in msg.get("content", "")
]
assert (
len(error_messages) == 0
), "Validation error leaked into final conversation"
# Run the block
outputs = {}
from backend.data.execution import ExecutionContext
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a proper mock execution processor for agent mode
from collections import defaultdict
mock_execution_processor = AsyncMock()
mock_execution_processor.execution_stats = MagicMock()
mock_execution_processor.execution_stats_lock = MagicMock()
# Create a mock NodeExecutionProgress for the sink node
mock_node_exec_progress = MagicMock()
mock_node_exec_progress.add_task = MagicMock()
mock_node_exec_progress.pop_output = MagicMock(
return_value=None
) # No outputs to process
# Set up running_node_execution as a defaultdict that returns our mock for any key
mock_execution_processor.running_node_execution = defaultdict(
lambda: mock_node_exec_progress
)
# Mock the on_node_execution method that gets called during tool execution
mock_node_stats = MagicMock()
mock_node_stats.error = None
mock_execution_processor.on_node_execution.return_value = (
mock_node_stats
)
async for output_name, output_value in block.run(
input_data,
credentials=llm.TEST_CREDENTIALS,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_exec",
node_exec_id="test_node_exec",
user_id="test_user",
graph_version=1,
execution_context=mock_execution_context,
execution_processor=mock_execution_processor,
):
outputs[output_name] = output_value
# Verify we had at least 1 LLM call
assert call_count >= 1
# Check the final conversation output
final_conversation = outputs.get("conversations", [])
# The final conversation should NOT contain validation error messages
# Even if retries don't happen in agent mode, we should not leak errors
error_messages = [
msg
for msg in final_conversation
if msg.get("role") == "user"
and "parameter errors" in msg.get("content", "")
]
assert (
len(error_messages) == 0
), "Validation error leaked into final conversation"
# The final conversation should only have the successful response
assert final_conversation[-1]["content"] == "valid"

View File

@@ -5,7 +5,6 @@ from enum import Enum
 from multiprocessing import Manager
 from queue import Empty
 from typing import (
-    TYPE_CHECKING,
     Annotated,
     Any,
     AsyncGenerator,
@@ -66,9 +65,6 @@ from .includes import (
 )
 from .model import CredentialsMetaInput, GraphExecutionStats, NodeExecutionStats

-if TYPE_CHECKING:
-    pass

 T = TypeVar("T")

 logger = logging.getLogger(__name__)
@@ -840,30 +836,6 @@ async def upsert_execution_output(
await AgentNodeExecutionInputOutput.prisma().create(data=data)
async def get_execution_outputs_by_node_exec_id(
node_exec_id: str,
) -> dict[str, Any]:
"""
Get all execution outputs for a specific node execution ID.
Args:
node_exec_id: The node execution ID to get outputs for
Returns:
Dictionary mapping output names to their data values
"""
outputs = await AgentNodeExecutionInputOutput.prisma().find_many(
where={"referencedByOutputExecId": node_exec_id}
)
result = {}
for output in outputs:
if output.data is not None:
result[output.name] = type_utils.convert(output.data, JsonValue)
return result
async def update_graph_execution_start_time(
graph_exec_id: str,
) -> GraphExecution | None:

View File

@@ -100,7 +100,7 @@ async def get_or_create_human_review(
return None
else:
return ReviewResult(
data=review.payload,
data=review.payload if review.status == ReviewStatus.APPROVED else None,
status=review.status,
message=review.reviewMessage or "",
processed=review.processed,

View File

@@ -13,7 +13,6 @@ from backend.data.execution import (
get_block_error_stats,
get_child_graph_executions,
get_execution_kv_data,
get_execution_outputs_by_node_exec_id,
get_frequently_executed_graphs,
get_graph_execution_meta,
get_graph_executions,
@@ -148,7 +147,6 @@ class DatabaseManager(AppService):
update_graph_execution_stats = _(update_graph_execution_stats)
upsert_execution_input = _(upsert_execution_input)
upsert_execution_output = _(upsert_execution_output)
get_execution_outputs_by_node_exec_id = _(get_execution_outputs_by_node_exec_id)
get_execution_kv_data = _(get_execution_kv_data)
set_execution_kv_data = _(set_execution_kv_data)
get_block_error_stats = _(get_block_error_stats)
@@ -279,7 +277,6 @@ class DatabaseManagerAsyncClient(AppServiceClient):
get_user_integrations = d.get_user_integrations
upsert_execution_input = d.upsert_execution_input
upsert_execution_output = d.upsert_execution_output
get_execution_outputs_by_node_exec_id = d.get_execution_outputs_by_node_exec_id
update_graph_execution_stats = d.update_graph_execution_stats
update_node_execution_status = d.update_node_execution_status
update_node_execution_status_batch = d.update_node_execution_status_batch

View File

@@ -133,8 +133,9 @@ def execute_graph(
cluster_lock: ClusterLock,
):
"""Execute graph using thread-local ExecutionProcessor instance"""
processor: ExecutionProcessor = _tls.processor
return processor.on_graph_execution(graph_exec_entry, cancel_event, cluster_lock)
return _tls.processor.on_graph_execution(
graph_exec_entry, cancel_event, cluster_lock
)
T = TypeVar("T")
@@ -142,8 +143,8 @@ T = TypeVar("T")
async def execute_node(
node: Node,
creds_manager: IntegrationCredentialsManager,
data: NodeExecutionEntry,
execution_processor: "ExecutionProcessor",
execution_stats: NodeExecutionStats | None = None,
nodes_input_masks: Optional[NodesInputMasks] = None,
) -> BlockOutput:
@@ -168,7 +169,6 @@ async def execute_node(
node_id = data.node_id
node_block = node.block
execution_context = data.execution_context
creds_manager = execution_processor.creds_manager
log_metadata = LogMetadata(
logger=_logger,
@@ -212,7 +212,6 @@ async def execute_node(
"node_exec_id": node_exec_id,
"user_id": user_id,
"execution_context": execution_context,
"execution_processor": execution_processor,
}
# Last-minute fetch credentials + acquire a system-wide read-write lock to prevent
@@ -609,8 +608,8 @@ class ExecutionProcessor:
async for output_name, output_data in execute_node(
node=node,
creds_manager=self.creds_manager,
data=node_exec,
execution_processor=self,
execution_stats=stats,
nodes_input_masks=nodes_input_masks,
):
@@ -861,17 +860,12 @@ class ExecutionProcessor:
execution_stats_lock = threading.Lock()
# State holders ----------------------------------------------------
self.running_node_execution: dict[str, NodeExecutionProgress] = defaultdict(
running_node_execution: dict[str, NodeExecutionProgress] = defaultdict(
NodeExecutionProgress
)
self.running_node_evaluation: dict[str, Future] = {}
self.execution_stats = execution_stats
self.execution_stats_lock = execution_stats_lock
running_node_evaluation: dict[str, Future] = {}
execution_queue = ExecutionQueue[NodeExecutionEntry]()
running_node_execution = self.running_node_execution
running_node_evaluation = self.running_node_evaluation
try:
if db_client.get_credits(graph_exec.user_id) <= 0:
raise InsufficientBalanceError(

View File

@@ -1,157 +0,0 @@
"""
Embedding service for generating text embeddings using OpenAI.
Used for vector-based semantic search in the store.
"""
import functools
import logging
from typing import Optional
import openai
from backend.util.settings import Settings
logger = logging.getLogger(__name__)
# Model configuration
# Using text-embedding-3-small (1536 dimensions) for compatibility with pgvector indexes
# pgvector IVFFlat/HNSW indexes have dimension limits (2000 for IVFFlat, varies for HNSW)
EMBEDDING_MODEL = "text-embedding-3-small"
EMBEDDING_DIMENSIONS = 1536
# Input validation limits
# OpenAI text-embedding-3-small supports up to 8191 tokens (~32k chars)
# We set a conservative limit to prevent abuse
MAX_TEXT_LENGTH = 10000 # characters
MAX_BATCH_SIZE = 100 # maximum texts per batch request
class EmbeddingService:
"""Service for generating text embeddings using OpenAI.
The service can be created without an API key - the key is validated
only when the client property is first accessed. This allows the service
to be instantiated at module load time without requiring configuration.
"""
def __init__(self, api_key: Optional[str] = None):
settings = Settings()
self.api_key = (
api_key
or settings.secrets.openai_internal_api_key
or settings.secrets.openai_api_key
)
@functools.cached_property
def client(self) -> openai.AsyncOpenAI:
"""Lazily create the OpenAI client, raising if no API key is configured."""
if not self.api_key:
raise ValueError(
"OpenAI API key not configured. "
"Set OPENAI_API_KEY or OPENAI_INTERNAL_API_KEY environment variable."
)
return openai.AsyncOpenAI(api_key=self.api_key)
async def generate_embedding(self, text: str) -> list[float]:
"""
Generate embedding for a single text string.
Args:
text: The text to generate an embedding for.
Returns:
A list of floats representing the embedding vector.
Raises:
ValueError: If the text is empty or exceeds maximum length.
openai.APIError: If the OpenAI API call fails.
"""
# Input validation
if not text or not text.strip():
raise ValueError("Text cannot be empty")
if len(text) > MAX_TEXT_LENGTH:
raise ValueError(
f"Text exceeds maximum length of {MAX_TEXT_LENGTH} characters"
)
response = await self.client.embeddings.create(
model=EMBEDDING_MODEL,
input=text,
dimensions=EMBEDDING_DIMENSIONS,
)
if not response.data:
raise ValueError("OpenAI API returned empty embedding data")
return response.data[0].embedding
async def generate_embeddings(self, texts: list[str]) -> list[list[float]]:
"""
Generate embeddings for multiple texts (batch).
Args:
texts: List of texts to generate embeddings for.
Returns:
List of embedding vectors, one per input text.
Raises:
ValueError: If any text is invalid or batch size exceeds limit.
openai.APIError: If the OpenAI API call fails.
"""
# Input validation
if not texts:
raise ValueError("Texts list cannot be empty")
if len(texts) > MAX_BATCH_SIZE:
raise ValueError(f"Batch size exceeds maximum of {MAX_BATCH_SIZE} texts")
for i, text in enumerate(texts):
if not text or not text.strip():
raise ValueError(f"Text at index {i} cannot be empty")
if len(text) > MAX_TEXT_LENGTH:
raise ValueError(
f"Text at index {i} exceeds maximum length of {MAX_TEXT_LENGTH} characters"
)
response = await self.client.embeddings.create(
model=EMBEDDING_MODEL,
input=texts,
dimensions=EMBEDDING_DIMENSIONS,
)
# Sort by index to ensure correct ordering
sorted_data = sorted(response.data, key=lambda x: x.index)
return [item.embedding for item in sorted_data]
def create_search_text(name: str, sub_heading: str, description: str) -> str:
"""
Combine fields into searchable text for embedding.
This creates a single text string from the agent's name, sub-heading,
and description, which is then converted to an embedding vector.
Args:
name: The agent name.
sub_heading: The agent sub-heading/tagline.
description: The agent description.
Returns:
A single string combining all non-empty fields.
"""
parts = [name or "", sub_heading or "", description or ""]
# filter(None, parts) removes empty strings since empty string is falsy
return " ".join(filter(None, parts)).strip()
@functools.cache
def get_embedding_service() -> EmbeddingService:
"""
Get or create the embedding service singleton.
Uses functools.cache for thread-safe lazy initialization.
Returns:
The shared EmbeddingService instance.
Raises:
ValueError: If OpenAI API key is not configured (when generating embeddings).
"""
return EmbeddingService()
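A minimal usage sketch of this service, assuming the backend package is importable and an OpenAI key is configured via OPENAI_API_KEY or OPENAI_INTERNAL_API_KEY; the listing fields below are made-up examples:
import asyncio
from backend.integrations.embeddings import create_search_text, get_embedding_service
async def embed_listing() -> list[float]:
    # Combine the listing fields into one searchable string
    text = create_search_text(
        "Example Agent",  # hypothetical name
        "Summarizes documents",  # hypothetical sub-heading
        "Uploads and summarizes PDFs.",  # hypothetical description
    )
    # Returns a 1536-dimension vector (text-embedding-3-small)
    return await get_embedding_service().generate_embedding(text)
embedding = asyncio.run(embed_listing())
assert len(embedding) == 1536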

View File

@@ -1,235 +0,0 @@
"""Tests for the embedding service.
This module tests:
- create_search_text utility function
- EmbeddingService input validation
- EmbeddingService API interaction (mocked)
"""
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from backend.integrations.embeddings import (
EMBEDDING_DIMENSIONS,
MAX_BATCH_SIZE,
MAX_TEXT_LENGTH,
EmbeddingService,
create_search_text,
)
class TestCreateSearchText:
"""Tests for the create_search_text utility function."""
def test_combines_all_fields(self):
result = create_search_text("Agent Name", "A cool agent", "Does amazing things")
assert result == "Agent Name A cool agent Does amazing things"
def test_handles_empty_name(self):
result = create_search_text("", "Sub heading", "Description")
assert result == "Sub heading Description"
def test_handles_empty_sub_heading(self):
result = create_search_text("Name", "", "Description")
assert result == "Name Description"
def test_handles_empty_description(self):
result = create_search_text("Name", "Sub heading", "")
assert result == "Name Sub heading"
def test_handles_all_empty(self):
result = create_search_text("", "", "")
assert result == ""
def test_handles_none_values(self):
# The function expects strings but should handle None gracefully
result = create_search_text(None, None, None) # type: ignore
assert result == ""
def test_preserves_content_strips_outer_whitespace(self):
# The function joins parts and strips the outer result
# Internal whitespace in each part is preserved
result = create_search_text(" Name ", " Sub ", " Desc ")
# Each part is joined with space, then outer strip applied
assert result == "Name Sub Desc"
def test_handles_only_whitespace(self):
# Parts that are only whitespace become empty after filter
result = create_search_text(" ", " ", " ")
assert result == ""
class TestEmbeddingServiceValidation:
"""Tests for EmbeddingService input validation."""
@pytest.fixture
def mock_settings(self):
"""Mock settings with a test API key."""
with patch("backend.integrations.embeddings.Settings") as mock:
mock_instance = MagicMock()
mock_instance.secrets.openai_internal_api_key = "test-api-key"
mock_instance.secrets.openai_api_key = ""
mock.return_value = mock_instance
yield mock
@pytest.fixture
def service(self, mock_settings):
"""Create an EmbeddingService instance with mocked settings."""
service = EmbeddingService()
# Inject a mock client by setting the cached_property directly
service.__dict__["client"] = MagicMock()
return service
def test_client_access_requires_api_key(self):
"""Test that accessing client fails without an API key."""
with patch("backend.integrations.embeddings.Settings") as mock:
mock_instance = MagicMock()
mock_instance.secrets.openai_internal_api_key = ""
mock_instance.secrets.openai_api_key = ""
mock.return_value = mock_instance
# Service creation should succeed
service = EmbeddingService()
# But accessing client should fail
with pytest.raises(ValueError, match="OpenAI API key not configured"):
_ = service.client
def test_init_accepts_explicit_api_key(self):
"""Test that explicit API key overrides settings."""
with patch("backend.integrations.embeddings.Settings") as mock:
mock_instance = MagicMock()
mock_instance.secrets.openai_internal_api_key = ""
mock_instance.secrets.openai_api_key = ""
mock.return_value = mock_instance
with patch("backend.integrations.embeddings.openai.AsyncOpenAI"):
service = EmbeddingService(api_key="explicit-key")
assert service.api_key == "explicit-key"
@pytest.mark.asyncio
async def test_generate_embedding_empty_text(self, service):
"""Test that empty text raises ValueError."""
with pytest.raises(ValueError, match="Text cannot be empty"):
await service.generate_embedding("")
@pytest.mark.asyncio
async def test_generate_embedding_whitespace_only(self, service):
"""Test that whitespace-only text raises ValueError."""
with pytest.raises(ValueError, match="Text cannot be empty"):
await service.generate_embedding(" ")
@pytest.mark.asyncio
async def test_generate_embedding_exceeds_max_length(self, service):
"""Test that text exceeding max length raises ValueError."""
long_text = "a" * (MAX_TEXT_LENGTH + 1)
with pytest.raises(ValueError, match="exceeds maximum length"):
await service.generate_embedding(long_text)
@pytest.mark.asyncio
async def test_generate_embeddings_empty_list(self, service):
"""Test that empty list raises ValueError."""
with pytest.raises(ValueError, match="Texts list cannot be empty"):
await service.generate_embeddings([])
@pytest.mark.asyncio
async def test_generate_embeddings_exceeds_batch_size(self, service):
"""Test that batch exceeding max size raises ValueError."""
texts = ["text"] * (MAX_BATCH_SIZE + 1)
with pytest.raises(ValueError, match="Batch size exceeds maximum"):
await service.generate_embeddings(texts)
@pytest.mark.asyncio
async def test_generate_embeddings_empty_text_in_batch(self, service):
"""Test that empty text in batch raises ValueError with index."""
with pytest.raises(ValueError, match="Text at index 1 cannot be empty"):
await service.generate_embeddings(["valid", "", "also valid"])
@pytest.mark.asyncio
async def test_generate_embeddings_long_text_in_batch(self, service):
"""Test that long text in batch raises ValueError with index."""
long_text = "a" * (MAX_TEXT_LENGTH + 1)
with pytest.raises(ValueError, match="Text at index 2 exceeds maximum length"):
await service.generate_embeddings(["short", "also short", long_text])
class TestEmbeddingServiceAPI:
"""Tests for EmbeddingService API interaction."""
@pytest.fixture
def mock_openai_client(self):
"""Create a mock OpenAI client."""
mock_client = MagicMock()
mock_client.embeddings = MagicMock()
return mock_client
@pytest.fixture
def service_with_mock_client(self, mock_openai_client):
"""Create an EmbeddingService with a mocked OpenAI client."""
with patch("backend.integrations.embeddings.Settings") as mock_settings:
mock_instance = MagicMock()
mock_instance.secrets.openai_internal_api_key = "test-key"
mock_instance.secrets.openai_api_key = ""
mock_settings.return_value = mock_instance
service = EmbeddingService()
# Inject mock client by setting the cached_property directly
service.__dict__["client"] = mock_openai_client
return service, mock_openai_client
@pytest.mark.asyncio
async def test_generate_embedding_success(self, service_with_mock_client):
"""Test successful embedding generation."""
service, mock_client = service_with_mock_client
# Create mock response
mock_embedding = [0.1] * EMBEDDING_DIMENSIONS
mock_response = MagicMock()
mock_response.data = [MagicMock(embedding=mock_embedding)]
mock_client.embeddings.create = AsyncMock(return_value=mock_response)
result = await service.generate_embedding("test text")
assert result == mock_embedding
mock_client.embeddings.create.assert_called_once()
@pytest.mark.asyncio
async def test_generate_embeddings_success(self, service_with_mock_client):
"""Test successful batch embedding generation."""
service, mock_client = service_with_mock_client
# Create mock response with multiple embeddings
mock_embeddings = [[0.1] * EMBEDDING_DIMENSIONS, [0.2] * EMBEDDING_DIMENSIONS]
mock_response = MagicMock()
mock_response.data = [
MagicMock(embedding=mock_embeddings[0], index=0),
MagicMock(embedding=mock_embeddings[1], index=1),
]
mock_client.embeddings.create = AsyncMock(return_value=mock_response)
result = await service.generate_embeddings(["text1", "text2"])
assert result == mock_embeddings
mock_client.embeddings.create.assert_called_once()
@pytest.mark.asyncio
async def test_generate_embeddings_preserves_order(self, service_with_mock_client):
"""Test that batch embeddings are returned in correct order even if API returns out of order."""
service, mock_client = service_with_mock_client
# Create mock response with embeddings out of order
mock_embeddings = [[0.1] * EMBEDDING_DIMENSIONS, [0.2] * EMBEDDING_DIMENSIONS]
mock_response = MagicMock()
# Return in reverse order
mock_response.data = [
MagicMock(embedding=mock_embeddings[1], index=1),
MagicMock(embedding=mock_embeddings[0], index=0),
]
mock_client.embeddings.create = AsyncMock(return_value=mock_response)
result = await service.generate_embeddings(["text1", "text2"])
# Should be sorted by index
assert result[0] == mock_embeddings[0]
assert result[1] == mock_embeddings[1]

View File

@@ -1,4 +1,3 @@
import functools
import logging
from collections.abc import AsyncGenerator
from datetime import UTC, datetime
@@ -33,12 +32,7 @@ from backend.util.exceptions import NotFoundError
logger = logging.getLogger(__name__)
config = backend.server.v2.chat.config.ChatConfig()
@functools.cache
def get_openai_client() -> AsyncOpenAI:
"""Lazily create the OpenAI client singleton."""
return AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)
client = AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)
async def create_chat_session(
@@ -361,7 +355,7 @@ async def _stream_chat_chunks(
logger.info("Creating OpenAI chat completion stream...")
# Create the stream with proper types
stream = await get_openai_client().chat.completions.create(
stream = await client.chat.completions.create(
model=model,
messages=session.to_openai_messages(),
tools=tools,

View File

@@ -134,14 +134,18 @@ async def process_review_action(
# Build review decisions map
review_decisions = {}
for review in request.reviews:
review_status = (
ReviewStatus.APPROVED if review.approved else ReviewStatus.REJECTED
)
review_decisions[review.node_exec_id] = (
review_status,
review.reviewed_data,
review.message,
)
if review.approved:
review_decisions[review.node_exec_id] = (
ReviewStatus.APPROVED,
review.reviewed_data,
review.message,
)
else:
review_decisions[review.node_exec_id] = (
ReviewStatus.REJECTED,
None,
review.message,
)
# Process all reviews
updated_reviews = await process_all_reviews_for_execution(

View File

@@ -1,168 +0,0 @@
"""
Script to backfill embeddings for existing store listing versions.
This script should be run after the migration to add the embedding column
to populate embeddings for all existing store listing versions.
Usage:
poetry run python -m backend.server.v2.store.backfill_embeddings
poetry run python -m backend.server.v2.store.backfill_embeddings --dry-run
poetry run python -m backend.server.v2.store.backfill_embeddings --batch-size 25
"""
import argparse
import asyncio
import logging
import sys
from backend.data.db import connect, disconnect, query_raw_with_schema
from backend.integrations.embeddings import create_search_text, get_embedding_service
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)
# Default batch size for processing
DEFAULT_BATCH_SIZE = 50
# Delay between batches to avoid rate limits (seconds)
BATCH_DELAY_SECONDS = 1.0
async def backfill_embeddings(
dry_run: bool = False,
batch_size: int = DEFAULT_BATCH_SIZE,
) -> tuple[int, int]:
"""
Backfill embeddings for all store listing versions without embeddings.
Args:
dry_run: If True, don't make any changes, just report what would be done.
batch_size: Number of versions to process in each batch.
Returns:
Tuple of (processed_count, error_count)
"""
await connect()
try:
embedding_service = get_embedding_service()
# Get all versions without embeddings
versions = await query_raw_with_schema(
"""
SELECT id, name, "subHeading", description
FROM {schema_prefix}"StoreListingVersion"
WHERE embedding IS NULL
ORDER BY "createdAt" DESC
"""
)
total = len(versions)
logger.info(f"Found {total} versions without embeddings")
if dry_run:
logger.info("Dry run mode - no changes will be made")
return (0, 0)
if total == 0:
logger.info("No versions need embeddings")
return (0, 0)
processed = 0
errors = 0
for i in range(0, total, batch_size):
batch = versions[i : i + batch_size]
batch_num = (i // batch_size) + 1
total_batches = (total + batch_size - 1) // batch_size
logger.info(f"Processing batch {batch_num}/{total_batches}")
for version in batch:
version_id = version["id"]
try:
search_text = create_search_text(
version["name"] or "",
version["subHeading"] or "",
version["description"] or "",
)
if not search_text:
logger.warning(f"Skipping {version_id} - no searchable text")
continue
embedding = await embedding_service.generate_embedding(search_text)
embedding_str = "[" + ",".join(map(str, embedding)) + "]"
await query_raw_with_schema(
"""
UPDATE {schema_prefix}"StoreListingVersion"
SET embedding = $1::vector
WHERE id = $2
""",
embedding_str,
version_id,
)
processed += 1
except Exception as e:
logger.error(f"Error processing {version_id}: {e}")
errors += 1
logger.info(f"Progress: {processed}/{total} processed, {errors} errors")
# Rate limit: wait between batches to avoid hitting API limits
if i + batch_size < total:
await asyncio.sleep(BATCH_DELAY_SECONDS)
logger.info(f"Backfill complete: {processed} processed, {errors} errors")
return (processed, errors)
finally:
await disconnect()
def main():
parser = argparse.ArgumentParser(
description="Backfill embeddings for store listing versions"
)
parser.add_argument(
"--dry-run",
action="store_true",
help="Don't make any changes, just report what would be done",
)
parser.add_argument(
"--batch-size",
type=int,
default=DEFAULT_BATCH_SIZE,
help=f"Number of versions to process in each batch (default: {DEFAULT_BATCH_SIZE})",
)
args = parser.parse_args()
try:
processed, errors = asyncio.run(
backfill_embeddings(dry_run=args.dry_run, batch_size=args.batch_size)
)
if errors > 0:
logger.warning(f"Completed with {errors} errors")
sys.exit(1)
else:
logger.info("Completed successfully")
sys.exit(0)
except KeyboardInterrupt:
logger.info("Interrupted by user")
sys.exit(130)
except Exception as e:
logger.error(f"Fatal error: {e}")
sys.exit(1)
if __name__ == "__main__":
main()

View File

@@ -27,9 +27,8 @@ async def _get_cached_store_agents(
category: str | None,
page: int,
page_size: int,
filter_mode: Literal["strict", "permissive", "combined"] = "permissive",
):
"""Cached helper to get store agents with hybrid search support."""
"""Cached helper to get store agents."""
return await backend.server.v2.store.db.get_store_agents(
featured=featured,
creators=[creator] if creator else None,
@@ -38,7 +37,6 @@ async def _get_cached_store_agents(
category=category,
page=page,
page_size=page_size,
filter_mode=filter_mode,
)

View File

@@ -26,7 +26,6 @@ from backend.data.notifications import (
AgentRejectionData,
NotificationEventModel,
)
from backend.integrations.embeddings import create_search_text, get_embedding_service
from backend.notifications.notifications import queue_notification_async
from backend.util.exceptions import DatabaseError
from backend.util.settings import Settings
@@ -39,25 +38,6 @@ settings = Settings()
DEFAULT_ADMIN_NAME = "AutoGPT Admin"
DEFAULT_ADMIN_EMAIL = "admin@autogpt.co"
# Minimum similarity threshold for vector search results
# Cosine similarity ranges from -1 to 1, where 1 is identical
# 0.4 filters loosely related results while keeping semantically relevant ones
VECTOR_SEARCH_SIMILARITY_THRESHOLD = 0.4
# Minimum relevance threshold for BM25 full-text search results
# ts_rank_cd returns values typically in range 0-1 (can exceed 1 for exact matches)
# 0.05 allows partial keyword matches
BM25_RELEVANCE_THRESHOLD = 0.05
# RRF constant (k) - standard value that balances influence of top vs lower ranks
# Higher k values reduce the influence of high-ranking items
RRF_K = 60
# Minimum RRF score threshold for combined mode
# Filters out results that rank poorly across all signals
# For reference: rank #1 in all = ~0.041, rank #100 in all = ~0.016
RRF_SCORE_THRESHOLD = 0.02
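To make these thresholds concrete, a small self-contained sketch of the weighted RRF formula used in the query below; the ranks passed in are hypothetical, and the weights and k mirror the constants above:
def rrf_score(bm25_rank: int, vector_rank: int, popularity_rank: int) -> float:
    # BM25 and vector similarity contribute at full weight, popularity at half
    return (
        1.0 / (RRF_K + bm25_rank)
        + 1.0 / (RRF_K + vector_rank)
        + 0.5 / (RRF_K + popularity_rank)
    )
# Rank #1 in all three signals: 2.5 / 61 ~= 0.041, well above the 0.02 threshold
assert abs(rrf_score(1, 1, 1) - 2.5 / 61) < 1e-9
# Rank #100 in all three signals: 2.5 / 160 ~= 0.016, filtered out by the threshold
assert rrf_score(100, 100, 100) < RRF_SCORE_THRESHOLD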
async def get_store_agents(
featured: bool = False,
@@ -67,223 +47,64 @@ async def get_store_agents(
category: str | None = None,
page: int = 1,
page_size: int = 20,
filter_mode: Literal["strict", "permissive", "combined"] = "permissive",
) -> backend.server.v2.store.model.StoreAgentsResponse:
"""
Get PUBLIC store agents from the StoreAgent view.
When search_query is provided, uses hybrid search combining:
- BM25 full-text search (lexical matching via PostgreSQL tsvector)
- Vector semantic similarity (meaning-based matching via pgvector)
- Popularity signal (run counts as PageRank proxy)
Results are ranked using Reciprocal Rank Fusion (RRF).
Args:
featured: Filter to only show featured agents.
creators: Filter agents by creator usernames.
sorted_by: Sort agents by "runs", "rating", "name", or "updated_at".
search_query: Search query for hybrid search.
category: Filter agents by category.
page: Page number for pagination.
page_size: Number of agents per page.
filter_mode: Controls how results are filtered when searching:
- "strict": Must match BOTH BM25 AND vector thresholds
- "permissive": Must match EITHER BM25 OR vector threshold
- "combined": No threshold filtering, rely on RRF score (default)
Returns:
StoreAgentsResponse with paginated list of agents.
Get PUBLIC store agents from the StoreAgent view
"""
logger.debug(
f"Getting store agents. featured={featured}, creators={creators}, "
f"sorted_by={sorted_by}, search={search_query}, category={category}, "
f"page={page}, filter_mode={filter_mode}"
f"Getting store agents. featured={featured}, creators={creators}, sorted_by={sorted_by}, search={search_query}, category={category}, page={page}"
)
try:
# If search_query is provided, use hybrid search (BM25 + vector + popularity)
# If search_query is provided, use full-text search
if search_query:
offset = (page - 1) * page_size
# Try to generate embedding for vector search
# Falls back to BM25-only if embedding service is not available
query_embedding: list[float] | None = None
try:
embedding_service = get_embedding_service()
query_embedding = await embedding_service.generate_embedding(
search_query
)
except Exception as e:  # Exception already covers ValueError
# Embedding service not configured or failed - use BM25 only
logger.warning(f"Embedding generation failed, using BM25 only: {e}")
# Whitelist allowed order_by columns
ALLOWED_ORDER_BY = {
"rating": "rating DESC, rank DESC",
"runs": "runs DESC, rank DESC",
"name": "agent_name ASC, rank ASC",
"updated_at": "updated_at DESC, rank DESC",
}
# Convert embedding to PostgreSQL array format (or None for BM25-only)
embedding_str = (
"[" + ",".join(map(str, query_embedding)) + "]"
if query_embedding
else None
)
# Validate and get order clause
if sorted_by and sorted_by in ALLOWED_ORDER_BY:
order_by_clause = ALLOWED_ORDER_BY[sorted_by]
else:
order_by_clause = "updated_at DESC, rank DESC"
# Build WHERE conditions and parameters list
# When embedding is not available (no OpenAI key), $1 will be NULL
where_parts: list[str] = []
params: list[typing.Any] = [embedding_str] # $1 - query embedding (or NULL)
params: list[typing.Any] = [search_query] # $1 - search term
param_index = 2 # Start at $2 for next parameter
# Always filter for available agents
where_parts.append("is_available = true")
# Require search signals to be present
if embedding_str is None:
# No embedding available - require BM25 search only
where_parts.append("search IS NOT NULL")
elif filter_mode == "strict":
# Strict mode: require both embedding AND search to be available
where_parts.append("embedding IS NOT NULL")
where_parts.append("search IS NOT NULL")
else:
# Permissive/combined: require at least one signal
where_parts.append("(embedding IS NOT NULL OR search IS NOT NULL)")
if featured:
where_parts.append("featured = true")
if creators:
if creators:
# Use ANY with array parameter
where_parts.append(f"creator_username = ANY(${param_index})")
params.append(creators)
param_index += 1
if category:
if category:
where_parts.append(f"${param_index} = ANY(categories)")
params.append(category)
param_index += 1
# Add search query for BM25
params.append(search_query)
bm25_query_param = f"${param_index}"
param_index += 1
sql_where_clause: str = " AND ".join(where_parts) if where_parts else "1=1"
# Build score filter based on filter_mode
# This filter is applied BEFORE RRF ranking in the filtered_agents CTE
if embedding_str is None:
# No embedding - filter only on BM25 score
score_filter = f"bm25_score >= {BM25_RELEVANCE_THRESHOLD}"
elif filter_mode == "strict":
score_filter = f"""
bm25_score >= {BM25_RELEVANCE_THRESHOLD}
AND vector_score >= {VECTOR_SEARCH_SIMILARITY_THRESHOLD}
"""
elif filter_mode == "permissive":
score_filter = f"""
bm25_score >= {BM25_RELEVANCE_THRESHOLD}
OR vector_score >= {VECTOR_SEARCH_SIMILARITY_THRESHOLD}
"""
else: # combined - no pre-filtering on individual scores
score_filter = "1=1"
# RRF score filter is applied AFTER ranking to filter irrelevant results
rrf_score_filter = f"rrf_score >= {RRF_SCORE_THRESHOLD}"
# Build ORDER BY clause - sorted_by takes precedence, rrf_score as secondary
if sorted_by == "rating":
order_by_clause = "rating DESC, rrf_score DESC"
elif sorted_by == "runs":
order_by_clause = "runs DESC, rrf_score DESC"
elif sorted_by == "name":
order_by_clause = "agent_name ASC, rrf_score DESC"
elif sorted_by == "updated_at":
order_by_clause = "updated_at DESC, rrf_score DESC"
else:
# Default: order by RRF relevance score
order_by_clause = "rrf_score DESC, updated_at DESC"
# Add pagination params
params.extend([page_size, offset])
limit_param = f"${param_index}"
offset_param = f"${param_index + 1}"
# Hybrid search SQL with Reciprocal Rank Fusion (RRF)
# CTEs: scored_agents -> filtered_agents -> ranked_agents -> rrf_scored
# Execute full-text search query with parameterized values
sql_query = f"""
WITH scored_agents AS (
SELECT
slug,
agent_name,
agent_image,
creator_username,
creator_avatar,
sub_heading,
description,
runs,
rating,
categories,
featured,
is_available,
updated_at,
-- BM25 score using ts_rank_cd (covers density normalization)
COALESCE(
ts_rank_cd(
search,
plainto_tsquery('english', {bm25_query_param}),
32 -- normalization: divide by document length
),
0
) AS bm25_score,
-- Vector similarity score (cosine: 1 - distance)
-- Returns 0 when query embedding ($1) is NULL (no OpenAI key)
CASE
WHEN $1 IS NOT NULL AND embedding IS NOT NULL
THEN 1 - (embedding <=> $1::vector)
ELSE 0
END AS vector_score,
-- Popularity score (log-normalized run count)
CASE
WHEN runs > 0
THEN LN(runs + 1)
ELSE 0
END AS popularity_score
FROM {{schema_prefix}}"StoreAgent"
WHERE {sql_where_clause}
),
max_popularity AS (
SELECT GREATEST(MAX(popularity_score), 1) AS max_pop
FROM scored_agents
),
normalized_agents AS (
SELECT
sa.*,
-- Normalize popularity to [0, 1] range
sa.popularity_score / mp.max_pop AS norm_popularity_score
FROM scored_agents sa
CROSS JOIN max_popularity mp
),
filtered_agents AS (
SELECT *
FROM normalized_agents
WHERE {score_filter}
),
ranked_agents AS (
SELECT
*,
ROW_NUMBER() OVER (ORDER BY bm25_score DESC NULLS LAST) AS bm25_rank,
ROW_NUMBER() OVER (ORDER BY vector_score DESC NULLS LAST) AS vector_rank,
ROW_NUMBER() OVER (ORDER BY norm_popularity_score DESC NULLS LAST) AS popularity_rank
FROM filtered_agents
),
rrf_scored AS (
SELECT
*,
-- RRF formula with weighted contributions
-- BM25 and vector get full weight, popularity gets 0.5x weight
(1.0 / ({RRF_K} + bm25_rank)) +
(1.0 / ({RRF_K} + vector_rank)) +
(0.5 / ({RRF_K} + popularity_rank)) AS rrf_score
FROM ranked_agents
)
SELECT
slug,
agent_name,
@@ -298,79 +119,25 @@ async def get_store_agents(
featured,
is_available,
updated_at,
rrf_score
FROM rrf_scored
WHERE {rrf_score_filter}
ts_rank_cd(search, query) AS rank
FROM {{schema_prefix}}"StoreAgent",
plainto_tsquery('english', $1) AS query
WHERE {sql_where_clause}
AND search @@ query
ORDER BY {order_by_clause}
LIMIT {limit_param} OFFSET {offset_param}
"""
# Count query (without pagination) - requires same CTE structure because:
# 1. RRF scoring requires computing ranks across ALL matching results
# 2. The rrf_score_filter threshold must be applied consistently
# Note: This is inherent to RRF - there's no way to count without ranking
# Count query for pagination - only uses search term parameter
count_query = f"""
WITH scored_agents AS (
SELECT
runs,
COALESCE(
ts_rank_cd(
search,
plainto_tsquery('english', {bm25_query_param}),
32
),
0
) AS bm25_score,
CASE
WHEN $1 IS NOT NULL AND embedding IS NOT NULL
THEN 1 - (embedding <=> $1::vector)
ELSE 0
END AS vector_score,
CASE
WHEN runs > 0
THEN LN(runs + 1)
ELSE 0
END AS popularity_score
FROM {{schema_prefix}}"StoreAgent"
WHERE {sql_where_clause}
),
max_popularity AS (
SELECT GREATEST(MAX(popularity_score), 1) AS max_pop
FROM scored_agents
),
normalized_agents AS (
SELECT
sa.*,
sa.popularity_score / mp.max_pop AS norm_popularity_score
FROM scored_agents sa
CROSS JOIN max_popularity mp
),
filtered_agents AS (
SELECT *
FROM normalized_agents
WHERE {score_filter}
),
ranked_agents AS (
SELECT
*,
ROW_NUMBER() OVER (ORDER BY bm25_score DESC NULLS LAST) AS bm25_rank,
ROW_NUMBER() OVER (ORDER BY vector_score DESC NULLS LAST) AS vector_rank,
ROW_NUMBER() OVER (ORDER BY norm_popularity_score DESC NULLS LAST) AS popularity_rank
FROM filtered_agents
),
rrf_scored AS (
SELECT
(1.0 / ({RRF_K} + bm25_rank)) +
(1.0 / ({RRF_K} + vector_rank)) +
(0.5 / ({RRF_K} + popularity_rank)) AS rrf_score
FROM ranked_agents
)
SELECT COUNT(*) as count
FROM rrf_scored
WHERE {rrf_score_filter}
FROM {{schema_prefix}}"StoreAgent",
plainto_tsquery('english', $1) AS query
WHERE {sql_where_clause}
AND search @@ query
"""
# Execute queries
# Execute both queries with parameters
agents = await query_raw_with_schema(sql_query, *params)
# For count, use params without pagination (last 2 params)
@@ -488,56 +255,6 @@ async def log_search_term(search_query: str):
logger.error(f"Error logging search term: {e}")
async def _generate_and_store_embedding(
store_listing_version_id: str,
name: str,
sub_heading: str,
description: str,
) -> None:
"""
Generate and store embedding for a store listing version.
This creates a vector embedding from the agent's name, sub_heading, and
description, which is used for semantic search.
Args:
store_listing_version_id: The ID of the store listing version.
name: The agent name.
sub_heading: The agent sub-heading/tagline.
description: The agent description.
"""
try:
embedding_service = get_embedding_service()
search_text = create_search_text(name, sub_heading, description)
if not search_text:
logger.warning(
f"No searchable text for version {store_listing_version_id}, "
"skipping embedding generation"
)
return
embedding = await embedding_service.generate_embedding(search_text)
embedding_str = "[" + ",".join(map(str, embedding)) + "]"
await query_raw_with_schema(
"""
UPDATE {schema_prefix}"StoreListingVersion"
SET embedding = $1::vector
WHERE id = $2
""",
embedding_str,
store_listing_version_id,
)
logger.debug(f"Generated embedding for version {store_listing_version_id}")
except Exception as e:
# Log error but don't fail the whole operation
# Embeddings can be generated later via backfill
logger.error(
f"Failed to generate embedding for {store_listing_version_id}: {e}"
)
async def get_store_agent_details(
username: str, agent_name: str
) -> backend.server.v2.store.model.StoreAgentDetails:
@@ -1088,12 +805,6 @@ async def create_store_submission(
else None
)
# Generate embedding for semantic search
if store_listing_version_id:
await _generate_and_store_embedding(
store_listing_version_id, name, sub_heading, description
)
logger.debug(f"Created store listing for agent {agent_id}")
# Return submission details
return backend.server.v2.store.model.StoreSubmission(
@@ -1259,12 +970,6 @@ async def edit_store_submission(
if not updated_version:
raise DatabaseError("Failed to update store listing version")
# Regenerate embedding with updated content
await _generate_and_store_embedding(
store_listing_version_id, name, sub_heading, description
)
return backend.server.v2.store.model.StoreSubmission(
agent_id=current_version.agentGraphId,
agent_version=current_version.agentGraphVersion,
@@ -1397,12 +1102,6 @@ async def create_store_version(
logger.debug(
f"Created new version for listing {store_listing_id} of agent {agent_id}"
)
# Generate embedding for semantic search
await _generate_and_store_embedding(
new_version.id, name, sub_heading, description
)
# Return submission details
return backend.server.v2.store.model.StoreSubmission(
agent_id=agent_id,

View File

@@ -405,347 +405,3 @@ async def test_get_store_agents_search_category_array_injection():
# Verify the query executed without error
# Category should be parameterized, preventing SQL injection
assert isinstance(result.agents, list)
# Hybrid search tests (BM25 + vector + popularity with RRF ranking)
@pytest.mark.asyncio(loop_scope="session")
async def test_get_store_agents_hybrid_search_mocked(mocker):
"""Test hybrid search uses embedding service and executes query safely."""
from backend.integrations.embeddings import EMBEDDING_DIMENSIONS
# Mock embedding service
mock_embedding = [0.1] * EMBEDDING_DIMENSIONS
mock_embedding_service = mocker.MagicMock()
mock_embedding_service.generate_embedding = mocker.AsyncMock(
return_value=mock_embedding
)
mocker.patch(
"backend.server.v2.store.db.get_embedding_service",
mocker.MagicMock(return_value=mock_embedding_service),
)
# Mock query_raw_with_schema to return empty results
mocker.patch(
"backend.server.v2.store.db.query_raw_with_schema",
mocker.AsyncMock(side_effect=[[], [{"count": 0}]]),
)
# Call function with search query
result = await db.get_store_agents(search_query="test query")
# Verify embedding service was called
mock_embedding_service.generate_embedding.assert_called_once_with("test query")
# Verify results
assert isinstance(result.agents, list)
assert len(result.agents) == 0
@pytest.mark.asyncio(loop_scope="session")
async def test_get_store_agents_hybrid_search_with_results(mocker):
"""Test hybrid search returns properly formatted results with RRF scoring."""
from backend.integrations.embeddings import EMBEDDING_DIMENSIONS
# Mock embedding service
mock_embedding = [0.1] * EMBEDDING_DIMENSIONS
mock_embedding_service = mocker.MagicMock()
mock_embedding_service.generate_embedding = mocker.AsyncMock(
return_value=mock_embedding
)
mocker.patch(
"backend.server.v2.store.db.get_embedding_service",
mocker.MagicMock(return_value=mock_embedding_service),
)
# Mock query results (hybrid search returns rrf_score instead of similarity)
mock_agents = [
{
"slug": "test-agent",
"agent_name": "Test Agent",
"agent_image": ["image.jpg"],
"creator_username": "creator",
"creator_avatar": "avatar.jpg",
"sub_heading": "Test heading",
"description": "Test description",
"runs": 10,
"rating": 4.5,
"categories": ["test"],
"featured": False,
"is_available": True,
"updated_at": datetime.now(),
"rrf_score": 0.048, # RRF score from combined rankings
}
]
mock_count = [{"count": 1}]
mocker.patch(
"backend.server.v2.store.db.query_raw_with_schema",
mocker.AsyncMock(side_effect=[mock_agents, mock_count]),
)
# Call function with search query
result = await db.get_store_agents(search_query="test query")
# Verify results
assert len(result.agents) == 1
assert result.agents[0].slug == "test-agent"
assert result.agents[0].agent_name == "Test Agent"
assert result.pagination.total_items == 1
@pytest.mark.asyncio(loop_scope="session")
async def test_get_store_agents_hybrid_search_with_filters(mocker):
"""Test hybrid search works correctly with additional filters."""
from backend.integrations.embeddings import EMBEDDING_DIMENSIONS
# Mock embedding service
mock_embedding = [0.1] * EMBEDDING_DIMENSIONS
mock_embedding_service = mocker.MagicMock()
mock_embedding_service.generate_embedding = mocker.AsyncMock(
return_value=mock_embedding
)
mocker.patch(
"backend.server.v2.store.db.get_embedding_service",
mocker.MagicMock(return_value=mock_embedding_service),
)
# Mock query_raw_with_schema
mock_query = mocker.patch(
"backend.server.v2.store.db.query_raw_with_schema",
mocker.AsyncMock(side_effect=[[], [{"count": 0}]]),
)
# Call function with search query and filters
await db.get_store_agents(
search_query="test query",
featured=True,
creators=["creator1", "creator2"],
category="AI",
)
# Verify query was called with parameterized values
# First call is the main query, second is count
assert mock_query.call_count == 2
# Check that the SQL query includes proper parameterization
first_call_args = mock_query.call_args_list[0]
sql_query = first_call_args[0][0]
# Verify key elements of hybrid search query
assert "embedding <=> $1::vector" in sql_query # Vector search
assert "ts_rank_cd" in sql_query # BM25 search
assert "rrf_score" in sql_query # RRF ranking
assert "featured = true" in sql_query
assert "creator_username = ANY($" in sql_query
assert "= ANY(categories)" in sql_query
@pytest.mark.asyncio(loop_scope="session")
async def test_get_store_agents_hybrid_search_strict_filter_mode(mocker):
"""Test hybrid search with strict filter mode requires both BM25 and vector matches."""
from backend.integrations.embeddings import EMBEDDING_DIMENSIONS
# Mock embedding service
mock_embedding = [0.1] * EMBEDDING_DIMENSIONS
mock_embedding_service = mocker.MagicMock()
mock_embedding_service.generate_embedding = mocker.AsyncMock(
return_value=mock_embedding
)
mocker.patch(
"backend.server.v2.store.db.get_embedding_service",
mocker.MagicMock(return_value=mock_embedding_service),
)
# Mock query_raw_with_schema
mock_query = mocker.patch(
"backend.server.v2.store.db.query_raw_with_schema",
mocker.AsyncMock(side_effect=[[], [{"count": 0}]]),
)
# Call function with strict filter mode
await db.get_store_agents(search_query="test query", filter_mode="strict")
# Check that the SQL query includes strict filtering conditions
first_call_args = mock_query.call_args_list[0]
sql_query = first_call_args[0][0]
# Strict mode requires both embedding AND search to be present
assert "embedding IS NOT NULL" in sql_query
assert "search IS NOT NULL" in sql_query
# Strict score filter requires both thresholds to be met
assert "bm25_score >=" in sql_query
assert "AND vector_score >=" in sql_query
@pytest.mark.asyncio(loop_scope="session")
async def test_get_store_agents_hybrid_search_permissive_filter_mode(mocker):
"""Test hybrid search with permissive filter mode requires either BM25 or vector match."""
from backend.integrations.embeddings import EMBEDDING_DIMENSIONS
# Mock embedding service
mock_embedding = [0.1] * EMBEDDING_DIMENSIONS
mock_embedding_service = mocker.MagicMock()
mock_embedding_service.generate_embedding = mocker.AsyncMock(
return_value=mock_embedding
)
mocker.patch(
"backend.server.v2.store.db.get_embedding_service",
mocker.MagicMock(return_value=mock_embedding_service),
)
# Mock query_raw_with_schema
mock_query = mocker.patch(
"backend.server.v2.store.db.query_raw_with_schema",
mocker.AsyncMock(side_effect=[[], [{"count": 0}]]),
)
# Call function with permissive filter mode
await db.get_store_agents(search_query="test query", filter_mode="permissive")
# Check that the SQL query includes permissive filtering conditions
first_call_args = mock_query.call_args_list[0]
sql_query = first_call_args[0][0]
# Permissive mode requires at least one signal
assert "(embedding IS NOT NULL OR search IS NOT NULL)" in sql_query
# Permissive score filter requires either threshold to be met
assert "OR vector_score >=" in sql_query
@pytest.mark.asyncio(loop_scope="session")
async def test_get_store_agents_hybrid_search_combined_filter_mode(mocker):
"""Test hybrid search with combined filter mode (default) filters by RRF score."""
from backend.integrations.embeddings import EMBEDDING_DIMENSIONS
# Mock embedding service
mock_embedding = [0.1] * EMBEDDING_DIMENSIONS
mock_embedding_service = mocker.MagicMock()
mock_embedding_service.generate_embedding = mocker.AsyncMock(
return_value=mock_embedding
)
mocker.patch(
"backend.server.v2.store.db.get_embedding_service",
mocker.MagicMock(return_value=mock_embedding_service),
)
# Mock query_raw_with_schema
mock_query = mocker.patch(
"backend.server.v2.store.db.query_raw_with_schema",
mocker.AsyncMock(side_effect=[[], [{"count": 0}]]),
)
# Call function with combined filter mode (default)
await db.get_store_agents(search_query="test query", filter_mode="combined")
# Check that the SQL query includes combined filtering
first_call_args = mock_query.call_args_list[0]
sql_query = first_call_args[0][0]
# Combined mode requires at least one signal
assert "(embedding IS NOT NULL OR search IS NOT NULL)" in sql_query
# Combined mode uses "1=1" as pre-filter (no individual score filtering)
# But applies RRF score threshold to filter irrelevant results
assert "rrf_score" in sql_query
assert "rrf_score >=" in sql_query # RRF threshold filter applied
@pytest.mark.asyncio(loop_scope="session")
async def test_generate_and_store_embedding_success(mocker):
"""Test that embedding generation and storage works correctly."""
from backend.integrations.embeddings import EMBEDDING_DIMENSIONS
# Mock embedding service
mock_embedding = [0.1] * EMBEDDING_DIMENSIONS
mock_embedding_service = mocker.MagicMock()
mock_embedding_service.generate_embedding = mocker.AsyncMock(
return_value=mock_embedding
)
mocker.patch(
"backend.server.v2.store.db.get_embedding_service",
mocker.MagicMock(return_value=mock_embedding_service),
)
# Mock query_raw_with_schema
mock_query = mocker.patch(
"backend.server.v2.store.db.query_raw_with_schema",
mocker.AsyncMock(return_value=[]),
)
# Call the internal function
await db._generate_and_store_embedding(
store_listing_version_id="version-123",
name="Test Agent",
sub_heading="A test agent",
description="Does testing",
)
# Verify embedding service was called with combined text
mock_embedding_service.generate_embedding.assert_called_once_with(
"Test Agent A test agent Does testing"
)
# Verify database update was called
mock_query.assert_called_once()
call_args = mock_query.call_args
assert "UPDATE" in call_args[0][0]
assert "embedding = $1::vector" in call_args[0][0]
assert call_args[0][2] == "version-123"
@pytest.mark.asyncio(loop_scope="session")
async def test_generate_and_store_embedding_empty_text(mocker):
"""Test that embedding is not generated for empty text."""
# Mock embedding service
mock_embedding_service = mocker.MagicMock()
mock_embedding_service.generate_embedding = mocker.AsyncMock()
mocker.patch(
"backend.server.v2.store.db.get_embedding_service",
mocker.MagicMock(return_value=mock_embedding_service),
)
# Mock query_raw_with_schema
mock_query = mocker.patch(
"backend.server.v2.store.db.query_raw_with_schema",
mocker.AsyncMock(return_value=[]),
)
# Call with empty fields
await db._generate_and_store_embedding(
store_listing_version_id="version-123",
name="",
sub_heading="",
description="",
)
# Verify embedding service was NOT called
mock_embedding_service.generate_embedding.assert_not_called()
# Verify database was NOT updated
mock_query.assert_not_called()
@pytest.mark.asyncio(loop_scope="session")
async def test_generate_and_store_embedding_handles_error(mocker):
"""Test that embedding generation errors don't crash the operation."""
# Mock embedding service to raise an error
mock_embedding_service = mocker.MagicMock()
mock_embedding_service.generate_embedding = mocker.AsyncMock(
side_effect=Exception("API error")
)
mocker.patch(
"backend.server.v2.store.db.get_embedding_service",
mocker.MagicMock(return_value=mock_embedding_service),
)
# Call should not raise - errors are logged but not propagated
await db._generate_and_store_embedding(
store_listing_version_id="version-123",
name="Test Agent",
sub_heading="A test agent",
description="Does testing",
)
# Verify embedding service was called (and failed)
mock_embedding_service.generate_embedding.assert_called_once()

View File

@@ -1,5 +1,4 @@
import datetime
from enum import Enum
from typing import List
import prisma.enums
@@ -8,19 +7,6 @@ import pydantic
from backend.util.models import Pagination
class SearchFilterMode(str, Enum):
"""How to combine BM25 and vector search results for filtering.
- STRICT: Must pass BOTH BM25 AND vector similarity thresholds
- PERMISSIVE: Must pass EITHER BM25 OR vector similarity threshold
- COMBINED: No pre-filtering, only the combined RRF score matters (default)
"""
STRICT = "strict"
PERMISSIVE = "permissive"
COMBINED = "combined"
class MyAgent(pydantic.BaseModel):
agent_id: str
agent_version: int

View File

@@ -99,30 +99,18 @@ async def get_agents(
category: str | None = None,
page: int = 1,
page_size: int = 20,
filter_mode: Literal["strict", "permissive", "combined"] = "permissive",
):
"""
Get a paginated list of agents from the store with optional filtering and sorting.
When search_query is provided, uses hybrid search combining:
- BM25 full-text search (lexical matching)
- Vector semantic similarity (meaning-based matching)
- Popularity signal (run counts)
Results are ranked using Reciprocal Rank Fusion (RRF).
Args:
featured (bool, optional): Filter to only show featured agents. Defaults to False.
creator (str | None, optional): Filter agents by creator username. Defaults to None.
sorted_by (str | None, optional): Sort agents by "runs" or "rating". Defaults to None.
search_query (str | None, optional): Search agents by name, subheading and description.
search_query (str | None, optional): Search agents by name, subheading and description. Defaults to None.
category (str | None, optional): Filter agents by category. Defaults to None.
page (int, optional): Page number for pagination. Defaults to 1.
page_size (int, optional): Number of agents per page. Defaults to 20.
filter_mode (str, optional): Controls result filtering when searching:
- "strict": Must match BOTH BM25 AND vector thresholds
- "permissive": Must match EITHER BM25 OR vector threshold
- "combined": No threshold filtering, rely on RRF score (default)
Returns:
StoreAgentsResponse: Paginated list of agents matching the filters
@@ -156,7 +144,6 @@ async def get_agents(
category=category,
page=page,
page_size=page_size,
filter_mode=filter_mode,
)
return agents

View File

@@ -65,7 +65,6 @@ def test_get_agents_defaults(
category=None,
page=1,
page_size=20,
filter_mode="permissive",
)
@@ -113,7 +112,6 @@ def test_get_agents_featured(
category=None,
page=1,
page_size=20,
filter_mode="permissive",
)
@@ -161,7 +159,6 @@ def test_get_agents_by_creator(
category=None,
page=1,
page_size=20,
filter_mode="permissive",
)
@@ -209,7 +206,6 @@ def test_get_agents_sorted(
category=None,
page=1,
page_size=20,
filter_mode="permissive",
)
@@ -257,7 +253,6 @@ def test_get_agents_search(
category=None,
page=1,
page_size=20,
filter_mode="permissive",
)
@@ -304,7 +299,6 @@ def test_get_agents_category(
category="test-category",
page=1,
page_size=20,
filter_mode="permissive",
)
@@ -354,7 +348,6 @@ def test_get_agents_pagination(
category=None,
page=2,
page_size=5,
filter_mode="permissive",
)

View File

@@ -5,13 +5,6 @@ from tiktoken import encoding_for_model
from backend.util import json
# ---------------------------------------------------------------------------#
# CONSTANTS #
# ---------------------------------------------------------------------------#
# Message prefixes for important system messages that should be protected during compression
MAIN_OBJECTIVE_PREFIX = "[Main Objective Prompt]: "
# ---------------------------------------------------------------------------#
# INTERNAL UTILITIES #
# ---------------------------------------------------------------------------#
@@ -70,55 +63,6 @@ def _msg_tokens(msg: dict, enc) -> int:
return WRAPPER + content_tokens + tool_call_tokens
def _is_tool_message(msg: dict) -> bool:
"""Check if a message contains tool calls or results that should be protected."""
content = msg.get("content")
# Check for Anthropic-style tool messages
if isinstance(content, list) and any(
isinstance(item, dict) and item.get("type") in ("tool_use", "tool_result")
for item in content
):
return True
# Check for OpenAI-style tool calls in the message
if "tool_calls" in msg or msg.get("role") == "tool":
return True
return False
def _is_objective_message(msg: dict) -> bool:
"""Check if a message contains objective/system prompts that should be absolutely protected."""
content = msg.get("content", "")
if isinstance(content, str):
# Protect any message with the main objective prefix
return content.startswith(MAIN_OBJECTIVE_PREFIX)
return False
def _truncate_tool_message_content(msg: dict, enc, max_tokens: int) -> None:
"""
Carefully truncate tool message content while preserving tool structure.
Only truncates tool_result content, leaves tool_use intact.
"""
content = msg.get("content")
if not isinstance(content, list):
return
for item in content:
# Only process tool_result items, leave tool_use blocks completely intact
if not (isinstance(item, dict) and item.get("type") == "tool_result"):
continue
result_content = item.get("content", "")
if (
isinstance(result_content, str)
and _tok_len(result_content, enc) > max_tokens
):
item["content"] = _truncate_middle_tokens(result_content, enc, max_tokens)
def _truncate_middle_tokens(text: str, enc, max_tok: int) -> str:
"""
Return *text* shortened to ≈max_tok tokens by keeping the head & tail
@@ -196,21 +140,13 @@ def compress_prompt(
return sum(_msg_tokens(m, enc) for m in msgs)
original_token_count = total_tokens()
if original_token_count + reserve <= target_tokens:
return msgs
# ---- STEP 0 : normalise content --------------------------------------
# Convert non-string payloads to strings so token counting is coherent.
for i, m in enumerate(msgs):
for m in msgs[1:-1]: # keep the first & last intact
if not isinstance(m.get("content"), str) and m.get("content") is not None:
if _is_tool_message(m):
continue
# Keep first and last messages intact (unless they're tool messages)
if i == 0 or i == len(msgs) - 1:
continue
# Reasonable 20k-char ceiling prevents pathological blobs
content_str = json.dumps(m["content"], separators=(",", ":"))
if len(content_str) > 20_000:
@@ -221,45 +157,34 @@ def compress_prompt(
cap = start_cap
while total_tokens() + reserve > target_tokens and cap >= floor_cap:
for m in msgs[1:-1]: # keep first & last intact
if _is_tool_message(m):
# For tool messages, only truncate tool result content, preserve structure
_truncate_tool_message_content(m, enc, cap)
continue
if _is_objective_message(m):
# Never truncate objective messages - they contain the core task
continue
content = m.get("content") or ""
if _tok_len(content, enc) > cap:
m["content"] = _truncate_middle_tokens(content, enc, cap)
if _tok_len(m.get("content") or "", enc) > cap:
m["content"] = _truncate_middle_tokens(m["content"], enc, cap)
cap //= 2 # tighten the screw
# ---- STEP 2 : middle-out deletion -----------------------------------
while total_tokens() + reserve > target_tokens and len(msgs) > 2:
# Identify all deletable messages (not first/last, not tool messages, not objective messages)
deletable_indices = []
for i in range(1, len(msgs) - 1): # Skip first and last
if not _is_tool_message(msgs[i]) and not _is_objective_message(msgs[i]):
deletable_indices.append(i)
if not deletable_indices:
break # nothing more we can drop
# Delete from center outward - find the index closest to center
centre = len(msgs) // 2
to_delete = min(deletable_indices, key=lambda i: abs(i - centre))
del msgs[to_delete]
# Build a symmetrical centre-out index walk: centre, centre+1, centre-1, ...
order = [centre] + [
i
for pair in zip(range(centre + 1, len(msgs) - 1), range(centre - 1, 0, -1))
for i in pair
]
removed = False
for i in order:
msg = msgs[i]
if "tool_calls" in msg or msg.get("role") == "tool":
continue # protect tool shells
del msgs[i]
removed = True
break
if not removed: # nothing more we can drop
break
# ---- STEP 3 : final safety-net trim on first & last ------------------
cap = start_cap
while total_tokens() + reserve > target_tokens and cap >= floor_cap:
for idx in (0, -1): # first and last
if _is_tool_message(msgs[idx]):
# For tool messages at first/last position, truncate tool result content only
_truncate_tool_message_content(msgs[idx], enc, cap)
continue
text = msgs[idx].get("content") or ""
if _tok_len(text, enc) > cap:
msgs[idx]["content"] = _truncate_middle_tokens(text, enc, cap)

View File

@@ -1,87 +0,0 @@
-- Migration: Replace full-text search with pgvector-based vector search
-- This migration:
-- 1. Enables the pgvector extension
-- 2. Drops the StoreAgent view (depends on search column)
-- 3. Removes the full-text search infrastructure (trigger, function, tsvector column)
-- 4. Adds a vector embedding column for semantic search
-- 5. Creates an index for fast vector similarity search
-- 6. Recreates the StoreAgent view with the embedding column
-- Enable pgvector extension
CREATE EXTENSION IF NOT EXISTS vector;
-- First drop the view that depends on the search column
DROP VIEW IF EXISTS "StoreAgent";
-- Add embedding column for vector search (1536 dimensions for text-embedding-3-small)
ALTER TABLE "StoreListingVersion"
ADD COLUMN IF NOT EXISTS "embedding" vector(1536);
-- Create IVFFlat index for fast similarity search
-- Using cosine distance (vector_cosine_ops) which is standard for text embeddings
-- lists = 100 is appropriate for datasets under 1M rows
CREATE INDEX IF NOT EXISTS idx_store_listing_version_embedding
ON "StoreListingVersion"
USING ivfflat (embedding vector_cosine_ops)
WITH (lists = 100);
-- Recreate StoreAgent view WITHOUT search column, WITH embedding column
CREATE OR REPLACE VIEW "StoreAgent" AS
WITH latest_versions AS (
SELECT
"storeListingId",
MAX(version) AS max_version
FROM "StoreListingVersion"
WHERE "submissionStatus" = 'APPROVED'
GROUP BY "storeListingId"
),
agent_versions AS (
SELECT
"storeListingId",
array_agg(DISTINCT version::text ORDER BY version::text) AS versions
FROM "StoreListingVersion"
WHERE "submissionStatus" = 'APPROVED'
GROUP BY "storeListingId"
)
SELECT
sl.id AS listing_id,
slv.id AS "storeListingVersionId",
slv."createdAt" AS updated_at,
sl.slug,
COALESCE(slv.name, '') AS agent_name,
slv."videoUrl" AS agent_video,
slv."agentOutputDemoUrl" AS agent_output_demo,
COALESCE(slv."imageUrls", ARRAY[]::text[]) AS agent_image,
slv."isFeatured" AS featured,
p.username AS creator_username,
p."avatarUrl" AS creator_avatar,
slv."subHeading" AS sub_heading,
slv.description,
slv.categories,
slv.search,
slv.embedding,
COALESCE(ar.run_count, 0::bigint) AS runs,
COALESCE(rs.avg_rating, 0.0)::double precision AS rating,
COALESCE(av.versions, ARRAY[slv.version::text]) AS versions,
COALESCE(sl."useForOnboarding", false) AS "useForOnboarding",
slv."isAvailable" AS is_available
FROM "StoreListing" sl
JOIN latest_versions lv
ON sl.id = lv."storeListingId"
JOIN "StoreListingVersion" slv
ON slv."storeListingId" = lv."storeListingId"
AND slv.version = lv.max_version
AND slv."submissionStatus" = 'APPROVED'
JOIN "AgentGraph" a
ON slv."agentGraphId" = a.id
AND slv."agentGraphVersion" = a.version
LEFT JOIN "Profile" p
ON sl."owningUserId" = p."userId"
LEFT JOIN "mv_review_stats" rs
ON sl.id = rs."storeListingId"
LEFT JOIN "mv_agent_run_counts" ar
ON a.id = ar."agentGraphId"
LEFT JOIN agent_versions av
ON sl.id = av."storeListingId"
WHERE sl."isDeleted" = false
AND sl."hasApprovedVersion" = true;
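For context on what this migration (removed in this diff) enabled: a nearest-neighbour lookup ordered by pgvector's `<=>` cosine-distance operator, which is exactly what the IVFFlat index above accelerates. Below is a hedged sketch in Python with psycopg; the table and column names come from the migration, while the function name and the idea of passing in a precomputed 1536-dimension embedding are illustrative assumptions.

import psycopg

def search_agents(conn: psycopg.Connection, query_embedding: list[float], k: int = 10):
    # <=> is pgvector's cosine-distance operator; it matches the
    # vector_cosine_ops opclass above, so the ORDER BY can be served
    # by the IVFFlat index. 1 - distance gives cosine similarity.
    vec = "[" + ",".join(map(str, query_embedding)) + "]"  # pgvector text form
    sql = """
        SELECT "id", 1 - (embedding <=> %s::vector) AS cosine_similarity
        FROM "StoreListingVersion"
        WHERE embedding IS NOT NULL
        ORDER BY embedding <=> %s::vector
        LIMIT %s
    """
    with conn.cursor() as cur:
        cur.execute(sql, (vec, vec, k))
        return cur.fetchall()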

View File

@@ -1,35 +0,0 @@
-- Migration: Add hybrid search infrastructure (BM25 + vector + popularity)
-- This migration:
-- 1. Creates/updates the tsvector trigger with weighted fields
-- 2. Adds GIN index for full-text search performance
-- 3. Backfills existing records with tsvector data
-- Create or replace the trigger function with WEIGHTED tsvector
-- Weight A = name (highest priority), B = subHeading, C = description
CREATE OR REPLACE FUNCTION update_tsvector_column() RETURNS TRIGGER AS $$
BEGIN
NEW.search := setweight(to_tsvector('english', COALESCE(NEW.name, '')), 'A') ||
setweight(to_tsvector('english', COALESCE(NEW."subHeading", '')), 'B') ||
setweight(to_tsvector('english', COALESCE(NEW.description, '')), 'C');
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Drop and recreate trigger to ensure it's active with the updated function
DROP TRIGGER IF EXISTS "update_tsvector" ON "StoreListingVersion";
CREATE TRIGGER "update_tsvector"
BEFORE INSERT OR UPDATE OF name, "subHeading", description ON "StoreListingVersion"
FOR EACH ROW
EXECUTE FUNCTION update_tsvector_column();
-- Create GIN index for full-text search performance
CREATE INDEX IF NOT EXISTS idx_store_listing_version_search_gin
ON "StoreListingVersion" USING GIN (search);
-- Backfill existing records with weighted tsvector
UPDATE "StoreListingVersion"
SET search = setweight(to_tsvector('english', COALESCE(name, '')), 'A') ||
setweight(to_tsvector('english', COALESCE("subHeading", '')), 'B') ||
setweight(to_tsvector('english', COALESCE(description, '')), 'C')
WHERE search IS NULL
OR search = ''::tsvector;
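The weights matter at query time: ts_rank scores A-weighted lexemes above B and C ones, so a hit in name outranks the same hit in subHeading or description. A hedged Python/psycopg sketch of such a ranked query follows; the function name and connection handling are illustrative, and the SQL uses only standard Postgres full-text functions.

import psycopg

def search_listings(conn: psycopg.Connection, query: str, k: int = 10):
    # ts_rank reads the A/B/C weights baked into `search` by the trigger
    # above, so name matches score above subHeading/description matches.
    sql = """
        SELECT "id", ts_rank(search, websearch_to_tsquery('english', %s)) AS score
        FROM "StoreListingVersion"
        WHERE search @@ websearch_to_tsquery('english', %s)
        ORDER BY score DESC
        LIMIT %s
    """
    with conn.cursor() as cur:
        cur.execute(sql, (query, query, k))
        return cur.fetchall()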

View File

@@ -138,4 +138,3 @@ filterwarnings = [
[tool.ruff]
target-version = "py310"

View File

@@ -727,7 +727,7 @@ view StoreAgent {
sub_heading String
description String
categories String[]
embedding Unsupported("vector(1536)")?
search Unsupported("tsvector")? @default(dbgenerated("''::tsvector"))
runs Int
rating Float
versions String[]
@@ -863,11 +863,7 @@ model StoreListingVersion {
// Old versions can be made unavailable by the author if desired
isAvailable Boolean @default(true)
// Full-text search tsvector column
search Unsupported("tsvector")?
// Vector embedding for semantic search
embedding Unsupported("vector(1536)")?
search Unsupported("tsvector")? @default(dbgenerated("''::tsvector"))
// Version workflow state
submissionStatus SubmissionStatus @default(DRAFT)

View File

@@ -3,14 +3,6 @@ import { withSentryConfig } from "@sentry/nextjs";
/** @type {import('next').NextConfig} */
const nextConfig = {
productionBrowserSourceMaps: true,
experimental: {
serverActions: {
bodySizeLimit: "256mb",
},
// Increase body size limit for API routes (file uploads) - 256MB to match backend limit
proxyClientMaxBodySize: "256mb",
middlewareClientMaxBodySize: "256mb",
},
images: {
domains: [
// We don't need to maintain alphabetical order here

View File

@@ -137,8 +137,9 @@
"concurrently": "9.2.1",
"cross-env": "10.1.0",
"eslint": "8.57.1",
"eslint-config-next": "15.5.7",
"eslint-config-next": "15.5.2",
"eslint-plugin-storybook": "9.1.5",
"import-in-the-middle": "1.14.2",
"msw": "2.11.6",
"msw-storybook-addon": "2.0.6",
"orval": "7.13.0",
@@ -158,4 +159,3 @@
},
"packageManager": "pnpm@10.20.0+sha512.cf9998222162dd85864d0a8102e7892e7ba4ceadebbf5a31f9c2fce48dfce317a9c53b9f6464d1ef9042cba2e02ae02a9f7c143a2b438cd93c91840f0192b9dd"
}

View File

@@ -331,11 +331,14 @@ importers:
specifier: 8.57.1
version: 8.57.1
eslint-config-next:
specifier: 15.5.7
version: 15.5.7(eslint@8.57.1)(typescript@5.9.3)
specifier: 15.5.2
version: 15.5.2(eslint@8.57.1)(typescript@5.9.3)
eslint-plugin-storybook:
specifier: 9.1.5
version: 9.1.5(eslint@8.57.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(typescript@5.9.3)
import-in-the-middle:
specifier: 1.14.2
version: 1.14.2
msw:
specifier: 2.11.6
version: 2.11.6(@types/node@24.10.0)(typescript@5.9.3)
@@ -983,15 +986,12 @@ packages:
'@date-fns/tz@1.4.1':
resolution: {integrity: sha512-P5LUNhtbj6YfI3iJjw5EL9eUAG6OitD0W3fWQcpQjDRc/QIsL0tRNuO1PcDvPccWL1fSTXXdE1ds+l95DV/OFA==}
'@emnapi/core@1.7.1':
resolution: {integrity: sha512-o1uhUASyo921r2XtHYOHy7gdkGLge8ghBEQHMWmyJFoXlpU58kIrhhN3w26lpQb6dspetweapMn2CSNwQ8I4wg==}
'@emnapi/core@1.5.0':
resolution: {integrity: sha512-sbP8GzB1WDzacS8fgNPpHlp6C9VZe+SJP3F90W9rLemaQj2PzIuTEl1qDOYQf58YIpyjViI24y9aPWCjEzY2cg==}
'@emnapi/runtime@1.5.0':
resolution: {integrity: sha512-97/BJ3iXHww3djw6hYIfErCZFee7qCtrneuLa20UXFCOTCfBM2cvQHjWJ2EG0s0MtdNwInarqCTz35i4wWXHsQ==}
'@emnapi/runtime@1.7.1':
resolution: {integrity: sha512-PVtJr5CmLwYAU9PZDMITZoR5iAOShYREoR45EyyLrbntV50mdePTgUn4AmOw90Ifcj+x2kRjdzr1HP3RrNiHGA==}
'@emnapi/wasi-threads@1.1.0':
resolution: {integrity: sha512-WI0DdZ8xFSbgMjR1sFsKABJ/C5OnRrjT06JXbZKexJGrDuPTzZdDYfFlsgcCXCyf+suG5QU2e/y1Wo2V/OapLQ==}
@@ -1329,10 +1329,6 @@ packages:
resolution: {integrity: sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==}
engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0}
'@eslint-community/regexpp@4.12.2':
resolution: {integrity: sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==}
engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0}
'@eslint/eslintrc@2.1.4':
resolution: {integrity: sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==}
engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
@@ -1609,8 +1605,8 @@ packages:
'@next/env@15.4.10':
resolution: {integrity: sha512-knhmoJ0Vv7VRf6pZEPSnciUG1S4bIhWx+qTYBW/AjxEtlzsiNORPk8sFDCEvqLfmKuey56UB9FL1UdHEV3uBrg==}
'@next/eslint-plugin-next@15.5.7':
resolution: {integrity: sha512-DtRU2N7BkGr8r+pExfuWHwMEPX5SD57FeA6pxdgCHODo+b/UgIgjE+rgWKtJAbEbGhVZ2jtHn4g3wNhWFoNBQQ==}
'@next/eslint-plugin-next@15.5.2':
resolution: {integrity: sha512-lkLrRVxcftuOsJNhWatf1P2hNVfh98k/omQHrCEPPriUypR6RcS13IvLdIrEvkm9AH2Nu2YpR5vLqBuy6twH3Q==}
'@next/swc-darwin-arm64@15.4.8':
resolution: {integrity: sha512-Pf6zXp7yyQEn7sqMxur6+kYcywx5up1J849psyET7/8pG2gQTVMjU3NzgIt8SeEP5to3If/SaWmaA6H6ysBr1A==}
@@ -2626,8 +2622,8 @@ packages:
'@rtsao/scc@1.1.0':
resolution: {integrity: sha512-zt6OdqaDoOnJ1ZYsCYGt9YmWzDXl4vQdKTyJev62gFhRGKdx7mcT54V9KIjg+d2wi9EXsPvAPKe7i7WjfVWB8g==}
'@rushstack/eslint-patch@1.15.0':
resolution: {integrity: sha512-ojSshQPKwVvSMR8yT2L/QtUkV5SXi/IfDiJ4/8d6UbTPjiHVmxZzUAzGD8Tzks1b9+qQkZa0isUOvYObedITaw==}
'@rushstack/eslint-patch@1.12.0':
resolution: {integrity: sha512-5EwMtOqvJMMa3HbmxLlF74e+3/HhwBTMcvt3nqVJgGCozO6hzIPOBlwm8mGVNR9SN2IJpxSnlxczyDjcn7qIyw==}
'@scarf/scarf@1.4.0':
resolution: {integrity: sha512-xxeapPiUXdZAE3che6f3xogoJPeZgig6omHEy1rIY5WVsB3H2BHNnZH+gHG6x91SCWyQCzWGsuL2Hh3ClO5/qQ==}
@@ -3101,8 +3097,8 @@ packages:
peerDependencies:
'@testing-library/dom': '>=7.21.4'
'@tybys/wasm-util@0.10.1':
resolution: {integrity: sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==}
'@tybys/wasm-util@0.10.0':
resolution: {integrity: sha512-VyyPYFlOMNylG45GoAe0xDoLwWuowvf92F9kySqzYh8vmYm7D2u4iUJKa1tOUpS70Ku13ASrOkS4ScXFsTaCNQ==}
'@types/aria-query@5.0.4':
resolution: {integrity: sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==}
@@ -3292,16 +3288,16 @@ packages:
'@types/ws@8.18.1':
resolution: {integrity: sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==}
'@typescript-eslint/eslint-plugin@8.48.1':
resolution: {integrity: sha512-X63hI1bxl5ohelzr0LY5coufyl0LJNthld+abwxpCoo6Gq+hSqhKwci7MUWkXo67mzgUK6YFByhmaHmUcuBJmA==}
'@typescript-eslint/eslint-plugin@8.43.0':
resolution: {integrity: sha512-8tg+gt7ENL7KewsKMKDHXR1vm8tt9eMxjJBYINf6swonlWgkYn5NwyIgXpbbDxTNU5DgpDFfj95prcTq2clIQQ==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
peerDependencies:
'@typescript-eslint/parser': ^8.48.1
'@typescript-eslint/parser': ^8.43.0
eslint: ^8.57.0 || ^9.0.0
typescript: '>=4.8.4 <6.0.0'
'@typescript-eslint/parser@8.48.1':
resolution: {integrity: sha512-PC0PDZfJg8sP7cmKe6L3QIL8GZwU5aRvUFedqSIpw3B+QjRSUZeeITC2M5XKeMXEzL6wccN196iy3JLwKNvDVA==}
'@typescript-eslint/parser@8.43.0':
resolution: {integrity: sha512-B7RIQiTsCBBmY+yW4+ILd6mF5h1FUwJsVvpqkrgpszYifetQ2Ke+Z4u6aZh0CblkUGIdR59iYVyXqqZGkZ3aBw==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
peerDependencies:
eslint: ^8.57.0 || ^9.0.0
@@ -3319,12 +3315,6 @@ packages:
peerDependencies:
typescript: '>=4.8.4 <6.0.0'
'@typescript-eslint/project-service@8.48.1':
resolution: {integrity: sha512-HQWSicah4s9z2/HifRPQ6b6R7G+SBx64JlFQpgSSHWPKdvCZX57XCbszg/bapbRsOEv42q5tayTYcEFpACcX1w==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
peerDependencies:
typescript: '>=4.8.4 <6.0.0'
'@typescript-eslint/scope-manager@8.43.0':
resolution: {integrity: sha512-daSWlQ87ZhsjrbMLvpuuMAt3y4ba57AuvadcR7f3nl8eS3BjRc8L9VLxFLk92RL5xdXOg6IQ+qKjjqNEimGuAg==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
@@ -3333,10 +3323,6 @@ packages:
resolution: {integrity: sha512-LF4b/NmGvdWEHD2H4MsHD8ny6JpiVNDzrSZr3CsckEgCbAGZbYM4Cqxvi9L+WqDMT+51Ozy7lt2M+d0JLEuBqA==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
'@typescript-eslint/scope-manager@8.48.1':
resolution: {integrity: sha512-rj4vWQsytQbLxC5Bf4XwZ0/CKd362DkWMUkviT7DCS057SK64D5lH74sSGzhI6PDD2HCEq02xAP9cX68dYyg1w==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
'@typescript-eslint/tsconfig-utils@8.43.0':
resolution: {integrity: sha512-ALC2prjZcj2YqqL5X/bwWQmHA2em6/94GcbB/KKu5SX3EBDOsqztmmX1kMkvAJHzxk7TazKzJfFiEIagNV3qEA==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
@@ -3349,14 +3335,8 @@ packages:
peerDependencies:
typescript: '>=4.8.4 <6.0.0'
'@typescript-eslint/tsconfig-utils@8.48.1':
resolution: {integrity: sha512-k0Jhs4CpEffIBm6wPaCXBAD7jxBtrHjrSgtfCjUvPp9AZ78lXKdTR8fxyZO5y4vWNlOvYXRtngSZNSn+H53Jkw==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
peerDependencies:
typescript: '>=4.8.4 <6.0.0'
'@typescript-eslint/type-utils@8.48.1':
resolution: {integrity: sha512-1jEop81a3LrJQLTf/1VfPQdhIY4PlGDBc/i67EVWObrtvcziysbLN3oReexHOM6N3jyXgCrkBsZpqwH0hiDOQg==}
'@typescript-eslint/type-utils@8.43.0':
resolution: {integrity: sha512-qaH1uLBpBuBBuRf8c1mLJ6swOfzCXryhKND04Igr4pckzSEW9JX5Aw9AgW00kwfjWJF0kk0ps9ExKTfvXfw4Qg==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
peerDependencies:
eslint: ^8.57.0 || ^9.0.0
@@ -3370,10 +3350,6 @@ packages:
resolution: {integrity: sha512-lNCWCbq7rpg7qDsQrd3D6NyWYu+gkTENkG5IKYhUIcxSb59SQC/hEQ+MrG4sTgBVghTonNWq42bA/d4yYumldQ==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
'@typescript-eslint/types@8.48.1':
resolution: {integrity: sha512-+fZ3LZNeiELGmimrujsDCT4CRIbq5oXdHe7chLiW8qzqyPMnn1puNstCrMNVAqwcl2FdIxkuJ4tOs/RFDBVc/Q==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
'@typescript-eslint/typescript-estree@8.43.0':
resolution: {integrity: sha512-7Vv6zlAhPb+cvEpP06WXXy/ZByph9iL6BQRBDj4kmBsW98AqEeQHlj/13X+sZOrKSo9/rNKH4Ul4f6EICREFdw==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
@@ -3386,12 +3362,6 @@ packages:
peerDependencies:
typescript: '>=4.8.4 <6.0.0'
'@typescript-eslint/typescript-estree@8.48.1':
resolution: {integrity: sha512-/9wQ4PqaefTK6POVTjJaYS0bynCgzh6ClJHGSBj06XEHjkfylzB+A3qvyaXnErEZSaxhIo4YdyBgq6j4RysxDg==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
peerDependencies:
typescript: '>=4.8.4 <6.0.0'
'@typescript-eslint/utils@8.43.0':
resolution: {integrity: sha512-S1/tEmkUeeswxd0GGcnwuVQPFWo8NzZTOMxCvw8BX7OMxnNae+i8Tm7REQen/SwUIPoPqfKn7EaZ+YLpiB3k9g==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
@@ -3406,13 +3376,6 @@ packages:
eslint: ^8.57.0 || ^9.0.0
typescript: '>=4.8.4 <6.0.0'
'@typescript-eslint/utils@8.48.1':
resolution: {integrity: sha512-fAnhLrDjiVfey5wwFRwrweyRlCmdz5ZxXz2G/4cLn0YDLjTapmN4gcCsTBR1N2rWnZSDeWpYtgLDsJt+FpmcwA==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
peerDependencies:
eslint: ^8.57.0 || ^9.0.0
typescript: '>=4.8.4 <6.0.0'
'@typescript-eslint/visitor-keys@8.43.0':
resolution: {integrity: sha512-T+S1KqRD4sg/bHfLwrpF/K3gQLBM1n7Rp7OjjikjTEssI2YJzQpi5WXoynOaQ93ERIuq3O8RBTOUYDKszUCEHw==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
@@ -3421,10 +3384,6 @@ packages:
resolution: {integrity: sha512-tUFMXI4gxzzMXt4xpGJEsBsTox0XbNQ1y94EwlD/CuZwFcQP79xfQqMhau9HsRc/J0cAPA/HZt1dZPtGn9V/7w==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
'@typescript-eslint/visitor-keys@8.48.1':
resolution: {integrity: sha512-BmxxndzEWhE4TIEEMBs8lP3MBWN3jFPs/p6gPm/wkv02o41hI6cq9AuSmGAaTTHPtA1FTi2jBre4A9rm5ZmX+Q==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
'@ungap/structured-clone@1.3.0':
resolution: {integrity: sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==}
@@ -4626,8 +4585,8 @@ packages:
resolution: {integrity: sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==}
engines: {node: '>=12'}
eslint-config-next@15.5.7:
resolution: {integrity: sha512-nU/TRGHHeG81NeLW5DeQT5t6BDUqbpsNQTvef1ld/tqHT+/zTx60/TIhKnmPISTTe++DVo+DLxDmk4rnwHaZVw==}
eslint-config-next@15.5.2:
resolution: {integrity: sha512-3hPZghsLupMxxZ2ggjIIrat/bPniM2yRpsVPVM40rp8ZMzKWOJp2CGWn7+EzoV2ddkUr5fxNfHpF+wU1hGt/3g==}
peerDependencies:
eslint: ^7.23.0 || ^8.0.0 || ^9.0.0
typescript: '>=3.3.1'
@@ -4959,10 +4918,6 @@ packages:
peerDependencies:
next: '>=13.2.0'
generator-function@2.0.1:
resolution: {integrity: sha512-SFdFmIJi+ybC0vjlHN0ZGVGHc3lgE0DxPAT0djjVg+kjOnSqclqmj0KQ7ykTOLP6YxoqOvuAODGdcHJn+43q3g==}
engines: {node: '>= 0.4'}
gensync@1.0.0-beta.2:
resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==}
engines: {node: '>=6.9.0'}
@@ -4991,8 +4946,8 @@ packages:
resolution: {integrity: sha512-w9UMqWwJxHNOvoNzSJ2oPF5wvYcvP7jUvYzhp67yEhTi17ZDBBC1z9pTdGuzjD+EFIqLSYRweZjqfiPzQ06Ebg==}
engines: {node: '>= 0.4'}
get-tsconfig@4.13.0:
resolution: {integrity: sha512-1VKTZJCwBrvbd+Wn3AOgQP/2Av+TfTCOlE4AcRJE72W1ksZXbAx8PPBR9RzgTeSPzlPMHrbANMH3LbltH73wxQ==}
get-tsconfig@4.10.1:
resolution: {integrity: sha512-auHyJ4AgMz7vgS8Hp3N6HXSmlMdUyhSUrfBF16w153rxtLIEOE+HGqaBppczZvnHLqQJfiHotCYpNhl0lUROFQ==}
github-slugger@2.0.0:
resolution: {integrity: sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw==}
@@ -5213,6 +5168,9 @@ packages:
resolution: {integrity: sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==}
engines: {node: '>=6'}
import-in-the-middle@1.14.2:
resolution: {integrity: sha512-5tCuY9BV8ujfOpwtAGgsTx9CGUapcFMEEyByLv1B+v2+6DhAcw+Zr0nhQT7uwaZ7DiourxFEscghOR8e1aPLQw==}
import-in-the-middle@2.0.0:
resolution: {integrity: sha512-yNZhyQYqXpkT0AKq3F3KLasUSK4fHvebNH5hOsKQw2dhGSALvQ4U0BqUc5suziKvydO5u5hgN2hy1RJaho8U5A==}
@@ -5324,10 +5282,6 @@ packages:
resolution: {integrity: sha512-nPUB5km40q9e8UfN/Zc24eLlzdSf9OfKByBw9CIdw4H1giPMeA0OIJvbchsCu4npfI2QcMVBsGEBHKZ7wLTWmQ==}
engines: {node: '>= 0.4'}
is-generator-function@1.1.2:
resolution: {integrity: sha512-upqt1SkGkODW9tsGNG5mtXTXtECizwtS2kA161M+gJPc1xdb/Ax629af6YrTwcOeQHbewrPNlE5Dx7kzvXTizA==}
engines: {node: '>= 0.4'}
is-glob@4.0.3:
resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==}
engines: {node: '>=0.10.0'}
@@ -5949,8 +5903,8 @@ packages:
engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1}
hasBin: true
napi-postinstall@0.3.4:
resolution: {integrity: sha512-PHI5f1O0EP5xJ9gQmFGMS6IZcrVvTjpXjz7Na41gTE7eE2hK11lg04CECCYEEjdc17EV4DO+fkGEtt7TpTaTiQ==}
napi-postinstall@0.3.3:
resolution: {integrity: sha512-uTp172LLXSxuSYHv/kou+f6KW3SMppU9ivthaVTXian9sOt3XM/zHYHpRZiLgQoxeWfYUnslNWQHF1+G71xcow==}
engines: {node: ^12.20.0 || ^14.18.0 || >=16.0.0}
hasBin: true
@@ -6815,11 +6769,6 @@ packages:
engines: {node: '>= 0.4'}
hasBin: true
resolve@1.22.11:
resolution: {integrity: sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==}
engines: {node: '>= 0.4'}
hasBin: true
resolve@1.22.8:
resolution: {integrity: sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==}
hasBin: true
@@ -7909,7 +7858,7 @@ snapshots:
'@babel/helper-plugin-utils': 7.27.1
debug: 4.4.3
lodash.debounce: 4.0.8
resolve: 1.22.11
resolve: 1.22.10
transitivePeerDependencies:
- supports-color
@@ -8601,7 +8550,7 @@ snapshots:
'@date-fns/tz@1.4.1': {}
'@emnapi/core@1.7.1':
'@emnapi/core@1.5.0':
dependencies:
'@emnapi/wasi-threads': 1.1.0
tslib: 2.8.1
@@ -8612,11 +8561,6 @@ snapshots:
tslib: 2.8.1
optional: true
'@emnapi/runtime@1.7.1':
dependencies:
tslib: 2.8.1
optional: true
'@emnapi/wasi-threads@1.1.0':
dependencies:
tslib: 2.8.1
@@ -8795,8 +8739,6 @@ snapshots:
'@eslint-community/regexpp@4.12.1': {}
'@eslint-community/regexpp@4.12.2': {}
'@eslint/eslintrc@2.1.4':
dependencies:
ajv: 6.12.6
@@ -9054,16 +8996,16 @@ snapshots:
'@napi-rs/wasm-runtime@0.2.12':
dependencies:
'@emnapi/core': 1.7.1
'@emnapi/runtime': 1.7.1
'@tybys/wasm-util': 0.10.1
'@emnapi/core': 1.5.0
'@emnapi/runtime': 1.5.0
'@tybys/wasm-util': 0.10.0
optional: true
'@neoconfetti/react@1.0.0': {}
'@next/env@15.4.10': {}
'@next/eslint-plugin-next@15.5.7':
'@next/eslint-plugin-next@15.5.2':
dependencies:
fast-glob: 3.3.1
@@ -10173,7 +10115,7 @@ snapshots:
'@rtsao/scc@1.1.0': {}
'@rushstack/eslint-patch@1.15.0': {}
'@rushstack/eslint-patch@1.12.0': {}
'@scarf/scarf@1.4.0': {}
@@ -10925,7 +10867,7 @@ snapshots:
dependencies:
'@testing-library/dom': 10.4.1
'@tybys/wasm-util@0.10.1':
'@tybys/wasm-util@0.10.0':
dependencies:
tslib: 2.8.1
optional: true
@@ -11123,14 +11065,14 @@ snapshots:
dependencies:
'@types/node': 24.10.0
'@typescript-eslint/eslint-plugin@8.48.1(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1)(typescript@5.9.3)':
'@typescript-eslint/eslint-plugin@8.43.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1)(typescript@5.9.3)':
dependencies:
'@eslint-community/regexpp': 4.12.2
'@typescript-eslint/parser': 8.48.1(eslint@8.57.1)(typescript@5.9.3)
'@typescript-eslint/scope-manager': 8.48.1
'@typescript-eslint/type-utils': 8.48.1(eslint@8.57.1)(typescript@5.9.3)
'@typescript-eslint/utils': 8.48.1(eslint@8.57.1)(typescript@5.9.3)
'@typescript-eslint/visitor-keys': 8.48.1
'@eslint-community/regexpp': 4.12.1
'@typescript-eslint/parser': 8.43.0(eslint@8.57.1)(typescript@5.9.3)
'@typescript-eslint/scope-manager': 8.43.0
'@typescript-eslint/type-utils': 8.43.0(eslint@8.57.1)(typescript@5.9.3)
'@typescript-eslint/utils': 8.43.0(eslint@8.57.1)(typescript@5.9.3)
'@typescript-eslint/visitor-keys': 8.43.0
eslint: 8.57.1
graphemer: 1.4.0
ignore: 7.0.5
@@ -11140,12 +11082,12 @@ snapshots:
transitivePeerDependencies:
- supports-color
'@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3)':
'@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3)':
dependencies:
'@typescript-eslint/scope-manager': 8.48.1
'@typescript-eslint/types': 8.48.1
'@typescript-eslint/typescript-estree': 8.48.1(typescript@5.9.3)
'@typescript-eslint/visitor-keys': 8.48.1
'@typescript-eslint/scope-manager': 8.43.0
'@typescript-eslint/types': 8.43.0
'@typescript-eslint/typescript-estree': 8.43.0(typescript@5.9.3)
'@typescript-eslint/visitor-keys': 8.43.0
debug: 4.4.3
eslint: 8.57.1
typescript: 5.9.3
@@ -11155,7 +11097,7 @@ snapshots:
'@typescript-eslint/project-service@8.43.0(typescript@5.9.3)':
dependencies:
'@typescript-eslint/tsconfig-utils': 8.43.0(typescript@5.9.3)
'@typescript-eslint/types': 8.48.1
'@typescript-eslint/types': 8.43.0
debug: 4.4.3
typescript: 5.9.3
transitivePeerDependencies:
@@ -11164,16 +11106,7 @@ snapshots:
'@typescript-eslint/project-service@8.46.2(typescript@5.9.3)':
dependencies:
'@typescript-eslint/tsconfig-utils': 8.46.2(typescript@5.9.3)
'@typescript-eslint/types': 8.48.1
debug: 4.4.3
typescript: 5.9.3
transitivePeerDependencies:
- supports-color
'@typescript-eslint/project-service@8.48.1(typescript@5.9.3)':
dependencies:
'@typescript-eslint/tsconfig-utils': 8.48.1(typescript@5.9.3)
'@typescript-eslint/types': 8.48.1
'@typescript-eslint/types': 8.46.2
debug: 4.4.3
typescript: 5.9.3
transitivePeerDependencies:
@@ -11189,11 +11122,6 @@ snapshots:
'@typescript-eslint/types': 8.46.2
'@typescript-eslint/visitor-keys': 8.46.2
'@typescript-eslint/scope-manager@8.48.1':
dependencies:
'@typescript-eslint/types': 8.48.1
'@typescript-eslint/visitor-keys': 8.48.1
'@typescript-eslint/tsconfig-utils@8.43.0(typescript@5.9.3)':
dependencies:
typescript: 5.9.3
@@ -11202,15 +11130,11 @@ snapshots:
dependencies:
typescript: 5.9.3
'@typescript-eslint/tsconfig-utils@8.48.1(typescript@5.9.3)':
'@typescript-eslint/type-utils@8.43.0(eslint@8.57.1)(typescript@5.9.3)':
dependencies:
typescript: 5.9.3
'@typescript-eslint/type-utils@8.48.1(eslint@8.57.1)(typescript@5.9.3)':
dependencies:
'@typescript-eslint/types': 8.48.1
'@typescript-eslint/typescript-estree': 8.48.1(typescript@5.9.3)
'@typescript-eslint/utils': 8.48.1(eslint@8.57.1)(typescript@5.9.3)
'@typescript-eslint/types': 8.43.0
'@typescript-eslint/typescript-estree': 8.43.0(typescript@5.9.3)
'@typescript-eslint/utils': 8.43.0(eslint@8.57.1)(typescript@5.9.3)
debug: 4.4.3
eslint: 8.57.1
ts-api-utils: 2.1.0(typescript@5.9.3)
@@ -11222,8 +11146,6 @@ snapshots:
'@typescript-eslint/types@8.46.2': {}
'@typescript-eslint/types@8.48.1': {}
'@typescript-eslint/typescript-estree@8.43.0(typescript@5.9.3)':
dependencies:
'@typescript-eslint/project-service': 8.43.0(typescript@5.9.3)
@@ -11234,7 +11156,7 @@ snapshots:
fast-glob: 3.3.3
is-glob: 4.0.3
minimatch: 9.0.5
semver: 7.7.3
semver: 7.7.2
ts-api-utils: 2.1.0(typescript@5.9.3)
typescript: 5.9.3
transitivePeerDependencies:
@@ -11256,21 +11178,6 @@ snapshots:
transitivePeerDependencies:
- supports-color
'@typescript-eslint/typescript-estree@8.48.1(typescript@5.9.3)':
dependencies:
'@typescript-eslint/project-service': 8.48.1(typescript@5.9.3)
'@typescript-eslint/tsconfig-utils': 8.48.1(typescript@5.9.3)
'@typescript-eslint/types': 8.48.1
'@typescript-eslint/visitor-keys': 8.48.1
debug: 4.4.3
minimatch: 9.0.5
semver: 7.7.3
tinyglobby: 0.2.15
ts-api-utils: 2.1.0(typescript@5.9.3)
typescript: 5.9.3
transitivePeerDependencies:
- supports-color
'@typescript-eslint/utils@8.43.0(eslint@8.57.1)(typescript@5.9.3)':
dependencies:
'@eslint-community/eslint-utils': 4.9.0(eslint@8.57.1)
@@ -11293,17 +11200,6 @@ snapshots:
transitivePeerDependencies:
- supports-color
'@typescript-eslint/utils@8.48.1(eslint@8.57.1)(typescript@5.9.3)':
dependencies:
'@eslint-community/eslint-utils': 4.9.0(eslint@8.57.1)
'@typescript-eslint/scope-manager': 8.48.1
'@typescript-eslint/types': 8.48.1
'@typescript-eslint/typescript-estree': 8.48.1(typescript@5.9.3)
eslint: 8.57.1
typescript: 5.9.3
transitivePeerDependencies:
- supports-color
'@typescript-eslint/visitor-keys@8.43.0':
dependencies:
'@typescript-eslint/types': 8.43.0
@@ -11314,11 +11210,6 @@ snapshots:
'@typescript-eslint/types': 8.46.2
eslint-visitor-keys: 4.2.1
'@typescript-eslint/visitor-keys@8.48.1':
dependencies:
'@typescript-eslint/types': 8.48.1
eslint-visitor-keys: 4.2.1
'@ungap/structured-clone@1.3.0': {}
'@unrs/resolver-binding-android-arm-eabi@1.11.1':
@@ -12641,16 +12532,16 @@ snapshots:
escape-string-regexp@5.0.0: {}
eslint-config-next@15.5.7(eslint@8.57.1)(typescript@5.9.3):
eslint-config-next@15.5.2(eslint@8.57.1)(typescript@5.9.3):
dependencies:
'@next/eslint-plugin-next': 15.5.7
'@rushstack/eslint-patch': 1.15.0
'@typescript-eslint/eslint-plugin': 8.48.1(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1)(typescript@5.9.3)
'@typescript-eslint/parser': 8.48.1(eslint@8.57.1)(typescript@5.9.3)
'@next/eslint-plugin-next': 15.5.2
'@rushstack/eslint-patch': 1.12.0
'@typescript-eslint/eslint-plugin': 8.43.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1)(typescript@5.9.3)
'@typescript-eslint/parser': 8.43.0(eslint@8.57.1)(typescript@5.9.3)
eslint: 8.57.1
eslint-import-resolver-node: 0.3.9
eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0)(eslint@8.57.1)
eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1)
eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1)
eslint-plugin-jsx-a11y: 6.10.2(eslint@8.57.1)
eslint-plugin-react: 7.37.5(eslint@8.57.1)
eslint-plugin-react-hooks: 5.2.0(eslint@8.57.1)
@@ -12665,7 +12556,7 @@ snapshots:
dependencies:
debug: 3.2.7
is-core-module: 2.16.1
resolve: 1.22.11
resolve: 1.22.10
transitivePeerDependencies:
- supports-color
@@ -12674,28 +12565,28 @@ snapshots:
'@nolyfill/is-core-module': 1.0.39
debug: 4.4.3
eslint: 8.57.1
get-tsconfig: 4.13.0
get-tsconfig: 4.10.1
is-bun-module: 2.0.0
stable-hash: 0.0.5
tinyglobby: 0.2.15
unrs-resolver: 1.11.1
optionalDependencies:
eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1)
eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1)
transitivePeerDependencies:
- supports-color
eslint-module-utils@2.12.1(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1):
eslint-module-utils@2.12.1(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1):
dependencies:
debug: 3.2.7
optionalDependencies:
'@typescript-eslint/parser': 8.48.1(eslint@8.57.1)(typescript@5.9.3)
'@typescript-eslint/parser': 8.43.0(eslint@8.57.1)(typescript@5.9.3)
eslint: 8.57.1
eslint-import-resolver-node: 0.3.9
eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0)(eslint@8.57.1)
transitivePeerDependencies:
- supports-color
eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1):
eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1):
dependencies:
'@rtsao/scc': 1.1.0
array-includes: 3.1.9
@@ -12706,7 +12597,7 @@ snapshots:
doctrine: 2.1.0
eslint: 8.57.1
eslint-import-resolver-node: 0.3.9
eslint-module-utils: 2.12.1(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1)
eslint-module-utils: 2.12.1(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1)
hasown: 2.0.2
is-core-module: 2.16.1
is-glob: 4.0.3
@@ -12718,7 +12609,7 @@ snapshots:
string.prototype.trimend: 1.0.9
tsconfig-paths: 3.15.0
optionalDependencies:
'@typescript-eslint/parser': 8.48.1(eslint@8.57.1)(typescript@5.9.3)
'@typescript-eslint/parser': 8.43.0(eslint@8.57.1)(typescript@5.9.3)
transitivePeerDependencies:
- eslint-import-resolver-typescript
- eslint-import-resolver-webpack
@@ -13067,8 +12958,6 @@ snapshots:
dependencies:
next: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
generator-function@2.0.1: {}
gensync@1.0.0-beta.2: {}
get-caller-file@2.0.5: {}
@@ -13101,7 +12990,7 @@ snapshots:
es-errors: 1.3.0
get-intrinsic: 1.3.0
get-tsconfig@4.13.0:
get-tsconfig@4.10.1:
dependencies:
resolve-pkg-maps: 1.0.0
@@ -13385,6 +13274,13 @@ snapshots:
parent-module: 1.0.1
resolve-from: 4.0.0
import-in-the-middle@1.14.2:
dependencies:
acorn: 8.15.0
acorn-import-attributes: 1.9.5(acorn@8.15.0)
cjs-module-lexer: 1.4.3
module-details-from-path: 1.0.4
import-in-the-middle@2.0.0:
dependencies:
acorn: 8.15.0
@@ -13461,7 +13357,7 @@ snapshots:
is-bun-module@2.0.0:
dependencies:
semver: 7.7.3
semver: 7.7.2
is-callable@1.2.7: {}
@@ -13499,14 +13395,6 @@ snapshots:
has-tostringtag: 1.0.2
safe-regex-test: 1.1.0
is-generator-function@1.1.2:
dependencies:
call-bound: 1.0.4
generator-function: 2.0.1
get-proto: 1.0.1
has-tostringtag: 1.0.2
safe-regex-test: 1.1.0
is-glob@4.0.3:
dependencies:
is-extglob: 2.1.1
@@ -14327,7 +14215,7 @@ snapshots:
nanoid@3.3.11: {}
napi-postinstall@0.3.4: {}
napi-postinstall@0.3.3: {}
natural-compare@1.4.0: {}
@@ -15297,12 +15185,6 @@ snapshots:
path-parse: 1.0.7
supports-preserve-symlinks-flag: 1.0.0
resolve@1.22.11:
dependencies:
is-core-module: 2.16.1
path-parse: 1.0.7
supports-preserve-symlinks-flag: 1.0.0
resolve@1.22.8:
dependencies:
is-core-module: 2.16.1
@@ -16114,7 +15996,7 @@ snapshots:
unrs-resolver@1.11.1:
dependencies:
napi-postinstall: 0.3.4
napi-postinstall: 0.3.3
optionalDependencies:
'@unrs/resolver-binding-android-arm-eabi': 1.11.1
'@unrs/resolver-binding-android-arm64': 1.11.1
@@ -16342,7 +16224,7 @@ snapshots:
is-async-function: 2.1.1
is-date-object: 1.1.0
is-finalizationregistry: 1.1.1
is-generator-function: 1.1.2
is-generator-function: 1.1.0
is-regex: 1.2.1
is-weakref: 1.1.1
isarray: 2.0.5

View File

@@ -8,6 +8,7 @@ import {
CardTitle,
} from "@/components/__legacy__/ui/card";
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip";
import { CircleNotchIcon } from "@phosphor-icons/react/dist/ssr";
import { Play } from "lucide-react";
import OnboardingButton from "../components/OnboardingButton";
@@ -78,13 +79,20 @@ export default function Page() {
<CardContent className="flex flex-col gap-4">
{Object.entries(agent?.input_schema.properties || {}).map(
([key, inputSubSchema]) => (
<RunAgentInputs
key={key}
schema={inputSubSchema}
value={onboarding.state?.agentInput?.[key]}
placeholder={inputSubSchema.description}
onChange={(value) => handleSetAgentInput(key, value)}
/>
<div key={key} className="flex flex-col space-y-2">
<label className="flex items-center gap-1 text-sm font-medium">
{inputSubSchema.title || key}
<InformationTooltip
description={inputSubSchema.description}
/>
</label>
<RunAgentInputs
schema={inputSubSchema}
value={onboarding.state?.agentInput?.[key]}
placeholder={inputSubSchema.description}
onChange={(value) => handleSetAgentInput(key, value)}
/>
</div>
),
)}
<AgentOnboardingCredentials

View File

@@ -106,11 +106,7 @@ export const CustomNode: React.FC<NodeProps<CustomNode>> = React.memo(
/>
<NodeAdvancedToggle nodeId={nodeId} />
{data.uiType != BlockUIType.OUTPUT && (
<OutputHandler
uiType={data.uiType}
outputSchema={outputSchema}
nodeId={nodeId}
/>
<OutputHandler outputSchema={outputSchema} nodeId={nodeId} />
)}
<NodeDataRenderer nodeId={nodeId} />
</div>

View File

@@ -20,32 +20,17 @@ export const FormCreator = React.memo(
className?: string;
}) => {
const updateNodeData = useNodeStore((state) => state.updateNodeData);
const getHardCodedValues = useNodeStore(
(state) => state.getHardCodedValues,
);
const handleChange = ({ formData }: any) => {
if ("credentials" in formData && !formData.credentials?.id) {
delete formData.credentials;
}
const updatedValues =
uiType === BlockUIType.AGENT
? {
...getHardCodedValues(nodeId),
inputs: formData,
}
: formData;
updateNodeData(nodeId, { hardcodedValues: updatedValues });
updateNodeData(nodeId, { hardcodedValues: formData });
};
const hardcodedValues = getHardCodedValues(nodeId);
const initialValues =
uiType === BlockUIType.AGENT
? (hardcodedValues.inputs ?? {})
: hardcodedValues;
const initialValues = getHardCodedValues(nodeId);
return (
<div className={className}>

View File

@@ -14,16 +14,13 @@ import {
import { useEdgeStore } from "@/app/(platform)/build/stores/edgeStore";
import { getTypeDisplayInfo } from "./helpers";
import { generateHandleId } from "../handlers/helpers";
import { BlockUIType } from "../../types";
export const OutputHandler = ({
outputSchema,
nodeId,
uiType,
}: {
outputSchema: RJSFSchema;
nodeId: string;
uiType: BlockUIType;
}) => {
const { isOutputConnected } = useEdgeStore();
const properties = outputSchema?.properties || {};
@@ -82,9 +79,7 @@ export const OutputHandler = ({
</Text>
<NodeHandle
handleId={
uiType === BlockUIType.AGENT ? key : generateHandleId(key)
}
handleId={generateHandleId(key)}
isConnected={isConnected}
side="right"
/>

View File

@@ -7,7 +7,6 @@ import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import { getV2GetSpecificAgent } from "@/app/api/__generated__/endpoints/store/store";
import {
getGetV2ListLibraryAgentsQueryKey,
getV2GetLibraryAgent,
usePostV2AddMarketplaceAgent,
} from "@/app/api/__generated__/endpoints/library/library";
import {
@@ -152,12 +151,7 @@ export const useBlockMenuSearch = () => {
});
const libraryAgent = response.data as LibraryAgent;
const { data: libraryAgentDetails } = await getV2GetLibraryAgent(
libraryAgent.id,
);
addAgentToBuilder(libraryAgentDetails as LibraryAgent);
addAgentToBuilder(libraryAgent);
toast({
title: "Agent Added",

View File

@@ -1,7 +1,6 @@
import { getGetV2GetBuilderItemCountsQueryKey } from "@/app/api/__generated__/endpoints/default/default";
import {
getGetV2ListLibraryAgentsQueryKey,
getV2GetLibraryAgent,
usePostV2AddMarketplaceAgent,
} from "@/app/api/__generated__/endpoints/library/library";
import {
@@ -106,16 +105,8 @@ export const useMarketplaceAgentsContent = () => {
},
});
// Here, libraryAgent has empty input and output schemas.
// Not updating the endpoint because this endpoint is used elsewhere.
// TODO: Create a new endpoint for builder specific to marketplace agents.
const libraryAgent = response.data as LibraryAgent;
const { data: libraryAgentDetails } = await getV2GetLibraryAgent(
libraryAgent.id,
);
addAgentToBuilder(libraryAgentDetails as LibraryAgent);
addAgentToBuilder(libraryAgent);
toast({
title: "Agent Added",

View File

@@ -1,11 +1,16 @@
"use client";
import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import { Text } from "@/components/atoms/Text/Text";
import type { CredentialsMetaInput } from "@/lib/autogpt-server-api/types";
import type {
BlockIOSubSchema,
CredentialsMetaInput,
} from "@/lib/autogpt-server-api/types";
import { CredentialsInput } from "../CredentialsInputs/CredentialsInputs";
import { RunAgentInputs } from "../RunAgentInputs/RunAgentInputs";
import { getAgentCredentialsFields, getAgentInputFields } from "./helpers";
import {
getAgentCredentialsFields,
getAgentInputFields,
renderValue,
} from "./helpers";
type Props = {
agent: LibraryAgent;
@@ -23,23 +28,19 @@ export function AgentInputsReadOnly({
getAgentCredentialsFields(agent),
);
// Take the actual input entries as leading; augment them with schemas from the input fields.
// TODO: ensure consistent ordering.
const inputEntries =
inputs &&
Object.entries(inputs).map(([key, value]) => ({
key,
schema: inputFields[key],
value,
}));
Object.entries(inputs).map<[string, [BlockIOSubSchema | undefined, any]]>(
([k, v]) => [k, [inputFields[k], v]],
);
const hasInputs = inputEntries && inputEntries.length > 0;
const hasCredentials = credentialInputs && credentialFieldEntries.length > 0;
if (!hasInputs && !hasCredentials) {
return (
<Text variant="body" className="text-zinc-700">
No input for this run.
</Text>
);
return <div className="text-neutral-600">No input for this run.</div>;
}
return (
@@ -47,20 +48,16 @@ export function AgentInputsReadOnly({
{/* Regular inputs */}
{hasInputs && (
<div className="flex flex-col gap-4">
{inputEntries.map(({ key, schema, value }) => {
if (!schema) return null;
return (
<RunAgentInputs
key={key}
schema={schema}
value={value}
placeholder={schema.description}
onChange={() => {}}
readOnly={true}
/>
);
})}
{inputEntries.map(([key, [schema, value]]) => (
<div key={key} className="flex flex-col gap-1.5">
<label className="text-sm font-medium">
{schema?.title || key}
</label>
<p className="whitespace-pre-wrap break-words text-sm text-neutral-700">
{renderValue(value)}
</p>
</div>
))}
</div>
)}

View File

@@ -62,15 +62,12 @@ export function CredentialRow({
</div>
<IconKey className="h-5 w-5 shrink-0 text-zinc-800" />
<div className="flex min-w-0 flex-1 flex-nowrap items-center gap-4">
<Text
variant="body"
className="line-clamp-1 flex-[0_0_50%] text-ellipsis tracking-tight"
>
<Text variant="body" className="tracking-tight">
{getCredentialDisplayName(credential, displayName)}
</Text>
<Text
variant="large"
className="relative top-1 hidden flex-[0_0_40%] overflow-hidden truncate font-mono tracking-tight md:block"
className="relative top-1 font-mono tracking-tight"
>
{"*".repeat(MASKED_KEY_LENGTH)}
</Text>

View File

@@ -9,7 +9,6 @@ import { Button } from "@/components/atoms/Button/Button";
import { FileInput } from "@/components/atoms/FileInput/FileInput";
import { Switch } from "@/components/atoms/Switch/Switch";
import { GoogleDrivePickerInput } from "@/components/contextual/GoogleDrivePicker/GoogleDrivePickerInput";
import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip";
import { TimePicker } from "@/components/molecules/TimePicker/TimePicker";
import {
BlockIOObjectSubSchema,
@@ -33,7 +32,6 @@ interface Props {
value?: any;
placeholder?: string;
onChange: (value: any) => void;
readOnly?: boolean;
}
/**
@@ -46,7 +44,6 @@ export function RunAgentInputs({
value,
placeholder,
onChange,
readOnly = false,
...props
}: Props & React.HTMLAttributes<HTMLElement>) {
const { handleUploadFile, uploadProgress } = useRunAgentInputs();
@@ -65,6 +62,7 @@ export function RunAgentInputs({
id={`${baseId}-number`}
label={schema.title ?? placeholder ?? "Number"}
hideLabel
size="small"
type="number"
value={value ?? ""}
placeholder={placeholder || "Enter number"}
@@ -82,6 +80,7 @@ export function RunAgentInputs({
id={`${baseId}-textarea`}
label={schema.title ?? placeholder ?? "Text"}
hideLabel
size="small"
type="textarea"
rows={3}
value={value ?? ""}
@@ -103,7 +102,7 @@ export function RunAgentInputs({
value={value}
onChange={onChange}
className="w-full"
showRemoveButton={!readOnly}
showRemoveButton={false}
/>
);
break;
@@ -131,6 +130,7 @@ export function RunAgentInputs({
id={`${baseId}-date`}
label={schema.title ?? placeholder ?? "Date"}
hideLabel
size="small"
type="date"
value={value ? format(value as Date, "yyyy-MM-dd") : ""}
onChange={(e) => {
@@ -159,6 +159,7 @@ export function RunAgentInputs({
id={`${baseId}-datetime`}
label={schema.title ?? placeholder ?? "Date time"}
hideLabel
size="small"
type="datetime-local"
value={value ?? ""}
onChange={(e) => onChange((e.target as HTMLInputElement).value)}
@@ -193,6 +194,7 @@ export function RunAgentInputs({
label={schema.title ?? placeholder ?? "Select"}
hideLabel
value={value ?? ""}
size="small"
onValueChange={(val: string) => onChange(val)}
placeholder={placeholder || "Select an option"}
options={schema.enum
@@ -215,6 +217,7 @@ export function RunAgentInputs({
items={allKeys.map((key) => ({
value: key,
label: _schema.properties[key]?.title ?? key,
size: "small",
}))}
selectedValues={selectedValues}
onChange={(values: string[]) =>
@@ -333,6 +336,7 @@ export function RunAgentInputs({
id={`${baseId}-text`}
label={schema.title ?? placeholder ?? "Text"}
hideLabel
size="small"
type="text"
value={value ?? ""}
onChange={(e) => onChange((e.target as HTMLInputElement).value)}
@@ -343,17 +347,6 @@ export function RunAgentInputs({
}
return (
<div className="flex w-full flex-col gap-0 space-y-2">
<label className="large-medium flex items-center gap-1 font-medium">
{schema.title || placeholder}
<InformationTooltip description={schema.description} />
</label>
<div
className="no-drag relative flex w-full"
style={readOnly ? { pointerEvents: "none", opacity: 0.7 } : undefined}
>
{innerInputElement}
</div>
</div>
<div className="no-drag relative flex w-full">{innerInputElement}</div>
);
}

View File

@@ -73,15 +73,22 @@ export function ModalRunSection() {
title="Task Inputs"
subtitle="Enter the information you want to provide to the agent for this task"
>
{/* Regular inputs */}
{inputFields.map(([key, inputSubSchema]) => (
<RunAgentInputs
key={key}
schema={inputSubSchema}
value={inputValues[key] ?? inputSubSchema.default}
placeholder={inputSubSchema.description}
onChange={(value) => setInputValue(key, value)}
data-testid={`agent-input-${key}`}
/>
<div key={key} className="flex w-full flex-col gap-0 space-y-2">
<label className="flex items-center gap-1 text-sm font-medium">
{inputSubSchema.title || key}
<InformationTooltip description={inputSubSchema.description} />
</label>
<RunAgentInputs
schema={inputSubSchema}
value={inputValues[key] ?? inputSubSchema.default}
placeholder={inputSubSchema.description}
onChange={(value) => setInputValue(key, value)}
data-testid={`agent-input-${key}`}
/>
</div>
))}
</ModalSection>
) : null}

View File

@@ -4,19 +4,20 @@ import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecut
import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
import { Text } from "@/components/atoms/Text/Text";
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip";
import {
ScrollableTabs,
ScrollableTabsContent,
ScrollableTabsList,
ScrollableTabsTrigger,
} from "@/components/molecules/ScrollableTabs/ScrollableTabs";
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { PendingReviewsList } from "@/components/organisms/PendingReviewsList/PendingReviewsList";
import { usePendingReviewsForExecution } from "@/hooks/usePendingReviews";
import { isLargeScreen, useBreakpoint } from "@/lib/hooks/useBreakpoint";
import { InfoIcon } from "@phosphor-icons/react";
import { useEffect } from "react";
import { AgentInputsReadOnly } from "../../modals/AgentInputsReadOnly/AgentInputsReadOnly";
import { AnchorLinksWrap } from "../AnchorLinksWrap";
import { LoadingSelectedContent } from "../LoadingSelectedContent";
import { RunDetailCard } from "../RunDetailCard/RunDetailCard";
import { RunDetailHeader } from "../RunDetailHeader/RunDetailHeader";
@@ -27,6 +28,9 @@ import { SelectedRunActions } from "./components/SelectedRunActions/SelectedRunA
import { WebhookTriggerSection } from "./components/WebhookTriggerSection";
import { useSelectedRunView } from "./useSelectedRunView";
const anchorStyles =
"border-b-2 border-transparent pb-1 text-sm font-medium text-slate-600 transition-colors hover:text-slate-900 hover:border-slate-900";
interface Props {
agent: LibraryAgent;
runId: string;
@@ -61,6 +65,13 @@ export function SelectedRunView({
const withSummary = run?.stats?.activity_status;
const withReviews = run?.status === AgentExecutionStatus.REVIEW;
function scrollToSection(id: string) {
const element = document.getElementById(id);
if (element) {
element.scrollIntoView({ behavior: "smooth", block: "start" });
}
}
if (responseError || httpError) {
return (
<ErrorCard
@@ -101,116 +112,118 @@ export function SelectedRunView({
/>
)}
<ScrollableTabs
defaultValue="output"
className="-mt-2 flex flex-col"
>
<ScrollableTabsList className="px-4">
{withSummary && (
<ScrollableTabsTrigger value="summary">
Summary
</ScrollableTabsTrigger>
)}
<ScrollableTabsTrigger value="output">
Output
</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="input">
Your input
</ScrollableTabsTrigger>
{withReviews && (
<ScrollableTabsTrigger value="reviews">
Reviews ({pendingReviews.length})
</ScrollableTabsTrigger>
)}
</ScrollableTabsList>
<div className="my-6 flex flex-col gap-6">
{/* Summary Section */}
{withSummary && (
<ScrollableTabsContent value="summary">
<div className="scroll-mt-4">
<RunDetailCard
title={
<div className="flex items-center gap-1">
<Text variant="lead-semibold">Summary</Text>
<InformationTooltip
iconSize={20}
description="This AI-generated summary describes how the agent handled your task. It's an experimental feature and may occasionally be inaccurate."
{/* Navigation Links */}
<AnchorLinksWrap>
{withSummary && (
<button
onClick={() => scrollToSection("summary")}
className={anchorStyles}
>
Summary
</button>
)}
<button
onClick={() => scrollToSection("output")}
className={anchorStyles}
>
Output
</button>
<button
onClick={() => scrollToSection("input")}
className={anchorStyles}
>
Your input
</button>
{withReviews && (
<button
onClick={() => scrollToSection("reviews")}
className={anchorStyles}
>
Reviews ({pendingReviews.length})
</button>
)}
</AnchorLinksWrap>
{/* Summary Section */}
{withSummary && (
<div id="summary" className="scroll-mt-4">
<RunDetailCard
title={
<div className="flex items-center gap-2">
<Text variant="lead-semibold">Summary</Text>
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>
<InfoIcon
size={16}
className="cursor-help text-neutral-500 hover:text-neutral-700"
/>
</div>
}
>
<RunSummary run={run} />
</RunDetailCard>
</TooltipTrigger>
<TooltipContent>
<p className="max-w-xs">
This AI-generated summary describes how the agent
handled your task. It&apos;s an experimental
feature and may occasionally be inaccurate.
</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>
</div>
</ScrollableTabsContent>
)}
{/* Output Section */}
<ScrollableTabsContent value="output">
<div className="scroll-mt-4">
<RunDetailCard title="Output">
{isLoading ? (
<div className="text-neutral-500">
<LoadingSpinner />
</div>
) : run && "outputs" in run ? (
<RunOutputs outputs={run.outputs as any} />
) : (
<Text variant="body" className="text-neutral-600">
No output from this run.
</Text>
)}
</RunDetailCard>
</div>
</ScrollableTabsContent>
{/* Input Section */}
<ScrollableTabsContent value="input">
<div id="input" className="scroll-mt-4">
<RunDetailCard
title={
<div className="flex items-center gap-1">
<Text variant="lead-semibold">Your input</Text>
<InformationTooltip
iconSize={20}
description="This is the input that was provided to the agent for running this task."
/>
</div>
}
>
<AgentInputsReadOnly
agent={agent}
inputs={run?.inputs}
credentialInputs={run?.credential_inputs}
/>
</RunDetailCard>
</div>
</ScrollableTabsContent>
{/* Reviews Section */}
{withReviews && (
<ScrollableTabsContent value="reviews">
<div className="scroll-mt-4">
<RunDetailCard>
{reviewsLoading ? (
<LoadingSpinner size="small" />
) : pendingReviews.length > 0 ? (
<PendingReviewsList
reviews={pendingReviews}
onReviewComplete={refetchReviews}
emptyMessage="No pending reviews for this execution"
/>
) : (
<Text variant="body" className="text-zinc-700">
No pending reviews for this execution
</Text>
)}
</RunDetailCard>
</div>
</ScrollableTabsContent>
)}
}
>
<RunSummary run={run} />
</RunDetailCard>
</div>
</ScrollableTabs>
)}
{/* Output Section */}
<div id="output" className="scroll-mt-4">
<RunDetailCard title="Output">
{isLoading ? (
<div className="text-neutral-500">
<LoadingSpinner />
</div>
) : run && "outputs" in run ? (
<RunOutputs outputs={run.outputs as any} />
) : (
<Text variant="body" className="text-neutral-600">
No output from this run.
</Text>
)}
</RunDetailCard>
</div>
{/* Input Section */}
<div id="input" className="scroll-mt-4">
<RunDetailCard title="Your input">
<AgentInputsReadOnly
agent={agent}
inputs={run?.inputs}
credentialInputs={run?.credential_inputs}
/>
</RunDetailCard>
</div>
{/* Reviews Section */}
{withReviews && (
<div id="reviews" className="scroll-mt-4">
<RunDetailCard>
{reviewsLoading ? (
<div className="text-neutral-500">Loading reviews</div>
) : pendingReviews.length > 0 ? (
<PendingReviewsList
reviews={pendingReviews}
onReviewComplete={refetchReviews}
emptyMessage="No pending reviews for this execution"
/>
) : (
<div className="text-neutral-600">
No pending reviews for this execution
</div>
)}
</RunDetailCard>
</div>
)}
</div>
</SelectedViewLayout>
</div>

View File

@@ -9,6 +9,7 @@ import { humanizeCronExpression } from "@/lib/cron-expression-utils";
import { isLargeScreen, useBreakpoint } from "@/lib/hooks/useBreakpoint";
import { formatInTimezone, getTimezoneDisplayName } from "@/lib/timezone-utils";
import { AgentInputsReadOnly } from "../../modals/AgentInputsReadOnly/AgentInputsReadOnly";
import { AnchorLinksWrap } from "../AnchorLinksWrap";
import { LoadingSelectedContent } from "../LoadingSelectedContent";
import { RunDetailCard } from "../RunDetailCard/RunDetailCard";
import { RunDetailHeader } from "../RunDetailHeader/RunDetailHeader";
@@ -16,6 +17,9 @@ import { SelectedViewLayout } from "../SelectedViewLayout";
import { SelectedScheduleActions } from "./components/SelectedScheduleActions";
import { useSelectedScheduleView } from "./useSelectedScheduleView";
const anchorStyles =
"border-b-2 border-transparent pb-1 text-sm font-medium text-slate-600 transition-colors hover:text-slate-900 hover:border-slate-900";
interface Props {
agent: LibraryAgent;
scheduleId: string;
@@ -41,6 +45,13 @@ export function SelectedScheduleView({
const breakpoint = useBreakpoint();
const isLgScreenUp = isLargeScreen(breakpoint);
function scrollToSection(id: string) {
const element = document.getElementById(id);
if (element) {
element.scrollIntoView({ behavior: "smooth", block: "start" });
}
}
if (error) {
return (
<ErrorCard
@@ -97,6 +108,22 @@ export function SelectedScheduleView({
) : null}
</div>
{/* Navigation Links */}
<AnchorLinksWrap>
<button
onClick={() => scrollToSection("schedule")}
className={anchorStyles}
>
Schedule
</button>
<button
onClick={() => scrollToSection("input")}
className={anchorStyles}
>
Your input
</button>
</AnchorLinksWrap>
{/* Schedule Section */}
<div id="schedule" className="scroll-mt-4">
<RunDetailCard title="Schedule">

View File

@@ -0,0 +1,84 @@
"use client";
import type { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo";
import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { PencilSimpleIcon } from "@phosphor-icons/react";
import { RunAgentInputs } from "../../../../modals/RunAgentInputs/RunAgentInputs";
import { useEditInputsModal } from "./useEditInputsModal";
type Props = {
agent: LibraryAgent;
schedule: GraphExecutionJobInfo;
};
export function EditInputsModal({ agent, schedule }: Props) {
const {
isOpen,
setIsOpen,
inputFields,
values,
setValues,
handleSave,
isSaving,
} = useEditInputsModal(agent, schedule);
return (
<Dialog
controlled={{ isOpen, set: setIsOpen }}
styling={{ maxWidth: "32rem" }}
>
<Dialog.Trigger>
<Button
variant="ghost"
size="small"
className="absolute -right-2 -top-2"
>
<PencilSimpleIcon className="size-4" /> Edit inputs
</Button>
</Dialog.Trigger>
<Dialog.Content>
<div className="flex flex-col gap-4">
<Text variant="h3">Edit inputs</Text>
<div className="flex flex-col gap-4">
{Object.entries(inputFields).map(([key, fieldSchema]) => (
<div key={key} className="flex flex-col gap-1.5">
<label className="text-sm font-medium">
{fieldSchema?.title || key}
</label>
<RunAgentInputs
schema={fieldSchema as any}
value={values[key]}
onChange={(v) => setValues((prev) => ({ ...prev, [key]: v }))}
/>
</div>
))}
</div>
</div>
<Dialog.Footer>
<div className="flex w-full justify-end gap-2">
<Button
variant="secondary"
size="small"
onClick={() => setIsOpen(false)}
className="min-w-32"
>
Cancel
</Button>
<Button
variant="primary"
size="small"
onClick={handleSave}
loading={isSaving}
className="min-w-32"
>
{isSaving ? "Saving…" : "Save"}
</Button>
</div>
</Dialog.Footer>
</Dialog.Content>
</Dialog>
);
}

View File

@@ -0,0 +1,78 @@
"use client";
import { useMemo, useState } from "react";
import { useQueryClient } from "@tanstack/react-query";
import { getGetV1ListExecutionSchedulesForAGraphQueryKey } from "@/app/api/__generated__/endpoints/schedules/schedules";
import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import type { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo";
import { useToast } from "@/components/molecules/Toast/use-toast";
function getAgentInputFields(agent: LibraryAgent): Record<string, any> {
const schema = agent.input_schema as unknown as {
properties?: Record<string, any>;
} | null;
if (!schema || !schema.properties) return {};
const properties = schema.properties as Record<string, any>;
const visibleEntries = Object.entries(properties).filter(
([, sub]) => !sub?.hidden,
);
return Object.fromEntries(visibleEntries);
}
export function useEditInputsModal(
agent: LibraryAgent,
schedule: GraphExecutionJobInfo,
) {
const queryClient = useQueryClient();
const { toast } = useToast();
const [isOpen, setIsOpen] = useState(false);
const [isSaving, setIsSaving] = useState(false);
const inputFields = useMemo(() => getAgentInputFields(agent), [agent]);
const [values, setValues] = useState<Record<string, any>>({
...(schedule.input_data as Record<string, any>),
});
async function handleSave() {
setIsSaving(true);
try {
const res = await fetch(`/api/schedules/${schedule.id}`, {
method: "PATCH",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ inputs: values }),
});
if (!res.ok) {
let message = "Failed to update schedule inputs";
try {
const data = await res.json();
message = data?.message || data?.detail || message;
} catch {
// Non-JSON error body: keep the generic message.
}
throw new Error(message);
}
await queryClient.invalidateQueries({
queryKey: getGetV1ListExecutionSchedulesForAGraphQueryKey(
schedule.graph_id,
),
});
toast({
title: "Schedule inputs updated",
});
setIsOpen(false);
} catch (error: any) {
toast({
title: "Failed to update schedule inputs",
description: error?.message || "An unexpected error occurred.",
variant: "destructive",
});
}
setIsSaving(false);
}
return {
isOpen,
setIsOpen,
inputFields,
values,
setValues,
handleSave,
isSaving,
} as const;
}

View File

@@ -25,10 +25,9 @@ export function SelectedScheduleActions({ agent, scheduleId }: Props) {
<Button
variant="icon"
size="icon"
aria-label="Open in builder"
as="NextLink"
href={openInBuilderHref}
target="_blank"
aria-label="View scheduled task details"
>
<EyeIcon weight="bold" size={18} className="text-zinc-700" />
</Button>

View File

@@ -4,6 +4,7 @@ import type { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExe
import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import { Input } from "@/components/atoms/Input/Input";
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip";
import {
getAgentCredentialsFields,
getAgentInputFields,
@@ -137,13 +138,25 @@ export function SelectedTemplateView({
<RunDetailCard title="Your Input">
<div className="flex flex-col gap-4">
{inputFields.map(([key, inputSubSchema]) => (
<RunAgentInputs
<div
key={key}
schema={inputSubSchema}
value={inputs[key] ?? inputSubSchema.default}
placeholder={inputSubSchema.description}
onChange={(value) => setInputValue(key, value)}
/>
className="flex w-full flex-col gap-0 space-y-2"
>
<label className="flex items-center gap-1 text-sm font-medium">
{inputSubSchema.title || key}
{inputSubSchema.description && (
<InformationTooltip
description={inputSubSchema.description}
/>
)}
</label>
<RunAgentInputs
schema={inputSubSchema}
value={inputs[key] ?? inputSubSchema.default}
placeholder={inputSubSchema.description}
onChange={(value) => setInputValue(key, value)}
/>
</div>
))}
</div>
</RunDetailCard>

View File

@@ -3,6 +3,7 @@
import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import { Input } from "@/components/atoms/Input/Input";
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip";
import {
getAgentCredentialsFields,
getAgentInputFields,
@@ -130,13 +131,25 @@ export function SelectedTriggerView({
<RunDetailCard title="Your Input">
<div className="flex flex-col gap-4">
{inputFields.map(([key, inputSubSchema]) => (
<RunAgentInputs
<div
key={key}
schema={inputSubSchema}
value={inputs[key] ?? inputSubSchema.default}
placeholder={inputSubSchema.description}
onChange={(value) => setInputValue(key, value)}
/>
className="flex w-full flex-col gap-0 space-y-2"
>
<label className="flex items-center gap-1 text-sm font-medium">
{inputSubSchema.title || key}
{inputSubSchema.description && (
<InformationTooltip
description={inputSubSchema.description}
/>
)}
</label>
<RunAgentInputs
schema={inputSubSchema}
value={inputs[key] ?? inputSubSchema.default}
placeholder={inputSubSchema.description}
onChange={(value) => setInputValue(key, value)}
/>
</div>
))}
</div>
</RunDetailCard>

View File

@@ -680,20 +680,28 @@ export function AgentRunDraftView({
{/* Regular inputs */}
{Object.entries(agentInputFields).map(([key, inputSubSchema]) => (
<RunAgentInputs
key={key}
schema={inputSubSchema}
value={inputValues[key] ?? inputSubSchema.default}
placeholder={inputSubSchema.description}
onChange={(value) => {
setInputValues((obj) => ({
...obj,
[key]: value,
}));
setChangedPresetAttributes((prev) => prev.add("inputs"));
}}
data-testid={`agent-input-${key}`}
/>
<div key={key} className="flex flex-col space-y-2">
<label className="flex items-center gap-1 text-sm font-medium">
{inputSubSchema.title || key}
<InformationTooltip
description={inputSubSchema.description}
/>
</label>
<RunAgentInputs
schema={inputSubSchema}
value={inputValues[key] ?? inputSubSchema.default}
placeholder={inputSubSchema.description}
onChange={(value) => {
setInputValues((obj) => ({
...obj,
[key]: value,
}));
setChangedPresetAttributes((prev) => prev.add("inputs"));
}}
data-testid={`agent-input-${key}`}
/>
</div>
))}
</CardContent>
</Card>

View File

@@ -2690,7 +2690,7 @@
"get": {
"tags": ["v2", "store", "public"],
"summary": "List store agents",
"description": "Get a paginated list of agents from the store with optional filtering and sorting.\n\nWhen search_query is provided, uses hybrid search combining:\n- BM25 full-text search (lexical matching)\n- Vector semantic similarity (meaning-based matching)\n- Popularity signal (run counts)\n\nResults are ranked using Reciprocal Rank Fusion (RRF).\n\nArgs:\n featured (bool, optional): Filter to only show featured agents. Defaults to False.\n creator (str | None, optional): Filter agents by creator username. Defaults to None.\n sorted_by (str | None, optional): Sort agents by \"runs\" or \"rating\". Defaults to None.\n search_query (str | None, optional): Search agents by name, subheading and description.\n category (str | None, optional): Filter agents by category. Defaults to None.\n page (int, optional): Page number for pagination. Defaults to 1.\n page_size (int, optional): Number of agents per page. Defaults to 20.\n filter_mode (str, optional): Controls result filtering when searching:\n - \"strict\": Must match BOTH BM25 AND vector thresholds\n - \"permissive\": Must match EITHER BM25 OR vector threshold\n - \"combined\": No threshold filtering, rely on RRF score (default)\n\nReturns:\n StoreAgentsResponse: Paginated list of agents matching the filters\n\nRaises:\n HTTPException: If page or page_size are less than 1\n\nUsed for:\n- Home Page Featured Agents\n- Home Page Top Agents\n- Search Results\n- Agent Details - Other Agents By Creator\n- Agent Details - Similar Agents\n- Creator Details - Agents By Creator",
"description": "Get a paginated list of agents from the store with optional filtering and sorting.\n\nArgs:\n featured (bool, optional): Filter to only show featured agents. Defaults to False.\n creator (str | None, optional): Filter agents by creator username. Defaults to None.\n sorted_by (str | None, optional): Sort agents by \"runs\" or \"rating\". Defaults to None.\n search_query (str | None, optional): Search agents by name, subheading and description. Defaults to None.\n category (str | None, optional): Filter agents by category. Defaults to None.\n page (int, optional): Page number for pagination. Defaults to 1.\n page_size (int, optional): Number of agents per page. Defaults to 20.\n\nReturns:\n StoreAgentsResponse: Paginated list of agents matching the filters\n\nRaises:\n HTTPException: If page or page_size are less than 1\n\nUsed for:\n- Home Page Featured Agents\n- Home Page Top Agents\n- Search Results\n- Agent Details - Other Agents By Creator\n- Agent Details - Similar Agents\n- Creator Details - Agents By Creator",
"operationId": "getV2List store agents",
"parameters": [
{
@@ -2756,17 +2756,6 @@
"in": "query",
"required": false,
"schema": { "type": "integer", "default": 20, "title": "Page Size" }
},
{
"name": "filter_mode",
"in": "query",
"required": false,
"schema": {
"enum": ["strict", "permissive", "combined"],
"type": "string",
"default": "permissive",
"title": "Filter Mode"
}
}
],
"responses": {

View File

@@ -6,10 +6,6 @@ import {
import { environment } from "@/services/environment";
import { NextRequest, NextResponse } from "next/server";
// Increase body size limit to 256MB to match backend file upload limit
export const maxDuration = 300; // 5 minutes timeout for large uploads
export const dynamic = "force-dynamic";
function buildBackendUrl(path: string[], queryString: string): string {
const backendPath = path.join("/");
return `${environment.getAGPTServerBaseUrl()}/${backendPath}${queryString}`;

View File

@@ -1,33 +1,36 @@
"use client";
import { TooltipProvider } from "@/components/atoms/Tooltip/BaseTooltip";
import { SentryUserTracker } from "@/components/monitor/SentryUserTracker";
import { LaunchDarklyProvider } from "@/services/feature-flags/feature-flag-provider";
import OnboardingProvider from "@/providers/onboarding/onboarding-provider";
import { BackendAPIProvider } from "@/lib/autogpt-server-api/context";
import { getQueryClient } from "@/lib/react-query/queryClient";
import CredentialsProvider from "@/providers/agent-credentials/credentials-provider";
import OnboardingProvider from "@/providers/onboarding/onboarding-provider";
import { LaunchDarklyProvider } from "@/services/feature-flags/feature-flag-provider";
import { QueryClientProvider } from "@tanstack/react-query";
import { ThemeProvider, ThemeProviderProps } from "next-themes";
import {
ThemeProvider as NextThemesProvider,
ThemeProviderProps,
} from "next-themes";
import { NuqsAdapter } from "nuqs/adapters/next/app";
import { TooltipProvider } from "@/components/atoms/Tooltip/BaseTooltip";
import CredentialsProvider from "@/providers/agent-credentials/credentials-provider";
import { SentryUserTracker } from "@/components/monitor/SentryUserTracker";
export function Providers({ children, ...props }: ThemeProviderProps) {
const queryClient = getQueryClient();
return (
<QueryClientProvider client={queryClient}>
<NuqsAdapter>
<BackendAPIProvider>
<SentryUserTracker />
<CredentialsProvider>
<LaunchDarklyProvider>
<OnboardingProvider>
<ThemeProvider forcedTheme="light" {...props}>
<NextThemesProvider {...props}>
<BackendAPIProvider>
<SentryUserTracker />
<CredentialsProvider>
<LaunchDarklyProvider>
<OnboardingProvider>
<TooltipProvider>{children}</TooltipProvider>
</ThemeProvider>
</OnboardingProvider>
</LaunchDarklyProvider>
</CredentialsProvider>
</BackendAPIProvider>
</OnboardingProvider>
</LaunchDarklyProvider>
</CredentialsProvider>
</BackendAPIProvider>
</NextThemesProvider>
</NuqsAdapter>
</QueryClientProvider>
);

View File

@@ -1,157 +0,0 @@
import type { Meta, StoryObj } from "@storybook/nextjs";
import { OverflowText } from "./OverflowText";
const meta: Meta<typeof OverflowText> = {
title: "Atoms/OverflowText",
component: OverflowText,
tags: ["autodocs"],
parameters: {
layout: "centered",
docs: {
description: {
component:
"Text component that automatically truncates overflowing content with ellipsis and shows a tooltip on hover when truncated. Supports both string and ReactNode values.",
},
},
},
argTypes: {
value: {
control: "text",
description: "The text content to display (string or ReactNode)",
},
className: {
control: "text",
description: "Additional CSS classes to customize styling",
},
},
args: {
value: "This is a sample text that may overflow",
className: "",
},
};
export default meta;
type Story = StoryObj<typeof meta>;
export const Default: Story = {
render: function DefaultOverflowText(args) {
return (
<div className="w-64">
<OverflowText {...args} />
</div>
);
},
};
export const ShortText: Story = {
args: {
value: "Short text",
},
render: function ShortTextStory(args) {
return (
<div className="w-64">
<OverflowText {...args} />
</div>
);
},
};
export const LongText: Story = {
args: {
value:
"This is a very long text that will definitely overflow and show a tooltip when you hover over it",
},
render: function LongTextStory(args) {
return (
<div className="w-64">
<OverflowText {...args} />
</div>
);
},
};
export const CustomStyling: Story = {
args: {
value: "Text with custom styling",
className: "text-lg font-semibold text-indigo-600",
},
render: function CustomStylingStory(args) {
return (
<div className="w-64">
<OverflowText {...args} />
</div>
);
},
};
export const WithReactNode: Story = {
args: {
value: (
<span>
Text with <strong>bold</strong> and <em>italic</em> content
</span>
),
},
render: function WithReactNodeStory(args) {
return (
<div className="w-64">
<OverflowText {...args} />
</div>
);
},
};
export const DifferentWidths: Story = {
render: function DifferentWidthsStory() {
const longText =
"This text will truncate differently depending on the container width";
return (
<div className="flex flex-col gap-8">
<div className="flex flex-col gap-2">
<span className="text-xs text-zinc-500">Width: 200px</span>
<div className="w-[200px]">
<OverflowText value={longText} variant="body" />
</div>
</div>
<div className="flex flex-col gap-2">
<span className="text-xs text-zinc-500">Width: 300px</span>
<div className="w-[300px]">
<OverflowText value={longText} variant="body" />
</div>
</div>
<div className="flex flex-col gap-2">
<span className="text-xs text-zinc-500">Width: 400px</span>
<div className="w-[400px]">
<OverflowText value={longText} variant="body" />
</div>
</div>
</div>
);
},
};
export const FilePathExample: Story = {
args: {
value: "/very/long/path/to/a/file/that/might/overflow/in/the/ui.tsx",
},
render: function FilePathExampleStory(args) {
return (
<div className="w-64">
<OverflowText {...args} className="font-mono text-sm" />
</div>
);
},
};
export const URLExample: Story = {
args: {
value: "https://example.com/very/long/url/path/that/might/overflow",
},
render: function URLExampleStory(args) {
return (
<div className="w-64">
<OverflowText {...args} className="text-blue-600" />
</div>
);
},
};

View File

@@ -1,100 +0,0 @@
import { Text, type TextProps } from "@/components/atoms/Text/Text";
import {
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import { cn } from "@/lib/utils";
import type { ReactNode } from "react";
import { useEffect, useRef, useState } from "react";
interface Props extends Omit<TextProps, "children"> {
value: string | ReactNode;
}
export function OverflowText(props: Props) {
const elementRef = useRef<HTMLSpanElement | null>(null);
const [isTruncated, setIsTruncated] = useState(false);
function updateTruncation() {
const element = elementRef.current;
if (!element) {
return;
}
const hasOverflow = element.scrollWidth > element.clientWidth;
setIsTruncated(hasOverflow);
}
function setupResizeListener() {
function handleResize() {
updateTruncation();
}
window.addEventListener("resize", handleResize);
return function cleanupResizeListener() {
window.removeEventListener("resize", handleResize);
};
}
function setupObserver() {
const element = elementRef.current;
if (!element || typeof ResizeObserver === "undefined") {
return undefined;
}
function handleResizeObserver() {
updateTruncation();
}
const observer = new ResizeObserver(handleResizeObserver);
observer.observe(element);
return function disconnectObserver() {
observer.disconnect();
};
}
useEffect(() => {
if (typeof props.value === "string") updateTruncation();
}, [props.value]);
useEffect(setupResizeListener, []);
useEffect(setupObserver, []);
const { value, className, variant = "body", ...restProps } = props;
const content = (
<span
ref={elementRef}
className={cn(
"block min-w-0 overflow-hidden text-ellipsis whitespace-nowrap",
)}
>
<Text variant={variant} className={className} {...restProps}>
{value}
</Text>
</span>
);
if (isTruncated) {
return (
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>{content}</TooltipTrigger>
<TooltipContent>
{typeof value === "string" ? <p>{value}</p> : value}
</TooltipContent>
</Tooltip>
</TooltipProvider>
);
}
return content;
}

View File

@@ -5,7 +5,7 @@ import { Cross2Icon } from "@radix-ui/react-icons";
import React, { useCallback } from "react";
import { GoogleDrivePicker } from "./GoogleDrivePicker";
export interface Props {
export interface GoogleDrivePickerInputProps {
config: GoogleDrivePickerConfig;
value: any;
onChange: (value: any) => void;
@@ -21,7 +21,7 @@ export function GoogleDrivePickerInput({
error,
className,
showRemoveButton = true,
}: Props) {
}: GoogleDrivePickerInputProps) {
const [pickerError, setPickerError] = React.useState<string | null>(null);
const isMultiSelect = config.multiselect || false;
const hasAutoCredentials = !!config.auto_credentials;

View File

@@ -19,7 +19,7 @@ export function MobileNavbarMenuItem({
onClick,
}: Props) {
const content = (
<div className="inline-flex w-full items-center justify-start gap-4 py-2 hover:rounded hover:bg-[#e0e0e0]">
<div className="inline-flex w-full items-center justify-start gap-4 hover:rounded hover:bg-[#e0e0e0]">
{getAccountMenuOptionIcon(icon)}
<div className="relative">
<div

View File

@@ -3,7 +3,8 @@ const commonStyles = {
title: "font-poppins text-md md:text-lg leading-none",
overlay:
"fixed inset-0 z-50 bg-stone-500/20 dark:bg-black/50 backdrop-blur-md animate-fade-in",
content: "bg-white p-6 fixed rounded-2xlarge flex flex-col z-50 w-full",
content:
"overflow-y-hidden bg-white p-6 fixed rounded-2xlarge flex flex-col z-50 w-full",
};
// Modal specific styles

View File

@@ -9,20 +9,16 @@ import ReactMarkdown from "react-markdown";
type Props = {
description?: string;
iconSize?: number;
};
export function InformationTooltip({ description, iconSize = 24 }: Props) {
export function InformationTooltip({ description }: Props) {
if (!description) return null;
return (
<TooltipProvider delayDuration={400}>
<Tooltip>
<TooltipTrigger asChild>
<Info
className="rounded-full p-1 hover:bg-slate-50"
size={iconSize}
/>
<Info className="rounded-full p-1 hover:bg-slate-50" size={24} />
</TooltipTrigger>
<TooltipContent>
<ReactMarkdown

View File

@@ -1,437 +0,0 @@
import type { Meta, StoryObj } from "@storybook/nextjs";
import {
ScrollableTabs,
ScrollableTabsContent,
ScrollableTabsList,
ScrollableTabsTrigger,
} from "./ScrollableTabs";
const meta = {
title: "Molecules/ScrollableTabs",
component: ScrollableTabs,
parameters: {
layout: "fullscreen",
},
tags: ["autodocs"],
argTypes: {},
} satisfies Meta<typeof ScrollableTabs>;
export default meta;
type Story = StoryObj<typeof meta>;
function ScrollableTabsDemo() {
return (
<div className="flex flex-col gap-8 p-8">
<h2 className="text-2xl font-bold">ScrollableTabs Examples</h2>
<div className="space-y-6">
<div>
<h3 className="mb-4 text-lg font-semibold">
Short Content (Tabs Hidden)
</h3>
<div className="h-[300px] overflow-y-auto border border-zinc-200">
<ScrollableTabs defaultValue="tab1" className="h-full">
<ScrollableTabsList>
<ScrollableTabsTrigger value="tab1">
Account
</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="tab2">
Password
</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="tab3">
Settings
</ScrollableTabsTrigger>
</ScrollableTabsList>
<ScrollableTabsContent value="tab1">
<div className="p-4 text-sm">
Make changes to your account here. Click save when you&apos;re
done.
</div>
</ScrollableTabsContent>
<ScrollableTabsContent value="tab2">
<div className="p-4 text-sm">
Change your password here. After saving, you&apos;ll be logged
out.
</div>
</ScrollableTabsContent>
<ScrollableTabsContent value="tab3">
<div className="p-4 text-sm">
Update your preferences and settings here.
</div>
</ScrollableTabsContent>
</ScrollableTabs>
</div>
</div>
<div>
<h3 className="mb-4 text-lg font-semibold">
Long Content (Tabs Visible)
</h3>
<div className="h-[400px] overflow-y-auto border border-zinc-200">
<ScrollableTabs defaultValue="tab1" className="h-full">
<ScrollableTabsList>
<ScrollableTabsTrigger value="tab1">
Account
</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="tab2">
Password
</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="tab3">
Settings
</ScrollableTabsTrigger>
</ScrollableTabsList>
<ScrollableTabsContent value="tab1">
<div className="p-8 text-sm">
<h4 className="mb-4 text-lg font-semibold">
Account Settings
</h4>
<p className="mb-4">
Make changes to your account here. Click save when
you&apos;re done.
</p>
<p className="mb-4">
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed
do eiusmod tempor incididunt ut labore et dolore magna
aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris.
</p>
<p className="mb-4">
Duis aute irure dolor in reprehenderit in voluptate velit
esse cillum dolore eu fugiat nulla pariatur. Excepteur sint
occaecat cupidatat non proident.
</p>
<p>
Sed ut perspiciatis unde omnis iste natus error sit
voluptatem accusantium doloremque laudantium, totam rem
aperiam.
</p>
</div>
</ScrollableTabsContent>
<ScrollableTabsContent value="tab2">
<div className="p-8 text-sm">
<h4 className="mb-4 text-lg font-semibold">
Password Settings
</h4>
<p className="mb-4">
Change your password here. After saving, you&apos;ll be
logged out.
</p>
<p className="mb-4">
At vero eos et accusamus et iusto odio dignissimos ducimus
qui blanditiis praesentium voluptatum deleniti atque
corrupti quos dolores et quas molestias excepturi sint
occaecati cupiditate.
</p>
<p className="mb-4">
Et harum quidem rerum facilis est et expedita distinctio.
Nam libero tempore, cum soluta nobis est eligendi optio
cumque nihil impedit quo minus.
</p>
<p>
Temporibus autem quibusdam et aut officiis debitis aut rerum
necessitatibus saepe eveniet ut et voluptates repudiandae
sint.
</p>
</div>
</ScrollableTabsContent>
<ScrollableTabsContent value="tab3">
<div className="p-8 text-sm">
<h4 className="mb-4 text-lg font-semibold">
General Settings
</h4>
<p className="mb-4">
Update your preferences and settings here.
</p>
<p className="mb-4">
Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut
odit aut fugit, sed quia consequuntur magni dolores eos qui
ratione voluptatem sequi nesciunt.
</p>
<p className="mb-4">
Neque porro quisquam est, qui dolorem ipsum quia dolor sit
amet, consectetur, adipisci velit, sed quia non numquam eius
modi tempora incidunt ut labore et dolore magnam aliquam
quaerat voluptatem.
</p>
<p>
Ut enim ad minima veniam, quis nostrum exercitationem ullam
corporis suscipit laboriosam, nisi ut aliquid ex ea commodi
consequatur.
</p>
</div>
</ScrollableTabsContent>
</ScrollableTabs>
</div>
</div>
<div>
<h3 className="mb-4 text-lg font-semibold">Many Tabs</h3>
<div className="h-[500px] overflow-y-auto border border-zinc-200">
<ScrollableTabs defaultValue="overview" className="h-full">
<ScrollableTabsList>
<ScrollableTabsTrigger value="overview">
Overview
</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="analytics">
Analytics
</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="reports">
Reports
</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="notifications">
Notifications
</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="integrations">
Integrations
</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="billing">
Billing
</ScrollableTabsTrigger>
</ScrollableTabsList>
<ScrollableTabsContent value="overview">
<div className="p-8 text-sm">
<h4 className="mb-4 text-lg font-semibold">
Dashboard Overview
</h4>
<p className="mb-4">
Dashboard overview with key metrics and recent activity.
</p>
<p className="mb-4">
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed
do eiusmod tempor incididunt ut labore et dolore magna
aliqua.
</p>
<p>
Ut enim ad minim veniam, quis nostrud exercitation ullamco
laboris nisi ut aliquip ex ea commodo consequat.
</p>
</div>
</ScrollableTabsContent>
<ScrollableTabsContent value="analytics">
<div className="p-8 text-sm">
<h4 className="mb-4 text-lg font-semibold">Analytics</h4>
<p className="mb-4">
Detailed analytics and performance metrics.
</p>
<p className="mb-4">
Duis aute irure dolor in reprehenderit in voluptate velit
esse cillum dolore eu fugiat nulla pariatur.
</p>
<p>
Excepteur sint occaecat cupidatat non proident, sunt in
culpa qui officia deserunt mollit anim id est laborum.
</p>
</div>
</ScrollableTabsContent>
<ScrollableTabsContent value="reports">
<div className="p-8 text-sm">
<h4 className="mb-4 text-lg font-semibold">Reports</h4>
<p className="mb-4">
Generate and view reports for your account.
</p>
<p className="mb-4">
Sed ut perspiciatis unde omnis iste natus error sit
voluptatem accusantium doloremque laudantium.
</p>
<p>
Totam rem aperiam, eaque ipsa quae ab illo inventore
veritatis et quasi architecto beatae vitae dicta sunt
explicabo.
</p>
</div>
</ScrollableTabsContent>
<ScrollableTabsContent value="notifications">
<div className="p-8 text-sm">
<h4 className="mb-4 text-lg font-semibold">Notifications</h4>
<p className="mb-4">Manage your notification preferences.</p>
<p className="mb-4">
Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut
odit aut fugit.
</p>
<p>
Sed quia consequuntur magni dolores eos qui ratione
voluptatem sequi nesciunt.
</p>
</div>
</ScrollableTabsContent>
<ScrollableTabsContent value="integrations">
<div className="p-8 text-sm">
<h4 className="mb-4 text-lg font-semibold">Integrations</h4>
<p className="mb-4">
Connect and manage third-party integrations.
</p>
<p className="mb-4">
Neque porro quisquam est, qui dolorem ipsum quia dolor sit
amet.
</p>
<p>
Consectetur, adipisci velit, sed quia non numquam eius modi
tempora incidunt.
</p>
</div>
</ScrollableTabsContent>
<ScrollableTabsContent value="billing">
<div className="p-8 text-sm">
<h4 className="mb-4 text-lg font-semibold">Billing</h4>
<p className="mb-4">
View and manage your billing information.
</p>
<p className="mb-4">
Ut enim ad minima veniam, quis nostrum exercitationem ullam
corporis suscipit laboriosam.
</p>
<p>
Nisi ut aliquid ex ea commodi consequatur? Quis autem vel
eum iure reprehenderit qui in ea voluptate velit esse.
</p>
</div>
</ScrollableTabsContent>
</ScrollableTabs>
</div>
</div>
</div>
</div>
);
}
export const Default = {
render: () => <ScrollableTabsDemo />,
} satisfies Story;
export const ShortContent = {
render: () => (
<div className="p-8">
<div className="h-[200px] overflow-y-auto border border-zinc-200">
<ScrollableTabs defaultValue="account" className="h-full">
<ScrollableTabsList>
<ScrollableTabsTrigger value="account">
Account
</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="password">
Password
</ScrollableTabsTrigger>
</ScrollableTabsList>
<ScrollableTabsContent value="account">
<div className="p-4 text-sm">
Make changes to your account here. Click save when you&apos;re
done.
</div>
</ScrollableTabsContent>
<ScrollableTabsContent value="password">
<div className="p-4 text-sm">
Change your password here. After saving, you&apos;ll be logged
out.
</div>
</ScrollableTabsContent>
</ScrollableTabs>
</div>
</div>
),
} satisfies Story;
export const LongContent = {
render: () => (
<div className="p-8">
<div className="h-[600px] overflow-y-auto border border-zinc-200">
<ScrollableTabs defaultValue="tab1" className="h-full">
<ScrollableTabsList>
<ScrollableTabsTrigger value="tab1">Account</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="tab2">Password</ScrollableTabsTrigger>
<ScrollableTabsTrigger value="tab3">Settings</ScrollableTabsTrigger>
</ScrollableTabsList>
<ScrollableTabsContent value="tab1">
<div className="p-8 text-sm">
<h4 className="mb-4 text-lg font-semibold">Account Settings</h4>
<p className="mb-4">
Make changes to your account here. Click save when you&apos;re
done.
</p>
<p className="mb-4">
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do
eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut
enim ad minim veniam, quis nostrud exercitation ullamco laboris
nisi ut aliquip ex ea commodo consequat.
</p>
<p className="mb-4">
Duis aute irure dolor in reprehenderit in voluptate velit esse
cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat
cupidatat non proident, sunt in culpa qui officia deserunt
mollit anim id est laborum.
</p>
<p className="mb-4">
Sed ut perspiciatis unde omnis iste natus error sit voluptatem
accusantium doloremque laudantium, totam rem aperiam, eaque ipsa
quae ab illo inventore veritatis et quasi architecto beatae
vitae dicta sunt explicabo.
</p>
<p>
Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit
aut fugit, sed quia consequuntur magni dolores eos qui ratione
voluptatem sequi nesciunt.
</p>
</div>
</ScrollableTabsContent>
<ScrollableTabsContent value="tab2">
<div className="p-8 text-sm">
<h4 className="mb-4 text-lg font-semibold">Password Settings</h4>
<p className="mb-4">
Change your password here. After saving, you&apos;ll be logged
out.
</p>
<p className="mb-4">
At vero eos et accusamus et iusto odio dignissimos ducimus qui
blanditiis praesentium voluptatum deleniti atque corrupti quos
dolores et quas molestias excepturi sint occaecati cupiditate
non provident.
</p>
<p className="mb-4">
Similique sunt in culpa qui officia deserunt mollitia animi, id
est laborum et dolorum fuga. Et harum quidem rerum facilis est
et expedita distinctio.
</p>
<p className="mb-4">
Nam libero tempore, cum soluta nobis est eligendi optio cumque
nihil impedit quo minus id quod maxime placeat facere possimus,
omnis voluptas assumenda est, omnis dolor repellendus.
</p>
<p>
Temporibus autem quibusdam et aut officiis debitis aut rerum
necessitatibus saepe eveniet ut et voluptates repudiandae sint
et molestiae non recusandae.
</p>
</div>
</ScrollableTabsContent>
<ScrollableTabsContent value="tab3">
<div className="p-8 text-sm">
<h4 className="mb-4 text-lg font-semibold">General Settings</h4>
<p className="mb-4">Update your preferences and settings here.</p>
<p className="mb-4">
Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet,
consectetur, adipisci velit, sed quia non numquam eius modi
tempora incidunt ut labore et dolore magnam aliquam quaerat
voluptatem.
</p>
<p className="mb-4">
Ut enim ad minima veniam, quis nostrum exercitationem ullam
corporis suscipit laboriosam, nisi ut aliquid ex ea commodi
consequatur? Quis autem vel eum iure reprehenderit qui in ea
voluptate velit esse quam nihil molestiae consequatur.
</p>
<p className="mb-4">
Vel illum qui dolorem eum fugiat quo voluptas nulla pariatur? At
vero eos et accusamus et iusto odio dignissimos ducimus qui
blanditiis praesentium voluptatum deleniti atque corrupti quos
dolores.
</p>
<p>
Et quas molestias excepturi sint occaecati cupiditate non
provident, similique sunt in culpa qui officia deserunt mollitia
animi, id est laborum et dolorum fuga.
</p>
</div>
</ScrollableTabsContent>
</ScrollableTabs>
</div>
</div>
),
} satisfies Story;

View File

@@ -1,59 +0,0 @@
"use client";
import { cn } from "@/lib/utils";
import { Children } from "react";
import { ScrollableTabsContent } from "./components/ScrollableTabsContent";
import { ScrollableTabsList } from "./components/ScrollableTabsList";
import { ScrollableTabsTrigger } from "./components/ScrollableTabsTrigger";
import { ScrollableTabsContext } from "./context";
import { findContentElements, findListElement } from "./helpers";
import { useScrollableTabsInternal } from "./useScrollableTabs";
interface Props {
children?: React.ReactNode;
className?: string;
defaultValue?: string;
}
export function ScrollableTabs({ children, className, defaultValue }: Props) {
const {
activeValue,
setActiveValue,
registerContent,
scrollToSection,
scrollContainer,
contentContainerRef,
} = useScrollableTabsInternal({ defaultValue });
const childrenArray = Children.toArray(children);
const listElement = findListElement(childrenArray);
const contentElements = findContentElements(childrenArray);
return (
<ScrollableTabsContext.Provider
value={{
activeValue,
setActiveValue,
registerContent,
scrollToSection,
scrollContainer,
}}
>
<div className={cn("relative flex flex-col", className)}>
{listElement}
<div
ref={(node) => {
if (contentContainerRef) {
contentContainerRef.current = node;
}
}}
className="max-h-[64rem] overflow-y-auto scrollbar-thin scrollbar-track-transparent scrollbar-thumb-zinc-300 dark:scrollbar-thumb-zinc-700"
>
<div className="min-h-full pb-[200px]">{contentElements}</div>
</div>
</div>
</ScrollableTabsContext.Provider>
);
}
export { ScrollableTabsContent, ScrollableTabsList, ScrollableTabsTrigger };

View File

@@ -1,48 +0,0 @@
"use client";
import { cn } from "@/lib/utils";
import * as React from "react";
import { useScrollableTabs } from "../context";
interface Props extends React.HTMLAttributes<HTMLDivElement> {
value: string;
}
export const ScrollableTabsContent = React.forwardRef<HTMLDivElement, Props>(
function ScrollableTabsContent(
{ className, value, children, ...props },
ref,
) {
const { registerContent } = useScrollableTabs();
const contentRef = React.useRef<HTMLDivElement>(null);
React.useEffect(() => {
if (contentRef.current) {
registerContent(value, contentRef.current);
}
return () => {
registerContent(value, null);
};
}, [value, registerContent]);
return (
<div
ref={(node) => {
if (typeof ref === "function") ref(node);
else if (ref) ref.current = node;
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-ignore
contentRef.current = node;
}}
data-scrollable-tab-content
data-value={value}
className={cn("focus-visible:outline-none", className)}
{...props}
>
{children}
</div>
);
},
);
ScrollableTabsContent.displayName = "ScrollableTabsContent";

View File

@@ -1,52 +0,0 @@
"use client";
import { cn } from "@/lib/utils";
import * as React from "react";
import { useScrollableTabs } from "../context";
export const ScrollableTabsList = React.forwardRef<
HTMLDivElement,
React.HTMLAttributes<HTMLDivElement>
>(function ScrollableTabsList({ className, children, ...props }, ref) {
const { activeValue } = useScrollableTabs();
const [activeTabElement, setActiveTabElement] =
React.useState<HTMLElement | null>(null);
React.useEffect(() => {
const activeButton = Array.from(
document.querySelectorAll<HTMLElement>(
'[data-scrollable-tab-trigger][data-value="' + activeValue + '"]',
),
)[0];
if (activeButton) {
setActiveTabElement(activeButton);
}
}, [activeValue]);
return (
<div className="relative" ref={ref}>
<div
className={cn(
"inline-flex w-full items-center justify-start border-b border-zinc-100",
className,
)}
{...props}
>
{children}
</div>
{activeTabElement && (
<div
className="transition-left transition-right absolute bottom-0 h-0.5 bg-purple-600 duration-200 ease-in-out"
style={{
left: activeTabElement.offsetLeft,
width: activeTabElement.offsetWidth,
willChange: "left, width",
}}
/>
)}
</div>
);
});
ScrollableTabsList.displayName = "ScrollableTabsList";

View File

@@ -1,53 +0,0 @@
"use client";
import { cn } from "@/lib/utils";
import * as React from "react";
import { useScrollableTabs } from "../context";
interface Props extends React.ButtonHTMLAttributes<HTMLButtonElement> {
value: string;
}
export const ScrollableTabsTrigger = React.forwardRef<HTMLButtonElement, Props>(
function ScrollableTabsTrigger(
{ className, value, children, ...props },
ref,
) {
const { activeValue, scrollToSection } = useScrollableTabs();
const elementRef = React.useRef<HTMLButtonElement>(null);
const isActive = activeValue === value;
function handleClick(e: React.MouseEvent<HTMLButtonElement>) {
e.preventDefault();
e.stopPropagation();
scrollToSection(value);
props.onClick?.(e);
}
return (
<button
type="button"
ref={(node) => {
if (typeof ref === "function") ref(node);
else if (ref) ref.current = node;
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-ignore
elementRef.current = node;
}}
data-scrollable-tab-trigger
data-value={value}
onClick={handleClick}
className={cn(
"relative inline-flex items-center justify-center whitespace-nowrap px-3 py-3 font-sans text-[0.875rem] font-medium leading-[1.5rem] text-zinc-700 transition-all focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-neutral-400 focus-visible:ring-offset-2 disabled:pointer-events-none disabled:opacity-50",
isActive && "text-purple-600",
className,
)}
{...props}
>
{children}
</button>
);
},
);
ScrollableTabsTrigger.displayName = "ScrollableTabsTrigger";

View File

@@ -1,22 +0,0 @@
import * as React from "react";
import { createContext, useContext } from "react";
interface ScrollableTabsContextValue {
activeValue: string | null;
setActiveValue: React.Dispatch<React.SetStateAction<string | null>>;
registerContent: (value: string, element: HTMLElement | null) => void;
scrollToSection: (value: string) => void;
scrollContainer: HTMLElement | null;
}
export const ScrollableTabsContext = createContext<
ScrollableTabsContextValue | undefined
>(undefined);
export function useScrollableTabs() {
const context = useContext(ScrollableTabsContext);
if (!context) {
throw new Error("useScrollableTabs must be used within a ScrollableTabs");
}
return context;
}

View File

@@ -1,48 +0,0 @@
import * as React from "react";
const HEADER_OFFSET = 100;
export function calculateScrollPosition(
elementRect: DOMRect,
containerRect: DOMRect,
currentScrollTop: number,
): number {
const elementTopRelativeToContainer =
elementRect.top - containerRect.top + currentScrollTop - HEADER_OFFSET;
return Math.max(0, elementTopRelativeToContainer);
}
function hasDisplayName(
type: unknown,
displayName: string,
): type is { displayName: string } {
return (
typeof type === "object" &&
type !== null &&
"displayName" in type &&
(type as { displayName: unknown }).displayName === displayName
);
}
export function findListElement(
children: React.ReactNode[],
): React.ReactElement | undefined {
return children.find(
(child) =>
React.isValidElement(child) &&
hasDisplayName(child.type, "ScrollableTabsList"),
) as React.ReactElement | undefined;
}
export function findContentElements(
children: React.ReactNode[],
): React.ReactNode[] {
return children.filter(
(child) =>
!(
React.isValidElement(child) &&
hasDisplayName(child.type, "ScrollableTabsList")
),
);
}
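
To make the removed calculateScrollPosition arithmetic concrete, a worked example with assumed numbers (illustrative only, not values from the app):

// The target section sits 420px from the viewport top, the scroll
// container 120px, and the container is already scrolled 250px.
const elementTop = 420; // elementRect.top
const containerTop = 120; // containerRect.top
const currentScrollTop = 250;
// (420 - 120) puts the section 300px below the container's visible top;
// adding currentScrollTop converts that to a content offset (550), and
// subtracting HEADER_OFFSET (100) leaves room for the sticky header:
const target = Math.max(0, elementTop - containerTop + currentScrollTop - 100);
// target === 450; scrollToSection then clamps this against maxScrollTop.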

View File

@@ -1,60 +0,0 @@
import { useCallback, useRef, useState } from "react";
import { calculateScrollPosition } from "./helpers";
interface Args {
defaultValue?: string;
}
export function useScrollableTabsInternal({ defaultValue }: Args) {
const [activeValue, setActiveValue] = useState<string | null>(
defaultValue || null,
);
const contentRefs = useRef<Map<string, HTMLElement>>(new Map());
const contentContainerRef = useRef<HTMLDivElement | null>(null);
function registerContent(value: string, element: HTMLElement | null) {
if (element) {
contentRefs.current.set(value, element);
} else {
contentRefs.current.delete(value);
}
}
function scrollToSection(value: string) {
const element = contentRefs.current.get(value);
const scrollContainer = contentContainerRef.current;
if (!element || !scrollContainer) return;
setActiveValue(value);
const containerRect = scrollContainer.getBoundingClientRect();
const elementRect = element.getBoundingClientRect();
const currentScrollTop = scrollContainer.scrollTop;
const scrollTop = calculateScrollPosition(
elementRect,
containerRect,
currentScrollTop,
);
const maxScrollTop =
scrollContainer.scrollHeight - scrollContainer.clientHeight;
const clampedScrollTop = Math.min(Math.max(0, scrollTop), maxScrollTop);
scrollContainer.scrollTo({
top: clampedScrollTop,
behavior: "smooth",
});
}
const memoizedRegisterContent = useCallback(registerContent, []);
const memoizedScrollToSection = useCallback(scrollToSection, []);
return {
activeValue,
setActiveValue,
registerContent: memoizedRegisterContent,
scrollToSection: memoizedScrollToSection,
scrollContainer: contentContainerRef.current,
contentContainerRef,
};
}

View File

@@ -23,7 +23,6 @@ import {
TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import { cn } from "@/lib/utils";
import { BlockUIType } from "@/app/(platform)/build/components/types";
type TypeOption = {
type: string;
@@ -48,14 +47,7 @@ export const AnyOfField = ({
onBlur,
onFocus,
}: FieldProps) => {
const handleId =
formContext.uiType === BlockUIType.AGENT
? (idSchema.$id ?? "")
.split("_")
.filter((p) => p !== "root" && p !== "properties" && p.length > 0)
.join("_") || ""
: generateHandleId(idSchema.$id ?? "");
const handleId = generateHandleId(idSchema.$id ?? "");
const updatedFormContext = { ...formContext, fromAnyOf: true };
const { nodeId, showHandles = true } = updatedFormContext;

View File

@@ -58,15 +58,7 @@ const FieldTemplate: React.FC<FieldTemplateProps> = ({
let handleId = null;
if (!isArrayItem) {
if (uiType === BlockUIType.AGENT) {
const parts = fieldId.split("_");
const filtered = parts.filter(
(p) => p !== "root" && p !== "properties" && p.length > 0,
);
handleId = filtered.join("_") || "";
} else {
handleId = generateHandleId(fieldId);
}
handleId = generateHandleId(fieldId);
} else {
handleId = arrayFieldHandleId;
}

View File

@@ -910,37 +910,7 @@ export default class BackendAPI {
reject(new Error("Invalid JSON response"));
}
} else {
// Handle file size errors with user-friendly message
if (xhr.status === 413) {
reject(new Error("File is too large — max size is 256MB"));
return;
}
// Try to parse error response for better messages
let errorMessage = `Upload failed (${xhr.status})`;
try {
const errorData = JSON.parse(xhr.responseText);
if (errorData.detail) {
if (
typeof errorData.detail === "string" &&
errorData.detail.includes("exceeds the maximum")
) {
const match = errorData.detail.match(
/maximum allowed size of (\d+)MB/,
);
const maxSize = match ? match[1] : "256";
errorMessage = `File is too large — max size is ${maxSize}MB`;
} else if (typeof errorData.detail === "string") {
errorMessage = errorData.detail;
}
} else if (errorData.error) {
errorMessage = errorData.error;
}
} catch {
// Keep default message if parsing fails
}
reject(new Error(errorMessage));
reject(new Error(`HTTP ${xhr.status}: ${xhr.statusText}`));
}
});

View File

@@ -184,11 +184,6 @@ export function serializeRequestBody(
}
export async function parseApiError(response: Response): Promise<string> {
// Handle 413 Payload Too Large with user-friendly message
if (response.status === 413) {
return "File is too large — max size is 256MB";
}
try {
const errorData = await response.clone().json();
@@ -210,16 +205,6 @@ export async function parseApiError(response: Response): Promise<string> {
return response.statusText; // Fallback to status text if no message
}
// Check for file size error from backend
if (
typeof errorData.detail === "string" &&
errorData.detail.includes("exceeds the maximum")
) {
const match = errorData.detail.match(/maximum allowed size of (\d+)MB/);
const maxSize = match ? match[1] : "256";
return `File is too large — max size is ${maxSize}MB`;
}
return errorData.detail || errorData.error || response.statusText;
} catch {
return response.statusText;

View File

@@ -1,10 +1,10 @@
import scrollbar from "tailwind-scrollbar";
import type { Config } from "tailwindcss";
import tailwindcssAnimate from "tailwindcss-animate";
import scrollbar from "tailwind-scrollbar";
import { colors } from "./src/components/styles/colors";
const config = {
darkMode: ["class", ".dark-mode"], // ignore dark: prefix classes for now until we fully support dark mode
darkMode: ["class"],
content: ["./src/**/*.{ts,tsx}"],
prefix: "",
theme: {