mirror of https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-02-17 10:12:02 -05:00

Compare commits (5 commits)
kpczerwins...otto/secrt
| Author | SHA1 | Date |
|---|---|---|
|  | 901b5e8b75 |  |
|  | 92ddb57460 |  |
|  | 55dcf9359a |  |
|  | 08c44ba872 |  |
|  | 44e341dccc |  |
@@ -1,16 +1,15 @@
import logging
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from difflib import SequenceMatcher
from typing import Sequence

import prisma
from prisma.enums import ContentType

import backend.api.features.library.db as library_db
import backend.api.features.library.model as library_model
import backend.api.features.store.db as store_db
import backend.api.features.store.model as store_model
from backend.api.features.store.hybrid_search import unified_hybrid_search
from backend.blocks import load_all_blocks
from backend.blocks._base import (
    AnyBlockSchema,
@@ -43,16 +42,6 @@ MAX_LIBRARY_AGENT_RESULTS = 100
MAX_MARKETPLACE_AGENT_RESULTS = 100
MIN_SCORE_FOR_FILTERED_RESULTS = 10.0

# Boost blocks over marketplace agents in search results
BLOCK_SCORE_BOOST = 50.0

# Block IDs to exclude from search results
EXCLUDED_BLOCK_IDS = frozenset(
    {
        "e189baac-8c20-45a1-94a7-55177ea42565",  # AgentExecutorBlock
    }
)

SearchResultItem = BlockInfo | library_model.LibraryAgent | store_model.StoreAgent


@@ -75,8 +64,8 @@ def get_block_categories(category_blocks: int = 3) -> list[BlockCategoryResponse

    for block_type in load_all_blocks().values():
        block: AnyBlockSchema = block_type()
        # Skip disabled and excluded blocks
        if block.disabled or block.id in EXCLUDED_BLOCK_IDS:
        # Skip disabled blocks
        if block.disabled:
            continue
        # Skip blocks that don't have categories (all should have at least one)
        if not block.categories:
@@ -127,9 +116,6 @@ def get_blocks(
        # Skip disabled blocks
        if block.disabled:
            continue
        # Skip excluded blocks
        if block.id in EXCLUDED_BLOCK_IDS:
            continue
        # Skip blocks that don't match the category
        if category and category not in {c.name.lower() for c in block.categories}:
            continue
@@ -269,25 +255,14 @@ async def _build_cached_search_results(
        "my_agents": 0,
    }

    # Use hybrid search when query is present, otherwise list all blocks
    if (include_blocks or include_integrations) and normalized_query:
        block_results, block_total, integration_total = await _hybrid_search_blocks(
            query=search_query,
            include_blocks=include_blocks,
            include_integrations=include_integrations,
        )
        scored_items.extend(block_results)
        total_items["blocks"] = block_total
        total_items["integrations"] = integration_total
    elif include_blocks or include_integrations:
        # No query - list all blocks using in-memory approach
        block_results, block_total, integration_total = _collect_block_results(
            include_blocks=include_blocks,
            include_integrations=include_integrations,
        )
        scored_items.extend(block_results)
        total_items["blocks"] = block_total
        total_items["integrations"] = integration_total
    block_results, block_total, integration_total = _collect_block_results(
        normalized_query=normalized_query,
        include_blocks=include_blocks,
        include_integrations=include_integrations,
    )
    scored_items.extend(block_results)
    total_items["blocks"] = block_total
    total_items["integrations"] = integration_total

    if include_library_agents:
        library_response = await library_db.list_library_agents(
@@ -332,14 +307,10 @@ async def _build_cached_search_results(

def _collect_block_results(
    *,
    normalized_query: str,
    include_blocks: bool,
    include_integrations: bool,
) -> tuple[list[_ScoredItem], int, int]:
    """
    Collect all blocks for listing (no search query).

    All blocks get BLOCK_SCORE_BOOST to prioritize them over marketplace agents.
    """
    results: list[_ScoredItem] = []
    block_count = 0
    integration_count = 0
@@ -352,10 +323,6 @@ def _collect_block_results(
        if block.disabled:
            continue

        # Skip excluded blocks
        if block.id in EXCLUDED_BLOCK_IDS:
            continue

        block_info = block.get_info()
        credentials = list(block.input_schema.get_credentials_fields().values())
        is_integration = len(credentials) > 0
@@ -365,6 +332,10 @@ def _collect_block_results(
        if not is_integration and not include_blocks:
            continue

        score = _score_block(block, block_info, normalized_query)
        if not _should_include_item(score, normalized_query):
            continue

        filter_type: FilterType = "integrations" if is_integration else "blocks"
        if is_integration:
            integration_count += 1
@@ -375,122 +346,8 @@ def _collect_block_results(
            _ScoredItem(
                item=block_info,
                filter_type=filter_type,
                score=BLOCK_SCORE_BOOST,
                sort_key=block_info.name.lower(),
            )
        )

    return results, block_count, integration_count


async def _hybrid_search_blocks(
    *,
    query: str,
    include_blocks: bool,
    include_integrations: bool,
) -> tuple[list[_ScoredItem], int, int]:
    """
    Search blocks using hybrid search with builder-specific filtering.

    Uses unified_hybrid_search for semantic + lexical search, then applies
    post-filtering for block/integration types and scoring adjustments.

    Scoring:
    - Base: hybrid relevance score (0-1) scaled to 0-100, plus BLOCK_SCORE_BOOST
      to prioritize blocks over marketplace agents in combined results
    - +30 for exact name match, +15 for prefix name match
    - +20 if the block has an LlmModel field and the query matches an LLM model name

    Args:
        query: The search query string
        include_blocks: Whether to include regular blocks
        include_integrations: Whether to include integration blocks

    Returns:
        Tuple of (scored_items, block_count, integration_count)
    """
    results: list[_ScoredItem] = []
    block_count = 0
    integration_count = 0

    if not include_blocks and not include_integrations:
        return results, block_count, integration_count

    normalized_query = query.strip().lower()

    # Fetch more results to account for post-filtering
    search_results, _ = await unified_hybrid_search(
        query=query,
        content_types=[ContentType.BLOCK],
        page=1,
        page_size=150,
        min_score=0.10,
    )

    # Load all blocks for getting BlockInfo
    all_blocks = load_all_blocks()

    for result in search_results:
        block_id = result["content_id"]

        # Skip excluded blocks
        if block_id in EXCLUDED_BLOCK_IDS:
            continue

        metadata = result.get("metadata", {})
        hybrid_score = result.get("relevance", 0.0)

        # Get the actual block class
        if block_id not in all_blocks:
            continue

        block_cls = all_blocks[block_id]
        block: AnyBlockSchema = block_cls()

        if block.disabled:
            continue

        # Check block/integration filter using metadata
        is_integration = metadata.get("is_integration", False)

        if is_integration and not include_integrations:
            continue
        if not is_integration and not include_blocks:
            continue

        # Get block info
        block_info = block.get_info()

        # Calculate final score: scale hybrid score and add builder-specific bonuses
        # Hybrid scores are 0-1, builder scores were 0-200+
        # Add BLOCK_SCORE_BOOST to prioritize blocks over marketplace agents
        final_score = hybrid_score * 100 + BLOCK_SCORE_BOOST

        # Add LLM model match bonus
        has_llm_field = metadata.get("has_llm_model_field", False)
        if has_llm_field and _matches_llm_model(block.input_schema, normalized_query):
            final_score += 20

        # Add exact/prefix match bonus for deterministic tie-breaking
        name = block_info.name.lower()
        if name == normalized_query:
            final_score += 30
        elif name.startswith(normalized_query):
            final_score += 15

        # Track counts
        filter_type: FilterType = "integrations" if is_integration else "blocks"
        if is_integration:
            integration_count += 1
        else:
            block_count += 1

        results.append(
            _ScoredItem(
                item=block_info,
                filter_type=filter_type,
                score=final_score,
                sort_key=name,
                score=score,
                sort_key=_get_item_name(block_info),
            )
        )
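To make the scoring rules in the docstring above concrete, here is a minimal, self-contained sketch of the bonus arithmetic. The constant and the bonus values (50, 20, 30, 15) come from this diff; the name, query, and relevance value are illustrative only:

BLOCK_SCORE_BOOST = 50.0

def sketch_final_score(
    hybrid_score: float, name: str, query: str, llm_match: bool
) -> float:
    # Scale the 0-1 hybrid relevance to 0-100, then boost blocks over agents
    score = hybrid_score * 100 + BLOCK_SCORE_BOOST
    if llm_match:
        score += 20  # block has an LlmModel field matching the query
    if name == query:
        score += 30  # exact name match
    elif name.startswith(query):
        score += 15  # prefix name match
    return score

# 0.5 * 100 + 50 = 100; an exact name match adds 30, giving 130
assert sketch_final_score(0.5, "calculator", "calculator", llm_match=False) == 130.0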
@@ -615,8 +472,6 @@ async def _get_static_counts():
        block: AnyBlockSchema = block_type()
        if block.disabled:
            continue
        if block.id in EXCLUDED_BLOCK_IDS:
            continue

        all_blocks += 1

@@ -652,6 +507,38 @@ def _matches_llm_model(schema_cls: type[BlockSchema], query: str) -> bool:
    return False


def _score_block(
    block: AnyBlockSchema,
    block_info: BlockInfo,
    normalized_query: str,
) -> float:
    if not normalized_query:
        return 0.0

    name = block_info.name.lower()
    description = block_info.description.lower()
    score = _score_primary_fields(name, description, normalized_query)

    category_text = " ".join(
        category.get("category", "").lower() for category in block_info.categories
    )
    score += _score_additional_field(category_text, normalized_query, 12, 6)

    credentials_info = block.input_schema.get_credentials_fields_info().values()
    provider_names = [
        provider.value.lower()
        for info in credentials_info
        for provider in info.provider
    ]
    provider_text = " ".join(provider_names)
    score += _score_additional_field(provider_text, normalized_query, 15, 6)

    if _matches_llm_model(block.input_schema, normalized_query):
        score += 20

    return score


def _score_library_agent(
    agent: library_model.LibraryAgent,
    normalized_query: str,
@@ -758,21 +645,31 @@ def _get_all_providers() -> dict[ProviderName, Provider]:
    return providers


@cached(ttl_seconds=3600, shared_cache=True)
@cached(ttl_seconds=3600)
async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]:
    # Query the materialized view for execution counts per block
    # The view aggregates executions from the last 14 days and is refreshed hourly
    suggested_blocks = []
    # Sum the number of executions for each block type
    # Prisma cannot group by nested relations, so we do a raw query
    # Calculate the cutoff timestamp
    timestamp_threshold = datetime.now(timezone.utc) - timedelta(days=30)

    results = await query_raw_with_schema(
        """
        SELECT block_id, execution_count
        FROM {schema_prefix}"mv_suggested_blocks";
        """
        SELECT
            agent_node."agentBlockId" AS block_id,
            COUNT(execution.id) AS execution_count
        FROM {schema_prefix}"AgentNodeExecution" execution
        JOIN {schema_prefix}"AgentNode" agent_node ON execution."agentNodeId" = agent_node.id
        WHERE execution."endedTime" >= $1::timestamp
        GROUP BY agent_node."agentBlockId"
        ORDER BY execution_count DESC;
        """,
        timestamp_threshold,
    )

    # Get the top blocks based on execution count
    # But ignore Input, Output, Agent, and excluded blocks
    # But ignore Input and Output blocks
    blocks: list[tuple[BlockInfo, int]] = []
    execution_counts = {row["block_id"]: row["execution_count"] for row in results}

    for block_type in load_all_blocks().values():
        block: AnyBlockSchema = block_type()
@@ -782,9 +679,11 @@ async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]:
            BlockType.AGENT,
        ):
            continue
        if block.id in EXCLUDED_BLOCK_IDS:
            continue
        execution_count = execution_counts.get(block.id, 0)
        # Find the execution count for this block
        execution_count = next(
            (row["execution_count"] for row in results if row["block_id"] == block.id),
            0,
        )
        blocks.append((block.get_info(), execution_count))
    # Sort blocks by execution count
    blocks.sort(key=lambda x: x[1], reverse=True)
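The two count lookups visible in this hunk are equivalent in result but not in cost: the dict comprehension pays O(n) once and then answers each block's count in O(1), while the next(...) generator rescans the result rows for every block. A small standalone sketch with toy rows (field names follow the query above):

results = [
    {"block_id": "a", "execution_count": 40},
    {"block_id": "b", "execution_count": 25},
]

# Build the mapping once; each subsequent lookup is O(1)
execution_counts = {row["block_id"]: row["execution_count"] for row in results}
assert execution_counts.get("a", 0) == 40
assert execution_counts.get("missing", 0) == 0

# The generator form rescans `results` on every call, O(n) per block
assert next((r["execution_count"] for r in results if r["block_id"] == "a"), 0) == 40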
@@ -27,6 +27,7 @@ class SearchEntry(BaseModel):

# Suggestions
class SuggestionsResponse(BaseModel):
    otto_suggestions: list[str]
    recent_searches: list[SearchEntry]
    providers: list[ProviderName]
    top_blocks: list[BlockInfo]

@@ -1,5 +1,5 @@
import logging
from typing import Annotated, Sequence, cast, get_args
from typing import Annotated, Sequence

import fastapi
from autogpt_libs.auth.dependencies import get_user_id, requires_user
@@ -10,8 +10,6 @@ from backend.util.models import Pagination
from . import db as builder_db
from . import model as builder_model

VALID_FILTER_VALUES = get_args(builder_model.FilterType)

logger = logging.getLogger(__name__)

router = fastapi.APIRouter(
@@ -51,6 +49,11 @@ async def get_suggestions(
    Get all suggestions for the Blocks Menu.
    """
    return builder_model.SuggestionsResponse(
        otto_suggestions=[
            "What blocks do I need to get started?",
            "Help me create a list",
            "Help me feed my data to Google Maps",
        ],
        recent_searches=await builder_db.get_recent_searches(user_id),
        providers=[
            ProviderName.TWITTER,
@@ -148,7 +151,7 @@ async def get_providers(
async def search(
    user_id: Annotated[str, fastapi.Security(get_user_id)],
    search_query: Annotated[str | None, fastapi.Query()] = None,
    filter: Annotated[str | None, fastapi.Query()] = None,
    filter: Annotated[list[builder_model.FilterType] | None, fastapi.Query()] = None,
    search_id: Annotated[str | None, fastapi.Query()] = None,
    by_creator: Annotated[list[str] | None, fastapi.Query()] = None,
    page: Annotated[int, fastapi.Query()] = 1,
@@ -157,20 +160,9 @@ async def search(
    """
    Search for blocks (including integrations), marketplace agents, and user library agents.
    """
    # Parse and validate filter parameter
    filters: list[builder_model.FilterType]
    if filter:
        filter_values = [f.strip() for f in filter.split(",")]
        invalid_filters = [f for f in filter_values if f not in VALID_FILTER_VALUES]
        if invalid_filters:
            raise fastapi.HTTPException(
                status_code=400,
                detail=f"Invalid filter value(s): {', '.join(invalid_filters)}. "
                f"Valid values are: {', '.join(VALID_FILTER_VALUES)}",
            )
        filters = cast(list[builder_model.FilterType], filter_values)
    else:
        filters = [
    # If no filters are provided, then we will return all types
    if not filter:
        filter = [
            "blocks",
            "integrations",
            "marketplace_agents",
@@ -182,7 +174,7 @@ async def search(
    cached_results = await builder_db.get_sorted_search_results(
        user_id=user_id,
        search_query=search_query,
        filters=filters,
        filters=filter,
        by_creator=by_creator,
    )

@@ -204,7 +196,7 @@ async def search(
        user_id,
        builder_model.SearchEntry(
            search_query=search_query,
            filter=filters,
            filter=filter,
            by_creator=by_creator,
            search_id=search_id,
        ),
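Note the API contract change this hunk makes: `filter` is now declared as `list[builder_model.FilterType] | None`, so FastAPI validates each value against the enum itself and expects repeated query keys instead of the old comma-separated string (which is why the manual split/validate block could be deleted). A hedged client sketch; the base URL and path below are placeholders, not taken from this diff, and it relies on `requests` encoding list values as repeated parameters:

import requests

# New style: ?filter=blocks&filter=integrations (repeated keys)
# Old style was a single comma-separated string: ?filter=blocks,integrations
resp = requests.get(
    "http://localhost:8000/api/builder/search",  # placeholder base URL/path
    params={
        "search_query": "maps",
        "filter": ["blocks", "integrations"],  # encoded as repeated filter= keys
        "page": 1,
    },
)
print(resp.status_code, resp.json().get("total_items"))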
@@ -5,7 +5,7 @@ import re
from datetime import datetime, timedelta, timezone
from typing import Any

from pydantic import BaseModel, field_validator
from pydantic import BaseModel, Field, field_validator

from backend.api.features.chat.model import ChatSession
from backend.api.features.library import db as library_db
@@ -14,6 +14,7 @@ from backend.data import execution as execution_db
from backend.data.execution import ExecutionStatus, GraphExecution, GraphExecutionMeta

from .base import BaseTool
from .execution_utils import TERMINAL_STATUSES, wait_for_execution
from .models import (
    AgentOutputResponse,
    ErrorResponse,
@@ -34,6 +35,7 @@ class AgentOutputInput(BaseModel):
    store_slug: str = ""
    execution_id: str = ""
    run_time: str = "latest"
    wait_if_running: int = Field(default=0, ge=0, le=300)

    @field_validator(
        "agent_name",
@@ -117,6 +119,11 @@ class AgentOutputTool(BaseTool):
    Select which run to retrieve using:
    - execution_id: Specific execution ID
    - run_time: 'latest' (default), 'yesterday', 'last week', or ISO date 'YYYY-MM-DD'

    Wait for completion (optional):
    - wait_if_running: Max seconds to wait if execution is still running (0-300).
      If the execution is running/queued, waits up to this many seconds for it to complete.
      Returns current status on timeout. If already finished, returns immediately.
    """

    @property
@@ -146,6 +153,13 @@ class AgentOutputTool(BaseTool):
                    "Time filter: 'latest', 'yesterday', 'last week', or 'YYYY-MM-DD'"
                ),
            },
            "wait_if_running": {
                "type": "integer",
                "description": (
                    "Max seconds to wait if execution is still running (0-300). "
                    "If running, waits for completion. Returns current state on timeout."
                ),
            },
        },
        "required": [],
    }
@@ -223,10 +237,14 @@ class AgentOutputTool(BaseTool):
        execution_id: str | None,
        time_start: datetime | None,
        time_end: datetime | None,
        include_running: bool = False,
    ) -> tuple[GraphExecution | None, list[GraphExecutionMeta], str | None]:
        """
        Fetch execution(s) based on filters.
        Returns (single_execution, available_executions_meta, error_message).

        Args:
            include_running: If True, also look for running/queued executions (for waiting)
        """
        # If specific execution_id provided, fetch it directly
        if execution_id:
@@ -239,11 +257,22 @@ class AgentOutputTool(BaseTool):
                return None, [], f"Execution '{execution_id}' not found"
            return execution, [], None

        # Get completed executions with time filters
        # Determine which statuses to query
        statuses = [ExecutionStatus.COMPLETED]
        if include_running:
            statuses.extend(
                [
                    ExecutionStatus.RUNNING,
                    ExecutionStatus.QUEUED,
                    ExecutionStatus.INCOMPLETE,
                ]
            )

        # Get executions with time filters
        executions = await execution_db.get_graph_executions(
            graph_id=graph_id,
            user_id=user_id,
            statuses=[ExecutionStatus.COMPLETED],
            statuses=statuses,
            created_time_gte=time_start,
            created_time_lte=time_end,
            limit=10,
@@ -310,10 +339,28 @@ class AgentOutputTool(BaseTool):
            for e in available_executions[:5]
        ]

        message = f"Found execution outputs for agent '{agent.name}'"
        # Build appropriate message based on execution status
        if execution.status == ExecutionStatus.COMPLETED:
            message = f"Found execution outputs for agent '{agent.name}'"
        elif execution.status == ExecutionStatus.FAILED:
            message = f"Execution for agent '{agent.name}' failed"
        elif execution.status == ExecutionStatus.TERMINATED:
            message = f"Execution for agent '{agent.name}' was terminated"
        elif execution.status in (
            ExecutionStatus.RUNNING,
            ExecutionStatus.QUEUED,
            ExecutionStatus.INCOMPLETE,
        ):
            message = (
                f"Execution for agent '{agent.name}' is still {execution.status.value}. "
                "Results may be incomplete. Use wait_if_running to wait for completion."
            )
        else:
            message = f"Found execution for agent '{agent.name}' (status: {execution.status.value})"

        if len(available_executions) > 1:
            message += (
                f". Showing latest of {len(available_executions)} matching executions."
                f" Showing latest of {len(available_executions)} matching executions."
            )

        return AgentOutputResponse(
@@ -428,13 +475,17 @@ class AgentOutputTool(BaseTool):
        # Parse time expression
        time_start, time_end = parse_time_expression(input_data.run_time)

        # Fetch execution(s)
        # Check if we should wait for running executions
        wait_timeout = input_data.wait_if_running

        # Fetch execution(s) - include running if we're going to wait
        execution, available_executions, exec_error = await self._get_execution(
            user_id=user_id,
            graph_id=agent.graph_id,
            execution_id=input_data.execution_id or None,
            time_start=time_start,
            time_end=time_end,
            include_running=wait_timeout > 0,
        )

        if exec_error:
@@ -443,4 +494,17 @@ class AgentOutputTool(BaseTool):
                session_id=session_id,
            )

        # If we have an execution that's still running and we should wait
        if execution and wait_timeout > 0 and execution.status not in TERMINAL_STATUSES:
            logger.info(
                f"Execution {execution.id} is {execution.status}, "
                f"waiting up to {wait_timeout}s for completion"
            )
            execution = await wait_for_execution(
                user_id=user_id,
                graph_id=agent.graph_id,
                execution_id=execution.id,
                timeout_seconds=wait_timeout,
            )

        return self._build_response(agent, execution, available_executions, session_id)

@@ -0,0 +1,124 @@
"""Shared utilities for execution waiting and status handling."""

import asyncio
import logging
from typing import Any

from backend.data import execution as execution_db
from backend.data.execution import (
    AsyncRedisExecutionEventBus,
    ExecutionStatus,
    GraphExecution,
    GraphExecutionEvent,
)

logger = logging.getLogger(__name__)

# Terminal statuses that indicate execution is complete
TERMINAL_STATUSES = frozenset(
    {
        ExecutionStatus.COMPLETED,
        ExecutionStatus.FAILED,
        ExecutionStatus.TERMINATED,
    }
)


async def wait_for_execution(
    user_id: str,
    graph_id: str,
    execution_id: str,
    timeout_seconds: int,
) -> GraphExecution | None:
    """
    Wait for an execution to reach a terminal status using Redis pubsub.

    Uses asyncio.wait_for to ensure timeout is respected even when no events
    are received.

    Args:
        user_id: User ID
        graph_id: Graph ID
        execution_id: Execution ID to wait for
        timeout_seconds: Max seconds to wait

    Returns:
        The execution with current status, or None if not found
    """
    # First check current status - maybe it's already done
    execution = await execution_db.get_graph_execution(
        user_id=user_id,
        execution_id=execution_id,
        include_node_executions=False,
    )
    if not execution:
        return None

    # If already in terminal state, return immediately
    if execution.status in TERMINAL_STATUSES:
        logger.debug(
            f"Execution {execution_id} already in terminal state: {execution.status}"
        )
        return execution

    logger.info(
        f"Waiting up to {timeout_seconds}s for execution {execution_id} "
        f"(current status: {execution.status})"
    )

    # Subscribe to execution updates via Redis pubsub
    event_bus = AsyncRedisExecutionEventBus()
    channel_key = f"{user_id}/{graph_id}/{execution_id}"

    try:
        # Use wait_for to enforce timeout on the entire listen operation
        result = await asyncio.wait_for(
            _listen_for_terminal_status(event_bus, channel_key, user_id, execution_id),
            timeout=timeout_seconds,
        )
        return result
    except asyncio.TimeoutError:
        logger.info(f"Timeout waiting for execution {execution_id}")
    except Exception as e:
        logger.error(f"Error waiting for execution: {e}", exc_info=True)

    # Return current state on timeout/error
    return await execution_db.get_graph_execution(
        user_id=user_id,
        execution_id=execution_id,
        include_node_executions=False,
    )


async def _listen_for_terminal_status(
    event_bus: AsyncRedisExecutionEventBus,
    channel_key: str,
    user_id: str,
    execution_id: str,
) -> GraphExecution | None:
    """
    Listen for execution events until a terminal status is reached.

    This is a helper that gets wrapped in asyncio.wait_for for timeout handling.
    """
    async for event in event_bus.listen_events(channel_key):
        # Only process GraphExecutionEvents (not NodeExecutionEvents)
        if isinstance(event, GraphExecutionEvent):
            logger.debug(f"Received execution update: {event.status}")
            if event.status in TERMINAL_STATUSES:
                # Fetch full execution with outputs
                return await execution_db.get_graph_execution(
                    user_id=user_id,
                    execution_id=execution_id,
                    include_node_executions=False,
                )

    # Should not reach here normally (generator should yield indefinitely)
    return None


def get_execution_outputs(execution: GraphExecution | None) -> dict[str, Any] | None:
    """Extract outputs from an execution, or return None."""
    if execution is None:
        return None
    return execution.outputs
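A minimal caller sketch for the new utility module, assuming it is imported from within the chat tools package as in the diff (the relative import requires that package context, and the IDs below are illustrative):

from .execution_utils import TERMINAL_STATUSES, wait_for_execution

async def await_run(user_id: str, graph_id: str, execution_id: str) -> None:
    # Blocks for at most 30s; returns the freshest known state either way
    execution = await wait_for_execution(
        user_id=user_id,
        graph_id=graph_id,
        execution_id=execution_id,
        timeout_seconds=30,
    )
    if execution is None:
        print("execution not found")
    elif execution.status in TERMINAL_STATUSES:
        print(f"finished with {execution.status}: {execution.outputs}")
    else:
        print(f"still {execution.status} after timeout")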
@@ -192,6 +192,7 @@ class ExecutionStartedResponse(ToolResponseBase):
    library_agent_id: str | None = None
    library_agent_link: str | None = None
    status: str = "QUEUED"
    outputs: dict[str, Any] | None = None  # Populated when wait_for_result is used


# Auth/error models

@@ -12,6 +12,7 @@ from backend.api.features.chat.tracking import (
    track_agent_scheduled,
)
from backend.api.features.library import db as library_db
from backend.data.execution import ExecutionStatus
from backend.data.graph import GraphModel
from backend.data.model import CredentialsMetaInput
from backend.data.user import get_user_by_id
@@ -24,6 +25,7 @@ from backend.util.timezone_utils import (
)

from .base import BaseTool
from .execution_utils import get_execution_outputs, wait_for_execution
from .helpers import get_inputs_from_schema
from .models import (
    AgentDetails,
@@ -70,6 +72,7 @@ class RunAgentInput(BaseModel):
    schedule_name: str = ""
    cron: str = ""
    timezone: str = "UTC"
    wait_for_result: int = Field(default=0, ge=0, le=300)

    @field_validator(
        "username_agent_slug",
@@ -151,6 +154,14 @@ class RunAgentTool(BaseTool):
                "type": "string",
                "description": "IANA timezone for schedule (default: UTC)",
            },
            "wait_for_result": {
                "type": "integer",
                "description": (
                    "Max seconds to wait for execution to complete (0-300). "
                    "If >0, blocks until the execution finishes or times out. "
                    "Returns execution outputs when complete."
                ),
            },
        },
        "required": [],
    }
@@ -347,6 +358,7 @@ class RunAgentTool(BaseTool):
            graph=graph,
            graph_credentials=graph_credentials,
            inputs=params.inputs,
            wait_for_result=params.wait_for_result,
        )

    except NotFoundError as e:
@@ -430,8 +442,9 @@ class RunAgentTool(BaseTool):
        graph: GraphModel,
        graph_credentials: dict[str, CredentialsMetaInput],
        inputs: dict[str, Any],
        wait_for_result: int = 0,
    ) -> ToolResponseBase:
        """Execute an agent immediately."""
        """Execute an agent immediately, optionally waiting for completion."""
        session_id = session.session_id

        # Check rate limits
@@ -468,6 +481,60 @@ class RunAgentTool(BaseTool):
        )

        library_agent_link = f"/library/agents/{library_agent.id}"

        # If wait_for_result is specified, wait for execution to complete
        if wait_for_result > 0:
            logger.info(
                f"Waiting up to {wait_for_result}s for execution {execution.id}"
            )
            result = await wait_for_execution(
                user_id=user_id,
                graph_id=library_agent.graph_id,
                execution_id=execution.id,
                timeout_seconds=wait_for_result,
            )
            final_status = result.status if result else ExecutionStatus.FAILED
            outputs = get_execution_outputs(result)

            # Build message based on final status
            if final_status == ExecutionStatus.COMPLETED:
                message = (
                    f"Agent '{library_agent.name}' execution completed successfully. "
                    f"{MSG_DO_NOT_RUN_AGAIN}"
                )
            elif final_status == ExecutionStatus.FAILED:
                message = (
                    f"Agent '{library_agent.name}' execution failed. "
                    f"View details at {library_agent_link}. "
                    f"{MSG_DO_NOT_RUN_AGAIN}"
                )
            elif final_status == ExecutionStatus.TERMINATED:
                message = (
                    f"Agent '{library_agent.name}' execution was terminated. "
                    f"View details at {library_agent_link}. "
                    f"{MSG_DO_NOT_RUN_AGAIN}"
                )
            else:
                message = (
                    f"Agent '{library_agent.name}' execution is still {final_status.value} "
                    f"(timed out after {wait_for_result}s). "
                    f"View at {library_agent_link}. "
                    f"{MSG_DO_NOT_RUN_AGAIN}"
                )

            return ExecutionStartedResponse(
                message=message,
                session_id=session_id,
                execution_id=execution.id,
                graph_id=library_agent.graph_id,
                graph_name=library_agent.name,
                library_agent_id=library_agent.id,
                library_agent_link=library_agent_link,
                status=final_status.value,
                outputs=outputs,
            )

        # Default: return immediately without waiting
        return ExecutionStartedResponse(
            message=(
                f"Agent '{library_agent.name}' execution started successfully. "
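On the caller side, the enriched ExecutionStartedResponse can be branched on directly. A hedged sketch: the field names come from the diff, but the specific status strings assume ExecutionStatus values serialize as their member names, which this comparison does not show:

def summarize_run(response) -> str:
    # response: an ExecutionStartedResponse with status/outputs per the diff
    if response.status == "COMPLETED" and response.outputs is not None:
        return f"done: {response.outputs}"
    if response.status in ("FAILED", "TERMINATED"):
        return f"ended with {response.status}, see {response.library_agent_link}"
    # Timed out or never waited: the run may still be in progress
    return f"still {response.status}; check later with execution_id={response.execution_id}"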
@@ -183,70 +183,60 @@ class BlockHandler(ContentHandler):
        ]

        # Convert to ContentItem
        from backend.blocks.llm import LlmModel

        items = []
        for block_id, block_cls in missing_blocks[:batch_size]:
            try:
                block_instance = block_cls()

                # Skip disabled blocks - they shouldn't be indexed
                if block_instance.disabled:
                    continue

                # Build searchable text from block metadata
                parts = []
                if block_instance.name:
                if hasattr(block_instance, "name") and block_instance.name:
                    parts.append(block_instance.name)
                if block_instance.description:
                if (
                    hasattr(block_instance, "description")
                    and block_instance.description
                ):
                    parts.append(block_instance.description)
                if block_instance.categories:
                if hasattr(block_instance, "categories") and block_instance.categories:
                    # Convert BlockCategory enum to strings
                    parts.append(
                        " ".join(str(cat.value) for cat in block_instance.categories)
                    )

                # Add input schema field descriptions
                schema_dict = block_instance.input_schema.model_json_schema()
                if "properties" in schema_dict:
                    for prop_name, prop_info in schema_dict["properties"].items():
                        if "description" in prop_info:
                            parts.append(f"{prop_name}: {prop_info['description']}")
                # Add input/output schema info
                if hasattr(block_instance, "input_schema"):
                    schema = block_instance.input_schema
                    if hasattr(schema, "model_json_schema"):
                        schema_dict = schema.model_json_schema()
                        if "properties" in schema_dict:
                            for prop_name, prop_info in schema_dict[
                                "properties"
                            ].items():
                                if "description" in prop_info:
                                    parts.append(
                                        f"{prop_name}: {prop_info['description']}"
                                    )

                searchable_text = " ".join(parts)

                # Convert categories set of enums to list of strings for JSON serialization
                categories = getattr(block_instance, "categories", set())
                categories_list = (
                    [cat.value for cat in block_instance.categories]
                    if block_instance.categories
                    else []
                    [cat.value for cat in categories] if categories else []
                )

                # Extract provider names from credentials fields
                provider_names: list[str] = []
                credentials_info = (
                    block_instance.input_schema.get_credentials_fields_info()
                )
                is_integration = len(credentials_info) > 0
                for info in credentials_info.values():
                    for provider in info.provider:
                        provider_names.append(provider.value.lower())

                # Check if block has LlmModel field in input schema
                has_llm_model_field = False
                for field in block_instance.input_schema.model_fields.values():
                    if field.annotation == LlmModel:
                        has_llm_model_field = True
                        break

                items.append(
                    ContentItem(
                        content_id=block_id,
                        content_type=ContentType.BLOCK,
                        searchable_text=searchable_text,
                        metadata={
                            "name": block_instance.name,
                            "name": getattr(block_instance, "name", ""),
                            "categories": categories_list,
                            "providers": provider_names,
                            "has_llm_model_field": has_llm_model_field,
                            "is_integration": is_integration,
                        },
                        user_id=None,  # Blocks are public
                    )

@@ -85,8 +85,6 @@ async def test_block_handler_get_missing_items(mocker):
    mock_block_instance.input_schema.model_json_schema.return_value = {
        "properties": {"expression": {"description": "Math expression to evaluate"}}
    }
    mock_block_instance.input_schema.get_credentials_fields_info.return_value = {}
    mock_block_instance.input_schema.model_fields = {}
    mock_block_class.return_value = mock_block_instance

    mock_blocks = {"block-uuid-1": mock_block_class}
@@ -311,20 +309,19 @@ async def test_content_handlers_registry():


@pytest.mark.asyncio(loop_scope="session")
async def test_block_handler_handles_empty_attributes():
    """Test BlockHandler handles blocks with empty/falsy attribute values."""
async def test_block_handler_handles_missing_attributes():
    """Test BlockHandler gracefully handles blocks with missing attributes."""
    handler = BlockHandler()

    # Mock block with empty values (all attributes exist but are falsy)
    # Mock block with minimal attributes
    mock_block_class = MagicMock()
    mock_block_instance = MagicMock()
    mock_block_instance.name = "Minimal Block"
    mock_block_instance.disabled = False
    mock_block_instance.description = ""
    mock_block_instance.categories = set()
    mock_block_instance.input_schema.model_json_schema.return_value = {}
    mock_block_instance.input_schema.get_credentials_fields_info.return_value = {}
    mock_block_instance.input_schema.model_fields = {}
    # No description, categories, or schema
    del mock_block_instance.description
    del mock_block_instance.categories
    del mock_block_instance.input_schema
    mock_block_class.return_value = mock_block_instance

    mock_blocks = {"block-minimal": mock_block_class}
@@ -355,9 +352,6 @@ async def test_block_handler_skips_failed_blocks():
    good_instance.description = "Works fine"
    good_instance.categories = []
    good_instance.disabled = False
    good_instance.input_schema.model_json_schema.return_value = {}
    good_instance.input_schema.get_credentials_fields_info.return_value = {}
    good_instance.input_schema.model_fields = {}
    good_block.return_value = good_instance

    bad_block = MagicMock()
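The hasattr/getattr guards in the handler above interact with the MagicMock-based tests in a non-obvious way: a bare MagicMock auto-creates any attribute on access, so the rewritten test has to `del` attributes to actually exercise the missing-attribute path. A standalone sketch of the idiom with a plain object (names are illustrative):

class MinimalBlock:
    name = "Example"
    disabled = False
    # no description or categories defined

block = MinimalBlock()
parts = []
if hasattr(block, "name") and block.name:
    parts.append(block.name)
if hasattr(block, "description") and block.description:
    parts.append(block.description)  # skipped: attribute is absent
assert parts == ["Example"]
assert getattr(block, "categories", set()) == set()  # default when missing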
@@ -1,97 +0,0 @@
-- This migration creates a materialized view for suggested blocks based on execution counts
-- The view aggregates execution counts per block for the last 14 days
--
-- IMPORTANT: For production environments, pg_cron is REQUIRED for automatic refresh
-- Prerequisites for production:
-- 1. pg_cron extension must be installed: CREATE EXTENSION pg_cron;
-- 2. pg_cron must be configured in postgresql.conf:
--    shared_preload_libraries = 'pg_cron'
--    cron.database_name = 'your_database_name'
--
-- For development environments without pg_cron:
-- The migration will succeed but you must manually refresh views with:
--   SET search_path TO platform;
--   SELECT refresh_suggested_blocks_view();

-- Check if pg_cron extension is installed
DO $$
DECLARE
    has_pg_cron BOOLEAN;
BEGIN
    SELECT EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'pg_cron') INTO has_pg_cron;

    IF NOT has_pg_cron THEN
        RAISE WARNING 'pg_cron is not installed. Materialized view will be created but will NOT refresh automatically. For production, install pg_cron. For development, manually refresh with: SELECT refresh_suggested_blocks_view();';
    END IF;
END
$$;

-- Create materialized view for suggested blocks based on execution counts in last 14 days
-- The 14-day threshold is hardcoded to ensure consistent behavior
CREATE MATERIALIZED VIEW IF NOT EXISTS "mv_suggested_blocks" AS
SELECT
    agent_node."agentBlockId" AS block_id,
    COUNT(execution.id) AS execution_count
FROM "AgentNodeExecution" execution
JOIN "AgentNode" agent_node ON execution."agentNodeId" = agent_node.id
WHERE execution."endedTime" >= (NOW() - INTERVAL '14 days')
GROUP BY agent_node."agentBlockId"
ORDER BY execution_count DESC;

-- Create unique index for concurrent refresh support
CREATE UNIQUE INDEX IF NOT EXISTS "idx_mv_suggested_blocks_block_id" ON "mv_suggested_blocks"("block_id");

-- Create refresh function
CREATE OR REPLACE FUNCTION refresh_suggested_blocks_view()
RETURNS void
LANGUAGE plpgsql
AS $$
DECLARE
    target_schema text := current_schema();
BEGIN
    -- Use CONCURRENTLY for better performance during refresh
    REFRESH MATERIALIZED VIEW CONCURRENTLY "mv_suggested_blocks";
    RAISE NOTICE 'Suggested blocks materialized view refreshed in schema % at %', target_schema, NOW();
EXCEPTION
    WHEN OTHERS THEN
        -- Fallback to non-concurrent refresh if concurrent fails
        REFRESH MATERIALIZED VIEW "mv_suggested_blocks";
        RAISE NOTICE 'Suggested blocks materialized view refreshed (non-concurrent) in schema % at %. Concurrent refresh failed due to: %', target_schema, NOW(), SQLERRM;
END;
$$;

-- Initial refresh of the materialized view
SELECT refresh_suggested_blocks_view();

-- Schedule automatic refresh every hour (only if pg_cron is available)
DO $$
DECLARE
    has_pg_cron BOOLEAN;
    current_schema_name text := current_schema();
    job_name text;
BEGIN
    -- Check if pg_cron extension exists
    SELECT EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'pg_cron') INTO has_pg_cron;

    IF has_pg_cron THEN
        job_name := format('refresh-suggested-blocks_%s', current_schema_name);

        -- Try to unschedule existing job (ignore errors if it doesn't exist)
        BEGIN
            PERFORM cron.unschedule(job_name);
        EXCEPTION WHEN OTHERS THEN
            NULL;
        END;

        -- Schedule the new job to run every hour
        PERFORM cron.schedule(
            job_name,
            '0 * * * *', -- Every hour at minute 0
            format('SET search_path TO %I; SELECT refresh_suggested_blocks_view();', current_schema_name)
        );
        RAISE NOTICE 'Scheduled job %; runs every hour for schema %', job_name, current_schema_name;
    ELSE
        RAISE WARNING 'Automatic refresh NOT configured - pg_cron is not available. Manually refresh with: SELECT refresh_suggested_blocks_view();';
    END IF;
END;
$$;
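With this migration deleted, suggestion counts revert from the hourly-refreshed 14-day materialized view to a live 30-day aggregate computed by the parameterized raw query shown earlier (and memoized for an hour by @cached). A minimal sketch of that call shape, assuming query_raw_with_schema substitutes {schema_prefix} and binds positional $n parameters as in the diff; the helper is passed in here because its import path is not shown in this comparison:

from datetime import datetime, timedelta, timezone

async def fetch_block_execution_counts(query_raw_with_schema):
    # 30-day cutoff bound as $1, mirroring get_suggested_blocks above
    cutoff = datetime.now(timezone.utc) - timedelta(days=30)
    return await query_raw_with_schema(
        """
        SELECT
            agent_node."agentBlockId" AS block_id,
            COUNT(execution.id) AS execution_count
        FROM {schema_prefix}"AgentNodeExecution" execution
        JOIN {schema_prefix}"AgentNode" agent_node
            ON execution."agentNodeId" = agent_node.id
        WHERE execution."endedTime" >= $1::timestamp
        GROUP BY agent_node."agentBlockId"
        ORDER BY execution_count DESC;
        """,
        cutoff,
    )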
@@ -920,17 +920,6 @@ view mv_review_stats {
  // Refresh uses CONCURRENTLY to avoid blocking reads
}

// Note: This is actually a MATERIALIZED VIEW in the database
// Refreshed automatically every hour via pg_cron (with fallback to manual refresh)
view mv_suggested_blocks {
  block_id String @unique
  execution_count Int

  // Pre-aggregated execution counts per block for the last 14 days
  // Used by builder suggestions for ordering blocks by popularity
  // Refresh uses CONCURRENTLY to avoid blocking reads
}

model StoreListing {
  id String @id @default(uuid())
  createdAt DateTime @default(now())

@@ -127,10 +127,7 @@ export const Block: BlockComponent = ({
    // preview when user drags it
    const dragPreview = document.createElement("div");
    dragPreview.style.cssText = blockDragPreviewStyle;
    dragPreview.textContent = beautifyString(title || "").replace(
      / Block$/,
      "",
    );
    dragPreview.textContent = beautifyString(title || "");

    document.body.appendChild(dragPreview);
    e.dataTransfer.setDragImage(dragPreview, 0, 0);
@@ -165,10 +162,7 @@ export const Block: BlockComponent = ({
          "line-clamp-1 font-sans text-sm font-medium leading-[1.375rem] text-zinc-800 group-disabled:text-zinc-400",
        )}
      >
        {highlightText(
          beautifyString(title).replace(/ Block$/, ""),
          highlightedText,
        )}
        {highlightText(beautifyString(title), highlightedText)}
      </span>
    )}
    {description && (

@@ -2,7 +2,7 @@ import { useBlockMenuStore } from "@/app/(platform)/build/stores/blockMenuStore"
import { FilterChip } from "../FilterChip";
import { categories } from "./constants";
import { FilterSheet } from "../FilterSheet/FilterSheet";
import { CategoryKey } from "./types";
import { GetV2BuilderSearchFilterAnyOfItem } from "@/app/api/__generated__/models/getV2BuilderSearchFilterAnyOfItem";

export const BlockMenuFilters = () => {
  const {
@@ -15,7 +15,7 @@ export const BlockMenuFilters = () => {
    removeCreator,
  } = useBlockMenuStore();

  const handleFilterClick = (filter: CategoryKey) => {
  const handleFilterClick = (filter: GetV2BuilderSearchFilterAnyOfItem) => {
    if (filters.includes(filter)) {
      removeFilter(filter);
    } else {

@@ -1,8 +1,15 @@
import { GetV2BuilderSearchFilterAnyOfItem } from "@/app/api/__generated__/models/getV2BuilderSearchFilterAnyOfItem";
import { CategoryKey } from "./types";

export const categories: Array<{ key: CategoryKey; name: string }> = [
  { key: "blocks", name: "Blocks" },
  { key: "integrations", name: "Integrations" },
  { key: "marketplace_agents", name: "Marketplace agents" },
  { key: "my_agents", name: "My agents" },
  { key: GetV2BuilderSearchFilterAnyOfItem.blocks, name: "Blocks" },
  {
    key: GetV2BuilderSearchFilterAnyOfItem.integrations,
    name: "Integrations",
  },
  {
    key: GetV2BuilderSearchFilterAnyOfItem.marketplace_agents,
    name: "Marketplace agents",
  },
  { key: GetV2BuilderSearchFilterAnyOfItem.my_agents, name: "My agents" },
];

@@ -1,3 +1,5 @@
import { GetV2BuilderSearchFilterAnyOfItem } from "@/app/api/__generated__/models/getV2BuilderSearchFilterAnyOfItem";

export type DefaultStateType =
  | "suggestion"
  | "all_blocks"
@@ -8,11 +10,7 @@ export type DefaultStateType =
  | "marketplace_agents"
  | "my_agents";

export type CategoryKey =
  | "blocks"
  | "integrations"
  | "marketplace_agents"
  | "my_agents";
export type CategoryKey = GetV2BuilderSearchFilterAnyOfItem;

export interface Filters {
  categories: {

@@ -23,7 +23,7 @@ import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import { getQueryClient } from "@/lib/react-query/queryClient";
import { useToast } from "@/components/molecules/Toast/use-toast";
import * as Sentry from "@sentry/nextjs";
import { CategoryCounts } from "../BlockMenuFilters/types";
import { GetV2BuilderSearchFilterAnyOfItem } from "@/app/api/__generated__/models/getV2BuilderSearchFilterAnyOfItem";

export const useBlockMenuSearchContent = () => {
  const {
@@ -67,7 +67,7 @@ export const useBlockMenuSearchContent = () => {
      page_size: 8,
      search_query: searchQuery,
      search_id: searchId,
      filter: filters.length > 0 ? filters.join(",") : undefined,
      filter: filters.length > 0 ? filters : undefined,
      by_creator: creators.length > 0 ? creators : undefined,
    },
    {
@@ -117,7 +117,10 @@ export const useBlockMenuSearchContent = () => {
    }
    const latestData = okData(searchQueryData.pages.at(-1));
    setCategoryCounts(
      (latestData?.total_items as CategoryCounts) || {
      (latestData?.total_items as Record<
        GetV2BuilderSearchFilterAnyOfItem,
        number
      >) || {
        blocks: 0,
        integrations: 0,
        marketplace_agents: 0,

@@ -1,7 +1,7 @@
import { useBlockMenuStore } from "@/app/(platform)/build/stores/blockMenuStore";
import { useState } from "react";
import { INITIAL_CREATORS_TO_SHOW } from "./constant";
import { CategoryKey } from "../BlockMenuFilters/types";
import { GetV2BuilderSearchFilterAnyOfItem } from "@/app/api/__generated__/models/getV2BuilderSearchFilterAnyOfItem";

export const useFilterSheet = () => {
  const { filters, creators_list, creators, setFilters, setCreators } =
@@ -9,13 +9,15 @@ export const useFilterSheet = () => {

  const [isOpen, setIsOpen] = useState(false);
  const [localCategories, setLocalCategories] =
    useState<CategoryKey[]>(filters);
    useState<GetV2BuilderSearchFilterAnyOfItem[]>(filters);
  const [localCreators, setLocalCreators] = useState<string[]>(creators);
  const [displayedCreatorsCount, setDisplayedCreatorsCount] = useState(
    INITIAL_CREATORS_TO_SHOW,
  );

  const handleLocalCategoryChange = (category: CategoryKey) => {
  const handleLocalCategoryChange = (
    category: GetV2BuilderSearchFilterAnyOfItem,
  ) => {
    setLocalCategories((prev) => {
      if (prev.includes(category)) {
        return prev.filter((c) => c !== category);

@@ -61,10 +61,7 @@ export const IntegrationBlock: IntegrationBlockComponent = ({
    // preview when user drags it
    const dragPreview = document.createElement("div");
    dragPreview.style.cssText = blockDragPreviewStyle;
    dragPreview.textContent = beautifyString(title || "").replace(
      / Block$/,
      "",
    );
    dragPreview.textContent = beautifyString(title || "");

    document.body.appendChild(dragPreview);
    e.dataTransfer.setDragImage(dragPreview, 0, 0);
@@ -103,10 +100,7 @@ export const IntegrationBlock: IntegrationBlockComponent = ({
          "line-clamp-1 font-sans text-sm font-medium leading-[1.375rem] text-zinc-800 group-disabled:text-zinc-400",
        )}
      >
        {highlightText(
          beautifyString(title).replace(/ Block$/, ""),
          highlightedText,
        )}
        {highlightText(beautifyString(title), highlightedText)}
      </span>
    )}
    {description && (

@@ -81,14 +81,6 @@ export const UGCAgentBlock: UGCAgentBlockComponent = ({
      >
        Version {version}
      </span>

      <span
        className={cn(
          "rounded-[0.75rem] bg-zinc-200 px-[0.5rem] font-sans text-xs leading-[1.25rem] text-zinc-500",
        )}
      >
        Your Agent
      </span>
    </div>
  </div>
  <div

@@ -3,29 +3,28 @@ import { DefaultStateType } from "../components/NewControlPanel/NewBlockMenu/typ
import { SearchResponseItemsItem } from "@/app/api/__generated__/models/searchResponseItemsItem";
import { getSearchItemType } from "../components/NewControlPanel/NewBlockMenu/BlockMenuSearchContent/helper";
import { StoreAgent } from "@/app/api/__generated__/models/storeAgent";
import {
  CategoryKey,
  CategoryCounts,
} from "../components/NewControlPanel/NewBlockMenu/BlockMenuFilters/types";
import { GetV2BuilderSearchFilterAnyOfItem } from "@/app/api/__generated__/models/getV2BuilderSearchFilterAnyOfItem";

type BlockMenuStore = {
  searchQuery: string;
  searchId: string | undefined;
  defaultState: DefaultStateType;
  integration: string | undefined;
  filters: CategoryKey[];
  filters: GetV2BuilderSearchFilterAnyOfItem[];
  creators: string[];
  creators_list: string[];
  categoryCounts: CategoryCounts;
  categoryCounts: Record<GetV2BuilderSearchFilterAnyOfItem, number>;

  setCategoryCounts: (counts: CategoryCounts) => void;
  setCategoryCounts: (
    counts: Record<GetV2BuilderSearchFilterAnyOfItem, number>,
  ) => void;
  setCreatorsList: (searchData: SearchResponseItemsItem[]) => void;
  addCreator: (creator: string) => void;
  setCreators: (creators: string[]) => void;
  removeCreator: (creator: string) => void;
  addFilter: (filter: CategoryKey) => void;
  setFilters: (filters: CategoryKey[]) => void;
  removeFilter: (filter: CategoryKey) => void;
  addFilter: (filter: GetV2BuilderSearchFilterAnyOfItem) => void;
  setFilters: (filters: GetV2BuilderSearchFilterAnyOfItem[]) => void;
  removeFilter: (filter: GetV2BuilderSearchFilterAnyOfItem) => void;
  setSearchQuery: (query: string) => void;
  setSearchId: (id: string | undefined) => void;
  setDefaultState: (state: DefaultStateType) => void;

@@ -820,7 +820,21 @@
          "in": "query",
          "required": false,
          "schema": {
            "anyOf": [{ "type": "string" }, { "type": "null" }],
            "anyOf": [
              {
                "type": "array",
                "items": {
                  "enum": [
                    "blocks",
                    "integrations",
                    "marketplace_agents",
                    "my_agents"
                  ],
                  "type": "string"
                }
              },
              { "type": "null" }
            ],
            "title": "Filter"
          }
        },
@@ -11665,6 +11679,11 @@
      },
      "SuggestionsResponse": {
        "properties": {
          "otto_suggestions": {
            "items": { "type": "string" },
            "type": "array",
            "title": "Otto Suggestions"
          },
          "recent_searches": {
            "items": { "$ref": "#/components/schemas/SearchEntry" },
            "type": "array",
@@ -11685,7 +11704,12 @@
          }
        },
        "type": "object",
        "required": ["recent_searches", "providers", "top_blocks"],
        "required": [
          "otto_suggestions",
          "recent_searches",
          "providers",
          "top_blocks"
        ],
        "title": "SuggestionsResponse"
      },
      "TimezoneResponse": {