Compare commits


26 Commits

All 26 commits are by Krzysztof Czerwinski:

18ea322214  2026-02-12 16:47:45 +09:00  Update test
a5fb012dc1  2026-02-12 15:52:24 +09:00  Address feedback
4f70dc00e1  2026-02-12 15:51:51 +09:00  Update migration and schema
6baf5c8344  2026-02-12 14:54:49 +09:00  Merge branch 'dev' into kpczerwinski/secrt-1778-update-new-builder-search
a8f9e652c9  2026-02-03 18:01:16 +09:00  Merge branch 'dev' into kpczerwinski/secrt-1778-update-new-builder-search
fe7268fdd4  2026-02-03 18:01:04 +09:00  Exclude Agent Block from block categories
e39e0e47a6  2026-02-03 16:45:30 +09:00  Merge branch 'dev' into kpczerwinski/secrt-1778-update-new-builder-search
d897c99682  2026-01-29 19:10:03 +09:00  Move migration
250a640a6a  2026-01-29 17:23:12 +09:00  Merge branch 'dev' into kpczerwinski/secrt-1778-update-new-builder-search
449834fcb2  2026-01-29 17:22:48 +09:00  Update migration
8e65854458  2026-01-28 16:41:25 +09:00  Update migration
dd3b4c3527  2026-01-28 15:37:09 +09:00  Lint
a844d14d18  2026-01-28 15:26:40 +09:00  Fix search filter types
ca1df77c20  2026-01-28 14:41:46 +09:00  Merge branch 'dev' into kpczerwinski/secrt-1778-update-new-builder-search
8f7429e3fd  2026-01-27 19:01:23 +09:00  Format
6935661ea5  2026-01-27 19:00:39 +09:00  Rename Block suffix from blocks names in builder
1ea47eacbd  2026-01-27 18:59:57 +09:00  Exclude AgentExecutorBlock from builder blocks
a733dd5b4b  2026-01-27 18:26:19 +09:00  Move migration
cfa2464918  2026-01-27 17:29:08 +09:00  Fix
eb1c2a04a2  2026-01-27 16:15:50 +09:00  Merge branch 'dev' into kpczerwinski/secrt-1778-update-new-builder-search
9594a0bc23  2026-01-27 16:15:40 +09:00  Your Agent badge
5ee71a86c7  2026-01-27 16:15:22 +09:00  Update openapi schema
af85c4a3a9  2026-01-20 16:43:27 +09:00  Update builder search
43794c71fa  2026-01-20 16:34:39 +09:00  Hybrid search in builder
259eff725e  2026-01-20 15:55:36 +09:00  Add materialized view for suggested blocks
9577b93576  2026-01-20 15:15:24 +09:00  Update routes
246 changed files with 2457 additions and 7345 deletions

View File

@@ -10,7 +10,7 @@ from typing_extensions import TypedDict
import backend.api.features.store.cache as store_cache
import backend.api.features.store.model as store_model
import backend.blocks
import backend.data.block
from backend.api.external.middleware import require_permission
from backend.data import execution as execution_db
from backend.data import graph as graph_db
@@ -67,7 +67,7 @@ async def get_user_info(
dependencies=[Security(require_permission(APIKeyPermission.READ_BLOCK))],
)
async def get_graph_blocks() -> Sequence[dict[Any, Any]]:
blocks = [block() for block in backend.blocks.get_blocks().values()]
blocks = [block() for block in backend.data.block.get_blocks().values()]
return [b.to_dict() for b in blocks if not b.disabled]
@@ -83,7 +83,7 @@ async def execute_graph_block(
require_permission(APIKeyPermission.EXECUTE_BLOCK)
),
) -> CompletedBlockOutput:
obj = backend.blocks.get_block(block_id)
obj = backend.data.block.get_block(block_id)
if not obj:
raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")
if obj.disabled:
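
The recurring change across the files in this compare consolidates the block registry imports: symbols such as get_block, get_blocks, AnyBlockSchema, BlockInfo, BlockType, and BlockInput move from backend.blocks / backend.blocks._base to backend.data.block. A minimal sketch of the new import style (symbol list taken from the hunks below; call signatures assumed otherwise unchanged):

# Old paths, on the base side of this compare:
#   import backend.blocks
#   from backend.blocks._base import AnyBlockSchema, BlockInfo, BlockType
# New consolidated path:
import backend.data.block
from backend.data.block import AnyBlockSchema, BlockInfo, BlockType, get_block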

View File

@@ -1,24 +1,20 @@
import logging
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from difflib import SequenceMatcher
from typing import Sequence
import prisma
from prisma.enums import ContentType
import backend.api.features.library.db as library_db
import backend.api.features.library.model as library_model
import backend.api.features.store.db as store_db
import backend.api.features.store.model as store_model
import backend.data.block
from backend.api.features.store.hybrid_search import unified_hybrid_search
from backend.blocks import load_all_blocks
from backend.blocks._base import (
AnyBlockSchema,
BlockCategory,
BlockInfo,
BlockSchema,
BlockType,
)
from backend.blocks.llm import LlmModel
from backend.data.block import AnyBlockSchema, BlockCategory, BlockInfo, BlockSchema
from backend.data.db import query_raw_with_schema
from backend.integrations.providers import ProviderName
from backend.util.cache import cached
@@ -27,7 +23,7 @@ from backend.util.models import Pagination
from .model import (
BlockCategoryResponse,
BlockResponse,
BlockTypeFilter,
BlockType,
CountResponse,
FilterType,
Provider,
@@ -42,6 +38,16 @@ MAX_LIBRARY_AGENT_RESULTS = 100
MAX_MARKETPLACE_AGENT_RESULTS = 100
MIN_SCORE_FOR_FILTERED_RESULTS = 10.0
# Boost blocks over marketplace agents in search results
BLOCK_SCORE_BOOST = 50.0
# Block IDs to exclude from search results
EXCLUDED_BLOCK_IDS = frozenset(
{
"e189baac-8c20-45a1-94a7-55177ea42565", # AgentExecutorBlock
}
)
SearchResultItem = BlockInfo | library_model.LibraryAgent | store_model.StoreAgent
@@ -64,8 +70,8 @@ def get_block_categories(category_blocks: int = 3) -> list[BlockCategoryResponse
for block_type in load_all_blocks().values():
block: AnyBlockSchema = block_type()
# Skip disabled blocks
if block.disabled:
# Skip disabled and excluded blocks
if block.disabled or block.id in EXCLUDED_BLOCK_IDS:
continue
# Skip blocks that don't have categories (all should have at least one)
if not block.categories:
@@ -93,7 +99,7 @@ def get_block_categories(category_blocks: int = 3) -> list[BlockCategoryResponse
def get_blocks(
*,
category: str | None = None,
type: BlockTypeFilter | None = None,
type: BlockType | None = None,
provider: ProviderName | None = None,
page: int = 1,
page_size: int = 50,
@@ -116,6 +122,9 @@ def get_blocks(
# Skip disabled blocks
if block.disabled:
continue
# Skip excluded blocks
if block.id in EXCLUDED_BLOCK_IDS:
continue
# Skip blocks that don't match the category
if category and category not in {c.name.lower() for c in block.categories}:
continue
@@ -255,14 +264,25 @@ async def _build_cached_search_results(
"my_agents": 0,
}
block_results, block_total, integration_total = _collect_block_results(
normalized_query=normalized_query,
include_blocks=include_blocks,
include_integrations=include_integrations,
)
scored_items.extend(block_results)
total_items["blocks"] = block_total
total_items["integrations"] = integration_total
# Use hybrid search when query is present, otherwise list all blocks
if (include_blocks or include_integrations) and normalized_query:
block_results, block_total, integration_total = await _hybrid_search_blocks(
query=search_query,
include_blocks=include_blocks,
include_integrations=include_integrations,
)
scored_items.extend(block_results)
total_items["blocks"] = block_total
total_items["integrations"] = integration_total
elif include_blocks or include_integrations:
# No query - list all blocks using in-memory approach
block_results, block_total, integration_total = _collect_block_results(
include_blocks=include_blocks,
include_integrations=include_integrations,
)
scored_items.extend(block_results)
total_items["blocks"] = block_total
total_items["integrations"] = integration_total
if include_library_agents:
library_response = await library_db.list_library_agents(
@@ -307,10 +327,14 @@ async def _build_cached_search_results(
def _collect_block_results(
*,
normalized_query: str,
include_blocks: bool,
include_integrations: bool,
) -> tuple[list[_ScoredItem], int, int]:
"""
Collect all blocks for listing (no search query).
All blocks get BLOCK_SCORE_BOOST to prioritize them over marketplace agents.
"""
results: list[_ScoredItem] = []
block_count = 0
integration_count = 0
@@ -323,6 +347,10 @@ def _collect_block_results(
if block.disabled:
continue
# Skip excluded blocks
if block.id in EXCLUDED_BLOCK_IDS:
continue
block_info = block.get_info()
credentials = list(block.input_schema.get_credentials_fields().values())
is_integration = len(credentials) > 0
@@ -332,10 +360,6 @@ def _collect_block_results(
if not is_integration and not include_blocks:
continue
score = _score_block(block, block_info, normalized_query)
if not _should_include_item(score, normalized_query):
continue
filter_type: FilterType = "integrations" if is_integration else "blocks"
if is_integration:
integration_count += 1
@@ -346,8 +370,122 @@ def _collect_block_results(
_ScoredItem(
item=block_info,
filter_type=filter_type,
score=score,
sort_key=_get_item_name(block_info),
score=BLOCK_SCORE_BOOST,
sort_key=block_info.name.lower(),
)
)
return results, block_count, integration_count
async def _hybrid_search_blocks(
*,
query: str,
include_blocks: bool,
include_integrations: bool,
) -> tuple[list[_ScoredItem], int, int]:
"""
Search blocks using hybrid search with builder-specific filtering.
Uses unified_hybrid_search for semantic + lexical search, then applies
post-filtering for block/integration types and scoring adjustments.
Scoring:
- Base: hybrid relevance score (0-1) scaled to 0-100, plus BLOCK_SCORE_BOOST
to prioritize blocks over marketplace agents in combined results
- +30 for exact name match, +15 for prefix name match
- +20 if the block has an LlmModel field and the query matches an LLM model name
Args:
query: The search query string
include_blocks: Whether to include regular blocks
include_integrations: Whether to include integration blocks
Returns:
Tuple of (scored_items, block_count, integration_count)
"""
results: list[_ScoredItem] = []
block_count = 0
integration_count = 0
if not include_blocks and not include_integrations:
return results, block_count, integration_count
normalized_query = query.strip().lower()
# Fetch more results to account for post-filtering
search_results, _ = await unified_hybrid_search(
query=query,
content_types=[ContentType.BLOCK],
page=1,
page_size=150,
min_score=0.10,
)
# Load all blocks for getting BlockInfo
all_blocks = load_all_blocks()
for result in search_results:
block_id = result["content_id"]
# Skip excluded blocks
if block_id in EXCLUDED_BLOCK_IDS:
continue
metadata = result.get("metadata", {})
hybrid_score = result.get("relevance", 0.0)
# Get the actual block class
if block_id not in all_blocks:
continue
block_cls = all_blocks[block_id]
block: AnyBlockSchema = block_cls()
if block.disabled:
continue
# Check block/integration filter using metadata
is_integration = metadata.get("is_integration", False)
if is_integration and not include_integrations:
continue
if not is_integration and not include_blocks:
continue
# Get block info
block_info = block.get_info()
# Calculate final score: scale hybrid score and add builder-specific bonuses
# Hybrid scores are 0-1, builder scores were 0-200+
# Add BLOCK_SCORE_BOOST to prioritize blocks over marketplace agents
final_score = hybrid_score * 100 + BLOCK_SCORE_BOOST
# Add LLM model match bonus
has_llm_field = metadata.get("has_llm_model_field", False)
if has_llm_field and _matches_llm_model(block.input_schema, normalized_query):
final_score += 20
# Add exact/prefix match bonus for deterministic tie-breaking
name = block_info.name.lower()
if name == normalized_query:
final_score += 30
elif name.startswith(normalized_query):
final_score += 15
# Track counts
filter_type: FilterType = "integrations" if is_integration else "blocks"
if is_integration:
integration_count += 1
else:
block_count += 1
results.append(
_ScoredItem(
item=block_info,
filter_type=filter_type,
score=final_score,
sort_key=name,
)
)
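
As a concrete check of the scoring rules described in the docstring above, here is a small self-contained sketch of how a final score is assembled for one result (the relevance value here is hypothetical; in production it comes from unified_hybrid_search):

BLOCK_SCORE_BOOST = 50.0

def final_block_score(
    hybrid_relevance: float,  # 0-1 relevance from unified_hybrid_search
    name: str,
    normalized_query: str,
    llm_model_matches: bool,
) -> float:
    # Scale the 0-1 hybrid score to 0-100, then boost blocks over marketplace agents
    score = hybrid_relevance * 100 + BLOCK_SCORE_BOOST
    if llm_model_matches:
        score += 20
    if name == normalized_query:
        score += 30
    elif name.startswith(normalized_query):
        score += 15
    return score

# e.g. an exact name match with relevance 0.8 and no LLM-model match:
# 0.8 * 100 + 50 + 30 = 160.0
assert final_block_score(0.8, "http request", "http request", False) == 160.0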
@@ -472,6 +610,8 @@ async def _get_static_counts():
block: AnyBlockSchema = block_type()
if block.disabled:
continue
if block.id in EXCLUDED_BLOCK_IDS:
continue
all_blocks += 1
@@ -507,38 +647,6 @@ def _matches_llm_model(schema_cls: type[BlockSchema], query: str) -> bool:
return False
def _score_block(
block: AnyBlockSchema,
block_info: BlockInfo,
normalized_query: str,
) -> float:
if not normalized_query:
return 0.0
name = block_info.name.lower()
description = block_info.description.lower()
score = _score_primary_fields(name, description, normalized_query)
category_text = " ".join(
category.get("category", "").lower() for category in block_info.categories
)
score += _score_additional_field(category_text, normalized_query, 12, 6)
credentials_info = block.input_schema.get_credentials_fields_info().values()
provider_names = [
provider.value.lower()
for info in credentials_info
for provider in info.provider
]
provider_text = " ".join(provider_names)
score += _score_additional_field(provider_text, normalized_query, 15, 6)
if _matches_llm_model(block.input_schema, normalized_query):
score += 20
return score
def _score_library_agent(
agent: library_model.LibraryAgent,
normalized_query: str,
@@ -645,45 +753,33 @@ def _get_all_providers() -> dict[ProviderName, Provider]:
return providers
@cached(ttl_seconds=3600)
@cached(ttl_seconds=3600, shared_cache=True)
async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]:
suggested_blocks = []
# Sum the number of executions for each block type
# Prisma cannot group by nested relations, so we do a raw query
# Calculate the cutoff timestamp
timestamp_threshold = datetime.now(timezone.utc) - timedelta(days=30)
# Query the materialized view for execution counts per block
# The view aggregates executions from the last 14 days and is refreshed hourly
results = await query_raw_with_schema(
"""
SELECT
agent_node."agentBlockId" AS block_id,
COUNT(execution.id) AS execution_count
FROM {schema_prefix}"AgentNodeExecution" execution
JOIN {schema_prefix}"AgentNode" agent_node ON execution."agentNodeId" = agent_node.id
WHERE execution."endedTime" >= $1::timestamp
GROUP BY agent_node."agentBlockId"
ORDER BY execution_count DESC;
""",
timestamp_threshold,
SELECT block_id, execution_count
FROM {schema_prefix}"mv_suggested_blocks";
"""
)
# Get the top blocks based on execution count
# But ignore Input and Output blocks
# But ignore Input, Output, Agent, and excluded blocks
blocks: list[tuple[BlockInfo, int]] = []
execution_counts = {row["block_id"]: row["execution_count"] for row in results}
for block_type in load_all_blocks().values():
block: AnyBlockSchema = block_type()
if block.disabled or block.block_type in (
BlockType.INPUT,
BlockType.OUTPUT,
BlockType.AGENT,
backend.data.block.BlockType.INPUT,
backend.data.block.BlockType.OUTPUT,
backend.data.block.BlockType.AGENT,
):
continue
# Find the execution count for this block
execution_count = next(
(row["execution_count"] for row in results if row["block_id"] == block.id),
0,
)
if block.id in EXCLUDED_BLOCK_IDS:
continue
execution_count = execution_counts.get(block.id, 0)
blocks.append((block.get_info(), execution_count))
# Sort blocks by execution count
blocks.sort(key=lambda x: x[1], reverse=True)
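
The mv_suggested_blocks definition lives in a migration that isn't shown in this compare. Based on the inline query it replaces and the comments above (14-day window, hourly refresh), a plausible sketch of the view follows, with the schema prefix omitted; only the block_id and execution_count columns are confirmed by the code:

CREATE MATERIALIZED VIEW "mv_suggested_blocks" AS
SELECT
    agent_node."agentBlockId" AS block_id,
    COUNT(execution.id)       AS execution_count
FROM "AgentNodeExecution" execution
JOIN "AgentNode" agent_node ON execution."agentNodeId" = agent_node.id
WHERE execution."endedTime" >= NOW() - INTERVAL '14 days'
GROUP BY agent_node."agentBlockId"
ORDER BY execution_count DESC;

-- Refreshed on a schedule (hourly, per the comment above), e.g.:
-- REFRESH MATERIALIZED VIEW "mv_suggested_blocks";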

View File

@@ -4,7 +4,7 @@ from pydantic import BaseModel
import backend.api.features.library.model as library_model
import backend.api.features.store.model as store_model
from backend.blocks._base import BlockInfo
from backend.data.block import BlockInfo
from backend.integrations.providers import ProviderName
from backend.util.models import Pagination
@@ -15,7 +15,7 @@ FilterType = Literal[
"my_agents",
]
BlockTypeFilter = Literal["all", "input", "action", "output"]
BlockType = Literal["all", "input", "action", "output"]
class SearchEntry(BaseModel):
@@ -27,7 +27,6 @@ class SearchEntry(BaseModel):
# Suggestions
class SuggestionsResponse(BaseModel):
otto_suggestions: list[str]
recent_searches: list[SearchEntry]
providers: list[ProviderName]
top_blocks: list[BlockInfo]

View File

@@ -1,5 +1,5 @@
import logging
from typing import Annotated, Sequence
from typing import Annotated, Sequence, cast, get_args
import fastapi
from autogpt_libs.auth.dependencies import get_user_id, requires_user
@@ -10,6 +10,8 @@ from backend.util.models import Pagination
from . import db as builder_db
from . import model as builder_model
VALID_FILTER_VALUES = get_args(builder_model.FilterType)
logger = logging.getLogger(__name__)
router = fastapi.APIRouter(
@@ -49,11 +51,6 @@ async def get_suggestions(
Get all suggestions for the Blocks Menu.
"""
return builder_model.SuggestionsResponse(
otto_suggestions=[
"What blocks do I need to get started?",
"Help me create a list",
"Help me feed my data to Google Maps",
],
recent_searches=await builder_db.get_recent_searches(user_id),
providers=[
ProviderName.TWITTER,
@@ -88,7 +85,7 @@ async def get_block_categories(
)
async def get_blocks(
category: Annotated[str | None, fastapi.Query()] = None,
type: Annotated[builder_model.BlockTypeFilter | None, fastapi.Query()] = None,
type: Annotated[builder_model.BlockType | None, fastapi.Query()] = None,
provider: Annotated[ProviderName | None, fastapi.Query()] = None,
page: Annotated[int, fastapi.Query()] = 1,
page_size: Annotated[int, fastapi.Query()] = 50,
@@ -151,7 +148,7 @@ async def get_providers(
async def search(
user_id: Annotated[str, fastapi.Security(get_user_id)],
search_query: Annotated[str | None, fastapi.Query()] = None,
filter: Annotated[list[builder_model.FilterType] | None, fastapi.Query()] = None,
filter: Annotated[str | None, fastapi.Query()] = None,
search_id: Annotated[str | None, fastapi.Query()] = None,
by_creator: Annotated[list[str] | None, fastapi.Query()] = None,
page: Annotated[int, fastapi.Query()] = 1,
@@ -160,9 +157,20 @@ async def search(
"""
Search for blocks (including integrations), marketplace agents, and user library agents.
"""
# If no filters are provided, then we will return all types
if not filter:
filter = [
# Parse and validate filter parameter
filters: list[builder_model.FilterType]
if filter:
filter_values = [f.strip() for f in filter.split(",")]
invalid_filters = [f for f in filter_values if f not in VALID_FILTER_VALUES]
if invalid_filters:
raise fastapi.HTTPException(
status_code=400,
detail=f"Invalid filter value(s): {', '.join(invalid_filters)}. "
f"Valid values are: {', '.join(VALID_FILTER_VALUES)}",
)
filters = cast(list[builder_model.FilterType], filter_values)
else:
filters = [
"blocks",
"integrations",
"marketplace_agents",
@@ -174,7 +182,7 @@ async def search(
cached_results = await builder_db.get_sorted_search_results(
user_id=user_id,
search_query=search_query,
filters=filter,
filters=filters,
by_creator=by_creator,
)
@@ -196,7 +204,7 @@ async def search(
user_id,
builder_model.SearchEntry(
search_query=search_query,
filter=filter,
filter=filters,
by_creator=by_creator,
search_id=search_id,
),
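
A quick illustration of the new comma-separated filter handling (the endpoint previously accepted a list-typed query parameter). The helper below mirrors the route's parsing, with FilterType reproduced from the model.py hunk above:

from typing import Literal, get_args

FilterType = Literal["blocks", "integrations", "marketplace_agents", "my_agents"]
VALID_FILTER_VALUES = get_args(FilterType)

def parse_filters(filter_param: str | None) -> list[str]:
    if not filter_param:
        # No filter provided: include every type, as the route does
        return list(VALID_FILTER_VALUES)
    values = [f.strip() for f in filter_param.split(",")]
    invalid = [f for f in values if f not in VALID_FILTER_VALUES]
    if invalid:
        # The route raises fastapi.HTTPException(status_code=400) here
        raise ValueError(f"Invalid filter value(s): {', '.join(invalid)}")
    return values

parse_filters("blocks, integrations")  # ['blocks', 'integrations']
parse_filters(None)                    # all four filter types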

View File

@@ -1,154 +0,0 @@
"""Dummy Agent Generator for testing.
Returns mock responses matching the format expected from the external service.
Enable via AGENTGENERATOR_USE_DUMMY=true in settings.
WARNING: This is for testing only. Do not use in production.
"""
import asyncio
import logging
import uuid
from typing import Any
logger = logging.getLogger(__name__)
# Dummy decomposition result (instructions type)
DUMMY_DECOMPOSITION_RESULT: dict[str, Any] = {
"type": "instructions",
"steps": [
{
"description": "Get input from user",
"action": "input",
"block_name": "AgentInputBlock",
},
{
"description": "Process the input",
"action": "process",
"block_name": "TextFormatterBlock",
},
{
"description": "Return output to user",
"action": "output",
"block_name": "AgentOutputBlock",
},
],
}
# Block IDs from backend/blocks/io.py
AGENT_INPUT_BLOCK_ID = "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b"
AGENT_OUTPUT_BLOCK_ID = "363ae599-353e-4804-937e-b2ee3cef3da4"
def _generate_dummy_agent_json() -> dict[str, Any]:
"""Generate a minimal valid agent JSON for testing."""
input_node_id = str(uuid.uuid4())
output_node_id = str(uuid.uuid4())
return {
"id": str(uuid.uuid4()),
"version": 1,
"is_active": True,
"name": "Dummy Test Agent",
"description": "A dummy agent generated for testing purposes",
"nodes": [
{
"id": input_node_id,
"block_id": AGENT_INPUT_BLOCK_ID,
"input_default": {
"name": "input",
"title": "Input",
"description": "Enter your input",
"placeholder_values": [],
},
"metadata": {"position": {"x": 0, "y": 0}},
},
{
"id": output_node_id,
"block_id": AGENT_OUTPUT_BLOCK_ID,
"input_default": {
"name": "output",
"title": "Output",
"description": "Agent output",
"format": "{output}",
},
"metadata": {"position": {"x": 400, "y": 0}},
},
],
"links": [
{
"id": str(uuid.uuid4()),
"source_id": input_node_id,
"sink_id": output_node_id,
"source_name": "result",
"sink_name": "value",
"is_static": False,
},
],
}
async def decompose_goal_dummy(
description: str,
context: str = "",
library_agents: list[dict[str, Any]] | None = None,
) -> dict[str, Any]:
"""Return dummy decomposition result."""
logger.info("Using dummy agent generator for decompose_goal")
return DUMMY_DECOMPOSITION_RESULT.copy()
async def generate_agent_dummy(
instructions: dict[str, Any],
library_agents: list[dict[str, Any]] | None = None,
operation_id: str | None = None,
task_id: str | None = None,
) -> dict[str, Any]:
"""Return dummy agent JSON after a simulated delay."""
logger.info("Using dummy agent generator for generate_agent (30s delay)")
await asyncio.sleep(30)
return _generate_dummy_agent_json()
async def generate_agent_patch_dummy(
update_request: str,
current_agent: dict[str, Any],
library_agents: list[dict[str, Any]] | None = None,
operation_id: str | None = None,
task_id: str | None = None,
) -> dict[str, Any]:
"""Return dummy patched agent (returns the current agent with updated description)."""
logger.info("Using dummy agent generator for generate_agent_patch")
patched = current_agent.copy()
patched["description"] = (
f"{current_agent.get('description', '')} (updated: {update_request})"
)
return patched
async def customize_template_dummy(
template_agent: dict[str, Any],
modification_request: str,
context: str = "",
) -> dict[str, Any]:
"""Return dummy customized template (returns template with updated description)."""
logger.info("Using dummy agent generator for customize_template")
customized = template_agent.copy()
customized["description"] = (
f"{template_agent.get('description', '')} (customized: {modification_request})"
)
return customized
async def get_blocks_dummy() -> list[dict[str, Any]]:
"""Return dummy blocks list."""
logger.info("Using dummy agent generator for get_blocks")
return [
{"id": AGENT_INPUT_BLOCK_ID, "name": "AgentInputBlock"},
{"id": AGENT_OUTPUT_BLOCK_ID, "name": "AgentOutputBlock"},
]
async def health_check_dummy() -> bool:
"""Always returns healthy for dummy service."""
return True

View File

@@ -12,19 +12,8 @@ import httpx
from backend.util.settings import Settings
from .dummy import (
customize_template_dummy,
decompose_goal_dummy,
generate_agent_dummy,
generate_agent_patch_dummy,
get_blocks_dummy,
health_check_dummy,
)
logger = logging.getLogger(__name__)
_dummy_mode_warned = False
def _create_error_response(
error_message: str,
@@ -101,26 +90,10 @@ def _get_settings() -> Settings:
return _settings
def _is_dummy_mode() -> bool:
"""Check if dummy mode is enabled for testing."""
global _dummy_mode_warned
settings = _get_settings()
is_dummy = bool(settings.config.agentgenerator_use_dummy)
if is_dummy and not _dummy_mode_warned:
logger.warning(
"Agent Generator running in DUMMY MODE - returning mock responses. "
"Do not use in production!"
)
_dummy_mode_warned = True
return is_dummy
def is_external_service_configured() -> bool:
"""Check if external Agent Generator service is configured (or dummy mode)."""
"""Check if external Agent Generator service is configured."""
settings = _get_settings()
return bool(settings.config.agentgenerator_host) or bool(
settings.config.agentgenerator_use_dummy
)
return bool(settings.config.agentgenerator_host)
def _get_base_url() -> str:
@@ -164,9 +137,6 @@ async def decompose_goal_external(
- {"type": "error", "error": "...", "error_type": "..."} on error
Or None on unexpected error
"""
if _is_dummy_mode():
return await decompose_goal_dummy(description, context, library_agents)
client = _get_client()
if context:
@@ -256,11 +226,6 @@ async def generate_agent_external(
Returns:
Agent JSON dict, {"status": "accepted"} for async, or error dict {"type": "error", ...} on error
"""
if _is_dummy_mode():
return await generate_agent_dummy(
instructions, library_agents, operation_id, task_id
)
client = _get_client()
# Build request payload
@@ -332,11 +297,6 @@ async def generate_agent_patch_external(
Returns:
Updated agent JSON, clarifying questions dict, {"status": "accepted"} for async, or error dict on error
"""
if _is_dummy_mode():
return await generate_agent_patch_dummy(
update_request, current_agent, library_agents, operation_id, task_id
)
client = _get_client()
# Build request payload
@@ -423,11 +383,6 @@ async def customize_template_external(
Returns:
Customized agent JSON, clarifying questions dict, or error dict on error
"""
if _is_dummy_mode():
return await customize_template_dummy(
template_agent, modification_request, context
)
client = _get_client()
request = modification_request
@@ -490,9 +445,6 @@ async def get_blocks_external() -> list[dict[str, Any]] | None:
Returns:
List of block info dicts or None on error
"""
if _is_dummy_mode():
return await get_blocks_dummy()
client = _get_client()
try:
@@ -526,9 +478,6 @@ async def health_check() -> bool:
if not is_external_service_configured():
return False
if _is_dummy_mode():
return await health_check_dummy()
client = _get_client()
try:

View File

@@ -13,8 +13,7 @@ from backend.api.features.chat.tools.models import (
NoResultsResponse,
)
from backend.api.features.store.hybrid_search import unified_hybrid_search
from backend.blocks import get_block
from backend.blocks._base import BlockType
from backend.data.block import BlockType, get_block
logger = logging.getLogger(__name__)

View File

@@ -10,7 +10,7 @@ from backend.api.features.chat.tools.find_block import (
FindBlockTool,
)
from backend.api.features.chat.tools.models import BlockListResponse
from backend.blocks._base import BlockType
from backend.data.block import BlockType
from ._test_data import make_session

View File

@@ -12,8 +12,7 @@ from backend.api.features.chat.tools.find_block import (
COPILOT_EXCLUDED_BLOCK_IDS,
COPILOT_EXCLUDED_BLOCK_TYPES,
)
from backend.blocks import get_block
from backend.blocks._base import AnyBlockSchema
from backend.data.block import AnyBlockSchema, get_block
from backend.data.execution import ExecutionContext
from backend.data.model import CredentialsFieldInfo, CredentialsMetaInput
from backend.data.workspace import get_or_create_workspace

View File

@@ -6,7 +6,7 @@ import pytest
from backend.api.features.chat.tools.models import ErrorResponse
from backend.api.features.chat.tools.run_block import RunBlockTool
from backend.blocks._base import BlockType
from backend.data.block import BlockType
from ._test_data import make_session

View File

@@ -7,17 +7,17 @@ import prisma.errors
import prisma.models
import prisma.types
from backend.api.features.library.exceptions import FolderValidationError
import backend.api.features.store.exceptions as store_exceptions
import backend.api.features.store.image_gen as store_image_gen
import backend.api.features.store.media as store_media
import backend.data.graph as graph_db
import backend.data.integrations as integrations_db
from backend.data.block import BlockInput
from backend.data.db import transaction
from backend.data.execution import get_graph_execution
from backend.data.graph import GraphSettings
from backend.data.includes import AGENT_PRESET_INCLUDE, library_agent_include
from backend.data.model import CredentialsMetaInput, GraphInput
from backend.data.model import CredentialsMetaInput
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.integrations.webhooks.graph_lifecycle_hooks import (
on_graph_activate,
@@ -43,8 +43,6 @@ async def list_library_agents(
page: int = 1,
page_size: int = 50,
include_executions: bool = False,
folder_id: Optional[str] = None,
include_root_only: bool = False,
) -> library_model.LibraryAgentResponse:
"""
Retrieves a paginated list of LibraryAgent records for a given user.
@@ -55,8 +53,6 @@ async def list_library_agents(
sort_by: Sorting field (createdAt, updatedAt, isFavorite, isCreatedByUser).
page: Current page (1-indexed).
page_size: Number of items per page.
folder_id: Filter by folder ID. If provided, only returns agents in this folder.
include_root_only: If True, only returns agents without a folder (root-level).
include_executions: Whether to include execution data for status calculation.
Defaults to False for performance (UI fetches status separately).
Set to True when accurate status/metrics are needed (e.g., agent generator).
@@ -87,13 +83,6 @@ async def list_library_agents(
"isArchived": False,
}
# Apply folder filter
if folder_id is not None:
where_clause["folderId"] = folder_id
elif include_root_only:
where_clause["folderId"] = None
# Build search filter if applicable
if search_term:
where_clause["OR"] = [
{
@@ -646,7 +635,6 @@ async def update_library_agent(
is_archived: Optional[bool] = None,
is_deleted: Optional[Literal[False]] = None,
settings: Optional[GraphSettings] = None,
folder_id: Optional[str] = None,
) -> library_model.LibraryAgent:
"""
Updates the specified LibraryAgent record.
@@ -659,7 +647,6 @@ async def update_library_agent(
is_favorite: Whether this agent is marked as a favorite.
is_archived: Whether this agent is archived.
settings: User-specific settings for this library agent.
folder_id: Folder ID to move agent to (empty string "" for root, None to skip).
Returns:
The updated LibraryAgent.
@@ -687,7 +674,13 @@ async def update_library_agent(
)
update_fields["isDeleted"] = is_deleted
if settings is not None:
update_fields["settings"] = SafeJson(settings.model_dump())
existing_agent = await get_library_agent(id=library_agent_id, user_id=user_id)
current_settings_dict = (
existing_agent.settings.model_dump() if existing_agent.settings else {}
)
new_settings = settings.model_dump(exclude_unset=True)
merged_settings = {**current_settings_dict, **new_settings}
update_fields["settings"] = SafeJson(merged_settings)
try:
# If graph_version is provided, update to that specific version
@@ -926,788 +919,6 @@ async def add_store_agent_to_library(
raise DatabaseError("Failed to add agent to library") from e
##############################################
############ Folder DB Functions #############
##############################################
MAX_FOLDER_DEPTH = 5
async def list_folders(
user_id: str,
parent_id: Optional[str] = None,
include_counts: bool = True,
) -> list[library_model.LibraryFolder]:
"""
Lists folders for a user, optionally filtered by parent.
Args:
user_id: The ID of the user.
parent_id: If provided, only returns folders with this parent.
If None, returns root-level folders.
include_counts: Whether to include agent and subfolder counts.
Returns:
A list of LibraryFolder objects.
"""
logger.debug(f"Listing folders for user #{user_id}, parent_id={parent_id}")
try:
where_clause: prisma.types.LibraryFolderWhereInput = {
"userId": user_id,
"isDeleted": False,
"parentId": parent_id,
}
folders = await prisma.models.LibraryFolder.prisma().find_many(
where=where_clause,
order={"createdAt": "asc"},
include=(
{
"LibraryAgents": {"where": {"isDeleted": False}},
"Children": {"where": {"isDeleted": False}},
}
if include_counts
else None
),
)
result = []
for folder in folders:
agent_count = len(folder.LibraryAgents) if folder.LibraryAgents else 0
subfolder_count = len(folder.Children) if folder.Children else 0
result.append(
library_model.LibraryFolder.from_db(
folder,
agent_count=agent_count,
subfolder_count=subfolder_count,
)
)
return result
except prisma.errors.PrismaError as e:
logger.error(f"Database error listing folders: {e}")
raise DatabaseError("Failed to list folders") from e
async def get_folder_tree(
user_id: str,
) -> list[library_model.LibraryFolderTree]:
"""
Gets the full folder tree for a user.
Args:
user_id: The ID of the user.
Returns:
A list of LibraryFolderTree objects (root folders with nested children).
"""
logger.debug(f"Getting folder tree for user #{user_id}")
try:
# Fetch all folders for the user
all_folders = await prisma.models.LibraryFolder.prisma().find_many(
where={
"userId": user_id,
"isDeleted": False,
},
order={"createdAt": "asc"},
include={
"LibraryAgents": {"where": {"isDeleted": False}},
"Children": {"where": {"isDeleted": False}},
},
)
# Build a map of folder ID to folder data
folder_map: dict[str, library_model.LibraryFolderTree] = {}
for folder in all_folders:
agent_count = len(folder.LibraryAgents) if folder.LibraryAgents else 0
subfolder_count = len(folder.Children) if folder.Children else 0
folder_map[folder.id] = library_model.LibraryFolderTree(
**library_model.LibraryFolder.from_db(
folder,
agent_count=agent_count,
subfolder_count=subfolder_count,
).model_dump(),
children=[],
)
# Build the tree structure
root_folders: list[library_model.LibraryFolderTree] = []
for folder in all_folders:
tree_folder = folder_map[folder.id]
if folder.parentId and folder.parentId in folder_map:
folder_map[folder.parentId].children.append(tree_folder)
else:
root_folders.append(tree_folder)
return root_folders
except prisma.errors.PrismaError as e:
logger.error(f"Database error getting folder tree: {e}")
raise DatabaseError("Failed to get folder tree") from e
async def get_folder(
folder_id: str,
user_id: str,
) -> library_model.LibraryFolder:
"""
Gets a single folder by ID.
Args:
folder_id: The ID of the folder.
user_id: The ID of the user (for ownership verification).
Returns:
The LibraryFolder object.
Raises:
NotFoundError: If the folder doesn't exist or doesn't belong to the user.
"""
try:
folder = await prisma.models.LibraryFolder.prisma().find_first(
where={
"id": folder_id,
"userId": user_id,
"isDeleted": False,
},
include={
"LibraryAgents": {"where": {"isDeleted": False}},
"Children": {"where": {"isDeleted": False}},
},
)
if not folder:
raise NotFoundError(f"Folder #{folder_id} not found")
agent_count = len(folder.LibraryAgents) if folder.LibraryAgents else 0
subfolder_count = len(folder.Children) if folder.Children else 0
return library_model.LibraryFolder.from_db(
folder,
agent_count=agent_count,
subfolder_count=subfolder_count,
)
except prisma.errors.PrismaError as e:
logger.error(f"Database error getting folder: {e}")
raise DatabaseError("Failed to get folder") from e
async def get_folder_depth(folder_id: str, user_id: str) -> int:
"""
Calculate the depth of a folder in the hierarchy (root=0).
Args:
folder_id: The ID of the folder.
user_id: The ID of the user.
Returns:
The depth of the folder (0 for root-level folders).
"""
depth = 0
current_id: str | None = folder_id
while current_id:
folder = await prisma.models.LibraryFolder.prisma().find_first(
where={
"id": current_id,
"userId": user_id,
"isDeleted": False,
}
)
if not folder:
break
if folder.parentId:
depth += 1
current_id = folder.parentId
else:
break
return depth
async def is_descendant_of(
folder_id: str,
potential_ancestor_id: str,
user_id: str,
) -> bool:
"""
Check if folder_id is a descendant of potential_ancestor_id.
Args:
folder_id: The ID of the folder to check.
potential_ancestor_id: The ID of the potential ancestor.
user_id: The ID of the user.
Returns:
True if folder_id is a descendant of potential_ancestor_id.
"""
current_id: str | None = folder_id
while current_id:
if current_id == potential_ancestor_id:
return True
folder = await prisma.models.LibraryFolder.prisma().find_first(
where={
"id": current_id,
"userId": user_id,
"isDeleted": False,
}
)
if not folder or not folder.parentId:
break
current_id = folder.parentId
return False
async def validate_folder_operation(
folder_id: Optional[str],
target_parent_id: Optional[str],
user_id: str,
max_depth: int = MAX_FOLDER_DEPTH,
) -> None:
"""
Validate that a folder move/create operation is valid.
Args:
folder_id: The ID of the folder being moved (None for create).
target_parent_id: The target parent ID (None for root).
user_id: The ID of the user.
max_depth: Maximum allowed nesting depth.
Raises:
FolderValidationError: If the operation is invalid.
"""
# Cannot move folder into itself
if folder_id and folder_id == target_parent_id:
raise FolderValidationError("Cannot move folder into itself")
# Check for circular reference
if folder_id and target_parent_id:
if await is_descendant_of(target_parent_id, folder_id, user_id):
raise FolderValidationError("Cannot move folder into its own descendant")
# Check depth limit
if target_parent_id:
parent_depth = await get_folder_depth(target_parent_id, user_id)
if parent_depth + 1 >= max_depth:
raise FolderValidationError(
f"Maximum folder nesting depth of {max_depth} exceeded"
)
async def create_folder(
user_id: str,
name: str,
parent_id: Optional[str] = None,
icon: Optional[str] = None,
color: Optional[str] = None,
) -> library_model.LibraryFolder:
"""
Creates a new folder for the user.
Args:
user_id: The ID of the user.
name: The folder name.
parent_id: Optional parent folder ID.
icon: Optional icon identifier.
color: Optional hex color code.
Returns:
The created LibraryFolder.
Raises:
FolderValidationError: If validation fails.
DatabaseError: If there's a database error.
"""
logger.debug(f"Creating folder '{name}' for user #{user_id}")
try:
# Validate operation
await validate_folder_operation(
folder_id=None,
target_parent_id=parent_id,
user_id=user_id,
)
# Verify parent exists if provided
if parent_id:
parent = await prisma.models.LibraryFolder.prisma().find_first(
where={
"id": parent_id,
"userId": user_id,
"isDeleted": False,
}
)
if not parent:
raise NotFoundError(f"Parent folder #{parent_id} not found")
# Build data dict conditionally - don't include Parent key if no parent_id
create_data: dict = {
"name": name,
"User": {"connect": {"id": user_id}},
}
if icon is not None:
create_data["icon"] = icon
if color is not None:
create_data["color"] = color
if parent_id:
create_data["Parent"] = {"connect": {"id": parent_id}}
folder = await prisma.models.LibraryFolder.prisma().create(data=create_data)
return library_model.LibraryFolder.from_db(folder)
except prisma.errors.UniqueViolationError:
raise FolderValidationError(
"A folder with this name already exists in this location"
)
except prisma.errors.PrismaError as e:
logger.error(f"Database error creating folder: {e}")
raise DatabaseError("Failed to create folder") from e
async def create_folder_with_unique_name(
user_id: str,
base_name: str,
parent_id: Optional[str] = None,
icon: Optional[str] = None,
color: Optional[str] = None,
) -> library_model.LibraryFolder:
"""
Creates a folder, appending (2), (3), etc. if name exists.
Args:
user_id: The ID of the user.
base_name: The base folder name.
parent_id: Optional parent folder ID.
icon: Optional icon identifier.
color: Optional hex color code.
Returns:
The created LibraryFolder.
"""
name = base_name
suffix = 1
while True:
try:
return await create_folder(
user_id=user_id,
name=name,
parent_id=parent_id,
icon=icon,
color=color,
)
except FolderValidationError as e:
if "already exists" in str(e):
suffix += 1
name = f"{base_name} ({suffix})"
else:
raise
async def update_folder(
folder_id: str,
user_id: str,
name: Optional[str] = None,
icon: Optional[str] = None,
color: Optional[str] = None,
) -> library_model.LibraryFolder:
"""
Updates a folder's properties.
Args:
folder_id: The ID of the folder to update.
user_id: The ID of the user.
name: New folder name.
icon: New icon identifier.
color: New hex color code.
Returns:
The updated LibraryFolder.
Raises:
NotFoundError: If the folder doesn't exist.
DatabaseError: If there's a database error.
"""
logger.debug(f"Updating folder #{folder_id} for user #{user_id}")
try:
# Verify folder exists and belongs to user
existing = await prisma.models.LibraryFolder.prisma().find_first(
where={
"id": folder_id,
"userId": user_id,
"isDeleted": False,
}
)
if not existing:
raise NotFoundError(f"Folder #{folder_id} not found")
update_data: prisma.types.LibraryFolderUpdateInput = {}
if name is not None:
update_data["name"] = name
if icon is not None:
update_data["icon"] = icon
if color is not None:
update_data["color"] = color
if not update_data:
return await get_folder(folder_id, user_id)
folder = await prisma.models.LibraryFolder.prisma().update(
where={"id": folder_id},
data=update_data,
include={
"LibraryAgents": {"where": {"isDeleted": False}},
"Children": {"where": {"isDeleted": False}},
},
)
if not folder:
raise NotFoundError(f"Folder #{folder_id} not found")
agent_count = len(folder.LibraryAgents) if folder.LibraryAgents else 0
subfolder_count = len(folder.Children) if folder.Children else 0
return library_model.LibraryFolder.from_db(
folder,
agent_count=agent_count,
subfolder_count=subfolder_count,
)
except prisma.errors.UniqueViolationError:
raise FolderValidationError(
"A folder with this name already exists in this location"
)
except prisma.errors.PrismaError as e:
logger.error(f"Database error updating folder: {e}")
raise DatabaseError("Failed to update folder") from e
async def move_folder(
folder_id: str,
user_id: str,
target_parent_id: Optional[str],
) -> library_model.LibraryFolder:
"""
Moves a folder to a new parent.
Args:
folder_id: The ID of the folder to move.
user_id: The ID of the user.
target_parent_id: The target parent ID (None for root).
Returns:
The moved LibraryFolder.
Raises:
FolderValidationError: If the move is invalid.
NotFoundError: If the folder doesn't exist.
DatabaseError: If there's a database error.
"""
logger.debug(f"Moving folder #{folder_id} to parent #{target_parent_id}")
try:
# Validate operation
await validate_folder_operation(
folder_id=folder_id,
target_parent_id=target_parent_id,
user_id=user_id,
)
# Verify folder exists
existing = await prisma.models.LibraryFolder.prisma().find_first(
where={
"id": folder_id,
"userId": user_id,
"isDeleted": False,
}
)
if not existing:
raise NotFoundError(f"Folder #{folder_id} not found")
# Verify target parent exists if provided
if target_parent_id:
parent = await prisma.models.LibraryFolder.prisma().find_first(
where={
"id": target_parent_id,
"userId": user_id,
"isDeleted": False,
}
)
if not parent:
raise NotFoundError(
f"Target parent folder #{target_parent_id} not found"
)
folder = await prisma.models.LibraryFolder.prisma().update(
where={"id": folder_id},
data={
"parentId": target_parent_id,
},
include={
"LibraryAgents": {"where": {"isDeleted": False}},
"Children": {"where": {"isDeleted": False}},
},
)
if not folder:
raise NotFoundError(f"Folder #{folder_id} not found")
agent_count = len(folder.LibraryAgents) if folder.LibraryAgents else 0
subfolder_count = len(folder.Children) if folder.Children else 0
return library_model.LibraryFolder.from_db(
folder,
agent_count=agent_count,
subfolder_count=subfolder_count,
)
except prisma.errors.UniqueViolationError:
raise FolderValidationError(
"A folder with this name already exists in this location"
)
except prisma.errors.PrismaError as e:
logger.error(f"Database error moving folder: {e}")
raise DatabaseError("Failed to move folder") from e
async def delete_folder(
folder_id: str,
user_id: str,
soft_delete: bool = True,
) -> None:
"""
Deletes a folder and all its contents (cascade).
Args:
folder_id: The ID of the folder to delete.
user_id: The ID of the user.
soft_delete: If True, soft-deletes; otherwise hard-deletes.
Raises:
NotFoundError: If the folder doesn't exist.
DatabaseError: If there's a database error.
"""
logger.debug(f"Deleting folder #{folder_id} for user #{user_id}")
try:
# Verify folder exists
existing = await prisma.models.LibraryFolder.prisma().find_first(
where={
"id": folder_id,
"userId": user_id,
"isDeleted": False,
}
)
if not existing:
raise NotFoundError(f"Folder #{folder_id} not found")
async with transaction() as tx:
# Get all descendant folders recursively
descendant_ids = await _get_descendant_folder_ids(folder_id, user_id, tx)
all_folder_ids = [folder_id] + descendant_ids
if soft_delete:
# Soft-delete all agents in these folders
await prisma.models.LibraryAgent.prisma(tx).update_many(
where={
"folderId": {"in": all_folder_ids},
"userId": user_id,
},
data={"isDeleted": True},
)
# Soft-delete all folders
await prisma.models.LibraryFolder.prisma(tx).update_many(
where={
"id": {"in": all_folder_ids},
"userId": user_id,
},
data={"isDeleted": True},
)
else:
# Move agents to root (or could hard-delete them)
await prisma.models.LibraryAgent.prisma(tx).update_many(
where={
"folderId": {"in": all_folder_ids},
"userId": user_id,
},
data={"folderId": None},
)
# Hard-delete folders (children first due to FK constraints)
for fid in reversed(all_folder_ids):
await prisma.models.LibraryFolder.prisma(tx).delete(
where={"id": fid}
)
except prisma.errors.PrismaError as e:
logger.error(f"Database error deleting folder: {e}")
raise DatabaseError("Failed to delete folder") from e
async def _get_descendant_folder_ids(
folder_id: str,
user_id: str,
tx: Optional[prisma.Prisma] = None,
) -> list[str]:
"""
Recursively get all descendant folder IDs.
Args:
folder_id: The ID of the parent folder.
user_id: The ID of the user.
tx: Optional transaction.
Returns:
A list of descendant folder IDs.
"""
prisma_client = prisma.models.LibraryFolder.prisma(tx)
children = await prisma_client.find_many(
where={
"parentId": folder_id,
"userId": user_id,
"isDeleted": False,
}
)
result: list[str] = []
for child in children:
result.append(child.id)
result.extend(await _get_descendant_folder_ids(child.id, user_id, tx))
return result
async def move_agent_to_folder(
library_agent_id: str,
folder_id: Optional[str],
user_id: str,
) -> library_model.LibraryAgent:
"""
Moves a library agent to a folder.
Args:
library_agent_id: The ID of the library agent.
folder_id: The target folder ID (None for root).
user_id: The ID of the user.
Returns:
The updated LibraryAgent.
Raises:
NotFoundError: If the agent or folder doesn't exist.
DatabaseError: If there's a database error.
"""
logger.debug(f"Moving agent #{library_agent_id} to folder #{folder_id}")
try:
# Verify agent exists
agent = await prisma.models.LibraryAgent.prisma().find_first(
where={
"id": library_agent_id,
"userId": user_id,
"isDeleted": False,
}
)
if not agent:
raise NotFoundError(f"Library agent #{library_agent_id} not found")
# Verify folder exists if provided
if folder_id:
folder = await prisma.models.LibraryFolder.prisma().find_first(
where={
"id": folder_id,
"userId": user_id,
"isDeleted": False,
}
)
if not folder:
raise NotFoundError(f"Folder #{folder_id} not found")
await prisma.models.LibraryAgent.prisma().update(
where={"id": library_agent_id},
data={"folderId": folder_id},
)
return await get_library_agent(library_agent_id, user_id)
except prisma.errors.PrismaError as e:
logger.error(f"Database error moving agent to folder: {e}")
raise DatabaseError("Failed to move agent to folder") from e
async def bulk_move_agents_to_folder(
agent_ids: list[str],
folder_id: Optional[str],
user_id: str,
) -> list[library_model.LibraryAgent]:
"""
Moves multiple library agents to a folder.
Args:
agent_ids: The IDs of the library agents.
folder_id: The target folder ID (None for root).
user_id: The ID of the user.
Returns:
The updated LibraryAgents.
Raises:
NotFoundError: If any agent or the folder doesn't exist.
DatabaseError: If there's a database error.
"""
logger.debug(f"Bulk moving {len(agent_ids)} agents to folder #{folder_id}")
try:
# Verify folder exists if provided
if folder_id:
folder = await prisma.models.LibraryFolder.prisma().find_first(
where={
"id": folder_id,
"userId": user_id,
"isDeleted": False,
}
)
if not folder:
raise NotFoundError(f"Folder #{folder_id} not found")
# Update all agents
await prisma.models.LibraryAgent.prisma().update_many(
where={
"id": {"in": agent_ids},
"userId": user_id,
"isDeleted": False,
},
data={"folderId": folder_id},
)
# Fetch and return updated agents
agents = await prisma.models.LibraryAgent.prisma().find_many(
where={
"id": {"in": agent_ids},
"userId": user_id,
},
include=library_agent_include(
user_id, include_nodes=False, include_executions=False
),
)
return [library_model.LibraryAgent.from_db(agent) for agent in agents]
except prisma.errors.PrismaError as e:
logger.error(f"Database error bulk moving agents to folder: {e}")
raise DatabaseError("Failed to bulk move agents to folder") from e
##############################################
########### Presets DB Functions #############
##############################################
@@ -1919,7 +1130,7 @@ async def create_preset_from_graph_execution(
async def update_preset(
user_id: str,
preset_id: str,
inputs: Optional[GraphInput] = None,
inputs: Optional[BlockInput] = None,
credentials: Optional[dict[str, CredentialsMetaInput]] = None,
name: Optional[str] = None,
description: Optional[str] = None,
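
The settings handling in update_library_agent above changes from outright replacement to a shallow merge, so a partial update no longer wipes fields the caller didn't send. A minimal sketch of the merge semantics, using a hypothetical GraphSettings shape (the real fields aren't shown in this compare):

from pydantic import BaseModel

class GraphSettings(BaseModel):  # hypothetical fields, for illustration only
    human_in_the_loop: bool = False
    webhook_url: str | None = None

current = GraphSettings(human_in_the_loop=True, webhook_url="https://example.com/hook")
patch = GraphSettings(webhook_url=None)  # caller only sets webhook_url

# exclude_unset keeps only the fields the caller actually provided,
# so human_in_the_loop survives the update
merged = {**current.model_dump(), **patch.model_dump(exclude_unset=True)}
assert merged == {"human_in_the_loop": True, "webhook_url": None}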

View File

@@ -1,4 +0,0 @@
class FolderValidationError(Exception):
"""Raised when folder operations fail validation."""
pass

View File

@@ -6,12 +6,9 @@ import prisma.enums
import prisma.models
import pydantic
from backend.data.block import BlockInput
from backend.data.graph import GraphModel, GraphSettings, GraphTriggerInfo
from backend.data.model import (
CredentialsMetaInput,
GraphInput,
is_credentials_field_name,
)
from backend.data.model import CredentialsMetaInput, is_credentials_field_name
from backend.util.json import loads as json_loads
from backend.util.models import Pagination
@@ -26,95 +23,6 @@ class LibraryAgentStatus(str, Enum):
ERROR = "ERROR"
# === Folder Models ===
class LibraryFolder(pydantic.BaseModel):
"""Represents a folder for organizing library agents."""
id: str
user_id: str
name: str
icon: str | None = None
color: str | None = None
parent_id: str | None = None
created_at: datetime.datetime
updated_at: datetime.datetime
agent_count: int = 0 # Direct agents in folder
subfolder_count: int = 0 # Direct child folders
@staticmethod
def from_db(
folder: prisma.models.LibraryFolder,
agent_count: int = 0,
subfolder_count: int = 0,
) -> "LibraryFolder":
"""Factory method that constructs a LibraryFolder from a Prisma model."""
return LibraryFolder(
id=folder.id,
user_id=folder.userId,
name=folder.name,
icon=folder.icon,
color=folder.color,
parent_id=folder.parentId,
created_at=folder.createdAt,
updated_at=folder.updatedAt,
agent_count=agent_count,
subfolder_count=subfolder_count,
)
class LibraryFolderTree(LibraryFolder):
"""Folder with nested children for tree view."""
children: list["LibraryFolderTree"] = []
class FolderCreateRequest(pydantic.BaseModel):
"""Request model for creating a folder."""
name: str = pydantic.Field(..., min_length=1, max_length=100)
icon: str | None = None
color: str | None = pydantic.Field(
None, pattern=r"^#[0-9A-Fa-f]{6}$", description="Hex color code (#RRGGBB)"
)
parent_id: str | None = None
class FolderUpdateRequest(pydantic.BaseModel):
"""Request model for updating a folder."""
name: str | None = pydantic.Field(None, min_length=1, max_length=100)
icon: str | None = None
color: str | None = None
class FolderMoveRequest(pydantic.BaseModel):
"""Request model for moving a folder to a new parent."""
target_parent_id: str | None = None # None = move to root
class BulkMoveAgentsRequest(pydantic.BaseModel):
"""Request model for moving multiple agents to a folder."""
agent_ids: list[str]
folder_id: str | None = None # None = move to root
class FolderListResponse(pydantic.BaseModel):
"""Response schema for a list of folders."""
folders: list[LibraryFolder]
pagination: Pagination
class FolderTreeResponse(pydantic.BaseModel):
"""Response schema for folder tree structure."""
tree: list[LibraryFolderTree]
class MarketplaceListingCreator(pydantic.BaseModel):
"""Creator information for a marketplace listing."""
@@ -209,9 +117,6 @@ class LibraryAgent(pydantic.BaseModel):
can_access_graph: bool
is_latest_version: bool
is_favorite: bool
folder_id: str | None = None
folder_name: str | None = None # Denormalized for display
recommended_schedule_cron: str | None = None
settings: GraphSettings = pydantic.Field(default_factory=GraphSettings)
marketplace_listing: Optional["MarketplaceListing"] = None
@@ -320,10 +225,6 @@ class LibraryAgent(pydantic.BaseModel):
creator=creator_data,
)
# Folder information
folder_id = agent.folderId
folder_name = agent.Folder.name if agent.Folder else None
return LibraryAgent(
id=agent.id,
graph_id=agent.agentGraphId,
@@ -355,8 +256,6 @@ class LibraryAgent(pydantic.BaseModel):
can_access_graph=can_access_graph,
is_latest_version=is_latest_version,
is_favorite=agent.isFavorite,
folder_id=folder_id,
folder_name=folder_name,
recommended_schedule_cron=agent.AgentGraph.recommendedScheduleCron,
settings=_parse_settings(agent.settings),
marketplace_listing=marketplace_listing_data,
@@ -424,7 +323,7 @@ class LibraryAgentPresetCreatable(pydantic.BaseModel):
graph_id: str
graph_version: int
inputs: GraphInput
inputs: BlockInput
credentials: dict[str, CredentialsMetaInput]
name: str
@@ -453,7 +352,7 @@ class LibraryAgentPresetUpdatable(pydantic.BaseModel):
Request model used when updating a preset for a library agent.
"""
inputs: Optional[GraphInput] = None
inputs: Optional[BlockInput] = None
credentials: Optional[dict[str, CredentialsMetaInput]] = None
name: Optional[str] = None
@@ -496,7 +395,7 @@ class LibraryAgentPreset(LibraryAgentPresetCreatable):
"Webhook must be included in AgentPreset query when webhookId is set"
)
input_data: GraphInput = {}
input_data: BlockInput = {}
input_credentials: dict[str, CredentialsMetaInput] = {}
for preset_input in preset.InputPresets:
@@ -568,7 +467,3 @@ class LibraryAgentUpdateRequest(pydantic.BaseModel):
settings: Optional[GraphSettings] = pydantic.Field(
default=None, description="User-specific settings for this library agent"
)
folder_id: Optional[str] = pydantic.Field(
default=None,
description="Folder ID to move agent to (empty string for root)",
)

View File

@@ -1,11 +1,9 @@
import fastapi
from .agents import router as agents_router
from .folders import router as folders_router
from .presets import router as presets_router
router = fastapi.APIRouter()
router.include_router(presets_router)
router.include_router(folders_router)
router.include_router(agents_router)

View File

@@ -41,34 +41,17 @@ async def list_library_agents(
ge=1,
description="Number of agents per page (must be >= 1)",
),
folder_id: Optional[str] = Query(
None,
description="Filter by folder ID",
),
include_root_only: bool = Query(
False,
description="Only return agents without a folder (root-level agents)",
),
) -> library_model.LibraryAgentResponse:
"""
Get all agents in the user's library (both created and saved).
"""
try:
return await library_db.list_library_agents(
user_id=user_id,
search_term=search_term,
sort_by=sort_by,
page=page,
page_size=page_size,
folder_id=folder_id,
include_root_only=include_root_only,
)
except Exception as e:
logger.error(f"Could not list library agents for user #{user_id}: {e}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=str(e),
) from e
return await library_db.list_library_agents(
user_id=user_id,
search_term=search_term,
sort_by=sort_by,
page=page,
page_size=page_size,
)
@router.get(
@@ -177,34 +160,15 @@ async def update_library_agent(
"""
Update the library agent with the given fields.
"""
try:
return await library_db.update_library_agent(
library_agent_id=library_agent_id,
user_id=user_id,
auto_update_version=payload.auto_update_version,
graph_version=payload.graph_version,
is_favorite=payload.is_favorite,
is_archived=payload.is_archived,
settings=payload.settings,
folder_id=payload.folder_id,
)
except NotFoundError as e:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=str(e),
) from e
except DatabaseError as e:
logger.error(f"Database error while updating library agent: {e}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail={"message": str(e), "hint": "Verify DB connection."},
) from e
except Exception as e:
logger.error(f"Unexpected error while updating library agent: {e}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail={"message": str(e), "hint": "Check server logs."},
) from e
return await library_db.update_library_agent(
library_agent_id=library_agent_id,
user_id=user_id,
auto_update_version=payload.auto_update_version,
graph_version=payload.graph_version,
is_favorite=payload.is_favorite,
is_archived=payload.is_archived,
settings=payload.settings,
)
@router.delete(

View File

@@ -1,408 +0,0 @@
import logging
from typing import Optional
import autogpt_libs.auth as autogpt_auth_lib
from fastapi import APIRouter, HTTPException, Query, Security, status
from fastapi.responses import Response
from backend.util.exceptions import DatabaseError, NotFoundError
from .. import db as library_db
from .. import model as library_model
logger = logging.getLogger(__name__)
router = APIRouter(
prefix="/folders",
tags=["library", "folders", "private"],
dependencies=[Security(autogpt_auth_lib.requires_user)],
)
@router.get(
"",
summary="List Library Folders",
response_model=library_model.FolderListResponse,
responses={
200: {"description": "List of folders"},
500: {"description": "Server error"},
},
)
async def list_folders(
user_id: str = Security(autogpt_auth_lib.get_user_id),
parent_id: Optional[str] = Query(
None,
description="Filter by parent folder ID. If not provided, returns root-level folders.",
),
include_counts: bool = Query(
True,
description="Include agent and subfolder counts",
),
) -> library_model.FolderListResponse:
"""
List folders for the authenticated user.
Args:
user_id: ID of the authenticated user.
parent_id: Optional parent folder ID to filter by.
include_counts: Whether to include agent and subfolder counts.
Returns:
A FolderListResponse containing folders.
"""
try:
folders = await library_db.list_folders(
user_id=user_id,
parent_id=parent_id,
include_counts=include_counts,
)
return library_model.FolderListResponse(
folders=folders,
pagination=library_model.Pagination(
total_items=len(folders),
total_pages=1,
current_page=1,
page_size=len(folders),
),
)
except Exception as e:
logger.error(f"Could not list folders for user #{user_id}: {e}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=str(e),
) from e
@router.get(
"/tree",
summary="Get Folder Tree",
response_model=library_model.FolderTreeResponse,
responses={
200: {"description": "Folder tree structure"},
500: {"description": "Server error"},
},
)
async def get_folder_tree(
user_id: str = Security(autogpt_auth_lib.get_user_id),
) -> library_model.FolderTreeResponse:
"""
Get the full folder tree for the authenticated user.
Args:
user_id: ID of the authenticated user.
Returns:
A FolderTreeResponse containing the nested folder structure.
"""
try:
tree = await library_db.get_folder_tree(user_id=user_id)
return library_model.FolderTreeResponse(tree=tree)
except Exception as e:
logger.error(f"Could not get folder tree for user #{user_id}: {e}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=str(e),
) from e
@router.get(
"/{folder_id}",
summary="Get Folder",
response_model=library_model.LibraryFolder,
responses={
200: {"description": "Folder details"},
404: {"description": "Folder not found"},
500: {"description": "Server error"},
},
)
async def get_folder(
folder_id: str,
user_id: str = Security(autogpt_auth_lib.get_user_id),
) -> library_model.LibraryFolder:
"""
Get a specific folder.
Args:
folder_id: ID of the folder to retrieve.
user_id: ID of the authenticated user.
Returns:
The requested LibraryFolder.
"""
try:
return await library_db.get_folder(folder_id=folder_id, user_id=user_id)
except NotFoundError as e:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=str(e),
) from e
except Exception as e:
logger.error(f"Could not get folder #{folder_id}: {e}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=str(e),
) from e
@router.post(
"",
summary="Create Folder",
status_code=status.HTTP_201_CREATED,
response_model=library_model.LibraryFolder,
responses={
201: {"description": "Folder created successfully"},
400: {"description": "Validation error"},
404: {"description": "Parent folder not found"},
409: {"description": "Folder name conflict"},
500: {"description": "Server error"},
},
)
async def create_folder(
payload: library_model.FolderCreateRequest,
user_id: str = Security(autogpt_auth_lib.get_user_id),
) -> library_model.LibraryFolder:
"""
Create a new folder.
Args:
payload: The folder creation request.
user_id: ID of the authenticated user.
Returns:
The created LibraryFolder.
"""
try:
return await library_db.create_folder(
user_id=user_id,
name=payload.name,
parent_id=payload.parent_id,
icon=payload.icon,
color=payload.color,
)
except library_db.FolderValidationError as e:
if "already exists" in str(e):
raise HTTPException(
status_code=status.HTTP_409_CONFLICT,
detail=str(e),
) from e
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=str(e),
) from e
except NotFoundError as e:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=str(e),
) from e
except DatabaseError as e:
logger.error(f"Database error creating folder: {e}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=str(e),
) from e
@router.patch(
"/{folder_id}",
summary="Update Folder",
response_model=library_model.LibraryFolder,
responses={
200: {"description": "Folder updated successfully"},
400: {"description": "Validation error"},
404: {"description": "Folder not found"},
409: {"description": "Folder name conflict"},
500: {"description": "Server error"},
},
)
async def update_folder(
folder_id: str,
payload: library_model.FolderUpdateRequest,
user_id: str = Security(autogpt_auth_lib.get_user_id),
) -> library_model.LibraryFolder:
"""
Update a folder's properties.
Args:
folder_id: ID of the folder to update.
payload: The folder update request.
user_id: ID of the authenticated user.
Returns:
The updated LibraryFolder.
"""
try:
return await library_db.update_folder(
folder_id=folder_id,
user_id=user_id,
name=payload.name,
icon=payload.icon,
color=payload.color,
)
except library_db.FolderValidationError as e:
if "already exists" in str(e):
raise HTTPException(
status_code=status.HTTP_409_CONFLICT,
detail=str(e),
) from e
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=str(e),
) from e
except NotFoundError as e:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=str(e),
) from e
except DatabaseError as e:
logger.error(f"Database error updating folder #{folder_id}: {e}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=str(e),
) from e
@router.post(
"/{folder_id}/move",
summary="Move Folder",
response_model=library_model.LibraryFolder,
responses={
200: {"description": "Folder moved successfully"},
400: {"description": "Validation error (circular reference, depth exceeded)"},
404: {"description": "Folder or target parent not found"},
409: {"description": "Folder name conflict in target location"},
500: {"description": "Server error"},
},
)
async def move_folder(
folder_id: str,
payload: library_model.FolderMoveRequest,
user_id: str = Security(autogpt_auth_lib.get_user_id),
) -> library_model.LibraryFolder:
"""
Move a folder to a new parent.
Args:
folder_id: ID of the folder to move.
payload: The move request with target parent.
user_id: ID of the authenticated user.
Returns:
The moved LibraryFolder.
"""
try:
return await library_db.move_folder(
folder_id=folder_id,
user_id=user_id,
target_parent_id=payload.target_parent_id,
)
except library_db.FolderValidationError as e:
if "already exists" in str(e):
raise HTTPException(
status_code=status.HTTP_409_CONFLICT,
detail=str(e),
) from e
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=str(e),
) from e
except NotFoundError as e:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=str(e),
) from e
except DatabaseError as e:
logger.error(f"Database error moving folder #{folder_id}: {e}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=str(e),
) from e
@router.delete(
"/{folder_id}",
summary="Delete Folder",
status_code=status.HTTP_204_NO_CONTENT,
responses={
204: {"description": "Folder deleted successfully"},
404: {"description": "Folder not found"},
500: {"description": "Server error"},
},
)
async def delete_folder(
folder_id: str,
user_id: str = Security(autogpt_auth_lib.get_user_id),
) -> Response:
"""
Soft-delete a folder and all its contents.
Args:
folder_id: ID of the folder to delete.
user_id: ID of the authenticated user.
Returns:
204 No Content if successful.
"""
try:
await library_db.delete_folder(
folder_id=folder_id,
user_id=user_id,
soft_delete=True,
)
return Response(status_code=status.HTTP_204_NO_CONTENT)
except NotFoundError as e:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=str(e),
) from e
except DatabaseError as e:
logger.error(f"Database error deleting folder #{folder_id}: {e}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=str(e),
) from e
# === Bulk Agent Operations ===
@router.post(
"/agents/bulk-move",
summary="Bulk Move Agents",
response_model=list[library_model.LibraryAgent],
responses={
200: {"description": "Agents moved successfully"},
404: {"description": "Folder not found"},
500: {"description": "Server error"},
},
)
async def bulk_move_agents(
payload: library_model.BulkMoveAgentsRequest,
user_id: str = Security(autogpt_auth_lib.get_user_id),
) -> list[library_model.LibraryAgent]:
"""
Move multiple agents to a folder.
Args:
payload: The bulk move request with agent IDs and target folder.
user_id: ID of the authenticated user.
Returns:
The updated LibraryAgents.
"""
try:
return await library_db.bulk_move_agents_to_folder(
agent_ids=payload.agent_ids,
folder_id=payload.folder_id,
user_id=user_id,
)
except NotFoundError as e:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=str(e),
) from e
except DatabaseError as e:
logger.error(f"Database error bulk moving agents: {e}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=str(e),
) from e
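Before its deletion, this folders router repeated the same error mapping in create_folder, update_folder, and move_folder: a FolderValidationError mentioning "already exists" becomes 409 Conflict, any other validation failure 400 Bad Request. A sketch of that mapping factored into a helper; FolderValidationError is stubbed here because the original lived in the now-removed library_db module:

from fastapi import HTTPException, status

class FolderValidationError(Exception):
    """Stand-in for library_db.FolderValidationError from the deleted module."""

def to_http_error(e: FolderValidationError) -> HTTPException:
    # Name conflicts map to 409, all other validation failures to 400
    if "already exists" in str(e):
        return HTTPException(status_code=status.HTTP_409_CONFLICT, detail=str(e))
    return HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e))

# Usage inside an endpoint:
#     except FolderValidationError as e:
#         raise to_http_error(e) from e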

View File

@@ -5,8 +5,8 @@ from typing import Optional
import aiohttp
from fastapi import HTTPException
from backend.blocks import get_block
from backend.data import graph as graph_db
from backend.data.block import get_block
from backend.util.settings import Settings
from .models import ApiResponse, ChatRequest, GraphData

View File

@@ -152,7 +152,7 @@ class BlockHandler(ContentHandler):
async def get_missing_items(self, batch_size: int) -> list[ContentItem]:
"""Fetch blocks without embeddings."""
from backend.blocks import get_blocks
from backend.data.block import get_blocks
# Get all available blocks
all_blocks = get_blocks()
@@ -183,60 +183,70 @@ class BlockHandler(ContentHandler):
]
# Convert to ContentItem
from backend.blocks.llm import LlmModel
items = []
for block_id, block_cls in missing_blocks[:batch_size]:
try:
block_instance = block_cls()
# Skip disabled blocks - they shouldn't be indexed
if block_instance.disabled:
continue
# Build searchable text from block metadata
parts = []
if hasattr(block_instance, "name") and block_instance.name:
if block_instance.name:
parts.append(block_instance.name)
if (
hasattr(block_instance, "description")
and block_instance.description
):
if block_instance.description:
parts.append(block_instance.description)
if hasattr(block_instance, "categories") and block_instance.categories:
# Convert BlockCategory enum to strings
if block_instance.categories:
parts.append(
" ".join(str(cat.value) for cat in block_instance.categories)
)
# Add input/output schema info
if hasattr(block_instance, "input_schema"):
schema = block_instance.input_schema
if hasattr(schema, "model_json_schema"):
schema_dict = schema.model_json_schema()
if "properties" in schema_dict:
for prop_name, prop_info in schema_dict[
"properties"
].items():
if "description" in prop_info:
parts.append(
f"{prop_name}: {prop_info['description']}"
)
# Add input schema field descriptions
schema_dict = block_instance.input_schema.model_json_schema()
if "properties" in schema_dict:
for prop_name, prop_info in schema_dict["properties"].items():
if "description" in prop_info:
parts.append(f"{prop_name}: {prop_info['description']}")
searchable_text = " ".join(parts)
# Convert categories set of enums to list of strings for JSON serialization
categories = getattr(block_instance, "categories", set())
categories_list = (
[cat.value for cat in categories] if categories else []
[cat.value for cat in block_instance.categories]
if block_instance.categories
else []
)
# Extract provider names from credentials fields
provider_names: list[str] = []
credentials_info = (
block_instance.input_schema.get_credentials_fields_info()
)
is_integration = len(credentials_info) > 0
for info in credentials_info.values():
for provider in info.provider:
provider_names.append(provider.value.lower())
# Check if block has LlmModel field in input schema
has_llm_model_field = False
for field in block_instance.input_schema.model_fields.values():
if field.annotation == LlmModel:
has_llm_model_field = True
break
items.append(
ContentItem(
content_id=block_id,
content_type=ContentType.BLOCK,
searchable_text=searchable_text,
metadata={
"name": getattr(block_instance, "name", ""),
"name": block_instance.name,
"categories": categories_list,
"providers": provider_names,
"has_llm_model_field": has_llm_model_field,
"is_integration": is_integration,
},
user_id=None, # Blocks are public
)
@@ -249,7 +259,7 @@ class BlockHandler(ContentHandler):
async def get_stats(self) -> dict[str, int]:
"""Get statistics about block embedding coverage."""
from backend.blocks import get_blocks
from backend.data.block import get_blocks
all_blocks = get_blocks()
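The reworked handler assumes block attributes always exist (the hasattr guards are dropped) and concatenates name, description, category values, and input-schema field descriptions into one searchable string. A standalone sketch of that assembly, with illustrative block metadata:

def build_searchable_text(
    name: str, description: str, categories: set[str], schema_dict: dict
) -> str:
    parts = []
    if name:
        parts.append(name)
    if description:
        parts.append(description)
    if categories:
        parts.append(" ".join(sorted(categories)))
    # Input schema field descriptions, as in the loop above
    for prop_name, prop_info in schema_dict.get("properties", {}).items():
        if "description" in prop_info:
            parts.append(f"{prop_name}: {prop_info['description']}")
    return " ".join(parts)

print(build_searchable_text(
    "CalculatorBlock",
    "Evaluates math expressions",
    {"basic"},
    {"properties": {"expression": {"description": "Math expression to evaluate"}}},
))
# CalculatorBlock Evaluates math expressions basic expression: Math expression to evaluate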

View File

@@ -85,6 +85,8 @@ async def test_block_handler_get_missing_items(mocker):
mock_block_instance.input_schema.model_json_schema.return_value = {
"properties": {"expression": {"description": "Math expression to evaluate"}}
}
mock_block_instance.input_schema.get_credentials_fields_info.return_value = {}
mock_block_instance.input_schema.model_fields = {}
mock_block_class.return_value = mock_block_instance
mock_blocks = {"block-uuid-1": mock_block_class}
@@ -93,7 +95,7 @@ async def test_block_handler_get_missing_items(mocker):
mock_existing = []
with patch(
"backend.blocks.get_blocks",
"backend.data.block.get_blocks",
return_value=mock_blocks,
):
with patch(
@@ -135,7 +137,7 @@ async def test_block_handler_get_stats(mocker):
mock_embedded = [{"count": 2}]
with patch(
"backend.blocks.get_blocks",
"backend.data.block.get_blocks",
return_value=mock_blocks,
):
with patch(
@@ -309,25 +311,26 @@ async def test_content_handlers_registry():
@pytest.mark.asyncio(loop_scope="session")
async def test_block_handler_handles_missing_attributes():
"""Test BlockHandler gracefully handles blocks with missing attributes."""
async def test_block_handler_handles_empty_attributes():
"""Test BlockHandler handles blocks with empty/falsy attribute values."""
handler = BlockHandler()
# Mock block with minimal attributes
# Mock block with empty values (all attributes exist but are falsy)
mock_block_class = MagicMock()
mock_block_instance = MagicMock()
mock_block_instance.name = "Minimal Block"
mock_block_instance.disabled = False
# No description, categories, or schema
del mock_block_instance.description
del mock_block_instance.categories
del mock_block_instance.input_schema
mock_block_instance.description = ""
mock_block_instance.categories = set()
mock_block_instance.input_schema.model_json_schema.return_value = {}
mock_block_instance.input_schema.get_credentials_fields_info.return_value = {}
mock_block_instance.input_schema.model_fields = {}
mock_block_class.return_value = mock_block_instance
mock_blocks = {"block-minimal": mock_block_class}
with patch(
"backend.blocks.get_blocks",
"backend.data.block.get_blocks",
return_value=mock_blocks,
):
with patch(
@@ -352,6 +355,9 @@ async def test_block_handler_skips_failed_blocks():
good_instance.description = "Works fine"
good_instance.categories = []
good_instance.disabled = False
good_instance.input_schema.model_json_schema.return_value = {}
good_instance.input_schema.get_credentials_fields_info.return_value = {}
good_instance.input_schema.model_fields = {}
good_block.return_value = good_instance
bad_block = MagicMock()
@@ -360,7 +366,7 @@ async def test_block_handler_skips_failed_blocks():
mock_blocks = {"good-block": good_block, "bad-block": bad_block}
with patch(
"backend.blocks.get_blocks",
"backend.data.block.get_blocks",
return_value=mock_blocks,
):
with patch(

View File

@@ -662,7 +662,7 @@ async def cleanup_orphaned_embeddings() -> dict[str, Any]:
)
current_ids = {row["id"] for row in valid_agents}
elif content_type == ContentType.BLOCK:
from backend.blocks import get_blocks
from backend.data.block import get_blocks
current_ids = set(get_blocks().keys())
elif content_type == ContentType.DOCUMENTATION:

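For blocks, the orphan check presumably reduces to a set difference between stored embedding IDs and the IDs currently returned by get_blocks(); the deletion step itself is elided from this hunk. A minimal sketch with illustrative IDs:

current_ids = {"block-a", "block-b"}                  # set(get_blocks().keys())
embedded_ids = {"block-a", "block-b", "block-gone"}   # IDs that still have stored embeddings
orphaned = embedded_ids - current_ids                 # {"block-gone"}: embeddings safe to delete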
View File

@@ -7,6 +7,15 @@ from replicate.client import Client as ReplicateClient
from replicate.exceptions import ReplicateError
from replicate.helpers import FileOutput
from backend.blocks.ideogram import (
AspectRatio,
ColorPalettePreset,
IdeogramModelBlock,
IdeogramModelName,
MagicPromptOption,
StyleType,
UpscaleOption,
)
from backend.data.graph import GraphBaseMeta
from backend.data.model import CredentialsMetaInput, ProviderName
from backend.integrations.credentials_store import ideogram_credentials
@@ -41,16 +50,6 @@ async def generate_agent_image_v2(graph: GraphBaseMeta | AgentGraph) -> io.Bytes
if not ideogram_credentials.api_key:
raise ValueError("Missing Ideogram API key")
from backend.blocks.ideogram import (
AspectRatio,
ColorPalettePreset,
IdeogramModelBlock,
IdeogramModelName,
MagicPromptOption,
StyleType,
UpscaleOption,
)
name = graph.name
description = f"{name} ({graph.description})" if graph.description else name

View File

@@ -40,11 +40,10 @@ from backend.api.model import (
UpdateTimezoneRequest,
UploadFileResponse,
)
from backend.blocks import get_block, get_blocks
from backend.data import execution as execution_db
from backend.data import graph as graph_db
from backend.data.auth import api_key as api_key_db
from backend.data.block import BlockInput, CompletedBlockOutput
from backend.data.block import BlockInput, CompletedBlockOutput, get_block, get_blocks
from backend.data.credit import (
AutoTopUpConfig,
RefundRequest,

View File

@@ -3,19 +3,22 @@ import logging
import os
import re
from pathlib import Path
from typing import Sequence, Type, TypeVar
from typing import TYPE_CHECKING, TypeVar
from backend.blocks._base import AnyBlockSchema, BlockType
from backend.util.cache import cached
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from backend.data.block import Block
T = TypeVar("T")
@cached(ttl_seconds=3600)
def load_all_blocks() -> dict[str, type["AnyBlockSchema"]]:
from backend.blocks._base import Block
def load_all_blocks() -> dict[str, type["Block"]]:
from backend.data.block import Block
from backend.util.settings import Config
# Check if example blocks should be loaded from settings
@@ -47,8 +50,8 @@ def load_all_blocks() -> dict[str, type["AnyBlockSchema"]]:
importlib.import_module(f".{module}", package=__name__)
# Load all Block instances from the available modules
available_blocks: dict[str, type["AnyBlockSchema"]] = {}
for block_cls in _all_subclasses(Block):
available_blocks: dict[str, type["Block"]] = {}
for block_cls in all_subclasses(Block):
class_name = block_cls.__name__
if class_name.endswith("Base"):
@@ -61,7 +64,7 @@ def load_all_blocks() -> dict[str, type["AnyBlockSchema"]]:
"please name the class with 'Base' at the end"
)
block = block_cls() # pyright: ignore[reportAbstractUsage]
block = block_cls.create()
if not isinstance(block.id, str) or len(block.id) != 36:
raise ValueError(
@@ -102,7 +105,7 @@ def load_all_blocks() -> dict[str, type["AnyBlockSchema"]]:
available_blocks[block.id] = block_cls
# Filter out blocks with incomplete auth configs, e.g. missing OAuth server secrets
from ._utils import is_block_auth_configured
from backend.data.block import is_block_auth_configured
filtered_blocks = {}
for block_id, block_cls in available_blocks.items():
@@ -112,48 +115,11 @@ def load_all_blocks() -> dict[str, type["AnyBlockSchema"]]:
return filtered_blocks
def _all_subclasses(cls: type[T]) -> list[type[T]]:
__all__ = ["load_all_blocks"]
def all_subclasses(cls: type[T]) -> list[type[T]]:
subclasses = cls.__subclasses__()
for subclass in subclasses:
subclasses += _all_subclasses(subclass)
subclasses += all_subclasses(subclass)
return subclasses
# ============== Block access helper functions ============== #
def get_blocks() -> dict[str, Type["AnyBlockSchema"]]:
return load_all_blocks()
# Note on the return type annotation: https://github.com/microsoft/pyright/issues/10281
def get_block(block_id: str) -> "AnyBlockSchema | None":
cls = get_blocks().get(block_id)
return cls() if cls else None
@cached(ttl_seconds=3600)
def get_webhook_block_ids() -> Sequence[str]:
return [
id
for id, B in get_blocks().items()
if B().block_type in (BlockType.WEBHOOK, BlockType.WEBHOOK_MANUAL)
]
@cached(ttl_seconds=3600)
def get_io_block_ids() -> Sequence[str]:
return [
id
for id, B in get_blocks().items()
if B().block_type in (BlockType.INPUT, BlockType.OUTPUT)
]
@cached(ttl_seconds=3600)
def get_human_in_the_loop_block_ids() -> Sequence[str]:
return [
id
for id, B in get_blocks().items()
if B().block_type == BlockType.HUMAN_IN_THE_LOOP
]
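load_all_blocks discovers block classes by walking the transitive subclass tree of Block with the renamed all_subclasses helper. The same traversal in isolation, with toy classes standing in for real blocks:

from typing import TypeVar

T = TypeVar("T")

def all_subclasses(cls: type[T]) -> list[type[T]]:
    # Direct subclasses first, then each subclass's own subtree
    subclasses = cls.__subclasses__()
    for subclass in subclasses:
        subclasses += all_subclasses(subclass)
    return subclasses

class Block: ...
class TextBlock(Block): ...
class UppercaseTextBlock(TextBlock): ...

print([c.__name__ for c in all_subclasses(Block)])
# ['TextBlock', 'UppercaseTextBlock']: grandchildren are found too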

View File

@@ -1,739 +0,0 @@
import inspect
import logging
from abc import ABC, abstractmethod
from enum import Enum
from typing import (
TYPE_CHECKING,
Any,
Callable,
ClassVar,
Generic,
Optional,
Type,
TypeAlias,
TypeVar,
cast,
get_origin,
)
import jsonref
import jsonschema
from pydantic import BaseModel
from backend.data.block import BlockInput, BlockOutput, BlockOutputEntry
from backend.data.model import (
Credentials,
CredentialsFieldInfo,
CredentialsMetaInput,
SchemaField,
is_credentials_field_name,
)
from backend.integrations.providers import ProviderName
from backend.util import json
from backend.util.exceptions import (
BlockError,
BlockExecutionError,
BlockInputError,
BlockOutputError,
BlockUnknownError,
)
from backend.util.settings import Config
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from backend.data.execution import ExecutionContext
from backend.data.model import ContributorDetails, NodeExecutionStats
from ..data.graph import Link
app_config = Config()
BlockTestOutput = BlockOutputEntry | tuple[str, Callable[[Any], bool]]
class BlockType(Enum):
STANDARD = "Standard"
INPUT = "Input"
OUTPUT = "Output"
NOTE = "Note"
WEBHOOK = "Webhook"
WEBHOOK_MANUAL = "Webhook (manual)"
AGENT = "Agent"
AI = "AI"
AYRSHARE = "Ayrshare"
HUMAN_IN_THE_LOOP = "Human In The Loop"
class BlockCategory(Enum):
AI = "Block that leverages AI to perform a task."
SOCIAL = "Block that interacts with social media platforms."
TEXT = "Block that processes text data."
SEARCH = "Block that searches or extracts information from the internet."
BASIC = "Block that performs basic operations."
INPUT = "Block that interacts with input of the graph."
OUTPUT = "Block that interacts with output of the graph."
LOGIC = "Programming logic to control the flow of your agent"
COMMUNICATION = "Block that interacts with communication platforms."
DEVELOPER_TOOLS = "Developer tools such as GitHub blocks."
DATA = "Block that interacts with structured data."
HARDWARE = "Block that interacts with hardware."
AGENT = "Block that interacts with other agents."
CRM = "Block that interacts with CRM services."
SAFETY = (
"Block that provides AI safety mechanisms such as detecting harmful content"
)
PRODUCTIVITY = "Block that helps with productivity"
ISSUE_TRACKING = "Block that helps with issue tracking"
MULTIMEDIA = "Block that interacts with multimedia content"
MARKETING = "Block that helps with marketing"
def dict(self) -> dict[str, str]:
return {"category": self.name, "description": self.value}
class BlockCostType(str, Enum):
RUN = "run" # cost X credits per run
BYTE = "byte" # cost X credits per byte
SECOND = "second" # cost X credits per second
class BlockCost(BaseModel):
cost_amount: int
cost_filter: BlockInput
cost_type: BlockCostType
def __init__(
self,
cost_amount: int,
cost_type: BlockCostType = BlockCostType.RUN,
cost_filter: Optional[BlockInput] = None,
**data: Any,
) -> None:
super().__init__(
cost_amount=cost_amount,
cost_filter=cost_filter or {},
cost_type=cost_type,
**data,
)
class BlockInfo(BaseModel):
id: str
name: str
inputSchema: dict[str, Any]
outputSchema: dict[str, Any]
costs: list[BlockCost]
description: str
categories: list[dict[str, str]]
contributors: list[dict[str, Any]]
staticOutput: bool
uiType: str
class BlockSchema(BaseModel):
cached_jsonschema: ClassVar[dict[str, Any]]
@classmethod
def jsonschema(cls) -> dict[str, Any]:
if cls.cached_jsonschema:
return cls.cached_jsonschema
model = jsonref.replace_refs(cls.model_json_schema(), merge_props=True)
def ref_to_dict(obj):
if isinstance(obj, dict):
# OpenAPI <3.1 does not support sibling fields that have a $ref key
# So sometimes, the schema has an "allOf"/"anyOf"/"oneOf" with 1 item.
keys = {"allOf", "anyOf", "oneOf"}
one_key = next((k for k in keys if k in obj and len(obj[k]) == 1), None)
if one_key:
obj.update(obj[one_key][0])
return {
key: ref_to_dict(value)
for key, value in obj.items()
if not key.startswith("$") and key != one_key
}
elif isinstance(obj, list):
return [ref_to_dict(item) for item in obj]
return obj
cls.cached_jsonschema = cast(dict[str, Any], ref_to_dict(model))
return cls.cached_jsonschema
@classmethod
def validate_data(cls, data: BlockInput) -> str | None:
return json.validate_with_jsonschema(
schema=cls.jsonschema(),
data={k: v for k, v in data.items() if v is not None},
)
@classmethod
def get_mismatch_error(cls, data: BlockInput) -> str | None:
return cls.validate_data(data)
@classmethod
def get_field_schema(cls, field_name: str) -> dict[str, Any]:
model_schema = cls.jsonschema().get("properties", {})
if not model_schema:
raise ValueError(f"Invalid model schema {cls}")
property_schema = model_schema.get(field_name)
if not property_schema:
raise ValueError(f"Invalid property name {field_name}")
return property_schema
@classmethod
def validate_field(cls, field_name: str, data: BlockInput) -> str | None:
"""
Validate the data against a specific property (one of the input/output name).
Returns the validation error message if the data does not match the schema.
"""
try:
property_schema = cls.get_field_schema(field_name)
jsonschema.validate(json.to_dict(data), property_schema)
return None
except jsonschema.ValidationError as e:
return str(e)
@classmethod
def get_fields(cls) -> set[str]:
return set(cls.model_fields.keys())
@classmethod
def get_required_fields(cls) -> set[str]:
return {
field
for field, field_info in cls.model_fields.items()
if field_info.is_required()
}
@classmethod
def __pydantic_init_subclass__(cls, **kwargs):
"""Validates the schema definition. Rules:
- Fields with annotation `CredentialsMetaInput` MUST be
named `credentials` or `*_credentials`
- Fields named `credentials` or `*_credentials` MUST be
of type `CredentialsMetaInput`
"""
super().__pydantic_init_subclass__(**kwargs)
# Reset cached JSON schema to prevent inheriting it from parent class
cls.cached_jsonschema = {}
credentials_fields = cls.get_credentials_fields()
for field_name in cls.get_fields():
if is_credentials_field_name(field_name):
if field_name not in credentials_fields:
raise TypeError(
f"Credentials field '{field_name}' on {cls.__qualname__} "
f"is not of type {CredentialsMetaInput.__name__}"
)
CredentialsMetaInput.validate_credentials_field_schema(
cls.get_field_schema(field_name), field_name
)
elif field_name in credentials_fields:
raise KeyError(
f"Credentials field '{field_name}' on {cls.__qualname__} "
"has invalid name: must be 'credentials' or *_credentials"
)
@classmethod
def get_credentials_fields(cls) -> dict[str, type[CredentialsMetaInput]]:
return {
field_name: info.annotation
for field_name, info in cls.model_fields.items()
if (
inspect.isclass(info.annotation)
and issubclass(
get_origin(info.annotation) or info.annotation,
CredentialsMetaInput,
)
)
}
@classmethod
def get_auto_credentials_fields(cls) -> dict[str, dict[str, Any]]:
"""
Get fields that have auto_credentials metadata (e.g., GoogleDriveFileInput).
Returns a dict mapping kwarg_name -> {field_name, auto_credentials_config}
Raises:
ValueError: If multiple fields have the same kwarg_name, as this would
cause silent overwriting and only the last field would be processed.
"""
result: dict[str, dict[str, Any]] = {}
schema = cls.jsonschema()
properties = schema.get("properties", {})
for field_name, field_schema in properties.items():
auto_creds = field_schema.get("auto_credentials")
if auto_creds:
kwarg_name = auto_creds.get("kwarg_name", "credentials")
if kwarg_name in result:
raise ValueError(
f"Duplicate auto_credentials kwarg_name '{kwarg_name}' "
f"in fields '{result[kwarg_name]['field_name']}' and "
f"'{field_name}' on {cls.__qualname__}"
)
result[kwarg_name] = {
"field_name": field_name,
"config": auto_creds,
}
return result
@classmethod
def get_credentials_fields_info(cls) -> dict[str, CredentialsFieldInfo]:
result = {}
# Regular credentials fields
for field_name in cls.get_credentials_fields().keys():
result[field_name] = CredentialsFieldInfo.model_validate(
cls.get_field_schema(field_name), by_alias=True
)
# Auto-generated credentials fields (from GoogleDriveFileInput etc.)
for kwarg_name, info in cls.get_auto_credentials_fields().items():
config = info["config"]
# Build a schema-like dict that CredentialsFieldInfo can parse
auto_schema = {
"credentials_provider": [config.get("provider", "google")],
"credentials_types": [config.get("type", "oauth2")],
"credentials_scopes": config.get("scopes"),
}
result[kwarg_name] = CredentialsFieldInfo.model_validate(
auto_schema, by_alias=True
)
return result
@classmethod
def get_input_defaults(cls, data: BlockInput) -> BlockInput:
return data # Return as is, by default.
@classmethod
def get_missing_links(cls, data: BlockInput, links: list["Link"]) -> set[str]:
input_fields_from_nodes = {link.sink_name for link in links}
return input_fields_from_nodes - set(data)
@classmethod
def get_missing_input(cls, data: BlockInput) -> set[str]:
return cls.get_required_fields() - set(data)
class BlockSchemaInput(BlockSchema):
"""
Base schema class for block inputs.
All block input schemas should extend this class for consistency.
"""
pass
class BlockSchemaOutput(BlockSchema):
"""
Base schema class for block outputs that includes a standard error field.
All block output schemas should extend this class to ensure consistent error handling.
"""
error: str = SchemaField(
description="Error message if the operation failed", default=""
)
BlockSchemaInputType = TypeVar("BlockSchemaInputType", bound=BlockSchemaInput)
BlockSchemaOutputType = TypeVar("BlockSchemaOutputType", bound=BlockSchemaOutput)
class EmptyInputSchema(BlockSchemaInput):
pass
class EmptyOutputSchema(BlockSchemaOutput):
pass
# For backward compatibility - will be deprecated
EmptySchema = EmptyOutputSchema
# --8<-- [start:BlockWebhookConfig]
class BlockManualWebhookConfig(BaseModel):
"""
Configuration model for webhook-triggered blocks for which
the user has to manually set up the webhook with the provider.
"""
provider: ProviderName
"""The service provider that the webhook connects to"""
webhook_type: str
"""
Identifier for the webhook type. E.g. GitHub has repo and organization level hooks.
Only for use in the corresponding `WebhooksManager`.
"""
event_filter_input: str = ""
"""
Name of the block's event filter input.
Leave empty if the corresponding webhook doesn't have distinct event/payload types.
"""
event_format: str = "{event}"
"""
Template string for the event(s) that a block instance subscribes to.
Applied individually to each event selected in the event filter input.
Example: `"pull_request.{event}"` -> `"pull_request.opened"`
"""
class BlockWebhookConfig(BlockManualWebhookConfig):
"""
Configuration model for webhook-triggered blocks for which
the webhook can be automatically set up through the provider's API.
"""
resource_format: str
"""
Template string for the resource that a block instance subscribes to.
Fields will be filled from the block's inputs (except `payload`).
Example: `f"{repo}/pull_requests"` (note: not how it's actually implemented)
Only for use in the corresponding `WebhooksManager`.
"""
# --8<-- [end:BlockWebhookConfig]
class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
def __init__(
self,
id: str = "",
description: str = "",
contributors: list["ContributorDetails"] = [],
categories: set[BlockCategory] | None = None,
input_schema: Type[BlockSchemaInputType] = EmptyInputSchema,
output_schema: Type[BlockSchemaOutputType] = EmptyOutputSchema,
test_input: BlockInput | list[BlockInput] | None = None,
test_output: BlockTestOutput | list[BlockTestOutput] | None = None,
test_mock: dict[str, Any] | None = None,
test_credentials: Optional[Credentials | dict[str, Credentials]] = None,
disabled: bool = False,
static_output: bool = False,
block_type: BlockType = BlockType.STANDARD,
webhook_config: Optional[BlockWebhookConfig | BlockManualWebhookConfig] = None,
is_sensitive_action: bool = False,
):
"""
Initialize the block with the given schema.
Args:
id: The unique identifier for the block; this value will be persisted in the
DB, so it should be unique and constant across application runs.
Use the UUID format for the ID.
description: The description of the block, explaining what the block does.
contributors: The list of contributors who contributed to the block.
input_schema: The schema, defined as a Pydantic model, for the input data.
output_schema: The schema, defined as a Pydantic model, for the output data.
test_input: The list or single sample input data for the block, for testing.
test_output: The list or single expected output if the test_input is run.
test_mock: function names on the block implementation to mock on test run.
disabled: If the block is disabled, it will not be available for execution.
static_output: Whether the output links of the block are static by default.
"""
from backend.data.model import NodeExecutionStats
self.id = id
self.input_schema = input_schema
self.output_schema = output_schema
self.test_input = test_input
self.test_output = test_output
self.test_mock = test_mock
self.test_credentials = test_credentials
self.description = description
self.categories = categories or set()
self.contributors = contributors or set()
self.disabled = disabled
self.static_output = static_output
self.block_type = block_type
self.webhook_config = webhook_config
self.is_sensitive_action = is_sensitive_action
self.execution_stats: "NodeExecutionStats" = NodeExecutionStats()
if self.webhook_config:
if isinstance(self.webhook_config, BlockWebhookConfig):
# Enforce presence of credentials field on auto-setup webhook blocks
if not (cred_fields := self.input_schema.get_credentials_fields()):
raise TypeError(
"credentials field is required on auto-setup webhook blocks"
)
# Disallow multiple credentials inputs on webhook blocks
elif len(cred_fields) > 1:
raise ValueError(
"Multiple credentials inputs not supported on webhook blocks"
)
self.block_type = BlockType.WEBHOOK
else:
self.block_type = BlockType.WEBHOOK_MANUAL
# Enforce shape of webhook event filter, if present
if self.webhook_config.event_filter_input:
event_filter_field = self.input_schema.model_fields[
self.webhook_config.event_filter_input
]
if not (
isinstance(event_filter_field.annotation, type)
and issubclass(event_filter_field.annotation, BaseModel)
and all(
field.annotation is bool
for field in event_filter_field.annotation.model_fields.values()
)
):
raise NotImplementedError(
f"{self.name} has an invalid webhook event selector: "
"field must be a BaseModel and all its fields must be boolean"
)
# Enforce presence of 'payload' input
if "payload" not in self.input_schema.model_fields:
raise TypeError(
f"{self.name} is webhook-triggered but has no 'payload' input"
)
# Disable webhook-triggered block if webhook functionality not available
if not app_config.platform_base_url:
self.disabled = True
@abstractmethod
async def run(self, input_data: BlockSchemaInputType, **kwargs) -> BlockOutput:
"""
Run the block with the given input data.
Args:
input_data: The input data with the structure of input_schema.
Kwargs: As of 14/02/2025, these include:
graph_id: The ID of the graph.
node_id: The ID of the node.
graph_exec_id: The ID of the graph execution.
node_exec_id: The ID of the node execution.
user_id: The ID of the user.
Returns:
A Generator that yields (output_name, output_data).
output_name: One of the output names defined in the Block's output_schema.
output_data: The data for the output_name, matching the defined schema.
"""
# --- satisfy the type checker, never executed -------------
if False: # noqa: SIM115
yield "name", "value" # pyright: ignore[reportMissingYield]
raise NotImplementedError(f"{self.name} does not implement the run method.")
async def run_once(
self, input_data: BlockSchemaInputType, output: str, **kwargs
) -> Any:
async for item in self.run(input_data, **kwargs):
name, data = item
if name == output:
return data
raise ValueError(f"{self.name} did not produce any output for {output}")
def merge_stats(self, stats: "NodeExecutionStats") -> "NodeExecutionStats":
self.execution_stats += stats
return self.execution_stats
@property
def name(self):
return self.__class__.__name__
def to_dict(self):
return {
"id": self.id,
"name": self.name,
"inputSchema": self.input_schema.jsonschema(),
"outputSchema": self.output_schema.jsonschema(),
"description": self.description,
"categories": [category.dict() for category in self.categories],
"contributors": [
contributor.model_dump() for contributor in self.contributors
],
"staticOutput": self.static_output,
"uiType": self.block_type.value,
}
def get_info(self) -> BlockInfo:
from backend.data.credit import get_block_cost
return BlockInfo(
id=self.id,
name=self.name,
inputSchema=self.input_schema.jsonschema(),
outputSchema=self.output_schema.jsonschema(),
costs=get_block_cost(self),
description=self.description,
categories=[category.dict() for category in self.categories],
contributors=[
contributor.model_dump() for contributor in self.contributors
],
staticOutput=self.static_output,
uiType=self.block_type.value,
)
async def execute(self, input_data: BlockInput, **kwargs) -> BlockOutput:
try:
async for output_name, output_data in self._execute(input_data, **kwargs):
yield output_name, output_data
except Exception as ex:
if isinstance(ex, BlockError):
raise ex
else:
raise (
BlockExecutionError
if isinstance(ex, ValueError)
else BlockUnknownError
)(
message=str(ex),
block_name=self.name,
block_id=self.id,
) from ex
async def is_block_exec_need_review(
self,
input_data: BlockInput,
*,
user_id: str,
node_id: str,
node_exec_id: str,
graph_exec_id: str,
graph_id: str,
graph_version: int,
execution_context: "ExecutionContext",
**kwargs,
) -> tuple[bool, BlockInput]:
"""
Check if this block execution needs human review and handle the review process.
Returns:
Tuple of (should_pause, input_data_to_use)
- should_pause: True if execution should be paused for review
- input_data_to_use: The input data to use (may be modified by reviewer)
"""
if not (
self.is_sensitive_action and execution_context.sensitive_action_safe_mode
):
return False, input_data
from backend.blocks.helpers.review import HITLReviewHelper
# Handle the review request and get decision
decision = await HITLReviewHelper.handle_review_decision(
input_data=input_data,
user_id=user_id,
node_id=node_id,
node_exec_id=node_exec_id,
graph_exec_id=graph_exec_id,
graph_id=graph_id,
graph_version=graph_version,
block_name=self.name,
editable=True,
)
if decision is None:
# We're awaiting review - pause execution
return True, input_data
if not decision.should_proceed:
# Review was rejected, raise an error to stop execution
raise BlockExecutionError(
message=f"Block execution rejected by reviewer: {decision.message}",
block_name=self.name,
block_id=self.id,
)
# Review was approved - use the potentially modified data
# ReviewResult.data must be a dict for block inputs
reviewed_data = decision.review_result.data
if not isinstance(reviewed_data, dict):
raise BlockExecutionError(
message=f"Review data must be a dict for block input, got {type(reviewed_data).__name__}",
block_name=self.name,
block_id=self.id,
)
return False, reviewed_data
async def _execute(self, input_data: BlockInput, **kwargs) -> BlockOutput:
# Check for review requirement only if running within a graph execution context
# Direct block execution (e.g., from chat) skips the review process
has_graph_context = all(
key in kwargs
for key in (
"node_exec_id",
"graph_exec_id",
"graph_id",
"execution_context",
)
)
if has_graph_context:
should_pause, input_data = await self.is_block_exec_need_review(
input_data, **kwargs
)
if should_pause:
return
# Validate the input data (original or reviewer-modified) once
if error := self.input_schema.validate_data(input_data):
raise BlockInputError(
message=f"Unable to execute block with invalid input data: {error}",
block_name=self.name,
block_id=self.id,
)
# Use the validated input data
async for output_name, output_data in self.run(
self.input_schema(**{k: v for k, v in input_data.items() if v is not None}),
**kwargs,
):
if output_name == "error":
raise BlockExecutionError(
message=output_data, block_name=self.name, block_id=self.id
)
if self.block_type == BlockType.STANDARD and (
error := self.output_schema.validate_field(output_name, output_data)
):
raise BlockOutputError(
message=f"Block produced an invalid output data: {error}",
block_name=self.name,
block_id=self.id,
)
yield output_name, output_data
def is_triggered_by_event_type(
self, trigger_config: dict[str, Any], event_type: str
) -> bool:
if not self.webhook_config:
raise TypeError("This method can't be used on non-trigger blocks")
if not self.webhook_config.event_filter_input:
return True
event_filter = trigger_config.get(self.webhook_config.event_filter_input)
if not event_filter:
raise ValueError("Event filter is not configured on trigger")
return event_type in [
self.webhook_config.event_format.format(event=k)
for k in event_filter
if event_filter[k] is True
]
# Type alias for any block with standard input/output schemas
AnyBlockSchema: TypeAlias = Block[BlockSchemaInput, BlockSchemaOutput]
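The deleted BlockSchema.jsonschema() post-processed the jsonref output with ref_to_dict, collapsing single-item allOf/anyOf/oneOf wrappers and dropping $-prefixed keys for OpenAPI <3.1 compatibility. The same transform run standalone on a toy schema (the schema contents are illustrative):

def ref_to_dict(obj):
    if isinstance(obj, dict):
        keys = {"allOf", "anyOf", "oneOf"}
        one_key = next((k for k in keys if k in obj and len(obj[k]) == 1), None)
        if one_key:
            obj.update(obj[one_key][0])  # hoist the single wrapped schema
        return {
            key: ref_to_dict(value)
            for key, value in obj.items()
            if not key.startswith("$") and key != one_key
        }
    elif isinstance(obj, list):
        return [ref_to_dict(item) for item in obj]
    return obj

schema = {
    "$defs": {},  # dropped: $-prefixed key
    "properties": {
        "mode": {
            "allOf": [{"type": "string", "enum": ["fast", "slow"]}],
            "description": "Run mode",
        },
    },
}
print(ref_to_dict(schema))
# {'properties': {'mode': {'description': 'Run mode', 'type': 'string', 'enum': ['fast', 'slow']}}}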

View File

@@ -1,122 +0,0 @@
import logging
import os
from backend.integrations.providers import ProviderName
from ._base import AnyBlockSchema
logger = logging.getLogger(__name__)
def is_block_auth_configured(
block_cls: type[AnyBlockSchema],
) -> bool:
"""
Check if a block has a valid authentication method configured at runtime.
For example, if a block is OAuth-only and its env vars are not set,
it should not be shown in the UI.
"""
from backend.sdk.registry import AutoRegistry
# Create an instance to access input_schema
try:
block = block_cls()
except Exception as e:
# If we can't create a block instance, assume it's not OAuth-only
logger.error(f"Error creating block instance for {block_cls.__name__}: {e}")
return True
logger.debug(
f"Checking if block {block_cls.__name__} has a valid provider configured"
)
# Get all credential inputs from input schema
credential_inputs = block.input_schema.get_credentials_fields_info()
required_inputs = block.input_schema.get_required_fields()
if not credential_inputs:
logger.debug(
f"Block {block_cls.__name__} has no credential inputs - Treating as valid"
)
return True
# Check credential inputs
if len(required_inputs.intersection(credential_inputs.keys())) == 0:
logger.debug(
f"Block {block_cls.__name__} has only optional credential inputs"
" - will work without credentials configured"
)
# Check if the credential inputs for this block are correctly configured
for field_name, field_info in credential_inputs.items():
provider_names = field_info.provider
if not provider_names:
logger.warning(
f"Block {block_cls.__name__} "
f"has credential input '{field_name}' with no provider options"
" - Disabling"
)
return False
# If a field has multiple possible providers, each one needs to be usable to
# prevent breaking the UX
for _provider_name in provider_names:
provider_name = _provider_name.value
if provider_name in ProviderName.__members__.values():
logger.debug(
f"Block {block_cls.__name__} credential input '{field_name}' "
f"provider '{provider_name}' is part of the legacy provider system"
" - Treating as valid"
)
break
provider = AutoRegistry.get_provider(provider_name)
if not provider:
logger.warning(
f"Block {block_cls.__name__} credential input '{field_name}' "
f"refers to unknown provider '{provider_name}' - Disabling"
)
return False
# Check the provider's supported auth types
if field_info.supported_types != provider.supported_auth_types:
logger.warning(
f"Block {block_cls.__name__} credential input '{field_name}' "
f"has mismatched supported auth types (field <> Provider): "
f"{field_info.supported_types} != {provider.supported_auth_types}"
)
if not (supported_auth_types := provider.supported_auth_types):
# No auth methods have been configured for this provider
logger.warning(
f"Block {block_cls.__name__} credential input '{field_name}' "
f"provider '{provider_name}' "
"has no authentication methods configured - Disabling"
)
return False
# Check if provider supports OAuth
if "oauth2" in supported_auth_types:
# Check if OAuth environment variables are set
if (oauth_config := provider.oauth_config) and bool(
os.getenv(oauth_config.client_id_env_var)
and os.getenv(oauth_config.client_secret_env_var)
):
logger.debug(
f"Block {block_cls.__name__} credential input '{field_name}' "
f"provider '{provider_name}' is configured for OAuth"
)
else:
logger.error(
f"Block {block_cls.__name__} credential input '{field_name}' "
f"provider '{provider_name}' "
"is missing OAuth client ID or secret - Disabling"
)
return False
logger.debug(
f"Block {block_cls.__name__} credential input '{field_name}' is valid; "
f"supported credential types: {', '.join(field_info.supported_types)}"
)
return True
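The decisive runtime gate in the deleted is_block_auth_configured is whether both OAuth env vars for a provider are present. That check in isolation; the env var names here are hypothetical:

import os

def oauth_env_configured(client_id_env_var: str, client_secret_env_var: str) -> bool:
    # Both the client ID and the secret must be set, otherwise
    # the OAuth-only block gets disabled in the UI
    return bool(os.getenv(client_id_env_var) and os.getenv(client_secret_env_var))

os.environ["ACME_CLIENT_ID"] = "id-123"  # secret deliberately left unset
print(oauth_env_configured("ACME_CLIENT_ID", "ACME_CLIENT_SECRET"))  # False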

View File

@@ -1,7 +1,7 @@
import logging
from typing import TYPE_CHECKING, Any, Optional
from typing import Any, Optional
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockInput,
@@ -9,15 +9,13 @@ from backend.blocks._base import (
BlockSchema,
BlockSchemaInput,
BlockType,
get_block,
)
from backend.data.execution import ExecutionContext, ExecutionStatus, NodesInputMasks
from backend.data.model import NodeExecutionStats, SchemaField
from backend.util.json import validate_with_jsonschema
from backend.util.retry import func_retry
if TYPE_CHECKING:
from backend.executor.utils import LogMetadata
_logger = logging.getLogger(__name__)
@@ -126,10 +124,9 @@ class AgentExecutorBlock(Block):
graph_version: int,
graph_exec_id: str,
user_id: str,
logger: "LogMetadata",
logger,
) -> BlockOutput:
from backend.blocks import get_block
from backend.data.execution import ExecutionEventType
from backend.executor import utils as execution_utils
@@ -201,7 +198,7 @@ class AgentExecutorBlock(Block):
self,
graph_exec_id: str,
user_id: str,
logger: "LogMetadata",
logger,
) -> None:
from backend.executor import utils as execution_utils

View File

@@ -1,11 +1,5 @@
from typing import Any
from backend.blocks._base import (
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.blocks.llm import (
DEFAULT_LLM_MODEL,
TEST_CREDENTIALS,
@@ -17,6 +11,12 @@ from backend.blocks.llm import (
LLMResponse,
llm_call,
)
from backend.data.block import (
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.data.model import APIKeyCredentials, NodeExecutionStats, SchemaField

View File

@@ -6,7 +6,7 @@ from pydantic import SecretStr
from replicate.client import Client as ReplicateClient
from replicate.helpers import FileOutput
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -5,12 +5,7 @@ from pydantic import SecretStr
from replicate.client import Client as ReplicateClient
from replicate.helpers import FileOutput
from backend.blocks._base import (
Block,
BlockCategory,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.data.block import Block, BlockCategory, BlockSchemaInput, BlockSchemaOutput
from backend.data.execution import ExecutionContext
from backend.data.model import (
APIKeyCredentials,

View File

@@ -6,7 +6,7 @@ from typing import Literal
from pydantic import SecretStr
from replicate.client import Client as ReplicateClient
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -6,7 +6,7 @@ from typing import Literal
from pydantic import SecretStr
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -1,10 +1,3 @@
from backend.blocks._base import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.blocks.apollo._api import ApolloClient
from backend.blocks.apollo._auth import (
TEST_CREDENTIALS,
@@ -17,6 +10,13 @@ from backend.blocks.apollo.models import (
PrimaryPhone,
SearchOrganizationsRequest,
)
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.data.model import CredentialsField, SchemaField

View File

@@ -1,12 +1,5 @@
import asyncio
from backend.blocks._base import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.blocks.apollo._api import ApolloClient
from backend.blocks.apollo._auth import (
TEST_CREDENTIALS,
@@ -21,6 +14,13 @@ from backend.blocks.apollo.models import (
SearchPeopleRequest,
SenorityLevels,
)
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.data.model import CredentialsField, SchemaField

View File

@@ -1,10 +1,3 @@
from backend.blocks._base import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.blocks.apollo._api import ApolloClient
from backend.blocks.apollo._auth import (
TEST_CREDENTIALS,
@@ -13,6 +6,13 @@ from backend.blocks.apollo._auth import (
ApolloCredentialsInput,
)
from backend.blocks.apollo.models import Contact, EnrichPersonRequest
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.data.model import CredentialsField, SchemaField

View File

@@ -3,7 +3,7 @@ from typing import Optional
from pydantic import BaseModel, Field
from backend.blocks._base import BlockSchemaInput
from backend.data.block import BlockSchemaInput
from backend.data.model import SchemaField, UserIntegrations
from backend.integrations.ayrshare import AyrshareClient
from backend.util.clients import get_database_manager_async_client

View File

@@ -1,7 +1,7 @@
import enum
from typing import Any
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -2,7 +2,7 @@ import os
import re
from typing import Type
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -1,7 +1,7 @@
from enum import Enum
from typing import Any
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -1,12 +1,12 @@
import json
import shlex
import uuid
from typing import TYPE_CHECKING, Literal, Optional
from typing import Literal, Optional
from e2b import AsyncSandbox as BaseAsyncSandbox
from pydantic import SecretStr
from pydantic import BaseModel, SecretStr
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
@@ -20,13 +20,6 @@ from backend.data.model import (
SchemaField,
)
from backend.integrations.providers import ProviderName
from backend.util.sandbox_files import (
SandboxFileOutput,
extract_and_store_sandbox_files,
)
if TYPE_CHECKING:
from backend.executor.utils import ExecutionContext
class ClaudeCodeExecutionError(Exception):
@@ -181,15 +174,22 @@ class ClaudeCodeBlock(Block):
advanced=True,
)
class FileOutput(BaseModel):
"""A file extracted from the sandbox."""
path: str
relative_path: str # Path relative to working directory (for GitHub, etc.)
name: str
content: str
class Output(BlockSchemaOutput):
response: str = SchemaField(
description="The output/response from Claude Code execution"
)
files: list[SandboxFileOutput] = SchemaField(
files: list["ClaudeCodeBlock.FileOutput"] = SchemaField(
description=(
"List of text files created/modified by Claude Code during this execution. "
"Each file has 'path', 'relative_path', 'name', 'content', and 'workspace_ref' fields. "
"workspace_ref contains a workspace:// URI if the file was stored to workspace."
"Each file has 'path', 'relative_path', 'name', and 'content' fields."
)
)
conversation_history: str = SchemaField(
@@ -252,7 +252,6 @@ class ClaudeCodeBlock(Block):
"relative_path": "index.html",
"name": "index.html",
"content": "<html>Hello World</html>",
"workspace_ref": None,
}
],
),
@@ -268,12 +267,11 @@ class ClaudeCodeBlock(Block):
"execute_claude_code": lambda *args, **kwargs: (
"Created index.html with hello world content", # response
[
SandboxFileOutput(
ClaudeCodeBlock.FileOutput(
path="/home/user/index.html",
relative_path="index.html",
name="index.html",
content="<html>Hello World</html>",
workspace_ref=None,
)
], # files
"User: Create a hello world HTML file\n"
@@ -296,8 +294,7 @@ class ClaudeCodeBlock(Block):
existing_sandbox_id: str,
conversation_history: str,
dispose_sandbox: bool,
execution_context: "ExecutionContext",
) -> tuple[str, list[SandboxFileOutput], str, str, str]:
) -> tuple[str, list["ClaudeCodeBlock.FileOutput"], str, str, str]:
"""
Execute Claude Code in an E2B sandbox.
@@ -452,18 +449,14 @@ class ClaudeCodeBlock(Block):
else:
new_conversation_history = turn_entry
# Extract files created/modified during this run and store to workspace
sandbox_files = await extract_and_store_sandbox_files(
sandbox=sandbox,
working_directory=working_directory,
execution_context=execution_context,
since_timestamp=start_timestamp,
text_only=True,
# Extract files created/modified during this run
files = await self._extract_files(
sandbox, working_directory, start_timestamp
)
return (
response,
sandbox_files, # Already SandboxFileOutput objects
files,
new_conversation_history,
current_session_id,
sandbox_id,
@@ -478,6 +471,140 @@ class ClaudeCodeBlock(Block):
if dispose_sandbox and sandbox:
await sandbox.kill()
async def _extract_files(
self,
sandbox: BaseAsyncSandbox,
working_directory: str,
since_timestamp: str | None = None,
) -> list["ClaudeCodeBlock.FileOutput"]:
"""
Extract text files created/modified during this Claude Code execution.
Args:
sandbox: The E2B sandbox instance
working_directory: Directory to search for files
since_timestamp: ISO timestamp - only return files modified after this time
Returns:
List of FileOutput objects with path, relative_path, name, and content
"""
files: list[ClaudeCodeBlock.FileOutput] = []
# Text file extensions we can safely read as text
text_extensions = {
".txt",
".md",
".html",
".htm",
".css",
".js",
".ts",
".jsx",
".tsx",
".json",
".xml",
".yaml",
".yml",
".toml",
".ini",
".cfg",
".conf",
".py",
".rb",
".php",
".java",
".c",
".cpp",
".h",
".hpp",
".cs",
".go",
".rs",
".swift",
".kt",
".scala",
".sh",
".bash",
".zsh",
".sql",
".graphql",
".env",
".gitignore",
".dockerfile",
"Dockerfile",
".vue",
".svelte",
".astro",
".mdx",
".rst",
".tex",
".csv",
".log",
}
try:
# List files recursively using find command
# Exclude node_modules and .git directories, but allow hidden files
# like .env and .gitignore (they're filtered by text_extensions later)
# Filter by timestamp to only get files created/modified during this run
safe_working_dir = shlex.quote(working_directory)
timestamp_filter = ""
if since_timestamp:
timestamp_filter = f"-newermt {shlex.quote(since_timestamp)} "
find_result = await sandbox.commands.run(
f"find {safe_working_dir} -type f "
f"{timestamp_filter}"
f"-not -path '*/node_modules/*' "
f"-not -path '*/.git/*' "
f"2>/dev/null"
)
if find_result.stdout:
for file_path in find_result.stdout.strip().split("\n"):
if not file_path:
continue
# Check if it's a text file we can read
is_text = any(
file_path.endswith(ext) for ext in text_extensions
) or file_path.endswith("Dockerfile")
if is_text:
try:
content = await sandbox.files.read(file_path)
# Handle bytes or string
if isinstance(content, bytes):
content = content.decode("utf-8", errors="replace")
# Extract filename from path
file_name = file_path.split("/")[-1]
# Calculate relative path by stripping working directory
relative_path = file_path
if file_path.startswith(working_directory):
relative_path = file_path[len(working_directory) :]
# Remove leading slash if present
if relative_path.startswith("/"):
relative_path = relative_path[1:]
files.append(
ClaudeCodeBlock.FileOutput(
path=file_path,
relative_path=relative_path,
name=file_name,
content=content,
)
)
except Exception:
# Skip files that can't be read
pass
except Exception:
# If file extraction fails, return empty results
pass
return files
def _escape_prompt(self, prompt: str) -> str:
"""Escape the prompt for safe shell execution."""
# Use single quotes and escape any single quotes in the prompt
@@ -490,7 +617,6 @@ class ClaudeCodeBlock(Block):
*,
e2b_credentials: APIKeyCredentials,
anthropic_credentials: APIKeyCredentials,
execution_context: "ExecutionContext",
**kwargs,
) -> BlockOutput:
try:
@@ -511,7 +637,6 @@ class ClaudeCodeBlock(Block):
existing_sandbox_id=input_data.sandbox_id,
conversation_history=input_data.conversation_history,
dispose_sandbox=input_data.dispose_sandbox,
execution_context=execution_context,
)
yield "response", response

View File

@@ -1,12 +1,12 @@
from enum import Enum
from typing import TYPE_CHECKING, Any, Literal, Optional
from typing import Any, Literal, Optional
from e2b_code_interpreter import AsyncSandbox
from e2b_code_interpreter import Result as E2BExecutionResult
from e2b_code_interpreter.charts import Chart as E2BExecutionResultChart
from pydantic import BaseModel, Field, JsonValue, SecretStr
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
@@ -20,13 +20,6 @@ from backend.data.model import (
SchemaField,
)
from backend.integrations.providers import ProviderName
from backend.util.sandbox_files import (
SandboxFileOutput,
extract_and_store_sandbox_files,
)
if TYPE_CHECKING:
from backend.executor.utils import ExecutionContext
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
@@ -92,9 +85,6 @@ class CodeExecutionResult(MainCodeExecutionResult):
class BaseE2BExecutorMixin:
"""Shared implementation methods for E2B executor blocks."""
# Default working directory in E2B sandboxes
WORKING_DIR = "/home/user"
async def execute_code(
self,
api_key: str,
@@ -105,21 +95,14 @@ class BaseE2BExecutorMixin:
timeout: Optional[int] = None,
sandbox_id: Optional[str] = None,
dispose_sandbox: bool = False,
execution_context: Optional["ExecutionContext"] = None,
extract_files: bool = False,
):
"""
Unified code execution method that handles all three use cases:
1. Create new sandbox and execute (ExecuteCodeBlock)
2. Create new sandbox, execute, and return sandbox_id (InstantiateCodeSandboxBlock)
3. Connect to existing sandbox and execute (ExecuteCodeStepBlock)
Args:
extract_files: If True and execution_context provided, extract files
created/modified during execution and store to workspace.
""" # noqa
sandbox = None
files: list[SandboxFileOutput] = []
try:
if sandbox_id:
# Connect to existing sandbox (ExecuteCodeStepBlock case)
@@ -135,12 +118,6 @@ class BaseE2BExecutorMixin:
for cmd in setup_commands:
await sandbox.commands.run(cmd)
# Capture timestamp before execution to scope file extraction
start_timestamp = None
if extract_files:
ts_result = await sandbox.commands.run("date -u +%Y-%m-%dT%H:%M:%S")
start_timestamp = ts_result.stdout.strip() if ts_result.stdout else None
# Execute the code
execution = await sandbox.run_code(
code,
@@ -156,24 +133,7 @@ class BaseE2BExecutorMixin:
stdout_logs = "".join(execution.logs.stdout)
stderr_logs = "".join(execution.logs.stderr)
# Extract files created/modified during this execution
if extract_files and execution_context:
files = await extract_and_store_sandbox_files(
sandbox=sandbox,
working_directory=self.WORKING_DIR,
execution_context=execution_context,
since_timestamp=start_timestamp,
text_only=False, # Include binary files too
)
return (
results,
text_output,
stdout_logs,
stderr_logs,
sandbox.sandbox_id,
files,
)
return results, text_output, stdout_logs, stderr_logs, sandbox.sandbox_id
finally:
# Dispose of sandbox if requested to reduce usage costs
if dispose_sandbox and sandbox:
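The three use cases in the docstring map onto two entry points of the e2b_code_interpreter client. A rough usage sketch (credential handling and the kernel-state example are illustrative; treat exact signatures as assumptions from the client library):

from e2b_code_interpreter import AsyncSandbox


async def demo(api_key: str) -> None:
    # Cases 1-2: create a fresh sandbox and execute; keep the sandbox_id
    # if a later step needs it (InstantiateCodeSandboxBlock returns it).
    sandbox = await AsyncSandbox.create(api_key=api_key, timeout=300)
    await sandbox.run_code("x = 41")
    sandbox_id = sandbox.sandbox_id

    # Case 3: reconnect from a later step; kernel state (x) survives for
    # as long as the sandbox stays alive (ExecuteCodeStepBlock).
    sandbox = await AsyncSandbox.connect(sandbox_id, api_key=api_key)
    execution = await sandbox.run_code("print(x + 1)")
    print("".join(execution.logs.stdout))  # "42\n"

    # Dispose when done to reduce usage costs, as the finally block does.
    await sandbox.kill()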
@@ -278,12 +238,6 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
description="Standard output logs from execution"
)
stderr_logs: str = SchemaField(description="Standard error logs from execution")
files: list[SandboxFileOutput] = SchemaField(
description=(
"Files created or modified during execution. "
"Each file has path, name, content, and workspace_ref (if stored)."
),
)
def __init__(self):
super().__init__(
@@ -305,30 +259,23 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
("results", []),
("response", "Hello World"),
("stdout_logs", "Hello World\n"),
("files", []),
],
test_mock={
"execute_code": lambda api_key, code, language, template_id, setup_commands, timeout, dispose_sandbox, execution_context, extract_files: ( # noqa
"execute_code": lambda api_key, code, language, template_id, setup_commands, timeout, dispose_sandbox: ( # noqa
[], # results
"Hello World", # text_output
"Hello World\n", # stdout_logs
"", # stderr_logs
"sandbox_id", # sandbox_id
[], # files
),
},
)
async def run(
self,
input_data: Input,
*,
credentials: APIKeyCredentials,
execution_context: "ExecutionContext",
**kwargs,
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
results, text_output, stdout, stderr, _, files = await self.execute_code(
results, text_output, stdout, stderr, _ = await self.execute_code(
api_key=credentials.api_key.get_secret_value(),
code=input_data.code,
language=input_data.language,
@@ -336,8 +283,6 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
setup_commands=input_data.setup_commands,
timeout=input_data.timeout,
dispose_sandbox=input_data.dispose_sandbox,
execution_context=execution_context,
extract_files=True,
)
# Determine result object shape & filter out empty formats
@@ -351,8 +296,6 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
yield "stdout_logs", stdout
if stderr:
yield "stderr_logs", stderr
# Always yield files (empty list if none)
yield "files", [f.model_dump() for f in files]
except Exception as e:
yield "error", str(e)
@@ -450,7 +393,6 @@ class InstantiateCodeSandboxBlock(Block, BaseE2BExecutorMixin):
"Hello World\n", # stdout_logs
"", # stderr_logs
"sandbox_id", # sandbox_id
[], # files
),
},
)
@@ -459,7 +401,7 @@ class InstantiateCodeSandboxBlock(Block, BaseE2BExecutorMixin):
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
_, text_output, stdout, stderr, sandbox_id, _ = await self.execute_code(
_, text_output, stdout, stderr, sandbox_id = await self.execute_code(
api_key=credentials.api_key.get_secret_value(),
code=input_data.setup_code,
language=input_data.language,
@@ -558,7 +500,6 @@ class ExecuteCodeStepBlock(Block, BaseE2BExecutorMixin):
"Hello World\n", # stdout_logs
"", # stderr_logs
sandbox_id, # sandbox_id
[], # files
),
},
)
@@ -567,7 +508,7 @@ class ExecuteCodeStepBlock(Block, BaseE2BExecutorMixin):
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
results, text_output, stdout, stderr, _, _ = await self.execute_code(
results, text_output, stdout, stderr, _ = await self.execute_code(
api_key=credentials.api_key.get_secret_value(),
code=input_data.step_code,
language=input_data.language,

View File

@@ -1,6 +1,6 @@
import re
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -6,7 +6,7 @@ from openai import AsyncOpenAI
from openai.types.responses import Response as OpenAIResponse
from pydantic import SecretStr
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -1,6 +1,6 @@
from pydantic import BaseModel
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockManualWebhookConfig,

View File

@@ -1,4 +1,4 @@
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -1,6 +1,6 @@
from typing import Any, List
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -1,6 +1,6 @@
import codecs
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -8,7 +8,7 @@ from typing import Any, Literal, cast
import discord
from pydantic import SecretStr
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -2,7 +2,7 @@
Discord OAuth-based blocks.
"""
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -7,7 +7,7 @@ from typing import Literal
from pydantic import BaseModel, ConfigDict, SecretStr
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -2,7 +2,7 @@
import codecs
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -8,7 +8,7 @@ which provides access to LinkedIn profile data and related information.
import logging
from typing import Optional
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -3,13 +3,6 @@ import logging
from enum import Enum
from typing import Any
from backend.blocks._base import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.blocks.fal._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
@@ -17,6 +10,13 @@ from backend.blocks.fal._auth import (
FalCredentialsField,
FalCredentialsInput,
)
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.file import store_media_file

View File

@@ -5,7 +5,7 @@ from pydantic import SecretStr
from replicate.client import Client as ReplicateClient
from replicate.helpers import FileOutput
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -3,7 +3,7 @@ from typing import Optional
from pydantic import BaseModel
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -5,7 +5,7 @@ from typing import Optional
from typing_extensions import TypedDict
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -3,7 +3,7 @@ from urllib.parse import urlparse
from typing_extensions import TypedDict
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -2,7 +2,7 @@ import re
from typing_extensions import TypedDict
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -2,7 +2,7 @@ import base64
from typing_extensions import TypedDict
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -4,7 +4,7 @@ from typing import Any, List, Optional
from typing_extensions import TypedDict
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -3,7 +3,7 @@ from typing import Optional
from pydantic import BaseModel
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -4,7 +4,7 @@ from pathlib import Path
from pydantic import BaseModel
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -8,7 +8,7 @@ from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build
from pydantic import BaseModel
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -7,14 +7,14 @@ from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build
from gravitas_md2gdocs import to_requests
from backend.blocks._base import (
from backend.blocks.google._drive import GoogleDriveFile, GoogleDriveFileField
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.blocks.google._drive import GoogleDriveFile, GoogleDriveFileField
from backend.data.model import SchemaField
from backend.util.settings import Settings

View File

@@ -14,7 +14,7 @@ from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build
from pydantic import BaseModel, Field
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -7,14 +7,14 @@ from enum import Enum
from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build
from backend.blocks._base import (
from backend.blocks.google._drive import GoogleDriveFile, GoogleDriveFileField
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.blocks.google._drive import GoogleDriveFile, GoogleDriveFileField
from backend.data.model import SchemaField
from backend.util.settings import Settings

View File

@@ -3,7 +3,7 @@ from typing import Literal
import googlemaps
from pydantic import BaseModel, SecretStr
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -9,7 +9,9 @@ from typing import Any, Optional
from prisma.enums import ReviewStatus
from pydantic import BaseModel
from backend.data.execution import ExecutionStatus
from backend.data.human_review import ReviewResult
from backend.executor.manager import async_update_node_execution_status
from backend.util.clients import get_database_manager_async_client
logger = logging.getLogger(__name__)
@@ -41,8 +43,6 @@ class HITLReviewHelper:
@staticmethod
async def update_node_execution_status(**kwargs) -> None:
"""Update the execution status of a node."""
from backend.executor.manager import async_update_node_execution_status
await async_update_node_execution_status(
db_client=get_database_manager_async_client(), **kwargs
)
@@ -88,13 +88,12 @@ class HITLReviewHelper:
Raises:
Exception: If review creation or status update fails
"""
from backend.data.execution import ExecutionStatus
# Note: Safe mode checks (human_in_the_loop_safe_mode, sensitive_action_safe_mode)
# are handled by the caller:
# - HITL blocks check human_in_the_loop_safe_mode in their run() method
# - Sensitive action blocks check sensitive_action_safe_mode in is_block_exec_need_review()
# This function only handles checking for existing approvals.
# Check if this node has already been approved (normal or auto-approval)
if approval_result := await HITLReviewHelper.check_approval(
node_exec_id=node_exec_id,
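Condensed, the walrus check above is the whole gate: any pre-existing approval (normal or auto-approval) short-circuits review creation. A sketch of that control flow, hedged because ReviewResult's shape is not shown in this diff:

from backend.blocks.helpers.review import HITLReviewHelper


async def already_reviewed(node_exec_id: str) -> bool:
    # Reuse a prior approval instead of creating a duplicate review.
    approval_result = await HITLReviewHelper.check_approval(node_exec_id=node_exec_id)
    return approval_result is not None  # assumed: None means no prior review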

View File

@@ -8,7 +8,7 @@ from typing import Literal
import aiofiles
from pydantic import SecretStr
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -1,15 +1,15 @@
from backend.blocks._base import (
from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.model import SchemaField
from backend.util.request import Requests

View File

@@ -1,15 +1,15 @@
from backend.blocks._base import (
from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.model import SchemaField
from backend.util.request import Requests

View File

@@ -1,17 +1,17 @@
from datetime import datetime, timedelta
from backend.blocks._base import (
from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.model import SchemaField
from backend.util.request import Requests

View File

@@ -3,7 +3,8 @@ from typing import Any
from prisma.enums import ReviewStatus
from backend.blocks._base import (
from backend.blocks.helpers.review import HITLReviewHelper
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
@@ -11,7 +12,6 @@ from backend.blocks._base import (
BlockSchemaOutput,
BlockType,
)
from backend.blocks.helpers.review import HITLReviewHelper
from backend.data.execution import ExecutionContext
from backend.data.human_review import ReviewResult
from backend.data.model import SchemaField

View File

@@ -3,7 +3,7 @@ from typing import Any, Dict, Literal, Optional
from pydantic import SecretStr
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -2,7 +2,9 @@ import copy
from datetime import date, time
from typing import Any, Optional
from backend.blocks._base import (
# Import for Google Drive file input block
from backend.blocks.google._drive import AttachmentView, GoogleDriveFile
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
@@ -10,9 +12,6 @@ from backend.blocks._base import (
BlockSchemaInput,
BlockType,
)
# Import for Google Drive file input block
from backend.blocks.google._drive import AttachmentView, GoogleDriveFile
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.file import store_media_file

View File

@@ -1,6 +1,6 @@
from typing import Any
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -1,15 +1,15 @@
from backend.blocks._base import (
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.data.model import SchemaField
from backend.util.request import Requests

View File

@@ -1,15 +1,15 @@
from backend.blocks._base import (
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.data.model import SchemaField
from backend.util.request import Requests

View File

@@ -3,18 +3,18 @@ from urllib.parse import quote
from typing_extensions import TypedDict
from backend.blocks._base import (
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.data.model import SchemaField
from backend.util.request import Requests

View File

@@ -1,12 +1,5 @@
from urllib.parse import quote
from backend.blocks._base import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.blocks.jina._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
@@ -15,6 +8,13 @@ from backend.blocks.jina._auth import (
JinaCredentialsInput,
)
from backend.blocks.search import GetRequest
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.data.model import SchemaField
from backend.util.exceptions import BlockExecutionError

View File

@@ -15,7 +15,7 @@ from anthropic.types import ToolParam
from groq import AsyncGroq
from pydantic import BaseModel, SecretStr
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -2,7 +2,7 @@ import operator
from enum import Enum
from typing import Any
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -3,7 +3,7 @@ from typing import List, Literal
from pydantic import SecretStr
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -3,7 +3,7 @@ from typing import Any, Literal, Optional, Union
from mem0 import MemoryClient
from pydantic import BaseModel, SecretStr
from backend.blocks._base import Block, BlockOutput, BlockSchemaInput, BlockSchemaOutput
from backend.data.block import Block, BlockOutput, BlockSchemaInput, BlockSchemaOutput
from backend.data.model import (
APIKeyCredentials,
CredentialsField,

View File

@@ -4,7 +4,7 @@ from typing import Any, Dict, List, Optional
from pydantic import model_validator
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -2,7 +2,7 @@ from __future__ import annotations
from typing import Any, Dict, List, Optional
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -1,6 +1,6 @@
from __future__ import annotations
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -1,6 +1,6 @@
from __future__ import annotations
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -4,7 +4,7 @@ from typing import List, Optional
from pydantic import BaseModel
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -1,15 +1,15 @@
from backend.blocks._base import (
from backend.blocks.nvidia._auth import (
NvidiaCredentials,
NvidiaCredentialsField,
NvidiaCredentialsInput,
)
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.blocks.nvidia._auth import (
NvidiaCredentials,
NvidiaCredentialsField,
NvidiaCredentialsInput,
)
from backend.data.model import SchemaField
from backend.util.request import Requests
from backend.util.type import MediaFileType

View File

@@ -6,7 +6,7 @@ from typing import Any, Literal
import openai
from pydantic import SecretStr
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -1,7 +1,7 @@
import logging
from typing import Any, Literal
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -3,7 +3,7 @@ from typing import Any, Literal
from pinecone import Pinecone, ServerlessSpec
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -6,7 +6,7 @@ import praw
from praw.models import Comment, MoreComments, Submission
from pydantic import BaseModel, SecretStr
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -4,19 +4,19 @@ from enum import Enum
from pydantic import SecretStr
from replicate.client import Client as ReplicateClient
from backend.blocks._base import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.blocks.replicate._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
ReplicateCredentialsInput,
)
from backend.blocks.replicate._helper import ReplicateOutputs, extract_result
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.data.model import APIKeyCredentials, CredentialsField, SchemaField

View File

@@ -4,19 +4,19 @@ from typing import Optional
from pydantic import SecretStr
from replicate.client import Client as ReplicateClient
from backend.blocks._base import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.blocks.replicate._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
ReplicateCredentialsInput,
)
from backend.blocks.replicate._helper import ReplicateOutputs, extract_result
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.data.model import APIKeyCredentials, CredentialsField, SchemaField
from backend.util.exceptions import BlockExecutionError, BlockInputError

View File

@@ -6,7 +6,7 @@ from typing import Any
import feedparser
import pydantic
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -3,7 +3,7 @@ from collections import defaultdict
from enum import Enum
from typing import Any, Dict, List, Optional, Union
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -4,7 +4,7 @@ from typing import Literal
from pydantic import SecretStr
from backend.blocks._base import (
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,

View File

@@ -3,14 +3,14 @@ from urllib.parse import quote
from pydantic import SecretStr
from backend.blocks._base import (
from backend.blocks.helpers.http import GetRequest
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.blocks.helpers.http import GetRequest
from backend.data.model import (
APIKeyCredentials,
CredentialsField,

View File

@@ -1,6 +1,6 @@
from typing import Any, Dict
from backend.blocks._base import Block
from backend.data.block import Block
from backend.util.request import Requests
from ._api import Color, CustomerDetails, OrderItem, Profile

Some files were not shown because too many files have changed in this diff Show More