Merge branch 'dev' into ntindle/secrt-1831-filter-out-graph-only-blocks-from-copilots-find_block

Author: Nicholas Tindle
Date: 2026-02-08 19:50:20 -06:00
Committed by: GitHub
44 changed files with 6047 additions and 3877 deletions

View File

@@ -49,7 +49,7 @@ jobs:
- name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
if: github.event_name == 'push'
uses: peter-evans/create-pull-request@v7
uses: peter-evans/create-pull-request@v8
with:
add-paths: classic/frontend/build/web
base: ${{ github.ref_name }}

View File

@@ -309,6 +309,7 @@ jobs:
uses: anthropics/claude-code-action@v1
with:
claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
allowed_bots: "dependabot[bot]"
claude_args: |
--allowedTools "Bash(npm:*),Bash(pnpm:*),Bash(poetry:*),Bash(git:*),Edit,Replace,NotebookEditCell,mcp__github_inline_comment__create_inline_comment,Bash(gh pr comment:*), Bash(gh pr diff:*), Bash(gh pr view:*)"
prompt: |

File diff suppressed because it is too large

View File

@@ -9,17 +9,17 @@ packages = [{ include = "autogpt_libs" }]
[tool.poetry.dependencies]
python = ">=3.10,<4.0"
colorama = "^0.4.6"
cryptography = "^45.0"
cryptography = "^46.0"
expiringdict = "^1.2.2"
fastapi = "^0.116.1"
google-cloud-logging = "^3.12.1"
launchdarkly-server-sdk = "^9.12.0"
pydantic = "^2.11.7"
pydantic-settings = "^2.10.1"
pyjwt = { version = "^2.10.1", extras = ["crypto"] }
fastapi = "^0.128.0"
google-cloud-logging = "^3.13.0"
launchdarkly-server-sdk = "^9.14.1"
pydantic = "^2.12.5"
pydantic-settings = "^2.12.0"
pyjwt = { version = "^2.11.0", extras = ["crypto"] }
redis = "^6.2.0"
supabase = "^2.16.0"
uvicorn = "^0.35.0"
supabase = "^2.27.2"
uvicorn = "^0.40.0"
[tool.poetry.group.dev.dependencies]
pyright = "^1.1.404"

View File

@@ -6,7 +6,6 @@ from typing import Any
from backend.api.features.library import db as library_db
from backend.api.features.library import model as library_model
from backend.api.features.store import db as store_db
from backend.data import graph as graph_db
from backend.data.graph import GraphModel
from backend.data.model import (
CredentialsFieldInfo,
@@ -44,14 +43,8 @@ async def fetch_graph_from_store_slug(
return None, None
# Get the graph from store listing version
graph_meta = await store_db.get_available_graph(
store_agent.store_listing_version_id
)
graph = await graph_db.get_graph(
graph_id=graph_meta.id,
version=graph_meta.version,
user_id=None, # Public access
include_subgraphs=True,
graph = await store_db.get_available_graph(
store_agent.store_listing_version_id, hide_nodes=False
)
return graph, store_agent
@@ -128,7 +121,7 @@ def build_missing_credentials_from_graph(
return {
field_key: _serialize_missing_credential(field_key, field_info)
for field_key, (field_info, _node_fields) in aggregated_fields.items()
for field_key, (field_info, _, _) in aggregated_fields.items()
if field_key not in matched_keys
}
@@ -269,7 +262,8 @@ async def match_user_credentials_to_graph(
# provider is in the set of acceptable providers.
for credential_field_name, (
credential_requirements,
_node_fields,
_,
_,
) in aggregated_creds.items():
# Find first matching credential by provider, type, and scopes
matching_cred = next(

View File

@@ -374,7 +374,7 @@ async def get_library_agent_by_graph_id(
async def add_generated_agent_image(
graph: graph_db.BaseGraph,
graph: graph_db.GraphBaseMeta,
user_id: str,
library_agent_id: str,
) -> Optional[prisma.models.LibraryAgent]:

View File

@@ -1,7 +1,7 @@
import asyncio
import logging
from datetime import datetime, timezone
from typing import Any, Literal
from typing import Any, Literal, overload
import fastapi
import prisma.enums
@@ -11,8 +11,8 @@ import prisma.types
from backend.data.db import transaction
from backend.data.graph import (
GraphMeta,
GraphModel,
GraphModelWithoutNodes,
get_graph,
get_graph_as_admin,
get_sub_graphs,
@@ -334,7 +334,22 @@ async def get_store_agent_details(
raise DatabaseError("Failed to fetch agent details") from e
async def get_available_graph(store_listing_version_id: str) -> GraphMeta:
@overload
async def get_available_graph(
store_listing_version_id: str, hide_nodes: Literal[False]
) -> GraphModel: ...
@overload
async def get_available_graph(
store_listing_version_id: str, hide_nodes: Literal[True] = True
) -> GraphModelWithoutNodes: ...
async def get_available_graph(
store_listing_version_id: str,
hide_nodes: bool = True,
) -> GraphModelWithoutNodes | GraphModel:
try:
# Get available, non-deleted store listing version
store_listing_version = (
@@ -344,7 +359,7 @@ async def get_available_graph(store_listing_version_id: str) -> GraphMeta:
"isAvailable": True,
"isDeleted": False,
},
include={"AgentGraph": {"include": {"Nodes": True}}},
include={"AgentGraph": {"include": AGENT_GRAPH_INCLUDE}},
)
)
@@ -354,7 +369,9 @@ async def get_available_graph(store_listing_version_id: str) -> GraphMeta:
detail=f"Store listing version {store_listing_version_id} not found",
)
return GraphModel.from_db(store_listing_version.AgentGraph).meta()
return (GraphModelWithoutNodes if hide_nodes else GraphModel).from_db(
store_listing_version.AgentGraph
)
except Exception as e:
logger.error(f"Error getting agent: {e}")

View File

@@ -16,7 +16,7 @@ from backend.blocks.ideogram import (
StyleType,
UpscaleOption,
)
from backend.data.graph import BaseGraph
from backend.data.graph import GraphBaseMeta
from backend.data.model import CredentialsMetaInput, ProviderName
from backend.integrations.credentials_store import ideogram_credentials
from backend.util.request import Requests
@@ -34,14 +34,14 @@ class ImageStyle(str, Enum):
DIGITAL_ART = "digital art"
async def generate_agent_image(agent: BaseGraph | AgentGraph) -> io.BytesIO:
async def generate_agent_image(agent: GraphBaseMeta | AgentGraph) -> io.BytesIO:
if settings.config.use_agent_image_generation_v2:
return await generate_agent_image_v2(graph=agent)
else:
return await generate_agent_image_v1(agent=agent)
async def generate_agent_image_v2(graph: BaseGraph | AgentGraph) -> io.BytesIO:
async def generate_agent_image_v2(graph: GraphBaseMeta | AgentGraph) -> io.BytesIO:
"""
Generate an image for an agent using Ideogram model.
Returns:
@@ -54,14 +54,17 @@ async def generate_agent_image_v2(graph: BaseGraph | AgentGraph) -> io.BytesIO:
description = f"{name} ({graph.description})" if graph.description else name
prompt = (
f"Create a visually striking retro-futuristic vector pop art illustration prominently featuring "
f'"{name}" in bold typography. The image clearly and literally depicts a {description}, '
f"along with recognizable objects directly associated with the primary function of a {name}. "
f"Ensure the imagery is concrete, intuitive, and immediately understandable, clearly conveying the "
f"purpose of a {name}. Maintain vibrant, limited-palette colors, sharp vector lines, geometric "
f"shapes, flat illustration techniques, and solid colors without gradients or shading. Preserve a "
f"retro-futuristic aesthetic influenced by mid-century futurism and 1960s psychedelia, "
f"prioritizing clear visual storytelling and thematic clarity above all else."
"Create a visually striking retro-futuristic vector pop art illustration "
f'prominently featuring "{name}" in bold typography. The image clearly and '
f"literally depicts a {description}, along with recognizable objects directly "
f"associated with the primary function of a {name}. "
f"Ensure the imagery is concrete, intuitive, and immediately understandable, "
f"clearly conveying the purpose of a {name}. "
"Maintain vibrant, limited-palette colors, sharp vector lines, "
"geometric shapes, flat illustration techniques, and solid colors "
"without gradients or shading. Preserve a retro-futuristic aesthetic "
"influenced by mid-century futurism and 1960s psychedelia, "
"prioritizing clear visual storytelling and thematic clarity above all else."
)
custom_colors = [
@@ -99,12 +102,12 @@ async def generate_agent_image_v2(graph: BaseGraph | AgentGraph) -> io.BytesIO:
return io.BytesIO(response.content)
async def generate_agent_image_v1(agent: BaseGraph | AgentGraph) -> io.BytesIO:
async def generate_agent_image_v1(agent: GraphBaseMeta | AgentGraph) -> io.BytesIO:
"""
Generate an image for an agent using Flux model via Replicate API.
Args:
agent (Graph): The agent to generate an image for
agent (GraphBaseMeta | AgentGraph): The agent to generate an image for
Returns:
io.BytesIO: The generated image as bytes
@@ -114,7 +117,13 @@ async def generate_agent_image_v1(agent: BaseGraph | AgentGraph) -> io.BytesIO:
raise ValueError("Missing Replicate API key in settings")
# Construct prompt from agent details
prompt = f"Create a visually engaging app store thumbnail for the AI agent that highlights what it does in a clear and captivating way:\n- **Name**: {agent.name}\n- **Description**: {agent.description}\nFocus on showcasing its core functionality with an appealing design."
prompt = (
"Create a visually engaging app store thumbnail for the AI agent "
"that highlights what it does in a clear and captivating way:\n"
f"- **Name**: {agent.name}\n"
f"- **Description**: {agent.description}\n"
f"Focus on showcasing its core functionality with an appealing design."
)
# Set up Replicate client
client = ReplicateClient(api_token=settings.secrets.replicate_api_key)

View File

@@ -278,7 +278,7 @@ async def get_agent(
)
async def get_graph_meta_by_store_listing_version_id(
store_listing_version_id: str,
) -> backend.data.graph.GraphMeta:
) -> backend.data.graph.GraphModelWithoutNodes:
"""
Get Agent Graph from Store Listing Version ID.
"""

View File

@@ -478,7 +478,7 @@ class ExaCreateOrFindWebsetBlock(Block):
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
try:
webset = aexa.websets.get(id=input_data.external_id)
webset = await aexa.websets.get(id=input_data.external_id)
webset_result = Webset.model_validate(webset.model_dump(by_alias=True))
yield "webset", webset_result
@@ -494,7 +494,7 @@ class ExaCreateOrFindWebsetBlock(Block):
count=input_data.search_count,
)
webset = aexa.websets.create(
webset = await aexa.websets.create(
params=CreateWebsetParameters(
search=search_params,
external_id=input_data.external_id,
@@ -554,7 +554,7 @@ class ExaUpdateWebsetBlock(Block):
if input_data.metadata is not None:
payload["metadata"] = input_data.metadata
sdk_webset = aexa.websets.update(id=input_data.webset_id, params=payload)
sdk_webset = await aexa.websets.update(id=input_data.webset_id, params=payload)
status_str = (
sdk_webset.status.value
@@ -617,7 +617,7 @@ class ExaListWebsetsBlock(Block):
) -> BlockOutput:
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
response = aexa.websets.list(
response = await aexa.websets.list(
cursor=input_data.cursor,
limit=input_data.limit,
)
@@ -678,7 +678,7 @@ class ExaGetWebsetBlock(Block):
) -> BlockOutput:
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
sdk_webset = aexa.websets.get(id=input_data.webset_id)
sdk_webset = await aexa.websets.get(id=input_data.webset_id)
status_str = (
sdk_webset.status.value
@@ -748,7 +748,7 @@ class ExaDeleteWebsetBlock(Block):
) -> BlockOutput:
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
deleted_webset = aexa.websets.delete(id=input_data.webset_id)
deleted_webset = await aexa.websets.delete(id=input_data.webset_id)
status_str = (
deleted_webset.status.value
@@ -798,7 +798,7 @@ class ExaCancelWebsetBlock(Block):
) -> BlockOutput:
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
canceled_webset = aexa.websets.cancel(id=input_data.webset_id)
canceled_webset = await aexa.websets.cancel(id=input_data.webset_id)
status_str = (
canceled_webset.status.value
@@ -968,7 +968,7 @@ class ExaPreviewWebsetBlock(Block):
entity["description"] = input_data.entity_description
payload["entity"] = entity
sdk_preview = aexa.websets.preview(params=payload)
sdk_preview = await aexa.websets.preview(params=payload)
preview = PreviewWebsetModel.from_sdk(sdk_preview)
@@ -1051,7 +1051,7 @@ class ExaWebsetStatusBlock(Block):
) -> BlockOutput:
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
webset = aexa.websets.get(id=input_data.webset_id)
webset = await aexa.websets.get(id=input_data.webset_id)
status = (
webset.status.value
@@ -1185,7 +1185,7 @@ class ExaWebsetSummaryBlock(Block):
) -> BlockOutput:
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
webset = aexa.websets.get(id=input_data.webset_id)
webset = await aexa.websets.get(id=input_data.webset_id)
# Extract basic info
webset_id = webset.id
@@ -1211,7 +1211,7 @@ class ExaWebsetSummaryBlock(Block):
total_items = 0
if input_data.include_sample_items and input_data.sample_size > 0:
items_response = aexa.websets.items.list(
items_response = await aexa.websets.items.list(
webset_id=input_data.webset_id, limit=input_data.sample_size
)
sample_items_data = [
@@ -1362,7 +1362,7 @@ class ExaWebsetReadyCheckBlock(Block):
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
# Get webset details
webset = aexa.websets.get(id=input_data.webset_id)
webset = await aexa.websets.get(id=input_data.webset_id)
status = (
webset.status.value

View File

@@ -202,7 +202,7 @@ class ExaCreateEnrichmentBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
sdk_enrichment = aexa.websets.enrichments.create(
sdk_enrichment = await aexa.websets.enrichments.create(
webset_id=input_data.webset_id, params=payload
)
@@ -223,7 +223,7 @@ class ExaCreateEnrichmentBlock(Block):
items_enriched = 0
while time.time() - poll_start < input_data.polling_timeout:
current_enrich = aexa.websets.enrichments.get(
current_enrich = await aexa.websets.enrichments.get(
webset_id=input_data.webset_id, id=enrichment_id
)
current_status = (
@@ -234,7 +234,7 @@ class ExaCreateEnrichmentBlock(Block):
if current_status in ["completed", "failed", "cancelled"]:
# Estimate items from webset searches
webset = aexa.websets.get(id=input_data.webset_id)
webset = await aexa.websets.get(id=input_data.webset_id)
if webset.searches:
for search in webset.searches:
if search.progress:
@@ -329,7 +329,7 @@ class ExaGetEnrichmentBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
sdk_enrichment = aexa.websets.enrichments.get(
sdk_enrichment = await aexa.websets.enrichments.get(
webset_id=input_data.webset_id, id=input_data.enrichment_id
)
@@ -474,7 +474,7 @@ class ExaDeleteEnrichmentBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
deleted_enrichment = aexa.websets.enrichments.delete(
deleted_enrichment = await aexa.websets.enrichments.delete(
webset_id=input_data.webset_id, id=input_data.enrichment_id
)
@@ -525,13 +525,13 @@ class ExaCancelEnrichmentBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
canceled_enrichment = aexa.websets.enrichments.cancel(
canceled_enrichment = await aexa.websets.enrichments.cancel(
webset_id=input_data.webset_id, id=input_data.enrichment_id
)
# Try to estimate how many items were enriched before cancellation
items_enriched = 0
items_response = aexa.websets.items.list(
items_response = await aexa.websets.items.list(
webset_id=input_data.webset_id, limit=100
)

View File

@@ -222,7 +222,7 @@ class ExaCreateImportBlock(Block):
def _create_test_mock():
"""Create test mocks for the AsyncExa SDK."""
from datetime import datetime
from unittest.mock import MagicMock
from unittest.mock import AsyncMock, MagicMock
# Create mock SDK import object
mock_import = MagicMock()
@@ -247,7 +247,7 @@ class ExaCreateImportBlock(Block):
return {
"_get_client": lambda *args, **kwargs: MagicMock(
websets=MagicMock(
imports=MagicMock(create=lambda *args, **kwargs: mock_import)
imports=MagicMock(create=AsyncMock(return_value=mock_import))
)
)
}
@@ -294,7 +294,7 @@ class ExaCreateImportBlock(Block):
if input_data.metadata:
payload["metadata"] = input_data.metadata
sdk_import = aexa.websets.imports.create(
sdk_import = await aexa.websets.imports.create(
params=payload, csv_data=input_data.csv_data
)
@@ -360,7 +360,7 @@ class ExaGetImportBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
sdk_import = aexa.websets.imports.get(import_id=input_data.import_id)
sdk_import = await aexa.websets.imports.get(import_id=input_data.import_id)
import_obj = ImportModel.from_sdk(sdk_import)
@@ -426,7 +426,7 @@ class ExaListImportsBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
response = aexa.websets.imports.list(
response = await aexa.websets.imports.list(
cursor=input_data.cursor,
limit=input_data.limit,
)
@@ -474,7 +474,9 @@ class ExaDeleteImportBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
deleted_import = aexa.websets.imports.delete(import_id=input_data.import_id)
deleted_import = await aexa.websets.imports.delete(
import_id=input_data.import_id
)
yield "import_id", deleted_import.id
yield "success", "true"
@@ -573,14 +575,14 @@ class ExaExportWebsetBlock(Block):
}
)
# Create mock iterator
mock_items = [mock_item1, mock_item2]
# Create async iterator for list_all
async def async_item_iterator(*args, **kwargs):
for item in [mock_item1, mock_item2]:
yield item
return {
"_get_client": lambda *args, **kwargs: MagicMock(
websets=MagicMock(
items=MagicMock(list_all=lambda *args, **kwargs: iter(mock_items))
)
websets=MagicMock(items=MagicMock(list_all=async_item_iterator))
)
}
@@ -602,7 +604,7 @@ class ExaExportWebsetBlock(Block):
webset_id=input_data.webset_id, limit=input_data.max_items
)
for sdk_item in item_iterator:
async for sdk_item in item_iterator:
if len(all_items) >= input_data.max_items:
break
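Not part of the diff: a minimal sketch (standard unittest.mock behaviour) of why the test mocks above switch from plain lambdas to AsyncMock and async generators once the SDK calls are awaited.
# Illustrative sketch only, using the standard library.
import asyncio
from unittest.mock import AsyncMock, MagicMock

mock_import = MagicMock()
create = AsyncMock(return_value=mock_import)

async def _demo():
    result = await create(params={})  # awaiting an AsyncMock yields return_value
    assert result is mock_import
    # Awaiting the return value of a plain MagicMock or lambda raises TypeError,
    # which is why the newly awaited SDK calls in this commit need AsyncMock.

asyncio.run(_demo())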

View File

@@ -178,7 +178,7 @@ class ExaGetWebsetItemBlock(Block):
) -> BlockOutput:
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
sdk_item = aexa.websets.items.get(
sdk_item = await aexa.websets.items.get(
webset_id=input_data.webset_id, id=input_data.item_id
)
@@ -269,7 +269,7 @@ class ExaListWebsetItemsBlock(Block):
response = None
while time.time() - start_time < input_data.wait_timeout:
response = aexa.websets.items.list(
response = await aexa.websets.items.list(
webset_id=input_data.webset_id,
cursor=input_data.cursor,
limit=input_data.limit,
@@ -282,13 +282,13 @@ class ExaListWebsetItemsBlock(Block):
interval = min(interval * 1.2, 10)
if not response:
response = aexa.websets.items.list(
response = await aexa.websets.items.list(
webset_id=input_data.webset_id,
cursor=input_data.cursor,
limit=input_data.limit,
)
else:
response = aexa.websets.items.list(
response = await aexa.websets.items.list(
webset_id=input_data.webset_id,
cursor=input_data.cursor,
limit=input_data.limit,
@@ -340,7 +340,7 @@ class ExaDeleteWebsetItemBlock(Block):
) -> BlockOutput:
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
deleted_item = aexa.websets.items.delete(
deleted_item = await aexa.websets.items.delete(
webset_id=input_data.webset_id, id=input_data.item_id
)
@@ -408,7 +408,7 @@ class ExaBulkWebsetItemsBlock(Block):
webset_id=input_data.webset_id, limit=input_data.max_items
)
for sdk_item in item_iterator:
async for sdk_item in item_iterator:
if len(all_items) >= input_data.max_items:
break
@@ -475,7 +475,7 @@ class ExaWebsetItemsSummaryBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
webset = aexa.websets.get(id=input_data.webset_id)
webset = await aexa.websets.get(id=input_data.webset_id)
entity_type = "unknown"
if webset.searches:
@@ -495,7 +495,7 @@ class ExaWebsetItemsSummaryBlock(Block):
# Get sample items if requested
sample_items: List[WebsetItemModel] = []
if input_data.sample_size > 0:
items_response = aexa.websets.items.list(
items_response = await aexa.websets.items.list(
webset_id=input_data.webset_id, limit=input_data.sample_size
)
# Convert to our stable models
@@ -569,7 +569,7 @@ class ExaGetNewItemsBlock(Block):
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
# Get items starting from cursor
response = aexa.websets.items.list(
response = await aexa.websets.items.list(
webset_id=input_data.webset_id,
cursor=input_data.since_cursor,
limit=input_data.max_items,

View File

@@ -233,7 +233,7 @@ class ExaCreateMonitorBlock(Block):
def _create_test_mock():
"""Create test mocks for the AsyncExa SDK."""
from datetime import datetime
from unittest.mock import MagicMock
from unittest.mock import AsyncMock, MagicMock
# Create mock SDK monitor object
mock_monitor = MagicMock()
@@ -263,7 +263,7 @@ class ExaCreateMonitorBlock(Block):
return {
"_get_client": lambda *args, **kwargs: MagicMock(
websets=MagicMock(
monitors=MagicMock(create=lambda *args, **kwargs: mock_monitor)
monitors=MagicMock(create=AsyncMock(return_value=mock_monitor))
)
)
}
@@ -320,7 +320,7 @@ class ExaCreateMonitorBlock(Block):
if input_data.metadata:
payload["metadata"] = input_data.metadata
sdk_monitor = aexa.websets.monitors.create(params=payload)
sdk_monitor = await aexa.websets.monitors.create(params=payload)
monitor = MonitorModel.from_sdk(sdk_monitor)
@@ -384,7 +384,7 @@ class ExaGetMonitorBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
sdk_monitor = aexa.websets.monitors.get(monitor_id=input_data.monitor_id)
sdk_monitor = await aexa.websets.monitors.get(monitor_id=input_data.monitor_id)
monitor = MonitorModel.from_sdk(sdk_monitor)
@@ -476,7 +476,7 @@ class ExaUpdateMonitorBlock(Block):
if input_data.metadata is not None:
payload["metadata"] = input_data.metadata
sdk_monitor = aexa.websets.monitors.update(
sdk_monitor = await aexa.websets.monitors.update(
monitor_id=input_data.monitor_id, params=payload
)
@@ -522,7 +522,9 @@ class ExaDeleteMonitorBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
deleted_monitor = aexa.websets.monitors.delete(monitor_id=input_data.monitor_id)
deleted_monitor = await aexa.websets.monitors.delete(
monitor_id=input_data.monitor_id
)
yield "monitor_id", deleted_monitor.id
yield "success", "true"
@@ -579,7 +581,7 @@ class ExaListMonitorsBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
response = aexa.websets.monitors.list(
response = await aexa.websets.monitors.list(
cursor=input_data.cursor,
limit=input_data.limit,
webset_id=input_data.webset_id,

View File

@@ -121,7 +121,7 @@ class ExaWaitForWebsetBlock(Block):
WebsetTargetStatus.IDLE,
WebsetTargetStatus.ANY_COMPLETE,
]:
final_webset = aexa.websets.wait_until_idle(
final_webset = await aexa.websets.wait_until_idle(
id=input_data.webset_id,
timeout=input_data.timeout,
poll_interval=input_data.check_interval,
@@ -164,7 +164,7 @@ class ExaWaitForWebsetBlock(Block):
interval = input_data.check_interval
while time.time() - start_time < input_data.timeout:
# Get current webset status
webset = aexa.websets.get(id=input_data.webset_id)
webset = await aexa.websets.get(id=input_data.webset_id)
current_status = (
webset.status.value
if hasattr(webset.status, "value")
@@ -209,7 +209,7 @@ class ExaWaitForWebsetBlock(Block):
# Timeout reached
elapsed = time.time() - start_time
webset = aexa.websets.get(id=input_data.webset_id)
webset = await aexa.websets.get(id=input_data.webset_id)
final_status = (
webset.status.value
if hasattr(webset.status, "value")
@@ -345,7 +345,7 @@ class ExaWaitForSearchBlock(Block):
try:
while time.time() - start_time < input_data.timeout:
# Get current search status using SDK
search = aexa.websets.searches.get(
search = await aexa.websets.searches.get(
webset_id=input_data.webset_id, id=input_data.search_id
)
@@ -401,7 +401,7 @@ class ExaWaitForSearchBlock(Block):
elapsed = time.time() - start_time
# Get last known status
search = aexa.websets.searches.get(
search = await aexa.websets.searches.get(
webset_id=input_data.webset_id, id=input_data.search_id
)
final_status = (
@@ -503,7 +503,7 @@ class ExaWaitForEnrichmentBlock(Block):
try:
while time.time() - start_time < input_data.timeout:
# Get current enrichment status using SDK
enrichment = aexa.websets.enrichments.get(
enrichment = await aexa.websets.enrichments.get(
webset_id=input_data.webset_id, id=input_data.enrichment_id
)
@@ -548,7 +548,7 @@ class ExaWaitForEnrichmentBlock(Block):
elapsed = time.time() - start_time
# Get last known status
enrichment = aexa.websets.enrichments.get(
enrichment = await aexa.websets.enrichments.get(
webset_id=input_data.webset_id, id=input_data.enrichment_id
)
final_status = (
@@ -575,7 +575,7 @@ class ExaWaitForEnrichmentBlock(Block):
) -> tuple[list[SampleEnrichmentModel], int]:
"""Get sample enriched data and count."""
# Get a few items to see enrichment results using SDK
response = aexa.websets.items.list(webset_id=webset_id, limit=5)
response = await aexa.websets.items.list(webset_id=webset_id, limit=5)
sample_data: list[SampleEnrichmentModel] = []
enriched_count = 0

View File

@@ -317,7 +317,7 @@ class ExaCreateWebsetSearchBlock(Block):
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
sdk_search = aexa.websets.searches.create(
sdk_search = await aexa.websets.searches.create(
webset_id=input_data.webset_id, params=payload
)
@@ -350,7 +350,7 @@ class ExaCreateWebsetSearchBlock(Block):
poll_start = time.time()
while time.time() - poll_start < input_data.polling_timeout:
current_search = aexa.websets.searches.get(
current_search = await aexa.websets.searches.get(
webset_id=input_data.webset_id, id=search_id
)
current_status = (
@@ -442,7 +442,7 @@ class ExaGetWebsetSearchBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
sdk_search = aexa.websets.searches.get(
sdk_search = await aexa.websets.searches.get(
webset_id=input_data.webset_id, id=input_data.search_id
)
@@ -523,7 +523,7 @@ class ExaCancelWebsetSearchBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
canceled_search = aexa.websets.searches.cancel(
canceled_search = await aexa.websets.searches.cancel(
webset_id=input_data.webset_id, id=input_data.search_id
)
@@ -604,7 +604,7 @@ class ExaFindOrCreateSearchBlock(Block):
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
# Get webset to check existing searches
webset = aexa.websets.get(id=input_data.webset_id)
webset = await aexa.websets.get(id=input_data.webset_id)
# Look for existing search with same query
existing_search = None
@@ -636,7 +636,7 @@ class ExaFindOrCreateSearchBlock(Block):
if input_data.entity_type != SearchEntityType.AUTO:
payload["entity"] = {"type": input_data.entity_type.value}
sdk_search = aexa.websets.searches.create(
sdk_search = await aexa.websets.searches.create(
webset_id=input_data.webset_id, params=payload
)

View File

@@ -596,10 +596,10 @@ def extract_openai_tool_calls(response) -> list[ToolContentBlock] | None:
def get_parallel_tool_calls_param(
llm_model: LlmModel, parallel_tool_calls: bool | None
):
) -> bool | openai.Omit:
"""Get the appropriate parallel_tool_calls parameter for OpenAI-compatible APIs."""
if llm_model.startswith("o") or parallel_tool_calls is None:
return openai.NOT_GIVEN
return openai.omit
return parallel_tool_calls

View File

@@ -246,7 +246,9 @@ class BlockSchema(BaseModel):
f"is not of type {CredentialsMetaInput.__name__}"
)
credentials_fields[field_name].validate_credentials_field_schema(cls)
CredentialsMetaInput.validate_credentials_field_schema(
cls.get_field_schema(field_name), field_name
)
elif field_name in credentials_fields:
raise KeyError(

View File

@@ -1,9 +1,8 @@
import logging
import queue
from collections import defaultdict
from datetime import datetime, timedelta, timezone
from enum import Enum
from multiprocessing import Manager
from queue import Empty
from typing import (
TYPE_CHECKING,
Annotated,
@@ -1200,12 +1199,16 @@ class NodeExecutionEntry(BaseModel):
class ExecutionQueue(Generic[T]):
"""
Queue for managing the execution of agents.
This will be shared between different processes
Thread-safe queue for managing node execution within a single graph execution.
Note: Uses queue.Queue (not multiprocessing.Queue) since all access is from
threads within the same process. If migrating back to ProcessPoolExecutor,
replace with multiprocessing.Manager().Queue() for cross-process safety.
"""
def __init__(self):
self.queue = Manager().Queue()
# Thread-safe queue (not multiprocessing) — see class docstring
self.queue: queue.Queue[T] = queue.Queue()
def add(self, execution: T) -> T:
self.queue.put(execution)
@@ -1220,7 +1223,7 @@ class ExecutionQueue(Generic[T]):
def get_or_none(self) -> T | None:
try:
return self.queue.get_nowait()
except Empty:
except queue.Empty:
return None

View File

@@ -0,0 +1,60 @@
"""Tests for ExecutionQueue thread-safety."""
import queue
import threading
import pytest
from backend.data.execution import ExecutionQueue
def test_execution_queue_uses_stdlib_queue():
"""Verify ExecutionQueue uses queue.Queue (not multiprocessing)."""
q = ExecutionQueue()
assert isinstance(q.queue, queue.Queue)
def test_basic_operations():
"""Test add, get, empty, and get_or_none."""
q = ExecutionQueue()
assert q.empty() is True
assert q.get_or_none() is None
result = q.add("item1")
assert result == "item1"
assert q.empty() is False
item = q.get()
assert item == "item1"
assert q.empty() is True
def test_thread_safety():
"""Test concurrent access from multiple threads."""
q = ExecutionQueue()
results = []
num_items = 100
def producer():
for i in range(num_items):
q.add(f"item_{i}")
def consumer():
count = 0
while count < num_items:
item = q.get_or_none()
if item is not None:
results.append(item)
count += 1
producer_thread = threading.Thread(target=producer)
consumer_thread = threading.Thread(target=consumer)
producer_thread.start()
consumer_thread.start()
producer_thread.join(timeout=5)
consumer_thread.join(timeout=5)
assert len(results) == num_items

View File

@@ -3,7 +3,7 @@ import logging
import uuid
from collections import defaultdict
from datetime import datetime, timezone
from typing import TYPE_CHECKING, Annotated, Any, Literal, Optional, cast
from typing import TYPE_CHECKING, Annotated, Any, Literal, Optional, Self, cast
from prisma.enums import SubmissionStatus
from prisma.models import (
@@ -20,7 +20,7 @@ from prisma.types import (
AgentNodeLinkCreateInput,
StoreListingVersionWhereInput,
)
from pydantic import BaseModel, BeforeValidator, Field, create_model
from pydantic import BaseModel, BeforeValidator, Field
from pydantic.fields import computed_field
from backend.blocks.agent import AgentExecutorBlock
@@ -30,7 +30,6 @@ from backend.data.db import prisma as db
from backend.data.dynamic_fields import is_tool_pin, sanitize_pin_name
from backend.data.includes import MAX_GRAPH_VERSIONS_FETCH
from backend.data.model import (
CredentialsField,
CredentialsFieldInfo,
CredentialsMetaInput,
is_credentials_field_name,
@@ -45,7 +44,6 @@ from .block import (
AnyBlockSchema,
Block,
BlockInput,
BlockSchema,
BlockType,
EmptySchema,
get_block,
@@ -113,10 +111,12 @@ class Link(BaseDbModel):
class Node(BaseDbModel):
block_id: str
input_default: BlockInput = {} # dict[input_name, default_value]
metadata: dict[str, Any] = {}
input_links: list[Link] = []
output_links: list[Link] = []
input_default: BlockInput = Field( # dict[input_name, default_value]
default_factory=dict
)
metadata: dict[str, Any] = Field(default_factory=dict)
input_links: list[Link] = Field(default_factory=list)
output_links: list[Link] = Field(default_factory=list)
@property
def credentials_optional(self) -> bool:
@@ -221,18 +221,33 @@ class NodeModel(Node):
return result
class BaseGraph(BaseDbModel):
class GraphBaseMeta(BaseDbModel):
"""
Shared base for `GraphMeta` and `BaseGraph`, with core graph metadata fields.
"""
version: int = 1
is_active: bool = True
name: str
description: str
instructions: str | None = None
recommended_schedule_cron: str | None = None
nodes: list[Node] = []
links: list[Link] = []
forked_from_id: str | None = None
forked_from_version: int | None = None
class BaseGraph(GraphBaseMeta):
"""
Graph with nodes, links, and computed I/O schema fields.
Used to represent sub-graphs within a `Graph`. Contains the full graph
structure including nodes and links, plus computed fields for schemas
and trigger info. Does NOT include user_id or created_at (see GraphModel).
"""
nodes: list[Node] = Field(default_factory=list)
links: list[Link] = Field(default_factory=list)
@computed_field
@property
def input_schema(self) -> dict[str, Any]:
@@ -361,44 +376,79 @@ class GraphTriggerInfo(BaseModel):
class Graph(BaseGraph):
sub_graphs: list[BaseGraph] = [] # Flattened sub-graphs
"""Creatable graph model used in API create/update endpoints."""
sub_graphs: list[BaseGraph] = Field(default_factory=list) # Flattened sub-graphs
class GraphMeta(GraphBaseMeta):
"""
Lightweight graph metadata model representing an existing graph from the database,
for use in listings and summaries.
Lacks `GraphModel`'s nodes, links, and expensive computed fields.
Use for list endpoints where full graph data is not needed and performance matters.
"""
id: str # type: ignore
version: int # type: ignore
user_id: str
created_at: datetime
@classmethod
def from_db(cls, graph: "AgentGraph") -> Self:
return cls(
id=graph.id,
version=graph.version,
is_active=graph.isActive,
name=graph.name or "",
description=graph.description or "",
instructions=graph.instructions,
recommended_schedule_cron=graph.recommendedScheduleCron,
forked_from_id=graph.forkedFromId,
forked_from_version=graph.forkedFromVersion,
user_id=graph.userId,
created_at=graph.createdAt,
)
class GraphModel(Graph, GraphMeta):
"""
Full graph model representing an existing graph from the database.
This is the primary model for working with persisted graphs. Includes all
graph data (nodes, links, sub_graphs) plus user ownership and timestamps.
Provides computed fields (input_schema, output_schema, etc.) used during
set-up (frontend) and execution (backend).
Inherits from:
- `Graph`: provides structure (nodes, links, sub_graphs) and computed schemas
- `GraphMeta`: provides user_id, created_at for database records
"""
nodes: list[NodeModel] = Field(default_factory=list) # type: ignore
@property
def starting_nodes(self) -> list[NodeModel]:
outbound_nodes = {link.sink_id for link in self.links}
input_nodes = {
node.id for node in self.nodes if node.block.block_type == BlockType.INPUT
}
return [
node
for node in self.nodes
if node.id not in outbound_nodes or node.id in input_nodes
]
@property
def webhook_input_node(self) -> NodeModel | None: # type: ignore
return cast(NodeModel, super().webhook_input_node)
@computed_field
@property
def credentials_input_schema(self) -> dict[str, Any]:
schema = self._credentials_input_schema.jsonschema()
# Determine which credential fields are required based on credentials_optional metadata
graph_credentials_inputs = self.aggregate_credentials_inputs()
required_fields = []
# Build a map of node_id -> node for quick lookup
all_nodes = {node.id: node for node in self.nodes}
for sub_graph in self.sub_graphs:
for node in sub_graph.nodes:
all_nodes[node.id] = node
for field_key, (
_field_info,
node_field_pairs,
) in graph_credentials_inputs.items():
# A field is required if ANY node using it has credentials_optional=False
is_required = False
for node_id, _field_name in node_field_pairs:
node = all_nodes.get(node_id)
if node and not node.credentials_optional:
is_required = True
break
if is_required:
required_fields.append(field_key)
schema["required"] = required_fields
return schema
@property
def _credentials_input_schema(self) -> type[BlockSchema]:
graph_credentials_inputs = self.aggregate_credentials_inputs()
logger.debug(
f"Combined credentials input fields for graph #{self.id} ({self.name}): "
f"{graph_credentials_inputs}"
@@ -406,8 +456,8 @@ class Graph(BaseGraph):
# Warn if same-provider credentials inputs can't be combined (= bad UX)
graph_cred_fields = list(graph_credentials_inputs.values())
for i, (field, keys) in enumerate(graph_cred_fields):
for other_field, other_keys in list(graph_cred_fields)[i + 1 :]:
for i, (field, keys, _) in enumerate(graph_cred_fields):
for other_field, other_keys, _ in list(graph_cred_fields)[i + 1 :]:
if field.provider != other_field.provider:
continue
if ProviderName.HTTP in field.provider:
@@ -423,31 +473,78 @@ class Graph(BaseGraph):
f"keys: {keys} <> {other_keys}."
)
fields: dict[str, tuple[type[CredentialsMetaInput], CredentialsMetaInput]] = {
agg_field_key: (
CredentialsMetaInput[
Literal[tuple(field_info.provider)], # type: ignore
Literal[tuple(field_info.supported_types)], # type: ignore
],
CredentialsField(
required_scopes=set(field_info.required_scopes or []),
discriminator=field_info.discriminator,
discriminator_mapping=field_info.discriminator_mapping,
discriminator_values=field_info.discriminator_values,
),
)
for agg_field_key, (field_info, _) in graph_credentials_inputs.items()
}
# Build JSON schema directly to avoid expensive create_model + validation overhead
properties = {}
required_fields = []
return create_model(
self.name.replace(" ", "") + "CredentialsInputSchema",
__base__=BlockSchema,
**fields, # type: ignore
)
for agg_field_key, (
field_info,
_,
is_required,
) in graph_credentials_inputs.items():
providers = list(field_info.provider)
cred_types = list(field_info.supported_types)
field_schema: dict[str, Any] = {
"credentials_provider": providers,
"credentials_types": cred_types,
"type": "object",
"properties": {
"id": {"title": "Id", "type": "string"},
"title": {
"anyOf": [{"type": "string"}, {"type": "null"}],
"default": None,
"title": "Title",
},
"provider": {
"title": "Provider",
"type": "string",
**(
{"enum": providers}
if len(providers) > 1
else {"const": providers[0]}
),
},
"type": {
"title": "Type",
"type": "string",
**(
{"enum": cred_types}
if len(cred_types) > 1
else {"const": cred_types[0]}
),
},
},
"required": ["id", "provider", "type"],
}
# Add other (optional) field info items
field_schema.update(
field_info.model_dump(
by_alias=True,
exclude_defaults=True,
exclude={"provider", "supported_types"}, # already included above
)
)
# Ensure field schema is well-formed
CredentialsMetaInput.validate_credentials_field_schema(
field_schema, agg_field_key
)
properties[agg_field_key] = field_schema
if is_required:
required_fields.append(agg_field_key)
return {
"type": "object",
"properties": properties,
"required": required_fields,
}
def aggregate_credentials_inputs(
self,
) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]]]]:
) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]], bool]]:
"""
Returns:
dict[aggregated_field_key, tuple(
@@ -455,13 +552,19 @@ class Graph(BaseGraph):
(now includes discriminator_values from matching nodes)
set[(node_id, field_name)]: Node credentials fields that are
compatible with this aggregated field spec
bool: True if the field is required (any node has credentials_optional=False)
)]
"""
# First collect all credential field data with input defaults
node_credential_data = []
# Track (field_info, (node_id, field_name), is_required) for each credential field
node_credential_data: list[tuple[CredentialsFieldInfo, tuple[str, str]]] = []
node_required_map: dict[str, bool] = {} # node_id -> is_required
for graph in [self] + self.sub_graphs:
for node in graph.nodes:
# Track if this node requires credentials (credentials_optional=False means required)
node_required_map[node.id] = not node.credentials_optional
for (
field_name,
field_info,
@@ -485,37 +588,21 @@ class Graph(BaseGraph):
)
# Combine credential field info (this will merge discriminator_values automatically)
return CredentialsFieldInfo.combine(*node_credential_data)
combined = CredentialsFieldInfo.combine(*node_credential_data)
class GraphModel(Graph):
user_id: str
nodes: list[NodeModel] = [] # type: ignore
created_at: datetime
@property
def starting_nodes(self) -> list[NodeModel]:
outbound_nodes = {link.sink_id for link in self.links}
input_nodes = {
node.id for node in self.nodes if node.block.block_type == BlockType.INPUT
# Add is_required flag to each aggregated field
# A field is required if ANY node using it has credentials_optional=False
return {
key: (
field_info,
node_field_pairs,
any(
node_required_map.get(node_id, True)
for node_id, _ in node_field_pairs
),
)
for key, (field_info, node_field_pairs) in combined.items()
}
return [
node
for node in self.nodes
if node.id not in outbound_nodes or node.id in input_nodes
]
@property
def webhook_input_node(self) -> NodeModel | None: # type: ignore
return cast(NodeModel, super().webhook_input_node)
def meta(self) -> "GraphMeta":
"""
Returns a GraphMeta object with metadata about the graph.
This is used to return metadata about the graph without exposing nodes and links.
"""
return GraphMeta.from_graph(self)
def reassign_ids(self, user_id: str, reassign_graph_id: bool = False):
"""
@@ -799,13 +886,14 @@ class GraphModel(Graph):
if is_static_output_block(link.source_id):
link.is_static = True # Each value block output should be static.
@staticmethod
def from_db(
@classmethod
def from_db( # type: ignore[reportIncompatibleMethodOverride]
cls,
graph: AgentGraph,
for_export: bool = False,
sub_graphs: list[AgentGraph] | None = None,
) -> "GraphModel":
return GraphModel(
) -> Self:
return cls(
id=graph.id,
user_id=graph.userId if not for_export else "",
version=graph.version,
@@ -831,17 +919,28 @@ class GraphModel(Graph):
],
)
def hide_nodes(self) -> "GraphModelWithoutNodes":
"""
Returns a copy of the `GraphModel` with nodes, links, and sub-graphs hidden
(excluded from serialization). They are still present in the model instance
so all computed fields (e.g. `credentials_input_schema`) still work.
"""
return GraphModelWithoutNodes.model_validate(self, from_attributes=True)
class GraphMeta(Graph):
user_id: str
# Easy work-around to prevent exposing nodes and links in the API response
nodes: list[NodeModel] = Field(default=[], exclude=True) # type: ignore
links: list[Link] = Field(default=[], exclude=True)
class GraphModelWithoutNodes(GraphModel):
"""
GraphModel variant that excludes nodes, links, and sub-graphs from serialization.
@staticmethod
def from_graph(graph: GraphModel) -> "GraphMeta":
return GraphMeta(**graph.model_dump())
Used in contexts like the store where exposing internal graph structure
is not desired. Inherits all computed fields from GraphModel but marks
nodes and links as excluded from JSON output.
"""
nodes: list[NodeModel] = Field(default_factory=list, exclude=True)
links: list[Link] = Field(default_factory=list, exclude=True)
sub_graphs: list[BaseGraph] = Field(default_factory=list, exclude=True)
class GraphsPaginated(BaseModel):
@@ -912,21 +1011,11 @@ async def list_graphs_paginated(
where=where_clause,
distinct=["id"],
order={"version": "desc"},
include=AGENT_GRAPH_INCLUDE,
skip=offset,
take=page_size,
)
graph_models: list[GraphMeta] = []
for graph in graphs:
try:
graph_meta = GraphModel.from_db(graph).meta()
# Trigger serialization to validate that the graph is well formed
graph_meta.model_dump()
graph_models.append(graph_meta)
except Exception as e:
logger.error(f"Error processing graph {graph.id}: {e}")
continue
graph_models = [GraphMeta.from_db(graph) for graph in graphs]
return GraphsPaginated(
graphs=graph_models,
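Not part of the diff: a minimal sketch of the hide_nodes() helper and GraphModelWithoutNodes behaviour described in the docstrings above, assuming "graph" is any loaded GraphModel instance.
# Illustrative sketch only; "graph" stands for an existing GraphModel.
public_view = graph.hide_nodes()  # returns a GraphModelWithoutNodes copy
dumped = public_view.model_dump()
assert "nodes" not in dumped and "links" not in dumped and "sub_graphs" not in dumped
# Computed fields still work, since the data is only excluded from serialization:
credentials_schema = public_view.credentials_input_schema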

View File

@@ -163,7 +163,6 @@ class User(BaseModel):
if TYPE_CHECKING:
from prisma.models import User as PrismaUser
from backend.data.block import BlockSchema
T = TypeVar("T")
logger = logging.getLogger(__name__)
@@ -508,15 +507,13 @@ class CredentialsMetaInput(BaseModel, Generic[CP, CT]):
def allowed_cred_types(cls) -> tuple[CredentialsType, ...]:
return get_args(cls.model_fields["type"].annotation)
@classmethod
def validate_credentials_field_schema(cls, model: type["BlockSchema"]):
@staticmethod
def validate_credentials_field_schema(
field_schema: dict[str, Any], field_name: str
):
"""Validates the schema of a credentials input field"""
field_name = next(
name for name, type in model.get_credentials_fields().items() if type is cls
)
field_schema = model.jsonschema()["properties"][field_name]
try:
schema_extra = CredentialsFieldInfo[CP, CT].model_validate(field_schema)
field_info = CredentialsFieldInfo[CP, CT].model_validate(field_schema)
except ValidationError as e:
if "Field required [type=missing" not in str(e):
raise
@@ -526,11 +523,11 @@ class CredentialsMetaInput(BaseModel, Generic[CP, CT]):
f"{field_schema}"
) from e
providers = cls.allowed_providers()
providers = field_info.provider
if (
providers is not None
and len(providers) > 1
and not schema_extra.discriminator
and not field_info.discriminator
):
raise TypeError(
f"Multi-provider CredentialsField '{field_name}' "

View File

@@ -373,7 +373,7 @@ def make_node_credentials_input_map(
# Get aggregated credentials fields for the graph
graph_cred_inputs = graph.aggregate_credentials_inputs()
for graph_input_name, (_, compatible_node_fields) in graph_cred_inputs.items():
for graph_input_name, (_, compatible_node_fields, _) in graph_cred_inputs.items():
# Best-effort map: skip missing items
if graph_input_name not in graph_credentials_input:
continue

View File

@@ -342,6 +342,14 @@ async def store_media_file(
if not target_path.is_file():
raise ValueError(f"Local file does not exist: {target_path}")
# Virus scan the local file before any further processing
local_content = target_path.read_bytes()
if len(local_content) > MAX_FILE_SIZE_BYTES:
raise ValueError(
f"File too large: {len(local_content)} bytes > {MAX_FILE_SIZE_BYTES} bytes"
)
await scan_content_safe(local_content, filename=sanitized_file)
# Return based on requested format
if return_format == "for_local_processing":
# Use when processing files locally with tools like ffmpeg, MoviePy, PIL

View File

@@ -247,3 +247,100 @@ class TestFileCloudIntegration:
execution_context=make_test_context(graph_exec_id=graph_exec_id),
return_format="for_local_processing",
)
@pytest.mark.asyncio
async def test_store_media_file_local_path_scanned(self):
"""Test that local file paths are scanned for viruses."""
graph_exec_id = "test-exec-123"
local_file = "test_video.mp4"
file_content = b"fake video content"
with patch(
"backend.util.file.get_cloud_storage_handler"
) as mock_handler_getter, patch(
"backend.util.file.scan_content_safe"
) as mock_scan, patch(
"backend.util.file.Path"
) as mock_path_class:
# Mock cloud storage handler - not a cloud path
mock_handler = MagicMock()
mock_handler.is_cloud_path.return_value = False
mock_handler_getter.return_value = mock_handler
# Mock virus scanner
mock_scan.return_value = None
# Mock file system operations
mock_base_path = MagicMock()
mock_target_path = MagicMock()
mock_resolved_path = MagicMock()
mock_path_class.return_value = mock_base_path
mock_base_path.mkdir = MagicMock()
mock_base_path.__truediv__ = MagicMock(return_value=mock_target_path)
mock_target_path.resolve.return_value = mock_resolved_path
mock_resolved_path.is_relative_to.return_value = True
mock_resolved_path.is_file.return_value = True
mock_resolved_path.read_bytes.return_value = file_content
mock_resolved_path.relative_to.return_value = Path(local_file)
mock_resolved_path.name = local_file
result = await store_media_file(
file=MediaFileType(local_file),
execution_context=make_test_context(graph_exec_id=graph_exec_id),
return_format="for_local_processing",
)
# Verify virus scan was called for local file
mock_scan.assert_called_once_with(file_content, filename=local_file)
# Result should be the relative path
assert str(result) == local_file
@pytest.mark.asyncio
async def test_store_media_file_local_path_virus_detected(self):
"""Test that infected local files raise VirusDetectedError."""
from backend.api.features.store.exceptions import VirusDetectedError
graph_exec_id = "test-exec-123"
local_file = "infected.exe"
file_content = b"malicious content"
with patch(
"backend.util.file.get_cloud_storage_handler"
) as mock_handler_getter, patch(
"backend.util.file.scan_content_safe"
) as mock_scan, patch(
"backend.util.file.Path"
) as mock_path_class:
# Mock cloud storage handler - not a cloud path
mock_handler = MagicMock()
mock_handler.is_cloud_path.return_value = False
mock_handler_getter.return_value = mock_handler
# Mock virus scanner to detect virus
mock_scan.side_effect = VirusDetectedError(
"EICAR-Test-File", "File rejected due to virus detection"
)
# Mock file system operations
mock_base_path = MagicMock()
mock_target_path = MagicMock()
mock_resolved_path = MagicMock()
mock_path_class.return_value = mock_base_path
mock_base_path.mkdir = MagicMock()
mock_base_path.__truediv__ = MagicMock(return_value=mock_target_path)
mock_target_path.resolve.return_value = mock_resolved_path
mock_resolved_path.is_relative_to.return_value = True
mock_resolved_path.is_file.return_value = True
mock_resolved_path.read_bytes.return_value = file_content
with pytest.raises(VirusDetectedError):
await store_media_file(
file=MediaFileType(local_file),
execution_context=make_test_context(graph_exec_id=graph_exec_id),
return_format="for_local_processing",
)

File diff suppressed because it is too large

View File

@@ -17,11 +17,11 @@ apscheduler = "^3.11.1"
autogpt-libs = { path = "../autogpt_libs", develop = true }
bleach = { extras = ["css"], version = "^6.2.0" }
click = "^8.2.0"
cryptography = "^45.0"
cryptography = "^46.0"
discord-py = "^2.5.2"
e2b-code-interpreter = "^1.5.2"
elevenlabs = "^1.50.0"
fastapi = "^0.116.1"
fastapi = "^0.128.0"
feedparser = "^6.0.11"
flake8 = "^7.3.0"
google-api-python-client = "^2.177.0"
@@ -35,7 +35,7 @@ jinja2 = "^3.1.6"
jsonref = "^1.1.0"
jsonschema = "^4.25.0"
langfuse = "^3.11.0"
launchdarkly-server-sdk = "^9.12.0"
launchdarkly-server-sdk = "^9.14.1"
mem0ai = "^0.1.115"
moviepy = "^2.1.2"
ollama = "^0.5.1"
@@ -52,8 +52,8 @@ prometheus-client = "^0.22.1"
prometheus-fastapi-instrumentator = "^7.0.0"
psutil = "^7.0.0"
psycopg2-binary = "^2.9.10"
pydantic = { extras = ["email"], version = "^2.11.7" }
pydantic-settings = "^2.10.1"
pydantic = { extras = ["email"], version = "^2.12.5" }
pydantic-settings = "^2.12.0"
pytest = "^8.4.1"
pytest-asyncio = "^1.1.0"
python-dotenv = "^1.1.1"
@@ -65,11 +65,11 @@ sentry-sdk = {extras = ["anthropic", "fastapi", "launchdarkly", "openai", "sqlal
sqlalchemy = "^2.0.40"
strenum = "^0.4.9"
stripe = "^11.5.0"
supabase = "2.17.0"
supabase = "2.27.2"
tenacity = "^9.1.2"
todoist-api-python = "^2.1.7"
tweepy = "^4.16.0"
uvicorn = { extras = ["standard"], version = "^0.35.0" }
uvicorn = { extras = ["standard"], version = "^0.40.0" }
websockets = "^15.0"
youtube-transcript-api = "^1.2.1"
yt-dlp = "2025.12.08"

View File

@@ -3,7 +3,6 @@
"credentials_input_schema": {
"properties": {},
"required": [],
"title": "TestGraphCredentialsInputSchema",
"type": "object"
},
"description": "A test graph",

View File

@@ -1,34 +1,14 @@
[
{
"credentials_input_schema": {
"properties": {},
"required": [],
"title": "TestGraphCredentialsInputSchema",
"type": "object"
},
"created_at": "2025-09-04T13:37:00",
"description": "A test graph",
"forked_from_id": null,
"forked_from_version": null,
"has_external_trigger": false,
"has_human_in_the_loop": false,
"has_sensitive_action": false,
"id": "graph-123",
"input_schema": {
"properties": {},
"required": [],
"type": "object"
},
"instructions": null,
"is_active": true,
"name": "Test Graph",
"output_schema": {
"properties": {},
"required": [],
"type": "object"
},
"recommended_schedule_cron": null,
"sub_graphs": [],
"trigger_setup_info": null,
"user_id": "3e53486c-cf57-477e-ba2a-cb02dc828e1a",
"version": 1
}

View File

@@ -1,5 +1,5 @@
import { CredentialsMetaInput } from "@/app/api/__generated__/models/credentialsMetaInput";
import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
import { GraphModel } from "@/app/api/__generated__/models/graphModel";
import { CredentialsInput } from "@/components/contextual/CredentialsInput/CredentialsInput";
import { useState } from "react";
import { getSchemaDefaultCredentials } from "../../helpers";
@@ -9,7 +9,7 @@ type Credential = CredentialsMetaInput | undefined;
type Credentials = Record<string, Credential>;
type Props = {
agent: GraphMeta | null;
agent: GraphModel | null;
siblingInputs?: Record<string, any>;
onCredentialsChange: (
credentials: Record<string, CredentialsMetaInput>,

View File

@@ -1,9 +1,9 @@
import { CredentialsMetaInput } from "@/app/api/__generated__/models/credentialsMetaInput";
import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
import { GraphModel } from "@/app/api/__generated__/models/graphModel";
import { BlockIOCredentialsSubSchema } from "@/lib/autogpt-server-api/types";
export function getCredentialFields(
agent: GraphMeta | null,
agent: GraphModel | null,
): AgentCredentialsFields {
if (!agent) return {};

View File

@@ -3,10 +3,10 @@ import type {
CredentialsMetaInput,
} from "@/lib/autogpt-server-api/types";
import type { InputValues } from "./types";
import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
import { GraphModel } from "@/app/api/__generated__/models/graphModel";
export function computeInitialAgentInputs(
agent: GraphMeta | null,
agent: GraphModel | null,
existingInputs?: InputValues | null,
): InputValues {
const properties = agent?.input_schema?.properties || {};
@@ -29,7 +29,7 @@ export function computeInitialAgentInputs(
}
type IsRunDisabledParams = {
agent: GraphMeta | null;
agent: GraphModel | null;
isRunning: boolean;
agentInputs: InputValues | null | undefined;
};

View File

@@ -30,6 +30,8 @@ import {
} from "@/components/atoms/Tooltip/BaseTooltip";
import { GraphMeta } from "@/lib/autogpt-server-api";
import jaro from "jaro-winkler";
import { getV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
import { okData } from "@/app/api/helpers";
type _Block = Omit<Block, "inputSchema" | "outputSchema"> & {
uiKey?: string;
@@ -107,6 +109,8 @@ export function BlocksControl({
.filter((b) => b.uiType !== BlockUIType.AGENT)
.sort((a, b) => a.name.localeCompare(b.name));
// Agent blocks are created from GraphMeta which doesn't include schemas.
// Schemas will be fetched on-demand when the block is actually added.
const agentBlockList = flows
.map((flow): _Block => {
return {
@@ -116,8 +120,9 @@ export function BlocksControl({
`Ver.${flow.version}` +
(flow.description ? ` | ${flow.description}` : ""),
categories: [{ category: "AGENT", description: "" }],
inputSchema: flow.input_schema,
outputSchema: flow.output_schema,
// Empty schemas - will be populated when block is added
inputSchema: { type: "object", properties: {} },
outputSchema: { type: "object", properties: {} },
staticOutput: false,
uiType: BlockUIType.AGENT,
costs: [],
@@ -125,8 +130,7 @@ export function BlocksControl({
hardcodedValues: {
graph_id: flow.id,
graph_version: flow.version,
input_schema: flow.input_schema,
output_schema: flow.output_schema,
// Schemas will be fetched on-demand when block is added
},
};
})
@@ -182,6 +186,37 @@ export function BlocksControl({
setSelectedCategory(null);
}, []);
// Handler to add a block, fetching graph data on-demand for agent blocks
const handleAddBlock = useCallback(
async (block: _Block & { notAvailable: string | null }) => {
if (block.notAvailable) return;
// For agent blocks, fetch the full graph to get schemas
if (block.uiType === BlockUIType.AGENT && block.hardcodedValues) {
const graphID = block.hardcodedValues.graph_id as string;
const graphVersion = block.hardcodedValues.graph_version as number;
const graphData = okData(
await getV1GetSpecificGraph(graphID, { version: graphVersion }),
);
if (graphData) {
addBlock(block.id, block.name, {
...block.hardcodedValues,
input_schema: graphData.input_schema,
output_schema: graphData.output_schema,
});
} else {
// Fallback: add without schemas (will be incomplete)
console.error("Failed to fetch graph data for agent block");
addBlock(block.id, block.name, block.hardcodedValues || {});
}
} else {
addBlock(block.id, block.name, block.hardcodedValues || {});
}
},
[addBlock],
);
// Extract unique categories from blocks
const categories = useMemo(() => {
return Array.from(
@@ -303,10 +338,7 @@ export function BlocksControl({
}),
);
}}
onClick={() =>
!block.notAvailable &&
addBlock(block.id, block.name, block?.hardcodedValues || {})
}
onClick={() => handleAddBlock(block)}
title={block.notAvailable ?? undefined}
>
<div

View File

@@ -29,13 +29,17 @@ import "@xyflow/react/dist/style.css";
import { ConnectedEdge, CustomNode } from "../CustomNode/CustomNode";
import "./flow.css";
import {
BlockIORootSchema,
BlockUIType,
formatEdgeID,
GraphExecutionID,
GraphID,
GraphMeta,
LibraryAgent,
SpecialBlockID,
} from "@/lib/autogpt-server-api";
import { getV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
import { okData } from "@/app/api/helpers";
import { IncompatibilityInfo } from "../../../hooks/useSubAgentUpdate/types";
import { Key, storage } from "@/services/storage/local-storage";
import { findNewlyAddedBlockCoordinates, getTypeColor } from "@/lib/utils";
@@ -687,8 +691,94 @@ const FlowEditor: React.FC<{
[getNode, updateNode, nodes],
);
/* Shared helper to create and add a node */
const createAndAddNode = useCallback(
async (
blockID: string,
blockName: string,
hardcodedValues: Record<string, any>,
position: { x: number; y: number },
): Promise<CustomNode | null> => {
const nodeSchema = availableBlocks.find((node) => node.id === blockID);
if (!nodeSchema) {
console.error(`Schema not found for block ID: ${blockID}`);
return null;
}
// For agent blocks, fetch the full graph to get schemas
let inputSchema: BlockIORootSchema = nodeSchema.inputSchema;
let outputSchema: BlockIORootSchema = nodeSchema.outputSchema;
let finalHardcodedValues = hardcodedValues;
if (blockID === SpecialBlockID.AGENT) {
const graphID = hardcodedValues.graph_id as string;
const graphVersion = hardcodedValues.graph_version as number;
const graphData = okData(
await getV1GetSpecificGraph(graphID, { version: graphVersion }),
);
if (graphData) {
inputSchema = graphData.input_schema as BlockIORootSchema;
outputSchema = graphData.output_schema as BlockIORootSchema;
finalHardcodedValues = {
...hardcodedValues,
input_schema: graphData.input_schema,
output_schema: graphData.output_schema,
};
} else {
console.error("Failed to fetch graph data for agent block");
}
}
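// Build the React Flow node, using the graph-derived schemas for agent blocks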
const newNode: CustomNode = {
id: nodeId.toString(),
type: "custom",
position,
data: {
blockType: blockName,
blockCosts: nodeSchema.costs || [],
title: `${blockName} ${nodeId}`,
description: nodeSchema.description,
categories: nodeSchema.categories,
inputSchema: inputSchema,
outputSchema: outputSchema,
hardcodedValues: finalHardcodedValues,
connections: [],
isOutputOpen: false,
block_id: blockID,
isOutputStatic: nodeSchema.staticOutput,
uiType: nodeSchema.uiType,
},
};
addNodes(newNode);
setNodeId((prevId) => prevId + 1);
clearNodesStatusAndOutput();
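// Record the addition in history so it can be undone and redone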
history.push({
type: "ADD_NODE",
payload: { node: { ...newNode, ...newNode.data } },
undo: () => deleteElements({ nodes: [{ id: newNode.id }] }),
redo: () => addNodes(newNode),
});
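// Return the created node (or null on failure) so callers know whether the add succeeded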
return newNode;
},
[
availableBlocks,
nodeId,
addNodes,
deleteElements,
clearNodesStatusAndOutput,
],
);
const addNode = useCallback(
(blockId: string, nodeType: string, hardcodedValues: any = {}) => {
async (
blockId: string,
nodeType: string,
hardcodedValues: Record<string, any> = {},
) => {
const nodeSchema = availableBlocks.find((node) => node.id === blockId);
if (!nodeSchema) {
console.error(`Schema not found for block ID: ${blockId}`);
@@ -707,73 +797,42 @@ const FlowEditor: React.FC<{
// Alternative: We could also use D3 force or Intersection for this (React Flow Pro examples)
const { x, y } = getViewport();
const viewportCoordinates =
const position =
nodeDimensions && Object.keys(nodeDimensions).length > 0
? // we will get all the dimension of nodes, then store
findNewlyAddedBlockCoordinates(
? findNewlyAddedBlockCoordinates(
nodeDimensions,
nodeSchema.uiType == BlockUIType.NOTE ? 300 : 500,
60,
1.0,
)
: // we will get all the dimension of nodes, then store
{
: {
x: window.innerWidth / 2 - x,
y: window.innerHeight / 2 - y,
};
const newNode: CustomNode = {
id: nodeId.toString(),
type: "custom",
position: viewportCoordinates, // Set the position to the calculated viewport center
data: {
blockType: nodeType,
blockCosts: nodeSchema.costs,
title: `${nodeType} ${nodeId}`,
description: nodeSchema.description,
categories: nodeSchema.categories,
inputSchema: nodeSchema.inputSchema,
outputSchema: nodeSchema.outputSchema,
hardcodedValues: hardcodedValues,
connections: [],
isOutputOpen: false,
block_id: blockId,
isOutputStatic: nodeSchema.staticOutput,
uiType: nodeSchema.uiType,
},
};
addNodes(newNode);
setNodeId((prevId) => prevId + 1);
clearNodesStatusAndOutput(); // Clear status and output when a new node is added
const newNode = await createAndAddNode(
blockId,
nodeType,
hardcodedValues,
position,
);
if (!newNode) return;
setViewport(
{
// Rough estimate of the node's dimensions: 500x400px.
// We skip shifting the X to account for the block menu side-bar.
x: -viewportCoordinates.x * 0.8 + (window.innerWidth - 0.0) / 2,
y: -viewportCoordinates.y * 0.8 + (window.innerHeight - 400) / 2,
x: -position.x * 0.8 + (window.innerWidth - 0.0) / 2,
y: -position.y * 0.8 + (window.innerHeight - 400) / 2,
zoom: 0.8,
},
{ duration: 500 },
);
history.push({
type: "ADD_NODE",
payload: { node: { ...newNode, ...newNode.data } },
undo: () => deleteElements({ nodes: [{ id: newNode.id }] }),
redo: () => addNodes(newNode),
});
},
[
nodeId,
getViewport,
setViewport,
availableBlocks,
addNodes,
nodeDimensions,
deleteElements,
clearNodesStatusAndOutput,
createAndAddNode,
],
);
@@ -920,7 +979,7 @@ const FlowEditor: React.FC<{
}, []);
const onDrop = useCallback(
(event: React.DragEvent) => {
async (event: React.DragEvent) => {
event.preventDefault();
const blockData = event.dataTransfer.getData("application/reactflow");
@@ -935,62 +994,17 @@ const FlowEditor: React.FC<{
y: event.clientY,
});
// Find the block schema
const nodeSchema = availableBlocks.find((node) => node.id === blockId);
if (!nodeSchema) {
console.error(`Schema not found for block ID: ${blockId}`);
return;
}
// Create the new node at the drop position
const newNode: CustomNode = {
id: nodeId.toString(),
type: "custom",
await createAndAddNode(
blockId,
blockName,
hardcodedValues || {},
position,
data: {
blockType: blockName,
blockCosts: nodeSchema.costs || [],
title: `${blockName} ${nodeId}`,
description: nodeSchema.description,
categories: nodeSchema.categories,
inputSchema: nodeSchema.inputSchema,
outputSchema: nodeSchema.outputSchema,
hardcodedValues: hardcodedValues,
connections: [],
isOutputOpen: false,
block_id: blockId,
uiType: nodeSchema.uiType,
},
};
history.push({
type: "ADD_NODE",
payload: { node: { ...newNode, ...newNode.data } },
undo: () => {
deleteElements({ nodes: [{ id: newNode.id } as any], edges: [] });
},
redo: () => {
addNodes([newNode]);
},
});
addNodes([newNode]);
clearNodesStatusAndOutput();
setNodeId((prevId) => prevId + 1);
);
} catch (error) {
console.error("Failed to drop block:", error);
}
},
[
nodeId,
availableBlocks,
nodes,
edges,
addNodes,
screenToFlowPosition,
deleteElements,
clearNodesStatusAndOutput,
],
[screenToFlowPosition, createAndAddNode],
);
const buildContextValue: BuilderContextType = useMemo(

View File

@@ -4,13 +4,13 @@ import { AgentRunDraftView } from "@/app/(platform)/library/agents/[id]/componen
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import type {
CredentialsMetaInput,
GraphMeta,
Graph,
} from "@/lib/autogpt-server-api/types";
interface RunInputDialogProps {
isOpen: boolean;
doClose: () => void;
graph: GraphMeta;
graph: Graph;
doRun?: (
inputs: Record<string, any>,
credentialsInputs: Record<string, CredentialsMetaInput>,

View File

@@ -9,13 +9,13 @@ import { CustomNodeData } from "@/app/(platform)/build/components/legacy-builder
import {
BlockUIType,
CredentialsMetaInput,
GraphMeta,
Graph,
} from "@/lib/autogpt-server-api/types";
import RunnerOutputUI, { OutputNodeInfo } from "./RunnerOutputUI";
import { RunnerInputDialog } from "./RunnerInputUI";
interface RunnerUIWrapperProps {
graph: GraphMeta;
graph: Graph;
nodes: Node<CustomNodeData>[];
graphExecutionError?: string | null;
saveAndRun: (

View File

@@ -1,5 +1,5 @@
import { GraphInputSchema } from "@/lib/autogpt-server-api";
import { GraphMetaLike, IncompatibilityInfo } from "./types";
import { GraphLike, IncompatibilityInfo } from "./types";
// Helper type for schema properties - the generated types are too loose
type SchemaProperties = Record<string, GraphInputSchema["properties"][string]>;
@@ -36,7 +36,7 @@ export function getSchemaRequired(schema: unknown): SchemaRequired {
*/
export function createUpdatedAgentNodeInputs(
currentInputs: Record<string, unknown>,
latestSubGraphVersion: GraphMetaLike,
latestSubGraphVersion: GraphLike,
): Record<string, unknown> {
return {
...currentInputs,

View File

@@ -1,7 +1,11 @@
import type { GraphMeta as LegacyGraphMeta } from "@/lib/autogpt-server-api";
import type {
Graph as LegacyGraph,
GraphMeta as LegacyGraphMeta,
} from "@/lib/autogpt-server-api";
import type { GraphModel as GeneratedGraph } from "@/app/api/__generated__/models/graphModel";
import type { GraphMeta as GeneratedGraphMeta } from "@/app/api/__generated__/models/graphMeta";
export type SubAgentUpdateInfo<T extends GraphMetaLike = GraphMetaLike> = {
export type SubAgentUpdateInfo<T extends GraphLike = GraphLike> = {
hasUpdate: boolean;
currentVersion: number;
latestVersion: number;
@@ -10,7 +14,10 @@ export type SubAgentUpdateInfo<T extends GraphMetaLike = GraphMetaLike> = {
incompatibilities: IncompatibilityInfo | null;
};
// Union type for GraphMeta that works with both legacy and new builder
// Union type for Graph (with schemas) that works with both legacy and new builder
export type GraphLike = LegacyGraph | GeneratedGraph;
// Union type for GraphMeta (without schemas) for version detection
export type GraphMetaLike = LegacyGraphMeta | GeneratedGraphMeta;
export type IncompatibilityInfo = {

View File

@@ -1,5 +1,11 @@
import { useMemo } from "react";
import { GraphInputSchema, GraphOutputSchema } from "@/lib/autogpt-server-api";
import type {
GraphInputSchema,
GraphOutputSchema,
} from "@/lib/autogpt-server-api";
import type { GraphModel } from "@/app/api/__generated__/models/graphModel";
import { useGetV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
import { okData } from "@/app/api/helpers";
import { getEffectiveType } from "@/lib/utils";
import { EdgeLike, getSchemaProperties, getSchemaRequired } from "./helpers";
import {
@@ -11,26 +17,38 @@ import {
/**
* Checks if a newer version of a sub-agent is available and determines compatibility
*/
export function useSubAgentUpdate<T extends GraphMetaLike>(
export function useSubAgentUpdate(
nodeID: string,
graphID: string | undefined,
graphVersion: number | undefined,
currentInputSchema: GraphInputSchema | undefined,
currentOutputSchema: GraphOutputSchema | undefined,
connections: EdgeLike[],
availableGraphs: T[],
): SubAgentUpdateInfo<T> {
availableGraphs: GraphMetaLike[],
): SubAgentUpdateInfo<GraphModel> {
// Find the latest version of the same graph
const latestGraph = useMemo(() => {
const latestGraphInfo = useMemo(() => {
if (!graphID) return null;
return availableGraphs.find((graph) => graph.id === graphID) || null;
}, [graphID, availableGraphs]);
// Check if there's an update available
// Check if there's a newer version available
const hasUpdate = useMemo(() => {
if (!latestGraph || graphVersion === undefined) return false;
return latestGraph.version! > graphVersion;
}, [latestGraph, graphVersion]);
if (!latestGraphInfo || graphVersion === undefined) return false;
return latestGraphInfo.version! > graphVersion;
}, [latestGraphInfo, graphVersion]);
// Fetch full graph IF an update is detected
const { data: latestGraph } = useGetV1GetSpecificGraph(
graphID ?? "",
{ version: latestGraphInfo?.version },
{
query: {
enabled: hasUpdate && !!graphID && !!latestGraphInfo?.version,
select: okData,
},
},
);
// Get connected input and output handles for this specific node
const connectedHandles = useMemo(() => {
@@ -152,8 +170,8 @@ export function useSubAgentUpdate<T extends GraphMetaLike>(
return {
hasUpdate,
currentVersion: graphVersion || 0,
latestVersion: latestGraph?.version || 0,
latestGraph,
latestVersion: latestGraphInfo?.version || 0,
latestGraph: latestGraph || null,
isCompatible: compatibilityResult.isCompatible,
incompatibilities: compatibilityResult.incompatibilities,
};

View File

@@ -18,7 +18,7 @@ interface GraphStore {
outputSchema: Record<string, any> | null,
) => void;
// Available graphs; used for sub-graph updates
// Available graphs; used for sub-graph update version detection
availableSubGraphs: GraphMeta[];
setAvailableSubGraphs: (graphs: GraphMeta[]) => void;

View File

@@ -10,8 +10,8 @@ import React, {
import {
CredentialsMetaInput,
CredentialsType,
Graph,
GraphExecutionID,
GraphMeta,
LibraryAgentPreset,
LibraryAgentPresetID,
LibraryAgentPresetUpdatable,
@@ -69,7 +69,7 @@ export function AgentRunDraftView({
className,
recommendedScheduleCron,
}: {
graph: GraphMeta;
graph: Graph;
agentActions?: ButtonAction[];
recommendedScheduleCron?: string | null;
doRun?: (

View File

@@ -2,8 +2,8 @@
import React, { useCallback, useMemo } from "react";
import {
Graph,
GraphExecutionID,
GraphMeta,
Schedule,
ScheduleID,
} from "@/lib/autogpt-server-api";
@@ -35,7 +35,7 @@ export function AgentScheduleDetailsView({
onForcedRun,
doDeleteSchedule,
}: {
graph: GraphMeta;
graph: Graph;
schedule: Schedule;
agentActions: ButtonAction[];
onForcedRun: (runID: GraphExecutionID) => void;

View File

@@ -5629,7 +5629,9 @@
"description": "Successful Response",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/GraphMeta" }
"schema": {
"$ref": "#/components/schemas/GraphModelWithoutNodes"
}
}
}
},
@@ -6495,18 +6497,6 @@
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Recommended Schedule Cron"
},
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes",
"default": []
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links",
"default": []
},
"forked_from_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Forked From Id"
@@ -6514,11 +6504,22 @@
"forked_from_version": {
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version"
},
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes"
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links"
}
},
"type": "object",
"required": ["name", "description"],
"title": "BaseGraph"
"title": "BaseGraph",
"description": "Graph with nodes, links, and computed I/O schema fields.\n\nUsed to represent sub-graphs within a `Graph`. Contains the full graph\nstructure including nodes and links, plus computed fields for schemas\nand trigger info. Does NOT include user_id or created_at (see GraphModel)."
},
"BaseGraph-Output": {
"properties": {
@@ -6539,18 +6540,6 @@
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Recommended Schedule Cron"
},
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes",
"default": []
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links",
"default": []
},
"forked_from_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Forked From Id"
@@ -6559,6 +6548,16 @@
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version"
},
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes"
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links"
},
"input_schema": {
"additionalProperties": true,
"type": "object",
@@ -6605,7 +6604,8 @@
"has_sensitive_action",
"trigger_setup_info"
],
"title": "BaseGraph"
"title": "BaseGraph",
"description": "Graph with nodes, links, and computed I/O schema fields.\n\nUsed to represent sub-graphs within a `Graph`. Contains the full graph\nstructure including nodes and links, plus computed fields for schemas\nand trigger info. Does NOT include user_id or created_at (see GraphModel)."
},
"BlockCategoryResponse": {
"properties": {
@@ -7399,18 +7399,6 @@
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Recommended Schedule Cron"
},
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes",
"default": []
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links",
"default": []
},
"forked_from_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Forked From Id"
@@ -7419,16 +7407,26 @@
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version"
},
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes"
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links"
},
"sub_graphs": {
"items": { "$ref": "#/components/schemas/BaseGraph-Input" },
"type": "array",
"title": "Sub Graphs",
"default": []
"title": "Sub Graphs"
}
},
"type": "object",
"required": ["name", "description"],
"title": "Graph"
"title": "Graph",
"description": "Creatable graph model used in API create/update endpoints."
},
"GraphExecution": {
"properties": {
@@ -7778,6 +7776,52 @@
"description": "Response schema for paginated graph executions."
},
"GraphMeta": {
"properties": {
"id": { "type": "string", "title": "Id" },
"version": { "type": "integer", "title": "Version" },
"is_active": {
"type": "boolean",
"title": "Is Active",
"default": true
},
"name": { "type": "string", "title": "Name" },
"description": { "type": "string", "title": "Description" },
"instructions": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Instructions"
},
"recommended_schedule_cron": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Recommended Schedule Cron"
},
"forked_from_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Forked From Id"
},
"forked_from_version": {
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version"
},
"user_id": { "type": "string", "title": "User Id" },
"created_at": {
"type": "string",
"format": "date-time",
"title": "Created At"
}
},
"type": "object",
"required": [
"id",
"version",
"name",
"description",
"user_id",
"created_at"
],
"title": "GraphMeta",
"description": "Lightweight graph metadata model representing an existing graph from the database,\nfor use in listings and summaries.\n\nLacks `GraphModel`'s nodes, links, and expensive computed fields.\nUse for list endpoints where full graph data is not needed and performance matters."
},
"GraphModel": {
"properties": {
"id": { "type": "string", "title": "Id" },
"version": { "type": "integer", "title": "Version", "default": 1 },
@@ -7804,13 +7848,27 @@
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version"
},
"user_id": { "type": "string", "title": "User Id" },
"created_at": {
"type": "string",
"format": "date-time",
"title": "Created At"
},
"nodes": {
"items": { "$ref": "#/components/schemas/NodeModel" },
"type": "array",
"title": "Nodes"
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links"
},
"sub_graphs": {
"items": { "$ref": "#/components/schemas/BaseGraph-Output" },
"type": "array",
"title": "Sub Graphs",
"default": []
"title": "Sub Graphs"
},
"user_id": { "type": "string", "title": "User Id" },
"input_schema": {
"additionalProperties": true,
"type": "object",
@@ -7857,6 +7915,7 @@
"name",
"description",
"user_id",
"created_at",
"input_schema",
"output_schema",
"has_external_trigger",
@@ -7865,9 +7924,10 @@
"trigger_setup_info",
"credentials_input_schema"
],
"title": "GraphMeta"
"title": "GraphModel",
"description": "Full graph model representing an existing graph from the database.\n\nThis is the primary model for working with persisted graphs. Includes all\ngraph data (nodes, links, sub_graphs) plus user ownership and timestamps.\nProvides computed fields (input_schema, output_schema, etc.) used during\nset-up (frontend) and execution (backend).\n\nInherits from:\n- `Graph`: provides structure (nodes, links, sub_graphs) and computed schemas\n- `GraphMeta`: provides user_id, created_at for database records"
},
"GraphModel": {
"GraphModelWithoutNodes": {
"properties": {
"id": { "type": "string", "title": "Id" },
"version": { "type": "integer", "title": "Version", "default": 1 },
@@ -7886,18 +7946,6 @@
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Recommended Schedule Cron"
},
"nodes": {
"items": { "$ref": "#/components/schemas/NodeModel" },
"type": "array",
"title": "Nodes",
"default": []
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links",
"default": []
},
"forked_from_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Forked From Id"
@@ -7906,12 +7954,6 @@
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version"
},
"sub_graphs": {
"items": { "$ref": "#/components/schemas/BaseGraph-Output" },
"type": "array",
"title": "Sub Graphs",
"default": []
},
"user_id": { "type": "string", "title": "User Id" },
"created_at": {
"type": "string",
@@ -7973,7 +8015,8 @@
"trigger_setup_info",
"credentials_input_schema"
],
"title": "GraphModel"
"title": "GraphModelWithoutNodes",
"description": "GraphModel variant that excludes nodes, links, and sub-graphs from serialization.\n\nUsed in contexts like the store where exposing internal graph structure\nis not desired. Inherits all computed fields from GraphModel but marks\nnodes and links as excluded from JSON output."
},
"GraphSettings": {
"properties": {
@@ -8613,26 +8656,22 @@
"input_default": {
"additionalProperties": true,
"type": "object",
"title": "Input Default",
"default": {}
"title": "Input Default"
},
"metadata": {
"additionalProperties": true,
"type": "object",
"title": "Metadata",
"default": {}
"title": "Metadata"
},
"input_links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Input Links",
"default": []
"title": "Input Links"
},
"output_links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Output Links",
"default": []
"title": "Output Links"
}
},
"type": "object",
@@ -8712,26 +8751,22 @@
"input_default": {
"additionalProperties": true,
"type": "object",
"title": "Input Default",
"default": {}
"title": "Input Default"
},
"metadata": {
"additionalProperties": true,
"type": "object",
"title": "Metadata",
"default": {}
"title": "Metadata"
},
"input_links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Input Links",
"default": []
"title": "Input Links"
},
"output_links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Output Links",
"default": []
"title": "Output Links"
},
"graph_id": { "type": "string", "title": "Graph Id" },
"graph_version": { "type": "integer", "title": "Graph Version" },
@@ -12272,7 +12307,9 @@
"title": "Location"
},
"msg": { "type": "string", "title": "Message" },
"type": { "type": "string", "title": "Error Type" }
"type": { "type": "string", "title": "Error Type" },
"input": { "title": "Input" },
"ctx": { "type": "object", "title": "Context" }
},
"type": "object",
"required": ["loc", "msg", "type"],

View File

@@ -362,25 +362,14 @@ export type GraphMeta = {
user_id: UserID;
version: number;
is_active: boolean;
created_at: Date;
name: string;
description: string;
instructions?: string | null;
recommended_schedule_cron: string | null;
forked_from_id?: GraphID | null;
forked_from_version?: number | null;
input_schema: GraphInputSchema;
output_schema: GraphOutputSchema;
credentials_input_schema: CredentialsInputSchema;
} & (
| {
has_external_trigger: true;
trigger_setup_info: GraphTriggerInfo;
}
| {
has_external_trigger: false;
trigger_setup_info: null;
}
);
};
export type GraphID = Brand<string, "GraphID">;
@@ -447,11 +436,22 @@ export type GraphTriggerInfo = {
/* Mirror of backend/data/graph.py:Graph */
export type Graph = GraphMeta & {
created_at: Date;
nodes: Node[];
links: Link[];
sub_graphs: Omit<Graph, "sub_graphs">[]; // Flattened sub-graphs
};
input_schema: GraphInputSchema;
output_schema: GraphOutputSchema;
credentials_input_schema: CredentialsInputSchema;
} & (
| {
has_external_trigger: true;
trigger_setup_info: GraphTriggerInfo;
}
| {
has_external_trigger: false;
trigger_setup_info: null;
}
);
export type GraphUpdateable = Omit<
Graph,