Compare commits


1 Commit

Author: Reinier van der Leer
SHA1: 8fddc9d71f
Message: fix(backend): Reduce GET /api/graphs expense + latency (#11986)
[SECRT-1896: Fix crazy `GET /api/graphs` latency (P95 = 107s)](https://linear.app/autogpt/issue/SECRT-1896)

These changes should decrease latency of this endpoint by ~~60-65%~~ a lot.

### Changes 🏗️

- Make `Graph.credentials_input_schema` cheaper by avoiding constructing a new `BlockSchema` subclass
- Strip down `GraphMeta` - drop all computed fields
  - Replace with either `GraphModel` or `GraphModelWithoutNodes` wherever those computed fields are used
  - Simplify usage in `list_graphs_paginated` and `fetch_graph_from_store_slug`
- Refactor and clarify relationships between the different graph models (see the sketch after this list)
  - Split `BaseGraph` into `GraphBaseMeta` + `BaseGraph`
  - Strip down `Graph` - move `credentials_input_schema` and `aggregate_credentials_inputs` to `GraphModel`
  - Refactor to eliminate the double `aggregate_credentials_inputs()` call in the `credentials_input_schema` call tree
  - Add `GraphModelWithoutNodes` (similar to the current `GraphMeta`)
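
For orientation, a minimal sketch of the resulting model hierarchy (fields abridged; the real definitions are in the `backend/data/graph.py` diff below):

```python
# Abridged sketch of the refactored hierarchy - not the actual definitions
from datetime import datetime
from typing import Any

from pydantic import BaseModel, Field


class GraphBaseMeta(BaseModel):  # shared core metadata
    name: str
    description: str
    version: int = 1


class BaseGraph(GraphBaseMeta):  # adds structure + computed I/O schemas
    nodes: list[Any] = Field(default_factory=list)
    links: list[Any] = Field(default_factory=list)


class Graph(BaseGraph):  # creatable model for create/update endpoints
    sub_graphs: list[BaseGraph] = Field(default_factory=list)


class GraphMeta(GraphBaseMeta):  # cheap DB-backed model for listings
    user_id: str
    created_at: datetime


class GraphModel(Graph, GraphMeta):  # full DB-backed model
    """Carries credentials_input_schema + aggregate_credentials_inputs."""


class GraphModelWithoutNodes(GraphModel):  # structure hidden on serialization
    nodes: list[Any] = Field(default_factory=list, exclude=True)
    links: list[Any] = Field(default_factory=list, exclude=True)
    sub_graphs: list[BaseGraph] = Field(default_factory=list, exclude=True)
```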

### Checklist 📋

#### For code changes:
- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
  - [x] `GET /api/graphs` works as it should
  - [x] Running a graph succeeds
  - [x] Adding a sub-agent in the Builder works as it should
Date: 2026-02-06 19:13:21 +00:00
30 changed files with 617 additions and 754 deletions

View File

@@ -157,16 +157,6 @@ yield "image_url", result_url
 3. Write tests alongside the route file
 4. Run `poetry run test` to verify
-## Workspace & Media Files
-**Read [Workspace & Media Architecture](../../docs/platform/workspace-media-architecture.md) when:**
-- Working on CoPilot file upload/download features
-- Building blocks that handle `MediaFileType` inputs/outputs
-- Modifying `WorkspaceManager` or `store_media_file()`
-- Debugging file persistence or virus scanning issues
-Covers: `WorkspaceManager` (persistent storage with session scoping), `store_media_file()` (media normalization pipeline), and responsibility boundaries for virus scanning and persistence.
 ## Security Implementation
 ### Cache Protection Middleware

View File

@@ -6,7 +6,6 @@ from typing import Any
 from backend.api.features.library import db as library_db
 from backend.api.features.library import model as library_model
 from backend.api.features.store import db as store_db
-from backend.data import graph as graph_db
 from backend.data.graph import GraphModel
 from backend.data.model import (
     CredentialsFieldInfo,
@@ -44,14 +43,8 @@ async def fetch_graph_from_store_slug(
         return None, None

     # Get the graph from store listing version
-    graph_meta = await store_db.get_available_graph(
-        store_agent.store_listing_version_id
-    )
-    graph = await graph_db.get_graph(
-        graph_id=graph_meta.id,
-        version=graph_meta.version,
-        user_id=None,  # Public access
-        include_subgraphs=True,
-    )
+    graph = await store_db.get_available_graph(
+        store_agent.store_listing_version_id, hide_nodes=False
+    )

     return graph, store_agent
@@ -128,7 +121,7 @@ def build_missing_credentials_from_graph(
     return {
         field_key: _serialize_missing_credential(field_key, field_info)
-        for field_key, (field_info, _node_fields) in aggregated_fields.items()
+        for field_key, (field_info, _, _) in aggregated_fields.items()
         if field_key not in matched_keys
     }
@@ -269,7 +262,8 @@ async def match_user_credentials_to_graph(
     # provider is in the set of acceptable providers.
     for credential_field_name, (
         credential_requirements,
-        _node_fields,
+        _,
+        _,
     ) in aggregated_creds.items():
         # Find first matching credential by provider, type, and scopes
         matching_cred = next(

View File

@@ -9,6 +9,7 @@ from pydantic import BaseModel
 from backend.api.features.chat.model import ChatSession
 from backend.data.workspace import get_or_create_workspace
 from backend.util.settings import Config
+from backend.util.virus_scanner import scan_content_safe
 from backend.util.workspace import WorkspaceManager
 from .base import BaseTool
@@ -474,6 +475,9 @@ class WriteWorkspaceFileTool(BaseTool):
         )
         try:
+            # Virus scan
+            await scan_content_safe(content, filename=filename)
+
             workspace = await get_or_create_workspace(user_id)
             # Pass session_id for session-scoped file access
             manager = WorkspaceManager(user_id, workspace.id, session_id)

View File

@@ -374,7 +374,7 @@ async def get_library_agent_by_graph_id(
 async def add_generated_agent_image(
-    graph: graph_db.BaseGraph,
+    graph: graph_db.GraphBaseMeta,
     user_id: str,
     library_agent_id: str,
 ) -> Optional[prisma.models.LibraryAgent]:

View File

@@ -1,7 +1,7 @@
 import asyncio
 import logging
 from datetime import datetime, timezone
-from typing import Any, Literal
+from typing import Any, Literal, overload

 import fastapi
 import prisma.enums
@@ -11,8 +11,8 @@ import prisma.types
 from backend.data.db import transaction
 from backend.data.graph import (
-    GraphMeta,
     GraphModel,
+    GraphModelWithoutNodes,
     get_graph,
     get_graph_as_admin,
     get_sub_graphs,
@@ -334,7 +334,22 @@ async def get_store_agent_details(
         raise DatabaseError("Failed to fetch agent details") from e

-async def get_available_graph(store_listing_version_id: str) -> GraphMeta:
+@overload
+async def get_available_graph(
+    store_listing_version_id: str, hide_nodes: Literal[False]
+) -> GraphModel: ...
+
+
+@overload
+async def get_available_graph(
+    store_listing_version_id: str, hide_nodes: Literal[True] = True
+) -> GraphModelWithoutNodes: ...
+
+
+async def get_available_graph(
+    store_listing_version_id: str,
+    hide_nodes: bool = True,
+) -> GraphModelWithoutNodes | GraphModel:
     try:
         # Get avaialble, non-deleted store listing version
         store_listing_version = (
@@ -344,7 +359,7 @@ async def get_available_graph(store_listing_version_id: str) -> GraphMeta:
                     "isAvailable": True,
                     "isDeleted": False,
                 },
-                include={"AgentGraph": {"include": {"Nodes": True}}},
+                include={"AgentGraph": {"include": AGENT_GRAPH_INCLUDE}},
             )
         )
@@ -354,7 +369,9 @@ async def get_available_graph(store_listing_version_id: str) -> GraphMeta:
                 detail=f"Store listing version {store_listing_version_id} not found",
             )
-        return GraphModel.from_db(store_listing_version.AgentGraph).meta()
+        return (GraphModelWithoutNodes if hide_nodes else GraphModel).from_db(
+            store_listing_version.AgentGraph
+        )
     except Exception as e:
         logger.error(f"Error getting agent: {e}")

View File

@@ -16,7 +16,7 @@ from backend.blocks.ideogram import (
     StyleType,
     UpscaleOption,
 )
-from backend.data.graph import BaseGraph
+from backend.data.graph import GraphBaseMeta
 from backend.data.model import CredentialsMetaInput, ProviderName
 from backend.integrations.credentials_store import ideogram_credentials
 from backend.util.request import Requests
@@ -34,14 +34,14 @@ class ImageStyle(str, Enum):
     DIGITAL_ART = "digital art"

-async def generate_agent_image(agent: BaseGraph | AgentGraph) -> io.BytesIO:
+async def generate_agent_image(agent: GraphBaseMeta | AgentGraph) -> io.BytesIO:
     if settings.config.use_agent_image_generation_v2:
         return await generate_agent_image_v2(graph=agent)
     else:
         return await generate_agent_image_v1(agent=agent)

-async def generate_agent_image_v2(graph: BaseGraph | AgentGraph) -> io.BytesIO:
+async def generate_agent_image_v2(graph: GraphBaseMeta | AgentGraph) -> io.BytesIO:
     """
     Generate an image for an agent using Ideogram model.

     Returns:
@@ -54,14 +54,17 @@ async def generate_agent_image_v2(graph: BaseGraph | AgentGraph) -> io.BytesIO:
     description = f"{name} ({graph.description})" if graph.description else name

     prompt = (
-        f"Create a visually striking retro-futuristic vector pop art illustration prominently featuring "
-        f'"{name}" in bold typography. The image clearly and literally depicts a {description}, '
-        f"along with recognizable objects directly associated with the primary function of a {name}. "
-        f"Ensure the imagery is concrete, intuitive, and immediately understandable, clearly conveying the "
-        f"purpose of a {name}. Maintain vibrant, limited-palette colors, sharp vector lines, geometric "
-        f"shapes, flat illustration techniques, and solid colors without gradients or shading. Preserve a "
-        f"retro-futuristic aesthetic influenced by mid-century futurism and 1960s psychedelia, "
-        f"prioritizing clear visual storytelling and thematic clarity above all else."
+        "Create a visually striking retro-futuristic vector pop art illustration "
+        f'prominently featuring "{name}" in bold typography. The image clearly and '
+        f"literally depicts a {description}, along with recognizable objects directly "
+        f"associated with the primary function of a {name}. "
+        f"Ensure the imagery is concrete, intuitive, and immediately understandable, "
+        f"clearly conveying the purpose of a {name}. "
+        "Maintain vibrant, limited-palette colors, sharp vector lines, "
+        "geometric shapes, flat illustration techniques, and solid colors "
+        "without gradients or shading. Preserve a retro-futuristic aesthetic "
+        "influenced by mid-century futurism and 1960s psychedelia, "
+        "prioritizing clear visual storytelling and thematic clarity above all else."
     )

     custom_colors = [
@@ -99,12 +102,12 @@ async def generate_agent_image_v2(graph: BaseGraph | AgentGraph) -> io.BytesIO:
     return io.BytesIO(response.content)

-async def generate_agent_image_v1(agent: BaseGraph | AgentGraph) -> io.BytesIO:
+async def generate_agent_image_v1(agent: GraphBaseMeta | AgentGraph) -> io.BytesIO:
     """
     Generate an image for an agent using Flux model via Replicate API.

     Args:
-        agent (Graph): The agent to generate an image for
+        agent (GraphBaseMeta | AgentGraph): The agent to generate an image for

     Returns:
         io.BytesIO: The generated image as bytes
@@ -114,7 +117,13 @@ async def generate_agent_image_v1(agent: BaseGraph | AgentGraph) -> io.BytesIO:
         raise ValueError("Missing Replicate API key in settings")

     # Construct prompt from agent details
-    prompt = f"Create a visually engaging app store thumbnail for the AI agent that highlights what it does in a clear and captivating way:\n- **Name**: {agent.name}\n- **Description**: {agent.description}\nFocus on showcasing its core functionality with an appealing design."
+    prompt = (
+        "Create a visually engaging app store thumbnail for the AI agent "
+        "that highlights what it does in a clear and captivating way:\n"
+        f"- **Name**: {agent.name}\n"
+        f"- **Description**: {agent.description}\n"
+        f"Focus on showcasing its core functionality with an appealing design."
+    )

     # Set up Replicate client
     client = ReplicateClient(api_token=settings.secrets.replicate_api_key)

View File

@@ -278,7 +278,7 @@ async def get_agent(
 )
 async def get_graph_meta_by_store_listing_version_id(
     store_listing_version_id: str,
-) -> backend.data.graph.GraphMeta:
+) -> backend.data.graph.GraphModelWithoutNodes:
     """
     Get Agent Graph from Store Listing Version ID.
     """

View File

@@ -246,7 +246,9 @@ class BlockSchema(BaseModel):
f"is not of type {CredentialsMetaInput.__name__}" f"is not of type {CredentialsMetaInput.__name__}"
) )
credentials_fields[field_name].validate_credentials_field_schema(cls) CredentialsMetaInput.validate_credentials_field_schema(
cls.get_field_schema(field_name), field_name
)
elif field_name in credentials_fields: elif field_name in credentials_fields:
raise KeyError( raise KeyError(

View File

@@ -3,7 +3,7 @@ import logging
 import uuid
 from collections import defaultdict
 from datetime import datetime, timezone
-from typing import TYPE_CHECKING, Annotated, Any, Literal, Optional, cast
+from typing import TYPE_CHECKING, Annotated, Any, Literal, Optional, Self, cast

 from prisma.enums import SubmissionStatus
 from prisma.models import (
@@ -20,7 +20,7 @@ from prisma.types (
     AgentNodeLinkCreateInput,
     StoreListingVersionWhereInput,
 )
-from pydantic import BaseModel, BeforeValidator, Field, create_model
+from pydantic import BaseModel, BeforeValidator, Field
 from pydantic.fields import computed_field

 from backend.blocks.agent import AgentExecutorBlock
@@ -30,7 +30,6 @@ from backend.data.db import prisma as db
 from backend.data.dynamic_fields import is_tool_pin, sanitize_pin_name
 from backend.data.includes import MAX_GRAPH_VERSIONS_FETCH
 from backend.data.model import (
-    CredentialsField,
     CredentialsFieldInfo,
     CredentialsMetaInput,
     is_credentials_field_name,
@@ -45,7 +44,6 @@ from .block import (
     AnyBlockSchema,
     Block,
     BlockInput,
-    BlockSchema,
     BlockType,
     EmptySchema,
     get_block,
@@ -113,10 +111,12 @@ class Link(BaseDbModel):
 class Node(BaseDbModel):
     block_id: str
-    input_default: BlockInput = {}  # dict[input_name, default_value]
-    metadata: dict[str, Any] = {}
-    input_links: list[Link] = []
-    output_links: list[Link] = []
+    input_default: BlockInput = Field(  # dict[input_name, default_value]
+        default_factory=dict
+    )
+    metadata: dict[str, Any] = Field(default_factory=dict)
+    input_links: list[Link] = Field(default_factory=list)
+    output_links: list[Link] = Field(default_factory=list)

     @property
     def credentials_optional(self) -> bool:
@@ -221,18 +221,33 @@ class NodeModel(Node):
         return result

-class BaseGraph(BaseDbModel):
+class GraphBaseMeta(BaseDbModel):
+    """
+    Shared base for `GraphMeta` and `BaseGraph`, with core graph metadata fields.
+    """
+
     version: int = 1
     is_active: bool = True
     name: str
     description: str
     instructions: str | None = None
     recommended_schedule_cron: str | None = None
-    nodes: list[Node] = []
-    links: list[Link] = []
     forked_from_id: str | None = None
     forked_from_version: int | None = None
+
+
+class BaseGraph(GraphBaseMeta):
+    """
+    Graph with nodes, links, and computed I/O schema fields.
+
+    Used to represent sub-graphs within a `Graph`. Contains the full graph
+    structure including nodes and links, plus computed fields for schemas
+    and trigger info. Does NOT include user_id or created_at (see GraphModel).
+    """
+
+    nodes: list[Node] = Field(default_factory=list)
+    links: list[Link] = Field(default_factory=list)

     @computed_field
     @property
     def input_schema(self) -> dict[str, Any]:
@@ -361,44 +376,79 @@ class GraphTriggerInfo(BaseModel):
 class Graph(BaseGraph):
-    sub_graphs: list[BaseGraph] = []  # Flattened sub-graphs
+    """Creatable graph model used in API create/update endpoints."""
+
+    sub_graphs: list[BaseGraph] = Field(default_factory=list)  # Flattened sub-graphs
+
+
+class GraphMeta(GraphBaseMeta):
+    """
+    Lightweight graph metadata model representing an existing graph from the database,
+    for use in listings and summaries.
+
+    Lacks `GraphModel`'s nodes, links, and expensive computed fields.
+    Use for list endpoints where full graph data is not needed and performance matters.
+    """
+
+    id: str  # type: ignore
+    version: int  # type: ignore
+    user_id: str
+    created_at: datetime
+
+    @classmethod
+    def from_db(cls, graph: "AgentGraph") -> Self:
+        return cls(
+            id=graph.id,
+            version=graph.version,
+            is_active=graph.isActive,
+            name=graph.name or "",
+            description=graph.description or "",
+            instructions=graph.instructions,
+            recommended_schedule_cron=graph.recommendedScheduleCron,
+            forked_from_id=graph.forkedFromId,
+            forked_from_version=graph.forkedFromVersion,
+            user_id=graph.userId,
+            created_at=graph.createdAt,
+        )
+
+
+class GraphModel(Graph, GraphMeta):
+    """
+    Full graph model representing an existing graph from the database.
+
+    This is the primary model for working with persisted graphs. Includes all
+    graph data (nodes, links, sub_graphs) plus user ownership and timestamps.
+    Provides computed fields (input_schema, output_schema, etc.) used during
+    set-up (frontend) and execution (backend).
+
+    Inherits from:
+    - `Graph`: provides structure (nodes, links, sub_graphs) and computed schemas
+    - `GraphMeta`: provides user_id, created_at for database records
+    """
+
+    nodes: list[NodeModel] = Field(default_factory=list)  # type: ignore
+
+    @property
+    def starting_nodes(self) -> list[NodeModel]:
+        outbound_nodes = {link.sink_id for link in self.links}
+        input_nodes = {
+            node.id for node in self.nodes if node.block.block_type == BlockType.INPUT
+        }
+        return [
+            node
+            for node in self.nodes
+            if node.id not in outbound_nodes or node.id in input_nodes
+        ]
+
+    @property
+    def webhook_input_node(self) -> NodeModel | None:  # type: ignore
+        return cast(NodeModel, super().webhook_input_node)

     @computed_field
     @property
     def credentials_input_schema(self) -> dict[str, Any]:
-        schema = self._credentials_input_schema.jsonschema()
-
-        # Determine which credential fields are required based on credentials_optional metadata
         graph_credentials_inputs = self.aggregate_credentials_inputs()
-        required_fields = []
-
-        # Build a map of node_id -> node for quick lookup
-        all_nodes = {node.id: node for node in self.nodes}
-        for sub_graph in self.sub_graphs:
-            for node in sub_graph.nodes:
-                all_nodes[node.id] = node
-
-        for field_key, (
-            _field_info,
-            node_field_pairs,
-        ) in graph_credentials_inputs.items():
-            # A field is required if ANY node using it has credentials_optional=False
-            is_required = False
-            for node_id, _field_name in node_field_pairs:
-                node = all_nodes.get(node_id)
-                if node and not node.credentials_optional:
-                    is_required = True
-                    break
-            if is_required:
-                required_fields.append(field_key)
-
-        schema["required"] = required_fields
-        return schema
-
-    @property
-    def _credentials_input_schema(self) -> type[BlockSchema]:
-        graph_credentials_inputs = self.aggregate_credentials_inputs()
         logger.debug(
             f"Combined credentials input fields for graph #{self.id} ({self.name}): "
             f"{graph_credentials_inputs}"
@@ -406,8 +456,8 @@ class Graph(BaseGraph):
         # Warn if same-provider credentials inputs can't be combined (= bad UX)
         graph_cred_fields = list(graph_credentials_inputs.values())
-        for i, (field, keys) in enumerate(graph_cred_fields):
-            for other_field, other_keys in list(graph_cred_fields)[i + 1 :]:
+        for i, (field, keys, _) in enumerate(graph_cred_fields):
+            for other_field, other_keys, _ in list(graph_cred_fields)[i + 1 :]:
                 if field.provider != other_field.provider:
                     continue
                 if ProviderName.HTTP in field.provider:
@@ -423,31 +473,78 @@ class Graph(BaseGraph):
f"keys: {keys} <> {other_keys}." f"keys: {keys} <> {other_keys}."
) )
fields: dict[str, tuple[type[CredentialsMetaInput], CredentialsMetaInput]] = { # Build JSON schema directly to avoid expensive create_model + validation overhead
agg_field_key: ( properties = {}
CredentialsMetaInput[ required_fields = []
Literal[tuple(field_info.provider)], # type: ignore
Literal[tuple(field_info.supported_types)], # type: ignore for agg_field_key, (
], field_info,
CredentialsField( _,
required_scopes=set(field_info.required_scopes or []), is_required,
discriminator=field_info.discriminator, ) in graph_credentials_inputs.items():
discriminator_mapping=field_info.discriminator_mapping, providers = list(field_info.provider)
discriminator_values=field_info.discriminator_values, cred_types = list(field_info.supported_types)
field_schema: dict[str, Any] = {
"credentials_provider": providers,
"credentials_types": cred_types,
"type": "object",
"properties": {
"id": {"title": "Id", "type": "string"},
"title": {
"anyOf": [{"type": "string"}, {"type": "null"}],
"default": None,
"title": "Title",
},
"provider": {
"title": "Provider",
"type": "string",
**(
{"enum": providers}
if len(providers) > 1
else {"const": providers[0]}
), ),
) },
for agg_field_key, (field_info, _) in graph_credentials_inputs.items() "type": {
"title": "Type",
"type": "string",
**(
{"enum": cred_types}
if len(cred_types) > 1
else {"const": cred_types[0]}
),
},
},
"required": ["id", "provider", "type"],
} }
return create_model( # Add other (optional) field info items
self.name.replace(" ", "") + "CredentialsInputSchema", field_schema.update(
__base__=BlockSchema, field_info.model_dump(
**fields, # type: ignore by_alias=True,
exclude_defaults=True,
exclude={"provider", "supported_types"}, # already included above
) )
)
# Ensure field schema is well-formed
CredentialsMetaInput.validate_credentials_field_schema(
field_schema, agg_field_key
)
properties[agg_field_key] = field_schema
if is_required:
required_fields.append(agg_field_key)
return {
"type": "object",
"properties": properties,
"required": required_fields,
}
def aggregate_credentials_inputs( def aggregate_credentials_inputs(
self, self,
) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]]]]: ) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]], bool]]:
""" """
Returns: Returns:
dict[aggregated_field_key, tuple( dict[aggregated_field_key, tuple(
@@ -455,13 +552,19 @@ class Graph(BaseGraph):
                 (now includes discriminator_values from matching nodes)
                 set[(node_id, field_name)]: Node credentials fields that are
                     compatible with this aggregated field spec
+                bool: True if the field is required (any node has credentials_optional=False)
             )]
         """
-        # First collect all credential field data with input defaults
-        node_credential_data = []
+        # Track (field_info, (node_id, field_name), is_required) for each credential field
+        node_credential_data: list[tuple[CredentialsFieldInfo, tuple[str, str]]] = []
+        node_required_map: dict[str, bool] = {}  # node_id -> is_required
         for graph in [self] + self.sub_graphs:
             for node in graph.nodes:
+                # Track if this node requires credentials (credentials_optional=False means required)
+                node_required_map[node.id] = not node.credentials_optional
                 for (
                     field_name,
                     field_info,
@@ -485,37 +588,21 @@ class Graph(BaseGraph):
                 )

         # Combine credential field info (this will merge discriminator_values automatically)
-        return CredentialsFieldInfo.combine(*node_credential_data)
-
-
-class GraphModel(Graph):
-    user_id: str
-    nodes: list[NodeModel] = []  # type: ignore
-
-    created_at: datetime
-
-    @property
-    def starting_nodes(self) -> list[NodeModel]:
-        outbound_nodes = {link.sink_id for link in self.links}
-        input_nodes = {
-            node.id for node in self.nodes if node.block.block_type == BlockType.INPUT
-        }
-        return [
-            node
-            for node in self.nodes
-            if node.id not in outbound_nodes or node.id in input_nodes
-        ]
-
-    @property
-    def webhook_input_node(self) -> NodeModel | None:  # type: ignore
-        return cast(NodeModel, super().webhook_input_node)
-
-    def meta(self) -> "GraphMeta":
-        """
-        Returns a GraphMeta object with metadata about the graph.
-        This is used to return metadata about the graph without exposing nodes and links.
-        """
-        return GraphMeta.from_graph(self)
+        combined = CredentialsFieldInfo.combine(*node_credential_data)
+
+        # Add is_required flag to each aggregated field
+        # A field is required if ANY node using it has credentials_optional=False
+        return {
+            key: (
+                field_info,
+                node_field_pairs,
+                any(
+                    node_required_map.get(node_id, True)
+                    for node_id, _ in node_field_pairs
+                ),
+            )
+            for key, (field_info, node_field_pairs) in combined.items()
+        }

     def reassign_ids(self, user_id: str, reassign_graph_id: bool = False):
         """
@@ -799,13 +886,14 @@ class GraphModel(Graph):
             if is_static_output_block(link.source_id):
                 link.is_static = True  # Each value block output should be static.

-    @staticmethod
-    def from_db(
+    @classmethod
+    def from_db(  # type: ignore[reportIncompatibleMethodOverride]
+        cls,
         graph: AgentGraph,
         for_export: bool = False,
         sub_graphs: list[AgentGraph] | None = None,
-    ) -> "GraphModel":
-        return GraphModel(
+    ) -> Self:
+        return cls(
             id=graph.id,
             user_id=graph.userId if not for_export else "",
             version=graph.version,
@@ -831,17 +919,28 @@ class GraphModel(Graph):
             ],
         )

+    def hide_nodes(self) -> "GraphModelWithoutNodes":
+        """
+        Returns a copy of the `GraphModel` with nodes, links, and sub-graphs hidden
+        (excluded from serialization). They are still present in the model instance
+        so all computed fields (e.g. `credentials_input_schema`) still work.
+        """
+        return GraphModelWithoutNodes.model_validate(self, from_attributes=True)
+

-class GraphMeta(Graph):
-    user_id: str
-
-    # Easy work-around to prevent exposing nodes and links in the API response
-    nodes: list[NodeModel] = Field(default=[], exclude=True)  # type: ignore
-    links: list[Link] = Field(default=[], exclude=True)
-
-    @staticmethod
-    def from_graph(graph: GraphModel) -> "GraphMeta":
-        return GraphMeta(**graph.model_dump())
+class GraphModelWithoutNodes(GraphModel):
+    """
+    GraphModel variant that excludes nodes, links, and sub-graphs from serialization.
+
+    Used in contexts like the store where exposing internal graph structure
+    is not desired. Inherits all computed fields from GraphModel but marks
+    nodes and links as excluded from JSON output.
+    """
+
+    nodes: list[NodeModel] = Field(default_factory=list, exclude=True)
+    links: list[Link] = Field(default_factory=list, exclude=True)
+    sub_graphs: list[BaseGraph] = Field(default_factory=list, exclude=True)

 class GraphsPaginated(BaseModel):
@@ -912,21 +1011,11 @@ async def list_graphs_paginated(
         where=where_clause,
         distinct=["id"],
         order={"version": "desc"},
-        include=AGENT_GRAPH_INCLUDE,
         skip=offset,
         take=page_size,
     )
-    graph_models: list[GraphMeta] = []
-    for graph in graphs:
-        try:
-            graph_meta = GraphModel.from_db(graph).meta()
-            # Trigger serialization to validate that the graph is well formed
-            graph_meta.model_dump()
-            graph_models.append(graph_meta)
-        except Exception as e:
-            logger.error(f"Error processing graph {graph.id}: {e}")
-            continue
+    graph_models = [GraphMeta.from_db(graph) for graph in graphs]

     return GraphsPaginated(
         graphs=graph_models,

View File

@@ -163,7 +163,6 @@ class User(BaseModel):
 if TYPE_CHECKING:
     from prisma.models import User as PrismaUser
-    from backend.data.block import BlockSchema

 T = TypeVar("T")
 logger = logging.getLogger(__name__)
@@ -508,15 +507,13 @@ class CredentialsMetaInput(BaseModel, Generic[CP, CT]):
     def allowed_cred_types(cls) -> tuple[CredentialsType, ...]:
         return get_args(cls.model_fields["type"].annotation)

-    @classmethod
-    def validate_credentials_field_schema(cls, model: type["BlockSchema"]):
+    @staticmethod
+    def validate_credentials_field_schema(
+        field_schema: dict[str, Any], field_name: str
+    ):
         """Validates the schema of a credentials input field"""
-        field_name = next(
-            name for name, type in model.get_credentials_fields().items() if type is cls
-        )
-        field_schema = model.jsonschema()["properties"][field_name]
         try:
-            schema_extra = CredentialsFieldInfo[CP, CT].model_validate(field_schema)
+            field_info = CredentialsFieldInfo[CP, CT].model_validate(field_schema)
         except ValidationError as e:
             if "Field required [type=missing" not in str(e):
                 raise
@@ -526,11 +523,11 @@
                 f"{field_schema}"
             ) from e

-        providers = cls.allowed_providers()
+        providers = field_info.provider
         if (
             providers is not None
             and len(providers) > 1
-            and not schema_extra.discriminator
+            and not field_info.discriminator
         ):
             raise TypeError(
                 f"Multi-provider CredentialsField '{field_name}' "

View File

@@ -373,7 +373,7 @@ def make_node_credentials_input_map(
     # Get aggregated credentials fields for the graph
     graph_cred_inputs = graph.aggregate_credentials_inputs()
-    for graph_input_name, (_, compatible_node_fields) in graph_cred_inputs.items():
+    for graph_input_name, (_, compatible_node_fields, _) in graph_cred_inputs.items():
         # Best-effort map: skip missing items
         if graph_input_name not in graph_credentials_input:
             continue

View File

@@ -188,6 +188,7 @@ class WorkspaceManager:
f"{Config().max_file_size_mb}MB limit" f"{Config().max_file_size_mb}MB limit"
) )
# Virus scan content before persisting (defense in depth)
await scan_content_safe(content, filename=filename) await scan_content_safe(content, filename=filename)
# Determine path with session scoping # Determine path with session scoping

View File

@@ -3,7 +3,6 @@
"credentials_input_schema": { "credentials_input_schema": {
"properties": {}, "properties": {},
"required": [], "required": [],
"title": "TestGraphCredentialsInputSchema",
"type": "object" "type": "object"
}, },
"description": "A test graph", "description": "A test graph",

View File

@@ -1,34 +1,14 @@
 [
   {
-    "credentials_input_schema": {
-      "properties": {},
-      "required": [],
-      "title": "TestGraphCredentialsInputSchema",
-      "type": "object"
-    },
+    "created_at": "2025-09-04T13:37:00",
     "description": "A test graph",
     "forked_from_id": null,
     "forked_from_version": null,
-    "has_external_trigger": false,
-    "has_human_in_the_loop": false,
-    "has_sensitive_action": false,
     "id": "graph-123",
-    "input_schema": {
-      "properties": {},
-      "required": [],
-      "type": "object"
-    },
     "instructions": null,
     "is_active": true,
     "name": "Test Graph",
-    "output_schema": {
-      "properties": {},
-      "required": [],
-      "type": "object"
-    },
     "recommended_schedule_cron": null,
-    "sub_graphs": [],
-    "trigger_setup_info": null,
     "user_id": "3e53486c-cf57-477e-ba2a-cb02dc828e1a",
     "version": 1
   }

View File

@@ -1,5 +1,5 @@
 import { CredentialsMetaInput } from "@/app/api/__generated__/models/credentialsMetaInput";
-import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
+import { GraphModel } from "@/app/api/__generated__/models/graphModel";
 import { CredentialsInput } from "@/components/contextual/CredentialsInput/CredentialsInput";
 import { useState } from "react";
 import { getSchemaDefaultCredentials } from "../../helpers";
@@ -9,7 +9,7 @@ type Credential = CredentialsMetaInput | undefined;
 type Credentials = Record<string, Credential>;
 type Props = {
-  agent: GraphMeta | null;
+  agent: GraphModel | null;
   siblingInputs?: Record<string, any>;
   onCredentialsChange: (
     credentials: Record<string, CredentialsMetaInput>,

View File

@@ -1,9 +1,9 @@
 import { CredentialsMetaInput } from "@/app/api/__generated__/models/credentialsMetaInput";
-import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
+import { GraphModel } from "@/app/api/__generated__/models/graphModel";
 import { BlockIOCredentialsSubSchema } from "@/lib/autogpt-server-api/types";

 export function getCredentialFields(
-  agent: GraphMeta | null,
+  agent: GraphModel | null,
 ): AgentCredentialsFields {
   if (!agent) return {};

View File

@@ -3,10 +3,10 @@ import type {
   CredentialsMetaInput,
 } from "@/lib/autogpt-server-api/types";
 import type { InputValues } from "./types";
-import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
+import { GraphModel } from "@/app/api/__generated__/models/graphModel";

 export function computeInitialAgentInputs(
-  agent: GraphMeta | null,
+  agent: GraphModel | null,
   existingInputs?: InputValues | null,
 ): InputValues {
   const properties = agent?.input_schema?.properties || {};
@@ -29,7 +29,7 @@ export function computeInitialAgentInputs(
 }

 type IsRunDisabledParams = {
-  agent: GraphMeta | null;
+  agent: GraphModel | null;
   isRunning: boolean;
   agentInputs: InputValues | null | undefined;
 };

View File

@@ -30,6 +30,8 @@ import {
} from "@/components/atoms/Tooltip/BaseTooltip"; } from "@/components/atoms/Tooltip/BaseTooltip";
import { GraphMeta } from "@/lib/autogpt-server-api"; import { GraphMeta } from "@/lib/autogpt-server-api";
import jaro from "jaro-winkler"; import jaro from "jaro-winkler";
import { getV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
import { okData } from "@/app/api/helpers";
type _Block = Omit<Block, "inputSchema" | "outputSchema"> & { type _Block = Omit<Block, "inputSchema" | "outputSchema"> & {
uiKey?: string; uiKey?: string;
@@ -107,6 +109,8 @@ export function BlocksControl({
.filter((b) => b.uiType !== BlockUIType.AGENT) .filter((b) => b.uiType !== BlockUIType.AGENT)
.sort((a, b) => a.name.localeCompare(b.name)); .sort((a, b) => a.name.localeCompare(b.name));
// Agent blocks are created from GraphMeta which doesn't include schemas.
// Schemas will be fetched on-demand when the block is actually added.
const agentBlockList = flows const agentBlockList = flows
.map((flow): _Block => { .map((flow): _Block => {
return { return {
@@ -116,8 +120,9 @@ export function BlocksControl({
`Ver.${flow.version}` + `Ver.${flow.version}` +
(flow.description ? ` | ${flow.description}` : ""), (flow.description ? ` | ${flow.description}` : ""),
categories: [{ category: "AGENT", description: "" }], categories: [{ category: "AGENT", description: "" }],
inputSchema: flow.input_schema, // Empty schemas - will be populated when block is added
outputSchema: flow.output_schema, inputSchema: { type: "object", properties: {} },
outputSchema: { type: "object", properties: {} },
staticOutput: false, staticOutput: false,
uiType: BlockUIType.AGENT, uiType: BlockUIType.AGENT,
costs: [], costs: [],
@@ -125,8 +130,7 @@ export function BlocksControl({
hardcodedValues: { hardcodedValues: {
graph_id: flow.id, graph_id: flow.id,
graph_version: flow.version, graph_version: flow.version,
input_schema: flow.input_schema, // Schemas will be fetched on-demand when block is added
output_schema: flow.output_schema,
}, },
}; };
}) })
@@ -182,6 +186,37 @@ export function BlocksControl({
setSelectedCategory(null); setSelectedCategory(null);
}, []); }, []);
// Handler to add a block, fetching graph data on-demand for agent blocks
const handleAddBlock = useCallback(
async (block: _Block & { notAvailable: string | null }) => {
if (block.notAvailable) return;
// For agent blocks, fetch the full graph to get schemas
if (block.uiType === BlockUIType.AGENT && block.hardcodedValues) {
const graphID = block.hardcodedValues.graph_id as string;
const graphVersion = block.hardcodedValues.graph_version as number;
const graphData = okData(
await getV1GetSpecificGraph(graphID, { version: graphVersion }),
);
if (graphData) {
addBlock(block.id, block.name, {
...block.hardcodedValues,
input_schema: graphData.input_schema,
output_schema: graphData.output_schema,
});
} else {
// Fallback: add without schemas (will be incomplete)
console.error("Failed to fetch graph data for agent block");
addBlock(block.id, block.name, block.hardcodedValues || {});
}
} else {
addBlock(block.id, block.name, block.hardcodedValues || {});
}
},
[addBlock],
);
// Extract unique categories from blocks // Extract unique categories from blocks
const categories = useMemo(() => { const categories = useMemo(() => {
return Array.from( return Array.from(
@@ -303,10 +338,7 @@ export function BlocksControl({
}), }),
); );
}} }}
onClick={() => onClick={() => handleAddBlock(block)}
!block.notAvailable &&
addBlock(block.id, block.name, block?.hardcodedValues || {})
}
title={block.notAvailable ?? undefined} title={block.notAvailable ?? undefined}
> >
<div <div

View File

@@ -29,13 +29,17 @@ import "@xyflow/react/dist/style.css";
 import { ConnectedEdge, CustomNode } from "../CustomNode/CustomNode";
 import "./flow.css";
 import {
+  BlockIORootSchema,
   BlockUIType,
   formatEdgeID,
   GraphExecutionID,
   GraphID,
   GraphMeta,
   LibraryAgent,
+  SpecialBlockID,
 } from "@/lib/autogpt-server-api";
+import { getV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
+import { okData } from "@/app/api/helpers";
 import { IncompatibilityInfo } from "../../../hooks/useSubAgentUpdate/types";
 import { Key, storage } from "@/services/storage/local-storage";
 import { findNewlyAddedBlockCoordinates, getTypeColor } from "@/lib/utils";
@@ -687,8 +691,94 @@
     [getNode, updateNode, nodes],
   );

+  /* Shared helper to create and add a node */
+  const createAndAddNode = useCallback(
+    async (
+      blockID: string,
+      blockName: string,
+      hardcodedValues: Record<string, any>,
+      position: { x: number; y: number },
+    ): Promise<CustomNode | null> => {
+      const nodeSchema = availableBlocks.find((node) => node.id === blockID);
+      if (!nodeSchema) {
+        console.error(`Schema not found for block ID: ${blockID}`);
+        return null;
+      }
+
+      // For agent blocks, fetch the full graph to get schemas
+      let inputSchema: BlockIORootSchema = nodeSchema.inputSchema;
+      let outputSchema: BlockIORootSchema = nodeSchema.outputSchema;
+      let finalHardcodedValues = hardcodedValues;
+      if (blockID === SpecialBlockID.AGENT) {
+        const graphID = hardcodedValues.graph_id as string;
+        const graphVersion = hardcodedValues.graph_version as number;
+        const graphData = okData(
+          await getV1GetSpecificGraph(graphID, { version: graphVersion }),
+        );
+        if (graphData) {
+          inputSchema = graphData.input_schema as BlockIORootSchema;
+          outputSchema = graphData.output_schema as BlockIORootSchema;
+          finalHardcodedValues = {
+            ...hardcodedValues,
+            input_schema: graphData.input_schema,
+            output_schema: graphData.output_schema,
+          };
+        } else {
+          console.error("Failed to fetch graph data for agent block");
+        }
+      }
+
+      const newNode: CustomNode = {
+        id: nodeId.toString(),
+        type: "custom",
+        position,
+        data: {
+          blockType: blockName,
+          blockCosts: nodeSchema.costs || [],
+          title: `${blockName} ${nodeId}`,
+          description: nodeSchema.description,
+          categories: nodeSchema.categories,
+          inputSchema: inputSchema,
+          outputSchema: outputSchema,
+          hardcodedValues: finalHardcodedValues,
+          connections: [],
+          isOutputOpen: false,
+          block_id: blockID,
+          isOutputStatic: nodeSchema.staticOutput,
+          uiType: nodeSchema.uiType,
+        },
+      };
+
+      addNodes(newNode);
+      setNodeId((prevId) => prevId + 1);
+      clearNodesStatusAndOutput();
+
+      history.push({
+        type: "ADD_NODE",
+        payload: { node: { ...newNode, ...newNode.data } },
+        undo: () => deleteElements({ nodes: [{ id: newNode.id }] }),
+        redo: () => addNodes(newNode),
+      });
+
+      return newNode;
+    },
+    [
+      availableBlocks,
+      nodeId,
+      addNodes,
+      deleteElements,
+      clearNodesStatusAndOutput,
+    ],
+  );
+
   const addNode = useCallback(
-    (blockId: string, nodeType: string, hardcodedValues: any = {}) => {
+    async (
+      blockId: string,
+      nodeType: string,
+      hardcodedValues: Record<string, any> = {},
+    ) => {
       const nodeSchema = availableBlocks.find((node) => node.id === blockId);
       if (!nodeSchema) {
         console.error(`Schema not found for block ID: ${blockId}`);
@@ -707,73 +797,42 @@
       // Alternative: We could also use D3 force, Intersection for this (React flow Pro examples)
       const { x, y } = getViewport();

-      const viewportCoordinates =
+      const position =
         nodeDimensions && Object.keys(nodeDimensions).length > 0
-          ? // we will get all the dimension of nodes, then store
-            findNewlyAddedBlockCoordinates(
+          ? findNewlyAddedBlockCoordinates(
               nodeDimensions,
               nodeSchema.uiType == BlockUIType.NOTE ? 300 : 500,
               60,
               1.0,
             )
-          : // we will get all the dimension of nodes, then store
-            {
+          : {
               x: window.innerWidth / 2 - x,
               y: window.innerHeight / 2 - y,
             };

-      const newNode: CustomNode = {
-        id: nodeId.toString(),
-        type: "custom",
-        position: viewportCoordinates, // Set the position to the calculated viewport center
-        data: {
-          blockType: nodeType,
-          blockCosts: nodeSchema.costs,
-          title: `${nodeType} ${nodeId}`,
-          description: nodeSchema.description,
-          categories: nodeSchema.categories,
-          inputSchema: nodeSchema.inputSchema,
-          outputSchema: nodeSchema.outputSchema,
-          hardcodedValues: hardcodedValues,
-          connections: [],
-          isOutputOpen: false,
-          block_id: blockId,
-          isOutputStatic: nodeSchema.staticOutput,
-          uiType: nodeSchema.uiType,
-        },
-      };
-      addNodes(newNode);
-      setNodeId((prevId) => prevId + 1);
-      clearNodesStatusAndOutput(); // Clear status and output when a new node is added
+      const newNode = await createAndAddNode(
+        blockId,
+        nodeType,
+        hardcodedValues,
+        position,
+      );
+      if (!newNode) return;

       setViewport(
         {
-          // Rough estimate of the dimension of the node is: 500x400px.
-          // Though we skip shifting the X, considering the block menu side-bar.
-          x: -viewportCoordinates.x * 0.8 + (window.innerWidth - 0.0) / 2,
-          y: -viewportCoordinates.y * 0.8 + (window.innerHeight - 400) / 2,
+          x: -position.x * 0.8 + (window.innerWidth - 0.0) / 2,
+          y: -position.y * 0.8 + (window.innerHeight - 400) / 2,
           zoom: 0.8,
         },
         { duration: 500 },
       );
-
-      history.push({
-        type: "ADD_NODE",
-        payload: { node: { ...newNode, ...newNode.data } },
-        undo: () => deleteElements({ nodes: [{ id: newNode.id }] }),
-        redo: () => addNodes(newNode),
-      });
     },
     [
-      nodeId,
       getViewport,
       setViewport,
       availableBlocks,
-      addNodes,
       nodeDimensions,
-      deleteElements,
-      clearNodesStatusAndOutput,
+      createAndAddNode,
     ],
   );
@@ -920,7 +979,7 @@
   }, []);

   const onDrop = useCallback(
-    (event: React.DragEvent) => {
+    async (event: React.DragEvent) => {
       event.preventDefault();
       const blockData = event.dataTransfer.getData("application/reactflow");
@@ -935,62 +994,17 @@
           y: event.clientY,
         });

-        // Find the block schema
-        const nodeSchema = availableBlocks.find((node) => node.id === blockId);
-        if (!nodeSchema) {
-          console.error(`Schema not found for block ID: ${blockId}`);
-          return;
-        }
-
-        // Create the new node at the drop position
-        const newNode: CustomNode = {
-          id: nodeId.toString(),
-          type: "custom",
+        await createAndAddNode(
+          blockId,
+          blockName,
+          hardcodedValues || {},
           position,
-          data: {
-            blockType: blockName,
-            blockCosts: nodeSchema.costs || [],
-            title: `${blockName} ${nodeId}`,
-            description: nodeSchema.description,
-            categories: nodeSchema.categories,
-            inputSchema: nodeSchema.inputSchema,
-            outputSchema: nodeSchema.outputSchema,
-            hardcodedValues: hardcodedValues,
-            connections: [],
-            isOutputOpen: false,
-            block_id: blockId,
-            uiType: nodeSchema.uiType,
-          },
-        };
-
-        history.push({
-          type: "ADD_NODE",
-          payload: { node: { ...newNode, ...newNode.data } },
-          undo: () => {
-            deleteElements({ nodes: [{ id: newNode.id } as any], edges: [] });
-          },
-          redo: () => {
-            addNodes([newNode]);
-          },
-        });
-        addNodes([newNode]);
-        clearNodesStatusAndOutput();
-        setNodeId((prevId) => prevId + 1);
+        );
       } catch (error) {
         console.error("Failed to drop block:", error);
       }
     },
-    [
-      nodeId,
-      availableBlocks,
-      nodes,
-      edges,
-      addNodes,
-      screenToFlowPosition,
-      deleteElements,
-      clearNodesStatusAndOutput,
-    ],
+    [screenToFlowPosition, createAndAddNode],
   );

   const buildContextValue: BuilderContextType = useMemo(

View File

@@ -4,13 +4,13 @@ import { AgentRunDraftView } from "@/app/(platform)/library/agents/[id]/componen
 import { Dialog } from "@/components/molecules/Dialog/Dialog";
 import type {
   CredentialsMetaInput,
-  GraphMeta,
+  Graph,
 } from "@/lib/autogpt-server-api/types";

 interface RunInputDialogProps {
   isOpen: boolean;
   doClose: () => void;
-  graph: GraphMeta;
+  graph: Graph;
   doRun?: (
     inputs: Record<string, any>,
     credentialsInputs: Record<string, CredentialsMetaInput>,

View File

@@ -9,13 +9,13 @@ import { CustomNodeData } from "@/app/(platform)/build/components/legacy-builder
 import {
   BlockUIType,
   CredentialsMetaInput,
-  GraphMeta,
+  Graph,
 } from "@/lib/autogpt-server-api/types";
 import RunnerOutputUI, { OutputNodeInfo } from "./RunnerOutputUI";
 import { RunnerInputDialog } from "./RunnerInputUI";

 interface RunnerUIWrapperProps {
-  graph: GraphMeta;
+  graph: Graph;
   nodes: Node<CustomNodeData>[];
   graphExecutionError?: string | null;
   saveAndRun: (

View File

@@ -1,5 +1,5 @@
 import { GraphInputSchema } from "@/lib/autogpt-server-api";
-import { GraphMetaLike, IncompatibilityInfo } from "./types";
+import { GraphLike, IncompatibilityInfo } from "./types";

 // Helper type for schema properties - the generated types are too loose
 type SchemaProperties = Record<string, GraphInputSchema["properties"][string]>;
@@ -36,7 +36,7 @@ export function getSchemaRequired(schema: unknown): SchemaRequired {
  */
 export function createUpdatedAgentNodeInputs(
   currentInputs: Record<string, unknown>,
-  latestSubGraphVersion: GraphMetaLike,
+  latestSubGraphVersion: GraphLike,
 ): Record<string, unknown> {
   return {
     ...currentInputs,

View File

@@ -1,7 +1,11 @@
-import type { GraphMeta as LegacyGraphMeta } from "@/lib/autogpt-server-api";
+import type {
+  Graph as LegacyGraph,
+  GraphMeta as LegacyGraphMeta,
+} from "@/lib/autogpt-server-api";
+import type { GraphModel as GeneratedGraph } from "@/app/api/__generated__/models/graphModel";
 import type { GraphMeta as GeneratedGraphMeta } from "@/app/api/__generated__/models/graphMeta";

-export type SubAgentUpdateInfo<T extends GraphMetaLike = GraphMetaLike> = {
+export type SubAgentUpdateInfo<T extends GraphLike = GraphLike> = {
   hasUpdate: boolean;
   currentVersion: number;
   latestVersion: number;
@@ -10,7 +14,10 @@ export type SubAgentUpdateInfo<T extends GraphMetaLike = GraphMetaLike> = {
   incompatibilities: IncompatibilityInfo | null;
 };

-// Union type for GraphMeta that works with both legacy and new builder
+// Union type for Graph (with schemas) that works with both legacy and new builder
+export type GraphLike = LegacyGraph | GeneratedGraph;
+
+// Union type for GraphMeta (without schemas) for version detection
 export type GraphMetaLike = LegacyGraphMeta | GeneratedGraphMeta;

 export type IncompatibilityInfo = {

View File

@@ -1,5 +1,11 @@
 import { useMemo } from "react";
-import { GraphInputSchema, GraphOutputSchema } from "@/lib/autogpt-server-api";
+import type {
+  GraphInputSchema,
+  GraphOutputSchema,
+} from "@/lib/autogpt-server-api";
+import type { GraphModel } from "@/app/api/__generated__/models/graphModel";
+import { useGetV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
+import { okData } from "@/app/api/helpers";
 import { getEffectiveType } from "@/lib/utils";
 import { EdgeLike, getSchemaProperties, getSchemaRequired } from "./helpers";
 import {
@@ -11,26 +17,38 @@ import {
 /**
  * Checks if a newer version of a sub-agent is available and determines compatibility
  */
-export function useSubAgentUpdate<T extends GraphMetaLike>(
+export function useSubAgentUpdate(
   nodeID: string,
   graphID: string | undefined,
   graphVersion: number | undefined,
   currentInputSchema: GraphInputSchema | undefined,
   currentOutputSchema: GraphOutputSchema | undefined,
   connections: EdgeLike[],
-  availableGraphs: T[],
-): SubAgentUpdateInfo<T> {
+  availableGraphs: GraphMetaLike[],
+): SubAgentUpdateInfo<GraphModel> {
   // Find the latest version of the same graph
-  const latestGraph = useMemo(() => {
+  const latestGraphInfo = useMemo(() => {
     if (!graphID) return null;
     return availableGraphs.find((graph) => graph.id === graphID) || null;
   }, [graphID, availableGraphs]);

-  // Check if there's an update available
+  // Check if there's a newer version available
   const hasUpdate = useMemo(() => {
-    if (!latestGraph || graphVersion === undefined) return false;
-    return latestGraph.version! > graphVersion;
-  }, [latestGraph, graphVersion]);
+    if (!latestGraphInfo || graphVersion === undefined) return false;
+    return latestGraphInfo.version! > graphVersion;
+  }, [latestGraphInfo, graphVersion]);
+
+  // Fetch full graph IF an update is detected
+  const { data: latestGraph } = useGetV1GetSpecificGraph(
+    graphID ?? "",
+    { version: latestGraphInfo?.version },
+    {
+      query: {
+        enabled: hasUpdate && !!graphID && !!latestGraphInfo?.version,
+        select: okData,
+      },
+    },
+  );

   // Get connected input and output handles for this specific node
   const connectedHandles = useMemo(() => {
@@ -152,8 +170,8 @@ export function useSubAgentUpdate<T extends GraphMetaLike>(
   return {
     hasUpdate,
     currentVersion: graphVersion || 0,
-    latestVersion: latestGraph?.version || 0,
-    latestGraph,
+    latestVersion: latestGraphInfo?.version || 0,
+    latestGraph: latestGraph || null,
     isCompatible: compatibilityResult.isCompatible,
     incompatibilities: compatibilityResult.incompatibilities,
   };

View File

@@ -18,7 +18,7 @@ interface GraphStore {
outputSchema: Record<string, any> | null, outputSchema: Record<string, any> | null,
) => void; ) => void;
// Available graphs; used for sub-graph updates // Available graphs; used to detect updated sub-graph versions
availableSubGraphs: GraphMeta[]; availableSubGraphs: GraphMeta[];
setAvailableSubGraphs: (graphs: GraphMeta[]) => void; setAvailableSubGraphs: (graphs: GraphMeta[]) => void;

View File

@@ -10,8 +10,8 @@ import React, {
import { import {
CredentialsMetaInput, CredentialsMetaInput,
CredentialsType, CredentialsType,
Graph,
GraphExecutionID, GraphExecutionID,
GraphMeta,
LibraryAgentPreset, LibraryAgentPreset,
LibraryAgentPresetID, LibraryAgentPresetID,
LibraryAgentPresetUpdatable, LibraryAgentPresetUpdatable,
@@ -69,7 +69,7 @@ export function AgentRunDraftView({
className, className,
recommendedScheduleCron, recommendedScheduleCron,
}: { }: {
graph: GraphMeta; graph: Graph;
agentActions?: ButtonAction[]; agentActions?: ButtonAction[];
recommendedScheduleCron?: string | null; recommendedScheduleCron?: string | null;
doRun?: ( doRun?: (

View File

@@ -2,8 +2,8 @@
import React, { useCallback, useMemo } from "react"; import React, { useCallback, useMemo } from "react";
import { import {
Graph,
GraphExecutionID, GraphExecutionID,
GraphMeta,
Schedule, Schedule,
ScheduleID, ScheduleID,
} from "@/lib/autogpt-server-api"; } from "@/lib/autogpt-server-api";
@@ -35,7 +35,7 @@ export function AgentScheduleDetailsView({
onForcedRun, onForcedRun,
doDeleteSchedule, doDeleteSchedule,
}: { }: {
graph: GraphMeta; graph: Graph;
schedule: Schedule; schedule: Schedule;
agentActions: ButtonAction[]; agentActions: ButtonAction[];
onForcedRun: (runID: GraphExecutionID) => void; onForcedRun: (runID: GraphExecutionID) => void;

View File

@@ -5629,7 +5629,9 @@
"description": "Successful Response", "description": "Successful Response",
"content": { "content": {
"application/json": { "application/json": {
"schema": { "$ref": "#/components/schemas/GraphMeta" } "schema": {
"$ref": "#/components/schemas/GraphModelWithoutNodes"
}
} }
} }
}, },
@@ -6495,18 +6497,6 @@
"anyOf": [{ "type": "string" }, { "type": "null" }], "anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Recommended Schedule Cron" "title": "Recommended Schedule Cron"
}, },
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes",
"default": []
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links",
"default": []
},
"forked_from_id": { "forked_from_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }], "anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Forked From Id" "title": "Forked From Id"
@@ -6514,11 +6504,22 @@
"forked_from_version": { "forked_from_version": {
"anyOf": [{ "type": "integer" }, { "type": "null" }], "anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version" "title": "Forked From Version"
},
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes"
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links"
} }
}, },
"type": "object", "type": "object",
"required": ["name", "description"], "required": ["name", "description"],
"title": "BaseGraph" "title": "BaseGraph",
"description": "Graph with nodes, links, and computed I/O schema fields.\n\nUsed to represent sub-graphs within a `Graph`. Contains the full graph\nstructure including nodes and links, plus computed fields for schemas\nand trigger info. Does NOT include user_id or created_at (see GraphModel)."
}, },
"BaseGraph-Output": { "BaseGraph-Output": {
"properties": { "properties": {
@@ -6539,18 +6540,6 @@
"anyOf": [{ "type": "string" }, { "type": "null" }], "anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Recommended Schedule Cron" "title": "Recommended Schedule Cron"
}, },
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes",
"default": []
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links",
"default": []
},
"forked_from_id": { "forked_from_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }], "anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Forked From Id" "title": "Forked From Id"
@@ -6559,6 +6548,16 @@
"anyOf": [{ "type": "integer" }, { "type": "null" }], "anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version" "title": "Forked From Version"
}, },
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes"
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links"
},
"input_schema": { "input_schema": {
"additionalProperties": true, "additionalProperties": true,
"type": "object", "type": "object",
@@ -6605,7 +6604,8 @@
"has_sensitive_action", "has_sensitive_action",
"trigger_setup_info" "trigger_setup_info"
], ],
"title": "BaseGraph" "title": "BaseGraph",
"description": "Graph with nodes, links, and computed I/O schema fields.\n\nUsed to represent sub-graphs within a `Graph`. Contains the full graph\nstructure including nodes and links, plus computed fields for schemas\nand trigger info. Does NOT include user_id or created_at (see GraphModel)."
}, },
"BlockCategoryResponse": { "BlockCategoryResponse": {
"properties": { "properties": {
@@ -7399,18 +7399,6 @@
"anyOf": [{ "type": "string" }, { "type": "null" }], "anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Recommended Schedule Cron" "title": "Recommended Schedule Cron"
}, },
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes",
"default": []
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links",
"default": []
},
"forked_from_id": { "forked_from_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }], "anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Forked From Id" "title": "Forked From Id"
@@ -7419,16 +7407,26 @@
"anyOf": [{ "type": "integer" }, { "type": "null" }], "anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version" "title": "Forked From Version"
}, },
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes"
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links"
},
"sub_graphs": { "sub_graphs": {
"items": { "$ref": "#/components/schemas/BaseGraph-Input" }, "items": { "$ref": "#/components/schemas/BaseGraph-Input" },
"type": "array", "type": "array",
"title": "Sub Graphs", "title": "Sub Graphs"
"default": []
} }
}, },
"type": "object", "type": "object",
"required": ["name", "description"], "required": ["name", "description"],
"title": "Graph" "title": "Graph",
"description": "Creatable graph model used in API create/update endpoints."
}, },
"GraphExecution": { "GraphExecution": {
"properties": { "properties": {
@@ -7778,6 +7776,52 @@
"description": "Response schema for paginated graph executions." "description": "Response schema for paginated graph executions."
}, },
"GraphMeta": { "GraphMeta": {
"properties": {
"id": { "type": "string", "title": "Id" },
"version": { "type": "integer", "title": "Version" },
"is_active": {
"type": "boolean",
"title": "Is Active",
"default": true
},
"name": { "type": "string", "title": "Name" },
"description": { "type": "string", "title": "Description" },
"instructions": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Instructions"
},
"recommended_schedule_cron": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Recommended Schedule Cron"
},
"forked_from_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Forked From Id"
},
"forked_from_version": {
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version"
},
"user_id": { "type": "string", "title": "User Id" },
"created_at": {
"type": "string",
"format": "date-time",
"title": "Created At"
}
},
"type": "object",
"required": [
"id",
"version",
"name",
"description",
"user_id",
"created_at"
],
"title": "GraphMeta",
"description": "Lightweight graph metadata model representing an existing graph from the database,\nfor use in listings and summaries.\n\nLacks `GraphModel`'s nodes, links, and expensive computed fields.\nUse for list endpoints where full graph data is not needed and performance matters."
},
"GraphModel": {
"properties": { "properties": {
"id": { "type": "string", "title": "Id" }, "id": { "type": "string", "title": "Id" },
"version": { "type": "integer", "title": "Version", "default": 1 }, "version": { "type": "integer", "title": "Version", "default": 1 },
@@ -7804,13 +7848,27 @@
"anyOf": [{ "type": "integer" }, { "type": "null" }], "anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version" "title": "Forked From Version"
}, },
"user_id": { "type": "string", "title": "User Id" },
"created_at": {
"type": "string",
"format": "date-time",
"title": "Created At"
},
"nodes": {
"items": { "$ref": "#/components/schemas/NodeModel" },
"type": "array",
"title": "Nodes"
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links"
},
"sub_graphs": { "sub_graphs": {
"items": { "$ref": "#/components/schemas/BaseGraph-Output" }, "items": { "$ref": "#/components/schemas/BaseGraph-Output" },
"type": "array", "type": "array",
"title": "Sub Graphs", "title": "Sub Graphs"
"default": []
}, },
"user_id": { "type": "string", "title": "User Id" },
"input_schema": { "input_schema": {
"additionalProperties": true, "additionalProperties": true,
"type": "object", "type": "object",
@@ -7857,6 +7915,7 @@
"name", "name",
"description", "description",
"user_id", "user_id",
"created_at",
"input_schema", "input_schema",
"output_schema", "output_schema",
"has_external_trigger", "has_external_trigger",
@@ -7865,9 +7924,10 @@
"trigger_setup_info", "trigger_setup_info",
"credentials_input_schema" "credentials_input_schema"
], ],
"title": "GraphMeta" "title": "GraphModel",
"description": "Full graph model representing an existing graph from the database.\n\nThis is the primary model for working with persisted graphs. Includes all\ngraph data (nodes, links, sub_graphs) plus user ownership and timestamps.\nProvides computed fields (input_schema, output_schema, etc.) used during\nset-up (frontend) and execution (backend).\n\nInherits from:\n- `Graph`: provides structure (nodes, links, sub_graphs) and computed schemas\n- `GraphMeta`: provides user_id, created_at for database records"
}, },
"GraphModel": { "GraphModelWithoutNodes": {
"properties": { "properties": {
"id": { "type": "string", "title": "Id" }, "id": { "type": "string", "title": "Id" },
"version": { "type": "integer", "title": "Version", "default": 1 }, "version": { "type": "integer", "title": "Version", "default": 1 },
@@ -7886,18 +7946,6 @@
"anyOf": [{ "type": "string" }, { "type": "null" }], "anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Recommended Schedule Cron" "title": "Recommended Schedule Cron"
}, },
"nodes": {
"items": { "$ref": "#/components/schemas/NodeModel" },
"type": "array",
"title": "Nodes",
"default": []
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links",
"default": []
},
"forked_from_id": { "forked_from_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }], "anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Forked From Id" "title": "Forked From Id"
@@ -7906,12 +7954,6 @@
"anyOf": [{ "type": "integer" }, { "type": "null" }], "anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version" "title": "Forked From Version"
}, },
"sub_graphs": {
"items": { "$ref": "#/components/schemas/BaseGraph-Output" },
"type": "array",
"title": "Sub Graphs",
"default": []
},
"user_id": { "type": "string", "title": "User Id" }, "user_id": { "type": "string", "title": "User Id" },
"created_at": { "created_at": {
"type": "string", "type": "string",
@@ -7973,7 +8015,8 @@
"trigger_setup_info", "trigger_setup_info",
"credentials_input_schema" "credentials_input_schema"
], ],
"title": "GraphModel" "title": "GraphModelWithoutNodes",
"description": "GraphModel variant that excludes nodes, links, and sub-graphs from serialization.\n\nUsed in contexts like the store where exposing internal graph structure\nis not desired. Inherits all computed fields from GraphModel but marks\nnodes and links as excluded from JSON output."
}, },
"GraphSettings": { "GraphSettings": {
"properties": { "properties": {
@@ -8613,26 +8656,22 @@
"input_default": { "input_default": {
"additionalProperties": true, "additionalProperties": true,
"type": "object", "type": "object",
"title": "Input Default", "title": "Input Default"
"default": {}
}, },
"metadata": { "metadata": {
"additionalProperties": true, "additionalProperties": true,
"type": "object", "type": "object",
"title": "Metadata", "title": "Metadata"
"default": {}
}, },
"input_links": { "input_links": {
"items": { "$ref": "#/components/schemas/Link" }, "items": { "$ref": "#/components/schemas/Link" },
"type": "array", "type": "array",
"title": "Input Links", "title": "Input Links"
"default": []
}, },
"output_links": { "output_links": {
"items": { "$ref": "#/components/schemas/Link" }, "items": { "$ref": "#/components/schemas/Link" },
"type": "array", "type": "array",
"title": "Output Links", "title": "Output Links"
"default": []
} }
}, },
"type": "object", "type": "object",
@@ -8712,26 +8751,22 @@
"input_default": { "input_default": {
"additionalProperties": true, "additionalProperties": true,
"type": "object", "type": "object",
"title": "Input Default", "title": "Input Default"
"default": {}
}, },
"metadata": { "metadata": {
"additionalProperties": true, "additionalProperties": true,
"type": "object", "type": "object",
"title": "Metadata", "title": "Metadata"
"default": {}
}, },
"input_links": { "input_links": {
"items": { "$ref": "#/components/schemas/Link" }, "items": { "$ref": "#/components/schemas/Link" },
"type": "array", "type": "array",
"title": "Input Links", "title": "Input Links"
"default": []
}, },
"output_links": { "output_links": {
"items": { "$ref": "#/components/schemas/Link" }, "items": { "$ref": "#/components/schemas/Link" },
"type": "array", "type": "array",
"title": "Output Links", "title": "Output Links"
"default": []
}, },
"graph_id": { "type": "string", "title": "Graph Id" }, "graph_id": { "type": "string", "title": "Graph Id" },
"graph_version": { "type": "integer", "title": "Graph Version" }, "graph_version": { "type": "integer", "title": "Graph Version" },

View File

@@ -362,25 +362,14 @@ export type GraphMeta = {
user_id: UserID; user_id: UserID;
version: number; version: number;
is_active: boolean; is_active: boolean;
created_at: Date;
name: string; name: string;
description: string; description: string;
instructions?: string | null; instructions?: string | null;
recommended_schedule_cron: string | null; recommended_schedule_cron: string | null;
forked_from_id?: GraphID | null; forked_from_id?: GraphID | null;
forked_from_version?: number | null; forked_from_version?: number | null;
input_schema: GraphInputSchema; };
output_schema: GraphOutputSchema;
credentials_input_schema: CredentialsInputSchema;
} & (
| {
has_external_trigger: true;
trigger_setup_info: GraphTriggerInfo;
}
| {
has_external_trigger: false;
trigger_setup_info: null;
}
);
export type GraphID = Brand<string, "GraphID">; export type GraphID = Brand<string, "GraphID">;
@@ -447,11 +436,22 @@ export type GraphTriggerInfo = {
/* Mirror of backend/data/graph.py:Graph */ /* Mirror of backend/data/graph.py:Graph */
export type Graph = GraphMeta & { export type Graph = GraphMeta & {
created_at: Date;
nodes: Node[]; nodes: Node[];
links: Link[]; links: Link[];
sub_graphs: Omit<Graph, "sub_graphs">[]; // Flattened sub-graphs sub_graphs: Omit<Graph, "sub_graphs">[]; // Flattened sub-graphs
}; input_schema: GraphInputSchema;
output_schema: GraphOutputSchema;
credentials_input_schema: CredentialsInputSchema;
} & (
| {
has_external_trigger: true;
trigger_setup_info: GraphTriggerInfo;
}
| {
has_external_trigger: false;
trigger_setup_info: null;
}
);
export type GraphUpdateable = Omit< export type GraphUpdateable = Omit<
Graph, Graph,

View File

@@ -1,325 +0,0 @@
# Workspace & Media File Architecture
This document describes the architecture for handling user files in the AutoGPT Platform, covering persistent user storage (Workspace) and ephemeral media processing pipelines.
## Overview
The platform has two distinct file-handling layers:
| Layer | Purpose | Persistence | Scope |
|-------|---------|-------------|-------|
| **Workspace** | Long-term user file storage | Persistent (DB + GCS/local) | Per-user, session-scoped access |
| **Media Pipeline** | Ephemeral file processing for blocks | Temporary (local disk) | Per-execution |
## Database Models
### UserWorkspace
Represents a user's file storage space. Created on-demand (one per user).
```prisma
model UserWorkspace {
id String @id @default(uuid())
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
userId String @unique
Files UserWorkspaceFile[]
}
```
**Key points:**
- One workspace per user (enforced by `@unique` on `userId`)
- Created lazily via `get_or_create_workspace()`
- Uses upsert to handle race conditions
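A minimal sketch of this lazy-creation pattern, assuming the Prisma Python client; the real `get_or_create_workspace()` lives in `backend/data/workspace.py` and its exact signature may differ:
```python
from prisma import Prisma

db = Prisma()  # assumes db.connect() is called at startup

async def get_or_create_workspace(user_id: str):
    # Upsert sidesteps the create/create race between concurrent requests:
    # the @unique constraint on userId turns the losing create into a no-op update.
    return await db.userworkspace.upsert(
        where={"userId": user_id},
        data={"create": {"userId": user_id}, "update": {}},
    )
```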
### UserWorkspaceFile
Represents a file stored in a user's workspace.
```prisma
model UserWorkspaceFile {
id String @id @default(uuid())
workspaceId String
name String // User-visible filename
path String // Virtual path (e.g., "/sessions/abc123/image.png")
storagePath String // Actual storage path (gcs://... or local://...)
mimeType String
sizeBytes BigInt
checksum String? // SHA256 for integrity
isDeleted Boolean @default(false)
deletedAt DateTime?
metadata Json @default("{}")
@@unique([workspaceId, path]) // Enforce unique paths within workspace
}
```
**Key points:**
- `path` is a virtual path for organizing files (not an actual filesystem path)
- `storagePath` contains the actual GCS or local storage location
- Soft-delete pattern: `isDeleted` flag with `deletedAt` timestamp
- Path is modified on delete to free up the virtual path for reuse
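A hedged sketch of the soft-delete flow; the path-mangling scheme shown is an illustrative assumption, not the real implementation:
```python
from datetime import datetime, timezone

from prisma import Prisma

db = Prisma()

async def soft_delete_file(file_id: str):
    file = await db.userworkspacefile.find_unique(where={"id": file_id})
    await db.userworkspacefile.update(
        where={"id": file_id},
        data={
            "isDeleted": True,
            "deletedAt": datetime.now(timezone.utc),
            # Rewrite the virtual path so a new file can reuse it without
            # tripping the @@unique([workspaceId, path]) constraint.
            "path": f"{file.path}#deleted-{file_id}",  # hypothetical scheme
        },
    )
```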
---
## WorkspaceManager
**Location:** `backend/util/workspace.py`
High-level API for workspace file operations. Combines storage backend operations with database record management.
### Initialization
```python
from backend.util.workspace import WorkspaceManager
# Basic usage
manager = WorkspaceManager(user_id="user-123", workspace_id="ws-456")
# With session scoping (CoPilot sessions)
manager = WorkspaceManager(
user_id="user-123",
workspace_id="ws-456",
session_id="session-789"
)
```
### Session Scoping
When `session_id` is provided, files are isolated to `/sessions/{session_id}/`:
```python
# With session_id="abc123":
manager.write_file(content, "image.png")
# → stored at /sessions/abc123/image.png
# Cross-session access is explicit:
manager.read_file("/sessions/other-session/file.txt") # Works
```
**Why session scoping?**
- CoPilot conversations need file isolation
- Prevents file collisions between concurrent sessions
- Allows session cleanup without affecting other sessions
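The path resolution itself reduces to a small helper like the following sketch (the helper name is hypothetical; the real logic lives inside `WorkspaceManager`):
```python
def resolve_virtual_path(filename: str, session_id: str | None) -> str:
    # With a session, files land under /sessions/{session_id}/;
    # without one, they go to the workspace root.
    if session_id:
        return f"/sessions/{session_id}/{filename}"
    return f"/{filename}"
```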
### Core Methods
| Method | Description |
|--------|-------------|
| `write_file(content, filename, path?, mime_type?, overwrite?)` | Write file to workspace |
| `read_file(path)` | Read file by virtual path |
| `read_file_by_id(file_id)` | Read file by ID |
| `list_files(path?, limit?, offset?, include_all_sessions?)` | List files |
| `delete_file(file_id)` | Soft-delete a file |
| `get_download_url(file_id, expires_in?)` | Get signed download URL |
| `get_file_info(file_id)` | Get file metadata |
| `get_file_count(path?, include_all_sessions?)` | Count files |
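A usage sketch tying these methods together; argument and attribute names follow the table and the Prisma model, and may not match the exact signatures:
```python
from backend.util.workspace import WorkspaceManager

async def demo():
    manager = WorkspaceManager(user_id="user-123", workspace_id="ws-456")

    file = await manager.write_file(content=b"hello", filename="notes.txt")
    data = await manager.read_file(file.path)              # -> b"hello"
    for f in await manager.list_files(limit=20):
        print(f.name, f.sizeBytes)
    url = await manager.get_download_url(file.id, expires_in=3600)
    await manager.delete_file(file.id)                     # soft delete
```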
### Storage Backends
WorkspaceManager delegates to `WorkspaceStorageBackend`:
| Backend | When Used | Storage Path Format |
|---------|-----------|---------------------|
| `GCSWorkspaceStorage` | `media_gcs_bucket_name` is configured | `gcs://bucket/workspaces/{ws_id}/{file_id}/{filename}` |
| `LocalWorkspaceStorage` | No GCS bucket configured | `local://{ws_id}/{file_id}/{filename}` |
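Hypothetical selection logic implied by the table; the real factory lives in `backend/util/workspace_storage.py`, and the constructor arguments here are assumptions:
```python
from backend.util.workspace_storage import (
    GCSWorkspaceStorage,
    LocalWorkspaceStorage,
    WorkspaceStorageBackend,
)

def get_storage_backend(config) -> WorkspaceStorageBackend:
    # GCS wins whenever a bucket is configured; otherwise fall back to disk.
    if config.media_gcs_bucket_name:
        return GCSWorkspaceStorage(bucket=config.media_gcs_bucket_name)  # arg name assumed
    return LocalWorkspaceStorage(root=config.workspace_storage_dir)      # arg name assumed
```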
---
## store_media_file()
**Location:** `backend/util/file.py`
The media normalization pipeline. Handles various input types and normalizes them for processing or output.
### Purpose
Blocks receive files in many formats (URLs, data URIs, workspace references, local paths). `store_media_file()` normalizes these to a consistent format based on what the block needs.
### Input Types Handled
| Input Format | Example | How It's Processed |
|--------------|---------|-------------------|
| Data URI | `data:image/png;base64,iVBOR...` | Decoded, virus scanned, written locally |
| HTTP(S) URL | `https://example.com/image.png` | Downloaded, virus scanned, written locally |
| Workspace URI | `workspace://abc123` or `workspace:///path/to/file` | Read from workspace, virus scanned, written locally |
| Cloud path | `gcs://bucket/path` | Downloaded, virus scanned, written locally |
| Local path | `image.png` | Verified to exist in exec_file directory |
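A sketch of the dispatch the table implies; `store_media_file()`'s real internals may branch differently:
```python
def classify_media_input(file: str) -> str:
    # Prefix checks mirror the input formats in the table above.
    if file.startswith("data:"):
        return "data_uri"       # decode + scan + write locally
    if file.startswith(("http://", "https://")):
        return "url"            # download + scan + write locally
    if file.startswith("workspace://"):
        return "workspace"      # read from workspace + scan + write locally
    if file.startswith("gcs://"):
        return "cloud"          # download + scan + write locally
    return "local"              # must already exist in the exec_file dir
```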
### Return Formats
The `return_format` parameter determines what you get back:
```python
from backend.util.file import store_media_file
# For local processing (ffmpeg, MoviePy, PIL)
local_path = await store_media_file(
file=input_file,
execution_context=ctx,
return_format="for_local_processing"
)
# Returns: "image.png" (relative path in exec_file dir)
# For external APIs (Replicate, OpenAI, etc.)
data_uri = await store_media_file(
file=input_file,
execution_context=ctx,
return_format="for_external_api"
)
# Returns: "data:image/png;base64,iVBOR..."
# For block output (adapts to execution context)
output = await store_media_file(
file=input_file,
execution_context=ctx,
return_format="for_block_output"
)
# In CoPilot: Returns "workspace://file-id#image/png"
# In graphs: Returns "data:image/png;base64,..."
```
### Execution Context
`store_media_file()` requires an `ExecutionContext` with:
- `graph_exec_id` - Required for temp file location
- `user_id` - Required for workspace access
- `workspace_id` - Optional; enables workspace features
- `session_id` - Optional; for session scoping in CoPilot
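A minimal sketch of the fields involved; the real `ExecutionContext` carries more than is shown here:
```python
from dataclasses import dataclass
from typing import Optional

@dataclass
class ExecutionContext:
    graph_exec_id: str                  # anchors the /tmp/exec_file/{exec_id}/ dir
    user_id: str                        # workspace ownership / access checks
    workspace_id: Optional[str] = None  # enables workspace:// handling
    session_id: Optional[str] = None    # CoPilot session scoping
```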
---
## Responsibility Boundaries
### Virus Scanning
| Component | Scans? | Notes |
|-----------|--------|-------|
| `store_media_file()` | ✅ Yes | Scans **all** content before writing to local disk |
| `WorkspaceManager.write_file()` | ✅ Yes | Scans content before persisting |
**Scanning happens at:**
1. `store_media_file()` — scans everything it downloads/decodes
2. `WorkspaceManager.write_file()` — scans before persistence
Tools like `WriteWorkspaceFileTool` don't need to scan because `WorkspaceManager.write_file()` handles it.
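A hedged sketch of the ordering inside `WorkspaceManager.write_file()`; apart from `scan_content_safe()`, the collaborator calls are hypothetical:
```python
from backend.util.virus_scanner import scan_content_safe

async def write_file(storage, db, content: bytes, filename: str):
    # Scan first, persist second: nothing touches storage or the DB unless
    # the content comes back clean (the scan is assumed to raise on infection).
    await scan_content_safe(content, filename=filename)  # kwarg name assumed
    storage_path = await storage.store(content, filename)        # hypothetical backend call
    return await db.create_file_record(filename, storage_path)   # hypothetical DB helper
```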
### Persistence
| Component | Persists To | Lifecycle |
|-----------|-------------|-----------|
| `store_media_file()` | Temp dir (`/tmp/exec_file/{exec_id}/`) | Cleaned after execution |
| `WorkspaceManager` | GCS or local storage + DB | Persistent until deleted |
**Automatic cleanup:** `clean_exec_files(graph_exec_id)` removes temp files after execution completes.
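A sketch of the cleanup contract, assuming `clean_exec_files()` is awaitable; the `finally` ensures temp files are removed even when execution fails:
```python
from backend.util.file import clean_exec_files

async def execute_graph(graph_exec_id: str):
    try:
        ...  # run nodes; blocks may drop temp files via store_media_file()
    finally:
        # Drops everything under /tmp/exec_file/{graph_exec_id}/
        await clean_exec_files(graph_exec_id)
```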
---
## Decision Tree: WorkspaceManager vs store_media_file
```
┌─────────────────────────────────────────────────────┐
│        What do you need to do with the file?        │
└─────────────────────────────────────────────────────┘
              ┌─────────────┴─────────────┐
              ▼                           ▼
     Process in a block         Store for user access
     (ffmpeg, PIL, etc.)       (CoPilot files, uploads)
              │                           │
              ▼                           ▼
     store_media_file()           WorkspaceManager
     with appropriate
     return_format
        ┌─────┴───────────┐
        ▼                 ▼
 "for_local_         "for_block_
  processing"         output"
        │                 │
        ▼                 ▼
  Get local          Auto-saves to
  path for           workspace in
  tools              CoPilot context
```
### Quick Reference
| Scenario | Use |
|----------|-----|
| Block needs to process a file with ffmpeg | `store_media_file(..., return_format="for_local_processing")` |
| Block needs to send file to external API | `store_media_file(..., return_format="for_external_api")` |
| Block returning a generated file | `store_media_file(..., return_format="for_block_output")` |
| API endpoint handling file upload | `WorkspaceManager.write_file()` (scans content itself) |
| API endpoint serving file download | `WorkspaceManager.get_download_url()` |
| Listing user's files | `WorkspaceManager.list_files()` |
---
## Key Files Reference
| File | Purpose |
|------|---------|
| `backend/data/workspace.py` | Database CRUD operations for UserWorkspace and UserWorkspaceFile |
| `backend/util/workspace.py` | `WorkspaceManager` class - high-level workspace API |
| `backend/util/workspace_storage.py` | Storage backends (GCS, local) and `WorkspaceStorageBackend` interface |
| `backend/util/file.py` | `store_media_file()` and media processing utilities |
| `backend/util/virus_scanner.py` | `VirusScannerService` and `scan_content_safe()` |
| `schema.prisma` | Database model definitions |
---
## Common Patterns
### Block Processing a User's File
```python
async def run(self, input_data, *, execution_context, **kwargs):
# Normalize input to local path
local_path = await store_media_file(
file=input_data.video,
execution_context=execution_context,
return_format="for_local_processing",
)
# Process with local tools
output_path = process_video(local_path)
# Return (auto-saves to workspace in CoPilot)
result = await store_media_file(
file=output_path,
execution_context=execution_context,
return_format="for_block_output",
)
yield "output", result
```
### API Upload Endpoint
```python
async def upload_file(file: UploadFile, user_id: str, workspace_id: str):
content = await file.read()
# write_file handles virus scanning
manager = WorkspaceManager(user_id, workspace_id)
workspace_file = await manager.write_file(
content=content,
filename=file.filename,
)
return {"file_id": workspace_file.id}
```
---
## Configuration
| Setting | Purpose | Default |
|---------|---------|---------|
| `media_gcs_bucket_name` | GCS bucket for workspace storage | None (uses local) |
| `workspace_storage_dir` | Local storage directory | `{app_data}/workspaces` |
| `max_file_size_mb` | Maximum file size in MB | 100 |
| `clamav_service_enabled` | Enable virus scanning | true |
| `clamav_service_host` | ClamAV daemon host | localhost |
| `clamav_service_port` | ClamAV daemon port | 3310 |
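For illustration, these settings map onto a pydantic-style settings object roughly as follows; field names mirror the table, and the defaults shown are simplified assumptions:
```python
from pydantic_settings import BaseSettings

class WorkspaceSettings(BaseSettings):
    media_gcs_bucket_name: str | None = None   # None -> local storage backend
    workspace_storage_dir: str = "workspaces"  # real default is {app_data}/workspaces
    max_file_size_mb: int = 100
    clamav_service_enabled: bool = True
    clamav_service_host: str = "localhost"
    clamav_service_port: int = 3310
```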