From 29ee85c86f0b427a3f9c98e2979c1e870bd0a38d Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 5 Feb 2026 22:38:32 -0600 Subject: [PATCH 1/5] fix: add virus scanning to WorkspaceManager.write_file() (#11990) ## Summary Adds virus scanning at the `WorkspaceManager.write_file()` layer for defense in depth. ## Problem Previously, virus scanning was only performed at entry points: - `store_media_file()` in `backend/util/file.py` - `WriteWorkspaceFileTool` in `backend/api/features/chat/tools/workspace_files.py` This created a trust boundary where any new caller of `WorkspaceManager.write_file()` would need to remember to scan first. ## Solution Add `scan_content_safe()` call directly in `WorkspaceManager.write_file()` before persisting to storage. This ensures all content is scanned regardless of the caller. ## Changes - Added import for `scan_content_safe` from `backend.util.virus_scanner` - Added virus scan call after file size validation, before storage ## Testing Existing tests should pass. The scan is a no-op in test environments where ClamAV isn't running. Closes https://linear.app/autogpt/issue/OPEN-2993 --- > [!NOTE] > **Medium Risk** > Introduces a new required async scan step in the workspace write path, which can add latency or cause new failures if the scanner/ClamAV is misconfigured or unavailable. > > **Overview** > Adds a **defense-in-depth** virus scan to `WorkspaceManager.write_file()` by invoking `scan_content_safe()` after file-size validation and before any storage/database persistence. > > This centralizes scanning so any caller writing workspace files gets the same malware check without relying on upstream entry points to remember to scan. > > Written by [Cursor Bugbot](https://cursor.com/dashboard?tab=bugbot) for commit 0f5ac68b92319b7fcc4c49625d1acd845bf87b13. This will update automatically on new commits. Configure [here](https://cursor.com/dashboard?tab=bugbot). --- autogpt_platform/backend/backend/util/workspace.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/autogpt_platform/backend/backend/util/workspace.py b/autogpt_platform/backend/backend/util/workspace.py index a2f1a61b9e..86413b640a 100644 --- a/autogpt_platform/backend/backend/util/workspace.py +++ b/autogpt_platform/backend/backend/util/workspace.py @@ -22,6 +22,7 @@ from backend.data.workspace import ( soft_delete_workspace_file, ) from backend.util.settings import Config +from backend.util.virus_scanner import scan_content_safe from backend.util.workspace_storage import compute_file_checksum, get_workspace_storage logger = logging.getLogger(__name__) @@ -187,6 +188,9 @@ class WorkspaceManager: f"{Config().max_file_size_mb}MB limit" ) + # Virus scan content before persisting (defense in depth) + await scan_content_safe(content, filename=filename) + # Determine path with session scoping if path is None: path = f"/{filename}" From e0fab7e34ec21641d07ce3f7caef5868271f2071 Mon Sep 17 00:00:00 2001 From: Otto Date: Fri, 6 Feb 2026 08:41:06 +0000 Subject: [PATCH 2/5] fix(frontend): Improve clarification answer message formatting (#11985) ## Summary Improves the auto-generated message format when users submit clarification answers in the agent generator. ## Before ``` I have the answers to your questions: keyword_1: User answer 1 keyword_2: User answer 2 Please proceed with creating the agent. ``` image ## After ``` **Here are my answers:** > What is the primary purpose? User answer 1 > What is the target audience? User answer 2 Please proceed with creating the agent. 
``` image ## Changes - Use human-readable question text instead of machine-readable keywords - Use blockquote format for questions (natural "quote and reply" pattern) - Use double newlines for proper Markdown paragraph breaks - Iterate over `message.questions` array to preserve original question order - Move handler inside conditional block for proper TypeScript type narrowing ## Why - The old format was ugly and hard to read (raw keywords, no line breaks) - The new format uses a natural "quoting and replying" pattern - Better readability for both users and the LLM (verified: backend does NOT parse keywords) ## Linear Ticket Fixes [SECRT-1822](https://linear.app/autogpt/issue/SECRT-1822) ## Testing - [ ] Trigger agent creation that requires clarifying questions - [ ] Fill out the form and submit - [ ] Verify message appears with new blockquote format - [ ] Verify questions appear in original order - [ ] Verify agent generation proceeds correctly Co-authored-by: Toran Bruce Richards --- .../components/ChatMessage/ChatMessage.tsx | 28 +++++++++++-------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx index 851c3b33e8..44dae40eb4 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx @@ -102,18 +102,6 @@ export function ChatMessage({ } } - function handleClarificationAnswers(answers: Record) { - if (onSendMessage) { - const contextMessage = Object.entries(answers) - .map(([keyword, answer]) => `${keyword}: ${answer}`) - .join("\n"); - - onSendMessage( - `I have the answers to your questions:\n\n${contextMessage}\n\nPlease proceed with creating the agent.`, - ); - } - } - const handleCopy = useCallback( async function handleCopy() { if (message.type !== "message") return; @@ -162,6 +150,22 @@ export function ChatMessage({ .slice(index + 1) .some((m) => m.type === "message" && m.role === "user"); + const handleClarificationAnswers = (answers: Record) => { + if (onSendMessage) { + // Iterate over questions (preserves original order) instead of answers + const contextMessage = message.questions + .map((q) => { + const answer = answers[q.keyword] || ""; + return `> ${q.question}\n\n${answer}`; + }) + .join("\n\n"); + + onSendMessage( + `**Here are my answers:**\n\n${contextMessage}\n\nPlease proceed with creating the agent.`, + ); + } + }; + return ( Date: Fri, 6 Feb 2026 12:23:32 +0100 Subject: [PATCH 3/5] fix(frontend): Revert ThinkingMessage progress bar delay to original values (#11993) --- .../Chat/components/ThinkingMessage/ThinkingMessage.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ThinkingMessage/ThinkingMessage.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ThinkingMessage/ThinkingMessage.tsx index 34018f0292..2202705e65 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ThinkingMessage/ThinkingMessage.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ThinkingMessage/ThinkingMessage.tsx @@ -19,13 +19,13 @@ export function ThinkingMessage({ className }: ThinkingMessageProps) { if (timerRef.current === null) { timerRef.current = setTimeout(() => { 
setShowSlowLoader(true); - }, 3000); + }, 8000); } if (coffeeTimerRef.current === null) { coffeeTimerRef.current = setTimeout(() => { setShowCoffeeMessage(true); - }, 8000); + }, 10000); } return () => { From 3d1cd03fc8d9d30b61a01f859163d0cef904ccb0 Mon Sep 17 00:00:00 2001 From: Ubbe Date: Fri, 6 Feb 2026 19:17:25 +0700 Subject: [PATCH 4/5] ci(frontend): disable chromatic for this month (#11994) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Changes 🏗️ - we react the max snapshots quota and don't wanna upgrade - make it run (when re-enabled) on `src/components` changes only to reduce snapshots ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] CI hope for the best --- .github/workflows/platform-frontend-ci.yml | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/.github/workflows/platform-frontend-ci.yml b/.github/workflows/platform-frontend-ci.yml index 499bb03170..14676a6a1f 100644 --- a/.github/workflows/platform-frontend-ci.yml +++ b/.github/workflows/platform-frontend-ci.yml @@ -27,11 +27,20 @@ jobs: runs-on: ubuntu-latest outputs: cache-key: ${{ steps.cache-key.outputs.key }} + components-changed: ${{ steps.filter.outputs.components }} steps: - name: Checkout repository uses: actions/checkout@v4 + - name: Check for component changes + uses: dorny/paths-filter@v3 + id: filter + with: + filters: | + components: + - 'autogpt_platform/frontend/src/components/**' + - name: Set up Node.js uses: actions/setup-node@v4 with: @@ -90,8 +99,11 @@ jobs: chromatic: runs-on: ubuntu-latest needs: setup - # Only run on dev branch pushes or PRs targeting dev - if: github.ref == 'refs/heads/dev' || github.base_ref == 'dev' + # Disabled: to re-enable, remove 'false &&' from the condition below + if: >- + false + && (github.ref == 'refs/heads/dev' || github.base_ref == 'dev') + && needs.setup.outputs.components-changed == 'true' steps: - name: Checkout repository From 8fddc9d71fa4c8d37f55a6124858ecb1ef43db20 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Fri, 6 Feb 2026 20:13:21 +0100 Subject: [PATCH 5/5] fix(backend): Reduce `GET /api/graphs` expense + latency (#11986) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [SECRT-1896: Fix crazy `GET /api/graphs` latency (P95 = 107s)](https://linear.app/autogpt/issue/SECRT-1896) These changes should decrease latency of this endpoint by ~~60-65%~~ a lot. 
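Of the changes listed below, the model refactor is the structural one. A minimal sketch of how the graph models relate after this PR (class names match the diff further down in `backend/data/graph.py`; fields are trimmed and plain `dict`s stand in for `Node`/`Link`, so this is illustrative only, not the actual implementation):

```
from pydantic import BaseModel, Field


class GraphBaseMeta(BaseModel):
    # Core metadata shared by every graph model
    name: str
    description: str
    version: int = 1
    is_active: bool = True


class GraphMeta(GraphBaseMeta):
    # Lightweight listing model: adds identity/ownership, no nodes or links
    id: str
    user_id: str


class BaseGraph(GraphBaseMeta):
    # Graph structure: nodes + links (computed I/O schemas live here)
    nodes: list[dict] = Field(default_factory=list)
    links: list[dict] = Field(default_factory=list)


class Graph(BaseGraph):
    # Creatable graph, as accepted by the create/update endpoints
    sub_graphs: list[BaseGraph] = Field(default_factory=list)


class GraphModel(Graph, GraphMeta):
    # Persisted graph: structure + ownership + expensive computed fields
    pass


class GraphModelWithoutNodes(GraphModel):
    # Same computed fields, but structure excluded from serialization
    nodes: list[dict] = Field(default_factory=list, exclude=True)
    links: list[dict] = Field(default_factory=list, exclude=True)
    sub_graphs: list[BaseGraph] = Field(default_factory=list, exclude=True)
```

The real definitions (with `from_db()` constructors and the computed schema fields) are in the `backend/data/graph.py` hunks below.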
### Changes 🏗️ - Make `Graph.credentials_input_schema` cheaper by avoiding constructing a new `BlockSchema` subclass - Strip down `GraphMeta` - drop all computed fields - Replace with either `GraphModel` or `GraphModelWithoutNodes` wherever those computed fields are used - Simplify usage in `list_graphs_paginated` and `fetch_graph_from_store_slug` - Refactor and clarify relationships between the different graph models - Split `BaseGraph` into `GraphBaseMeta` + `BaseGraph` - Strip down `Graph` - move `credentials_input_schema` and `aggregate_credentials_inputs` to `GraphModel` - Refactor to eliminate double `aggregate_credentials_inputs()` call in `credentials_input_schema` call tree - Add `GraphModelWithoutNodes` (similar to current `GraphMeta`) ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] `GET /api/graphs` works as it should - [x] Running a graph succeeds - [x] Adding a sub-agent in the Builder works as it should --- .../backend/api/features/chat/tools/utils.py | 16 +- .../backend/api/features/library/db.py | 2 +- .../backend/backend/api/features/store/db.py | 27 +- .../backend/api/features/store/image_gen.py | 37 +- .../backend/api/features/store/routes.py | 2 +- .../backend/backend/data/block.py | 4 +- .../backend/backend/data/graph.py | 327 +++++++++++------- .../backend/backend/data/model.py | 17 +- .../backend/backend/executor/utils.py | 2 +- .../backend/snapshots/grph_single | 1 - autogpt_platform/backend/snapshots/grphs_all | 22 +- .../AgentOnboardingCredentials.tsx | 4 +- .../AgentOnboardingCredentials/helpers.ts | 4 +- .../(no-navbar)/onboarding/5-run/helpers.ts | 6 +- .../legacy-builder/BlocksControl.tsx | 48 ++- .../components/legacy-builder/Flow/Flow.tsx | 208 +++++------ .../legacy-builder/RunnerInputUI.tsx | 4 +- .../legacy-builder/RunnerUIWrapper.tsx | 4 +- .../build/hooks/useSubAgentUpdate/helpers.ts | 4 +- .../build/hooks/useSubAgentUpdate/types.ts | 13 +- .../useSubAgentUpdate/useSubAgentUpdate.ts | 40 ++- .../app/(platform)/build/stores/graphStore.ts | 2 +- .../components/agent-run-draft-view.tsx | 4 +- .../agent-schedule-details-view.tsx | 4 +- .../frontend/src/app/api/openapi.json | 199 ++++++----- .../src/lib/autogpt-server-api/types.ts | 30 +- 26 files changed, 612 insertions(+), 419 deletions(-) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/utils.py b/autogpt_platform/backend/backend/api/features/chat/tools/utils.py index bd25594b8a..cda0914809 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/utils.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/utils.py @@ -6,7 +6,6 @@ from typing import Any from backend.api.features.library import db as library_db from backend.api.features.library import model as library_model from backend.api.features.store import db as store_db -from backend.data import graph as graph_db from backend.data.graph import GraphModel from backend.data.model import ( CredentialsFieldInfo, @@ -44,14 +43,8 @@ async def fetch_graph_from_store_slug( return None, None # Get the graph from store listing version - graph_meta = await store_db.get_available_graph( - store_agent.store_listing_version_id - ) - graph = await graph_db.get_graph( - graph_id=graph_meta.id, - version=graph_meta.version, - user_id=None, # Public access - include_subgraphs=True, + graph = await store_db.get_available_graph( + store_agent.store_listing_version_id, 
hide_nodes=False ) return graph, store_agent @@ -128,7 +121,7 @@ def build_missing_credentials_from_graph( return { field_key: _serialize_missing_credential(field_key, field_info) - for field_key, (field_info, _node_fields) in aggregated_fields.items() + for field_key, (field_info, _, _) in aggregated_fields.items() if field_key not in matched_keys } @@ -269,7 +262,8 @@ async def match_user_credentials_to_graph( # provider is in the set of acceptable providers. for credential_field_name, ( credential_requirements, - _node_fields, + _, + _, ) in aggregated_creds.items(): # Find first matching credential by provider, type, and scopes matching_cred = next( diff --git a/autogpt_platform/backend/backend/api/features/library/db.py b/autogpt_platform/backend/backend/api/features/library/db.py index 6bebfb573c..32479c18a3 100644 --- a/autogpt_platform/backend/backend/api/features/library/db.py +++ b/autogpt_platform/backend/backend/api/features/library/db.py @@ -374,7 +374,7 @@ async def get_library_agent_by_graph_id( async def add_generated_agent_image( - graph: graph_db.BaseGraph, + graph: graph_db.GraphBaseMeta, user_id: str, library_agent_id: str, ) -> Optional[prisma.models.LibraryAgent]: diff --git a/autogpt_platform/backend/backend/api/features/store/db.py b/autogpt_platform/backend/backend/api/features/store/db.py index 850a2bc3e9..87b72d6a9c 100644 --- a/autogpt_platform/backend/backend/api/features/store/db.py +++ b/autogpt_platform/backend/backend/api/features/store/db.py @@ -1,7 +1,7 @@ import asyncio import logging from datetime import datetime, timezone -from typing import Any, Literal +from typing import Any, Literal, overload import fastapi import prisma.enums @@ -11,8 +11,8 @@ import prisma.types from backend.data.db import transaction from backend.data.graph import ( - GraphMeta, GraphModel, + GraphModelWithoutNodes, get_graph, get_graph_as_admin, get_sub_graphs, @@ -334,7 +334,22 @@ async def get_store_agent_details( raise DatabaseError("Failed to fetch agent details") from e -async def get_available_graph(store_listing_version_id: str) -> GraphMeta: +@overload +async def get_available_graph( + store_listing_version_id: str, hide_nodes: Literal[False] +) -> GraphModel: ... + + +@overload +async def get_available_graph( + store_listing_version_id: str, hide_nodes: Literal[True] = True +) -> GraphModelWithoutNodes: ... 
+ + +async def get_available_graph( + store_listing_version_id: str, + hide_nodes: bool = True, +) -> GraphModelWithoutNodes | GraphModel: try: # Get avaialble, non-deleted store listing version store_listing_version = ( @@ -344,7 +359,7 @@ async def get_available_graph(store_listing_version_id: str) -> GraphMeta: "isAvailable": True, "isDeleted": False, }, - include={"AgentGraph": {"include": {"Nodes": True}}}, + include={"AgentGraph": {"include": AGENT_GRAPH_INCLUDE}}, ) ) @@ -354,7 +369,9 @@ async def get_available_graph(store_listing_version_id: str) -> GraphMeta: detail=f"Store listing version {store_listing_version_id} not found", ) - return GraphModel.from_db(store_listing_version.AgentGraph).meta() + return (GraphModelWithoutNodes if hide_nodes else GraphModel).from_db( + store_listing_version.AgentGraph + ) except Exception as e: logger.error(f"Error getting agent: {e}") diff --git a/autogpt_platform/backend/backend/api/features/store/image_gen.py b/autogpt_platform/backend/backend/api/features/store/image_gen.py index 87b7b601df..087a7895ba 100644 --- a/autogpt_platform/backend/backend/api/features/store/image_gen.py +++ b/autogpt_platform/backend/backend/api/features/store/image_gen.py @@ -16,7 +16,7 @@ from backend.blocks.ideogram import ( StyleType, UpscaleOption, ) -from backend.data.graph import BaseGraph +from backend.data.graph import GraphBaseMeta from backend.data.model import CredentialsMetaInput, ProviderName from backend.integrations.credentials_store import ideogram_credentials from backend.util.request import Requests @@ -34,14 +34,14 @@ class ImageStyle(str, Enum): DIGITAL_ART = "digital art" -async def generate_agent_image(agent: BaseGraph | AgentGraph) -> io.BytesIO: +async def generate_agent_image(agent: GraphBaseMeta | AgentGraph) -> io.BytesIO: if settings.config.use_agent_image_generation_v2: return await generate_agent_image_v2(graph=agent) else: return await generate_agent_image_v1(agent=agent) -async def generate_agent_image_v2(graph: BaseGraph | AgentGraph) -> io.BytesIO: +async def generate_agent_image_v2(graph: GraphBaseMeta | AgentGraph) -> io.BytesIO: """ Generate an image for an agent using Ideogram model. Returns: @@ -54,14 +54,17 @@ async def generate_agent_image_v2(graph: BaseGraph | AgentGraph) -> io.BytesIO: description = f"{name} ({graph.description})" if graph.description else name prompt = ( - f"Create a visually striking retro-futuristic vector pop art illustration prominently featuring " - f'"{name}" in bold typography. The image clearly and literally depicts a {description}, ' - f"along with recognizable objects directly associated with the primary function of a {name}. " - f"Ensure the imagery is concrete, intuitive, and immediately understandable, clearly conveying the " - f"purpose of a {name}. Maintain vibrant, limited-palette colors, sharp vector lines, geometric " - f"shapes, flat illustration techniques, and solid colors without gradients or shading. Preserve a " - f"retro-futuristic aesthetic influenced by mid-century futurism and 1960s psychedelia, " - f"prioritizing clear visual storytelling and thematic clarity above all else." + "Create a visually striking retro-futuristic vector pop art illustration " + f'prominently featuring "{name}" in bold typography. The image clearly and ' + f"literally depicts a {description}, along with recognizable objects directly " + f"associated with the primary function of a {name}. 
" + f"Ensure the imagery is concrete, intuitive, and immediately understandable, " + f"clearly conveying the purpose of a {name}. " + "Maintain vibrant, limited-palette colors, sharp vector lines, " + "geometric shapes, flat illustration techniques, and solid colors " + "without gradients or shading. Preserve a retro-futuristic aesthetic " + "influenced by mid-century futurism and 1960s psychedelia, " + "prioritizing clear visual storytelling and thematic clarity above all else." ) custom_colors = [ @@ -99,12 +102,12 @@ async def generate_agent_image_v2(graph: BaseGraph | AgentGraph) -> io.BytesIO: return io.BytesIO(response.content) -async def generate_agent_image_v1(agent: BaseGraph | AgentGraph) -> io.BytesIO: +async def generate_agent_image_v1(agent: GraphBaseMeta | AgentGraph) -> io.BytesIO: """ Generate an image for an agent using Flux model via Replicate API. Args: - agent (Graph): The agent to generate an image for + agent (GraphBaseMeta | AgentGraph): The agent to generate an image for Returns: io.BytesIO: The generated image as bytes @@ -114,7 +117,13 @@ async def generate_agent_image_v1(agent: BaseGraph | AgentGraph) -> io.BytesIO: raise ValueError("Missing Replicate API key in settings") # Construct prompt from agent details - prompt = f"Create a visually engaging app store thumbnail for the AI agent that highlights what it does in a clear and captivating way:\n- **Name**: {agent.name}\n- **Description**: {agent.description}\nFocus on showcasing its core functionality with an appealing design." + prompt = ( + "Create a visually engaging app store thumbnail for the AI agent " + "that highlights what it does in a clear and captivating way:\n" + f"- **Name**: {agent.name}\n" + f"- **Description**: {agent.description}\n" + f"Focus on showcasing its core functionality with an appealing design." + ) # Set up Replicate client client = ReplicateClient(api_token=settings.secrets.replicate_api_key) diff --git a/autogpt_platform/backend/backend/api/features/store/routes.py b/autogpt_platform/backend/backend/api/features/store/routes.py index 2f3c7bfb04..d93fe60f15 100644 --- a/autogpt_platform/backend/backend/api/features/store/routes.py +++ b/autogpt_platform/backend/backend/api/features/store/routes.py @@ -278,7 +278,7 @@ async def get_agent( ) async def get_graph_meta_by_store_listing_version_id( store_listing_version_id: str, -) -> backend.data.graph.GraphMeta: +) -> backend.data.graph.GraphModelWithoutNodes: """ Get Agent Graph from Store Listing Version ID. 
""" diff --git a/autogpt_platform/backend/backend/data/block.py b/autogpt_platform/backend/backend/data/block.py index eb9360b037..f67134ceb3 100644 --- a/autogpt_platform/backend/backend/data/block.py +++ b/autogpt_platform/backend/backend/data/block.py @@ -246,7 +246,9 @@ class BlockSchema(BaseModel): f"is not of type {CredentialsMetaInput.__name__}" ) - credentials_fields[field_name].validate_credentials_field_schema(cls) + CredentialsMetaInput.validate_credentials_field_schema( + cls.get_field_schema(field_name), field_name + ) elif field_name in credentials_fields: raise KeyError( diff --git a/autogpt_platform/backend/backend/data/graph.py b/autogpt_platform/backend/backend/data/graph.py index ee6cd2e4b0..0dc3eea887 100644 --- a/autogpt_platform/backend/backend/data/graph.py +++ b/autogpt_platform/backend/backend/data/graph.py @@ -3,7 +3,7 @@ import logging import uuid from collections import defaultdict from datetime import datetime, timezone -from typing import TYPE_CHECKING, Annotated, Any, Literal, Optional, cast +from typing import TYPE_CHECKING, Annotated, Any, Literal, Optional, Self, cast from prisma.enums import SubmissionStatus from prisma.models import ( @@ -20,7 +20,7 @@ from prisma.types import ( AgentNodeLinkCreateInput, StoreListingVersionWhereInput, ) -from pydantic import BaseModel, BeforeValidator, Field, create_model +from pydantic import BaseModel, BeforeValidator, Field from pydantic.fields import computed_field from backend.blocks.agent import AgentExecutorBlock @@ -30,7 +30,6 @@ from backend.data.db import prisma as db from backend.data.dynamic_fields import is_tool_pin, sanitize_pin_name from backend.data.includes import MAX_GRAPH_VERSIONS_FETCH from backend.data.model import ( - CredentialsField, CredentialsFieldInfo, CredentialsMetaInput, is_credentials_field_name, @@ -45,7 +44,6 @@ from .block import ( AnyBlockSchema, Block, BlockInput, - BlockSchema, BlockType, EmptySchema, get_block, @@ -113,10 +111,12 @@ class Link(BaseDbModel): class Node(BaseDbModel): block_id: str - input_default: BlockInput = {} # dict[input_name, default_value] - metadata: dict[str, Any] = {} - input_links: list[Link] = [] - output_links: list[Link] = [] + input_default: BlockInput = Field( # dict[input_name, default_value] + default_factory=dict + ) + metadata: dict[str, Any] = Field(default_factory=dict) + input_links: list[Link] = Field(default_factory=list) + output_links: list[Link] = Field(default_factory=list) @property def credentials_optional(self) -> bool: @@ -221,18 +221,33 @@ class NodeModel(Node): return result -class BaseGraph(BaseDbModel): +class GraphBaseMeta(BaseDbModel): + """ + Shared base for `GraphMeta` and `BaseGraph`, with core graph metadata fields. + """ + version: int = 1 is_active: bool = True name: str description: str instructions: str | None = None recommended_schedule_cron: str | None = None - nodes: list[Node] = [] - links: list[Link] = [] forked_from_id: str | None = None forked_from_version: int | None = None + +class BaseGraph(GraphBaseMeta): + """ + Graph with nodes, links, and computed I/O schema fields. + + Used to represent sub-graphs within a `Graph`. Contains the full graph + structure including nodes and links, plus computed fields for schemas + and trigger info. Does NOT include user_id or created_at (see GraphModel). 
+ """ + + nodes: list[Node] = Field(default_factory=list) + links: list[Link] = Field(default_factory=list) + @computed_field @property def input_schema(self) -> dict[str, Any]: @@ -361,44 +376,79 @@ class GraphTriggerInfo(BaseModel): class Graph(BaseGraph): - sub_graphs: list[BaseGraph] = [] # Flattened sub-graphs + """Creatable graph model used in API create/update endpoints.""" + + sub_graphs: list[BaseGraph] = Field(default_factory=list) # Flattened sub-graphs + + +class GraphMeta(GraphBaseMeta): + """ + Lightweight graph metadata model representing an existing graph from the database, + for use in listings and summaries. + + Lacks `GraphModel`'s nodes, links, and expensive computed fields. + Use for list endpoints where full graph data is not needed and performance matters. + """ + + id: str # type: ignore + version: int # type: ignore + user_id: str + created_at: datetime + + @classmethod + def from_db(cls, graph: "AgentGraph") -> Self: + return cls( + id=graph.id, + version=graph.version, + is_active=graph.isActive, + name=graph.name or "", + description=graph.description or "", + instructions=graph.instructions, + recommended_schedule_cron=graph.recommendedScheduleCron, + forked_from_id=graph.forkedFromId, + forked_from_version=graph.forkedFromVersion, + user_id=graph.userId, + created_at=graph.createdAt, + ) + + +class GraphModel(Graph, GraphMeta): + """ + Full graph model representing an existing graph from the database. + + This is the primary model for working with persisted graphs. Includes all + graph data (nodes, links, sub_graphs) plus user ownership and timestamps. + Provides computed fields (input_schema, output_schema, etc.) used during + set-up (frontend) and execution (backend). + + Inherits from: + - `Graph`: provides structure (nodes, links, sub_graphs) and computed schemas + - `GraphMeta`: provides user_id, created_at for database records + """ + + nodes: list[NodeModel] = Field(default_factory=list) # type: ignore + + @property + def starting_nodes(self) -> list[NodeModel]: + outbound_nodes = {link.sink_id for link in self.links} + input_nodes = { + node.id for node in self.nodes if node.block.block_type == BlockType.INPUT + } + return [ + node + for node in self.nodes + if node.id not in outbound_nodes or node.id in input_nodes + ] + + @property + def webhook_input_node(self) -> NodeModel | None: # type: ignore + return cast(NodeModel, super().webhook_input_node) @computed_field @property def credentials_input_schema(self) -> dict[str, Any]: - schema = self._credentials_input_schema.jsonschema() - - # Determine which credential fields are required based on credentials_optional metadata graph_credentials_inputs = self.aggregate_credentials_inputs() - required_fields = [] - # Build a map of node_id -> node for quick lookup - all_nodes = {node.id: node for node in self.nodes} - for sub_graph in self.sub_graphs: - for node in sub_graph.nodes: - all_nodes[node.id] = node - - for field_key, ( - _field_info, - node_field_pairs, - ) in graph_credentials_inputs.items(): - # A field is required if ANY node using it has credentials_optional=False - is_required = False - for node_id, _field_name in node_field_pairs: - node = all_nodes.get(node_id) - if node and not node.credentials_optional: - is_required = True - break - - if is_required: - required_fields.append(field_key) - - schema["required"] = required_fields - return schema - - @property - def _credentials_input_schema(self) -> type[BlockSchema]: - graph_credentials_inputs = self.aggregate_credentials_inputs() 
logger.debug( f"Combined credentials input fields for graph #{self.id} ({self.name}): " f"{graph_credentials_inputs}" @@ -406,8 +456,8 @@ class Graph(BaseGraph): # Warn if same-provider credentials inputs can't be combined (= bad UX) graph_cred_fields = list(graph_credentials_inputs.values()) - for i, (field, keys) in enumerate(graph_cred_fields): - for other_field, other_keys in list(graph_cred_fields)[i + 1 :]: + for i, (field, keys, _) in enumerate(graph_cred_fields): + for other_field, other_keys, _ in list(graph_cred_fields)[i + 1 :]: if field.provider != other_field.provider: continue if ProviderName.HTTP in field.provider: @@ -423,31 +473,78 @@ class Graph(BaseGraph): f"keys: {keys} <> {other_keys}." ) - fields: dict[str, tuple[type[CredentialsMetaInput], CredentialsMetaInput]] = { - agg_field_key: ( - CredentialsMetaInput[ - Literal[tuple(field_info.provider)], # type: ignore - Literal[tuple(field_info.supported_types)], # type: ignore - ], - CredentialsField( - required_scopes=set(field_info.required_scopes or []), - discriminator=field_info.discriminator, - discriminator_mapping=field_info.discriminator_mapping, - discriminator_values=field_info.discriminator_values, - ), - ) - for agg_field_key, (field_info, _) in graph_credentials_inputs.items() - } + # Build JSON schema directly to avoid expensive create_model + validation overhead + properties = {} + required_fields = [] - return create_model( - self.name.replace(" ", "") + "CredentialsInputSchema", - __base__=BlockSchema, - **fields, # type: ignore - ) + for agg_field_key, ( + field_info, + _, + is_required, + ) in graph_credentials_inputs.items(): + providers = list(field_info.provider) + cred_types = list(field_info.supported_types) + + field_schema: dict[str, Any] = { + "credentials_provider": providers, + "credentials_types": cred_types, + "type": "object", + "properties": { + "id": {"title": "Id", "type": "string"}, + "title": { + "anyOf": [{"type": "string"}, {"type": "null"}], + "default": None, + "title": "Title", + }, + "provider": { + "title": "Provider", + "type": "string", + **( + {"enum": providers} + if len(providers) > 1 + else {"const": providers[0]} + ), + }, + "type": { + "title": "Type", + "type": "string", + **( + {"enum": cred_types} + if len(cred_types) > 1 + else {"const": cred_types[0]} + ), + }, + }, + "required": ["id", "provider", "type"], + } + + # Add other (optional) field info items + field_schema.update( + field_info.model_dump( + by_alias=True, + exclude_defaults=True, + exclude={"provider", "supported_types"}, # already included above + ) + ) + + # Ensure field schema is well-formed + CredentialsMetaInput.validate_credentials_field_schema( + field_schema, agg_field_key + ) + + properties[agg_field_key] = field_schema + if is_required: + required_fields.append(agg_field_key) + + return { + "type": "object", + "properties": properties, + "required": required_fields, + } def aggregate_credentials_inputs( self, - ) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]]]]: + ) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]], bool]]: """ Returns: dict[aggregated_field_key, tuple( @@ -455,13 +552,19 @@ class Graph(BaseGraph): (now includes discriminator_values from matching nodes) set[(node_id, field_name)]: Node credentials fields that are compatible with this aggregated field spec + bool: True if the field is required (any node has credentials_optional=False) )] """ # First collect all credential field data with input defaults - node_credential_data = [] + # Track 
(field_info, (node_id, field_name), is_required) for each credential field + node_credential_data: list[tuple[CredentialsFieldInfo, tuple[str, str]]] = [] + node_required_map: dict[str, bool] = {} # node_id -> is_required for graph in [self] + self.sub_graphs: for node in graph.nodes: + # Track if this node requires credentials (credentials_optional=False means required) + node_required_map[node.id] = not node.credentials_optional + for ( field_name, field_info, @@ -485,37 +588,21 @@ class Graph(BaseGraph): ) # Combine credential field info (this will merge discriminator_values automatically) - return CredentialsFieldInfo.combine(*node_credential_data) + combined = CredentialsFieldInfo.combine(*node_credential_data) - -class GraphModel(Graph): - user_id: str - nodes: list[NodeModel] = [] # type: ignore - - created_at: datetime - - @property - def starting_nodes(self) -> list[NodeModel]: - outbound_nodes = {link.sink_id for link in self.links} - input_nodes = { - node.id for node in self.nodes if node.block.block_type == BlockType.INPUT + # Add is_required flag to each aggregated field + # A field is required if ANY node using it has credentials_optional=False + return { + key: ( + field_info, + node_field_pairs, + any( + node_required_map.get(node_id, True) + for node_id, _ in node_field_pairs + ), + ) + for key, (field_info, node_field_pairs) in combined.items() } - return [ - node - for node in self.nodes - if node.id not in outbound_nodes or node.id in input_nodes - ] - - @property - def webhook_input_node(self) -> NodeModel | None: # type: ignore - return cast(NodeModel, super().webhook_input_node) - - def meta(self) -> "GraphMeta": - """ - Returns a GraphMeta object with metadata about the graph. - This is used to return metadata about the graph without exposing nodes and links. - """ - return GraphMeta.from_graph(self) def reassign_ids(self, user_id: str, reassign_graph_id: bool = False): """ @@ -799,13 +886,14 @@ class GraphModel(Graph): if is_static_output_block(link.source_id): link.is_static = True # Each value block output should be static. - @staticmethod - def from_db( + @classmethod + def from_db( # type: ignore[reportIncompatibleMethodOverride] + cls, graph: AgentGraph, for_export: bool = False, sub_graphs: list[AgentGraph] | None = None, - ) -> "GraphModel": - return GraphModel( + ) -> Self: + return cls( id=graph.id, user_id=graph.userId if not for_export else "", version=graph.version, @@ -831,17 +919,28 @@ class GraphModel(Graph): ], ) + def hide_nodes(self) -> "GraphModelWithoutNodes": + """ + Returns a copy of the `GraphModel` with nodes, links, and sub-graphs hidden + (excluded from serialization). They are still present in the model instance + so all computed fields (e.g. `credentials_input_schema`) still work. + """ + return GraphModelWithoutNodes.model_validate(self, from_attributes=True) -class GraphMeta(Graph): - user_id: str - # Easy work-around to prevent exposing nodes and links in the API response - nodes: list[NodeModel] = Field(default=[], exclude=True) # type: ignore - links: list[Link] = Field(default=[], exclude=True) +class GraphModelWithoutNodes(GraphModel): + """ + GraphModel variant that excludes nodes, links, and sub-graphs from serialization. - @staticmethod - def from_graph(graph: GraphModel) -> "GraphMeta": - return GraphMeta(**graph.model_dump()) + Used in contexts like the store where exposing internal graph structure + is not desired. Inherits all computed fields from GraphModel but marks + nodes and links as excluded from JSON output. 
+ """ + + nodes: list[NodeModel] = Field(default_factory=list, exclude=True) + links: list[Link] = Field(default_factory=list, exclude=True) + + sub_graphs: list[BaseGraph] = Field(default_factory=list, exclude=True) class GraphsPaginated(BaseModel): @@ -912,21 +1011,11 @@ async def list_graphs_paginated( where=where_clause, distinct=["id"], order={"version": "desc"}, - include=AGENT_GRAPH_INCLUDE, skip=offset, take=page_size, ) - graph_models: list[GraphMeta] = [] - for graph in graphs: - try: - graph_meta = GraphModel.from_db(graph).meta() - # Trigger serialization to validate that the graph is well formed - graph_meta.model_dump() - graph_models.append(graph_meta) - except Exception as e: - logger.error(f"Error processing graph {graph.id}: {e}") - continue + graph_models = [GraphMeta.from_db(graph) for graph in graphs] return GraphsPaginated( graphs=graph_models, diff --git a/autogpt_platform/backend/backend/data/model.py b/autogpt_platform/backend/backend/data/model.py index 5a09c591c9..7bdfef059b 100644 --- a/autogpt_platform/backend/backend/data/model.py +++ b/autogpt_platform/backend/backend/data/model.py @@ -163,7 +163,6 @@ class User(BaseModel): if TYPE_CHECKING: from prisma.models import User as PrismaUser - from backend.data.block import BlockSchema T = TypeVar("T") logger = logging.getLogger(__name__) @@ -508,15 +507,13 @@ class CredentialsMetaInput(BaseModel, Generic[CP, CT]): def allowed_cred_types(cls) -> tuple[CredentialsType, ...]: return get_args(cls.model_fields["type"].annotation) - @classmethod - def validate_credentials_field_schema(cls, model: type["BlockSchema"]): + @staticmethod + def validate_credentials_field_schema( + field_schema: dict[str, Any], field_name: str + ): """Validates the schema of a credentials input field""" - field_name = next( - name for name, type in model.get_credentials_fields().items() if type is cls - ) - field_schema = model.jsonschema()["properties"][field_name] try: - schema_extra = CredentialsFieldInfo[CP, CT].model_validate(field_schema) + field_info = CredentialsFieldInfo[CP, CT].model_validate(field_schema) except ValidationError as e: if "Field required [type=missing" not in str(e): raise @@ -526,11 +523,11 @@ class CredentialsMetaInput(BaseModel, Generic[CP, CT]): f"{field_schema}" ) from e - providers = cls.allowed_providers() + providers = field_info.provider if ( providers is not None and len(providers) > 1 - and not schema_extra.discriminator + and not field_info.discriminator ): raise TypeError( f"Multi-provider CredentialsField '{field_name}' " diff --git a/autogpt_platform/backend/backend/executor/utils.py b/autogpt_platform/backend/backend/executor/utils.py index fa264c30a7..d26424aefc 100644 --- a/autogpt_platform/backend/backend/executor/utils.py +++ b/autogpt_platform/backend/backend/executor/utils.py @@ -373,7 +373,7 @@ def make_node_credentials_input_map( # Get aggregated credentials fields for the graph graph_cred_inputs = graph.aggregate_credentials_inputs() - for graph_input_name, (_, compatible_node_fields) in graph_cred_inputs.items(): + for graph_input_name, (_, compatible_node_fields, _) in graph_cred_inputs.items(): # Best-effort map: skip missing items if graph_input_name not in graph_credentials_input: continue diff --git a/autogpt_platform/backend/snapshots/grph_single b/autogpt_platform/backend/snapshots/grph_single index 1811a57ec8..7fa5783577 100644 --- a/autogpt_platform/backend/snapshots/grph_single +++ b/autogpt_platform/backend/snapshots/grph_single @@ -3,7 +3,6 @@ "credentials_input_schema": { 
"properties": {}, "required": [], - "title": "TestGraphCredentialsInputSchema", "type": "object" }, "description": "A test graph", diff --git a/autogpt_platform/backend/snapshots/grphs_all b/autogpt_platform/backend/snapshots/grphs_all index 0b314d96f9..9ccb4a6dc8 100644 --- a/autogpt_platform/backend/snapshots/grphs_all +++ b/autogpt_platform/backend/snapshots/grphs_all @@ -1,34 +1,14 @@ [ { - "credentials_input_schema": { - "properties": {}, - "required": [], - "title": "TestGraphCredentialsInputSchema", - "type": "object" - }, + "created_at": "2025-09-04T13:37:00", "description": "A test graph", "forked_from_id": null, "forked_from_version": null, - "has_external_trigger": false, - "has_human_in_the_loop": false, - "has_sensitive_action": false, "id": "graph-123", - "input_schema": { - "properties": {}, - "required": [], - "type": "object" - }, "instructions": null, "is_active": true, "name": "Test Graph", - "output_schema": { - "properties": {}, - "required": [], - "type": "object" - }, "recommended_schedule_cron": null, - "sub_graphs": [], - "trigger_setup_info": null, "user_id": "3e53486c-cf57-477e-ba2a-cb02dc828e1a", "version": 1 } diff --git a/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/components/AgentOnboardingCredentials/AgentOnboardingCredentials.tsx b/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/components/AgentOnboardingCredentials/AgentOnboardingCredentials.tsx index f0bb652a06..a8efa344a2 100644 --- a/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/components/AgentOnboardingCredentials/AgentOnboardingCredentials.tsx +++ b/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/components/AgentOnboardingCredentials/AgentOnboardingCredentials.tsx @@ -1,5 +1,5 @@ import { CredentialsMetaInput } from "@/app/api/__generated__/models/credentialsMetaInput"; -import { GraphMeta } from "@/app/api/__generated__/models/graphMeta"; +import { GraphModel } from "@/app/api/__generated__/models/graphModel"; import { CredentialsInput } from "@/components/contextual/CredentialsInput/CredentialsInput"; import { useState } from "react"; import { getSchemaDefaultCredentials } from "../../helpers"; @@ -9,7 +9,7 @@ type Credential = CredentialsMetaInput | undefined; type Credentials = Record; type Props = { - agent: GraphMeta | null; + agent: GraphModel | null; siblingInputs?: Record; onCredentialsChange: ( credentials: Record, diff --git a/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/components/AgentOnboardingCredentials/helpers.ts b/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/components/AgentOnboardingCredentials/helpers.ts index 7a456d63e4..a4947015c4 100644 --- a/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/components/AgentOnboardingCredentials/helpers.ts +++ b/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/components/AgentOnboardingCredentials/helpers.ts @@ -1,9 +1,9 @@ import { CredentialsMetaInput } from "@/app/api/__generated__/models/credentialsMetaInput"; -import { GraphMeta } from "@/app/api/__generated__/models/graphMeta"; +import { GraphModel } from "@/app/api/__generated__/models/graphModel"; import { BlockIOCredentialsSubSchema } from "@/lib/autogpt-server-api/types"; export function getCredentialFields( - agent: GraphMeta | null, + agent: GraphModel | null, ): AgentCredentialsFields { if (!agent) return {}; diff --git a/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/helpers.ts 
b/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/helpers.ts index 62f5c564ff..ff1f8d452c 100644 --- a/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/helpers.ts +++ b/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/helpers.ts @@ -3,10 +3,10 @@ import type { CredentialsMetaInput, } from "@/lib/autogpt-server-api/types"; import type { InputValues } from "./types"; -import { GraphMeta } from "@/app/api/__generated__/models/graphMeta"; +import { GraphModel } from "@/app/api/__generated__/models/graphModel"; export function computeInitialAgentInputs( - agent: GraphMeta | null, + agent: GraphModel | null, existingInputs?: InputValues | null, ): InputValues { const properties = agent?.input_schema?.properties || {}; @@ -29,7 +29,7 @@ export function computeInitialAgentInputs( } type IsRunDisabledParams = { - agent: GraphMeta | null; + agent: GraphModel | null; isRunning: boolean; agentInputs: InputValues | null | undefined; }; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/BlocksControl.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/BlocksControl.tsx index f5451e6d4d..99b66fe1dc 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/BlocksControl.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/BlocksControl.tsx @@ -30,6 +30,8 @@ import { } from "@/components/atoms/Tooltip/BaseTooltip"; import { GraphMeta } from "@/lib/autogpt-server-api"; import jaro from "jaro-winkler"; +import { getV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs"; +import { okData } from "@/app/api/helpers"; type _Block = Omit & { uiKey?: string; @@ -107,6 +109,8 @@ export function BlocksControl({ .filter((b) => b.uiType !== BlockUIType.AGENT) .sort((a, b) => a.name.localeCompare(b.name)); + // Agent blocks are created from GraphMeta which doesn't include schemas. + // Schemas will be fetched on-demand when the block is actually added. const agentBlockList = flows .map((flow): _Block => { return { @@ -116,8 +120,9 @@ export function BlocksControl({ `Ver.${flow.version}` + (flow.description ? 
` | ${flow.description}` : ""), categories: [{ category: "AGENT", description: "" }], - inputSchema: flow.input_schema, - outputSchema: flow.output_schema, + // Empty schemas - will be populated when block is added + inputSchema: { type: "object", properties: {} }, + outputSchema: { type: "object", properties: {} }, staticOutput: false, uiType: BlockUIType.AGENT, costs: [], @@ -125,8 +130,7 @@ export function BlocksControl({ hardcodedValues: { graph_id: flow.id, graph_version: flow.version, - input_schema: flow.input_schema, - output_schema: flow.output_schema, + // Schemas will be fetched on-demand when block is added }, }; }) @@ -182,6 +186,37 @@ export function BlocksControl({ setSelectedCategory(null); }, []); + // Handler to add a block, fetching graph data on-demand for agent blocks + const handleAddBlock = useCallback( + async (block: _Block & { notAvailable: string | null }) => { + if (block.notAvailable) return; + + // For agent blocks, fetch the full graph to get schemas + if (block.uiType === BlockUIType.AGENT && block.hardcodedValues) { + const graphID = block.hardcodedValues.graph_id as string; + const graphVersion = block.hardcodedValues.graph_version as number; + const graphData = okData( + await getV1GetSpecificGraph(graphID, { version: graphVersion }), + ); + + if (graphData) { + addBlock(block.id, block.name, { + ...block.hardcodedValues, + input_schema: graphData.input_schema, + output_schema: graphData.output_schema, + }); + } else { + // Fallback: add without schemas (will be incomplete) + console.error("Failed to fetch graph data for agent block"); + addBlock(block.id, block.name, block.hardcodedValues || {}); + } + } else { + addBlock(block.id, block.name, block.hardcodedValues || {}); + } + }, + [addBlock], + ); + // Extract unique categories from blocks const categories = useMemo(() => { return Array.from( @@ -303,10 +338,7 @@ export function BlocksControl({ }), ); }} - onClick={() => - !block.notAvailable && - addBlock(block.id, block.name, block?.hardcodedValues || {}) - } + onClick={() => handleAddBlock(block)} title={block.notAvailable ?? undefined} >
, + position: { x: number; y: number }, + ): Promise => { + const nodeSchema = availableBlocks.find((node) => node.id === blockID); + if (!nodeSchema) { + console.error(`Schema not found for block ID: ${blockID}`); + return null; + } + + // For agent blocks, fetch the full graph to get schemas + let inputSchema: BlockIORootSchema = nodeSchema.inputSchema; + let outputSchema: BlockIORootSchema = nodeSchema.outputSchema; + let finalHardcodedValues = hardcodedValues; + + if (blockID === SpecialBlockID.AGENT) { + const graphID = hardcodedValues.graph_id as string; + const graphVersion = hardcodedValues.graph_version as number; + const graphData = okData( + await getV1GetSpecificGraph(graphID, { version: graphVersion }), + ); + + if (graphData) { + inputSchema = graphData.input_schema as BlockIORootSchema; + outputSchema = graphData.output_schema as BlockIORootSchema; + finalHardcodedValues = { + ...hardcodedValues, + input_schema: graphData.input_schema, + output_schema: graphData.output_schema, + }; + } else { + console.error("Failed to fetch graph data for agent block"); + } + } + + const newNode: CustomNode = { + id: nodeId.toString(), + type: "custom", + position, + data: { + blockType: blockName, + blockCosts: nodeSchema.costs || [], + title: `${blockName} ${nodeId}`, + description: nodeSchema.description, + categories: nodeSchema.categories, + inputSchema: inputSchema, + outputSchema: outputSchema, + hardcodedValues: finalHardcodedValues, + connections: [], + isOutputOpen: false, + block_id: blockID, + isOutputStatic: nodeSchema.staticOutput, + uiType: nodeSchema.uiType, + }, + }; + + addNodes(newNode); + setNodeId((prevId) => prevId + 1); + clearNodesStatusAndOutput(); + + history.push({ + type: "ADD_NODE", + payload: { node: { ...newNode, ...newNode.data } }, + undo: () => deleteElements({ nodes: [{ id: newNode.id }] }), + redo: () => addNodes(newNode), + }); + + return newNode; + }, + [ + availableBlocks, + nodeId, + addNodes, + deleteElements, + clearNodesStatusAndOutput, + ], + ); + const addNode = useCallback( - (blockId: string, nodeType: string, hardcodedValues: any = {}) => { + async ( + blockId: string, + nodeType: string, + hardcodedValues: Record = {}, + ) => { const nodeSchema = availableBlocks.find((node) => node.id === blockId); if (!nodeSchema) { console.error(`Schema not found for block ID: ${blockId}`); @@ -707,73 +797,42 @@ const FlowEditor: React.FC<{ // Alternative: We could also use D3 force, Intersection for this (React flow Pro examples) const { x, y } = getViewport(); - const viewportCoordinates = + const position = nodeDimensions && Object.keys(nodeDimensions).length > 0 - ? // we will get all the dimension of nodes, then store - findNewlyAddedBlockCoordinates( + ? findNewlyAddedBlockCoordinates( nodeDimensions, nodeSchema.uiType == BlockUIType.NOTE ? 
300 : 500, 60, 1.0, ) - : // we will get all the dimension of nodes, then store - { + : { x: window.innerWidth / 2 - x, y: window.innerHeight / 2 - y, }; - const newNode: CustomNode = { - id: nodeId.toString(), - type: "custom", - position: viewportCoordinates, // Set the position to the calculated viewport center - data: { - blockType: nodeType, - blockCosts: nodeSchema.costs, - title: `${nodeType} ${nodeId}`, - description: nodeSchema.description, - categories: nodeSchema.categories, - inputSchema: nodeSchema.inputSchema, - outputSchema: nodeSchema.outputSchema, - hardcodedValues: hardcodedValues, - connections: [], - isOutputOpen: false, - block_id: blockId, - isOutputStatic: nodeSchema.staticOutput, - uiType: nodeSchema.uiType, - }, - }; - - addNodes(newNode); - setNodeId((prevId) => prevId + 1); - clearNodesStatusAndOutput(); // Clear status and output when a new node is added + const newNode = await createAndAddNode( + blockId, + nodeType, + hardcodedValues, + position, + ); + if (!newNode) return; setViewport( { - // Rough estimate of the dimension of the node is: 500x400px. - // Though we skip shifting the X, considering the block menu side-bar. - x: -viewportCoordinates.x * 0.8 + (window.innerWidth - 0.0) / 2, - y: -viewportCoordinates.y * 0.8 + (window.innerHeight - 400) / 2, + x: -position.x * 0.8 + (window.innerWidth - 0.0) / 2, + y: -position.y * 0.8 + (window.innerHeight - 400) / 2, zoom: 0.8, }, { duration: 500 }, ); - - history.push({ - type: "ADD_NODE", - payload: { node: { ...newNode, ...newNode.data } }, - undo: () => deleteElements({ nodes: [{ id: newNode.id }] }), - redo: () => addNodes(newNode), - }); }, [ - nodeId, getViewport, setViewport, availableBlocks, - addNodes, nodeDimensions, - deleteElements, - clearNodesStatusAndOutput, + createAndAddNode, ], ); @@ -920,7 +979,7 @@ const FlowEditor: React.FC<{ }, []); const onDrop = useCallback( - (event: React.DragEvent) => { + async (event: React.DragEvent) => { event.preventDefault(); const blockData = event.dataTransfer.getData("application/reactflow"); @@ -935,62 +994,17 @@ const FlowEditor: React.FC<{ y: event.clientY, }); - // Find the block schema - const nodeSchema = availableBlocks.find((node) => node.id === blockId); - if (!nodeSchema) { - console.error(`Schema not found for block ID: ${blockId}`); - return; - } - - // Create the new node at the drop position - const newNode: CustomNode = { - id: nodeId.toString(), - type: "custom", + await createAndAddNode( + blockId, + blockName, + hardcodedValues || {}, position, - data: { - blockType: blockName, - blockCosts: nodeSchema.costs || [], - title: `${blockName} ${nodeId}`, - description: nodeSchema.description, - categories: nodeSchema.categories, - inputSchema: nodeSchema.inputSchema, - outputSchema: nodeSchema.outputSchema, - hardcodedValues: hardcodedValues, - connections: [], - isOutputOpen: false, - block_id: blockId, - uiType: nodeSchema.uiType, - }, - }; - - history.push({ - type: "ADD_NODE", - payload: { node: { ...newNode, ...newNode.data } }, - undo: () => { - deleteElements({ nodes: [{ id: newNode.id } as any], edges: [] }); - }, - redo: () => { - addNodes([newNode]); - }, - }); - addNodes([newNode]); - clearNodesStatusAndOutput(); - - setNodeId((prevId) => prevId + 1); + ); } catch (error) { console.error("Failed to drop block:", error); } }, - [ - nodeId, - availableBlocks, - nodes, - edges, - addNodes, - screenToFlowPosition, - deleteElements, - clearNodesStatusAndOutput, - ], + [screenToFlowPosition, createAndAddNode], ); const buildContextValue: 
BuilderContextType = useMemo( diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/RunnerInputUI.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/RunnerInputUI.tsx index 15983be9f5..cb06a79683 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/RunnerInputUI.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/RunnerInputUI.tsx @@ -4,13 +4,13 @@ import { AgentRunDraftView } from "@/app/(platform)/library/agents/[id]/componen import { Dialog } from "@/components/molecules/Dialog/Dialog"; import type { CredentialsMetaInput, - GraphMeta, + Graph, } from "@/lib/autogpt-server-api/types"; interface RunInputDialogProps { isOpen: boolean; doClose: () => void; - graph: GraphMeta; + graph: Graph; doRun?: ( inputs: Record, credentialsInputs: Record, diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/RunnerUIWrapper.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/RunnerUIWrapper.tsx index a9af065a5d..b1d40fb919 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/RunnerUIWrapper.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/RunnerUIWrapper.tsx @@ -9,13 +9,13 @@ import { CustomNodeData } from "@/app/(platform)/build/components/legacy-builder import { BlockUIType, CredentialsMetaInput, - GraphMeta, + Graph, } from "@/lib/autogpt-server-api/types"; import RunnerOutputUI, { OutputNodeInfo } from "./RunnerOutputUI"; import { RunnerInputDialog } from "./RunnerInputUI"; interface RunnerUIWrapperProps { - graph: GraphMeta; + graph: Graph; nodes: Node[]; graphExecutionError?: string | null; saveAndRun: ( diff --git a/autogpt_platform/frontend/src/app/(platform)/build/hooks/useSubAgentUpdate/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/build/hooks/useSubAgentUpdate/helpers.ts index aece7e9811..69593a142b 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/hooks/useSubAgentUpdate/helpers.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/hooks/useSubAgentUpdate/helpers.ts @@ -1,5 +1,5 @@ import { GraphInputSchema } from "@/lib/autogpt-server-api"; -import { GraphMetaLike, IncompatibilityInfo } from "./types"; +import { GraphLike, IncompatibilityInfo } from "./types"; // Helper type for schema properties - the generated types are too loose type SchemaProperties = Record; @@ -36,7 +36,7 @@ export function getSchemaRequired(schema: unknown): SchemaRequired { */ export function createUpdatedAgentNodeInputs( currentInputs: Record, - latestSubGraphVersion: GraphMetaLike, + latestSubGraphVersion: GraphLike, ): Record { return { ...currentInputs, diff --git a/autogpt_platform/frontend/src/app/(platform)/build/hooks/useSubAgentUpdate/types.ts b/autogpt_platform/frontend/src/app/(platform)/build/hooks/useSubAgentUpdate/types.ts index 83f83155db..6c115f20a3 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/hooks/useSubAgentUpdate/types.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/hooks/useSubAgentUpdate/types.ts @@ -1,7 +1,11 @@ -import type { GraphMeta as LegacyGraphMeta } from "@/lib/autogpt-server-api"; +import type { + Graph as LegacyGraph, + GraphMeta as LegacyGraphMeta, +} from "@/lib/autogpt-server-api"; +import type { GraphModel as GeneratedGraph } from "@/app/api/__generated__/models/graphModel"; import type { GraphMeta as GeneratedGraphMeta } from 
"@/app/api/__generated__/models/graphMeta"; -export type SubAgentUpdateInfo = { +export type SubAgentUpdateInfo = { hasUpdate: boolean; currentVersion: number; latestVersion: number; @@ -10,7 +14,10 @@ export type SubAgentUpdateInfo = { incompatibilities: IncompatibilityInfo | null; }; -// Union type for GraphMeta that works with both legacy and new builder +// Union type for Graph (with schemas) that works with both legacy and new builder +export type GraphLike = LegacyGraph | GeneratedGraph; + +// Union type for GraphMeta (without schemas) for version detection export type GraphMetaLike = LegacyGraphMeta | GeneratedGraphMeta; export type IncompatibilityInfo = { diff --git a/autogpt_platform/frontend/src/app/(platform)/build/hooks/useSubAgentUpdate/useSubAgentUpdate.ts b/autogpt_platform/frontend/src/app/(platform)/build/hooks/useSubAgentUpdate/useSubAgentUpdate.ts index 315e337cd6..7ad10ea697 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/hooks/useSubAgentUpdate/useSubAgentUpdate.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/hooks/useSubAgentUpdate/useSubAgentUpdate.ts @@ -1,5 +1,11 @@ import { useMemo } from "react"; -import { GraphInputSchema, GraphOutputSchema } from "@/lib/autogpt-server-api"; +import type { + GraphInputSchema, + GraphOutputSchema, +} from "@/lib/autogpt-server-api"; +import type { GraphModel } from "@/app/api/__generated__/models/graphModel"; +import { useGetV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs"; +import { okData } from "@/app/api/helpers"; import { getEffectiveType } from "@/lib/utils"; import { EdgeLike, getSchemaProperties, getSchemaRequired } from "./helpers"; import { @@ -11,26 +17,38 @@ import { /** * Checks if a newer version of a sub-agent is available and determines compatibility */ -export function useSubAgentUpdate( +export function useSubAgentUpdate( nodeID: string, graphID: string | undefined, graphVersion: number | undefined, currentInputSchema: GraphInputSchema | undefined, currentOutputSchema: GraphOutputSchema | undefined, connections: EdgeLike[], - availableGraphs: T[], -): SubAgentUpdateInfo { + availableGraphs: GraphMetaLike[], +): SubAgentUpdateInfo { // Find the latest version of the same graph - const latestGraph = useMemo(() => { + const latestGraphInfo = useMemo(() => { if (!graphID) return null; return availableGraphs.find((graph) => graph.id === graphID) || null; }, [graphID, availableGraphs]); - // Check if there's an update available + // Check if there's a newer version available const hasUpdate = useMemo(() => { - if (!latestGraph || graphVersion === undefined) return false; - return latestGraph.version! > graphVersion; - }, [latestGraph, graphVersion]); + if (!latestGraphInfo || graphVersion === undefined) return false; + return latestGraphInfo.version! > graphVersion; + }, [latestGraphInfo, graphVersion]); + + // Fetch full graph IF an update is detected + const { data: latestGraph } = useGetV1GetSpecificGraph( + graphID ?? 
"", + { version: latestGraphInfo?.version }, + { + query: { + enabled: hasUpdate && !!graphID && !!latestGraphInfo?.version, + select: okData, + }, + }, + ); // Get connected input and output handles for this specific node const connectedHandles = useMemo(() => { @@ -152,8 +170,8 @@ export function useSubAgentUpdate( return { hasUpdate, currentVersion: graphVersion || 0, - latestVersion: latestGraph?.version || 0, - latestGraph, + latestVersion: latestGraphInfo?.version || 0, + latestGraph: latestGraph || null, isCompatible: compatibilityResult.isCompatible, incompatibilities: compatibilityResult.incompatibilities, }; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/stores/graphStore.ts b/autogpt_platform/frontend/src/app/(platform)/build/stores/graphStore.ts index 6961884732..c1eba556d2 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/stores/graphStore.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/stores/graphStore.ts @@ -18,7 +18,7 @@ interface GraphStore { outputSchema: Record | null, ) => void; - // Available graphs; used for sub-graph updates + // Available graphs; used for sub-graph updated version detection availableSubGraphs: GraphMeta[]; setAvailableSubGraphs: (graphs: GraphMeta[]) => void; diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx index 0147c19a5c..b0c3a6ff7b 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx @@ -10,8 +10,8 @@ import React, { import { CredentialsMetaInput, CredentialsType, + Graph, GraphExecutionID, - GraphMeta, LibraryAgentPreset, LibraryAgentPresetID, LibraryAgentPresetUpdatable, @@ -69,7 +69,7 @@ export function AgentRunDraftView({ className, recommendedScheduleCron, }: { - graph: GraphMeta; + graph: Graph; agentActions?: ButtonAction[]; recommendedScheduleCron?: string | null; doRun?: ( diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-schedule-details-view.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-schedule-details-view.tsx index 61161088fc..30b0a82e65 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-schedule-details-view.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-schedule-details-view.tsx @@ -2,8 +2,8 @@ import React, { useCallback, useMemo } from "react"; import { + Graph, GraphExecutionID, - GraphMeta, Schedule, ScheduleID, } from "@/lib/autogpt-server-api"; @@ -35,7 +35,7 @@ export function AgentScheduleDetailsView({ onForcedRun, doDeleteSchedule, }: { - graph: GraphMeta; + graph: Graph; schedule: Schedule; agentActions: ButtonAction[]; onForcedRun: (runID: GraphExecutionID) => void; diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json index 5ed449829d..0e9020272d 100644 --- a/autogpt_platform/frontend/src/app/api/openapi.json +++ b/autogpt_platform/frontend/src/app/api/openapi.json @@ 
-5629,7 +5629,9 @@ "description": "Successful Response", "content": { "application/json": { - "schema": { "$ref": "#/components/schemas/GraphMeta" } + "schema": { + "$ref": "#/components/schemas/GraphModelWithoutNodes" + } } } }, @@ -6495,18 +6497,6 @@ "anyOf": [{ "type": "string" }, { "type": "null" }], "title": "Recommended Schedule Cron" }, - "nodes": { - "items": { "$ref": "#/components/schemas/Node" }, - "type": "array", - "title": "Nodes", - "default": [] - }, - "links": { - "items": { "$ref": "#/components/schemas/Link" }, - "type": "array", - "title": "Links", - "default": [] - }, "forked_from_id": { "anyOf": [{ "type": "string" }, { "type": "null" }], "title": "Forked From Id" @@ -6514,11 +6504,22 @@ "forked_from_version": { "anyOf": [{ "type": "integer" }, { "type": "null" }], "title": "Forked From Version" + }, + "nodes": { + "items": { "$ref": "#/components/schemas/Node" }, + "type": "array", + "title": "Nodes" + }, + "links": { + "items": { "$ref": "#/components/schemas/Link" }, + "type": "array", + "title": "Links" } }, "type": "object", "required": ["name", "description"], - "title": "BaseGraph" + "title": "BaseGraph", + "description": "Graph with nodes, links, and computed I/O schema fields.\n\nUsed to represent sub-graphs within a `Graph`. Contains the full graph\nstructure including nodes and links, plus computed fields for schemas\nand trigger info. Does NOT include user_id or created_at (see GraphModel)." }, "BaseGraph-Output": { "properties": { @@ -6539,18 +6540,6 @@ "anyOf": [{ "type": "string" }, { "type": "null" }], "title": "Recommended Schedule Cron" }, - "nodes": { - "items": { "$ref": "#/components/schemas/Node" }, - "type": "array", - "title": "Nodes", - "default": [] - }, - "links": { - "items": { "$ref": "#/components/schemas/Link" }, - "type": "array", - "title": "Links", - "default": [] - }, "forked_from_id": { "anyOf": [{ "type": "string" }, { "type": "null" }], "title": "Forked From Id" @@ -6559,6 +6548,16 @@ "anyOf": [{ "type": "integer" }, { "type": "null" }], "title": "Forked From Version" }, + "nodes": { + "items": { "$ref": "#/components/schemas/Node" }, + "type": "array", + "title": "Nodes" + }, + "links": { + "items": { "$ref": "#/components/schemas/Link" }, + "type": "array", + "title": "Links" + }, "input_schema": { "additionalProperties": true, "type": "object", @@ -6605,7 +6604,8 @@ "has_sensitive_action", "trigger_setup_info" ], - "title": "BaseGraph" + "title": "BaseGraph", + "description": "Graph with nodes, links, and computed I/O schema fields.\n\nUsed to represent sub-graphs within a `Graph`. Contains the full graph\nstructure including nodes and links, plus computed fields for schemas\nand trigger info. Does NOT include user_id or created_at (see GraphModel)." 
}, "BlockCategoryResponse": { "properties": { @@ -7399,18 +7399,6 @@ "anyOf": [{ "type": "string" }, { "type": "null" }], "title": "Recommended Schedule Cron" }, - "nodes": { - "items": { "$ref": "#/components/schemas/Node" }, - "type": "array", - "title": "Nodes", - "default": [] - }, - "links": { - "items": { "$ref": "#/components/schemas/Link" }, - "type": "array", - "title": "Links", - "default": [] - }, "forked_from_id": { "anyOf": [{ "type": "string" }, { "type": "null" }], "title": "Forked From Id" @@ -7419,16 +7407,26 @@ "anyOf": [{ "type": "integer" }, { "type": "null" }], "title": "Forked From Version" }, + "nodes": { + "items": { "$ref": "#/components/schemas/Node" }, + "type": "array", + "title": "Nodes" + }, + "links": { + "items": { "$ref": "#/components/schemas/Link" }, + "type": "array", + "title": "Links" + }, "sub_graphs": { "items": { "$ref": "#/components/schemas/BaseGraph-Input" }, "type": "array", - "title": "Sub Graphs", - "default": [] + "title": "Sub Graphs" } }, "type": "object", "required": ["name", "description"], - "title": "Graph" + "title": "Graph", + "description": "Creatable graph model used in API create/update endpoints." }, "GraphExecution": { "properties": { @@ -7778,6 +7776,52 @@ "description": "Response schema for paginated graph executions." }, "GraphMeta": { + "properties": { + "id": { "type": "string", "title": "Id" }, + "version": { "type": "integer", "title": "Version" }, + "is_active": { + "type": "boolean", + "title": "Is Active", + "default": true + }, + "name": { "type": "string", "title": "Name" }, + "description": { "type": "string", "title": "Description" }, + "instructions": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Instructions" + }, + "recommended_schedule_cron": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Recommended Schedule Cron" + }, + "forked_from_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Forked From Id" + }, + "forked_from_version": { + "anyOf": [{ "type": "integer" }, { "type": "null" }], + "title": "Forked From Version" + }, + "user_id": { "type": "string", "title": "User Id" }, + "created_at": { + "type": "string", + "format": "date-time", + "title": "Created At" + } + }, + "type": "object", + "required": [ + "id", + "version", + "name", + "description", + "user_id", + "created_at" + ], + "title": "GraphMeta", + "description": "Lightweight graph metadata model representing an existing graph from the database,\nfor use in listings and summaries.\n\nLacks `GraphModel`'s nodes, links, and expensive computed fields.\nUse for list endpoints where full graph data is not needed and performance matters." 
+ }, + "GraphModel": { "properties": { "id": { "type": "string", "title": "Id" }, "version": { "type": "integer", "title": "Version", "default": 1 }, @@ -7804,13 +7848,27 @@ "anyOf": [{ "type": "integer" }, { "type": "null" }], "title": "Forked From Version" }, + "user_id": { "type": "string", "title": "User Id" }, + "created_at": { + "type": "string", + "format": "date-time", + "title": "Created At" + }, + "nodes": { + "items": { "$ref": "#/components/schemas/NodeModel" }, + "type": "array", + "title": "Nodes" + }, + "links": { + "items": { "$ref": "#/components/schemas/Link" }, + "type": "array", + "title": "Links" + }, "sub_graphs": { "items": { "$ref": "#/components/schemas/BaseGraph-Output" }, "type": "array", - "title": "Sub Graphs", - "default": [] + "title": "Sub Graphs" }, - "user_id": { "type": "string", "title": "User Id" }, "input_schema": { "additionalProperties": true, "type": "object", @@ -7857,6 +7915,7 @@ "name", "description", "user_id", + "created_at", "input_schema", "output_schema", "has_external_trigger", @@ -7865,9 +7924,10 @@ "trigger_setup_info", "credentials_input_schema" ], - "title": "GraphMeta" + "title": "GraphModel", + "description": "Full graph model representing an existing graph from the database.\n\nThis is the primary model for working with persisted graphs. Includes all\ngraph data (nodes, links, sub_graphs) plus user ownership and timestamps.\nProvides computed fields (input_schema, output_schema, etc.) used during\nset-up (frontend) and execution (backend).\n\nInherits from:\n- `Graph`: provides structure (nodes, links, sub_graphs) and computed schemas\n- `GraphMeta`: provides user_id, created_at for database records" }, - "GraphModel": { + "GraphModelWithoutNodes": { "properties": { "id": { "type": "string", "title": "Id" }, "version": { "type": "integer", "title": "Version", "default": 1 }, @@ -7886,18 +7946,6 @@ "anyOf": [{ "type": "string" }, { "type": "null" }], "title": "Recommended Schedule Cron" }, - "nodes": { - "items": { "$ref": "#/components/schemas/NodeModel" }, - "type": "array", - "title": "Nodes", - "default": [] - }, - "links": { - "items": { "$ref": "#/components/schemas/Link" }, - "type": "array", - "title": "Links", - "default": [] - }, "forked_from_id": { "anyOf": [{ "type": "string" }, { "type": "null" }], "title": "Forked From Id" @@ -7906,12 +7954,6 @@ "anyOf": [{ "type": "integer" }, { "type": "null" }], "title": "Forked From Version" }, - "sub_graphs": { - "items": { "$ref": "#/components/schemas/BaseGraph-Output" }, - "type": "array", - "title": "Sub Graphs", - "default": [] - }, "user_id": { "type": "string", "title": "User Id" }, "created_at": { "type": "string", @@ -7973,7 +8015,8 @@ "trigger_setup_info", "credentials_input_schema" ], - "title": "GraphModel" + "title": "GraphModelWithoutNodes", + "description": "GraphModel variant that excludes nodes, links, and sub-graphs from serialization.\n\nUsed in contexts like the store where exposing internal graph structure\nis not desired. Inherits all computed fields from GraphModel but marks\nnodes and links as excluded from JSON output." 
}, "GraphSettings": { "properties": { @@ -8613,26 +8656,22 @@ "input_default": { "additionalProperties": true, "type": "object", - "title": "Input Default", - "default": {} + "title": "Input Default" }, "metadata": { "additionalProperties": true, "type": "object", - "title": "Metadata", - "default": {} + "title": "Metadata" }, "input_links": { "items": { "$ref": "#/components/schemas/Link" }, "type": "array", - "title": "Input Links", - "default": [] + "title": "Input Links" }, "output_links": { "items": { "$ref": "#/components/schemas/Link" }, "type": "array", - "title": "Output Links", - "default": [] + "title": "Output Links" } }, "type": "object", @@ -8712,26 +8751,22 @@ "input_default": { "additionalProperties": true, "type": "object", - "title": "Input Default", - "default": {} + "title": "Input Default" }, "metadata": { "additionalProperties": true, "type": "object", - "title": "Metadata", - "default": {} + "title": "Metadata" }, "input_links": { "items": { "$ref": "#/components/schemas/Link" }, "type": "array", - "title": "Input Links", - "default": [] + "title": "Input Links" }, "output_links": { "items": { "$ref": "#/components/schemas/Link" }, "type": "array", - "title": "Output Links", - "default": [] + "title": "Output Links" }, "graph_id": { "type": "string", "title": "Graph Id" }, "graph_version": { "type": "integer", "title": "Graph Version" }, diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts index 74855f5e28..44fb25dbfc 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts @@ -362,25 +362,14 @@ export type GraphMeta = { user_id: UserID; version: number; is_active: boolean; + created_at: Date; name: string; description: string; instructions?: string | null; recommended_schedule_cron: string | null; forked_from_id?: GraphID | null; forked_from_version?: number | null; - input_schema: GraphInputSchema; - output_schema: GraphOutputSchema; - credentials_input_schema: CredentialsInputSchema; -} & ( - | { - has_external_trigger: true; - trigger_setup_info: GraphTriggerInfo; - } - | { - has_external_trigger: false; - trigger_setup_info: null; - } -); +}; export type GraphID = Brand; @@ -447,11 +436,22 @@ export type GraphTriggerInfo = { /* Mirror of backend/data/graph.py:Graph */ export type Graph = GraphMeta & { - created_at: Date; nodes: Node[]; links: Link[]; sub_graphs: Omit[]; // Flattened sub-graphs -}; + input_schema: GraphInputSchema; + output_schema: GraphOutputSchema; + credentials_input_schema: CredentialsInputSchema; +} & ( + | { + has_external_trigger: true; + trigger_setup_info: GraphTriggerInfo; + } + | { + has_external_trigger: false; + trigger_setup_info: null; + } + ); export type GraphUpdateable = Omit< Graph,