Compare commits

..

5 Commits

Author SHA1 Message Date
Nicholas Tindle
f4f81bc4fc fix(backend): Remove _credentials_id key on fork instead of setting to None
Setting _credentials_id to None on fork was ambiguous — both "forked,
needs re-auth" and "chained data from upstream" were represented as None.
This caused _acquire_auto_credentials to silently skip credential
acquisition for forked agents, leading to confusing TypeErrors at runtime.

Now the key is deleted entirely, making the three states unambiguous:
- Present with value: user-selected credentials
- Present as None: chained data from upstream block
- Absent: forked/needs re-authentication

Also adds pre-run validation for the missing key case and makes error
messages provider-agnostic.
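
A minimal sketch of how the three states read from a file-field dict (illustrative helper only; the real branching lives in `_acquire_auto_credentials` further down in this diff):

```python
def credentials_state(field_data: dict) -> str:
    """Illustrative only: classify a file-field dict by its _credentials_id state."""
    if "_credentials_id" not in field_data:
        return "absent: forked agent, user must re-authenticate"
    if field_data["_credentials_id"] is None:
        return "chained: value comes from an upstream block, skip acquisition"
    return "user-selected: acquire this credential ID at execution time"
```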

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-06 17:34:16 -06:00
Nicholas Tindle
c5abc01f25 fix(backend): Add error handling for auto-credentials store lookup
Wrap get_creds_by_id call in try/except in the auto-credentials
validation path to match the error handling pattern used for regular
credentials.
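
A hypothetical sketch of the pattern described (the helper name, call shape, and error type below are assumptions, not taken from this diff):

```python
async def lookup_auto_credential(store, user_id: str, cred_id: str, field_name: str):
    # Assumed call shape for store.get_creds_by_id; mirror the regular-credentials
    # path by converting lookup failures into a clear, user-facing error.
    try:
        credentials = await store.get_creds_by_id(user_id, cred_id)
    except Exception as e:
        raise ValueError(
            f"Could not look up credentials #{cred_id} for '{field_name}': {e}"
        ) from e
    if credentials is None:
        raise ValueError(f"Credentials #{cred_id} for '{field_name}' no longer exist.")
    return credentials
```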

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-06 16:53:29 -06:00
Nicholas Tindle
8b7053c1de merge: Resolve conflicts with dev (PR #11986 graph model refactor)
Adapt auto-credentials filtering to dev's refactored graph model:
- aggregate_credentials_inputs() now returns 3-tuples (field_info, node_pairs, is_required)
- credentials_input_schema moved to GraphModel, builds JSON schema directly
- Update regular/auto_credentials_inputs properties for 3-tuple format
- Update test mocks and assertions for new tuple format and class hierarchy
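
For reference, a condensed sketch of the new tuple shape (the type alias name is mine; the shape follows `aggregate_credentials_inputs()` as refactored in this diff):

```python
from backend.data.model import CredentialsFieldInfo

# (field_info, node_field_pairs, is_required); the third element is the new flag.
AggregatedCreds = dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]], bool]]

def required_credential_keys(aggregated: AggregatedCreds) -> list[str]:
    return [
        key
        for key, (_info, _node_pairs, is_required) in aggregated.items()
        if is_required
    ]
```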

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-06 16:39:57 -06:00
Nicholas Tindle
e00c1202ad fix(platform): Fix Google Drive auto-credentials handling across the platform
- Tag auto-credentials with `is_auto_credential` and `input_field_name` on `CredentialsFieldInfo` to distinguish them from regular user-provided credentials
- Add `regular_credentials_inputs` and `auto_credentials_inputs` properties to `Graph` so UI schemas, CoPilot, and library presets only surface regular credentials
- Extract `_acquire_auto_credentials()` helper in executor to resolve embedded `_credentials_id` at execution time with proper lock management
- Validate auto-credentials ownership in `_validate_node_input_credentials()` to catch stale/missing credentials before execution
- Clear `_credentials_id` in `_reassign_ids()` on graph fork so cloned agents require re-authentication
- Propagate `is_auto_credential` through `combine()` and `discriminate()` on `CredentialsFieldInfo`
- Add `referrerPolicy: "no-referrer-when-downgrade"` to Google API script loading to fix Firefox API key validation
- Comprehensive test coverage for all new behavior
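
The split described above comes down to filtering on the new `is_auto_credential` flag; a condensed sketch based on the `regular_credentials_inputs` / `auto_credentials_inputs` properties added in this PR:

```python
from typing import Any

def split_credentials_inputs(
    aggregated: dict[str, tuple[Any, ...]],
) -> tuple[dict, dict]:
    """Split aggregated credential fields into (regular, auto) using the
    is_auto_credential tag on the CredentialsFieldInfo in each entry."""
    regular = {k: v for k, v in aggregated.items() if not v[0].is_auto_credential}
    auto = {k: v for k, v in aggregated.items() if v[0].is_auto_credential}
    return regular, auto
```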

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-06 16:08:53 -06:00
Reinier van der Leer
8fddc9d71f fix(backend): Reduce GET /api/graphs expense + latency (#11986)
[SECRT-1896: Fix crazy `GET /api/graphs` latency (P95 = 107s)](https://linear.app/autogpt/issue/SECRT-1896)

These changes should decrease latency of this endpoint by ~~60-65%~~ a lot.

### Changes 🏗️

- Make `Graph.credentials_input_schema` cheaper by avoiding constructing a new `BlockSchema` subclass
- Strip down `GraphMeta` - drop all computed fields
  - Replace with either `GraphModel` or `GraphModelWithoutNodes` wherever those computed fields are used
  - Simplify usage in `list_graphs_paginated` and `fetch_graph_from_store_slug`
- Refactor and clarify relationships between the different graph models
  - Split `BaseGraph` into `GraphBaseMeta` + `BaseGraph`
  - Strip down `Graph` - move `credentials_input_schema` and `aggregate_credentials_inputs` to `GraphModel`
  - Refactor to eliminate double `aggregate_credentials_inputs()` call in `credentials_input_schema` call tree
  - Add `GraphModelWithoutNodes` (similar to current `GraphMeta`)
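
A condensed sketch of how the reworked models are meant to be used, based on the `backend/data/graph.py` and store `db.py` changes further down in this diff (function names here are illustrative):

```python
from prisma.models import AgentGraph
from backend.data.graph import GraphMeta, GraphModel, GraphModelWithoutNodes

def list_item(db_graph: AgentGraph) -> GraphMeta:
    # Listings now build the lightweight GraphMeta directly: no nodes, no
    # expensive computed fields, so GET /api/graphs stays cheap.
    return GraphMeta.from_db(db_graph)

def store_view(db_graph: AgentGraph) -> GraphModelWithoutNodes:
    # Full model is loaded, then nodes/links/sub-graphs are excluded from
    # serialization while computed fields (e.g. credentials_input_schema)
    # still work because the data remains on the instance.
    return GraphModel.from_db(db_graph).hide_nodes()
```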

### Checklist 📋

#### For code changes:
- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
  - [x] `GET /api/graphs` works as it should
  - [x] Running a graph succeeds
  - [x] Adding a sub-agent in the Builder works as it should
2026-02-06 19:13:21 +00:00
33 changed files with 1846 additions and 873 deletions

View File

@@ -1,412 +0,0 @@
name: CLA Label Sync
on:
# Real-time: when CLA status changes (CLA-assistant uses Status API)
status:
# When PRs are opened or updated
pull_request_target:
types: [opened, synchronize, reopened]
# Scheduled sweep - check stale PRs daily
schedule:
- cron: '0 9 * * *' # 9 AM UTC daily
# Manual trigger for testing
workflow_dispatch:
inputs:
pr_number:
description: 'Specific PR number to check (optional)'
required: false
permissions:
pull-requests: write
contents: read
statuses: read
checks: read
env:
CLA_CHECK_NAME: 'license/cla'
LABEL_PENDING: 'cla: pending'
LABEL_SIGNED: 'cla: signed'
# Timing configuration (all independently configurable)
REMINDER_DAYS: 3 # Days before first reminder
CLOSE_WARNING_DAYS: 7 # Days before "closing soon" warning
CLOSE_DAYS: 10 # Days before auto-close
jobs:
sync-labels:
runs-on: ubuntu-latest
# Only run on status events if it's the CLA check
if: github.event_name != 'status' || github.event.context == 'license/cla'
steps:
- name: Ensure CLA labels exist
uses: actions/github-script@v7
with:
script: |
const labels = [
{ name: 'cla: pending', color: 'fbca04', description: 'CLA not yet signed by all contributors' },
{ name: 'cla: signed', color: '0e8a16', description: 'CLA signed by all contributors' }
];
for (const label of labels) {
try {
await github.rest.issues.getLabel({
owner: context.repo.owner,
repo: context.repo.repo,
name: label.name
});
} catch (e) {
if (e.status === 404) {
await github.rest.issues.createLabel({
owner: context.repo.owner,
repo: context.repo.repo,
name: label.name,
color: label.color,
description: label.description
});
console.log(`Created label: ${label.name}`);
}
}
}
- name: Sync CLA labels and handle stale PRs
uses: actions/github-script@v7
with:
script: |
const CLA_CHECK_NAME = process.env.CLA_CHECK_NAME;
const LABEL_PENDING = process.env.LABEL_PENDING;
const LABEL_SIGNED = process.env.LABEL_SIGNED;
const REMINDER_DAYS = parseInt(process.env.REMINDER_DAYS);
const CLOSE_WARNING_DAYS = parseInt(process.env.CLOSE_WARNING_DAYS);
const CLOSE_DAYS = parseInt(process.env.CLOSE_DAYS);
// Validate timing configuration
if ([REMINDER_DAYS, CLOSE_WARNING_DAYS, CLOSE_DAYS].some(Number.isNaN)) {
core.setFailed('Invalid timing configuration — REMINDER_DAYS, CLOSE_WARNING_DAYS, and CLOSE_DAYS must be numeric.');
return;
}
if (!(REMINDER_DAYS < CLOSE_WARNING_DAYS && CLOSE_WARNING_DAYS < CLOSE_DAYS)) {
core.warning(`Timing order looks odd: REMINDER(${REMINDER_DAYS}) < WARNING(${CLOSE_WARNING_DAYS}) < CLOSE(${CLOSE_DAYS}) expected.`);
}
const CLA_SIGN_URL = `https://cla-assistant.io/${context.repo.owner}/${context.repo.repo}`;
// Helper: Get CLA status for a commit
async function getClaStatus(headSha) {
// CLA-assistant uses the commit status API (not checks API)
const { data: statuses } = await github.rest.repos.getCombinedStatusForRef({
owner: context.repo.owner,
repo: context.repo.repo,
ref: headSha
});
const claStatus = statuses.statuses.find(
s => s.context === CLA_CHECK_NAME
);
if (claStatus) {
return {
found: true,
passed: claStatus.state === 'success',
state: claStatus.state,
description: claStatus.description
};
}
// Fallback: check the Checks API too
const { data: checkRuns } = await github.rest.checks.listForRef({
owner: context.repo.owner,
repo: context.repo.repo,
ref: headSha
});
const claCheck = checkRuns.check_runs.find(
check => check.name === CLA_CHECK_NAME
);
if (claCheck) {
return {
found: true,
passed: claCheck.conclusion === 'success',
state: claCheck.conclusion,
description: claCheck.output?.summary || ''
};
}
return { found: false, passed: false, state: 'unknown' };
}
// Helper: Check if bot already commented with a specific marker (paginated)
async function hasCommentWithMarker(prNumber, marker) {
// Use paginate to fetch ALL comments, not just first 100
const comments = await github.paginate(
github.rest.issues.listComments,
{
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: prNumber,
per_page: 100
}
);
return comments.some(c =>
c.user?.type === 'Bot' &&
c.body?.includes(marker)
);
}
// Helper: Days since a date
function daysSince(dateString) {
const date = new Date(dateString);
const now = new Date();
return Math.floor((now - date) / (1000 * 60 * 60 * 24));
}
// Determine which PRs to check
let prsToCheck = [];
if (context.eventName === 'status') {
// Status event from CLA-assistant - find PRs with this commit
const sha = context.payload.sha;
console.log(`Status event for SHA: ${sha}, context: ${context.payload.context}`);
// Search for open PRs with this head SHA
const { data: prs } = await github.rest.pulls.list({
owner: context.repo.owner,
repo: context.repo.repo,
state: 'open',
per_page: 100
});
prsToCheck = prs.filter(pr => pr.head.sha === sha).map(pr => pr.number);
if (prsToCheck.length === 0) {
console.log('No open PRs found with this SHA');
return;
}
} else if (context.eventName === 'pull_request_target') {
prsToCheck = [context.payload.pull_request.number];
} else if (context.eventName === 'workflow_dispatch' && context.payload.inputs?.pr_number) {
prsToCheck = [parseInt(context.payload.inputs.pr_number)];
} else {
// Scheduled run: check all open PRs (paginated to handle >100 PRs)
const openPRs = await github.paginate(
github.rest.pulls.list,
{
owner: context.repo.owner,
repo: context.repo.repo,
state: 'open',
per_page: 100
}
);
prsToCheck = openPRs.map(pr => pr.number);
}
console.log(`Checking ${prsToCheck.length} PR(s): ${prsToCheck.join(', ')}`);
for (const prNumber of prsToCheck) {
try {
// Get PR details
const { data: pr } = await github.rest.pulls.get({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: prNumber
});
// Skip if PR is from a bot
if (pr.user.type === 'Bot') {
console.log(`PR #${prNumber}: Skipping bot PR`);
continue;
}
// Skip if PR is not open (closed/merged)
if (pr.state !== 'open') {
console.log(`PR #${prNumber}: Skipping non-open PR (state=${pr.state})`);
continue;
}
// Skip if PR doesn't touch platform code (CLA automation only for autogpt_platform/)
const PLATFORM_PATH = 'autogpt_platform/';
const { data: files } = await github.rest.pulls.listFiles({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: prNumber,
per_page: 100
});
const touchesPlatform = files.some(f => f.filename.startsWith(PLATFORM_PATH));
if (!touchesPlatform) {
console.log(`PR #${prNumber}: Skipping - doesn't touch ${PLATFORM_PATH}`);
continue;
}
const claStatus = await getClaStatus(pr.head.sha);
const currentLabels = pr.labels.map(l => l.name);
const hasPending = currentLabels.includes(LABEL_PENDING);
const hasSigned = currentLabels.includes(LABEL_SIGNED);
const prAgeDays = daysSince(pr.created_at);
console.log(`PR #${prNumber}: CLA ${claStatus.passed ? 'passed' : 'pending'} (${claStatus.state}), age: ${prAgeDays} days`);
if (claStatus.passed) {
// ✅ CLA signed - add signed label, remove pending
if (!hasSigned) {
await github.rest.issues.addLabels({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: prNumber,
labels: [LABEL_SIGNED]
});
console.log(`Added '${LABEL_SIGNED}' to PR #${prNumber}`);
}
if (hasPending) {
await github.rest.issues.removeLabel({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: prNumber,
name: LABEL_PENDING
});
console.log(`Removed '${LABEL_PENDING}' from PR #${prNumber}`);
}
} else {
// ⏳ CLA pending
// Add pending label if not present
if (!hasPending) {
await github.rest.issues.addLabels({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: prNumber,
labels: [LABEL_PENDING]
});
console.log(`Added '${LABEL_PENDING}' to PR #${prNumber}`);
}
if (hasSigned) {
await github.rest.issues.removeLabel({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: prNumber,
name: LABEL_SIGNED
});
console.log(`Removed '${LABEL_SIGNED}' from PR #${prNumber}`);
}
// Check if we need to send reminder or close
const REMINDER_MARKER = '<!-- cla-reminder -->';
const CLOSE_WARNING_MARKER = '<!-- cla-close-warning -->';
// 📢 Reminder after REMINDER_DAYS (but before warning window)
if (prAgeDays >= REMINDER_DAYS && prAgeDays < CLOSE_WARNING_DAYS) {
const hasReminder = await hasCommentWithMarker(prNumber, REMINDER_MARKER);
if (!hasReminder) {
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: prNumber,
body: `${REMINDER_MARKER}
👋 **Friendly reminder:** This PR is waiting on a signed CLA.
All contributors need to sign our Contributor License Agreement before we can merge this PR.
**➡️ [Sign the CLA here](${CLA_SIGN_URL}?pullRequest=${prNumber})**
<details>
<summary>Why do we need a CLA?</summary>
The CLA protects both you and the project by clarifying the terms under which your contribution is made. It's a one-time process — once signed, it covers all your future contributions.
</details>
<details>
<summary>Common issues</summary>
- **Email mismatch:** Make sure your Git commit email matches your GitHub account email
- **Merge commits:** If you merged \`dev\` into your branch, try rebasing instead: \`git rebase origin/dev && git push --force-with-lease\`
- **Multiple authors:** All commit authors need to sign, not just the PR author
</details>
If you have questions, just ask! 🙂`
});
console.log(`Posted reminder on PR #${prNumber}`);
}
}
// ⚠️ Close warning at CLOSE_WARNING_DAYS
if (prAgeDays >= CLOSE_WARNING_DAYS && prAgeDays < CLOSE_DAYS) {
const hasCloseWarning = await hasCommentWithMarker(prNumber, CLOSE_WARNING_MARKER);
if (!hasCloseWarning) {
const daysRemaining = CLOSE_DAYS - prAgeDays;
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: prNumber,
body: `${CLOSE_WARNING_MARKER}
⚠️ **This PR will be automatically closed in ${daysRemaining} day${daysRemaining === 1 ? '' : 's'}** if the CLA is not signed.
We haven't received a signed CLA from all contributors yet. Please sign it to keep this PR open:
**➡️ [Sign the CLA here](${CLA_SIGN_URL}?pullRequest=${prNumber})**
If you're unable to sign or have questions, please let us know — we're happy to help!`
});
console.log(`Posted close warning on PR #${prNumber}`);
}
}
// 🚪 Auto-close after CLOSE_DAYS
if (prAgeDays >= CLOSE_DAYS) {
const CLOSE_MARKER = '<!-- cla-auto-closed -->';
const OVERRIDE_LABEL = 'cla: override';
// Check for override label (maintainer wants to keep PR open)
if (currentLabels.includes(OVERRIDE_LABEL)) {
console.log(`PR #${prNumber}: Skipping close due to '${OVERRIDE_LABEL}' label`);
} else {
// Check if we already posted a close comment
const hasCloseComment = await hasCommentWithMarker(prNumber, CLOSE_MARKER);
if (!hasCloseComment) {
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: prNumber,
body: `${CLOSE_MARKER}
👋 Closing this PR due to unsigned CLA after ${CLOSE_DAYS} days.
Thank you for your contribution! If you'd still like to contribute:
1. [Sign the CLA](${CLA_SIGN_URL})
2. Re-open this PR or create a new one
We appreciate your interest in AutoGPT and hope to see you back! 🚀`
});
}
await github.rest.pulls.update({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: prNumber,
state: 'closed'
});
console.log(`Closed PR #${prNumber} due to unsigned CLA`);
}
}
}
} catch (error) {
console.error(`Error processing PR #${prNumber}: ${error.message}`);
}
}
console.log('CLA label sync complete!');

View File

@@ -6,7 +6,6 @@ from typing import Any
from backend.api.features.library import db as library_db
from backend.api.features.library import model as library_model
from backend.api.features.store import db as store_db
from backend.data import graph as graph_db
from backend.data.graph import GraphModel
from backend.data.model import (
CredentialsFieldInfo,
@@ -44,14 +43,8 @@ async def fetch_graph_from_store_slug(
return None, None
# Get the graph from store listing version
graph_meta = await store_db.get_available_graph(
store_agent.store_listing_version_id
)
graph = await graph_db.get_graph(
graph_id=graph_meta.id,
version=graph_meta.version,
user_id=None, # Public access
include_subgraphs=True,
graph = await store_db.get_available_graph(
store_agent.store_listing_version_id, hide_nodes=False
)
return graph, store_agent
@@ -124,11 +117,11 @@ def build_missing_credentials_from_graph(
preserving all supported credential types for each field.
"""
matched_keys = set(matched_credentials.keys()) if matched_credentials else set()
aggregated_fields = graph.aggregate_credentials_inputs()
aggregated_fields = graph.regular_credentials_inputs
return {
field_key: _serialize_missing_credential(field_key, field_info)
for field_key, (field_info, _node_fields) in aggregated_fields.items()
for field_key, (field_info, _, _) in aggregated_fields.items()
if field_key not in matched_keys
}
@@ -251,7 +244,7 @@ async def match_user_credentials_to_graph(
missing_creds: list[str] = []
# Get aggregated credentials requirements from the graph
aggregated_creds = graph.aggregate_credentials_inputs()
aggregated_creds = graph.regular_credentials_inputs
logger.debug(
f"Matching credentials for graph {graph.id}: {len(aggregated_creds)} required"
)
@@ -269,7 +262,8 @@ async def match_user_credentials_to_graph(
# provider is in the set of acceptable providers.
for credential_field_name, (
credential_requirements,
_node_fields,
_,
_,
) in aggregated_creds.items():
# Find first matching credential by provider, type, and scopes
matching_cred = next(

View File

@@ -0,0 +1,78 @@
"""Tests for chat tools utility functions."""
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from backend.data.model import CredentialsFieldInfo
def _make_regular_field() -> CredentialsFieldInfo:
return CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["github"],
"credentials_types": ["api_key"],
"is_auto_credential": False,
},
by_alias=True,
)
def test_build_missing_credentials_excludes_auto_creds():
"""
build_missing_credentials_from_graph() should use regular_credentials_inputs
and thus exclude auto_credentials from the "missing" set.
"""
from backend.api.features.chat.tools.utils import (
build_missing_credentials_from_graph,
)
regular_field = _make_regular_field()
mock_graph = MagicMock()
# regular_credentials_inputs should only return the non-auto field
mock_graph.regular_credentials_inputs = {
"github_api_key": (regular_field, {("node-1", "credentials")}, True),
}
result = build_missing_credentials_from_graph(mock_graph, matched_credentials=None)
# Should include the regular credential
assert "github_api_key" in result
# Should NOT include the auto_credential (not in regular_credentials_inputs)
assert "google_oauth2" not in result
@pytest.mark.asyncio
async def test_match_user_credentials_excludes_auto_creds():
"""
match_user_credentials_to_graph() should use regular_credentials_inputs
and thus exclude auto_credentials from matching.
"""
from backend.api.features.chat.tools.utils import match_user_credentials_to_graph
regular_field = _make_regular_field()
mock_graph = MagicMock()
mock_graph.id = "test-graph"
# regular_credentials_inputs returns only non-auto fields
mock_graph.regular_credentials_inputs = {
"github_api_key": (regular_field, {("node-1", "credentials")}, True),
}
# Mock the credentials manager to return no credentials
with patch(
"backend.api.features.chat.tools.utils.IntegrationCredentialsManager"
) as MockCredsMgr:
mock_store = AsyncMock()
mock_store.get_all_creds.return_value = []
MockCredsMgr.return_value.store = mock_store
matched, missing = await match_user_credentials_to_graph(
user_id="test-user", graph=mock_graph
)
# No credentials available, so github should be missing
assert len(matched) == 0
assert len(missing) == 1
assert "github_api_key" in missing[0]

View File

@@ -374,7 +374,7 @@ async def get_library_agent_by_graph_id(
async def add_generated_agent_image(
graph: graph_db.BaseGraph,
graph: graph_db.GraphBaseMeta,
user_id: str,
library_agent_id: str,
) -> Optional[prisma.models.LibraryAgent]:
@@ -1103,7 +1103,7 @@ async def create_preset_from_graph_execution(
raise NotFoundError(
f"Graph #{graph_execution.graph_id} not found or accessible"
)
elif len(graph.aggregate_credentials_inputs()) > 0:
elif len(graph.regular_credentials_inputs) > 0:
raise ValueError(
f"Graph execution #{graph_exec_id} can't be turned into a preset "
"because it was run before this feature existed "

View File

@@ -1,7 +1,7 @@
import asyncio
import logging
from datetime import datetime, timezone
from typing import Any, Literal
from typing import Any, Literal, overload
import fastapi
import prisma.enums
@@ -11,8 +11,8 @@ import prisma.types
from backend.data.db import transaction
from backend.data.graph import (
GraphMeta,
GraphModel,
GraphModelWithoutNodes,
get_graph,
get_graph_as_admin,
get_sub_graphs,
@@ -334,7 +334,22 @@ async def get_store_agent_details(
raise DatabaseError("Failed to fetch agent details") from e
async def get_available_graph(store_listing_version_id: str) -> GraphMeta:
@overload
async def get_available_graph(
store_listing_version_id: str, hide_nodes: Literal[False]
) -> GraphModel: ...
@overload
async def get_available_graph(
store_listing_version_id: str, hide_nodes: Literal[True] = True
) -> GraphModelWithoutNodes: ...
async def get_available_graph(
store_listing_version_id: str,
hide_nodes: bool = True,
) -> GraphModelWithoutNodes | GraphModel:
try:
# Get available, non-deleted store listing version
store_listing_version = (
@@ -344,7 +359,7 @@ async def get_available_graph(store_listing_version_id: str) -> GraphMeta:
"isAvailable": True,
"isDeleted": False,
},
include={"AgentGraph": {"include": {"Nodes": True}}},
include={"AgentGraph": {"include": AGENT_GRAPH_INCLUDE}},
)
)
@@ -354,7 +369,9 @@ async def get_available_graph(store_listing_version_id: str) -> GraphMeta:
detail=f"Store listing version {store_listing_version_id} not found",
)
return GraphModel.from_db(store_listing_version.AgentGraph).meta()
return (GraphModelWithoutNodes if hide_nodes else GraphModel).from_db(
store_listing_version.AgentGraph
)
except Exception as e:
logger.error(f"Error getting agent: {e}")

View File

@@ -16,7 +16,7 @@ from backend.blocks.ideogram import (
StyleType,
UpscaleOption,
)
from backend.data.graph import BaseGraph
from backend.data.graph import GraphBaseMeta
from backend.data.model import CredentialsMetaInput, ProviderName
from backend.integrations.credentials_store import ideogram_credentials
from backend.util.request import Requests
@@ -34,14 +34,14 @@ class ImageStyle(str, Enum):
DIGITAL_ART = "digital art"
async def generate_agent_image(agent: BaseGraph | AgentGraph) -> io.BytesIO:
async def generate_agent_image(agent: GraphBaseMeta | AgentGraph) -> io.BytesIO:
if settings.config.use_agent_image_generation_v2:
return await generate_agent_image_v2(graph=agent)
else:
return await generate_agent_image_v1(agent=agent)
async def generate_agent_image_v2(graph: BaseGraph | AgentGraph) -> io.BytesIO:
async def generate_agent_image_v2(graph: GraphBaseMeta | AgentGraph) -> io.BytesIO:
"""
Generate an image for an agent using Ideogram model.
Returns:
@@ -54,14 +54,17 @@ async def generate_agent_image_v2(graph: BaseGraph | AgentGraph) -> io.BytesIO:
description = f"{name} ({graph.description})" if graph.description else name
prompt = (
f"Create a visually striking retro-futuristic vector pop art illustration prominently featuring "
f'"{name}" in bold typography. The image clearly and literally depicts a {description}, '
f"along with recognizable objects directly associated with the primary function of a {name}. "
f"Ensure the imagery is concrete, intuitive, and immediately understandable, clearly conveying the "
f"purpose of a {name}. Maintain vibrant, limited-palette colors, sharp vector lines, geometric "
f"shapes, flat illustration techniques, and solid colors without gradients or shading. Preserve a "
f"retro-futuristic aesthetic influenced by mid-century futurism and 1960s psychedelia, "
f"prioritizing clear visual storytelling and thematic clarity above all else."
"Create a visually striking retro-futuristic vector pop art illustration "
f'prominently featuring "{name}" in bold typography. The image clearly and '
f"literally depicts a {description}, along with recognizable objects directly "
f"associated with the primary function of a {name}. "
f"Ensure the imagery is concrete, intuitive, and immediately understandable, "
f"clearly conveying the purpose of a {name}. "
"Maintain vibrant, limited-palette colors, sharp vector lines, "
"geometric shapes, flat illustration techniques, and solid colors "
"without gradients or shading. Preserve a retro-futuristic aesthetic "
"influenced by mid-century futurism and 1960s psychedelia, "
"prioritizing clear visual storytelling and thematic clarity above all else."
)
custom_colors = [
@@ -99,12 +102,12 @@ async def generate_agent_image_v2(graph: BaseGraph | AgentGraph) -> io.BytesIO:
return io.BytesIO(response.content)
async def generate_agent_image_v1(agent: BaseGraph | AgentGraph) -> io.BytesIO:
async def generate_agent_image_v1(agent: GraphBaseMeta | AgentGraph) -> io.BytesIO:
"""
Generate an image for an agent using Flux model via Replicate API.
Args:
agent (Graph): The agent to generate an image for
agent (GraphBaseMeta | AgentGraph): The agent to generate an image for
Returns:
io.BytesIO: The generated image as bytes
@@ -114,7 +117,13 @@ async def generate_agent_image_v1(agent: BaseGraph | AgentGraph) -> io.BytesIO:
raise ValueError("Missing Replicate API key in settings")
# Construct prompt from agent details
prompt = f"Create a visually engaging app store thumbnail for the AI agent that highlights what it does in a clear and captivating way:\n- **Name**: {agent.name}\n- **Description**: {agent.description}\nFocus on showcasing its core functionality with an appealing design."
prompt = (
"Create a visually engaging app store thumbnail for the AI agent "
"that highlights what it does in a clear and captivating way:\n"
f"- **Name**: {agent.name}\n"
f"- **Description**: {agent.description}\n"
f"Focus on showcasing its core functionality with an appealing design."
)
# Set up Replicate client
client = ReplicateClient(api_token=settings.secrets.replicate_api_key)

View File

@@ -278,7 +278,7 @@ async def get_agent(
)
async def get_graph_meta_by_store_listing_version_id(
store_listing_version_id: str,
) -> backend.data.graph.GraphMeta:
) -> backend.data.graph.GraphModelWithoutNodes:
"""
Get Agent Graph from Store Listing Version ID.
"""

View File

@@ -246,7 +246,9 @@ class BlockSchema(BaseModel):
f"is not of type {CredentialsMetaInput.__name__}"
)
credentials_fields[field_name].validate_credentials_field_schema(cls)
CredentialsMetaInput.validate_credentials_field_schema(
cls.get_field_schema(field_name), field_name
)
elif field_name in credentials_fields:
raise KeyError(
@@ -317,6 +319,8 @@ class BlockSchema(BaseModel):
"credentials_provider": [config.get("provider", "google")],
"credentials_types": [config.get("type", "oauth2")],
"credentials_scopes": config.get("scopes"),
"is_auto_credential": True,
"input_field_name": info["field_name"],
}
result[kwarg_name] = CredentialsFieldInfo.model_validate(
auto_schema, by_alias=True

View File

@@ -3,7 +3,7 @@ import logging
import uuid
from collections import defaultdict
from datetime import datetime, timezone
from typing import TYPE_CHECKING, Annotated, Any, Literal, Optional, cast
from typing import TYPE_CHECKING, Annotated, Any, Literal, Optional, Self, cast
from prisma.enums import SubmissionStatus
from prisma.models import (
@@ -20,7 +20,7 @@ from prisma.types import (
AgentNodeLinkCreateInput,
StoreListingVersionWhereInput,
)
from pydantic import BaseModel, BeforeValidator, Field, create_model
from pydantic import BaseModel, BeforeValidator, Field
from pydantic.fields import computed_field
from backend.blocks.agent import AgentExecutorBlock
@@ -30,7 +30,6 @@ from backend.data.db import prisma as db
from backend.data.dynamic_fields import is_tool_pin, sanitize_pin_name
from backend.data.includes import MAX_GRAPH_VERSIONS_FETCH
from backend.data.model import (
CredentialsField,
CredentialsFieldInfo,
CredentialsMetaInput,
is_credentials_field_name,
@@ -45,7 +44,6 @@ from .block import (
AnyBlockSchema,
Block,
BlockInput,
BlockSchema,
BlockType,
EmptySchema,
get_block,
@@ -113,10 +111,12 @@ class Link(BaseDbModel):
class Node(BaseDbModel):
block_id: str
input_default: BlockInput = {} # dict[input_name, default_value]
metadata: dict[str, Any] = {}
input_links: list[Link] = []
output_links: list[Link] = []
input_default: BlockInput = Field( # dict[input_name, default_value]
default_factory=dict
)
metadata: dict[str, Any] = Field(default_factory=dict)
input_links: list[Link] = Field(default_factory=list)
output_links: list[Link] = Field(default_factory=list)
@property
def credentials_optional(self) -> bool:
@@ -221,18 +221,33 @@ class NodeModel(Node):
return result
class BaseGraph(BaseDbModel):
class GraphBaseMeta(BaseDbModel):
"""
Shared base for `GraphMeta` and `BaseGraph`, with core graph metadata fields.
"""
version: int = 1
is_active: bool = True
name: str
description: str
instructions: str | None = None
recommended_schedule_cron: str | None = None
nodes: list[Node] = []
links: list[Link] = []
forked_from_id: str | None = None
forked_from_version: int | None = None
class BaseGraph(GraphBaseMeta):
"""
Graph with nodes, links, and computed I/O schema fields.
Used to represent sub-graphs within a `Graph`. Contains the full graph
structure including nodes and links, plus computed fields for schemas
and trigger info. Does NOT include user_id or created_at (see GraphModel).
"""
nodes: list[Node] = Field(default_factory=list)
links: list[Link] = Field(default_factory=list)
@computed_field
@property
def input_schema(self) -> dict[str, Any]:
@@ -361,44 +376,78 @@ class GraphTriggerInfo(BaseModel):
class Graph(BaseGraph):
sub_graphs: list[BaseGraph] = [] # Flattened sub-graphs
"""Creatable graph model used in API create/update endpoints."""
sub_graphs: list[BaseGraph] = Field(default_factory=list) # Flattened sub-graphs
class GraphMeta(GraphBaseMeta):
"""
Lightweight graph metadata model representing an existing graph from the database,
for use in listings and summaries.
Lacks `GraphModel`'s nodes, links, and expensive computed fields.
Use for list endpoints where full graph data is not needed and performance matters.
"""
id: str # type: ignore
version: int # type: ignore
user_id: str
created_at: datetime
@classmethod
def from_db(cls, graph: "AgentGraph") -> Self:
return cls(
id=graph.id,
version=graph.version,
is_active=graph.isActive,
name=graph.name or "",
description=graph.description or "",
instructions=graph.instructions,
recommended_schedule_cron=graph.recommendedScheduleCron,
forked_from_id=graph.forkedFromId,
forked_from_version=graph.forkedFromVersion,
user_id=graph.userId,
created_at=graph.createdAt,
)
class GraphModel(Graph, GraphMeta):
"""
Full graph model representing an existing graph from the database.
This is the primary model for working with persisted graphs. Includes all
graph data (nodes, links, sub_graphs) plus user ownership and timestamps.
Provides computed fields (input_schema, output_schema, etc.) used during
set-up (frontend) and execution (backend).
Inherits from:
- `Graph`: provides structure (nodes, links, sub_graphs) and computed schemas
- `GraphMeta`: provides user_id, created_at for database records
"""
nodes: list[NodeModel] = Field(default_factory=list) # type: ignore
@property
def starting_nodes(self) -> list[NodeModel]:
outbound_nodes = {link.sink_id for link in self.links}
input_nodes = {
node.id for node in self.nodes if node.block.block_type == BlockType.INPUT
}
return [
node
for node in self.nodes
if node.id not in outbound_nodes or node.id in input_nodes
]
@property
def webhook_input_node(self) -> NodeModel | None: # type: ignore
return cast(NodeModel, super().webhook_input_node)
@computed_field
@property
def credentials_input_schema(self) -> dict[str, Any]:
schema = self._credentials_input_schema.jsonschema()
# Determine which credential fields are required based on credentials_optional metadata
graph_credentials_inputs = self.aggregate_credentials_inputs()
required_fields = []
# Build a map of node_id -> node for quick lookup
all_nodes = {node.id: node for node in self.nodes}
for sub_graph in self.sub_graphs:
for node in sub_graph.nodes:
all_nodes[node.id] = node
for field_key, (
_field_info,
node_field_pairs,
) in graph_credentials_inputs.items():
# A field is required if ANY node using it has credentials_optional=False
is_required = False
for node_id, _field_name in node_field_pairs:
node = all_nodes.get(node_id)
if node and not node.credentials_optional:
is_required = True
break
if is_required:
required_fields.append(field_key)
schema["required"] = required_fields
return schema
@property
def _credentials_input_schema(self) -> type[BlockSchema]:
graph_credentials_inputs = self.aggregate_credentials_inputs()
graph_credentials_inputs = self.regular_credentials_inputs
logger.debug(
f"Combined credentials input fields for graph #{self.id} ({self.name}): "
f"{graph_credentials_inputs}"
@@ -406,8 +455,8 @@ class Graph(BaseGraph):
# Warn if same-provider credentials inputs can't be combined (= bad UX)
graph_cred_fields = list(graph_credentials_inputs.values())
for i, (field, keys) in enumerate(graph_cred_fields):
for other_field, other_keys in list(graph_cred_fields)[i + 1 :]:
for i, (field, keys, _) in enumerate(graph_cred_fields):
for other_field, other_keys, _ in list(graph_cred_fields)[i + 1 :]:
if field.provider != other_field.provider:
continue
if ProviderName.HTTP in field.provider:
@@ -423,31 +472,78 @@ class Graph(BaseGraph):
f"keys: {keys} <> {other_keys}."
)
fields: dict[str, tuple[type[CredentialsMetaInput], CredentialsMetaInput]] = {
agg_field_key: (
CredentialsMetaInput[
Literal[tuple(field_info.provider)], # type: ignore
Literal[tuple(field_info.supported_types)], # type: ignore
],
CredentialsField(
required_scopes=set(field_info.required_scopes or []),
discriminator=field_info.discriminator,
discriminator_mapping=field_info.discriminator_mapping,
discriminator_values=field_info.discriminator_values,
),
)
for agg_field_key, (field_info, _) in graph_credentials_inputs.items()
}
# Build JSON schema directly to avoid expensive create_model + validation overhead
properties = {}
required_fields = []
return create_model(
self.name.replace(" ", "") + "CredentialsInputSchema",
__base__=BlockSchema,
**fields, # type: ignore
)
for agg_field_key, (
field_info,
_,
is_required,
) in graph_credentials_inputs.items():
providers = list(field_info.provider)
cred_types = list(field_info.supported_types)
field_schema: dict[str, Any] = {
"credentials_provider": providers,
"credentials_types": cred_types,
"type": "object",
"properties": {
"id": {"title": "Id", "type": "string"},
"title": {
"anyOf": [{"type": "string"}, {"type": "null"}],
"default": None,
"title": "Title",
},
"provider": {
"title": "Provider",
"type": "string",
**(
{"enum": providers}
if len(providers) > 1
else {"const": providers[0]}
),
},
"type": {
"title": "Type",
"type": "string",
**(
{"enum": cred_types}
if len(cred_types) > 1
else {"const": cred_types[0]}
),
},
},
"required": ["id", "provider", "type"],
}
# Add other (optional) field info items
field_schema.update(
field_info.model_dump(
by_alias=True,
exclude_defaults=True,
exclude={"provider", "supported_types"}, # already included above
)
)
# Ensure field schema is well-formed
CredentialsMetaInput.validate_credentials_field_schema(
field_schema, agg_field_key
)
properties[agg_field_key] = field_schema
if is_required:
required_fields.append(agg_field_key)
return {
"type": "object",
"properties": properties,
"required": required_fields,
}
def aggregate_credentials_inputs(
self,
) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]]]]:
) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]], bool]]:
"""
Returns:
dict[aggregated_field_key, tuple(
@@ -455,13 +551,19 @@ class Graph(BaseGraph):
(now includes discriminator_values from matching nodes)
set[(node_id, field_name)]: Node credentials fields that are
compatible with this aggregated field spec
bool: True if the field is required (any node has credentials_optional=False)
)]
"""
# First collect all credential field data with input defaults
node_credential_data = []
# Track (field_info, (node_id, field_name), is_required) for each credential field
node_credential_data: list[tuple[CredentialsFieldInfo, tuple[str, str]]] = []
node_required_map: dict[str, bool] = {} # node_id -> is_required
for graph in [self] + self.sub_graphs:
for node in graph.nodes:
# Track if this node requires credentials (credentials_optional=False means required)
node_required_map[node.id] = not node.credentials_optional
for (
field_name,
field_info,
@@ -485,37 +587,43 @@ class Graph(BaseGraph):
)
# Combine credential field info (this will merge discriminator_values automatically)
return CredentialsFieldInfo.combine(*node_credential_data)
combined = CredentialsFieldInfo.combine(*node_credential_data)
class GraphModel(Graph):
user_id: str
nodes: list[NodeModel] = [] # type: ignore
created_at: datetime
@property
def starting_nodes(self) -> list[NodeModel]:
outbound_nodes = {link.sink_id for link in self.links}
input_nodes = {
node.id for node in self.nodes if node.block.block_type == BlockType.INPUT
# Add is_required flag to each aggregated field
# A field is required if ANY node using it has credentials_optional=False
return {
key: (
field_info,
node_field_pairs,
any(
node_required_map.get(node_id, True)
for node_id, _ in node_field_pairs
),
)
for key, (field_info, node_field_pairs) in combined.items()
}
return [
node
for node in self.nodes
if node.id not in outbound_nodes or node.id in input_nodes
]
@property
def webhook_input_node(self) -> NodeModel | None: # type: ignore
return cast(NodeModel, super().webhook_input_node)
def regular_credentials_inputs(
self,
) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]], bool]]:
"""Credentials that need explicit user mapping (CredentialsMetaInput fields)."""
return {
k: v
for k, v in self.aggregate_credentials_inputs().items()
if not v[0].is_auto_credential
}
def meta(self) -> "GraphMeta":
"""
Returns a GraphMeta object with metadata about the graph.
This is used to return metadata about the graph without exposing nodes and links.
"""
return GraphMeta.from_graph(self)
@property
def auto_credentials_inputs(
self,
) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]], bool]]:
"""Credentials embedded in file fields (_credentials_id), resolved at execution time."""
return {
k: v
for k, v in self.aggregate_credentials_inputs().items()
if v[0].is_auto_credential
}
def reassign_ids(self, user_id: str, reassign_graph_id: bool = False):
"""
@@ -567,6 +675,16 @@ class GraphModel(Graph):
) and graph_id in graph_id_map:
node.input_default["graph_id"] = graph_id_map[graph_id]
# Clear auto-credentials references (e.g., _credentials_id in
# GoogleDriveFile fields) so the new user must re-authenticate
# with their own account
for node in graph.nodes:
if not node.input_default:
continue
for key, value in node.input_default.items():
if isinstance(value, dict) and "_credentials_id" in value:
del value["_credentials_id"]
def validate_graph(
self,
for_run: bool = False,
@@ -799,13 +917,14 @@ class GraphModel(Graph):
if is_static_output_block(link.source_id):
link.is_static = True # Each value block output should be static.
@staticmethod
def from_db(
@classmethod
def from_db( # type: ignore[reportIncompatibleMethodOverride]
cls,
graph: AgentGraph,
for_export: bool = False,
sub_graphs: list[AgentGraph] | None = None,
) -> "GraphModel":
return GraphModel(
) -> Self:
return cls(
id=graph.id,
user_id=graph.userId if not for_export else "",
version=graph.version,
@@ -831,17 +950,28 @@ class GraphModel(Graph):
],
)
def hide_nodes(self) -> "GraphModelWithoutNodes":
"""
Returns a copy of the `GraphModel` with nodes, links, and sub-graphs hidden
(excluded from serialization). They are still present in the model instance
so all computed fields (e.g. `credentials_input_schema`) still work.
"""
return GraphModelWithoutNodes.model_validate(self, from_attributes=True)
class GraphMeta(Graph):
user_id: str
# Easy work-around to prevent exposing nodes and links in the API response
nodes: list[NodeModel] = Field(default=[], exclude=True) # type: ignore
links: list[Link] = Field(default=[], exclude=True)
class GraphModelWithoutNodes(GraphModel):
"""
GraphModel variant that excludes nodes, links, and sub-graphs from serialization.
@staticmethod
def from_graph(graph: GraphModel) -> "GraphMeta":
return GraphMeta(**graph.model_dump())
Used in contexts like the store where exposing internal graph structure
is not desired. Inherits all computed fields from GraphModel but marks
nodes and links as excluded from JSON output.
"""
nodes: list[NodeModel] = Field(default_factory=list, exclude=True)
links: list[Link] = Field(default_factory=list, exclude=True)
sub_graphs: list[BaseGraph] = Field(default_factory=list, exclude=True)
class GraphsPaginated(BaseModel):
@@ -912,21 +1042,11 @@ async def list_graphs_paginated(
where=where_clause,
distinct=["id"],
order={"version": "desc"},
include=AGENT_GRAPH_INCLUDE,
skip=offset,
take=page_size,
)
graph_models: list[GraphMeta] = []
for graph in graphs:
try:
graph_meta = GraphModel.from_db(graph).meta()
# Trigger serialization to validate that the graph is well formed
graph_meta.model_dump()
graph_models.append(graph_meta)
except Exception as e:
logger.error(f"Error processing graph {graph.id}: {e}")
continue
graph_models = [GraphMeta.from_db(graph) for graph in graphs]
return GraphsPaginated(
graphs=graph_models,

View File

@@ -463,3 +463,328 @@ def test_node_credentials_optional_with_other_metadata():
assert node.credentials_optional is True
assert node.metadata["position"] == {"x": 100, "y": 200}
assert node.metadata["customized_name"] == "My Custom Node"
# ============================================================================
# Tests for _reassign_ids credential clearing (Fix 3: SECRT-1772)
def test_combine_preserves_is_auto_credential_flag():
"""
CredentialsFieldInfo.combine() must propagate is_auto_credential and
input_field_name to the combined result. Regression test for reviewer
finding that combine() dropped these fields.
"""
from backend.data.model import CredentialsFieldInfo
auto_field = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["google"],
"credentials_types": ["oauth2"],
"credentials_scopes": ["drive.readonly"],
"is_auto_credential": True,
"input_field_name": "spreadsheet",
},
by_alias=True,
)
# combine() takes *args of (field_info, key) tuples
combined = CredentialsFieldInfo.combine(
(auto_field, ("node-1", "credentials")),
(auto_field, ("node-2", "credentials")),
)
assert len(combined) == 1
group_key = next(iter(combined))
combined_info, combined_keys = combined[group_key]
assert combined_info.is_auto_credential is True
assert combined_info.input_field_name == "spreadsheet"
assert combined_keys == {("node-1", "credentials"), ("node-2", "credentials")}
def test_combine_preserves_regular_credential_defaults():
"""Regular credentials should have is_auto_credential=False after combine()."""
from backend.data.model import CredentialsFieldInfo
regular_field = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["github"],
"credentials_types": ["api_key"],
"is_auto_credential": False,
},
by_alias=True,
)
combined = CredentialsFieldInfo.combine(
(regular_field, ("node-1", "credentials")),
)
group_key = next(iter(combined))
combined_info, _ = combined[group_key]
assert combined_info.is_auto_credential is False
assert combined_info.input_field_name is None
# ============================================================================
def test_reassign_ids_clears_credentials_id():
"""
[SECRT-1772] _reassign_ids should clear _credentials_id from
GoogleDriveFile-style input_default fields so forked agents
don't retain the original creator's credential references.
"""
from backend.data.graph import GraphModel
node = Node(
id="node-1",
block_id=StoreValueBlock().id,
input_default={
"spreadsheet": {
"_credentials_id": "original-cred-id",
"id": "file-123",
"name": "test.xlsx",
"mimeType": "application/vnd.google-apps.spreadsheet",
"url": "https://docs.google.com/spreadsheets/d/file-123",
},
},
)
graph = Graph(
id="test-graph",
name="Test",
description="Test",
nodes=[node],
links=[],
)
GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})
# _credentials_id key should be removed (not set to None) so that
# _acquire_auto_credentials correctly errors instead of treating it as chained data
assert "_credentials_id" not in graph.nodes[0].input_default["spreadsheet"]
def test_reassign_ids_preserves_non_credential_fields():
"""
Regression guard: _reassign_ids should NOT modify non-credential fields
like name, mimeType, id, url.
"""
from backend.data.graph import GraphModel
node = Node(
id="node-1",
block_id=StoreValueBlock().id,
input_default={
"spreadsheet": {
"_credentials_id": "cred-abc",
"id": "file-123",
"name": "test.xlsx",
"mimeType": "application/vnd.google-apps.spreadsheet",
"url": "https://docs.google.com/spreadsheets/d/file-123",
},
},
)
graph = Graph(
id="test-graph",
name="Test",
description="Test",
nodes=[node],
links=[],
)
GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})
field = graph.nodes[0].input_default["spreadsheet"]
assert field["id"] == "file-123"
assert field["name"] == "test.xlsx"
assert field["mimeType"] == "application/vnd.google-apps.spreadsheet"
assert field["url"] == "https://docs.google.com/spreadsheets/d/file-123"
def test_reassign_ids_handles_no_credentials():
"""
Regression guard: _reassign_ids should not error when input_default
has no dict fields with _credentials_id.
"""
from backend.data.graph import GraphModel
node = Node(
id="node-1",
block_id=StoreValueBlock().id,
input_default={
"input": "some value",
"another_input": 42,
},
)
graph = Graph(
id="test-graph",
name="Test",
description="Test",
nodes=[node],
links=[],
)
GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})
# Should not error, fields unchanged
assert graph.nodes[0].input_default["input"] == "some value"
assert graph.nodes[0].input_default["another_input"] == 42
def test_reassign_ids_handles_multiple_credential_fields():
"""
[SECRT-1772] When a node has multiple dict fields with _credentials_id,
ALL of them should be cleared.
"""
from backend.data.graph import GraphModel
node = Node(
id="node-1",
block_id=StoreValueBlock().id,
input_default={
"spreadsheet": {
"_credentials_id": "cred-1",
"id": "file-1",
"name": "file1.xlsx",
},
"doc_file": {
"_credentials_id": "cred-2",
"id": "file-2",
"name": "file2.docx",
},
"plain_input": "not a dict",
},
)
graph = Graph(
id="test-graph",
name="Test",
description="Test",
nodes=[node],
links=[],
)
GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})
assert "_credentials_id" not in graph.nodes[0].input_default["spreadsheet"]
assert "_credentials_id" not in graph.nodes[0].input_default["doc_file"]
assert graph.nodes[0].input_default["plain_input"] == "not a dict"
# ============================================================================
# Tests for discriminate() field propagation
def test_discriminate_preserves_is_auto_credential_flag():
"""
CredentialsFieldInfo.discriminate() must propagate is_auto_credential and
input_field_name to the discriminated result. Regression test for
discriminate() dropping these fields (same class of bug as combine()).
"""
from backend.data.model import CredentialsFieldInfo
auto_field = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["google", "openai"],
"credentials_types": ["oauth2"],
"credentials_scopes": ["drive.readonly"],
"is_auto_credential": True,
"input_field_name": "spreadsheet",
"discriminator": "model",
"discriminator_mapping": {"gpt-4": "openai", "gemini": "google"},
},
by_alias=True,
)
discriminated = auto_field.discriminate("gemini")
assert discriminated.is_auto_credential is True
assert discriminated.input_field_name == "spreadsheet"
assert discriminated.provider == frozenset(["google"])
def test_discriminate_preserves_regular_credential_defaults():
"""Regular credentials should have is_auto_credential=False after discriminate()."""
from backend.data.model import CredentialsFieldInfo
regular_field = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["google", "openai"],
"credentials_types": ["api_key"],
"is_auto_credential": False,
"discriminator": "model",
"discriminator_mapping": {"gpt-4": "openai", "gemini": "google"},
},
by_alias=True,
)
discriminated = regular_field.discriminate("gpt-4")
assert discriminated.is_auto_credential is False
assert discriminated.input_field_name is None
assert discriminated.provider == frozenset(["openai"])
# ============================================================================
# Tests for credentials_input_schema excluding auto_credentials
def test_credentials_input_schema_excludes_auto_creds():
"""
GraphModel.credentials_input_schema should exclude auto_credentials
(is_auto_credential=True) from the schema. Auto_credentials are
transparently resolved at execution time via file picker data.
"""
from datetime import datetime, timezone
from unittest.mock import PropertyMock, patch
from backend.data.graph import GraphModel, NodeModel
from backend.data.model import CredentialsFieldInfo
regular_field_info = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["github"],
"credentials_types": ["api_key"],
"is_auto_credential": False,
},
by_alias=True,
)
graph = GraphModel(
id="test-graph",
version=1,
name="Test",
description="Test",
user_id="test-user",
created_at=datetime.now(timezone.utc),
nodes=[
NodeModel(
id="node-1",
block_id=StoreValueBlock().id,
input_default={},
graph_id="test-graph",
graph_version=1,
),
],
links=[],
)
# Mock regular_credentials_inputs to return only the non-auto field (3-tuple)
regular_only = {
"github_credentials": (
regular_field_info,
{("node-1", "credentials")},
True,
),
}
with patch.object(
type(graph),
"regular_credentials_inputs",
new_callable=PropertyMock,
return_value=regular_only,
):
schema = graph.credentials_input_schema
field_names = set(schema.get("properties", {}).keys())
# Should include regular credential but NOT auto_credential
assert "github_credentials" in field_names
assert "google_credentials" not in field_names

View File

@@ -163,7 +163,6 @@ class User(BaseModel):
if TYPE_CHECKING:
from prisma.models import User as PrismaUser
from backend.data.block import BlockSchema
T = TypeVar("T")
logger = logging.getLogger(__name__)
@@ -508,15 +507,13 @@ class CredentialsMetaInput(BaseModel, Generic[CP, CT]):
def allowed_cred_types(cls) -> tuple[CredentialsType, ...]:
return get_args(cls.model_fields["type"].annotation)
@classmethod
def validate_credentials_field_schema(cls, model: type["BlockSchema"]):
@staticmethod
def validate_credentials_field_schema(
field_schema: dict[str, Any], field_name: str
):
"""Validates the schema of a credentials input field"""
field_name = next(
name for name, type in model.get_credentials_fields().items() if type is cls
)
field_schema = model.jsonschema()["properties"][field_name]
try:
schema_extra = CredentialsFieldInfo[CP, CT].model_validate(field_schema)
field_info = CredentialsFieldInfo[CP, CT].model_validate(field_schema)
except ValidationError as e:
if "Field required [type=missing" not in str(e):
raise
@@ -526,11 +523,11 @@ class CredentialsMetaInput(BaseModel, Generic[CP, CT]):
f"{field_schema}"
) from e
providers = cls.allowed_providers()
providers = field_info.provider
if (
providers is not None
and len(providers) > 1
and not schema_extra.discriminator
and not field_info.discriminator
):
raise TypeError(
f"Multi-provider CredentialsField '{field_name}' "
@@ -574,6 +571,8 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
discriminator: Optional[str] = None
discriminator_mapping: Optional[dict[str, CP]] = None
discriminator_values: set[Any] = Field(default_factory=set)
is_auto_credential: bool = False
input_field_name: Optional[str] = None
@classmethod
def combine(
@@ -654,6 +653,9 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
+ "_credentials"
)
# Propagate is_auto_credential from the combined field.
# All fields in a group should share the same is_auto_credential
# value since auto and regular credentials serve different purposes.
result[group_key] = (
CredentialsFieldInfo[CP, CT](
credentials_provider=combined.provider,
@@ -662,6 +664,8 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
discriminator=combined.discriminator,
discriminator_mapping=combined.discriminator_mapping,
discriminator_values=set(all_discriminator_values),
is_auto_credential=combined.is_auto_credential,
input_field_name=combined.input_field_name,
),
combined_keys,
)
@@ -687,6 +691,8 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
discriminator=self.discriminator,
discriminator_mapping=self.discriminator_mapping,
discriminator_values=self.discriminator_values,
is_auto_credential=self.is_auto_credential,
input_field_name=self.input_field_name,
)

View File

@@ -172,6 +172,81 @@ def execute_graph(
T = TypeVar("T")
async def _acquire_auto_credentials(
input_model: type[BlockSchema],
input_data: dict[str, Any],
creds_manager: "IntegrationCredentialsManager",
user_id: str,
) -> tuple[dict[str, Any], list[AsyncRedisLock]]:
"""
Resolve auto_credentials from GoogleDriveFileField-style inputs.
Returns:
(extra_exec_kwargs, locks): kwargs to inject into block execution, and
credential locks to release after execution completes.
"""
extra_exec_kwargs: dict[str, Any] = {}
locks: list[AsyncRedisLock] = []
# NOTE: If a block ever has multiple auto-credential fields, a ValueError
# on a later field will strand locks acquired for earlier fields. They'll
# auto-expire via Redis TTL, but add a try/except to release partial locks
# if that becomes a real scenario.
for kwarg_name, info in input_model.get_auto_credentials_fields().items():
field_name = info["field_name"]
field_data = input_data.get(field_name)
if field_data and isinstance(field_data, dict):
# Check if _credentials_id key exists in the field data
if "_credentials_id" in field_data:
cred_id = field_data["_credentials_id"]
if cred_id:
# Credential ID provided - acquire credentials
provider = info.get("config", {}).get(
"provider", "external service"
)
file_name = field_data.get("name", "selected file")
try:
credentials, lock = await creds_manager.acquire(
user_id, cred_id
)
locks.append(lock)
extra_exec_kwargs[kwarg_name] = credentials
except ValueError:
raise ValueError(
f"{provider.capitalize()} credentials for "
f"'{file_name}' in field '{field_name}' are not "
f"available in your account. "
f"This can happen if the agent was created by another "
f"user or the credentials were deleted. "
f"Please open the agent in the builder and re-select "
f"the file to authenticate with your own account."
)
# else: _credentials_id is explicitly None, skip (chained data)
else:
# _credentials_id key missing entirely - this is an error
provider = info.get("config", {}).get("provider", "external service")
file_name = field_data.get("name", "selected file")
raise ValueError(
f"Authentication missing for '{file_name}' in field "
f"'{field_name}'. Please re-select the file to authenticate "
f"with {provider.capitalize()}."
)
elif field_data is None and field_name not in input_data:
# Field not in input_data at all = connected from upstream block, skip
pass
else:
# field_data is None/empty but key IS in input_data = user didn't select
provider = info.get("config", {}).get("provider", "external service")
raise ValueError(
f"No file selected for '{field_name}'. "
f"Please select a file to provide "
f"{provider.capitalize()} authentication."
)
return extra_exec_kwargs, locks
async def execute_node(
node: Node,
data: NodeExecutionEntry,
@@ -271,41 +346,14 @@ async def execute_node(
extra_exec_kwargs[field_name] = credentials
# Handle auto-generated credentials (e.g., from GoogleDriveFileInput)
for kwarg_name, info in input_model.get_auto_credentials_fields().items():
field_name = info["field_name"]
field_data = input_data.get(field_name)
if field_data and isinstance(field_data, dict):
# Check if _credentials_id key exists in the field data
if "_credentials_id" in field_data:
cred_id = field_data["_credentials_id"]
if cred_id:
# Credential ID provided - acquire credentials
provider = info.get("config", {}).get(
"provider", "external service"
)
file_name = field_data.get("name", "selected file")
try:
credentials, lock = await creds_manager.acquire(
user_id, cred_id
)
creds_locks.append(lock)
extra_exec_kwargs[kwarg_name] = credentials
except ValueError:
# Credential was deleted or doesn't exist
raise ValueError(
f"Authentication expired for '{file_name}' in field '{field_name}'. "
f"The saved {provider.capitalize()} credentials no longer exist. "
f"Please re-select the file to re-authenticate."
)
# else: _credentials_id is explicitly None, skip credentials (for chained data)
else:
# _credentials_id key missing entirely - this is an error
provider = info.get("config", {}).get("provider", "external service")
file_name = field_data.get("name", "selected file")
raise ValueError(
f"Authentication missing for '{file_name}' in field '{field_name}'. "
f"Please re-select the file to authenticate with {provider.capitalize()}."
)
auto_extra_kwargs, auto_locks = await _acquire_auto_credentials(
input_model=input_model,
input_data=input_data,
creds_manager=creds_manager,
user_id=user_id,
)
extra_exec_kwargs.update(auto_extra_kwargs)
creds_locks.extend(auto_locks)
output_size = 0

View File
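
The NOTE in _acquire_auto_credentials() leaves cleanup of partially acquired locks to the Redis TTL. If a block ever does declare multiple auto-credential fields, a minimal sketch of the deferred try/except could look like the following. This is not part of the diff: the helper name is hypothetical, the loop is simplified (it skips rather than raises for missing selections and omits the error messages), and it assumes AsyncRedisLock.release() is awaitable.

async def _acquire_auto_credentials_with_cleanup(
    input_model, input_data, creds_manager, user_id
):
    """Sketch only: same acquisition loop, but locks acquired for earlier
    fields are released if a later field fails."""
    extra_exec_kwargs = {}
    locks = []
    try:
        for kwarg_name, info in input_model.get_auto_credentials_fields().items():
            field_data = input_data.get(info["field_name"])
            if not isinstance(field_data, dict):
                continue  # simplified: the real helper raises for missing selections
            cred_id = field_data.get("_credentials_id")
            if not cred_id:
                continue  # None = chained data from an upstream block
            credentials, lock = await creds_manager.acquire(user_id, cred_id)
            locks.append(lock)
            extra_exec_kwargs[kwarg_name] = credentials
    except Exception:
        # Release locks acquired for earlier fields before propagating the error,
        # instead of leaving them to expire via the Redis TTL.
        for lock in locks:
            try:
                await lock.release()
            except Exception:
                pass  # best effort; the TTL remains the backstop
        raise
    return extra_exec_kwargs, locks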

@@ -0,0 +1,320 @@
"""
Tests for auto_credentials handling in execute_node().
These tests cover the _acquire_auto_credentials() helper extracted from
execute_node() (manager.py lines 273-308).
"""
import pytest
from pytest_mock import MockerFixture
@pytest.fixture
def google_drive_file_data():
return {
"valid": {
"_credentials_id": "cred-id-123",
"id": "file-123",
"name": "test.xlsx",
"mimeType": "application/vnd.google-apps.spreadsheet",
},
"chained": {
"_credentials_id": None,
"id": "file-456",
"name": "chained.xlsx",
"mimeType": "application/vnd.google-apps.spreadsheet",
},
"missing_key": {
"id": "file-789",
"name": "bad.xlsx",
"mimeType": "application/vnd.google-apps.spreadsheet",
},
}
@pytest.fixture
def mock_input_model(mocker: MockerFixture):
"""Create a mock input model with get_auto_credentials_fields() returning one field."""
input_model = mocker.MagicMock()
input_model.get_auto_credentials_fields.return_value = {
"credentials": {
"field_name": "spreadsheet",
"config": {
"provider": "google",
"type": "oauth2",
"scopes": ["https://www.googleapis.com/auth/drive.readonly"],
},
}
}
return input_model
@pytest.fixture
def mock_creds_manager(mocker: MockerFixture):
manager = mocker.AsyncMock()
mock_lock = mocker.AsyncMock()
mock_creds = mocker.MagicMock()
mock_creds.id = "cred-id-123"
mock_creds.provider = "google"
manager.acquire.return_value = (mock_creds, mock_lock)
return manager, mock_creds, mock_lock
@pytest.mark.asyncio
async def test_auto_credentials_happy_path(
mocker: MockerFixture,
google_drive_file_data,
mock_input_model,
mock_creds_manager,
):
"""When field_data has a valid _credentials_id, credentials should be acquired."""
from backend.executor.manager import _acquire_auto_credentials
manager, mock_creds, mock_lock = mock_creds_manager
input_data = {"spreadsheet": google_drive_file_data["valid"]}
extra_kwargs, locks = await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)
manager.acquire.assert_called_once_with("user-1", "cred-id-123")
assert extra_kwargs["credentials"] == mock_creds
assert mock_lock in locks
@pytest.mark.asyncio
async def test_auto_credentials_field_none_static_raises(
mocker: MockerFixture,
mock_input_model,
mock_creds_manager,
):
"""
[THE BUG FIX TEST — OPEN-2895]
When field_data is None and the key IS in input_data (user didn't select a file),
should raise ValueError instead of silently skipping.
"""
from backend.executor.manager import _acquire_auto_credentials
manager, _, _ = mock_creds_manager
# Key is present but value is None = user didn't select a file
input_data = {"spreadsheet": None}
with pytest.raises(ValueError, match="No file selected"):
await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)
@pytest.mark.asyncio
async def test_auto_credentials_field_absent_skips(
mocker: MockerFixture,
mock_input_model,
mock_creds_manager,
):
"""
When the field key is NOT in input_data at all (upstream connection),
should skip without error.
"""
from backend.executor.manager import _acquire_auto_credentials
manager, _, _ = mock_creds_manager
# Key not present = connected from upstream block
input_data = {}
extra_kwargs, locks = await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)
manager.acquire.assert_not_called()
assert "credentials" not in extra_kwargs
assert locks == []
@pytest.mark.asyncio
async def test_auto_credentials_chained_cred_id_none(
mocker: MockerFixture,
google_drive_file_data,
mock_input_model,
mock_creds_manager,
):
"""
When _credentials_id is explicitly None (chained data from upstream),
should skip credential acquisition.
"""
from backend.executor.manager import _acquire_auto_credentials
manager, _, _ = mock_creds_manager
input_data = {"spreadsheet": google_drive_file_data["chained"]}
extra_kwargs, locks = await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)
manager.acquire.assert_not_called()
assert "credentials" not in extra_kwargs
@pytest.mark.asyncio
async def test_auto_credentials_missing_cred_id_key_raises(
mocker: MockerFixture,
google_drive_file_data,
mock_input_model,
mock_creds_manager,
):
"""
When _credentials_id key is missing entirely from field_data dict,
should raise ValueError.
"""
from backend.executor.manager import _acquire_auto_credentials
manager, _, _ = mock_creds_manager
input_data = {"spreadsheet": google_drive_file_data["missing_key"]}
with pytest.raises(ValueError, match="Authentication missing"):
await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)
@pytest.mark.asyncio
async def test_auto_credentials_ownership_mismatch_error(
mocker: MockerFixture,
google_drive_file_data,
mock_input_model,
mock_creds_manager,
):
"""
[SECRT-1772] When acquire() raises ValueError (credential belongs to another user),
the error message should mention 'not available' (not 'expired').
"""
from backend.executor.manager import _acquire_auto_credentials
manager, _, _ = mock_creds_manager
manager.acquire.side_effect = ValueError(
"Credentials #cred-id-123 for user #user-2 not found"
)
input_data = {"spreadsheet": google_drive_file_data["valid"]}
with pytest.raises(ValueError, match="not available in your account"):
await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-2",
)
@pytest.mark.asyncio
async def test_auto_credentials_deleted_credential_error(
mocker: MockerFixture,
google_drive_file_data,
mock_input_model,
mock_creds_manager,
):
"""
[SECRT-1772] When acquire() raises ValueError (credential was deleted),
the error message should mention 'not available' (not 'expired').
"""
from backend.executor.manager import _acquire_auto_credentials
manager, _, _ = mock_creds_manager
manager.acquire.side_effect = ValueError(
"Credentials #cred-id-123 for user #user-1 not found"
)
input_data = {"spreadsheet": google_drive_file_data["valid"]}
with pytest.raises(ValueError, match="not available in your account"):
await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)
@pytest.mark.asyncio
async def test_auto_credentials_lock_appended(
mocker: MockerFixture,
google_drive_file_data,
mock_input_model,
mock_creds_manager,
):
"""Lock from acquire() should be included in returned locks list."""
from backend.executor.manager import _acquire_auto_credentials
manager, _, mock_lock = mock_creds_manager
input_data = {"spreadsheet": google_drive_file_data["valid"]}
extra_kwargs, locks = await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)
assert len(locks) == 1
assert locks[0] is mock_lock
@pytest.mark.asyncio
async def test_auto_credentials_multiple_fields(
mocker: MockerFixture,
mock_creds_manager,
):
"""When there are multiple auto_credentials fields, only valid ones should acquire."""
from backend.executor.manager import _acquire_auto_credentials
manager, mock_creds, mock_lock = mock_creds_manager
input_model = mocker.MagicMock()
input_model.get_auto_credentials_fields.return_value = {
"credentials": {
"field_name": "spreadsheet",
"config": {"provider": "google", "type": "oauth2"},
},
"credentials2": {
"field_name": "doc_file",
"config": {"provider": "google", "type": "oauth2"},
},
}
input_data = {
"spreadsheet": {
"_credentials_id": "cred-id-123",
"id": "file-1",
"name": "file1.xlsx",
},
"doc_file": {
"_credentials_id": None,
"id": "file-2",
"name": "chained.doc",
},
}
extra_kwargs, locks = await _acquire_auto_credentials(
input_model=input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)
# Only the first field should have acquired credentials
manager.acquire.assert_called_once_with("user-1", "cred-id-123")
assert "credentials" in extra_kwargs
assert "credentials2" not in extra_kwargs
assert len(locks) == 1

View File

@@ -259,7 +259,8 @@ async def _validate_node_input_credentials(
# Find any fields of type CredentialsMetaInput
credentials_fields = block.input_schema.get_credentials_fields()
if not credentials_fields:
auto_credentials_fields = block.input_schema.get_auto_credentials_fields()
if not credentials_fields and not auto_credentials_fields:
continue
# Track if any credential field is missing for this node
@@ -339,6 +340,47 @@ async def _validate_node_input_credentials(
] = "Invalid credentials: type/provider mismatch"
continue
# Validate auto-credentials (GoogleDriveFileField-based)
# These have _credentials_id embedded in the file field data
if auto_credentials_fields:
for _kwarg_name, info in auto_credentials_fields.items():
field_name = info["field_name"]
# Check input_default and nodes_input_masks for the field value
field_value = node.input_default.get(field_name)
if nodes_input_masks and node.id in nodes_input_masks:
field_value = nodes_input_masks[node.id].get(
field_name, field_value
)
if field_value and isinstance(field_value, dict):
if "_credentials_id" not in field_value:
# Key removed (e.g., on fork) — needs re-auth
has_missing_credentials = True
credential_errors[node.id][field_name] = (
"Authentication missing for the selected file. "
"Please re-select the file to authenticate with "
"your own account."
)
continue
cred_id = field_value.get("_credentials_id")
if cred_id and isinstance(cred_id, str):
try:
creds_store = get_integration_credentials_store()
creds = await creds_store.get_creds_by_id(user_id, cred_id)
except Exception as e:
has_missing_credentials = True
credential_errors[node.id][
field_name
] = f"Credentials not available: {e}"
continue
if not creds:
has_missing_credentials = True
credential_errors[node.id][field_name] = (
"The saved credentials are not available "
"for your account. Please re-select the file to "
"authenticate with your own account."
)
# If node has optional credentials and any are missing, mark for skipping
# But only if there are no other errors for this node
if (
@@ -370,10 +412,11 @@ def make_node_credentials_input_map(
"""
result: dict[str, dict[str, JsonValue]] = {}
# Get aggregated credentials fields for the graph
graph_cred_inputs = graph.aggregate_credentials_inputs()
# Only map regular credentials (not auto_credentials, which are resolved
# at execution time from _credentials_id in file field data)
graph_cred_inputs = graph.regular_credentials_inputs
for graph_input_name, (_, compatible_node_fields) in graph_cred_inputs.items():
for graph_input_name, (_, compatible_node_fields, _) in graph_cred_inputs.items():
# Best-effort map: skip missing items
if graph_input_name not in graph_credentials_input:
continue

View File
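
For context on the tuple unpacked in make_node_credentials_input_map(): each entry of graph.regular_credentials_inputs maps a graph-level input name to a (field_info, node_field_pairs, is_required) 3-tuple, mirroring the fixture used in the tests below. A minimal sketch, with illustrative values that are not taken from the diff:

from backend.data.model import CredentialsFieldInfo

field_info = CredentialsFieldInfo.model_validate(
    {"credentials_provider": ["github"], "credentials_types": ["api_key"]},
    by_alias=True,
)
graph_cred_inputs = {
    # graph input name -> (field info, {(node_id, node_field_name)}, is_required)
    "github_creds": (field_info, {("node-1", "credentials")}, True),
}
for input_name, (_, node_field_pairs, _is_required) in graph_cred_inputs.items():
    for node_id, node_field_name in node_field_pairs:
        # map the user's selected credentials onto this node's field
        pass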

@@ -907,3 +907,335 @@ async def test_stop_graph_execution_cascades_to_child_with_reviews(
# Verify both parent and child status updates
assert mock_execution_db.update_graph_execution_stats.call_count >= 1
# ============================================================================
# Tests for auto_credentials validation in _validate_node_input_credentials
# (Fix 3: SECRT-1772 + Fix 4: Path 4)
# ============================================================================
@pytest.mark.asyncio
async def test_validate_node_input_credentials_auto_creds_valid(
mocker: MockerFixture,
):
"""
[SECRT-1772] When a node has auto_credentials with a valid _credentials_id
that exists in the store, validation should pass without errors.
"""
from backend.executor.utils import _validate_node_input_credentials
mock_node = mocker.MagicMock()
mock_node.id = "node-with-auto-creds"
mock_node.credentials_optional = False
mock_node.input_default = {
"spreadsheet": {
"_credentials_id": "valid-cred-id",
"id": "file-123",
"name": "test.xlsx",
}
}
mock_block = mocker.MagicMock()
# No regular credentials fields
mock_block.input_schema.get_credentials_fields.return_value = {}
# Has auto_credentials fields
mock_block.input_schema.get_auto_credentials_fields.return_value = {
"credentials": {
"field_name": "spreadsheet",
"config": {"provider": "google", "type": "oauth2"},
}
}
mock_node.block = mock_block
mock_graph = mocker.MagicMock()
mock_graph.nodes = [mock_node]
# Mock the credentials store to return valid credentials
mock_store = mocker.MagicMock()
mock_creds = mocker.MagicMock()
mock_creds.id = "valid-cred-id"
mock_store.get_creds_by_id = mocker.AsyncMock(return_value=mock_creds)
mocker.patch(
"backend.executor.utils.get_integration_credentials_store",
return_value=mock_store,
)
errors, nodes_to_skip = await _validate_node_input_credentials(
graph=mock_graph,
user_id="test-user",
nodes_input_masks=None,
)
assert mock_node.id not in errors
assert mock_node.id not in nodes_to_skip
@pytest.mark.asyncio
async def test_validate_node_input_credentials_auto_creds_missing(
mocker: MockerFixture,
):
"""
[SECRT-1772] When a node has auto_credentials with a _credentials_id
that doesn't exist for the current user, validation should report an error.
"""
from backend.executor.utils import _validate_node_input_credentials
mock_node = mocker.MagicMock()
mock_node.id = "node-with-bad-auto-creds"
mock_node.credentials_optional = False
mock_node.input_default = {
"spreadsheet": {
"_credentials_id": "other-users-cred-id",
"id": "file-123",
"name": "test.xlsx",
}
}
mock_block = mocker.MagicMock()
mock_block.input_schema.get_credentials_fields.return_value = {}
mock_block.input_schema.get_auto_credentials_fields.return_value = {
"credentials": {
"field_name": "spreadsheet",
"config": {"provider": "google", "type": "oauth2"},
}
}
mock_node.block = mock_block
mock_graph = mocker.MagicMock()
mock_graph.nodes = [mock_node]
# Mock the credentials store to return None (cred not found for this user)
mock_store = mocker.MagicMock()
mock_store.get_creds_by_id = mocker.AsyncMock(return_value=None)
mocker.patch(
"backend.executor.utils.get_integration_credentials_store",
return_value=mock_store,
)
errors, nodes_to_skip = await _validate_node_input_credentials(
graph=mock_graph,
user_id="different-user",
nodes_input_masks=None,
)
assert mock_node.id in errors
assert "spreadsheet" in errors[mock_node.id]
assert "not available" in errors[mock_node.id]["spreadsheet"].lower()
@pytest.mark.asyncio
async def test_validate_node_input_credentials_both_regular_and_auto(
mocker: MockerFixture,
):
"""
[SECRT-1772] A node that has BOTH regular credentials AND auto_credentials
should have both validated.
"""
from backend.executor.utils import _validate_node_input_credentials
mock_node = mocker.MagicMock()
mock_node.id = "node-with-both-creds"
mock_node.credentials_optional = False
mock_node.input_default = {
"credentials": {
"id": "regular-cred-id",
"provider": "github",
"type": "api_key",
},
"spreadsheet": {
"_credentials_id": "auto-cred-id",
"id": "file-123",
"name": "test.xlsx",
},
}
mock_credentials_field_type = mocker.MagicMock()
mock_credentials_meta = mocker.MagicMock()
mock_credentials_meta.id = "regular-cred-id"
mock_credentials_meta.provider = "github"
mock_credentials_meta.type = "api_key"
mock_credentials_field_type.model_validate.return_value = mock_credentials_meta
mock_block = mocker.MagicMock()
# Regular credentials field
mock_block.input_schema.get_credentials_fields.return_value = {
"credentials": mock_credentials_field_type,
}
# Auto-credentials field
mock_block.input_schema.get_auto_credentials_fields.return_value = {
"auto_credentials": {
"field_name": "spreadsheet",
"config": {"provider": "google", "type": "oauth2"},
}
}
mock_node.block = mock_block
mock_graph = mocker.MagicMock()
mock_graph.nodes = [mock_node]
# Mock the credentials store to return valid credentials for both
mock_store = mocker.MagicMock()
mock_regular_creds = mocker.MagicMock()
mock_regular_creds.id = "regular-cred-id"
mock_regular_creds.provider = "github"
mock_regular_creds.type = "api_key"
mock_auto_creds = mocker.MagicMock()
mock_auto_creds.id = "auto-cred-id"
def get_creds_side_effect(user_id, cred_id):
if cred_id == "regular-cred-id":
return mock_regular_creds
elif cred_id == "auto-cred-id":
return mock_auto_creds
return None
mock_store.get_creds_by_id = mocker.AsyncMock(side_effect=get_creds_side_effect)
mocker.patch(
"backend.executor.utils.get_integration_credentials_store",
return_value=mock_store,
)
errors, nodes_to_skip = await _validate_node_input_credentials(
graph=mock_graph,
user_id="test-user",
nodes_input_masks=None,
)
# Both should validate successfully - no errors
assert mock_node.id not in errors
assert mock_node.id not in nodes_to_skip
@pytest.mark.asyncio
async def test_validate_node_input_credentials_auto_creds_skipped_when_none(
mocker: MockerFixture,
):
"""
When a node has auto_credentials but the field value has _credentials_id=None
(e.g., from upstream connection), validation should skip it without error.
"""
from backend.executor.utils import _validate_node_input_credentials
mock_node = mocker.MagicMock()
mock_node.id = "node-with-chained-auto-creds"
mock_node.credentials_optional = False
mock_node.input_default = {
"spreadsheet": {
"_credentials_id": None,
"id": "file-123",
"name": "test.xlsx",
}
}
mock_block = mocker.MagicMock()
mock_block.input_schema.get_credentials_fields.return_value = {}
mock_block.input_schema.get_auto_credentials_fields.return_value = {
"credentials": {
"field_name": "spreadsheet",
"config": {"provider": "google", "type": "oauth2"},
}
}
mock_node.block = mock_block
mock_graph = mocker.MagicMock()
mock_graph.nodes = [mock_node]
errors, nodes_to_skip = await _validate_node_input_credentials(
graph=mock_graph,
user_id="test-user",
nodes_input_masks=None,
)
# No error - chained data with None cred_id is valid
assert mock_node.id not in errors
# ============================================================================
# Tests for CredentialsFieldInfo auto_credential tag (Fix 4: Path 4)
# ============================================================================
def test_credentials_field_info_auto_credential_tag():
"""
[Path 4] CredentialsFieldInfo should support is_auto_credential and
input_field_name fields for distinguishing auto from regular credentials.
"""
from backend.data.model import CredentialsFieldInfo
# Regular credential should have is_auto_credential=False by default
regular = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["github"],
"credentials_types": ["api_key"],
},
by_alias=True,
)
assert regular.is_auto_credential is False
assert regular.input_field_name is None
# Auto credential should have is_auto_credential=True
auto = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["google"],
"credentials_types": ["oauth2"],
"is_auto_credential": True,
"input_field_name": "spreadsheet",
},
by_alias=True,
)
assert auto.is_auto_credential is True
assert auto.input_field_name == "spreadsheet"
def test_make_node_credentials_input_map_excludes_auto_creds(
mocker: MockerFixture,
):
"""
[Path 4] make_node_credentials_input_map should only include regular credentials,
not auto_credentials (which are resolved at execution time).
"""
from backend.data.model import CredentialsFieldInfo, CredentialsMetaInput
from backend.executor.utils import make_node_credentials_input_map
from backend.integrations.providers import ProviderName
# Create a mock graph whose regular_credentials_inputs property returns
# only regular credentials (auto credentials are excluded upstream)
mock_graph = mocker.MagicMock()
regular_field_info = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["github"],
"credentials_types": ["api_key"],
"is_auto_credential": False,
},
by_alias=True,
)
# Mock regular_credentials_inputs property (auto_credentials are excluded)
mock_graph.regular_credentials_inputs = {
"github_creds": (regular_field_info, {("node-1", "credentials")}, True),
}
graph_credentials_input = {
"github_creds": CredentialsMetaInput(
id="cred-123",
provider=ProviderName("github"),
type="api_key",
),
}
result = make_node_credentials_input_map(mock_graph, graph_credentials_input)
# Regular credentials should be mapped
assert "node-1" in result
assert "credentials" in result["node-1"]
# Auto credentials should NOT appear in the result
# (they would have been mapped to the kwarg_name "credentials" not "spreadsheet")
for node_id, fields in result.items():
for field_name, value in fields.items():
# Verify no auto-credential phantom entries
if isinstance(value, dict):
assert "_credentials_id" not in value

View File

@@ -3,7 +3,6 @@
"credentials_input_schema": {
"properties": {},
"required": [],
"title": "TestGraphCredentialsInputSchema",
"type": "object"
},
"description": "A test graph",

View File

@@ -1,34 +1,14 @@
[
{
"credentials_input_schema": {
"properties": {},
"required": [],
"title": "TestGraphCredentialsInputSchema",
"type": "object"
},
"created_at": "2025-09-04T13:37:00",
"description": "A test graph",
"forked_from_id": null,
"forked_from_version": null,
"has_external_trigger": false,
"has_human_in_the_loop": false,
"has_sensitive_action": false,
"id": "graph-123",
"input_schema": {
"properties": {},
"required": [],
"type": "object"
},
"instructions": null,
"is_active": true,
"name": "Test Graph",
"output_schema": {
"properties": {},
"required": [],
"type": "object"
},
"recommended_schedule_cron": null,
"sub_graphs": [],
"trigger_setup_info": null,
"user_id": "3e53486c-cf57-477e-ba2a-cb02dc828e1a",
"version": 1
}

View File

@@ -1,5 +1,5 @@
import { CredentialsMetaInput } from "@/app/api/__generated__/models/credentialsMetaInput";
import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
import { GraphModel } from "@/app/api/__generated__/models/graphModel";
import { CredentialsInput } from "@/components/contextual/CredentialsInput/CredentialsInput";
import { useState } from "react";
import { getSchemaDefaultCredentials } from "../../helpers";
@@ -9,7 +9,7 @@ type Credential = CredentialsMetaInput | undefined;
type Credentials = Record<string, Credential>;
type Props = {
agent: GraphMeta | null;
agent: GraphModel | null;
siblingInputs?: Record<string, any>;
onCredentialsChange: (
credentials: Record<string, CredentialsMetaInput>,

View File

@@ -1,9 +1,9 @@
import { CredentialsMetaInput } from "@/app/api/__generated__/models/credentialsMetaInput";
import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
import { GraphModel } from "@/app/api/__generated__/models/graphModel";
import { BlockIOCredentialsSubSchema } from "@/lib/autogpt-server-api/types";
export function getCredentialFields(
agent: GraphMeta | null,
agent: GraphModel | null,
): AgentCredentialsFields {
if (!agent) return {};

View File

@@ -3,10 +3,10 @@ import type {
CredentialsMetaInput,
} from "@/lib/autogpt-server-api/types";
import type { InputValues } from "./types";
import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
import { GraphModel } from "@/app/api/__generated__/models/graphModel";
export function computeInitialAgentInputs(
agent: GraphMeta | null,
agent: GraphModel | null,
existingInputs?: InputValues | null,
): InputValues {
const properties = agent?.input_schema?.properties || {};
@@ -29,7 +29,7 @@ export function computeInitialAgentInputs(
}
type IsRunDisabledParams = {
agent: GraphMeta | null;
agent: GraphModel | null;
isRunning: boolean;
agentInputs: InputValues | null | undefined;
};

View File

@@ -30,6 +30,8 @@ import {
} from "@/components/atoms/Tooltip/BaseTooltip";
import { GraphMeta } from "@/lib/autogpt-server-api";
import jaro from "jaro-winkler";
import { getV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
import { okData } from "@/app/api/helpers";
type _Block = Omit<Block, "inputSchema" | "outputSchema"> & {
uiKey?: string;
@@ -107,6 +109,8 @@ export function BlocksControl({
.filter((b) => b.uiType !== BlockUIType.AGENT)
.sort((a, b) => a.name.localeCompare(b.name));
// Agent blocks are created from GraphMeta which doesn't include schemas.
// Schemas will be fetched on-demand when the block is actually added.
const agentBlockList = flows
.map((flow): _Block => {
return {
@@ -116,8 +120,9 @@ export function BlocksControl({
`Ver.${flow.version}` +
(flow.description ? ` | ${flow.description}` : ""),
categories: [{ category: "AGENT", description: "" }],
inputSchema: flow.input_schema,
outputSchema: flow.output_schema,
// Empty schemas - will be populated when block is added
inputSchema: { type: "object", properties: {} },
outputSchema: { type: "object", properties: {} },
staticOutput: false,
uiType: BlockUIType.AGENT,
costs: [],
@@ -125,8 +130,7 @@ export function BlocksControl({
hardcodedValues: {
graph_id: flow.id,
graph_version: flow.version,
input_schema: flow.input_schema,
output_schema: flow.output_schema,
// Schemas will be fetched on-demand when block is added
},
};
})
@@ -182,6 +186,37 @@ export function BlocksControl({
setSelectedCategory(null);
}, []);
// Handler to add a block, fetching graph data on-demand for agent blocks
const handleAddBlock = useCallback(
async (block: _Block & { notAvailable: string | null }) => {
if (block.notAvailable) return;
// For agent blocks, fetch the full graph to get schemas
if (block.uiType === BlockUIType.AGENT && block.hardcodedValues) {
const graphID = block.hardcodedValues.graph_id as string;
const graphVersion = block.hardcodedValues.graph_version as number;
const graphData = okData(
await getV1GetSpecificGraph(graphID, { version: graphVersion }),
);
if (graphData) {
addBlock(block.id, block.name, {
...block.hardcodedValues,
input_schema: graphData.input_schema,
output_schema: graphData.output_schema,
});
} else {
// Fallback: add without schemas (will be incomplete)
console.error("Failed to fetch graph data for agent block");
addBlock(block.id, block.name, block.hardcodedValues || {});
}
} else {
addBlock(block.id, block.name, block.hardcodedValues || {});
}
},
[addBlock],
);
// Extract unique categories from blocks
const categories = useMemo(() => {
return Array.from(
@@ -303,10 +338,7 @@ export function BlocksControl({
}),
);
}}
onClick={() =>
!block.notAvailable &&
addBlock(block.id, block.name, block?.hardcodedValues || {})
}
onClick={() => handleAddBlock(block)}
title={block.notAvailable ?? undefined}
>
<div

View File

@@ -29,13 +29,17 @@ import "@xyflow/react/dist/style.css";
import { ConnectedEdge, CustomNode } from "../CustomNode/CustomNode";
import "./flow.css";
import {
BlockIORootSchema,
BlockUIType,
formatEdgeID,
GraphExecutionID,
GraphID,
GraphMeta,
LibraryAgent,
SpecialBlockID,
} from "@/lib/autogpt-server-api";
import { getV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
import { okData } from "@/app/api/helpers";
import { IncompatibilityInfo } from "../../../hooks/useSubAgentUpdate/types";
import { Key, storage } from "@/services/storage/local-storage";
import { findNewlyAddedBlockCoordinates, getTypeColor } from "@/lib/utils";
@@ -687,8 +691,94 @@ const FlowEditor: React.FC<{
[getNode, updateNode, nodes],
);
/* Shared helper to create and add a node */
const createAndAddNode = useCallback(
async (
blockID: string,
blockName: string,
hardcodedValues: Record<string, any>,
position: { x: number; y: number },
): Promise<CustomNode | null> => {
const nodeSchema = availableBlocks.find((node) => node.id === blockID);
if (!nodeSchema) {
console.error(`Schema not found for block ID: ${blockID}`);
return null;
}
// For agent blocks, fetch the full graph to get schemas
let inputSchema: BlockIORootSchema = nodeSchema.inputSchema;
let outputSchema: BlockIORootSchema = nodeSchema.outputSchema;
let finalHardcodedValues = hardcodedValues;
if (blockID === SpecialBlockID.AGENT) {
const graphID = hardcodedValues.graph_id as string;
const graphVersion = hardcodedValues.graph_version as number;
const graphData = okData(
await getV1GetSpecificGraph(graphID, { version: graphVersion }),
);
if (graphData) {
inputSchema = graphData.input_schema as BlockIORootSchema;
outputSchema = graphData.output_schema as BlockIORootSchema;
finalHardcodedValues = {
...hardcodedValues,
input_schema: graphData.input_schema,
output_schema: graphData.output_schema,
};
} else {
console.error("Failed to fetch graph data for agent block");
}
}
const newNode: CustomNode = {
id: nodeId.toString(),
type: "custom",
position,
data: {
blockType: blockName,
blockCosts: nodeSchema.costs || [],
title: `${blockName} ${nodeId}`,
description: nodeSchema.description,
categories: nodeSchema.categories,
inputSchema: inputSchema,
outputSchema: outputSchema,
hardcodedValues: finalHardcodedValues,
connections: [],
isOutputOpen: false,
block_id: blockID,
isOutputStatic: nodeSchema.staticOutput,
uiType: nodeSchema.uiType,
},
};
addNodes(newNode);
setNodeId((prevId) => prevId + 1);
clearNodesStatusAndOutput();
history.push({
type: "ADD_NODE",
payload: { node: { ...newNode, ...newNode.data } },
undo: () => deleteElements({ nodes: [{ id: newNode.id }] }),
redo: () => addNodes(newNode),
});
return newNode;
},
[
availableBlocks,
nodeId,
addNodes,
deleteElements,
clearNodesStatusAndOutput,
],
);
const addNode = useCallback(
(blockId: string, nodeType: string, hardcodedValues: any = {}) => {
async (
blockId: string,
nodeType: string,
hardcodedValues: Record<string, any> = {},
) => {
const nodeSchema = availableBlocks.find((node) => node.id === blockId);
if (!nodeSchema) {
console.error(`Schema not found for block ID: ${blockId}`);
@@ -707,73 +797,42 @@ const FlowEditor: React.FC<{
// Alternative: We could also use D3 force, Intersection for this (React flow Pro examples)
const { x, y } = getViewport();
const viewportCoordinates =
const position =
nodeDimensions && Object.keys(nodeDimensions).length > 0
? // we will get all the dimension of nodes, then store
findNewlyAddedBlockCoordinates(
? findNewlyAddedBlockCoordinates(
nodeDimensions,
nodeSchema.uiType == BlockUIType.NOTE ? 300 : 500,
60,
1.0,
)
: // we will get all the dimension of nodes, then store
{
: {
x: window.innerWidth / 2 - x,
y: window.innerHeight / 2 - y,
};
const newNode: CustomNode = {
id: nodeId.toString(),
type: "custom",
position: viewportCoordinates, // Set the position to the calculated viewport center
data: {
blockType: nodeType,
blockCosts: nodeSchema.costs,
title: `${nodeType} ${nodeId}`,
description: nodeSchema.description,
categories: nodeSchema.categories,
inputSchema: nodeSchema.inputSchema,
outputSchema: nodeSchema.outputSchema,
hardcodedValues: hardcodedValues,
connections: [],
isOutputOpen: false,
block_id: blockId,
isOutputStatic: nodeSchema.staticOutput,
uiType: nodeSchema.uiType,
},
};
addNodes(newNode);
setNodeId((prevId) => prevId + 1);
clearNodesStatusAndOutput(); // Clear status and output when a new node is added
const newNode = await createAndAddNode(
blockId,
nodeType,
hardcodedValues,
position,
);
if (!newNode) return;
setViewport(
{
// Rough estimate of the dimension of the node is: 500x400px.
// Though we skip shifting the X, considering the block menu side-bar.
x: -viewportCoordinates.x * 0.8 + (window.innerWidth - 0.0) / 2,
y: -viewportCoordinates.y * 0.8 + (window.innerHeight - 400) / 2,
x: -position.x * 0.8 + (window.innerWidth - 0.0) / 2,
y: -position.y * 0.8 + (window.innerHeight - 400) / 2,
zoom: 0.8,
},
{ duration: 500 },
);
history.push({
type: "ADD_NODE",
payload: { node: { ...newNode, ...newNode.data } },
undo: () => deleteElements({ nodes: [{ id: newNode.id }] }),
redo: () => addNodes(newNode),
});
},
[
nodeId,
getViewport,
setViewport,
availableBlocks,
addNodes,
nodeDimensions,
deleteElements,
clearNodesStatusAndOutput,
createAndAddNode,
],
);
@@ -920,7 +979,7 @@ const FlowEditor: React.FC<{
}, []);
const onDrop = useCallback(
(event: React.DragEvent) => {
async (event: React.DragEvent) => {
event.preventDefault();
const blockData = event.dataTransfer.getData("application/reactflow");
@@ -935,62 +994,17 @@ const FlowEditor: React.FC<{
y: event.clientY,
});
// Find the block schema
const nodeSchema = availableBlocks.find((node) => node.id === blockId);
if (!nodeSchema) {
console.error(`Schema not found for block ID: ${blockId}`);
return;
}
// Create the new node at the drop position
const newNode: CustomNode = {
id: nodeId.toString(),
type: "custom",
await createAndAddNode(
blockId,
blockName,
hardcodedValues || {},
position,
data: {
blockType: blockName,
blockCosts: nodeSchema.costs || [],
title: `${blockName} ${nodeId}`,
description: nodeSchema.description,
categories: nodeSchema.categories,
inputSchema: nodeSchema.inputSchema,
outputSchema: nodeSchema.outputSchema,
hardcodedValues: hardcodedValues,
connections: [],
isOutputOpen: false,
block_id: blockId,
uiType: nodeSchema.uiType,
},
};
history.push({
type: "ADD_NODE",
payload: { node: { ...newNode, ...newNode.data } },
undo: () => {
deleteElements({ nodes: [{ id: newNode.id } as any], edges: [] });
},
redo: () => {
addNodes([newNode]);
},
});
addNodes([newNode]);
clearNodesStatusAndOutput();
setNodeId((prevId) => prevId + 1);
);
} catch (error) {
console.error("Failed to drop block:", error);
}
},
[
nodeId,
availableBlocks,
nodes,
edges,
addNodes,
screenToFlowPosition,
deleteElements,
clearNodesStatusAndOutput,
],
[screenToFlowPosition, createAndAddNode],
);
const buildContextValue: BuilderContextType = useMemo(

View File

@@ -4,13 +4,13 @@ import { AgentRunDraftView } from "@/app/(platform)/library/agents/[id]/componen
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import type {
CredentialsMetaInput,
GraphMeta,
Graph,
} from "@/lib/autogpt-server-api/types";
interface RunInputDialogProps {
isOpen: boolean;
doClose: () => void;
graph: GraphMeta;
graph: Graph;
doRun?: (
inputs: Record<string, any>,
credentialsInputs: Record<string, CredentialsMetaInput>,

View File

@@ -9,13 +9,13 @@ import { CustomNodeData } from "@/app/(platform)/build/components/legacy-builder
import {
BlockUIType,
CredentialsMetaInput,
GraphMeta,
Graph,
} from "@/lib/autogpt-server-api/types";
import RunnerOutputUI, { OutputNodeInfo } from "./RunnerOutputUI";
import { RunnerInputDialog } from "./RunnerInputUI";
interface RunnerUIWrapperProps {
graph: GraphMeta;
graph: Graph;
nodes: Node<CustomNodeData>[];
graphExecutionError?: string | null;
saveAndRun: (

View File

@@ -1,5 +1,5 @@
import { GraphInputSchema } from "@/lib/autogpt-server-api";
import { GraphMetaLike, IncompatibilityInfo } from "./types";
import { GraphLike, IncompatibilityInfo } from "./types";
// Helper type for schema properties - the generated types are too loose
type SchemaProperties = Record<string, GraphInputSchema["properties"][string]>;
@@ -36,7 +36,7 @@ export function getSchemaRequired(schema: unknown): SchemaRequired {
*/
export function createUpdatedAgentNodeInputs(
currentInputs: Record<string, unknown>,
latestSubGraphVersion: GraphMetaLike,
latestSubGraphVersion: GraphLike,
): Record<string, unknown> {
return {
...currentInputs,

View File

@@ -1,7 +1,11 @@
import type { GraphMeta as LegacyGraphMeta } from "@/lib/autogpt-server-api";
import type {
Graph as LegacyGraph,
GraphMeta as LegacyGraphMeta,
} from "@/lib/autogpt-server-api";
import type { GraphModel as GeneratedGraph } from "@/app/api/__generated__/models/graphModel";
import type { GraphMeta as GeneratedGraphMeta } from "@/app/api/__generated__/models/graphMeta";
export type SubAgentUpdateInfo<T extends GraphMetaLike = GraphMetaLike> = {
export type SubAgentUpdateInfo<T extends GraphLike = GraphLike> = {
hasUpdate: boolean;
currentVersion: number;
latestVersion: number;
@@ -10,7 +14,10 @@ export type SubAgentUpdateInfo<T extends GraphMetaLike = GraphMetaLike> = {
incompatibilities: IncompatibilityInfo | null;
};
// Union type for GraphMeta that works with both legacy and new builder
// Union type for Graph (with schemas) that works with both legacy and new builder
export type GraphLike = LegacyGraph | GeneratedGraph;
// Union type for GraphMeta (without schemas) for version detection
export type GraphMetaLike = LegacyGraphMeta | GeneratedGraphMeta;
export type IncompatibilityInfo = {

View File

@@ -1,5 +1,11 @@
import { useMemo } from "react";
import { GraphInputSchema, GraphOutputSchema } from "@/lib/autogpt-server-api";
import type {
GraphInputSchema,
GraphOutputSchema,
} from "@/lib/autogpt-server-api";
import type { GraphModel } from "@/app/api/__generated__/models/graphModel";
import { useGetV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
import { okData } from "@/app/api/helpers";
import { getEffectiveType } from "@/lib/utils";
import { EdgeLike, getSchemaProperties, getSchemaRequired } from "./helpers";
import {
@@ -11,26 +17,38 @@ import {
/**
* Checks if a newer version of a sub-agent is available and determines compatibility
*/
export function useSubAgentUpdate<T extends GraphMetaLike>(
export function useSubAgentUpdate(
nodeID: string,
graphID: string | undefined,
graphVersion: number | undefined,
currentInputSchema: GraphInputSchema | undefined,
currentOutputSchema: GraphOutputSchema | undefined,
connections: EdgeLike[],
availableGraphs: T[],
): SubAgentUpdateInfo<T> {
availableGraphs: GraphMetaLike[],
): SubAgentUpdateInfo<GraphModel> {
// Find the latest version of the same graph
const latestGraph = useMemo(() => {
const latestGraphInfo = useMemo(() => {
if (!graphID) return null;
return availableGraphs.find((graph) => graph.id === graphID) || null;
}, [graphID, availableGraphs]);
// Check if there's an update available
// Check if there's a newer version available
const hasUpdate = useMemo(() => {
if (!latestGraph || graphVersion === undefined) return false;
return latestGraph.version! > graphVersion;
}, [latestGraph, graphVersion]);
if (!latestGraphInfo || graphVersion === undefined) return false;
return latestGraphInfo.version! > graphVersion;
}, [latestGraphInfo, graphVersion]);
// Fetch full graph IF an update is detected
const { data: latestGraph } = useGetV1GetSpecificGraph(
graphID ?? "",
{ version: latestGraphInfo?.version },
{
query: {
enabled: hasUpdate && !!graphID && !!latestGraphInfo?.version,
select: okData,
},
},
);
// Get connected input and output handles for this specific node
const connectedHandles = useMemo(() => {
@@ -152,8 +170,8 @@ export function useSubAgentUpdate<T extends GraphMetaLike>(
return {
hasUpdate,
currentVersion: graphVersion || 0,
latestVersion: latestGraph?.version || 0,
latestGraph,
latestVersion: latestGraphInfo?.version || 0,
latestGraph: latestGraph || null,
isCompatible: compatibilityResult.isCompatible,
incompatibilities: compatibilityResult.incompatibilities,
};

View File

@@ -18,7 +18,7 @@ interface GraphStore {
outputSchema: Record<string, any> | null,
) => void;
// Available graphs; used for sub-graph updates
// Available graphs; used to detect updated sub-graph versions
availableSubGraphs: GraphMeta[];
setAvailableSubGraphs: (graphs: GraphMeta[]) => void;

View File

@@ -10,8 +10,8 @@ import React, {
import {
CredentialsMetaInput,
CredentialsType,
Graph,
GraphExecutionID,
GraphMeta,
LibraryAgentPreset,
LibraryAgentPresetID,
LibraryAgentPresetUpdatable,
@@ -69,7 +69,7 @@ export function AgentRunDraftView({
className,
recommendedScheduleCron,
}: {
graph: GraphMeta;
graph: Graph;
agentActions?: ButtonAction[];
recommendedScheduleCron?: string | null;
doRun?: (

View File

@@ -2,8 +2,8 @@
import React, { useCallback, useMemo } from "react";
import {
Graph,
GraphExecutionID,
GraphMeta,
Schedule,
ScheduleID,
} from "@/lib/autogpt-server-api";
@@ -35,7 +35,7 @@ export function AgentScheduleDetailsView({
onForcedRun,
doDeleteSchedule,
}: {
graph: GraphMeta;
graph: Graph;
schedule: Schedule;
agentActions: ButtonAction[];
onForcedRun: (runID: GraphExecutionID) => void;

View File

@@ -5629,7 +5629,9 @@
"description": "Successful Response",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/GraphMeta" }
"schema": {
"$ref": "#/components/schemas/GraphModelWithoutNodes"
}
}
}
},
@@ -6495,18 +6497,6 @@
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Recommended Schedule Cron"
},
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes",
"default": []
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links",
"default": []
},
"forked_from_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Forked From Id"
@@ -6514,11 +6504,22 @@
"forked_from_version": {
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version"
},
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes"
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links"
}
},
"type": "object",
"required": ["name", "description"],
"title": "BaseGraph"
"title": "BaseGraph",
"description": "Graph with nodes, links, and computed I/O schema fields.\n\nUsed to represent sub-graphs within a `Graph`. Contains the full graph\nstructure including nodes and links, plus computed fields for schemas\nand trigger info. Does NOT include user_id or created_at (see GraphModel)."
},
"BaseGraph-Output": {
"properties": {
@@ -6539,18 +6540,6 @@
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Recommended Schedule Cron"
},
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes",
"default": []
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links",
"default": []
},
"forked_from_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Forked From Id"
@@ -6559,6 +6548,16 @@
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version"
},
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes"
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links"
},
"input_schema": {
"additionalProperties": true,
"type": "object",
@@ -6605,7 +6604,8 @@
"has_sensitive_action",
"trigger_setup_info"
],
"title": "BaseGraph"
"title": "BaseGraph",
"description": "Graph with nodes, links, and computed I/O schema fields.\n\nUsed to represent sub-graphs within a `Graph`. Contains the full graph\nstructure including nodes and links, plus computed fields for schemas\nand trigger info. Does NOT include user_id or created_at (see GraphModel)."
},
"BlockCategoryResponse": {
"properties": {
@@ -7399,18 +7399,6 @@
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Recommended Schedule Cron"
},
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes",
"default": []
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links",
"default": []
},
"forked_from_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Forked From Id"
@@ -7419,16 +7407,26 @@
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version"
},
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes"
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links"
},
"sub_graphs": {
"items": { "$ref": "#/components/schemas/BaseGraph-Input" },
"type": "array",
"title": "Sub Graphs",
"default": []
"title": "Sub Graphs"
}
},
"type": "object",
"required": ["name", "description"],
"title": "Graph"
"title": "Graph",
"description": "Creatable graph model used in API create/update endpoints."
},
"GraphExecution": {
"properties": {
@@ -7778,6 +7776,52 @@
"description": "Response schema for paginated graph executions."
},
"GraphMeta": {
"properties": {
"id": { "type": "string", "title": "Id" },
"version": { "type": "integer", "title": "Version" },
"is_active": {
"type": "boolean",
"title": "Is Active",
"default": true
},
"name": { "type": "string", "title": "Name" },
"description": { "type": "string", "title": "Description" },
"instructions": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Instructions"
},
"recommended_schedule_cron": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Recommended Schedule Cron"
},
"forked_from_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Forked From Id"
},
"forked_from_version": {
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version"
},
"user_id": { "type": "string", "title": "User Id" },
"created_at": {
"type": "string",
"format": "date-time",
"title": "Created At"
}
},
"type": "object",
"required": [
"id",
"version",
"name",
"description",
"user_id",
"created_at"
],
"title": "GraphMeta",
"description": "Lightweight graph metadata model representing an existing graph from the database,\nfor use in listings and summaries.\n\nLacks `GraphModel`'s nodes, links, and expensive computed fields.\nUse for list endpoints where full graph data is not needed and performance matters."
},
"GraphModel": {
"properties": {
"id": { "type": "string", "title": "Id" },
"version": { "type": "integer", "title": "Version", "default": 1 },
@@ -7804,13 +7848,27 @@
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version"
},
"user_id": { "type": "string", "title": "User Id" },
"created_at": {
"type": "string",
"format": "date-time",
"title": "Created At"
},
"nodes": {
"items": { "$ref": "#/components/schemas/NodeModel" },
"type": "array",
"title": "Nodes"
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links"
},
"sub_graphs": {
"items": { "$ref": "#/components/schemas/BaseGraph-Output" },
"type": "array",
"title": "Sub Graphs",
"default": []
"title": "Sub Graphs"
},
"user_id": { "type": "string", "title": "User Id" },
"input_schema": {
"additionalProperties": true,
"type": "object",
@@ -7857,6 +7915,7 @@
"name",
"description",
"user_id",
"created_at",
"input_schema",
"output_schema",
"has_external_trigger",
@@ -7865,9 +7924,10 @@
"trigger_setup_info",
"credentials_input_schema"
],
"title": "GraphMeta"
"title": "GraphModel",
"description": "Full graph model representing an existing graph from the database.\n\nThis is the primary model for working with persisted graphs. Includes all\ngraph data (nodes, links, sub_graphs) plus user ownership and timestamps.\nProvides computed fields (input_schema, output_schema, etc.) used during\nset-up (frontend) and execution (backend).\n\nInherits from:\n- `Graph`: provides structure (nodes, links, sub_graphs) and computed schemas\n- `GraphMeta`: provides user_id, created_at for database records"
},
"GraphModel": {
"GraphModelWithoutNodes": {
"properties": {
"id": { "type": "string", "title": "Id" },
"version": { "type": "integer", "title": "Version", "default": 1 },
@@ -7886,18 +7946,6 @@
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Recommended Schedule Cron"
},
"nodes": {
"items": { "$ref": "#/components/schemas/NodeModel" },
"type": "array",
"title": "Nodes",
"default": []
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links",
"default": []
},
"forked_from_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Forked From Id"
@@ -7906,12 +7954,6 @@
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version"
},
"sub_graphs": {
"items": { "$ref": "#/components/schemas/BaseGraph-Output" },
"type": "array",
"title": "Sub Graphs",
"default": []
},
"user_id": { "type": "string", "title": "User Id" },
"created_at": {
"type": "string",
@@ -7973,7 +8015,8 @@
"trigger_setup_info",
"credentials_input_schema"
],
"title": "GraphModel"
"title": "GraphModelWithoutNodes",
"description": "GraphModel variant that excludes nodes, links, and sub-graphs from serialization.\n\nUsed in contexts like the store where exposing internal graph structure\nis not desired. Inherits all computed fields from GraphModel but marks\nnodes and links as excluded from JSON output."
},
"GraphSettings": {
"properties": {
@@ -8613,26 +8656,22 @@
"input_default": {
"additionalProperties": true,
"type": "object",
"title": "Input Default",
"default": {}
"title": "Input Default"
},
"metadata": {
"additionalProperties": true,
"type": "object",
"title": "Metadata",
"default": {}
"title": "Metadata"
},
"input_links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Input Links",
"default": []
"title": "Input Links"
},
"output_links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Output Links",
"default": []
"title": "Output Links"
}
},
"type": "object",
@@ -8712,26 +8751,22 @@
"input_default": {
"additionalProperties": true,
"type": "object",
"title": "Input Default",
"default": {}
"title": "Input Default"
},
"metadata": {
"additionalProperties": true,
"type": "object",
"title": "Metadata",
"default": {}
"title": "Metadata"
},
"input_links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Input Links",
"default": []
"title": "Input Links"
},
"output_links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Output Links",
"default": []
"title": "Output Links"
},
"graph_id": { "type": "string", "title": "Graph Id" },
"graph_version": { "type": "integer", "title": "Graph Version" },

View File

@@ -4,7 +4,9 @@ import { loadScript } from "@/services/scripts/scripts";
export async function loadGoogleAPIPicker(): Promise<void> {
validateWindow();
await loadScript("https://apis.google.com/js/api.js");
await loadScript("https://apis.google.com/js/api.js", {
referrerPolicy: "no-referrer-when-downgrade",
});
const googleAPI = window.gapi;
if (!googleAPI) {
@@ -27,7 +29,9 @@ export async function loadGoogleIdentityServices(): Promise<void> {
throw new Error("Google Identity Services cannot load on server");
}
await loadScript("https://accounts.google.com/gsi/client");
await loadScript("https://accounts.google.com/gsi/client", {
referrerPolicy: "no-referrer-when-downgrade",
});
const google = window.google;
if (!google?.accounts?.oauth2) {

View File

@@ -362,25 +362,14 @@ export type GraphMeta = {
user_id: UserID;
version: number;
is_active: boolean;
created_at: Date;
name: string;
description: string;
instructions?: string | null;
recommended_schedule_cron: string | null;
forked_from_id?: GraphID | null;
forked_from_version?: number | null;
input_schema: GraphInputSchema;
output_schema: GraphOutputSchema;
credentials_input_schema: CredentialsInputSchema;
} & (
| {
has_external_trigger: true;
trigger_setup_info: GraphTriggerInfo;
}
| {
has_external_trigger: false;
trigger_setup_info: null;
}
);
};
export type GraphID = Brand<string, "GraphID">;
@@ -447,11 +436,22 @@ export type GraphTriggerInfo = {
/* Mirror of backend/data/graph.py:Graph */
export type Graph = GraphMeta & {
created_at: Date;
nodes: Node[];
links: Link[];
sub_graphs: Omit<Graph, "sub_graphs">[]; // Flattened sub-graphs
};
input_schema: GraphInputSchema;
output_schema: GraphOutputSchema;
credentials_input_schema: CredentialsInputSchema;
} & (
| {
has_external_trigger: true;
trigger_setup_info: GraphTriggerInfo;
}
| {
has_external_trigger: false;
trigger_setup_info: null;
}
);
export type GraphUpdateable = Omit<
Graph,