Compare commits

..

8 Commits

Author SHA1 Message Date
Otto-AGPT
0f75c408f2 feat: only run CLA automation for PRs touching autogpt_platform/
CLA check still runs on all PRs (CLA-assistant config).
But label automation, reminders, and auto-close only apply to
platform code (Polyform Shield License).

Uses simple first-page check (per_page: 100) - covers 99%+ of PRs.
2026-02-06 20:04:29 +00:00
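
For reference, the path gate described above boils down to a single first-page file check; this is a condensed sketch of the workflow diff below, where `prNumber` comes from the surrounding PR loop:

```javascript
// Fetch only the first page of changed files (up to 100) —
// the accepted trade-off that covers 99%+ of PRs.
const { data: files } = await github.rest.pulls.listFiles({
  owner: context.repo.owner,
  repo: context.repo.repo,
  pull_number: prNumber,
  per_page: 100
});
const touchesPlatform = files.some(f => f.filename.startsWith('autogpt_platform/'));
if (!touchesPlatform) continue; // skip labels, reminders, and auto-close
```
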
Otto-AGPT
b3e200f450 feat: replace check_run with status trigger for real-time CLA updates
CLA-assistant uses Status API, not Checks API, so check_run never fires.
- Added status event trigger
- Job-level guard: only runs if context == 'license/cla'
- Finds PRs by matching head SHA from status event
2026-02-06 19:36:31 +00:00
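
A status event carries only a commit SHA, so the job resolves it back to the open PR(s) whose head matches; roughly, per the workflow diff below:

```javascript
// Map the status event's SHA to the open PR(s) it belongs to.
const sha = context.payload.sha;
const { data: prs } = await github.rest.pulls.list({
  owner: context.repo.owner,
  repo: context.repo.repo,
  state: 'open',
  per_page: 100
});
const prsToCheck = prs.filter(pr => pr.head.sha === sha).map(pr => pr.number);
```
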
Otto-AGPT
9708ea3fd7 fix: address remaining review feedback
- Skip non-open PRs (closed/merged) early in loop
- Fix overlapping time windows: reminder only before warning period
- Add marker to close comment (prevents duplicates)
- Add 'cla: override' label support (maintainer bypass)
2026-02-06 19:35:44 +00:00
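
The duplicate-prevention marker mentioned above is a hidden HTML comment embedded in each bot comment; a comment is only posted when no existing bot comment carries the marker. A sketch along the lines of the workflow below:

```javascript
// Idempotency check: has the bot already posted a comment with this marker?
const CLOSE_MARKER = '<!-- cla-auto-closed -->';
const comments = await github.paginate(github.rest.issues.listComments, {
  owner: context.repo.owner,
  repo: context.repo.repo,
  issue_number: prNumber,
  per_page: 100
});
const alreadyPosted = comments.some(
  c => c.user?.type === 'Bot' && c.body?.includes(CLOSE_MARKER)
);
// Only create the comment (with CLOSE_MARKER prepended to its body) if !alreadyPosted.
```
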
Otto-AGPT
46ed9a8b3c fix: address CodeRabbit review feedback
- Add checks:read permission for Checks API fallback
- Validate timing env vars (fail fast on NaN, warn on bad order)
- Remove unused prNumber param from getClaStatus()
2026-02-06 19:34:36 +00:00
Otto-AGPT
b309c018f0 fix: add statuses:read permission for commit status API
Required to read CLA check status via repos.getCombinedStatusForRef().
pull-requests:write does not include statuses:read per GitHub docs.
2026-02-06 19:32:07 +00:00
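
With `statuses: read` granted, the job reads CLA state off the commit's combined status, mirroring `getClaStatus()` in the workflow diff below:

```javascript
// CLA-assistant reports via the commit Status API, not the Checks API.
const { data: combined } = await github.rest.repos.getCombinedStatusForRef({
  owner: context.repo.owner,
  repo: context.repo.repo,
  ref: headSha
});
const cla = combined.statuses.find(s => s.context === 'license/cla');
const signed = cla?.state === 'success';
```
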
Otto-AGPT
abe47e845e fix: add pagination for PRs and comments
Addresses review feedback:
- Use github.paginate() for pulls.list to handle >100 open PRs
- Use github.paginate() for issues.listComments to handle >100 comments
- Prevents missing PRs in scheduled sweeps
- Prevents duplicate reminder comments on busy PRs
2026-02-06 19:22:21 +00:00
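
`github.paginate()` transparently walks every page of a list endpoint, so the scheduled sweep sees all open PRs instead of only the first 100 (as in the workflow diff below):

```javascript
// Fetch ALL open PRs across pages, not just the first page.
const openPRs = await github.paginate(github.rest.pulls.list, {
  owner: context.repo.owner,
  repo: context.repo.repo,
  state: 'open',
  per_page: 100
});
```
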
Otto-AGPT
c881510d09 fix: make close warning timing independently configurable
- Add CLOSE_WARNING_DAYS env var (separate from CLOSE_DAYS)
- Calculate days remaining dynamically in warning message
- Default timing: reminder at 3d, warning at 7d, close at 10d
2026-02-06 19:17:37 +00:00
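
With the three knobs independent, each PR age lands in exactly one window, and the warning computes its deadline dynamically rather than hardcoding it. A sketch of the resulting control flow (see the workflow diff below):

```javascript
// Non-overlapping action windows derived from REMINDER_DAYS /
// CLOSE_WARNING_DAYS / CLOSE_DAYS (defaults 3 / 7 / 10 days).
if (prAgeDays >= REMINDER_DAYS && prAgeDays < CLOSE_WARNING_DAYS) {
  // post the one-time reminder
} else if (prAgeDays >= CLOSE_WARNING_DAYS && prAgeDays < CLOSE_DAYS) {
  const daysRemaining = CLOSE_DAYS - prAgeDays; // shown in the warning text
  // post the one-time close warning
} else if (prAgeDays >= CLOSE_DAYS) {
  // auto-close, unless the 'cla: override' label is present
}
```
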
Otto-AGPT
eccc26176c ci: add CLA label automation workflow
Adds a GitHub Actions workflow that:
- Creates 'cla: pending' and 'cla: signed' labels
- Auto-labels PRs based on CLA check status
- Posts reminder comment after 7 days if CLA unsigned
- Posts close warning at 23 days
- Auto-closes PRs after 30 days with unsigned CLA

Triggers:
- Real-time on check_run completion (license/cla)
- On PR open/sync/reopen
- Daily scheduled sweep at 9 AM UTC
- Manual workflow_dispatch for testing

Configurable timing via env vars (REMINDER_DAYS, CLOSE_DAYS).
2026-02-06 19:14:48 +00:00
60 changed files with 4503 additions and 6554 deletions

.github/workflows/cla-label-sync.yml (new file, 412 lines, vendored)

@@ -0,0 +1,412 @@
name: CLA Label Sync
on:
# Real-time: when CLA status changes (CLA-assistant uses Status API)
status:
# When PRs are opened or updated
pull_request_target:
types: [opened, synchronize, reopened]
# Scheduled sweep - check stale PRs daily
schedule:
- cron: '0 9 * * *' # 9 AM UTC daily
# Manual trigger for testing
workflow_dispatch:
inputs:
pr_number:
description: 'Specific PR number to check (optional)'
required: false
permissions:
pull-requests: write
contents: read
statuses: read
checks: read
env:
CLA_CHECK_NAME: 'license/cla'
LABEL_PENDING: 'cla: pending'
LABEL_SIGNED: 'cla: signed'
# Timing configuration (all independently configurable)
REMINDER_DAYS: 3 # Days before first reminder
CLOSE_WARNING_DAYS: 7 # Days before "closing soon" warning
CLOSE_DAYS: 10 # Days before auto-close
jobs:
sync-labels:
runs-on: ubuntu-latest
# Only run on status events if it's the CLA check
if: github.event_name != 'status' || github.event.context == 'license/cla'
steps:
- name: Ensure CLA labels exist
uses: actions/github-script@v7
with:
script: |
const labels = [
{ name: 'cla: pending', color: 'fbca04', description: 'CLA not yet signed by all contributors' },
{ name: 'cla: signed', color: '0e8a16', description: 'CLA signed by all contributors' }
];
for (const label of labels) {
try {
await github.rest.issues.getLabel({
owner: context.repo.owner,
repo: context.repo.repo,
name: label.name
});
} catch (e) {
if (e.status === 404) {
await github.rest.issues.createLabel({
owner: context.repo.owner,
repo: context.repo.repo,
name: label.name,
color: label.color,
description: label.description
});
console.log(`Created label: ${label.name}`);
}
}
}
- name: Sync CLA labels and handle stale PRs
uses: actions/github-script@v7
with:
script: |
const CLA_CHECK_NAME = process.env.CLA_CHECK_NAME;
const LABEL_PENDING = process.env.LABEL_PENDING;
const LABEL_SIGNED = process.env.LABEL_SIGNED;
const REMINDER_DAYS = parseInt(process.env.REMINDER_DAYS);
const CLOSE_WARNING_DAYS = parseInt(process.env.CLOSE_WARNING_DAYS);
const CLOSE_DAYS = parseInt(process.env.CLOSE_DAYS);
// Validate timing configuration
if ([REMINDER_DAYS, CLOSE_WARNING_DAYS, CLOSE_DAYS].some(Number.isNaN)) {
core.setFailed('Invalid timing configuration — REMINDER_DAYS, CLOSE_WARNING_DAYS, and CLOSE_DAYS must be numeric.');
return;
}
if (!(REMINDER_DAYS < CLOSE_WARNING_DAYS && CLOSE_WARNING_DAYS < CLOSE_DAYS)) {
core.warning(`Timing order looks odd: REMINDER(${REMINDER_DAYS}) < WARNING(${CLOSE_WARNING_DAYS}) < CLOSE(${CLOSE_DAYS}) expected.`);
}
const CLA_SIGN_URL = `https://cla-assistant.io/${context.repo.owner}/${context.repo.repo}`;
// Helper: Get CLA status for a commit
async function getClaStatus(headSha) {
// CLA-assistant uses the commit status API (not checks API)
const { data: statuses } = await github.rest.repos.getCombinedStatusForRef({
owner: context.repo.owner,
repo: context.repo.repo,
ref: headSha
});
const claStatus = statuses.statuses.find(
s => s.context === CLA_CHECK_NAME
);
if (claStatus) {
return {
found: true,
passed: claStatus.state === 'success',
state: claStatus.state,
description: claStatus.description
};
}
// Fallback: check the Checks API too
const { data: checkRuns } = await github.rest.checks.listForRef({
owner: context.repo.owner,
repo: context.repo.repo,
ref: headSha
});
const claCheck = checkRuns.check_runs.find(
check => check.name === CLA_CHECK_NAME
);
if (claCheck) {
return {
found: true,
passed: claCheck.conclusion === 'success',
state: claCheck.conclusion,
description: claCheck.output?.summary || ''
};
}
return { found: false, passed: false, state: 'unknown' };
}
// Helper: Check if bot already commented with a specific marker (paginated)
async function hasCommentWithMarker(prNumber, marker) {
// Use paginate to fetch ALL comments, not just first 100
const comments = await github.paginate(
github.rest.issues.listComments,
{
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: prNumber,
per_page: 100
}
);
return comments.some(c =>
c.user?.type === 'Bot' &&
c.body?.includes(marker)
);
}
// Helper: Days since a date
function daysSince(dateString) {
const date = new Date(dateString);
const now = new Date();
return Math.floor((now - date) / (1000 * 60 * 60 * 24));
}
// Determine which PRs to check
let prsToCheck = [];
if (context.eventName === 'status') {
// Status event from CLA-assistant - find PRs with this commit
const sha = context.payload.sha;
console.log(`Status event for SHA: ${sha}, context: ${context.payload.context}`);
// Search for open PRs with this head SHA
const { data: prs } = await github.rest.pulls.list({
owner: context.repo.owner,
repo: context.repo.repo,
state: 'open',
per_page: 100
});
prsToCheck = prs.filter(pr => pr.head.sha === sha).map(pr => pr.number);
if (prsToCheck.length === 0) {
console.log('No open PRs found with this SHA');
return;
}
} else if (context.eventName === 'pull_request_target') {
prsToCheck = [context.payload.pull_request.number];
} else if (context.eventName === 'workflow_dispatch' && context.payload.inputs?.pr_number) {
prsToCheck = [parseInt(context.payload.inputs.pr_number)];
} else {
// Scheduled run: check all open PRs (paginated to handle >100 PRs)
const openPRs = await github.paginate(
github.rest.pulls.list,
{
owner: context.repo.owner,
repo: context.repo.repo,
state: 'open',
per_page: 100
}
);
prsToCheck = openPRs.map(pr => pr.number);
}
console.log(`Checking ${prsToCheck.length} PR(s): ${prsToCheck.join(', ')}`);
for (const prNumber of prsToCheck) {
try {
// Get PR details
const { data: pr } = await github.rest.pulls.get({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: prNumber
});
// Skip if PR is from a bot
if (pr.user.type === 'Bot') {
console.log(`PR #${prNumber}: Skipping bot PR`);
continue;
}
// Skip if PR is not open (closed/merged)
if (pr.state !== 'open') {
console.log(`PR #${prNumber}: Skipping non-open PR (state=${pr.state})`);
continue;
}
// Skip if PR doesn't touch platform code (CLA automation only for autogpt_platform/)
const PLATFORM_PATH = 'autogpt_platform/';
const { data: files } = await github.rest.pulls.listFiles({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: prNumber,
per_page: 100
});
const touchesPlatform = files.some(f => f.filename.startsWith(PLATFORM_PATH));
if (!touchesPlatform) {
console.log(`PR #${prNumber}: Skipping - doesn't touch ${PLATFORM_PATH}`);
continue;
}
const claStatus = await getClaStatus(pr.head.sha);
const currentLabels = pr.labels.map(l => l.name);
const hasPending = currentLabels.includes(LABEL_PENDING);
const hasSigned = currentLabels.includes(LABEL_SIGNED);
const prAgeDays = daysSince(pr.created_at);
console.log(`PR #${prNumber}: CLA ${claStatus.passed ? 'passed' : 'pending'} (${claStatus.state}), age: ${prAgeDays} days`);
if (claStatus.passed) {
// ✅ CLA signed - add signed label, remove pending
if (!hasSigned) {
await github.rest.issues.addLabels({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: prNumber,
labels: [LABEL_SIGNED]
});
console.log(`Added '${LABEL_SIGNED}' to PR #${prNumber}`);
}
if (hasPending) {
await github.rest.issues.removeLabel({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: prNumber,
name: LABEL_PENDING
});
console.log(`Removed '${LABEL_PENDING}' from PR #${prNumber}`);
}
} else {
// ⏳ CLA pending
// Add pending label if not present
if (!hasPending) {
await github.rest.issues.addLabels({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: prNumber,
labels: [LABEL_PENDING]
});
console.log(`Added '${LABEL_PENDING}' to PR #${prNumber}`);
}
if (hasSigned) {
await github.rest.issues.removeLabel({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: prNumber,
name: LABEL_SIGNED
});
console.log(`Removed '${LABEL_SIGNED}' from PR #${prNumber}`);
}
// Check if we need to send reminder or close
const REMINDER_MARKER = '<!-- cla-reminder -->';
const CLOSE_WARNING_MARKER = '<!-- cla-close-warning -->';
// 📢 Reminder after REMINDER_DAYS (but before warning window)
if (prAgeDays >= REMINDER_DAYS && prAgeDays < CLOSE_WARNING_DAYS) {
const hasReminder = await hasCommentWithMarker(prNumber, REMINDER_MARKER);
if (!hasReminder) {
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: prNumber,
body: `${REMINDER_MARKER}
👋 **Friendly reminder:** This PR is waiting on a signed CLA.
All contributors need to sign our Contributor License Agreement before we can merge this PR.
**➡️ [Sign the CLA here](${CLA_SIGN_URL}?pullRequest=${prNumber})**
<details>
<summary>Why do we need a CLA?</summary>
The CLA protects both you and the project by clarifying the terms under which your contribution is made. It's a one-time process — once signed, it covers all your future contributions.
</details>
<details>
<summary>Common issues</summary>
- **Email mismatch:** Make sure your Git commit email matches your GitHub account email
- **Merge commits:** If you merged \`dev\` into your branch, try rebasing instead: \`git rebase origin/dev && git push --force-with-lease\`
- **Multiple authors:** All commit authors need to sign, not just the PR author
</details>
If you have questions, just ask! 🙂`
});
console.log(`Posted reminder on PR #${prNumber}`);
}
}
// ⚠️ Close warning at CLOSE_WARNING_DAYS
if (prAgeDays >= CLOSE_WARNING_DAYS && prAgeDays < CLOSE_DAYS) {
const hasCloseWarning = await hasCommentWithMarker(prNumber, CLOSE_WARNING_MARKER);
if (!hasCloseWarning) {
const daysRemaining = CLOSE_DAYS - prAgeDays;
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: prNumber,
body: `${CLOSE_WARNING_MARKER}
⚠️ **This PR will be automatically closed in ${daysRemaining} day${daysRemaining === 1 ? '' : 's'}** if the CLA is not signed.
We haven't received a signed CLA from all contributors yet. Please sign it to keep this PR open:
**➡️ [Sign the CLA here](${CLA_SIGN_URL}?pullRequest=${prNumber})**
If you're unable to sign or have questions, please let us know — we're happy to help!`
});
console.log(`Posted close warning on PR #${prNumber}`);
}
}
// 🚪 Auto-close after CLOSE_DAYS
if (prAgeDays >= CLOSE_DAYS) {
const CLOSE_MARKER = '<!-- cla-auto-closed -->';
const OVERRIDE_LABEL = 'cla: override';
// Check for override label (maintainer wants to keep PR open)
if (currentLabels.includes(OVERRIDE_LABEL)) {
console.log(`PR #${prNumber}: Skipping close due to '${OVERRIDE_LABEL}' label`);
} else {
// Check if we already posted a close comment
const hasCloseComment = await hasCommentWithMarker(prNumber, CLOSE_MARKER);
if (!hasCloseComment) {
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: prNumber,
body: `${CLOSE_MARKER}
👋 Closing this PR due to unsigned CLA after ${CLOSE_DAYS} days.
Thank you for your contribution! If you'd still like to contribute:
1. [Sign the CLA](${CLA_SIGN_URL})
2. Re-open this PR or create a new one
We appreciate your interest in AutoGPT and hope to see you back! 🚀`
});
}
await github.rest.pulls.update({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: prNumber,
state: 'closed'
});
console.log(`Closed PR #${prNumber} due to unsigned CLA`);
}
}
}
} catch (error) {
console.error(`Error processing PR #${prNumber}: ${error.message}`);
}
}
console.log('CLA label sync complete!');

View File

@@ -49,7 +49,7 @@ jobs:
- name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
if: github.event_name == 'push'
-uses: peter-evans/create-pull-request@v8
+uses: peter-evans/create-pull-request@v7
with:
add-paths: classic/frontend/build/web
base: ${{ github.ref_name }}

View File

@@ -42,7 +42,7 @@ jobs:
- name: Get CI failure details
id: failure_details
-uses: actions/github-script@v8
+uses: actions/github-script@v7
with:
script: |
const run = await github.rest.actions.getWorkflowRun({

View File

@@ -41,7 +41,7 @@ jobs:
python-version: "3.11" # Use standard version matching CI
- name: Set up Python dependency cache
-uses: actions/cache@v5
+uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
@@ -78,7 +78,7 @@ jobs:
# Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
- name: Set up Node.js
-uses: actions/setup-node@v6
+uses: actions/setup-node@v4
with:
node-version: "22"
@@ -91,7 +91,7 @@ jobs:
echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV
- name: Cache frontend dependencies
-uses: actions/cache@v5
+uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
@@ -124,7 +124,7 @@ jobs:
# Phase 1: Cache and load Docker images for faster setup
- name: Set up Docker image cache
id: docker-cache
-uses: actions/cache@v5
+uses: actions/cache@v4
with:
path: ~/docker-cache
# Use a versioned key for cache invalidation when image list changes
@@ -309,7 +309,6 @@ jobs:
uses: anthropics/claude-code-action@v1
with:
claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
-allowed_bots: "dependabot[bot]"
claude_args: |
--allowedTools "Bash(npm:*),Bash(pnpm:*),Bash(poetry:*),Bash(git:*),Edit,Replace,NotebookEditCell,mcp__github_inline_comment__create_inline_comment,Bash(gh pr comment:*), Bash(gh pr diff:*), Bash(gh pr view:*)"
prompt: |

View File

@@ -57,7 +57,7 @@ jobs:
python-version: "3.11" # Use standard version matching CI
- name: Set up Python dependency cache
-uses: actions/cache@v5
+uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
@@ -94,7 +94,7 @@ jobs:
# Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
- name: Set up Node.js
-uses: actions/setup-node@v6
+uses: actions/setup-node@v4
with:
node-version: "22"
@@ -107,7 +107,7 @@ jobs:
echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV
- name: Cache frontend dependencies
-uses: actions/cache@v5
+uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
@@ -140,7 +140,7 @@ jobs:
# Phase 1: Cache and load Docker images for faster setup
- name: Set up Docker image cache
id: docker-cache
-uses: actions/cache@v5
+uses: actions/cache@v4
with:
path: ~/docker-cache
# Use a versioned key for cache invalidation when image list changes

View File

@@ -39,7 +39,7 @@ jobs:
python-version: "3.11" # Use standard version matching CI
- name: Set up Python dependency cache
-uses: actions/cache@v5
+uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
@@ -76,7 +76,7 @@ jobs:
# Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
- name: Set up Node.js
-uses: actions/setup-node@v6
+uses: actions/setup-node@v4
with:
node-version: "22"
@@ -89,7 +89,7 @@ jobs:
echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV
- name: Cache frontend dependencies
-uses: actions/cache@v5
+uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
@@ -132,7 +132,7 @@ jobs:
# Phase 1: Cache and load Docker images for faster setup
- name: Set up Docker image cache
id: docker-cache
-uses: actions/cache@v5
+uses: actions/cache@v4
with:
path: ~/docker-cache
# Use a versioned key for cache invalidation when image list changes

View File

@@ -33,7 +33,7 @@ jobs:
python-version: "3.11"
- name: Set up Python dependency cache
-uses: actions/cache@v5
+uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}

View File

@@ -33,7 +33,7 @@ jobs:
python-version: "3.11"
- name: Set up Python dependency cache
-uses: actions/cache@v5
+uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}

View File

@@ -38,7 +38,7 @@ jobs:
python-version: "3.11"
- name: Set up Python dependency cache
-uses: actions/cache@v5
+uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}

View File

@@ -88,7 +88,7 @@ jobs:
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Set up Python dependency cache
-uses: actions/cache@v5
+uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}

View File

@@ -17,7 +17,7 @@ jobs:
- name: Check comment permissions and deployment status
id: check_status
if: github.event_name == 'issue_comment' && github.event.issue.pull_request
-uses: actions/github-script@v8
+uses: actions/github-script@v7
with:
script: |
const commentBody = context.payload.comment.body.trim();
@@ -55,7 +55,7 @@ jobs:
- name: Post permission denied comment
if: steps.check_status.outputs.permission_denied == 'true'
-uses: actions/github-script@v8
+uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
@@ -68,7 +68,7 @@ jobs:
- name: Get PR details for deployment
id: pr_details
if: steps.check_status.outputs.should_deploy == 'true' || steps.check_status.outputs.should_undeploy == 'true'
-uses: actions/github-script@v8
+uses: actions/github-script@v7
with:
script: |
const pr = await github.rest.pulls.get({
@@ -98,7 +98,7 @@ jobs:
- name: Post deploy success comment
if: steps.check_status.outputs.should_deploy == 'true'
-uses: actions/github-script@v8
+uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
@@ -126,7 +126,7 @@ jobs:
- name: Post undeploy success comment
if: steps.check_status.outputs.should_undeploy == 'true'
-uses: actions/github-script@v8
+uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
@@ -139,7 +139,7 @@ jobs:
- name: Check deployment status on PR close
id: check_pr_close
if: github.event_name == 'pull_request' && github.event.action == 'closed'
-uses: actions/github-script@v8
+uses: actions/github-script@v7
with:
script: |
const comments = await github.rest.issues.listComments({
@@ -187,7 +187,7 @@ jobs:
github.event_name == 'pull_request' &&
github.event.action == 'closed' &&
steps.check_pr_close.outputs.should_undeploy == 'true'
-uses: actions/github-script@v8
+uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({

View File

@@ -42,7 +42,7 @@ jobs:
- 'autogpt_platform/frontend/src/components/**'
- name: Set up Node.js
-uses: actions/setup-node@v6
+uses: actions/setup-node@v4
with:
node-version: "22.18.0"
@@ -54,7 +54,7 @@ jobs:
run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}" >> $GITHUB_OUTPUT
- name: Cache dependencies
-uses: actions/cache@v5
+uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ steps.cache-key.outputs.key }}
@@ -74,7 +74,7 @@ jobs:
uses: actions/checkout@v4
- name: Set up Node.js
-uses: actions/setup-node@v6
+uses: actions/setup-node@v4
with:
node-version: "22.18.0"
@@ -82,7 +82,7 @@ jobs:
run: corepack enable
- name: Restore dependencies cache
-uses: actions/cache@v5
+uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
@@ -112,7 +112,7 @@ jobs:
fetch-depth: 0
- name: Set up Node.js
-uses: actions/setup-node@v6
+uses: actions/setup-node@v4
with:
node-version: "22.18.0"
@@ -120,7 +120,7 @@ jobs:
run: corepack enable
- name: Restore dependencies cache
-uses: actions/cache@v5
+uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
@@ -153,7 +153,7 @@ jobs:
submodules: recursive
- name: Set up Node.js
-uses: actions/setup-node@v6
+uses: actions/setup-node@v4
with:
node-version: "22.18.0"
@@ -176,7 +176,7 @@ jobs:
uses: docker/setup-buildx-action@v3
- name: Cache Docker layers
-uses: actions/cache@v5
+uses: actions/cache@v4
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-frontend-test-${{ hashFiles('autogpt_platform/docker-compose.yml', 'autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/pyproject.toml', 'autogpt_platform/backend/poetry.lock') }}
@@ -231,7 +231,7 @@ jobs:
fi
- name: Restore dependencies cache
-uses: actions/cache@v5
+uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
@@ -282,7 +282,7 @@ jobs:
submodules: recursive
- name: Set up Node.js
-uses: actions/setup-node@v6
+uses: actions/setup-node@v4
with:
node-version: "22.18.0"
@@ -290,7 +290,7 @@ jobs:
run: corepack enable
- name: Restore dependencies cache
-uses: actions/cache@v5
+uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}

View File

@@ -32,7 +32,7 @@ jobs:
uses: actions/checkout@v4
- name: Set up Node.js
-uses: actions/setup-node@v6
+uses: actions/setup-node@v4
with:
node-version: "22.18.0"
@@ -44,7 +44,7 @@ jobs:
run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}" >> $GITHUB_OUTPUT
- name: Cache dependencies
-uses: actions/cache@v5
+uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ steps.cache-key.outputs.key }}
@@ -68,7 +68,7 @@ jobs:
submodules: recursive
- name: Set up Node.js
-uses: actions/setup-node@v6
+uses: actions/setup-node@v4
with:
node-version: "22.18.0"
@@ -88,7 +88,7 @@ jobs:
docker compose -f ../docker-compose.yml --profile local --profile deps_backend up -d
- name: Restore dependencies cache
-uses: actions/cache@v5
+uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}

(File diff suppressed because it is too large)

View File

@@ -9,25 +9,25 @@ packages = [{ include = "autogpt_libs" }]
[tool.poetry.dependencies]
python = ">=3.10,<4.0"
colorama = "^0.4.6"
cryptography = "^46.0"
cryptography = "^45.0"
expiringdict = "^1.2.2"
fastapi = "^0.128.0"
google-cloud-logging = "^3.13.0"
launchdarkly-server-sdk = "^9.14.1"
pydantic = "^2.12.5"
pydantic-settings = "^2.12.0"
pyjwt = { version = "^2.11.0", extras = ["crypto"] }
fastapi = "^0.116.1"
google-cloud-logging = "^3.12.1"
launchdarkly-server-sdk = "^9.12.0"
pydantic = "^2.11.7"
pydantic-settings = "^2.10.1"
pyjwt = { version = "^2.10.1", extras = ["crypto"] }
redis = "^6.2.0"
supabase = "^2.27.2"
uvicorn = "^0.40.0"
supabase = "^2.16.0"
uvicorn = "^0.35.0"
[tool.poetry.group.dev.dependencies]
pyright = "^1.1.408"
pyright = "^1.1.404"
pytest = "^8.4.1"
pytest-asyncio = "^1.3.0"
pytest-mock = "^3.15.1"
pytest-asyncio = "^1.1.0"
pytest-mock = "^3.14.1"
pytest-cov = "^6.2.1"
ruff = "^0.15.0"
ruff = "^0.12.11"
[build-system]
requires = ["poetry-core"]

View File

@@ -13,32 +13,10 @@ from backend.api.features.chat.tools.models import (
NoResultsResponse,
)
from backend.api.features.store.hybrid_search import unified_hybrid_search
-from backend.data.block import BlockType, get_block
+from backend.data.block import get_block
logger = logging.getLogger(__name__)
_TARGET_RESULTS = 10
-# Over-fetch to compensate for post-hoc filtering of graph-only blocks.
-# 40 is ~2x the current removal count; the query-speed difference between 10 and 40 is minimal.
-_OVERFETCH_PAGE_SIZE = 40
-# Block types that only work within graphs and cannot run standalone in CoPilot.
-COPILOT_EXCLUDED_BLOCK_TYPES = {
-BlockType.INPUT, # Graph interface definition - data enters via chat, not graph inputs
-BlockType.OUTPUT, # Graph interface definition - data exits via chat, not graph outputs
-BlockType.WEBHOOK, # Wait for external events - would hang forever in CoPilot
-BlockType.WEBHOOK_MANUAL, # Same as WEBHOOK
-BlockType.NOTE, # Visual annotation only - no runtime behavior
-BlockType.HUMAN_IN_THE_LOOP, # Pauses for human approval - CoPilot IS human-in-the-loop
-BlockType.AGENT, # AgentExecutorBlock requires execution_context - use run_agent tool
-}
-# Specific block IDs excluded from CoPilot (STANDARD type but still require graph context)
-COPILOT_EXCLUDED_BLOCK_IDS = {
-# SmartDecisionMakerBlock - dynamically discovers downstream blocks via graph topology
-"3b191d9f-356f-482d-8238-ba04b6d18381",
-}
class FindBlockTool(BaseTool):
"""Tool for searching available blocks."""
@@ -110,7 +88,7 @@ class FindBlockTool(BaseTool):
query=query,
content_types=[ContentType.BLOCK],
page=1,
-page_size=_OVERFETCH_PAGE_SIZE,
+page_size=10,
)
if not results:
@@ -130,90 +108,60 @@ class FindBlockTool(BaseTool):
block = get_block(block_id)
# Skip disabled blocks
if not block or block.disabled:
continue
if block and not block.disabled:
# Get input/output schemas
input_schema = {}
output_schema = {}
try:
input_schema = block.input_schema.jsonschema()
except Exception:
pass
try:
output_schema = block.output_schema.jsonschema()
except Exception:
pass
# Skip blocks excluded from CoPilot (graph-only blocks)
if (
block.block_type in COPILOT_EXCLUDED_BLOCK_TYPES
or block.id in COPILOT_EXCLUDED_BLOCK_IDS
):
continue
# Get categories from block instance
categories = []
if hasattr(block, "categories") and block.categories:
categories = [cat.value for cat in block.categories]
# Get input/output schemas
input_schema = {}
output_schema = {}
try:
input_schema = block.input_schema.jsonschema()
except Exception as e:
logger.debug(
"Failed to generate input schema for block %s: %s",
block_id,
e,
)
try:
output_schema = block.output_schema.jsonschema()
except Exception as e:
logger.debug(
"Failed to generate output schema for block %s: %s",
block_id,
e,
)
# Get categories from block instance
categories = []
if hasattr(block, "categories") and block.categories:
categories = [cat.value for cat in block.categories]
# Extract required inputs for easier use
required_inputs: list[BlockInputFieldInfo] = []
if input_schema:
properties = input_schema.get("properties", {})
required_fields = set(input_schema.get("required", []))
# Get credential field names to exclude from required inputs
credentials_fields = set(
block.input_schema.get_credentials_fields().keys()
)
for field_name, field_schema in properties.items():
# Skip credential fields - they're handled separately
if field_name in credentials_fields:
continue
required_inputs.append(
BlockInputFieldInfo(
name=field_name,
type=field_schema.get("type", "string"),
description=field_schema.get("description", ""),
required=field_name in required_fields,
default=field_schema.get("default"),
)
# Extract required inputs for easier use
required_inputs: list[BlockInputFieldInfo] = []
if input_schema:
properties = input_schema.get("properties", {})
required_fields = set(input_schema.get("required", []))
# Get credential field names to exclude from required inputs
credentials_fields = set(
block.input_schema.get_credentials_fields().keys()
)
blocks.append(
BlockInfoSummary(
id=block_id,
name=block.name,
description=block.description or "",
categories=categories,
input_schema=input_schema,
output_schema=output_schema,
required_inputs=required_inputs,
for field_name, field_schema in properties.items():
# Skip credential fields - they're handled separately
if field_name in credentials_fields:
continue
required_inputs.append(
BlockInputFieldInfo(
name=field_name,
type=field_schema.get("type", "string"),
description=field_schema.get("description", ""),
required=field_name in required_fields,
default=field_schema.get("default"),
)
)
blocks.append(
BlockInfoSummary(
id=block_id,
name=block.name,
description=block.description or "",
categories=categories,
input_schema=input_schema,
output_schema=output_schema,
required_inputs=required_inputs,
)
)
)
if len(blocks) >= _TARGET_RESULTS:
break
if blocks and len(blocks) < _TARGET_RESULTS:
logger.debug(
"find_block returned %d/%d results for query '%s' "
"(filtered %d excluded/disabled blocks)",
len(blocks),
_TARGET_RESULTS,
query,
len(results) - len(blocks),
)
if not blocks:
return NoResultsResponse(

View File

@@ -1,139 +0,0 @@
"""Tests for block filtering in FindBlockTool."""
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from backend.api.features.chat.tools.find_block import (
COPILOT_EXCLUDED_BLOCK_IDS,
COPILOT_EXCLUDED_BLOCK_TYPES,
FindBlockTool,
)
from backend.api.features.chat.tools.models import BlockListResponse
from backend.data.block import BlockType
from ._test_data import make_session
_TEST_USER_ID = "test-user-find-block"
def make_mock_block(
block_id: str, name: str, block_type: BlockType, disabled: bool = False
):
"""Create a mock block for testing."""
mock = MagicMock()
mock.id = block_id
mock.name = name
mock.description = f"{name} description"
mock.block_type = block_type
mock.disabled = disabled
mock.input_schema = MagicMock()
mock.input_schema.jsonschema.return_value = {"properties": {}, "required": []}
mock.input_schema.get_credentials_fields.return_value = {}
mock.output_schema = MagicMock()
mock.output_schema.jsonschema.return_value = {}
mock.categories = []
return mock
class TestFindBlockFiltering:
"""Tests for block filtering in FindBlockTool."""
def test_excluded_block_types_contains_expected_types(self):
"""Verify COPILOT_EXCLUDED_BLOCK_TYPES contains all graph-only types."""
assert BlockType.INPUT in COPILOT_EXCLUDED_BLOCK_TYPES
assert BlockType.OUTPUT in COPILOT_EXCLUDED_BLOCK_TYPES
assert BlockType.WEBHOOK in COPILOT_EXCLUDED_BLOCK_TYPES
assert BlockType.WEBHOOK_MANUAL in COPILOT_EXCLUDED_BLOCK_TYPES
assert BlockType.NOTE in COPILOT_EXCLUDED_BLOCK_TYPES
assert BlockType.HUMAN_IN_THE_LOOP in COPILOT_EXCLUDED_BLOCK_TYPES
assert BlockType.AGENT in COPILOT_EXCLUDED_BLOCK_TYPES
def test_excluded_block_ids_contains_smart_decision_maker(self):
"""Verify SmartDecisionMakerBlock is in COPILOT_EXCLUDED_BLOCK_IDS."""
assert "3b191d9f-356f-482d-8238-ba04b6d18381" in COPILOT_EXCLUDED_BLOCK_IDS
@pytest.mark.asyncio(loop_scope="session")
async def test_excluded_block_type_filtered_from_results(self):
"""Verify blocks with excluded BlockTypes are filtered from search results."""
session = make_session(user_id=_TEST_USER_ID)
# Mock search returns an INPUT block (excluded) and a STANDARD block (included)
search_results = [
{"content_id": "input-block-id", "score": 0.9},
{"content_id": "standard-block-id", "score": 0.8},
]
input_block = make_mock_block("input-block-id", "Input Block", BlockType.INPUT)
standard_block = make_mock_block(
"standard-block-id", "HTTP Request", BlockType.STANDARD
)
def mock_get_block(block_id):
return {
"input-block-id": input_block,
"standard-block-id": standard_block,
}.get(block_id)
with patch(
"backend.api.features.chat.tools.find_block.unified_hybrid_search",
new_callable=AsyncMock,
return_value=(search_results, 2),
):
with patch(
"backend.api.features.chat.tools.find_block.get_block",
side_effect=mock_get_block,
):
tool = FindBlockTool()
response = await tool._execute(
user_id=_TEST_USER_ID, session=session, query="test"
)
# Should only return the standard block, not the INPUT block
assert isinstance(response, BlockListResponse)
assert len(response.blocks) == 1
assert response.blocks[0].id == "standard-block-id"
@pytest.mark.asyncio(loop_scope="session")
async def test_excluded_block_id_filtered_from_results(self):
"""Verify SmartDecisionMakerBlock is filtered from search results."""
session = make_session(user_id=_TEST_USER_ID)
smart_decision_id = "3b191d9f-356f-482d-8238-ba04b6d18381"
search_results = [
{"content_id": smart_decision_id, "score": 0.9},
{"content_id": "normal-block-id", "score": 0.8},
]
# SmartDecisionMakerBlock has STANDARD type but is excluded by ID
smart_block = make_mock_block(
smart_decision_id, "Smart Decision Maker", BlockType.STANDARD
)
normal_block = make_mock_block(
"normal-block-id", "Normal Block", BlockType.STANDARD
)
def mock_get_block(block_id):
return {
smart_decision_id: smart_block,
"normal-block-id": normal_block,
}.get(block_id)
with patch(
"backend.api.features.chat.tools.find_block.unified_hybrid_search",
new_callable=AsyncMock,
return_value=(search_results, 2),
):
with patch(
"backend.api.features.chat.tools.find_block.get_block",
side_effect=mock_get_block,
):
tool = FindBlockTool()
response = await tool._execute(
user_id=_TEST_USER_ID, session=session, query="decision"
)
# Should only return normal block, not SmartDecisionMakerBlock
assert isinstance(response, BlockListResponse)
assert len(response.blocks) == 1
assert response.blocks[0].id == "normal-block-id"

View File

@@ -8,10 +8,6 @@ from typing import Any
from pydantic_core import PydanticUndefined
from backend.api.features.chat.model import ChatSession
-from backend.api.features.chat.tools.find_block import (
-COPILOT_EXCLUDED_BLOCK_IDS,
-COPILOT_EXCLUDED_BLOCK_TYPES,
-)
from backend.data.block import get_block
from backend.data.execution import ExecutionContext
from backend.data.model import CredentialsMetaInput
@@ -216,19 +212,6 @@ class RunBlockTool(BaseTool):
session_id=session_id,
)
-# Check if block is excluded from CoPilot (graph-only blocks)
-if (
-block.block_type in COPILOT_EXCLUDED_BLOCK_TYPES
-or block.id in COPILOT_EXCLUDED_BLOCK_IDS
-):
-return ErrorResponse(
-message=(
-f"Block '{block.name}' cannot be run directly in CoPilot. "
-"This block is designed for use within graphs only."
-),
-session_id=session_id,
-)
logger.info(f"Executing block {block.name} ({block_id}) for user {user_id}")
creds_manager = IntegrationCredentialsManager()

View File

@@ -1,106 +0,0 @@
"""Tests for block execution guards in RunBlockTool."""
from unittest.mock import MagicMock, patch
import pytest
from backend.api.features.chat.tools.models import ErrorResponse
from backend.api.features.chat.tools.run_block import RunBlockTool
from backend.data.block import BlockType
from ._test_data import make_session
_TEST_USER_ID = "test-user-run-block"
def make_mock_block(
block_id: str, name: str, block_type: BlockType, disabled: bool = False
):
"""Create a mock block for testing."""
mock = MagicMock()
mock.id = block_id
mock.name = name
mock.block_type = block_type
mock.disabled = disabled
mock.input_schema = MagicMock()
mock.input_schema.jsonschema.return_value = {"properties": {}, "required": []}
mock.input_schema.get_credentials_fields_info.return_value = []
return mock
class TestRunBlockFiltering:
"""Tests for block execution guards in RunBlockTool."""
@pytest.mark.asyncio(loop_scope="session")
async def test_excluded_block_type_returns_error(self):
"""Attempting to execute a block with excluded BlockType returns error."""
session = make_session(user_id=_TEST_USER_ID)
input_block = make_mock_block("input-block-id", "Input Block", BlockType.INPUT)
with patch(
"backend.api.features.chat.tools.run_block.get_block",
return_value=input_block,
):
tool = RunBlockTool()
response = await tool._execute(
user_id=_TEST_USER_ID,
session=session,
block_id="input-block-id",
input_data={},
)
assert isinstance(response, ErrorResponse)
assert "cannot be run directly in CoPilot" in response.message
assert "designed for use within graphs only" in response.message
@pytest.mark.asyncio(loop_scope="session")
async def test_excluded_block_id_returns_error(self):
"""Attempting to execute SmartDecisionMakerBlock returns error."""
session = make_session(user_id=_TEST_USER_ID)
smart_decision_id = "3b191d9f-356f-482d-8238-ba04b6d18381"
smart_block = make_mock_block(
smart_decision_id, "Smart Decision Maker", BlockType.STANDARD
)
with patch(
"backend.api.features.chat.tools.run_block.get_block",
return_value=smart_block,
):
tool = RunBlockTool()
response = await tool._execute(
user_id=_TEST_USER_ID,
session=session,
block_id=smart_decision_id,
input_data={},
)
assert isinstance(response, ErrorResponse)
assert "cannot be run directly in CoPilot" in response.message
@pytest.mark.asyncio(loop_scope="session")
async def test_non_excluded_block_passes_guard(self):
"""Non-excluded blocks pass the filtering guard (may fail later for other reasons)."""
session = make_session(user_id=_TEST_USER_ID)
standard_block = make_mock_block(
"standard-id", "HTTP Request", BlockType.STANDARD
)
with patch(
"backend.api.features.chat.tools.run_block.get_block",
return_value=standard_block,
):
tool = RunBlockTool()
response = await tool._execute(
user_id=_TEST_USER_ID,
session=session,
block_id="standard-id",
input_data={},
)
# Should NOT be an ErrorResponse about CoPilot exclusion
# (may be other errors like missing credentials, but not the exclusion guard)
if isinstance(response, ErrorResponse):
assert "cannot be run directly in CoPilot" not in response.message

View File

@@ -6,6 +6,7 @@ from typing import Any
from backend.api.features.library import db as library_db
from backend.api.features.library import model as library_model
from backend.api.features.store import db as store_db
from backend.data import graph as graph_db
from backend.data.graph import GraphModel
from backend.data.model import (
CredentialsFieldInfo,
@@ -43,8 +44,14 @@ async def fetch_graph_from_store_slug(
return None, None
# Get the graph from store listing version
-graph = await store_db.get_available_graph(
-store_agent.store_listing_version_id, hide_nodes=False
+graph_meta = await store_db.get_available_graph(
+store_agent.store_listing_version_id
)
+graph = await graph_db.get_graph(
+graph_id=graph_meta.id,
+version=graph_meta.version,
+user_id=None, # Public access
+include_subgraphs=True,
+)
return graph, store_agent
@@ -121,7 +128,7 @@ def build_missing_credentials_from_graph(
return {
field_key: _serialize_missing_credential(field_key, field_info)
-for field_key, (field_info, _, _) in aggregated_fields.items()
+for field_key, (field_info, _node_fields) in aggregated_fields.items()
if field_key not in matched_keys
}
@@ -262,8 +269,7 @@ async def match_user_credentials_to_graph(
# provider is in the set of acceptable providers.
for credential_field_name, (
credential_requirements,
-_,
-_,
+_node_fields,
) in aggregated_creds.items():
# Find first matching credential by provider, type, and scopes
matching_cred = next(

View File

@@ -374,7 +374,7 @@ async def get_library_agent_by_graph_id(
async def add_generated_agent_image(
-graph: graph_db.GraphBaseMeta,
+graph: graph_db.BaseGraph,
user_id: str,
library_agent_id: str,
) -> Optional[prisma.models.LibraryAgent]:

View File

@@ -1,7 +1,7 @@
import asyncio
import logging
from datetime import datetime, timezone
-from typing import Any, Literal, overload
+from typing import Any, Literal
import fastapi
import prisma.enums
@@ -11,8 +11,8 @@ import prisma.types
from backend.data.db import transaction
from backend.data.graph import (
+GraphMeta,
GraphModel,
-GraphModelWithoutNodes,
get_graph,
get_graph_as_admin,
get_sub_graphs,
@@ -334,22 +334,7 @@ async def get_store_agent_details(
raise DatabaseError("Failed to fetch agent details") from e
-@overload
-async def get_available_graph(
-store_listing_version_id: str, hide_nodes: Literal[False]
-) -> GraphModel: ...
-@overload
-async def get_available_graph(
-store_listing_version_id: str, hide_nodes: Literal[True] = True
-) -> GraphModelWithoutNodes: ...
-async def get_available_graph(
-store_listing_version_id: str,
-hide_nodes: bool = True,
-) -> GraphModelWithoutNodes | GraphModel:
+async def get_available_graph(store_listing_version_id: str) -> GraphMeta:
try:
# Get available, non-deleted store listing version
store_listing_version = (
@@ -359,7 +344,7 @@ async def get_available_graph(
"isAvailable": True,
"isDeleted": False,
},
include={"AgentGraph": {"include": AGENT_GRAPH_INCLUDE}},
include={"AgentGraph": {"include": {"Nodes": True}}},
)
)
@@ -369,9 +354,7 @@ async def get_available_graph(
detail=f"Store listing version {store_listing_version_id} not found",
)
-return (GraphModelWithoutNodes if hide_nodes else GraphModel).from_db(
-store_listing_version.AgentGraph
-)
+return GraphModel.from_db(store_listing_version.AgentGraph).meta()
except Exception as e:
logger.error(f"Error getting agent: {e}")

View File

@@ -16,7 +16,7 @@ from backend.blocks.ideogram import (
StyleType,
UpscaleOption,
)
-from backend.data.graph import GraphBaseMeta
+from backend.data.graph import BaseGraph
from backend.data.model import CredentialsMetaInput, ProviderName
from backend.integrations.credentials_store import ideogram_credentials
from backend.util.request import Requests
@@ -34,14 +34,14 @@ class ImageStyle(str, Enum):
DIGITAL_ART = "digital art"
-async def generate_agent_image(agent: GraphBaseMeta | AgentGraph) -> io.BytesIO:
+async def generate_agent_image(agent: BaseGraph | AgentGraph) -> io.BytesIO:
if settings.config.use_agent_image_generation_v2:
return await generate_agent_image_v2(graph=agent)
else:
return await generate_agent_image_v1(agent=agent)
-async def generate_agent_image_v2(graph: GraphBaseMeta | AgentGraph) -> io.BytesIO:
+async def generate_agent_image_v2(graph: BaseGraph | AgentGraph) -> io.BytesIO:
"""
Generate an image for an agent using Ideogram model.
Returns:
@@ -54,17 +54,14 @@ async def generate_agent_image_v2(graph: GraphBaseMeta | AgentGraph) -> io.Bytes
description = f"{name} ({graph.description})" if graph.description else name
prompt = (
"Create a visually striking retro-futuristic vector pop art illustration "
f'prominently featuring "{name}" in bold typography. The image clearly and '
f"literally depicts a {description}, along with recognizable objects directly "
f"associated with the primary function of a {name}. "
f"Ensure the imagery is concrete, intuitive, and immediately understandable, "
f"clearly conveying the purpose of a {name}. "
"Maintain vibrant, limited-palette colors, sharp vector lines, "
"geometric shapes, flat illustration techniques, and solid colors "
"without gradients or shading. Preserve a retro-futuristic aesthetic "
"influenced by mid-century futurism and 1960s psychedelia, "
"prioritizing clear visual storytelling and thematic clarity above all else."
f"Create a visually striking retro-futuristic vector pop art illustration prominently featuring "
f'"{name}" in bold typography. The image clearly and literally depicts a {description}, '
f"along with recognizable objects directly associated with the primary function of a {name}. "
f"Ensure the imagery is concrete, intuitive, and immediately understandable, clearly conveying the "
f"purpose of a {name}. Maintain vibrant, limited-palette colors, sharp vector lines, geometric "
f"shapes, flat illustration techniques, and solid colors without gradients or shading. Preserve a "
f"retro-futuristic aesthetic influenced by mid-century futurism and 1960s psychedelia, "
f"prioritizing clear visual storytelling and thematic clarity above all else."
)
custom_colors = [
@@ -102,12 +99,12 @@ async def generate_agent_image_v2(graph: GraphBaseMeta | AgentGraph) -> io.Bytes
return io.BytesIO(response.content)
-async def generate_agent_image_v1(agent: GraphBaseMeta | AgentGraph) -> io.BytesIO:
+async def generate_agent_image_v1(agent: BaseGraph | AgentGraph) -> io.BytesIO:
"""
Generate an image for an agent using Flux model via Replicate API.
Args:
-agent (GraphBaseMeta | AgentGraph): The agent to generate an image for
+agent (Graph): The agent to generate an image for
Returns:
io.BytesIO: The generated image as bytes
@@ -117,13 +114,7 @@ async def generate_agent_image_v1(agent: GraphBaseMeta | AgentGraph) -> io.Bytes
raise ValueError("Missing Replicate API key in settings")
# Construct prompt from agent details
-prompt = (
-"Create a visually engaging app store thumbnail for the AI agent "
-"that highlights what it does in a clear and captivating way:\n"
-f"- **Name**: {agent.name}\n"
-f"- **Description**: {agent.description}\n"
-f"Focus on showcasing its core functionality with an appealing design."
-)
+prompt = f"Create a visually engaging app store thumbnail for the AI agent that highlights what it does in a clear and captivating way:\n- **Name**: {agent.name}\n- **Description**: {agent.description}\nFocus on showcasing its core functionality with an appealing design."
# Set up Replicate client
client = ReplicateClient(api_token=settings.secrets.replicate_api_key)

View File

@@ -278,7 +278,7 @@ async def get_agent(
)
async def get_graph_meta_by_store_listing_version_id(
store_listing_version_id: str,
-) -> backend.data.graph.GraphModelWithoutNodes:
+) -> backend.data.graph.GraphMeta:
"""
Get Agent Graph from Store Listing Version ID.
"""

View File

@@ -478,7 +478,7 @@ class ExaCreateOrFindWebsetBlock(Block):
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
try:
-webset = await aexa.websets.get(id=input_data.external_id)
+webset = aexa.websets.get(id=input_data.external_id)
webset_result = Webset.model_validate(webset.model_dump(by_alias=True))
yield "webset", webset_result
@@ -494,7 +494,7 @@ class ExaCreateOrFindWebsetBlock(Block):
count=input_data.search_count,
)
-webset = await aexa.websets.create(
+webset = aexa.websets.create(
params=CreateWebsetParameters(
search=search_params,
external_id=input_data.external_id,
@@ -554,7 +554,7 @@ class ExaUpdateWebsetBlock(Block):
if input_data.metadata is not None:
payload["metadata"] = input_data.metadata
-sdk_webset = await aexa.websets.update(id=input_data.webset_id, params=payload)
+sdk_webset = aexa.websets.update(id=input_data.webset_id, params=payload)
status_str = (
sdk_webset.status.value
@@ -617,7 +617,7 @@ class ExaListWebsetsBlock(Block):
) -> BlockOutput:
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
-response = await aexa.websets.list(
+response = aexa.websets.list(
cursor=input_data.cursor,
limit=input_data.limit,
)
@@ -678,7 +678,7 @@ class ExaGetWebsetBlock(Block):
) -> BlockOutput:
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
-sdk_webset = await aexa.websets.get(id=input_data.webset_id)
+sdk_webset = aexa.websets.get(id=input_data.webset_id)
status_str = (
sdk_webset.status.value
@@ -748,7 +748,7 @@ class ExaDeleteWebsetBlock(Block):
) -> BlockOutput:
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
-deleted_webset = await aexa.websets.delete(id=input_data.webset_id)
+deleted_webset = aexa.websets.delete(id=input_data.webset_id)
status_str = (
deleted_webset.status.value
@@ -798,7 +798,7 @@ class ExaCancelWebsetBlock(Block):
) -> BlockOutput:
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
-canceled_webset = await aexa.websets.cancel(id=input_data.webset_id)
+canceled_webset = aexa.websets.cancel(id=input_data.webset_id)
status_str = (
canceled_webset.status.value
@@ -968,7 +968,7 @@ class ExaPreviewWebsetBlock(Block):
entity["description"] = input_data.entity_description
payload["entity"] = entity
-sdk_preview = await aexa.websets.preview(params=payload)
+sdk_preview = aexa.websets.preview(params=payload)
preview = PreviewWebsetModel.from_sdk(sdk_preview)
@@ -1051,7 +1051,7 @@ class ExaWebsetStatusBlock(Block):
) -> BlockOutput:
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
-webset = await aexa.websets.get(id=input_data.webset_id)
+webset = aexa.websets.get(id=input_data.webset_id)
status = (
webset.status.value
@@ -1185,7 +1185,7 @@ class ExaWebsetSummaryBlock(Block):
) -> BlockOutput:
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
-webset = await aexa.websets.get(id=input_data.webset_id)
+webset = aexa.websets.get(id=input_data.webset_id)
# Extract basic info
webset_id = webset.id
@@ -1211,7 +1211,7 @@ class ExaWebsetSummaryBlock(Block):
total_items = 0
if input_data.include_sample_items and input_data.sample_size > 0:
-items_response = await aexa.websets.items.list(
+items_response = aexa.websets.items.list(
webset_id=input_data.webset_id, limit=input_data.sample_size
)
sample_items_data = [
@@ -1362,7 +1362,7 @@ class ExaWebsetReadyCheckBlock(Block):
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
# Get webset details
-webset = await aexa.websets.get(id=input_data.webset_id)
+webset = aexa.websets.get(id=input_data.webset_id)
status = (
webset.status.value

View File

@@ -202,7 +202,7 @@ class ExaCreateEnrichmentBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
-sdk_enrichment = await aexa.websets.enrichments.create(
+sdk_enrichment = aexa.websets.enrichments.create(
webset_id=input_data.webset_id, params=payload
)
@@ -223,7 +223,7 @@ class ExaCreateEnrichmentBlock(Block):
items_enriched = 0
while time.time() - poll_start < input_data.polling_timeout:
-current_enrich = await aexa.websets.enrichments.get(
+current_enrich = aexa.websets.enrichments.get(
webset_id=input_data.webset_id, id=enrichment_id
)
current_status = (
@@ -234,7 +234,7 @@ class ExaCreateEnrichmentBlock(Block):
if current_status in ["completed", "failed", "cancelled"]:
# Estimate items from webset searches
-webset = await aexa.websets.get(id=input_data.webset_id)
+webset = aexa.websets.get(id=input_data.webset_id)
if webset.searches:
for search in webset.searches:
if search.progress:
@@ -329,7 +329,7 @@ class ExaGetEnrichmentBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
-sdk_enrichment = await aexa.websets.enrichments.get(
+sdk_enrichment = aexa.websets.enrichments.get(
webset_id=input_data.webset_id, id=input_data.enrichment_id
)
@@ -474,7 +474,7 @@ class ExaDeleteEnrichmentBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
-deleted_enrichment = await aexa.websets.enrichments.delete(
+deleted_enrichment = aexa.websets.enrichments.delete(
webset_id=input_data.webset_id, id=input_data.enrichment_id
)
@@ -525,13 +525,13 @@ class ExaCancelEnrichmentBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
-canceled_enrichment = await aexa.websets.enrichments.cancel(
+canceled_enrichment = aexa.websets.enrichments.cancel(
webset_id=input_data.webset_id, id=input_data.enrichment_id
)
# Try to estimate how many items were enriched before cancellation
items_enriched = 0
-items_response = await aexa.websets.items.list(
+items_response = aexa.websets.items.list(
webset_id=input_data.webset_id, limit=100
)

View File

@@ -222,7 +222,7 @@ class ExaCreateImportBlock(Block):
def _create_test_mock():
"""Create test mocks for the AsyncExa SDK."""
from datetime import datetime
-from unittest.mock import AsyncMock, MagicMock
+from unittest.mock import MagicMock
# Create mock SDK import object
mock_import = MagicMock()
@@ -247,7 +247,7 @@ class ExaCreateImportBlock(Block):
return {
"_get_client": lambda *args, **kwargs: MagicMock(
websets=MagicMock(
-imports=MagicMock(create=AsyncMock(return_value=mock_import))
+imports=MagicMock(create=lambda *args, **kwargs: mock_import)
)
)
}
@@ -294,7 +294,7 @@ class ExaCreateImportBlock(Block):
if input_data.metadata:
payload["metadata"] = input_data.metadata
-sdk_import = await aexa.websets.imports.create(
+sdk_import = aexa.websets.imports.create(
params=payload, csv_data=input_data.csv_data
)
@@ -360,7 +360,7 @@ class ExaGetImportBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
-sdk_import = await aexa.websets.imports.get(import_id=input_data.import_id)
+sdk_import = aexa.websets.imports.get(import_id=input_data.import_id)
import_obj = ImportModel.from_sdk(sdk_import)
@@ -426,7 +426,7 @@ class ExaListImportsBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
-response = await aexa.websets.imports.list(
+response = aexa.websets.imports.list(
cursor=input_data.cursor,
limit=input_data.limit,
)
@@ -474,9 +474,7 @@ class ExaDeleteImportBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
-deleted_import = await aexa.websets.imports.delete(
-import_id=input_data.import_id
-)
+deleted_import = aexa.websets.imports.delete(import_id=input_data.import_id)
yield "import_id", deleted_import.id
yield "success", "true"
@@ -575,14 +573,14 @@ class ExaExportWebsetBlock(Block):
}
)
-# Create async iterator for list_all
-async def async_item_iterator(*args, **kwargs):
-for item in [mock_item1, mock_item2]:
-yield item
+# Create mock iterator
+mock_items = [mock_item1, mock_item2]
return {
"_get_client": lambda *args, **kwargs: MagicMock(
-websets=MagicMock(items=MagicMock(list_all=async_item_iterator))
+websets=MagicMock(
+items=MagicMock(list_all=lambda *args, **kwargs: iter(mock_items))
+)
)
}
@@ -604,7 +602,7 @@ class ExaExportWebsetBlock(Block):
webset_id=input_data.webset_id, limit=input_data.max_items
)
-async for sdk_item in item_iterator:
+for sdk_item in item_iterator:
if len(all_items) >= input_data.max_items:
break

View File

@@ -178,7 +178,7 @@ class ExaGetWebsetItemBlock(Block):
) -> BlockOutput:
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
-sdk_item = await aexa.websets.items.get(
+sdk_item = aexa.websets.items.get(
webset_id=input_data.webset_id, id=input_data.item_id
)
@@ -269,7 +269,7 @@ class ExaListWebsetItemsBlock(Block):
response = None
while time.time() - start_time < input_data.wait_timeout:
-response = await aexa.websets.items.list(
+response = aexa.websets.items.list(
webset_id=input_data.webset_id,
cursor=input_data.cursor,
limit=input_data.limit,
@@ -282,13 +282,13 @@ class ExaListWebsetItemsBlock(Block):
interval = min(interval * 1.2, 10)
if not response:
-response = await aexa.websets.items.list(
+response = aexa.websets.items.list(
webset_id=input_data.webset_id,
cursor=input_data.cursor,
limit=input_data.limit,
)
else:
-response = await aexa.websets.items.list(
+response = aexa.websets.items.list(
webset_id=input_data.webset_id,
cursor=input_data.cursor,
limit=input_data.limit,
@@ -340,7 +340,7 @@ class ExaDeleteWebsetItemBlock(Block):
) -> BlockOutput:
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
-deleted_item = await aexa.websets.items.delete(
+deleted_item = aexa.websets.items.delete(
webset_id=input_data.webset_id, id=input_data.item_id
)
@@ -408,7 +408,7 @@ class ExaBulkWebsetItemsBlock(Block):
webset_id=input_data.webset_id, limit=input_data.max_items
)
-async for sdk_item in item_iterator:
+for sdk_item in item_iterator:
if len(all_items) >= input_data.max_items:
break
@@ -475,7 +475,7 @@ class ExaWebsetItemsSummaryBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
-webset = await aexa.websets.get(id=input_data.webset_id)
+webset = aexa.websets.get(id=input_data.webset_id)
entity_type = "unknown"
if webset.searches:
@@ -495,7 +495,7 @@ class ExaWebsetItemsSummaryBlock(Block):
# Get sample items if requested
sample_items: List[WebsetItemModel] = []
if input_data.sample_size > 0:
items_response = await aexa.websets.items.list(
items_response = aexa.websets.items.list(
webset_id=input_data.webset_id, limit=input_data.sample_size
)
# Convert to our stable models
@@ -569,7 +569,7 @@ class ExaGetNewItemsBlock(Block):
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
# Get items starting from cursor
response = await aexa.websets.items.list(
response = aexa.websets.items.list(
webset_id=input_data.webset_id,
cursor=input_data.since_cursor,
limit=input_data.max_items,

View File

@@ -233,7 +233,7 @@ class ExaCreateMonitorBlock(Block):
def _create_test_mock():
"""Create test mocks for the AsyncExa SDK."""
from datetime import datetime
from unittest.mock import AsyncMock, MagicMock
from unittest.mock import MagicMock
# Create mock SDK monitor object
mock_monitor = MagicMock()
@@ -263,7 +263,7 @@ class ExaCreateMonitorBlock(Block):
return {
"_get_client": lambda *args, **kwargs: MagicMock(
websets=MagicMock(
monitors=MagicMock(create=AsyncMock(return_value=mock_monitor))
monitors=MagicMock(create=lambda *args, **kwargs: mock_monitor)
)
)
}
@@ -320,7 +320,7 @@ class ExaCreateMonitorBlock(Block):
if input_data.metadata:
payload["metadata"] = input_data.metadata
sdk_monitor = await aexa.websets.monitors.create(params=payload)
sdk_monitor = aexa.websets.monitors.create(params=payload)
monitor = MonitorModel.from_sdk(sdk_monitor)
@@ -384,7 +384,7 @@ class ExaGetMonitorBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
sdk_monitor = await aexa.websets.monitors.get(monitor_id=input_data.monitor_id)
sdk_monitor = aexa.websets.monitors.get(monitor_id=input_data.monitor_id)
monitor = MonitorModel.from_sdk(sdk_monitor)
@@ -476,7 +476,7 @@ class ExaUpdateMonitorBlock(Block):
if input_data.metadata is not None:
payload["metadata"] = input_data.metadata
sdk_monitor = await aexa.websets.monitors.update(
sdk_monitor = aexa.websets.monitors.update(
monitor_id=input_data.monitor_id, params=payload
)
@@ -522,9 +522,7 @@ class ExaDeleteMonitorBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
deleted_monitor = await aexa.websets.monitors.delete(
monitor_id=input_data.monitor_id
)
deleted_monitor = aexa.websets.monitors.delete(monitor_id=input_data.monitor_id)
yield "monitor_id", deleted_monitor.id
yield "success", "true"
@@ -581,7 +579,7 @@ class ExaListMonitorsBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
response = await aexa.websets.monitors.list(
response = aexa.websets.monitors.list(
cursor=input_data.cursor,
limit=input_data.limit,
webset_id=input_data.webset_id,

View File

@@ -121,7 +121,7 @@ class ExaWaitForWebsetBlock(Block):
WebsetTargetStatus.IDLE,
WebsetTargetStatus.ANY_COMPLETE,
]:
final_webset = await aexa.websets.wait_until_idle(
final_webset = aexa.websets.wait_until_idle(
id=input_data.webset_id,
timeout=input_data.timeout,
poll_interval=input_data.check_interval,
@@ -164,7 +164,7 @@ class ExaWaitForWebsetBlock(Block):
interval = input_data.check_interval
while time.time() - start_time < input_data.timeout:
# Get current webset status
webset = await aexa.websets.get(id=input_data.webset_id)
webset = aexa.websets.get(id=input_data.webset_id)
current_status = (
webset.status.value
if hasattr(webset.status, "value")
@@ -209,7 +209,7 @@ class ExaWaitForWebsetBlock(Block):
# Timeout reached
elapsed = time.time() - start_time
webset = await aexa.websets.get(id=input_data.webset_id)
webset = aexa.websets.get(id=input_data.webset_id)
final_status = (
webset.status.value
if hasattr(webset.status, "value")
@@ -345,7 +345,7 @@ class ExaWaitForSearchBlock(Block):
try:
while time.time() - start_time < input_data.timeout:
# Get current search status using SDK
search = await aexa.websets.searches.get(
search = aexa.websets.searches.get(
webset_id=input_data.webset_id, id=input_data.search_id
)
@@ -401,7 +401,7 @@ class ExaWaitForSearchBlock(Block):
elapsed = time.time() - start_time
# Get last known status
search = await aexa.websets.searches.get(
search = aexa.websets.searches.get(
webset_id=input_data.webset_id, id=input_data.search_id
)
final_status = (
@@ -503,7 +503,7 @@ class ExaWaitForEnrichmentBlock(Block):
try:
while time.time() - start_time < input_data.timeout:
# Get current enrichment status using SDK
enrichment = await aexa.websets.enrichments.get(
enrichment = aexa.websets.enrichments.get(
webset_id=input_data.webset_id, id=input_data.enrichment_id
)
@@ -548,7 +548,7 @@ class ExaWaitForEnrichmentBlock(Block):
elapsed = time.time() - start_time
# Get last known status
enrichment = await aexa.websets.enrichments.get(
enrichment = aexa.websets.enrichments.get(
webset_id=input_data.webset_id, id=input_data.enrichment_id
)
final_status = (
@@ -575,7 +575,7 @@ class ExaWaitForEnrichmentBlock(Block):
) -> tuple[list[SampleEnrichmentModel], int]:
"""Get sample enriched data and count."""
# Get a few items to see enrichment results using SDK
response = await aexa.websets.items.list(webset_id=webset_id, limit=5)
response = aexa.websets.items.list(webset_id=webset_id, limit=5)
sample_data: list[SampleEnrichmentModel] = []
enriched_count = 0
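Every wait block in this file shares one loop shape: poll a `get` call until the resource reaches a target status or the timeout elapses, then re-fetch the last known status on timeout; the earlier `ExaListWebsetItemsBlock` hunk additionally grows its poll interval by 1.2x, capped at 10 seconds. A minimal sketch of that shape, with `fetch_status` as a stand-in for the SDK's `get` calls:

    import time

    def wait_until(fetch_status, target: str, timeout: float, interval: float = 1.0) -> bool:
        # Poll until fetch_status() returns `target` or `timeout` seconds pass.
        start = time.time()
        while time.time() - start < timeout:
            if fetch_status() == target:
                return True
            time.sleep(interval)
            interval = min(interval * 1.2, 10)  # capped backoff, as in the diff
        return False  # timed out; the real blocks re-fetch the final status here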

View File

@@ -317,7 +317,7 @@ class ExaCreateWebsetSearchBlock(Block):
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
sdk_search = await aexa.websets.searches.create(
sdk_search = aexa.websets.searches.create(
webset_id=input_data.webset_id, params=payload
)
@@ -350,7 +350,7 @@ class ExaCreateWebsetSearchBlock(Block):
poll_start = time.time()
while time.time() - poll_start < input_data.polling_timeout:
current_search = await aexa.websets.searches.get(
current_search = aexa.websets.searches.get(
webset_id=input_data.webset_id, id=search_id
)
current_status = (
@@ -442,7 +442,7 @@ class ExaGetWebsetSearchBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
sdk_search = await aexa.websets.searches.get(
sdk_search = aexa.websets.searches.get(
webset_id=input_data.webset_id, id=input_data.search_id
)
@@ -523,7 +523,7 @@ class ExaCancelWebsetSearchBlock(Block):
# Use AsyncExa SDK
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
canceled_search = await aexa.websets.searches.cancel(
canceled_search = aexa.websets.searches.cancel(
webset_id=input_data.webset_id, id=input_data.search_id
)
@@ -604,7 +604,7 @@ class ExaFindOrCreateSearchBlock(Block):
aexa = AsyncExa(api_key=credentials.api_key.get_secret_value())
# Get webset to check existing searches
webset = await aexa.websets.get(id=input_data.webset_id)
webset = aexa.websets.get(id=input_data.webset_id)
# Look for existing search with same query
existing_search = None
@@ -636,7 +636,7 @@ class ExaFindOrCreateSearchBlock(Block):
if input_data.entity_type != SearchEntityType.AUTO:
payload["entity"] = {"type": input_data.entity_type.value}
sdk_search = await aexa.websets.searches.create(
sdk_search = aexa.websets.searches.create(
webset_id=input_data.webset_id, params=payload
)

View File

@@ -531,12 +531,12 @@ class LLMResponse(BaseModel):
def convert_openai_tool_fmt_to_anthropic(
openai_tools: list[dict] | None = None,
) -> Iterable[ToolParam] | anthropic.Omit:
) -> Iterable[ToolParam] | anthropic.NotGiven:
"""
Convert OpenAI tool format to Anthropic tool format.
"""
if not openai_tools or len(openai_tools) == 0:
return anthropic.omit
return anthropic.NOT_GIVEN
anthropic_tools = []
for tool in openai_tools:
@@ -596,10 +596,10 @@ def extract_openai_tool_calls(response) -> list[ToolContentBlock] | None:
def get_parallel_tool_calls_param(
llm_model: LlmModel, parallel_tool_calls: bool | None
) -> bool | openai.Omit:
):
"""Get the appropriate parallel_tool_calls parameter for OpenAI-compatible APIs."""
if llm_model.startswith("o") or parallel_tool_calls is None:
return openai.omit
return openai.NOT_GIVEN
return parallel_tool_calls
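Both hunks swap one "absent parameter" sentinel for another: `anthropic.Omit`/`anthropic.omit` for `anthropic.NotGiven`/`anthropic.NOT_GIVEN`, and likewise on the OpenAI side. Both SDKs use a module-level sentinel to distinguish "omit this field entirely" from an explicit `None` or `False`. A sketch of why such a sentinel exists (the class below is illustrative, not either SDK's implementation):

    class NotGiven:
        def __bool__(self) -> bool:
            return False

        def __repr__(self) -> str:
            return "NOT_GIVEN"

    NOT_GIVEN = NotGiven()

    def build_request(parallel_tool_calls: bool | NotGiven = NOT_GIVEN) -> dict:
        payload = {}
        # identity check separates "omit the field" from an explicit False
        if parallel_tool_calls is not NOT_GIVEN:
            payload["parallel_tool_calls"] = parallel_tool_calls
        return payload

    assert build_request() == {}  # field omitted entirely
    assert build_request(False) == {"parallel_tool_calls": False}  # explicit False is sent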

View File

@@ -246,9 +246,7 @@ class BlockSchema(BaseModel):
f"is not of type {CredentialsMetaInput.__name__}"
)
CredentialsMetaInput.validate_credentials_field_schema(
cls.get_field_schema(field_name), field_name
)
credentials_fields[field_name].validate_credentials_field_schema(cls)
elif field_name in credentials_fields:
raise KeyError(

View File

@@ -3,7 +3,7 @@ import logging
import uuid
from collections import defaultdict
from datetime import datetime, timezone
from typing import TYPE_CHECKING, Annotated, Any, Literal, Optional, Self, cast
from typing import TYPE_CHECKING, Annotated, Any, Literal, Optional, cast
from prisma.enums import SubmissionStatus
from prisma.models import (
@@ -20,7 +20,7 @@ from prisma.types import (
AgentNodeLinkCreateInput,
StoreListingVersionWhereInput,
)
from pydantic import BaseModel, BeforeValidator, Field
from pydantic import BaseModel, BeforeValidator, Field, create_model
from pydantic.fields import computed_field
from backend.blocks.agent import AgentExecutorBlock
@@ -30,6 +30,7 @@ from backend.data.db import prisma as db
from backend.data.dynamic_fields import is_tool_pin, sanitize_pin_name
from backend.data.includes import MAX_GRAPH_VERSIONS_FETCH
from backend.data.model import (
CredentialsField,
CredentialsFieldInfo,
CredentialsMetaInput,
is_credentials_field_name,
@@ -44,6 +45,7 @@ from .block import (
AnyBlockSchema,
Block,
BlockInput,
BlockSchema,
BlockType,
EmptySchema,
get_block,
@@ -111,12 +113,10 @@ class Link(BaseDbModel):
class Node(BaseDbModel):
block_id: str
input_default: BlockInput = Field( # dict[input_name, default_value]
default_factory=dict
)
metadata: dict[str, Any] = Field(default_factory=dict)
input_links: list[Link] = Field(default_factory=list)
output_links: list[Link] = Field(default_factory=list)
input_default: BlockInput = {} # dict[input_name, default_value]
metadata: dict[str, Any] = {}
input_links: list[Link] = []
output_links: list[Link] = []
@property
def credentials_optional(self) -> bool:
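This hunk trades `Field(default_factory=dict)` / `Field(default_factory=list)` on `Node` for bare mutable defaults. Unlike plain classes or dataclasses, Pydantic copies mutable defaults per instance, so the two spellings should behave identically here; a quick check of that assumption:

    from pydantic import BaseModel

    class Sketch(BaseModel):
        metadata: dict = {}  # pydantic copies this default per instance
        links: list = []

    a, b = Sketch(), Sketch()
    a.metadata["k"] = "v"
    a.links.append(1)
    assert b.metadata == {} and b.links == []  # b's defaults are unshared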
@@ -221,33 +221,18 @@ class NodeModel(Node):
return result
class GraphBaseMeta(BaseDbModel):
"""
Shared base for `GraphMeta` and `BaseGraph`, with core graph metadata fields.
"""
class BaseGraph(BaseDbModel):
version: int = 1
is_active: bool = True
name: str
description: str
instructions: str | None = None
recommended_schedule_cron: str | None = None
nodes: list[Node] = []
links: list[Link] = []
forked_from_id: str | None = None
forked_from_version: int | None = None
class BaseGraph(GraphBaseMeta):
"""
Graph with nodes, links, and computed I/O schema fields.
Used to represent sub-graphs within a `Graph`. Contains the full graph
structure including nodes and links, plus computed fields for schemas
and trigger info. Does NOT include user_id or created_at (see GraphModel).
"""
nodes: list[Node] = Field(default_factory=list)
links: list[Link] = Field(default_factory=list)
@computed_field
@property
def input_schema(self) -> dict[str, Any]:
@@ -376,79 +361,44 @@ class GraphTriggerInfo(BaseModel):
class Graph(BaseGraph):
"""Creatable graph model used in API create/update endpoints."""
sub_graphs: list[BaseGraph] = Field(default_factory=list) # Flattened sub-graphs
class GraphMeta(GraphBaseMeta):
"""
Lightweight graph metadata model representing an existing graph from the database,
for use in listings and summaries.
Lacks `GraphModel`'s nodes, links, and expensive computed fields.
Use for list endpoints where full graph data is not needed and performance matters.
"""
id: str # type: ignore
version: int # type: ignore
user_id: str
created_at: datetime
@classmethod
def from_db(cls, graph: "AgentGraph") -> Self:
return cls(
id=graph.id,
version=graph.version,
is_active=graph.isActive,
name=graph.name or "",
description=graph.description or "",
instructions=graph.instructions,
recommended_schedule_cron=graph.recommendedScheduleCron,
forked_from_id=graph.forkedFromId,
forked_from_version=graph.forkedFromVersion,
user_id=graph.userId,
created_at=graph.createdAt,
)
class GraphModel(Graph, GraphMeta):
"""
Full graph model representing an existing graph from the database.
This is the primary model for working with persisted graphs. Includes all
graph data (nodes, links, sub_graphs) plus user ownership and timestamps.
Provides computed fields (input_schema, output_schema, etc.) used during
set-up (frontend) and execution (backend).
Inherits from:
- `Graph`: provides structure (nodes, links, sub_graphs) and computed schemas
- `GraphMeta`: provides user_id, created_at for database records
"""
nodes: list[NodeModel] = Field(default_factory=list) # type: ignore
@property
def starting_nodes(self) -> list[NodeModel]:
outbound_nodes = {link.sink_id for link in self.links}
input_nodes = {
node.id for node in self.nodes if node.block.block_type == BlockType.INPUT
}
return [
node
for node in self.nodes
if node.id not in outbound_nodes or node.id in input_nodes
]
@property
def webhook_input_node(self) -> NodeModel | None: # type: ignore
return cast(NodeModel, super().webhook_input_node)
sub_graphs: list[BaseGraph] = [] # Flattened sub-graphs
@computed_field
@property
def credentials_input_schema(self) -> dict[str, Any]:
graph_credentials_inputs = self.aggregate_credentials_inputs()
schema = self._credentials_input_schema.jsonschema()
# Determine which credential fields are required based on credentials_optional metadata
graph_credentials_inputs = self.aggregate_credentials_inputs()
required_fields = []
# Build a map of node_id -> node for quick lookup
all_nodes = {node.id: node for node in self.nodes}
for sub_graph in self.sub_graphs:
for node in sub_graph.nodes:
all_nodes[node.id] = node
for field_key, (
_field_info,
node_field_pairs,
) in graph_credentials_inputs.items():
# A field is required if ANY node using it has credentials_optional=False
is_required = False
for node_id, _field_name in node_field_pairs:
node = all_nodes.get(node_id)
if node and not node.credentials_optional:
is_required = True
break
if is_required:
required_fields.append(field_key)
schema["required"] = required_fields
return schema
@property
def _credentials_input_schema(self) -> type[BlockSchema]:
graph_credentials_inputs = self.aggregate_credentials_inputs()
logger.debug(
f"Combined credentials input fields for graph #{self.id} ({self.name}): "
f"{graph_credentials_inputs}"
@@ -456,8 +406,8 @@ class GraphModel(Graph, GraphMeta):
# Warn if same-provider credentials inputs can't be combined (= bad UX)
graph_cred_fields = list(graph_credentials_inputs.values())
for i, (field, keys, _) in enumerate(graph_cred_fields):
for other_field, other_keys, _ in list(graph_cred_fields)[i + 1 :]:
for i, (field, keys) in enumerate(graph_cred_fields):
for other_field, other_keys in list(graph_cred_fields)[i + 1 :]:
if field.provider != other_field.provider:
continue
if ProviderName.HTTP in field.provider:
@@ -473,78 +423,31 @@ class GraphModel(Graph, GraphMeta):
f"keys: {keys} <> {other_keys}."
)
# Build JSON schema directly to avoid expensive create_model + validation overhead
properties = {}
required_fields = []
for agg_field_key, (
field_info,
_,
is_required,
) in graph_credentials_inputs.items():
providers = list(field_info.provider)
cred_types = list(field_info.supported_types)
field_schema: dict[str, Any] = {
"credentials_provider": providers,
"credentials_types": cred_types,
"type": "object",
"properties": {
"id": {"title": "Id", "type": "string"},
"title": {
"anyOf": [{"type": "string"}, {"type": "null"}],
"default": None,
"title": "Title",
},
"provider": {
"title": "Provider",
"type": "string",
**(
{"enum": providers}
if len(providers) > 1
else {"const": providers[0]}
),
},
"type": {
"title": "Type",
"type": "string",
**(
{"enum": cred_types}
if len(cred_types) > 1
else {"const": cred_types[0]}
),
},
},
"required": ["id", "provider", "type"],
}
# Add other (optional) field info items
field_schema.update(
field_info.model_dump(
by_alias=True,
exclude_defaults=True,
exclude={"provider", "supported_types"}, # already included above
)
fields: dict[str, tuple[type[CredentialsMetaInput], CredentialsMetaInput]] = {
agg_field_key: (
CredentialsMetaInput[
Literal[tuple(field_info.provider)], # type: ignore
Literal[tuple(field_info.supported_types)], # type: ignore
],
CredentialsField(
required_scopes=set(field_info.required_scopes or []),
discriminator=field_info.discriminator,
discriminator_mapping=field_info.discriminator_mapping,
discriminator_values=field_info.discriminator_values,
),
)
# Ensure field schema is well-formed
CredentialsMetaInput.validate_credentials_field_schema(
field_schema, agg_field_key
)
properties[agg_field_key] = field_schema
if is_required:
required_fields.append(agg_field_key)
return {
"type": "object",
"properties": properties,
"required": required_fields,
for agg_field_key, (field_info, _) in graph_credentials_inputs.items()
}
return create_model(
self.name.replace(" ", "") + "CredentialsInputSchema",
__base__=BlockSchema,
**fields, # type: ignore
)
def aggregate_credentials_inputs(
self,
) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]], bool]]:
) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]]]]:
"""
Returns:
dict[aggregated_field_key, tuple(
@@ -552,19 +455,13 @@ class GraphModel(Graph, GraphMeta):
(now includes discriminator_values from matching nodes)
set[(node_id, field_name)]: Node credentials fields that are
compatible with this aggregated field spec
bool: True if the field is required (any node has credentials_optional=False)
)]
"""
# First collect all credential field data with input defaults
# Track (field_info, (node_id, field_name), is_required) for each credential field
node_credential_data: list[tuple[CredentialsFieldInfo, tuple[str, str]]] = []
node_required_map: dict[str, bool] = {} # node_id -> is_required
node_credential_data = []
for graph in [self] + self.sub_graphs:
for node in graph.nodes:
# Track if this node requires credentials (credentials_optional=False means required)
node_required_map[node.id] = not node.credentials_optional
for (
field_name,
field_info,
@@ -588,21 +485,37 @@ class GraphModel(Graph, GraphMeta):
)
# Combine credential field info (this will merge discriminator_values automatically)
combined = CredentialsFieldInfo.combine(*node_credential_data)
return CredentialsFieldInfo.combine(*node_credential_data)
# Add is_required flag to each aggregated field
# A field is required if ANY node using it has credentials_optional=False
return {
key: (
field_info,
node_field_pairs,
any(
node_required_map.get(node_id, True)
for node_id, _ in node_field_pairs
),
)
for key, (field_info, node_field_pairs) in combined.items()
class GraphModel(Graph):
user_id: str
nodes: list[NodeModel] = [] # type: ignore
created_at: datetime
@property
def starting_nodes(self) -> list[NodeModel]:
outbound_nodes = {link.sink_id for link in self.links}
input_nodes = {
node.id for node in self.nodes if node.block.block_type == BlockType.INPUT
}
return [
node
for node in self.nodes
if node.id not in outbound_nodes or node.id in input_nodes
]
@property
def webhook_input_node(self) -> NodeModel | None: # type: ignore
return cast(NodeModel, super().webhook_input_node)
def meta(self) -> "GraphMeta":
"""
Returns a GraphMeta object with metadata about the graph.
This is used to return metadata about the graph without exposing nodes and links.
"""
return GraphMeta.from_graph(self)
def reassign_ids(self, user_id: str, reassign_graph_id: bool = False):
"""
@@ -886,14 +799,13 @@ class GraphModel(Graph, GraphMeta):
if is_static_output_block(link.source_id):
link.is_static = True # Each value block output should be static.
@classmethod
def from_db( # type: ignore[reportIncompatibleMethodOverride]
cls,
@staticmethod
def from_db(
graph: AgentGraph,
for_export: bool = False,
sub_graphs: list[AgentGraph] | None = None,
) -> Self:
return cls(
) -> "GraphModel":
return GraphModel(
id=graph.id,
user_id=graph.userId if not for_export else "",
version=graph.version,
@@ -919,28 +831,17 @@ class GraphModel(Graph, GraphMeta):
],
)
def hide_nodes(self) -> "GraphModelWithoutNodes":
"""
Returns a copy of the `GraphModel` with nodes, links, and sub-graphs hidden
(excluded from serialization). They are still present in the model instance
so all computed fields (e.g. `credentials_input_schema`) still work.
"""
return GraphModelWithoutNodes.model_validate(self, from_attributes=True)
class GraphMeta(Graph):
user_id: str
class GraphModelWithoutNodes(GraphModel):
"""
GraphModel variant that excludes nodes, links, and sub-graphs from serialization.
# Easy work-around to prevent exposing nodes and links in the API response
nodes: list[NodeModel] = Field(default=[], exclude=True) # type: ignore
links: list[Link] = Field(default=[], exclude=True)
Used in contexts like the store where exposing internal graph structure
is not desired. Inherits all computed fields from GraphModel but marks
nodes and links as excluded from JSON output.
"""
nodes: list[NodeModel] = Field(default_factory=list, exclude=True)
links: list[Link] = Field(default_factory=list, exclude=True)
sub_graphs: list[BaseGraph] = Field(default_factory=list, exclude=True)
@staticmethod
def from_graph(graph: GraphModel) -> "GraphMeta":
return GraphMeta(**graph.model_dump())
class GraphsPaginated(BaseModel):
@@ -1011,11 +912,21 @@ async def list_graphs_paginated(
where=where_clause,
distinct=["id"],
order={"version": "desc"},
include=AGENT_GRAPH_INCLUDE,
skip=offset,
take=page_size,
)
graph_models = [GraphMeta.from_db(graph) for graph in graphs]
graph_models: list[GraphMeta] = []
for graph in graphs:
try:
graph_meta = GraphModel.from_db(graph).meta()
# Trigger serialization to validate that the graph is well formed
graph_meta.model_dump()
graph_models.append(graph_meta)
except Exception as e:
logger.error(f"Error processing graph {graph.id}: {e}")
continue
return GraphsPaginated(
graphs=graph_models,
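The revert above drops the hand-built JSON schema for credentials inputs in favor of `pydantic.create_model`, which assembles a `BlockSchema` subclass at runtime from the aggregated credential fields (and correspondingly returns `aggregate_credentials_inputs` to 2-tuples, dropping the `is_required` flag). A minimal sketch of the dynamic-model pattern, with plain `str` fields standing in for `CredentialsMetaInput`:

    from pydantic import BaseModel, Field, create_model

    fields = {
        "openai_credentials": (str, Field(description="OpenAI API key reference")),
        "github_credentials": (str, Field(description="GitHub OAuth reference")),
    }

    CredentialsInputSchema = create_model(
        "TestGraphCredentialsInputSchema",  # model name becomes the schema "title"
        __base__=BaseModel,  # the real code passes BlockSchema here
        **fields,
    )

    schema = CredentialsInputSchema.model_json_schema()
    assert schema["title"] == "TestGraphCredentialsInputSchema"
    assert set(schema["properties"]) == {"openai_credentials", "github_credentials"}

This is also why the snapshot hunks further down gain a "title": "TestGraphCredentialsInputSchema" entry: schemas derived via `create_model` carry the model name as their title, while the hand-built dict did not.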

View File

@@ -163,6 +163,7 @@ class User(BaseModel):
if TYPE_CHECKING:
from prisma.models import User as PrismaUser
from backend.data.block import BlockSchema
T = TypeVar("T")
logger = logging.getLogger(__name__)
@@ -507,13 +508,15 @@ class CredentialsMetaInput(BaseModel, Generic[CP, CT]):
def allowed_cred_types(cls) -> tuple[CredentialsType, ...]:
return get_args(cls.model_fields["type"].annotation)
@staticmethod
def validate_credentials_field_schema(
field_schema: dict[str, Any], field_name: str
):
@classmethod
def validate_credentials_field_schema(cls, model: type["BlockSchema"]):
"""Validates the schema of a credentials input field"""
field_name = next(
name for name, type in model.get_credentials_fields().items() if type is cls
)
field_schema = model.jsonschema()["properties"][field_name]
try:
field_info = CredentialsFieldInfo[CP, CT].model_validate(field_schema)
schema_extra = CredentialsFieldInfo[CP, CT].model_validate(field_schema)
except ValidationError as e:
if "Field required [type=missing" not in str(e):
raise
@@ -523,11 +526,11 @@ class CredentialsMetaInput(BaseModel, Generic[CP, CT]):
f"{field_schema}"
) from e
providers = field_info.provider
providers = cls.allowed_providers()
if (
providers is not None
and len(providers) > 1
and not field_info.discriminator
and not schema_extra.discriminator
):
raise TypeError(
f"Multi-provider CredentialsField '{field_name}' "

View File

@@ -373,7 +373,7 @@ def make_node_credentials_input_map(
# Get aggregated credentials fields for the graph
graph_cred_inputs = graph.aggregate_credentials_inputs()
for graph_input_name, (_, compatible_node_fields, _) in graph_cred_inputs.items():
for graph_input_name, (_, compatible_node_fields) in graph_cred_inputs.items():
# Best-effort map: skip missing items
if graph_input_name not in graph_credentials_input:
continue

View File

@@ -342,14 +342,6 @@ async def store_media_file(
if not target_path.is_file():
raise ValueError(f"Local file does not exist: {target_path}")
# Virus scan the local file before any further processing
local_content = target_path.read_bytes()
if len(local_content) > MAX_FILE_SIZE_BYTES:
raise ValueError(
f"File too large: {len(local_content)} bytes > {MAX_FILE_SIZE_BYTES} bytes"
)
await scan_content_safe(local_content, filename=sanitized_file)
# Return based on requested format
if return_format == "for_local_processing":
# Use when processing files locally with tools like ffmpeg, MoviePy, PIL
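The removed lines above drop the size cap and virus scan for local files in `store_media_file` (the matching tests are deleted in the next file). For reference, the shape of the removed guard as a standalone sketch, with `scan_content_safe` stubbed in place of the real ClamAV-backed scanner (the project depends on aioclamd):

    from pathlib import Path

    MAX_FILE_SIZE_BYTES = 50 * 1024 * 1024  # illustrative limit, not the project's value

    async def scan_content_safe(content: bytes, filename: str) -> None:
        # stand-in check; the real scanner calls an external ClamAV service
        if b"EICAR" in content:
            raise ValueError(f"Virus detected in {filename}")

    async def check_local_file(target_path: Path) -> bytes:
        content = target_path.read_bytes()
        if len(content) > MAX_FILE_SIZE_BYTES:
            raise ValueError(
                f"File too large: {len(content)} bytes > {MAX_FILE_SIZE_BYTES} bytes"
            )
        await scan_content_safe(content, filename=target_path.name)
        return content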

View File

@@ -247,100 +247,3 @@ class TestFileCloudIntegration:
execution_context=make_test_context(graph_exec_id=graph_exec_id),
return_format="for_local_processing",
)
@pytest.mark.asyncio
async def test_store_media_file_local_path_scanned(self):
"""Test that local file paths are scanned for viruses."""
graph_exec_id = "test-exec-123"
local_file = "test_video.mp4"
file_content = b"fake video content"
with patch(
"backend.util.file.get_cloud_storage_handler"
) as mock_handler_getter, patch(
"backend.util.file.scan_content_safe"
) as mock_scan, patch(
"backend.util.file.Path"
) as mock_path_class:
# Mock cloud storage handler - not a cloud path
mock_handler = MagicMock()
mock_handler.is_cloud_path.return_value = False
mock_handler_getter.return_value = mock_handler
# Mock virus scanner
mock_scan.return_value = None
# Mock file system operations
mock_base_path = MagicMock()
mock_target_path = MagicMock()
mock_resolved_path = MagicMock()
mock_path_class.return_value = mock_base_path
mock_base_path.mkdir = MagicMock()
mock_base_path.__truediv__ = MagicMock(return_value=mock_target_path)
mock_target_path.resolve.return_value = mock_resolved_path
mock_resolved_path.is_relative_to.return_value = True
mock_resolved_path.is_file.return_value = True
mock_resolved_path.read_bytes.return_value = file_content
mock_resolved_path.relative_to.return_value = Path(local_file)
mock_resolved_path.name = local_file
result = await store_media_file(
file=MediaFileType(local_file),
execution_context=make_test_context(graph_exec_id=graph_exec_id),
return_format="for_local_processing",
)
# Verify virus scan was called for local file
mock_scan.assert_called_once_with(file_content, filename=local_file)
# Result should be the relative path
assert str(result) == local_file
@pytest.mark.asyncio
async def test_store_media_file_local_path_virus_detected(self):
"""Test that infected local files raise VirusDetectedError."""
from backend.api.features.store.exceptions import VirusDetectedError
graph_exec_id = "test-exec-123"
local_file = "infected.exe"
file_content = b"malicious content"
with patch(
"backend.util.file.get_cloud_storage_handler"
) as mock_handler_getter, patch(
"backend.util.file.scan_content_safe"
) as mock_scan, patch(
"backend.util.file.Path"
) as mock_path_class:
# Mock cloud storage handler - not a cloud path
mock_handler = MagicMock()
mock_handler.is_cloud_path.return_value = False
mock_handler_getter.return_value = mock_handler
# Mock virus scanner to detect virus
mock_scan.side_effect = VirusDetectedError(
"EICAR-Test-File", "File rejected due to virus detection"
)
# Mock file system operations
mock_base_path = MagicMock()
mock_target_path = MagicMock()
mock_resolved_path = MagicMock()
mock_path_class.return_value = mock_base_path
mock_base_path.mkdir = MagicMock()
mock_base_path.__truediv__ = MagicMock(return_value=mock_target_path)
mock_target_path.resolve.return_value = mock_resolved_path
mock_resolved_path.is_relative_to.return_value = True
mock_resolved_path.is_file.return_value = True
mock_resolved_path.read_bytes.return_value = file_content
with pytest.raises(VirusDetectedError):
await store_media_file(
file=MediaFileType(local_file),
execution_context=make_test_context(graph_exec_id=graph_exec_id),
return_format="for_local_processing",
)

File diff suppressed because it is too large

View File

@@ -12,16 +12,16 @@ python = ">=3.10,<3.14"
aio-pika = "^9.5.5"
aiohttp = "^3.10.0"
aiodns = "^3.5.0"
anthropic = "^0.79.0"
anthropic = "^0.59.0"
apscheduler = "^3.11.1"
autogpt-libs = { path = "../autogpt_libs", develop = true }
bleach = { extras = ["css"], version = "^6.2.0" }
click = "^8.2.0"
cryptography = "^46.0"
cryptography = "^45.0"
discord-py = "^2.5.2"
e2b-code-interpreter = "^1.5.2"
elevenlabs = "^1.50.0"
fastapi = "^0.128.5"
fastapi = "^0.116.1"
feedparser = "^6.0.11"
flake8 = "^7.3.0"
google-api-python-client = "^2.177.0"
@@ -35,10 +35,10 @@ jinja2 = "^3.1.6"
jsonref = "^1.1.0"
jsonschema = "^4.25.0"
langfuse = "^3.11.0"
launchdarkly-server-sdk = "^9.14.1"
launchdarkly-server-sdk = "^9.12.0"
mem0ai = "^0.1.115"
moviepy = "^2.1.2"
ollama = "^0.6.1"
ollama = "^0.5.1"
openai = "^1.97.1"
orjson = "^3.10.0"
pika = "^1.3.2"
@@ -48,16 +48,16 @@ postmarker = "^1.0"
praw = "~7.8.1"
prisma = "^0.15.0"
rank-bm25 = "^0.2.2"
prometheus-client = "^0.24.1"
prometheus-client = "^0.22.1"
prometheus-fastapi-instrumentator = "^7.0.0"
psutil = "^7.0.0"
psycopg2-binary = "^2.9.10"
pydantic = { extras = ["email"], version = "^2.12.5" }
pydantic-settings = "^2.12.0"
pydantic = { extras = ["email"], version = "^2.11.7" }
pydantic-settings = "^2.10.1"
pytest = "^8.4.1"
pytest-asyncio = "^1.1.0"
python-dotenv = "^1.1.1"
python-multipart = "^0.0.22"
python-multipart = "^0.0.20"
redis = "^6.2.0"
regex = "^2025.9.18"
replicate = "^1.0.6"
@@ -65,11 +65,11 @@ sentry-sdk = {extras = ["anthropic", "fastapi", "launchdarkly", "openai", "sqlal
sqlalchemy = "^2.0.40"
strenum = "^0.4.9"
stripe = "^11.5.0"
supabase = "2.27.3"
tenacity = "^9.1.4"
supabase = "2.17.0"
tenacity = "^9.1.2"
todoist-api-python = "^2.1.7"
tweepy = "^4.16.0"
uvicorn = { extras = ["standard"], version = "^0.40.0" }
uvicorn = { extras = ["standard"], version = "^0.35.0" }
websockets = "^15.0"
youtube-transcript-api = "^1.2.1"
yt-dlp = "2025.12.08"
@@ -77,7 +77,7 @@ zerobouncesdk = "^1.1.2"
# NOTE: please insert new dependencies in their alphabetical location
pytest-snapshot = "^0.9.0"
aiofiles = "^24.1.0"
tiktoken = "^0.12.0"
tiktoken = "^0.9.0"
aioclamd = "^1.0.0"
setuptools = "^80.9.0"
gcloud-aio-storage = "^9.5.0"
@@ -95,13 +95,13 @@ black = "^24.10.0"
faker = "^38.2.0"
httpx = "^0.28.1"
isort = "^5.13.2"
poethepoet = "^0.41.0"
poethepoet = "^0.37.0"
pre-commit = "^4.4.0"
pyright = "^1.1.407"
pytest-mock = "^3.15.1"
pytest-watcher = "^0.6.3"
pytest-watcher = "^0.4.2"
requests = "^2.32.5"
ruff = "^0.15.0"
ruff = "^0.14.5"
# NOTE: please insert new dependencies in their alphabetical location
[build-system]

View File

@@ -3,6 +3,7 @@
"credentials_input_schema": {
"properties": {},
"required": [],
"title": "TestGraphCredentialsInputSchema",
"type": "object"
},
"description": "A test graph",

View File

@@ -1,14 +1,34 @@
[
{
"created_at": "2025-09-04T13:37:00",
"credentials_input_schema": {
"properties": {},
"required": [],
"title": "TestGraphCredentialsInputSchema",
"type": "object"
},
"description": "A test graph",
"forked_from_id": null,
"forked_from_version": null,
"has_external_trigger": false,
"has_human_in_the_loop": false,
"has_sensitive_action": false,
"id": "graph-123",
"input_schema": {
"properties": {},
"required": [],
"type": "object"
},
"instructions": null,
"is_active": true,
"name": "Test Graph",
"output_schema": {
"properties": {},
"required": [],
"type": "object"
},
"recommended_schedule_cron": null,
"sub_graphs": [],
"trigger_setup_info": null,
"user_id": "3e53486c-cf57-477e-ba2a-cb02dc828e1a",
"version": 1
}

View File

@@ -102,7 +102,7 @@
"react-markdown": "9.0.3",
"react-modal": "3.16.3",
"react-shepherd": "6.1.9",
"react-window": "2.2.0",
"react-window": "1.8.11",
"recharts": "3.3.0",
"rehype-autolink-headings": "7.1.0",
"rehype-highlight": "7.0.2",
@@ -140,7 +140,7 @@
"@types/react": "18.3.17",
"@types/react-dom": "18.3.5",
"@types/react-modal": "3.16.3",
"@types/react-window": "2.0.0",
"@types/react-window": "1.8.8",
"@vitejs/plugin-react": "5.1.2",
"axe-playwright": "2.2.2",
"chromatic": "13.3.3",

View File

@@ -228,8 +228,8 @@ importers:
specifier: 6.1.9
version: 6.1.9(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.9.3)
react-window:
specifier: 2.2.0
version: 2.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
specifier: 1.8.11
version: 1.8.11(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
recharts:
specifier: 3.3.0
version: 3.3.0(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react-is@18.3.1)(react@18.3.1)(redux@5.0.1)
@@ -337,8 +337,8 @@ importers:
specifier: 3.16.3
version: 3.16.3
'@types/react-window':
specifier: 2.0.0
version: 2.0.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
specifier: 1.8.8
version: 1.8.8
'@vitejs/plugin-react':
specifier: 5.1.2
version: 5.1.2(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2))
@@ -3469,9 +3469,8 @@ packages:
'@types/react-modal@3.16.3':
resolution: {integrity: sha512-xXuGavyEGaFQDgBv4UVm8/ZsG+qxeQ7f77yNrW3n+1J6XAstUy5rYHeIHPh1KzsGc6IkCIdu6lQ2xWzu1jBTLg==}
'@types/react-window@2.0.0':
resolution: {integrity: sha512-E8hMDtImEpMk1SjswSvqoSmYvk7GEtyVaTa/GJV++FdDNuMVVEzpAClyJ0nqeKYBrMkGiyH6M1+rPLM0Nu1exQ==}
deprecated: This is a stub types definition. react-window provides its own type definitions, so you do not need this installed.
'@types/react-window@1.8.8':
resolution: {integrity: sha512-8Ls660bHR1AUA2kuRvVG9D/4XpRC6wjAaPT9dil7Ckc76eP9TKWZwwmgfq8Q1LANX3QNDnoU4Zp48A3w+zK69Q==}
'@types/react@18.3.17':
resolution: {integrity: sha512-opAQ5no6LqJNo9TqnxBKsgnkIYHozW9KSTlFVoSUJYh1Fl/sswkEoqIugRSm7tbh6pABtYjGAjW+GOS23j8qbw==}
@@ -5977,6 +5976,9 @@ packages:
resolution: {integrity: sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==}
engines: {node: '>= 4.0.0'}
memoize-one@5.2.1:
resolution: {integrity: sha512-zYiwtZUcYyXKo/np96AGZAckk+FWWsUdJ3cHGGmld7+AhvcWmQyGCYUh1hc4Q/pkOhb65dQR/pqCyK0cOaHz4Q==}
merge-stream@2.0.0:
resolution: {integrity: sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==}
@@ -6889,11 +6891,12 @@ packages:
'@types/react':
optional: true
react-window@2.2.0:
resolution: {integrity: sha512-Y2L7yonHq6K1pQA2P98wT5QdIsEcjBTB7T8o6Mub12hH9eYppXoYu6vgClmcjlh3zfNcW2UrXiJJJqDxUY7GVw==}
react-window@1.8.11:
resolution: {integrity: sha512-+SRbUVT2scadgFSWx+R1P754xHPEqvcfSfVX10QYg6POOz+WNgkN48pS+BtZNIMGiL1HYrSEiCkwsMS15QogEQ==}
engines: {node: '>8.0.0'}
peerDependencies:
react: ^18.0.0 || ^19.0.0
react-dom: ^18.0.0 || ^19.0.0
react: ^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0
react-dom: ^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0
react@18.3.1:
resolution: {integrity: sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==}
@@ -11600,12 +11603,9 @@ snapshots:
dependencies:
'@types/react': 18.3.17
'@types/react-window@2.0.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
'@types/react-window@1.8.8':
dependencies:
react-window: 2.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
transitivePeerDependencies:
- react
- react-dom
'@types/react': 18.3.17
'@types/react@18.3.17':
dependencies:
@@ -14545,6 +14545,8 @@ snapshots:
dependencies:
fs-monkey: 1.1.0
memoize-one@5.2.1: {}
merge-stream@2.0.0: {}
merge2@1.4.1: {}
@@ -15590,8 +15592,10 @@ snapshots:
optionalDependencies:
'@types/react': 18.3.17
react-window@2.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
react-window@1.8.11(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
dependencies:
'@babel/runtime': 7.28.4
memoize-one: 5.2.1
react: 18.3.1
react-dom: 18.3.1(react@18.3.1)

View File

@@ -1,5 +1,5 @@
import { CredentialsMetaInput } from "@/app/api/__generated__/models/credentialsMetaInput";
import { GraphModel } from "@/app/api/__generated__/models/graphModel";
import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
import { CredentialsInput } from "@/components/contextual/CredentialsInput/CredentialsInput";
import { useState } from "react";
import { getSchemaDefaultCredentials } from "../../helpers";
@@ -9,7 +9,7 @@ type Credential = CredentialsMetaInput | undefined;
type Credentials = Record<string, Credential>;
type Props = {
agent: GraphModel | null;
agent: GraphMeta | null;
siblingInputs?: Record<string, any>;
onCredentialsChange: (
credentials: Record<string, CredentialsMetaInput>,

View File

@@ -1,9 +1,9 @@
import { CredentialsMetaInput } from "@/app/api/__generated__/models/credentialsMetaInput";
import { GraphModel } from "@/app/api/__generated__/models/graphModel";
import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
import { BlockIOCredentialsSubSchema } from "@/lib/autogpt-server-api/types";
export function getCredentialFields(
agent: GraphModel | null,
agent: GraphMeta | null,
): AgentCredentialsFields {
if (!agent) return {};

View File

@@ -3,10 +3,10 @@ import type {
CredentialsMetaInput,
} from "@/lib/autogpt-server-api/types";
import type { InputValues } from "./types";
import { GraphModel } from "@/app/api/__generated__/models/graphModel";
import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
export function computeInitialAgentInputs(
agent: GraphModel | null,
agent: GraphMeta | null,
existingInputs?: InputValues | null,
): InputValues {
const properties = agent?.input_schema?.properties || {};
@@ -29,7 +29,7 @@ export function computeInitialAgentInputs(
}
type IsRunDisabledParams = {
agent: GraphModel | null;
agent: GraphMeta | null;
isRunning: boolean;
agentInputs: InputValues | null | undefined;
};

View File

@@ -30,8 +30,6 @@ import {
} from "@/components/atoms/Tooltip/BaseTooltip";
import { GraphMeta } from "@/lib/autogpt-server-api";
import jaro from "jaro-winkler";
import { getV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
import { okData } from "@/app/api/helpers";
type _Block = Omit<Block, "inputSchema" | "outputSchema"> & {
uiKey?: string;
@@ -109,8 +107,6 @@ export function BlocksControl({
.filter((b) => b.uiType !== BlockUIType.AGENT)
.sort((a, b) => a.name.localeCompare(b.name));
// Agent blocks are created from GraphMeta which doesn't include schemas.
// Schemas will be fetched on-demand when the block is actually added.
const agentBlockList = flows
.map((flow): _Block => {
return {
@@ -120,9 +116,8 @@ export function BlocksControl({
`Ver.${flow.version}` +
(flow.description ? ` | ${flow.description}` : ""),
categories: [{ category: "AGENT", description: "" }],
// Empty schemas - will be populated when block is added
inputSchema: { type: "object", properties: {} },
outputSchema: { type: "object", properties: {} },
inputSchema: flow.input_schema,
outputSchema: flow.output_schema,
staticOutput: false,
uiType: BlockUIType.AGENT,
costs: [],
@@ -130,7 +125,8 @@ export function BlocksControl({
hardcodedValues: {
graph_id: flow.id,
graph_version: flow.version,
// Schemas will be fetched on-demand when block is added
input_schema: flow.input_schema,
output_schema: flow.output_schema,
},
};
})
@@ -186,37 +182,6 @@ export function BlocksControl({
setSelectedCategory(null);
}, []);
// Handler to add a block, fetching graph data on-demand for agent blocks
const handleAddBlock = useCallback(
async (block: _Block & { notAvailable: string | null }) => {
if (block.notAvailable) return;
// For agent blocks, fetch the full graph to get schemas
if (block.uiType === BlockUIType.AGENT && block.hardcodedValues) {
const graphID = block.hardcodedValues.graph_id as string;
const graphVersion = block.hardcodedValues.graph_version as number;
const graphData = okData(
await getV1GetSpecificGraph(graphID, { version: graphVersion }),
);
if (graphData) {
addBlock(block.id, block.name, {
...block.hardcodedValues,
input_schema: graphData.input_schema,
output_schema: graphData.output_schema,
});
} else {
// Fallback: add without schemas (will be incomplete)
console.error("Failed to fetch graph data for agent block");
addBlock(block.id, block.name, block.hardcodedValues || {});
}
} else {
addBlock(block.id, block.name, block.hardcodedValues || {});
}
},
[addBlock],
);
// Extract unique categories from blocks
const categories = useMemo(() => {
return Array.from(
@@ -338,7 +303,10 @@ export function BlocksControl({
}),
);
}}
onClick={() => handleAddBlock(block)}
onClick={() =>
!block.notAvailable &&
addBlock(block.id, block.name, block?.hardcodedValues || {})
}
title={block.notAvailable ?? undefined}
>
<div

View File

@@ -29,17 +29,13 @@ import "@xyflow/react/dist/style.css";
import { ConnectedEdge, CustomNode } from "../CustomNode/CustomNode";
import "./flow.css";
import {
BlockIORootSchema,
BlockUIType,
formatEdgeID,
GraphExecutionID,
GraphID,
GraphMeta,
LibraryAgent,
SpecialBlockID,
} from "@/lib/autogpt-server-api";
import { getV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
import { okData } from "@/app/api/helpers";
import { IncompatibilityInfo } from "../../../hooks/useSubAgentUpdate/types";
import { Key, storage } from "@/services/storage/local-storage";
import { findNewlyAddedBlockCoordinates, getTypeColor } from "@/lib/utils";
@@ -691,94 +687,8 @@ const FlowEditor: React.FC<{
[getNode, updateNode, nodes],
);
/* Shared helper to create and add a node */
const createAndAddNode = useCallback(
async (
blockID: string,
blockName: string,
hardcodedValues: Record<string, any>,
position: { x: number; y: number },
): Promise<CustomNode | null> => {
const nodeSchema = availableBlocks.find((node) => node.id === blockID);
if (!nodeSchema) {
console.error(`Schema not found for block ID: ${blockID}`);
return null;
}
// For agent blocks, fetch the full graph to get schemas
let inputSchema: BlockIORootSchema = nodeSchema.inputSchema;
let outputSchema: BlockIORootSchema = nodeSchema.outputSchema;
let finalHardcodedValues = hardcodedValues;
if (blockID === SpecialBlockID.AGENT) {
const graphID = hardcodedValues.graph_id as string;
const graphVersion = hardcodedValues.graph_version as number;
const graphData = okData(
await getV1GetSpecificGraph(graphID, { version: graphVersion }),
);
if (graphData) {
inputSchema = graphData.input_schema as BlockIORootSchema;
outputSchema = graphData.output_schema as BlockIORootSchema;
finalHardcodedValues = {
...hardcodedValues,
input_schema: graphData.input_schema,
output_schema: graphData.output_schema,
};
} else {
console.error("Failed to fetch graph data for agent block");
}
}
const newNode: CustomNode = {
id: nodeId.toString(),
type: "custom",
position,
data: {
blockType: blockName,
blockCosts: nodeSchema.costs || [],
title: `${blockName} ${nodeId}`,
description: nodeSchema.description,
categories: nodeSchema.categories,
inputSchema: inputSchema,
outputSchema: outputSchema,
hardcodedValues: finalHardcodedValues,
connections: [],
isOutputOpen: false,
block_id: blockID,
isOutputStatic: nodeSchema.staticOutput,
uiType: nodeSchema.uiType,
},
};
addNodes(newNode);
setNodeId((prevId) => prevId + 1);
clearNodesStatusAndOutput();
history.push({
type: "ADD_NODE",
payload: { node: { ...newNode, ...newNode.data } },
undo: () => deleteElements({ nodes: [{ id: newNode.id }] }),
redo: () => addNodes(newNode),
});
return newNode;
},
[
availableBlocks,
nodeId,
addNodes,
deleteElements,
clearNodesStatusAndOutput,
],
);
const addNode = useCallback(
async (
blockId: string,
nodeType: string,
hardcodedValues: Record<string, any> = {},
) => {
(blockId: string, nodeType: string, hardcodedValues: any = {}) => {
const nodeSchema = availableBlocks.find((node) => node.id === blockId);
if (!nodeSchema) {
console.error(`Schema not found for block ID: ${blockId}`);
@@ -797,42 +707,73 @@ const FlowEditor: React.FC<{
// Alternative: We could also use D3 force, Intersection for this (React flow Pro examples)
const { x, y } = getViewport();
const position =
const viewportCoordinates =
nodeDimensions && Object.keys(nodeDimensions).length > 0
? findNewlyAddedBlockCoordinates(
? // node dimensions are available: compute a position from them
findNewlyAddedBlockCoordinates(
nodeDimensions,
nodeSchema.uiType == BlockUIType.NOTE ? 300 : 500,
60,
1.0,
)
: {
: // no dimensions yet: fall back to the viewport center
{
x: window.innerWidth / 2 - x,
y: window.innerHeight / 2 - y,
};
const newNode = await createAndAddNode(
blockId,
nodeType,
hardcodedValues,
position,
);
if (!newNode) return;
const newNode: CustomNode = {
id: nodeId.toString(),
type: "custom",
position: viewportCoordinates, // Set the position to the calculated viewport center
data: {
blockType: nodeType,
blockCosts: nodeSchema.costs,
title: `${nodeType} ${nodeId}`,
description: nodeSchema.description,
categories: nodeSchema.categories,
inputSchema: nodeSchema.inputSchema,
outputSchema: nodeSchema.outputSchema,
hardcodedValues: hardcodedValues,
connections: [],
isOutputOpen: false,
block_id: blockId,
isOutputStatic: nodeSchema.staticOutput,
uiType: nodeSchema.uiType,
},
};
addNodes(newNode);
setNodeId((prevId) => prevId + 1);
clearNodesStatusAndOutput(); // Clear status and output when a new node is added
setViewport(
{
x: -position.x * 0.8 + (window.innerWidth - 0.0) / 2,
y: -position.y * 0.8 + (window.innerHeight - 400) / 2,
// Rough estimate of a node's dimensions: 500x400px.
// We skip the X shift to account for the block-menu sidebar.
x: -viewportCoordinates.x * 0.8 + (window.innerWidth - 0.0) / 2,
y: -viewportCoordinates.y * 0.8 + (window.innerHeight - 400) / 2,
zoom: 0.8,
},
{ duration: 500 },
);
history.push({
type: "ADD_NODE",
payload: { node: { ...newNode, ...newNode.data } },
undo: () => deleteElements({ nodes: [{ id: newNode.id }] }),
redo: () => addNodes(newNode),
});
},
[
nodeId,
getViewport,
setViewport,
availableBlocks,
addNodes,
nodeDimensions,
createAndAddNode,
deleteElements,
clearNodesStatusAndOutput,
],
);
@@ -979,7 +920,7 @@ const FlowEditor: React.FC<{
}, []);
const onDrop = useCallback(
async (event: React.DragEvent) => {
(event: React.DragEvent) => {
event.preventDefault();
const blockData = event.dataTransfer.getData("application/reactflow");
@@ -994,17 +935,62 @@ const FlowEditor: React.FC<{
y: event.clientY,
});
await createAndAddNode(
blockId,
blockName,
hardcodedValues || {},
// Find the block schema
const nodeSchema = availableBlocks.find((node) => node.id === blockId);
if (!nodeSchema) {
console.error(`Schema not found for block ID: ${blockId}`);
return;
}
// Create the new node at the drop position
const newNode: CustomNode = {
id: nodeId.toString(),
type: "custom",
position,
);
data: {
blockType: blockName,
blockCosts: nodeSchema.costs || [],
title: `${blockName} ${nodeId}`,
description: nodeSchema.description,
categories: nodeSchema.categories,
inputSchema: nodeSchema.inputSchema,
outputSchema: nodeSchema.outputSchema,
hardcodedValues: hardcodedValues,
connections: [],
isOutputOpen: false,
block_id: blockId,
uiType: nodeSchema.uiType,
},
};
history.push({
type: "ADD_NODE",
payload: { node: { ...newNode, ...newNode.data } },
undo: () => {
deleteElements({ nodes: [{ id: newNode.id } as any], edges: [] });
},
redo: () => {
addNodes([newNode]);
},
});
addNodes([newNode]);
clearNodesStatusAndOutput();
setNodeId((prevId) => prevId + 1);
} catch (error) {
console.error("Failed to drop block:", error);
}
},
[screenToFlowPosition, createAndAddNode],
[
nodeId,
availableBlocks,
nodes,
edges,
addNodes,
screenToFlowPosition,
deleteElements,
clearNodesStatusAndOutput,
],
);
const buildContextValue: BuilderContextType = useMemo(

View File

@@ -4,13 +4,13 @@ import { AgentRunDraftView } from "@/app/(platform)/library/agents/[id]/componen
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import type {
CredentialsMetaInput,
Graph,
GraphMeta,
} from "@/lib/autogpt-server-api/types";
interface RunInputDialogProps {
isOpen: boolean;
doClose: () => void;
graph: Graph;
graph: GraphMeta;
doRun?: (
inputs: Record<string, any>,
credentialsInputs: Record<string, CredentialsMetaInput>,

View File

@@ -9,13 +9,13 @@ import { CustomNodeData } from "@/app/(platform)/build/components/legacy-builder
import {
BlockUIType,
CredentialsMetaInput,
Graph,
GraphMeta,
} from "@/lib/autogpt-server-api/types";
import RunnerOutputUI, { OutputNodeInfo } from "./RunnerOutputUI";
import { RunnerInputDialog } from "./RunnerInputUI";
interface RunnerUIWrapperProps {
graph: Graph;
graph: GraphMeta;
nodes: Node<CustomNodeData>[];
graphExecutionError?: string | null;
saveAndRun: (

View File

@@ -1,5 +1,5 @@
import { GraphInputSchema } from "@/lib/autogpt-server-api";
import { GraphLike, IncompatibilityInfo } from "./types";
import { GraphMetaLike, IncompatibilityInfo } from "./types";
// Helper type for schema properties - the generated types are too loose
type SchemaProperties = Record<string, GraphInputSchema["properties"][string]>;
@@ -36,7 +36,7 @@ export function getSchemaRequired(schema: unknown): SchemaRequired {
*/
export function createUpdatedAgentNodeInputs(
currentInputs: Record<string, unknown>,
latestSubGraphVersion: GraphLike,
latestSubGraphVersion: GraphMetaLike,
): Record<string, unknown> {
return {
...currentInputs,

View File

@@ -1,11 +1,7 @@
import type {
Graph as LegacyGraph,
GraphMeta as LegacyGraphMeta,
} from "@/lib/autogpt-server-api";
import type { GraphModel as GeneratedGraph } from "@/app/api/__generated__/models/graphModel";
import type { GraphMeta as LegacyGraphMeta } from "@/lib/autogpt-server-api";
import type { GraphMeta as GeneratedGraphMeta } from "@/app/api/__generated__/models/graphMeta";
export type SubAgentUpdateInfo<T extends GraphLike = GraphLike> = {
export type SubAgentUpdateInfo<T extends GraphMetaLike = GraphMetaLike> = {
hasUpdate: boolean;
currentVersion: number;
latestVersion: number;
@@ -14,10 +10,7 @@ export type SubAgentUpdateInfo<T extends GraphLike = GraphLike> = {
incompatibilities: IncompatibilityInfo | null;
};
// Union type for Graph (with schemas) that works with both legacy and new builder
export type GraphLike = LegacyGraph | GeneratedGraph;
// Union type for GraphMeta (without schemas) for version detection
// Union type for GraphMeta that works with both legacy and new builder
export type GraphMetaLike = LegacyGraphMeta | GeneratedGraphMeta;
export type IncompatibilityInfo = {

View File

@@ -1,11 +1,5 @@
import { useMemo } from "react";
import type {
GraphInputSchema,
GraphOutputSchema,
} from "@/lib/autogpt-server-api";
import type { GraphModel } from "@/app/api/__generated__/models/graphModel";
import { useGetV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
import { okData } from "@/app/api/helpers";
import { GraphInputSchema, GraphOutputSchema } from "@/lib/autogpt-server-api";
import { getEffectiveType } from "@/lib/utils";
import { EdgeLike, getSchemaProperties, getSchemaRequired } from "./helpers";
import {
@@ -17,38 +11,26 @@ import {
/**
* Checks if a newer version of a sub-agent is available and determines compatibility
*/
export function useSubAgentUpdate(
export function useSubAgentUpdate<T extends GraphMetaLike>(
nodeID: string,
graphID: string | undefined,
graphVersion: number | undefined,
currentInputSchema: GraphInputSchema | undefined,
currentOutputSchema: GraphOutputSchema | undefined,
connections: EdgeLike[],
availableGraphs: GraphMetaLike[],
): SubAgentUpdateInfo<GraphModel> {
availableGraphs: T[],
): SubAgentUpdateInfo<T> {
// Find the latest version of the same graph
const latestGraphInfo = useMemo(() => {
const latestGraph = useMemo(() => {
if (!graphID) return null;
return availableGraphs.find((graph) => graph.id === graphID) || null;
}, [graphID, availableGraphs]);
// Check if there's a newer version available
// Check if there's an update available
const hasUpdate = useMemo(() => {
if (!latestGraphInfo || graphVersion === undefined) return false;
return latestGraphInfo.version! > graphVersion;
}, [latestGraphInfo, graphVersion]);
// Fetch full graph IF an update is detected
const { data: latestGraph } = useGetV1GetSpecificGraph(
graphID ?? "",
{ version: latestGraphInfo?.version },
{
query: {
enabled: hasUpdate && !!graphID && !!latestGraphInfo?.version,
select: okData,
},
},
);
if (!latestGraph || graphVersion === undefined) return false;
return latestGraph.version! > graphVersion;
}, [latestGraph, graphVersion]);
// Get connected input and output handles for this specific node
const connectedHandles = useMemo(() => {
@@ -170,8 +152,8 @@ export function useSubAgentUpdate(
return {
hasUpdate,
currentVersion: graphVersion || 0,
latestVersion: latestGraphInfo?.version || 0,
latestGraph: latestGraph || null,
latestVersion: latestGraph?.version || 0,
latestGraph,
isCompatible: compatibilityResult.isCompatible,
incompatibilities: compatibilityResult.incompatibilities,
};

View File

@@ -18,7 +18,7 @@ interface GraphStore {
outputSchema: Record<string, any> | null,
) => void;
// Available graphs; used for sub-graph updated version detection
// Available graphs; used for sub-graph updates
availableSubGraphs: GraphMeta[];
setAvailableSubGraphs: (graphs: GraphMeta[]) => void;

View File

@@ -10,8 +10,8 @@ import React, {
import {
CredentialsMetaInput,
CredentialsType,
Graph,
GraphExecutionID,
GraphMeta,
LibraryAgentPreset,
LibraryAgentPresetID,
LibraryAgentPresetUpdatable,
@@ -69,7 +69,7 @@ export function AgentRunDraftView({
className,
recommendedScheduleCron,
}: {
graph: Graph;
graph: GraphMeta;
agentActions?: ButtonAction[];
recommendedScheduleCron?: string | null;
doRun?: (

View File

@@ -2,8 +2,8 @@
import React, { useCallback, useMemo } from "react";
import {
Graph,
GraphExecutionID,
GraphMeta,
Schedule,
ScheduleID,
} from "@/lib/autogpt-server-api";
@@ -35,7 +35,7 @@ export function AgentScheduleDetailsView({
onForcedRun,
doDeleteSchedule,
}: {
graph: Graph;
graph: GraphMeta;
schedule: Schedule;
agentActions: ButtonAction[];
onForcedRun: (runID: GraphExecutionID) => void;

View File

@@ -5629,9 +5629,7 @@
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/GraphModelWithoutNodes"
}
"schema": { "$ref": "#/components/schemas/GraphMeta" }
}
}
},
@@ -6497,6 +6495,18 @@
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Recommended Schedule Cron"
},
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes",
"default": []
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links",
"default": []
},
"forked_from_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Forked From Id"
@@ -6504,22 +6514,11 @@
"forked_from_version": {
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version"
},
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes"
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links"
}
},
"type": "object",
"required": ["name", "description"],
"title": "BaseGraph",
"description": "Graph with nodes, links, and computed I/O schema fields.\n\nUsed to represent sub-graphs within a `Graph`. Contains the full graph\nstructure including nodes and links, plus computed fields for schemas\nand trigger info. Does NOT include user_id or created_at (see GraphModel)."
"title": "BaseGraph"
},
"BaseGraph-Output": {
"properties": {
@@ -6540,6 +6539,18 @@
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Recommended Schedule Cron"
},
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes",
"default": []
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links",
"default": []
},
"forked_from_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Forked From Id"
@@ -6548,16 +6559,6 @@
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version"
},
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes"
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links"
},
"input_schema": {
"additionalProperties": true,
"type": "object",
@@ -6604,8 +6605,7 @@
"has_sensitive_action",
"trigger_setup_info"
],
"title": "BaseGraph",
"description": "Graph with nodes, links, and computed I/O schema fields.\n\nUsed to represent sub-graphs within a `Graph`. Contains the full graph\nstructure including nodes and links, plus computed fields for schemas\nand trigger info. Does NOT include user_id or created_at (see GraphModel)."
"title": "BaseGraph"
},
"BlockCategoryResponse": {
"properties": {
@@ -7399,6 +7399,18 @@
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Recommended Schedule Cron"
},
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes",
"default": []
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links",
"default": []
},
"forked_from_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Forked From Id"
@@ -7407,26 +7419,16 @@
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version"
},
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes"
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links"
},
"sub_graphs": {
"items": { "$ref": "#/components/schemas/BaseGraph-Input" },
"type": "array",
"title": "Sub Graphs"
"title": "Sub Graphs",
"default": []
}
},
"type": "object",
"required": ["name", "description"],
"title": "Graph",
"description": "Creatable graph model used in API create/update endpoints."
"title": "Graph"
},
"GraphExecution": {
"properties": {
@@ -7778,7 +7780,7 @@
"GraphMeta": {
"properties": {
"id": { "type": "string", "title": "Id" },
"version": { "type": "integer", "title": "Version" },
"version": { "type": "integer", "title": "Version", "default": 1 },
"is_active": {
"type": "boolean",
"title": "Is Active",
@@ -7802,24 +7804,68 @@
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version"
},
"sub_graphs": {
"items": { "$ref": "#/components/schemas/BaseGraph-Output" },
"type": "array",
"title": "Sub Graphs",
"default": []
},
"user_id": { "type": "string", "title": "User Id" },
"created_at": {
"type": "string",
"format": "date-time",
"title": "Created At"
"input_schema": {
"additionalProperties": true,
"type": "object",
"title": "Input Schema",
"readOnly": true
},
"output_schema": {
"additionalProperties": true,
"type": "object",
"title": "Output Schema",
"readOnly": true
},
"has_external_trigger": {
"type": "boolean",
"title": "Has External Trigger",
"readOnly": true
},
"has_human_in_the_loop": {
"type": "boolean",
"title": "Has Human In The Loop",
"readOnly": true
},
"has_sensitive_action": {
"type": "boolean",
"title": "Has Sensitive Action",
"readOnly": true
},
"trigger_setup_info": {
"anyOf": [
{ "$ref": "#/components/schemas/GraphTriggerInfo" },
{ "type": "null" }
],
"readOnly": true
},
"credentials_input_schema": {
"additionalProperties": true,
"type": "object",
"title": "Credentials Input Schema",
"readOnly": true
}
},
"type": "object",
"required": [
"id",
"version",
"name",
"description",
"user_id",
"created_at"
"input_schema",
"output_schema",
"has_external_trigger",
"has_human_in_the_loop",
"has_sensitive_action",
"trigger_setup_info",
"credentials_input_schema"
],
"title": "GraphMeta",
"description": "Lightweight graph metadata model representing an existing graph from the database,\nfor use in listings and summaries.\n\nLacks `GraphModel`'s nodes, links, and expensive computed fields.\nUse for list endpoints where full graph data is not needed and performance matters."
"title": "GraphMeta"
},
"GraphModel": {
"properties": {
@@ -7840,111 +7886,17 @@
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Recommended Schedule Cron"
},
"forked_from_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Forked From Id"
},
"forked_from_version": {
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version"
},
"user_id": { "type": "string", "title": "User Id" },
"created_at": {
"type": "string",
"format": "date-time",
"title": "Created At"
},
"nodes": {
"items": { "$ref": "#/components/schemas/NodeModel" },
"type": "array",
"title": "Nodes"
"title": "Nodes",
"default": []
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links"
},
"sub_graphs": {
"items": { "$ref": "#/components/schemas/BaseGraph-Output" },
"type": "array",
"title": "Sub Graphs"
},
"input_schema": {
"additionalProperties": true,
"type": "object",
"title": "Input Schema",
"readOnly": true
},
"output_schema": {
"additionalProperties": true,
"type": "object",
"title": "Output Schema",
"readOnly": true
},
"has_external_trigger": {
"type": "boolean",
"title": "Has External Trigger",
"readOnly": true
},
"has_human_in_the_loop": {
"type": "boolean",
"title": "Has Human In The Loop",
"readOnly": true
},
"has_sensitive_action": {
"type": "boolean",
"title": "Has Sensitive Action",
"readOnly": true
},
"trigger_setup_info": {
"anyOf": [
{ "$ref": "#/components/schemas/GraphTriggerInfo" },
{ "type": "null" }
],
"readOnly": true
},
"credentials_input_schema": {
"additionalProperties": true,
"type": "object",
"title": "Credentials Input Schema",
"readOnly": true
}
},
"type": "object",
"required": [
"name",
"description",
"user_id",
"created_at",
"input_schema",
"output_schema",
"has_external_trigger",
"has_human_in_the_loop",
"has_sensitive_action",
"trigger_setup_info",
"credentials_input_schema"
],
"title": "GraphModel",
"description": "Full graph model representing an existing graph from the database.\n\nThis is the primary model for working with persisted graphs. Includes all\ngraph data (nodes, links, sub_graphs) plus user ownership and timestamps.\nProvides computed fields (input_schema, output_schema, etc.) used during\nset-up (frontend) and execution (backend).\n\nInherits from:\n- `Graph`: provides structure (nodes, links, sub_graphs) and computed schemas\n- `GraphMeta`: provides user_id, created_at for database records"
},
"GraphModelWithoutNodes": {
"properties": {
"id": { "type": "string", "title": "Id" },
"version": { "type": "integer", "title": "Version", "default": 1 },
"is_active": {
"type": "boolean",
"title": "Is Active",
"default": true
},
"name": { "type": "string", "title": "Name" },
"description": { "type": "string", "title": "Description" },
"instructions": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Instructions"
},
"recommended_schedule_cron": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Recommended Schedule Cron"
"title": "Links",
"default": []
},
"forked_from_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
@@ -7954,6 +7906,12 @@
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version"
},
"sub_graphs": {
"items": { "$ref": "#/components/schemas/BaseGraph-Output" },
"type": "array",
"title": "Sub Graphs",
"default": []
},
"user_id": { "type": "string", "title": "User Id" },
"created_at": {
"type": "string",
@@ -8015,8 +7973,7 @@
"trigger_setup_info",
"credentials_input_schema"
],
"title": "GraphModelWithoutNodes",
"description": "GraphModel variant that excludes nodes, links, and sub-graphs from serialization.\n\nUsed in contexts like the store where exposing internal graph structure\nis not desired. Inherits all computed fields from GraphModel but marks\nnodes and links as excluded from JSON output."
"title": "GraphModel"
},
"GraphSettings": {
"properties": {
@@ -8656,22 +8613,26 @@
"input_default": {
"additionalProperties": true,
"type": "object",
"title": "Input Default"
"title": "Input Default",
"default": {}
},
"metadata": {
"additionalProperties": true,
"type": "object",
"title": "Metadata"
"title": "Metadata",
"default": {}
},
"input_links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Input Links"
"title": "Input Links",
"default": []
},
"output_links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Output Links"
"title": "Output Links",
"default": []
}
},
"type": "object",
@@ -8751,22 +8712,26 @@
"input_default": {
"additionalProperties": true,
"type": "object",
"title": "Input Default"
"title": "Input Default",
"default": {}
},
"metadata": {
"additionalProperties": true,
"type": "object",
"title": "Metadata"
"title": "Metadata",
"default": {}
},
"input_links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Input Links"
"title": "Input Links",
"default": []
},
"output_links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Output Links"
"title": "Output Links",
"default": []
},
"graph_id": { "type": "string", "title": "Graph Id" },
"graph_version": { "type": "integer", "title": "Graph Version" },
@@ -12307,9 +12272,7 @@
"title": "Location"
},
"msg": { "type": "string", "title": "Message" },
"type": { "type": "string", "title": "Error Type" },
"input": { "title": "Input" },
"ctx": { "type": "object", "title": "Context" }
"type": { "type": "string", "title": "Error Type" }
},
"type": "object",
"required": ["loc", "msg", "type"],


@@ -4,7 +4,7 @@ import { Button } from "@/components/atoms/Button/Button";
import { Input } from "@/components/atoms/Input/Input";
import { Text } from "@/components/atoms/Text/Text";
import { Bell, MagnifyingGlass, X } from "@phosphor-icons/react";
import { List, type RowComponentProps } from "react-window";
import { FixedSizeList as List } from "react-window";
import { AgentExecutionWithInfo } from "../../helpers";
import { ActivityItem } from "../ActivityItem";
import styles from "./styles.module.css";
@@ -19,16 +19,14 @@ interface Props {
recentFailures: AgentExecutionWithInfo[];
}
interface ActivityRowProps {
executions: AgentExecutionWithInfo[];
interface VirtualizedItemProps {
index: number;
style: React.CSSProperties;
data: AgentExecutionWithInfo[];
}
function VirtualizedActivityItem({
index,
style,
executions,
}: RowComponentProps<ActivityRowProps>) {
const execution = executions[index];
function VirtualizedActivityItem({ index, style, data }: VirtualizedItemProps) {
const execution = data[index];
return (
<div style={style}>
<ActivityItem execution={execution} />
@@ -131,13 +129,14 @@ export function ActivityDropdown({
>
{filteredExecutions.length > 0 ? (
<List
defaultHeight={listHeight}
rowCount={filteredExecutions.length}
rowHeight={itemHeight}
rowProps={{ executions: filteredExecutions }}
rowComponent={VirtualizedActivityItem}
style={{ width: 320, height: listHeight }}
/>
height={listHeight}
width={320} // Match dropdown width (w-80 = 20rem = 320px)
itemCount={filteredExecutions.length}
itemSize={itemHeight}
itemData={filteredExecutions}
>
{VirtualizedActivityItem}
</List>
) : (
<div className="flex h-full flex-col items-center justify-center gap-5 pb-8 pt-6">
<div className="mx-auto inline-flex flex-col items-center justify-center rounded-full bg-bgLightGrey p-6">


@@ -362,14 +362,25 @@ export type GraphMeta = {
user_id: UserID;
version: number;
is_active: boolean;
created_at: Date;
name: string;
description: string;
instructions?: string | null;
recommended_schedule_cron: string | null;
forked_from_id?: GraphID | null;
forked_from_version?: number | null;
};
input_schema: GraphInputSchema;
output_schema: GraphOutputSchema;
credentials_input_schema: CredentialsInputSchema;
} & (
| {
has_external_trigger: true;
trigger_setup_info: GraphTriggerInfo;
}
| {
has_external_trigger: false;
trigger_setup_info: null;
}
);
export type GraphID = Brand<string, "GraphID">;
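
Because GraphMeta is now a discriminated union on has_external_trigger, checking that flag is enough for the compiler to know whether trigger_setup_info is present; no separate null check is needed. A small sketch of the narrowing this buys (the function is hypothetical; the types are the ones defined above):

function describeTrigger(graph: GraphMeta): string {
  if (graph.has_external_trigger) {
    // In this branch trigger_setup_info is typed as GraphTriggerInfo, never null.
    return `external trigger: ${JSON.stringify(graph.trigger_setup_info)}`;
  }
  // Here the compiler knows trigger_setup_info is null.
  return "runs manually or on a schedule";
}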
@@ -436,22 +447,11 @@ export type GraphTriggerInfo = {
/* Mirror of backend/data/graph.py:Graph */
export type Graph = GraphMeta & {
created_at: Date;
nodes: Node[];
links: Link[];
sub_graphs: Omit<Graph, "sub_graphs">[]; // Flattened sub-graphs
input_schema: GraphInputSchema;
output_schema: GraphOutputSchema;
credentials_input_schema: CredentialsInputSchema;
} & (
| {
has_external_trigger: true;
trigger_setup_info: GraphTriggerInfo;
}
| {
has_external_trigger: false;
trigger_setup_info: null;
}
);
};
export type GraphUpdateable = Omit<
Graph,