mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-04-08 03:00:28 -04:00
Merge branch 'dev' into add-iffy-moderation
This commit is contained in:
@@ -27,7 +27,7 @@
|
||||
!autogpt_platform/frontend/src/
|
||||
!autogpt_platform/frontend/public/
|
||||
!autogpt_platform/frontend/package.json
|
||||
!autogpt_platform/frontend/yarn.lock
|
||||
!autogpt_platform/frontend/pnpm-lock.yaml
|
||||
!autogpt_platform/frontend/tsconfig.json
|
||||
!autogpt_platform/frontend/README.md
|
||||
## config
|
||||
|
||||
64
.github/dependabot.yml
vendored
64
.github/dependabot.yml
vendored
@@ -10,17 +10,19 @@ updates:
|
||||
commit-message:
|
||||
prefix: "chore(libs/deps)"
|
||||
prefix-development: "chore(libs/deps-dev)"
|
||||
ignore:
|
||||
- dependency-name: "poetry"
|
||||
groups:
|
||||
production-dependencies:
|
||||
dependency-type: "production"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
- "minor"
|
||||
- "patch"
|
||||
development-dependencies:
|
||||
dependency-type: "development"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
- "minor"
|
||||
- "patch"
|
||||
|
||||
# backend (Poetry project)
|
||||
- package-ecosystem: "pip"
|
||||
@@ -32,17 +34,19 @@ updates:
|
||||
commit-message:
|
||||
prefix: "chore(backend/deps)"
|
||||
prefix-development: "chore(backend/deps-dev)"
|
||||
ignore:
|
||||
- dependency-name: "poetry"
|
||||
groups:
|
||||
production-dependencies:
|
||||
dependency-type: "production"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
- "minor"
|
||||
- "patch"
|
||||
development-dependencies:
|
||||
dependency-type: "development"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
- "minor"
|
||||
- "patch"
|
||||
|
||||
# frontend (Next.js project)
|
||||
- package-ecosystem: "npm"
|
||||
@@ -58,13 +62,13 @@ updates:
|
||||
production-dependencies:
|
||||
dependency-type: "production"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
- "minor"
|
||||
- "patch"
|
||||
development-dependencies:
|
||||
dependency-type: "development"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
- "minor"
|
||||
- "patch"
|
||||
|
||||
# infra (Terraform)
|
||||
- package-ecosystem: "terraform"
|
||||
@@ -81,14 +85,13 @@ updates:
|
||||
production-dependencies:
|
||||
dependency-type: "production"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
- "minor"
|
||||
- "patch"
|
||||
development-dependencies:
|
||||
dependency-type: "development"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
|
||||
- "minor"
|
||||
- "patch"
|
||||
|
||||
# GitHub Actions
|
||||
- package-ecosystem: "github-actions"
|
||||
@@ -101,14 +104,13 @@ updates:
|
||||
production-dependencies:
|
||||
dependency-type: "production"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
- "minor"
|
||||
- "patch"
|
||||
development-dependencies:
|
||||
dependency-type: "development"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
|
||||
- "minor"
|
||||
- "patch"
|
||||
|
||||
# Docker
|
||||
- package-ecosystem: "docker"
|
||||
@@ -121,16 +123,16 @@ updates:
|
||||
production-dependencies:
|
||||
dependency-type: "production"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
- "minor"
|
||||
- "patch"
|
||||
development-dependencies:
|
||||
dependency-type: "development"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
- "minor"
|
||||
- "patch"
|
||||
|
||||
# Docs
|
||||
- package-ecosystem: 'pip'
|
||||
- package-ecosystem: "pip"
|
||||
directory: "docs/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
@@ -142,10 +144,10 @@ updates:
|
||||
production-dependencies:
|
||||
dependency-type: "production"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
- "minor"
|
||||
- "patch"
|
||||
development-dependencies:
|
||||
dependency-type: "development"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
- "minor"
|
||||
- "patch"
|
||||
|
||||
5
.github/labeler.yml
vendored
5
.github/labeler.yml
vendored
@@ -24,8 +24,9 @@ platform/frontend:
|
||||
|
||||
platform/backend:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: autogpt_platform/backend/**
|
||||
- all-globs-to-all-files: '!autogpt_platform/backend/backend/blocks/**'
|
||||
- all-globs-to-any-file:
|
||||
- autogpt_platform/backend/**
|
||||
- '!autogpt_platform/backend/backend/blocks/**'
|
||||
|
||||
platform/blocks:
|
||||
- changed-files:
|
||||
|
||||
47
.github/workflows/claude.yml
vendored
Normal file
47
.github/workflows/claude.yml
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
name: Claude Code
|
||||
|
||||
on:
|
||||
issue_comment:
|
||||
types: [created]
|
||||
pull_request_review_comment:
|
||||
types: [created]
|
||||
issues:
|
||||
types: [opened, assigned]
|
||||
pull_request_review:
|
||||
types: [submitted]
|
||||
|
||||
jobs:
|
||||
claude:
|
||||
if: |
|
||||
(
|
||||
(github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
|
||||
(github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
|
||||
(github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
|
||||
(github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
|
||||
) && (
|
||||
github.event.comment.author_association == 'OWNER' ||
|
||||
github.event.comment.author_association == 'MEMBER' ||
|
||||
github.event.comment.author_association == 'COLLABORATOR' ||
|
||||
github.event.review.author_association == 'OWNER' ||
|
||||
github.event.review.author_association == 'MEMBER' ||
|
||||
github.event.review.author_association == 'COLLABORATOR' ||
|
||||
github.event.issue.author_association == 'OWNER' ||
|
||||
github.event.issue.author_association == 'MEMBER' ||
|
||||
github.event.issue.author_association == 'COLLABORATOR'
|
||||
)
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: read
|
||||
issues: read
|
||||
id-token: write
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 1
|
||||
- name: Run Claude Code
|
||||
id: claude
|
||||
uses: anthropics/claude-code-action@beta
|
||||
with:
|
||||
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
313
.github/workflows/platform-autogpt-deploy-dev.yaml
vendored
313
.github/workflows/platform-autogpt-deploy-dev.yaml
vendored
@@ -1,282 +1,51 @@
|
||||
name: AutoGPT Platform - Dev Deploy PR Event Dispatcher
|
||||
name: AutoGPT Platform - Deploy Dev Environment
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [closed]
|
||||
issue_comment:
|
||||
types: [created]
|
||||
push:
|
||||
branches: [ dev ]
|
||||
paths:
|
||||
- 'autogpt_platform/**'
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
pull-requests: write
|
||||
contents: 'read'
|
||||
id-token: 'write'
|
||||
|
||||
jobs:
|
||||
dispatch:
|
||||
migrate:
|
||||
environment: develop
|
||||
name: Run migrations for AutoGPT Platform
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.11'
|
||||
|
||||
- name: Install Python dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install prisma
|
||||
|
||||
- name: Run Backend Migrations
|
||||
working-directory: ./autogpt_platform/backend
|
||||
run: |
|
||||
python -m prisma migrate deploy
|
||||
env:
|
||||
DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}
|
||||
DIRECT_URL: ${{ secrets.BACKEND_DATABASE_URL }}
|
||||
|
||||
trigger:
|
||||
needs: migrate
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check comment permissions and deployment status
|
||||
id: check_status
|
||||
if: github.event_name == 'issue_comment' && github.event.issue.pull_request
|
||||
uses: actions/github-script@v7
|
||||
- name: Trigger deploy workflow
|
||||
uses: peter-evans/repository-dispatch@v3
|
||||
with:
|
||||
script: |
|
||||
const commentBody = context.payload.comment.body.trim();
|
||||
const commentUser = context.payload.comment.user.login;
|
||||
const prAuthor = context.payload.issue.user.login;
|
||||
const authorAssociation = context.payload.comment.author_association;
|
||||
const triggeringCommentId = context.payload.comment.id;
|
||||
|
||||
// Check permissions
|
||||
const hasPermission = (
|
||||
authorAssociation === 'OWNER' ||
|
||||
authorAssociation === 'MEMBER' ||
|
||||
authorAssociation === 'COLLABORATOR'
|
||||
);
|
||||
|
||||
core.setOutput('comment_body', commentBody);
|
||||
core.setOutput('has_permission', hasPermission);
|
||||
|
||||
if (!hasPermission && (commentBody === '!deploy' || commentBody === '!undeploy')) {
|
||||
core.setOutput('permission_denied', 'true');
|
||||
return;
|
||||
}
|
||||
|
||||
if (commentBody !== '!deploy' && commentBody !== '!undeploy') {
|
||||
return;
|
||||
}
|
||||
|
||||
// Get all comments to check deployment status
|
||||
const commentsResponse = await github.rest.issues.listComments({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
per_page: 100
|
||||
});
|
||||
|
||||
// Filter out the triggering comment
|
||||
const commentsData = commentsResponse.data.filter(comment => comment.id !== triggeringCommentId);
|
||||
|
||||
// Find the last deploy and undeploy commands
|
||||
let lastDeployIndex = -2;
|
||||
let lastUndeployIndex = -1;
|
||||
|
||||
console.log(`Found ${commentsResponse.data.length} total comments, using ${commentsData.length} for status check after filtering`);
|
||||
|
||||
// Iterate through comments in reverse to find the most recent commands
|
||||
for (let i = commentsData.length - 1; i >= 0; i--) {
|
||||
const currentCommentBody = commentsData[i].body.trim();
|
||||
console.log(`Processing comment ${i}: ${currentCommentBody}`);
|
||||
|
||||
if (currentCommentBody === '!deploy' && lastDeployIndex === -2) {
|
||||
lastDeployIndex = i;
|
||||
} else if (currentCommentBody === '!undeploy' && lastUndeployIndex === -1) {
|
||||
lastUndeployIndex = i;
|
||||
}
|
||||
|
||||
// Break early if we found both
|
||||
if (lastDeployIndex !== -2 && lastUndeployIndex !== -1) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
console.log(`Last deploy index: ${lastDeployIndex}`);
|
||||
console.log(`Last undeploy index: ${lastUndeployIndex}`);
|
||||
|
||||
// Currently deployed if there's a deploy command after the last undeploy
|
||||
const isCurrentlyDeployed = lastDeployIndex > lastUndeployIndex;
|
||||
|
||||
// Determine actions based on current state and requested command
|
||||
if (commentBody === '!deploy') {
|
||||
if (isCurrentlyDeployed) {
|
||||
core.setOutput('deploy_blocked', 'already_deployed');
|
||||
} else {
|
||||
core.setOutput('should_deploy', 'true');
|
||||
}
|
||||
} else if (commentBody === '!undeploy') {
|
||||
if (!isCurrentlyDeployed) {
|
||||
// Check if there was ever a deploy
|
||||
const hasEverDeployed = lastDeployIndex !== -2;
|
||||
core.setOutput('undeploy_blocked', hasEverDeployed ? 'already_undeployed' : 'never_deployed');
|
||||
} else {
|
||||
core.setOutput('should_undeploy', 'true');
|
||||
}
|
||||
}
|
||||
|
||||
core.setOutput('has_active_deployment', isCurrentlyDeployed);
|
||||
|
||||
- name: Post permission denied comment
|
||||
if: steps.check_status.outputs.permission_denied == 'true'
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
body: `❌ **Permission denied**: Only the repository owners, members, or collaborators can use deployment commands.`
|
||||
});
|
||||
|
||||
- name: Post deploy blocked comment
|
||||
if: steps.check_status.outputs.deploy_blocked == 'already_deployed'
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
body: `⚠️ **Deploy skipped**: This PR already has an active deployment. Use \`!undeploy\` first if you want to redeploy.`
|
||||
});
|
||||
|
||||
- name: Post undeploy blocked comment
|
||||
if: steps.check_status.outputs.undeploy_blocked != ''
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
const reason = '${{ steps.check_status.outputs.undeploy_blocked }}';
|
||||
let message;
|
||||
|
||||
if (reason === 'never_deployed') {
|
||||
message = `⚠️ **Undeploy skipped**: This PR has never been deployed. Use \`!deploy\` first.`;
|
||||
} else if (reason === 'already_undeployed') {
|
||||
message = `⚠️ **Undeploy skipped**: This PR is already undeployed.`;
|
||||
}
|
||||
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
body: message
|
||||
});
|
||||
|
||||
- name: Get PR details for deployment
|
||||
id: pr_details
|
||||
if: steps.check_status.outputs.should_deploy == 'true' || steps.check_status.outputs.should_undeploy == 'true'
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
const pr = await github.rest.pulls.get({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
pull_number: context.issue.number
|
||||
});
|
||||
core.setOutput('pr_number', pr.data.number);
|
||||
core.setOutput('pr_title', pr.data.title);
|
||||
core.setOutput('pr_state', pr.data.state);
|
||||
|
||||
- name: Dispatch Deploy Event
|
||||
if: steps.check_status.outputs.should_deploy == 'true'
|
||||
uses: peter-evans/repository-dispatch@v2
|
||||
with:
|
||||
token: ${{ secrets.DISPATCH_TOKEN }}
|
||||
token: ${{ secrets.DEPLOY_TOKEN }}
|
||||
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
|
||||
event-type: pr-event
|
||||
client-payload: |
|
||||
{
|
||||
"action": "deploy",
|
||||
"pr_number": "${{ steps.pr_details.outputs.pr_number }}",
|
||||
"pr_title": "${{ steps.pr_details.outputs.pr_title }}",
|
||||
"pr_state": "${{ steps.pr_details.outputs.pr_state }}",
|
||||
"repo": "${{ github.repository }}"
|
||||
}
|
||||
|
||||
- name: Post deploy success comment
|
||||
if: steps.check_status.outputs.should_deploy == 'true'
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
body: `🚀 **Deploying PR #${{ steps.pr_details.outputs.pr_number }}** to development environment...`
|
||||
});
|
||||
|
||||
- name: Dispatch Undeploy Event (from comment)
|
||||
if: steps.check_status.outputs.should_undeploy == 'true'
|
||||
uses: peter-evans/repository-dispatch@v2
|
||||
with:
|
||||
token: ${{ secrets.DISPATCH_TOKEN }}
|
||||
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
|
||||
event-type: pr-event
|
||||
client-payload: |
|
||||
{
|
||||
"action": "undeploy",
|
||||
"pr_number": "${{ steps.pr_details.outputs.pr_number }}",
|
||||
"pr_title": "${{ steps.pr_details.outputs.pr_title }}",
|
||||
"pr_state": "${{ steps.pr_details.outputs.pr_state }}",
|
||||
"repo": "${{ github.repository }}"
|
||||
}
|
||||
|
||||
- name: Post undeploy success comment
|
||||
if: steps.check_status.outputs.should_undeploy == 'true'
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
body: `🗑️ **Undeploying PR #${{ steps.pr_details.outputs.pr_number }}** from development environment...`
|
||||
});
|
||||
|
||||
- name: Check deployment status on PR close
|
||||
id: check_pr_close
|
||||
if: github.event_name == 'pull_request' && github.event.action == 'closed'
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
const comments = await github.rest.issues.listComments({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number
|
||||
});
|
||||
|
||||
let lastDeployIndex = -1;
|
||||
let lastUndeployIndex = -1;
|
||||
|
||||
comments.data.forEach((comment, index) => {
|
||||
if (comment.body.trim() === '!deploy') {
|
||||
lastDeployIndex = index;
|
||||
} else if (comment.body.trim() === '!undeploy') {
|
||||
lastUndeployIndex = index;
|
||||
}
|
||||
});
|
||||
|
||||
// Should undeploy if there's a !deploy without a subsequent !undeploy
|
||||
const shouldUndeploy = lastDeployIndex !== -1 && lastDeployIndex > lastUndeployIndex;
|
||||
core.setOutput('should_undeploy', shouldUndeploy);
|
||||
|
||||
- name: Dispatch Undeploy Event (PR closed with active deployment)
|
||||
if: >-
|
||||
github.event_name == 'pull_request' &&
|
||||
github.event.action == 'closed' &&
|
||||
steps.check_pr_close.outputs.should_undeploy == 'true'
|
||||
uses: peter-evans/repository-dispatch@v2
|
||||
with:
|
||||
token: ${{ secrets.DISPATCH_TOKEN }}
|
||||
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
|
||||
event-type: pr-event
|
||||
client-payload: |
|
||||
{
|
||||
"action": "undeploy",
|
||||
"pr_number": "${{ github.event.pull_request.number }}",
|
||||
"pr_title": "${{ github.event.pull_request.title }}",
|
||||
"pr_state": "${{ github.event.pull_request.state }}",
|
||||
"repo": "${{ github.repository }}"
|
||||
}
|
||||
|
||||
- name: Post PR close undeploy comment
|
||||
if: >-
|
||||
github.event_name == 'pull_request' &&
|
||||
github.event.action == 'closed' &&
|
||||
steps.check_pr_close.outputs.should_undeploy == 'true'
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
body: `🧹 **Auto-undeploying**: PR closed with active deployment. Cleaning up development environment for PR #${{ github.event.pull_request.number }}.`
|
||||
});
|
||||
event-type: build_deploy_dev
|
||||
client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "repository": "${{ github.repository }}"}'
|
||||
|
||||
6
.github/workflows/platform-backend-ci.yml
vendored
6
.github/workflows/platform-backend-ci.yml
vendored
@@ -32,7 +32,7 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: ["3.10"]
|
||||
python-version: ["3.11"]
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
services:
|
||||
@@ -81,12 +81,12 @@ jobs:
|
||||
- name: Install Poetry (Unix)
|
||||
run: |
|
||||
# Extract Poetry version from backend/poetry.lock
|
||||
HEAD_POETRY_VERSION=$(head -n 1 poetry.lock | grep -oP '(?<=Poetry )[0-9]+\.[0-9]+\.[0-9]+')
|
||||
HEAD_POETRY_VERSION=$(python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
|
||||
echo "Found Poetry version ${HEAD_POETRY_VERSION} in backend/poetry.lock"
|
||||
|
||||
if [ -n "$BASE_REF" ]; then
|
||||
BASE_BRANCH=${BASE_REF/refs\/heads\//}
|
||||
BASE_POETRY_VERSION=$((git show "origin/$BASE_BRANCH":./poetry.lock; true) | head -n 1 | grep -oP '(?<=Poetry )[0-9]+\.[0-9]+\.[0-9]+')
|
||||
BASE_POETRY_VERSION=$((git show "origin/$BASE_BRANCH":./poetry.lock; true) | python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry -)
|
||||
echo "Found Poetry version ${BASE_POETRY_VERSION} in backend/poetry.lock on ${BASE_REF}"
|
||||
POETRY_VERSION=$(printf '%s\n' "$HEAD_POETRY_VERSION" "$BASE_POETRY_VERSION" | sort -V | tail -n1)
|
||||
else
|
||||
|
||||
198
.github/workflows/platform-dev-deploy-event-dispatcher.yml
vendored
Normal file
198
.github/workflows/platform-dev-deploy-event-dispatcher.yml
vendored
Normal file
@@ -0,0 +1,198 @@
|
||||
name: AutoGPT Platform - Dev Deploy PR Event Dispatcher
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [closed]
|
||||
issue_comment:
|
||||
types: [created]
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
pull-requests: write
|
||||
|
||||
jobs:
|
||||
dispatch:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check comment permissions and deployment status
|
||||
id: check_status
|
||||
if: github.event_name == 'issue_comment' && github.event.issue.pull_request
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
const commentBody = context.payload.comment.body.trim();
|
||||
const commentUser = context.payload.comment.user.login;
|
||||
const prAuthor = context.payload.issue.user.login;
|
||||
const authorAssociation = context.payload.comment.author_association;
|
||||
|
||||
// Check permissions
|
||||
const hasPermission = (
|
||||
authorAssociation === 'OWNER' ||
|
||||
authorAssociation === 'MEMBER' ||
|
||||
authorAssociation === 'COLLABORATOR'
|
||||
);
|
||||
|
||||
core.setOutput('comment_body', commentBody);
|
||||
core.setOutput('has_permission', hasPermission);
|
||||
|
||||
if (!hasPermission && (commentBody === '!deploy' || commentBody === '!undeploy')) {
|
||||
core.setOutput('permission_denied', 'true');
|
||||
return;
|
||||
}
|
||||
|
||||
if (commentBody !== '!deploy' && commentBody !== '!undeploy') {
|
||||
return;
|
||||
}
|
||||
|
||||
// Process deploy command
|
||||
if (commentBody === '!deploy') {
|
||||
core.setOutput('should_deploy', 'true');
|
||||
}
|
||||
// Process undeploy command
|
||||
else if (commentBody === '!undeploy') {
|
||||
core.setOutput('should_undeploy', 'true');
|
||||
}
|
||||
|
||||
- name: Post permission denied comment
|
||||
if: steps.check_status.outputs.permission_denied == 'true'
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
body: `❌ **Permission denied**: Only the repository owners, members, or collaborators can use deployment commands.`
|
||||
});
|
||||
|
||||
- name: Get PR details for deployment
|
||||
id: pr_details
|
||||
if: steps.check_status.outputs.should_deploy == 'true' || steps.check_status.outputs.should_undeploy == 'true'
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
const pr = await github.rest.pulls.get({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
pull_number: context.issue.number
|
||||
});
|
||||
core.setOutput('pr_number', pr.data.number);
|
||||
core.setOutput('pr_title', pr.data.title);
|
||||
core.setOutput('pr_state', pr.data.state);
|
||||
|
||||
- name: Dispatch Deploy Event
|
||||
if: steps.check_status.outputs.should_deploy == 'true'
|
||||
uses: peter-evans/repository-dispatch@v3
|
||||
with:
|
||||
token: ${{ secrets.DISPATCH_TOKEN }}
|
||||
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
|
||||
event-type: pr-event
|
||||
client-payload: |
|
||||
{
|
||||
"action": "deploy",
|
||||
"pr_number": "${{ steps.pr_details.outputs.pr_number }}",
|
||||
"pr_title": "${{ steps.pr_details.outputs.pr_title }}",
|
||||
"pr_state": "${{ steps.pr_details.outputs.pr_state }}",
|
||||
"repo": "${{ github.repository }}"
|
||||
}
|
||||
|
||||
- name: Post deploy success comment
|
||||
if: steps.check_status.outputs.should_deploy == 'true'
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
body: `🚀 **Deploying PR #${{ steps.pr_details.outputs.pr_number }}** to development environment...`
|
||||
});
|
||||
|
||||
- name: Dispatch Undeploy Event (from comment)
|
||||
if: steps.check_status.outputs.should_undeploy == 'true'
|
||||
uses: peter-evans/repository-dispatch@v3
|
||||
with:
|
||||
token: ${{ secrets.DISPATCH_TOKEN }}
|
||||
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
|
||||
event-type: pr-event
|
||||
client-payload: |
|
||||
{
|
||||
"action": "undeploy",
|
||||
"pr_number": "${{ steps.pr_details.outputs.pr_number }}",
|
||||
"pr_title": "${{ steps.pr_details.outputs.pr_title }}",
|
||||
"pr_state": "${{ steps.pr_details.outputs.pr_state }}",
|
||||
"repo": "${{ github.repository }}"
|
||||
}
|
||||
|
||||
- name: Post undeploy success comment
|
||||
if: steps.check_status.outputs.should_undeploy == 'true'
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
body: `🗑️ **Undeploying PR #${{ steps.pr_details.outputs.pr_number }}** from development environment...`
|
||||
});
|
||||
|
||||
- name: Check deployment status on PR close
|
||||
id: check_pr_close
|
||||
if: github.event_name == 'pull_request' && github.event.action == 'closed'
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
const comments = await github.rest.issues.listComments({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number
|
||||
});
|
||||
|
||||
let lastDeployIndex = -1;
|
||||
let lastUndeployIndex = -1;
|
||||
|
||||
comments.data.forEach((comment, index) => {
|
||||
if (comment.body.trim() === '!deploy') {
|
||||
lastDeployIndex = index;
|
||||
} else if (comment.body.trim() === '!undeploy') {
|
||||
lastUndeployIndex = index;
|
||||
}
|
||||
});
|
||||
|
||||
// Should undeploy if there's a !deploy without a subsequent !undeploy
|
||||
const shouldUndeploy = lastDeployIndex !== -1 && lastDeployIndex > lastUndeployIndex;
|
||||
core.setOutput('should_undeploy', shouldUndeploy);
|
||||
|
||||
- name: Dispatch Undeploy Event (PR closed with active deployment)
|
||||
if: >-
|
||||
github.event_name == 'pull_request' &&
|
||||
github.event.action == 'closed' &&
|
||||
steps.check_pr_close.outputs.should_undeploy == 'true'
|
||||
uses: peter-evans/repository-dispatch@v3
|
||||
with:
|
||||
token: ${{ secrets.DISPATCH_TOKEN }}
|
||||
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
|
||||
event-type: pr-event
|
||||
client-payload: |
|
||||
{
|
||||
"action": "undeploy",
|
||||
"pr_number": "${{ github.event.pull_request.number }}",
|
||||
"pr_title": "${{ github.event.pull_request.title }}",
|
||||
"pr_state": "${{ github.event.pull_request.state }}",
|
||||
"repo": "${{ github.repository }}"
|
||||
}
|
||||
|
||||
- name: Post PR close undeploy comment
|
||||
if: >-
|
||||
github.event_name == 'pull_request' &&
|
||||
github.event.action == 'closed' &&
|
||||
steps.check_pr_close.outputs.should_undeploy == 'true'
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
body: `🧹 **Auto-undeploying**: PR closed with active deployment. Cleaning up development environment for PR #${{ github.event.pull_request.number }}.`
|
||||
});
|
||||
57
.github/workflows/platform-dev-deploy.yml
vendored
57
.github/workflows/platform-dev-deploy.yml
vendored
@@ -1,57 +0,0 @@
|
||||
name: Dev Deploy PR Event Dispatcher
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, synchronize, closed]
|
||||
issue_comment:
|
||||
types: [created]
|
||||
|
||||
jobs:
|
||||
dispatch:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check if should dispatch
|
||||
id: check
|
||||
if: >-
|
||||
github.event.issue.pull_request &&
|
||||
github.event.comment.body == '!deploy' &&
|
||||
(
|
||||
github.event.comment.user.login == github.event.issue.user.login ||
|
||||
github.event.comment.author_association == 'OWNER' ||
|
||||
github.event.comment.author_association == 'MEMBER' ||
|
||||
github.event.comment.author_association == 'COLLABORATOR'
|
||||
)
|
||||
run: |
|
||||
echo "should_dispatch=true" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Dispatch PR Event
|
||||
if: steps.check.outputs.should_dispatch == 'true'
|
||||
uses: peter-evans/repository-dispatch@v2
|
||||
with:
|
||||
token: ${{ secrets.DISPATCH_TOKEN }}
|
||||
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
|
||||
event-type: pr-event
|
||||
client-payload: |
|
||||
{
|
||||
"action": "deploy",
|
||||
"pr_number": "${{ github.event.pull_request.number }}",
|
||||
"pr_title": "${{ github.event.pull_request.title }}",
|
||||
"pr_state": "${{ github.event.pull_request.state }}",
|
||||
"repo": "${{ github.repository }}"
|
||||
}
|
||||
|
||||
- name: Dispatch PR Closure Event
|
||||
if: github.event.action == 'closed' && contains(github.event.pull_request.comments.*.body, '!deploy')
|
||||
uses: peter-evans/repository-dispatch@v2
|
||||
with:
|
||||
token: ${{ secrets.DISPATCH_TOKEN }}
|
||||
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
|
||||
event-type: pr-event
|
||||
client-payload: |
|
||||
{
|
||||
"action": "undeploy",
|
||||
"pr_number": "${{ github.event.pull_request.number }}",
|
||||
"pr_title": "${{ github.event.pull_request.title }}",
|
||||
"pr_state": "${{ github.event.pull_request.state }}",
|
||||
"repo": "${{ github.repository }}"
|
||||
}
|
||||
44
.github/workflows/platform-frontend-ci.yml
vendored
44
.github/workflows/platform-frontend-ci.yml
vendored
@@ -29,13 +29,14 @@ jobs:
|
||||
with:
|
||||
node-version: "21"
|
||||
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
yarn install --frozen-lockfile
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Run lint
|
||||
run: |
|
||||
yarn lint
|
||||
run: pnpm lint
|
||||
|
||||
type-check:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -48,13 +49,14 @@ jobs:
|
||||
with:
|
||||
node-version: "21"
|
||||
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
yarn install --frozen-lockfile
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Run tsc check
|
||||
run: |
|
||||
yarn type-check
|
||||
run: pnpm type-check
|
||||
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -74,6 +76,9 @@ jobs:
|
||||
with:
|
||||
node-version: "21"
|
||||
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Free Disk Space (Ubuntu)
|
||||
uses: jlumbroso/free-disk-space@main
|
||||
with:
|
||||
@@ -93,25 +98,24 @@ jobs:
|
||||
docker compose -f ../docker-compose.yml up -d
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
yarn install --frozen-lockfile
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Setup Builder .env
|
||||
run: |
|
||||
cp .env.example .env
|
||||
- name: Setup .env
|
||||
run: cp .env.example .env
|
||||
|
||||
- name: Build frontend
|
||||
run: pnpm build --turbo
|
||||
# uses Turbopack, much faster and safe enough for a test pipeline
|
||||
|
||||
- name: Install Browser '${{ matrix.browser }}'
|
||||
run: yarn playwright install --with-deps ${{ matrix.browser }}
|
||||
run: pnpm playwright install --with-deps ${{ matrix.browser }}
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 20
|
||||
run: |
|
||||
yarn test --project=${{ matrix.browser }}
|
||||
- name: Run Playwright tests
|
||||
run: pnpm test:no-build --project=${{ matrix.browser }}
|
||||
|
||||
- name: Print Final Docker Compose logs
|
||||
if: always()
|
||||
run: |
|
||||
docker compose -f ../docker-compose.yml logs
|
||||
run: docker compose -f ../docker-compose.yml logs
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
if: ${{ !cancelled() }}
|
||||
|
||||
60
.github/workflows/scripts/get_package_version_from_lockfile.py
vendored
Normal file
60
.github/workflows/scripts/get_package_version_from_lockfile.py
vendored
Normal file
@@ -0,0 +1,60 @@
|
||||
#!/usr/bin/env python3
|
||||
import sys
|
||||
|
||||
if sys.version_info < (3, 11):
|
||||
print("Python version 3.11 or higher required")
|
||||
sys.exit(1)
|
||||
|
||||
import tomllib
|
||||
|
||||
|
||||
def get_package_version(package_name: str, lockfile_path: str) -> str | None:
|
||||
"""Extract package version from poetry.lock file."""
|
||||
try:
|
||||
if lockfile_path == "-":
|
||||
data = tomllib.load(sys.stdin.buffer)
|
||||
else:
|
||||
with open(lockfile_path, "rb") as f:
|
||||
data = tomllib.load(f)
|
||||
except FileNotFoundError:
|
||||
print(f"Error: File '{lockfile_path}' not found", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
except tomllib.TOMLDecodeError as e:
|
||||
print(f"Error parsing TOML file: {e}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
except Exception as e:
|
||||
print(f"Error reading file: {e}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
# Look for the package in the packages list
|
||||
packages = data.get("package", [])
|
||||
for package in packages:
|
||||
if package.get("name", "").lower() == package_name.lower():
|
||||
return package.get("version")
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def main():
|
||||
if len(sys.argv) not in (2, 3):
|
||||
print(
|
||||
"Usages: python get_package_version_from_lockfile.py <package name> [poetry.lock path]\n"
|
||||
" cat poetry.lock | python get_package_version_from_lockfile.py <package name> -",
|
||||
file=sys.stderr,
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
package_name = sys.argv[1]
|
||||
lockfile_path = sys.argv[2] if len(sys.argv) == 3 else "poetry.lock"
|
||||
|
||||
version = get_package_version(package_name, lockfile_path)
|
||||
|
||||
if version:
|
||||
print(version)
|
||||
else:
|
||||
print(f"Package '{package_name}' not found in {lockfile_path}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -235,7 +235,7 @@ repos:
|
||||
hooks:
|
||||
- id: tsc
|
||||
name: Typecheck - AutoGPT Platform - Frontend
|
||||
entry: bash -c 'cd autogpt_platform/frontend && npm run type-check'
|
||||
entry: bash -c 'cd autogpt_platform/frontend && pnpm type-check'
|
||||
files: ^autogpt_platform/frontend/
|
||||
types: [file]
|
||||
language: system
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
This guide provides context for Codex when updating the **autogpt_platform** folder.
|
||||
|
||||
## Directory overview
|
||||
|
||||
- `autogpt_platform/backend` – FastAPI based backend service.
|
||||
- `autogpt_platform/autogpt_libs` – Shared Python libraries.
|
||||
- `autogpt_platform/frontend` – Next.js + Typescript frontend.
|
||||
@@ -11,12 +12,14 @@ This guide provides context for Codex when updating the **autogpt_platform** fol
|
||||
See `docs/content/platform/getting-started.md` for setup instructions.
|
||||
|
||||
## Code style
|
||||
|
||||
- Format Python code with `poetry run format`.
|
||||
- Format frontend code using `yarn format`.
|
||||
- Format frontend code using `pnpm format`.
|
||||
|
||||
## Testing
|
||||
|
||||
- Backend: `poetry run test` (runs pytest with a docker based postgres + prisma).
|
||||
- Frontend: `yarn test` or `yarn test-ui` for Playwright tests. See `docs/content/platform/contributing/tests.md` for tips.
|
||||
- Frontend: `pnpm test` or `pnpm test-ui` for Playwright tests. See `docs/content/platform/contributing/tests.md` for tips.
|
||||
|
||||
Always run the relevant linters and tests before committing.
|
||||
Use conventional commit messages for all commits (e.g. `feat(backend): add API`).
|
||||
@@ -38,6 +41,7 @@ Use conventional commit messages for all commits (e.g. `feat(backend): add API`)
|
||||
- blocks
|
||||
|
||||
## Pull requests
|
||||
|
||||
- Use the template in `.github/PULL_REQUEST_TEMPLATE.md`.
|
||||
- Rely on the pre-commit checks for linting and formatting
|
||||
- Fill out the **Changes** section and the checklist.
|
||||
@@ -47,4 +51,3 @@ Use conventional commit messages for all commits (e.g. `feat(backend): add API`)
|
||||
- For changes touching `data/*.py`, validate user ID checks or explain why not needed.
|
||||
- If adding protected frontend routes, update `frontend/lib/supabase/middleware.ts`.
|
||||
- Use the linear ticket branch structure if given codex/open-1668-resume-dropped-runs
|
||||
|
||||
|
||||
132
autogpt_platform/CLAUDE.md
Normal file
132
autogpt_platform/CLAUDE.md
Normal file
@@ -0,0 +1,132 @@
|
||||
# CLAUDE.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
|
||||
## Repository Overview
|
||||
|
||||
AutoGPT Platform is a monorepo containing:
|
||||
- **Backend** (`/backend`): Python FastAPI server with async support
|
||||
- **Frontend** (`/frontend`): Next.js React application
|
||||
- **Shared Libraries** (`/autogpt_libs`): Common Python utilities
|
||||
|
||||
## Essential Commands
|
||||
|
||||
### Backend Development
|
||||
```bash
|
||||
# Install dependencies
|
||||
cd backend && poetry install
|
||||
|
||||
# Run database migrations
|
||||
poetry run prisma migrate dev
|
||||
|
||||
# Start all services (database, redis, rabbitmq)
|
||||
docker compose up -d
|
||||
|
||||
# Run the backend server
|
||||
poetry run serve
|
||||
|
||||
# Run tests
|
||||
poetry run test
|
||||
|
||||
# Run specific test
|
||||
poetry run pytest path/to/test_file.py::test_function_name
|
||||
|
||||
# Lint and format
|
||||
poetry run format # Black + isort
|
||||
poetry run lint # ruff
|
||||
```
|
||||
More details can be found in TESTING.md
|
||||
|
||||
#### Creating/Updating Snapshots
|
||||
|
||||
When you first write a test or when the expected output changes:
|
||||
|
||||
```bash
|
||||
poetry run pytest path/to/test.py --snapshot-update
|
||||
```
|
||||
|
||||
⚠️ **Important**: Always review snapshot changes before committing! Use `git diff` to verify the changes are expected.
|
||||
|
||||
|
||||
### Frontend Development
|
||||
```bash
|
||||
# Install dependencies
|
||||
cd frontend && npm install
|
||||
|
||||
# Start development server
|
||||
npm run dev
|
||||
|
||||
# Run E2E tests
|
||||
npm run test
|
||||
|
||||
# Run Storybook for component development
|
||||
npm run storybook
|
||||
|
||||
# Build production
|
||||
npm run build
|
||||
|
||||
# Type checking
|
||||
npm run type-check
|
||||
```
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
### Backend Architecture
|
||||
- **API Layer**: FastAPI with REST and WebSocket endpoints
|
||||
- **Database**: PostgreSQL with Prisma ORM, includes pgvector for embeddings
|
||||
- **Queue System**: RabbitMQ for async task processing
|
||||
- **Execution Engine**: Separate executor service processes agent workflows
|
||||
- **Authentication**: JWT-based with Supabase integration
|
||||
|
||||
### Frontend Architecture
|
||||
- **Framework**: Next.js App Router with React Server Components
|
||||
- **State Management**: React hooks + Supabase client for real-time updates
|
||||
- **Workflow Builder**: Visual graph editor using @xyflow/react
|
||||
- **UI Components**: Radix UI primitives with Tailwind CSS styling
|
||||
- **Feature Flags**: LaunchDarkly integration
|
||||
|
||||
### Key Concepts
|
||||
1. **Agent Graphs**: Workflow definitions stored as JSON, executed by the backend
|
||||
2. **Blocks**: Reusable components in `/backend/blocks/` that perform specific tasks
|
||||
3. **Integrations**: OAuth and API connections stored per user
|
||||
4. **Store**: Marketplace for sharing agent templates
|
||||
|
||||
### Testing Approach
|
||||
- Backend uses pytest with snapshot testing for API responses
|
||||
- Test files are colocated with source files (`*_test.py`)
|
||||
- Frontend uses Playwright for E2E tests
|
||||
- Component testing via Storybook
|
||||
|
||||
### Database Schema
|
||||
Key models (defined in `/backend/schema.prisma`):
|
||||
- `User`: Authentication and profile data
|
||||
- `AgentGraph`: Workflow definitions with version control
|
||||
- `AgentGraphExecution`: Execution history and results
|
||||
- `AgentNode`: Individual nodes in a workflow
|
||||
- `StoreListing`: Marketplace listings for sharing agents
|
||||
|
||||
### Environment Configuration
|
||||
- Backend: `.env` file in `/backend`
|
||||
- Frontend: `.env.local` file in `/frontend`
|
||||
- Both require Supabase credentials and API keys for various services
|
||||
|
||||
### Common Development Tasks
|
||||
|
||||
**Adding a new block:**
|
||||
1. Create new file in `/backend/backend/blocks/`
|
||||
2. Inherit from `Block` base class
|
||||
3. Define input/output schemas
|
||||
4. Implement `run` method
|
||||
5. Register in block registry
|
||||
|
||||
**Modifying the API:**
|
||||
1. Update route in `/backend/backend/server/routers/`
|
||||
2. Add/update Pydantic models in same directory
|
||||
3. Write tests alongside the route file
|
||||
4. Run `poetry run test` to verify
|
||||
|
||||
**Frontend feature development:**
|
||||
1. Components go in `/frontend/src/components/`
|
||||
2. Use existing UI components from `/frontend/src/components/ui/`
|
||||
3. Add Storybook stories for new components
|
||||
4. Test with Playwright if user-facing
|
||||
@@ -15,44 +15,57 @@ Welcome to the AutoGPT Platform - a powerful system for creating and running AI
|
||||
To run the AutoGPT Platform, follow these steps:
|
||||
|
||||
1. Clone this repository to your local machine and navigate to the `autogpt_platform` directory within the repository:
|
||||
|
||||
```
|
||||
git clone <https://github.com/Significant-Gravitas/AutoGPT.git | git@github.com:Significant-Gravitas/AutoGPT.git>
|
||||
cd AutoGPT/autogpt_platform
|
||||
```
|
||||
|
||||
2. Run the following command:
|
||||
|
||||
```
|
||||
cp .env.example .env
|
||||
```
|
||||
|
||||
This command will copy the `.env.example` file to `.env`. You can modify the `.env` file to add your own environment variables.
|
||||
|
||||
3. Run the following command:
|
||||
|
||||
```
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
This command will start all the necessary backend services defined in the `docker-compose.yml` file in detached mode.
|
||||
|
||||
4. Navigate to `frontend` within the `autogpt_platform` directory:
|
||||
|
||||
```
|
||||
cd frontend
|
||||
```
|
||||
|
||||
You will need to run your frontend application separately on your local machine.
|
||||
|
||||
5. Run the following command:
|
||||
5. Run the following command:
|
||||
|
||||
```
|
||||
cp .env.example .env.local
|
||||
```
|
||||
|
||||
This command will copy the `.env.example` file to `.env.local` in the `frontend` directory. You can modify the `.env.local` within this folder to add your own environment variables for the frontend application.
|
||||
|
||||
6. Run the following command:
|
||||
|
||||
Enable corepack and install dependencies by running:
|
||||
|
||||
```
|
||||
npm install
|
||||
npm run dev
|
||||
corepack enable
|
||||
pnpm i
|
||||
```
|
||||
This command will install the necessary dependencies and start the frontend application in development mode.
|
||||
If you are using Yarn, you can run the following commands instead:
|
||||
|
||||
Then start the frontend application in development mode:
|
||||
|
||||
```
|
||||
yarn install && yarn dev
|
||||
pnpm dev
|
||||
```
|
||||
|
||||
7. Open your browser and navigate to `http://localhost:3000` to access the AutoGPT Platform frontend.
|
||||
@@ -68,43 +81,52 @@ Here are some useful Docker Compose commands for managing your AutoGPT Platform:
|
||||
- `docker compose down`: Stop and remove containers, networks, and volumes.
|
||||
- `docker compose watch`: Watch for changes in your services and automatically update them.
|
||||
|
||||
|
||||
### Sample Scenarios
|
||||
|
||||
Here are some common scenarios where you might use multiple Docker Compose commands:
|
||||
|
||||
1. Updating and restarting a specific service:
|
||||
|
||||
```
|
||||
docker compose build api_srv
|
||||
docker compose up -d --no-deps api_srv
|
||||
```
|
||||
|
||||
This rebuilds the `api_srv` service and restarts it without affecting other services.
|
||||
|
||||
2. Viewing logs for troubleshooting:
|
||||
|
||||
```
|
||||
docker compose logs -f api_srv ws_srv
|
||||
```
|
||||
|
||||
This shows and follows the logs for both `api_srv` and `ws_srv` services.
|
||||
|
||||
3. Scaling a service for increased load:
|
||||
|
||||
```
|
||||
docker compose up -d --scale executor=3
|
||||
```
|
||||
|
||||
This scales the `executor` service to 3 instances to handle increased load.
|
||||
|
||||
4. Stopping the entire system for maintenance:
|
||||
|
||||
```
|
||||
docker compose stop
|
||||
docker compose rm -f
|
||||
docker compose pull
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
This stops all services, removes containers, pulls the latest images, and restarts the system.
|
||||
|
||||
5. Developing with live updates:
|
||||
|
||||
```
|
||||
docker compose watch
|
||||
```
|
||||
|
||||
This watches for changes in your code and automatically updates the relevant services.
|
||||
|
||||
6. Checking the status of services:
|
||||
@@ -115,7 +137,6 @@ Here are some common scenarios where you might use multiple Docker Compose comma
|
||||
|
||||
These scenarios demonstrate how to use Docker Compose commands in combination to manage your AutoGPT Platform effectively.
|
||||
|
||||
|
||||
### Persisting Data
|
||||
|
||||
To persist data for PostgreSQL and Redis, you can modify the `docker-compose.yml` file to add volumes. Here's how:
|
||||
|
||||
237
autogpt_platform/backend/TESTING.md
Normal file
237
autogpt_platform/backend/TESTING.md
Normal file
@@ -0,0 +1,237 @@
|
||||
# Backend Testing Guide
|
||||
|
||||
This guide covers testing practices for the AutoGPT Platform backend, with a focus on snapshot testing for API endpoints.
|
||||
|
||||
## Table of Contents
|
||||
- [Overview](#overview)
|
||||
- [Running Tests](#running-tests)
|
||||
- [Snapshot Testing](#snapshot-testing)
|
||||
- [Writing Tests for API Routes](#writing-tests-for-api-routes)
|
||||
- [Best Practices](#best-practices)
|
||||
|
||||
## Overview
|
||||
|
||||
The backend uses pytest for testing with the following key libraries:
|
||||
- `pytest` - Test framework
|
||||
- `pytest-asyncio` - Async test support
|
||||
- `pytest-mock` - Mocking support
|
||||
- `pytest-snapshot` - Snapshot testing for API responses
|
||||
|
||||
## Running Tests
|
||||
|
||||
### Run all tests
|
||||
```bash
|
||||
poetry run test
|
||||
```
|
||||
|
||||
### Run specific test file
|
||||
```bash
|
||||
poetry run pytest path/to/test_file.py
|
||||
```
|
||||
|
||||
### Run with verbose output
|
||||
```bash
|
||||
poetry run pytest -v
|
||||
```
|
||||
|
||||
### Run with coverage
|
||||
```bash
|
||||
poetry run pytest --cov=backend
|
||||
```
|
||||
|
||||
## Snapshot Testing
|
||||
|
||||
Snapshot testing captures the output of your code and compares it against previously saved snapshots. This is particularly useful for testing API responses.
|
||||
|
||||
### How Snapshot Testing Works
|
||||
|
||||
1. First run: Creates snapshot files in `snapshots/` directories
|
||||
2. Subsequent runs: Compares output against saved snapshots
|
||||
3. Changes detected: Test fails if output differs from snapshot
|
||||
|
||||
### Creating/Updating Snapshots
|
||||
|
||||
When you first write a test or when the expected output changes:
|
||||
|
||||
```bash
|
||||
poetry run pytest path/to/test.py --snapshot-update
|
||||
```
|
||||
|
||||
⚠️ **Important**: Always review snapshot changes before committing! Use `git diff` to verify the changes are expected.
|
||||
|
||||
### Snapshot Test Example
|
||||
|
||||
```python
|
||||
import json
|
||||
from pytest_snapshot.plugin import Snapshot
|
||||
|
||||
def test_api_endpoint(snapshot: Snapshot):
|
||||
response = client.get("/api/endpoint")
|
||||
|
||||
# Snapshot the response
|
||||
snapshot.snapshot_dir = "snapshots"
|
||||
snapshot.assert_match(
|
||||
json.dumps(response.json(), indent=2, sort_keys=True),
|
||||
"endpoint_response"
|
||||
)
|
||||
```
|
||||
|
||||
### Best Practices for Snapshots
|
||||
|
||||
1. **Use descriptive names**: `"user_list_response"` not `"response1"`
|
||||
2. **Sort JSON keys**: Ensures consistent snapshots
|
||||
3. **Format JSON**: Use `indent=2` for readable diffs
|
||||
4. **Exclude dynamic data**: Remove timestamps, IDs, etc. that change between runs
|
||||
|
||||
Example of excluding dynamic data:
|
||||
```python
|
||||
response_data = response.json()
|
||||
# Remove dynamic fields for snapshot
|
||||
response_data.pop("created_at", None)
|
||||
response_data.pop("id", None)
|
||||
|
||||
snapshot.snapshot_dir = "snapshots"
|
||||
snapshot.assert_match(
|
||||
json.dumps(response_data, indent=2, sort_keys=True),
|
||||
"static_response_data"
|
||||
)
|
||||
```
|
||||
|
||||
## Writing Tests for API Routes
|
||||
|
||||
### Basic Structure
|
||||
|
||||
```python
|
||||
import json
|
||||
import fastapi
|
||||
import fastapi.testclient
|
||||
import pytest
|
||||
from pytest_snapshot.plugin import Snapshot
|
||||
|
||||
from backend.server.v2.myroute import router
|
||||
|
||||
app = fastapi.FastAPI()
|
||||
app.include_router(router)
|
||||
client = fastapi.testclient.TestClient(app)
|
||||
|
||||
def test_endpoint_success(snapshot: Snapshot):
|
||||
response = client.get("/endpoint")
|
||||
assert response.status_code == 200
|
||||
|
||||
# Test specific fields
|
||||
data = response.json()
|
||||
assert data["status"] == "success"
|
||||
|
||||
# Snapshot the full response
|
||||
snapshot.snapshot_dir = "snapshots"
|
||||
snapshot.assert_match(
|
||||
json.dumps(data, indent=2, sort_keys=True),
|
||||
"endpoint_success_response"
|
||||
)
|
||||
```
|
||||
|
||||
### Testing with Authentication
|
||||
|
||||
```python
|
||||
def override_auth_middleware():
|
||||
return {"sub": "test-user-id"}
|
||||
|
||||
def override_get_user_id():
|
||||
return "test-user-id"
|
||||
|
||||
app.dependency_overrides[auth_middleware] = override_auth_middleware
|
||||
app.dependency_overrides[get_user_id] = override_get_user_id
|
||||
```
|
||||
|
||||
### Mocking External Services
|
||||
|
||||
```python
|
||||
def test_external_api_call(mocker, snapshot):
|
||||
# Mock external service
|
||||
mock_response = {"external": "data"}
|
||||
mocker.patch(
|
||||
"backend.services.external_api.call",
|
||||
return_value=mock_response
|
||||
)
|
||||
|
||||
response = client.post("/api/process")
|
||||
assert response.status_code == 200
|
||||
|
||||
snapshot.snapshot_dir = "snapshots"
|
||||
snapshot.assert_match(
|
||||
json.dumps(response.json(), indent=2, sort_keys=True),
|
||||
"process_with_external_response"
|
||||
)
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
### 1. Test Organization
|
||||
- Place tests next to the code: `routes.py` → `routes_test.py`
|
||||
- Use descriptive test names: `test_create_user_with_invalid_email`
|
||||
- Group related tests in classes when appropriate
|
||||
|
||||
### 2. Test Coverage
|
||||
- Test happy path and error cases
|
||||
- Test edge cases (empty data, invalid formats)
|
||||
- Test authentication and authorization
|
||||
|
||||
### 3. Snapshot Testing Guidelines
|
||||
- Review all snapshot changes carefully
|
||||
- Don't snapshot sensitive data
|
||||
- Keep snapshots focused and minimal
|
||||
- Update snapshots intentionally, not accidentally
|
||||
|
||||
### 4. Async Testing
|
||||
- Use regular `def` for FastAPI TestClient tests
|
||||
- Use `async def` with `@pytest.mark.asyncio` for testing async functions directly
|
||||
|
||||
### 5. Fixtures
|
||||
Create reusable fixtures for common test data:
|
||||
|
||||
```python
|
||||
@pytest.fixture
|
||||
def sample_user():
|
||||
return {
|
||||
"email": "test@example.com",
|
||||
"name": "Test User"
|
||||
}
|
||||
|
||||
def test_create_user(sample_user, snapshot):
|
||||
response = client.post("/users", json=sample_user)
|
||||
# ... test implementation
|
||||
```
|
||||
|
||||
## CI/CD Integration
|
||||
|
||||
The GitHub Actions workflow automatically runs tests on:
|
||||
- Pull requests
|
||||
- Pushes to main branch
|
||||
|
||||
Snapshot tests work in CI by:
|
||||
1. Committing snapshot files to the repository
|
||||
2. CI compares against committed snapshots
|
||||
3. Fails if snapshots don't match
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Snapshot Mismatches
|
||||
- Review the diff carefully
|
||||
- If changes are expected: `poetry run pytest --snapshot-update`
|
||||
- If changes are unexpected: Fix the code causing the difference
|
||||
|
||||
### Async Test Issues
|
||||
- Ensure async functions use `@pytest.mark.asyncio`
|
||||
- Use `AsyncMock` for mocking async functions
|
||||
- FastAPI TestClient handles async automatically
|
||||
|
||||
### Import Errors
|
||||
- Check that all dependencies are in `pyproject.toml`
|
||||
- Run `poetry install` to ensure dependencies are installed
|
||||
- Verify import paths are correct
|
||||
|
||||
## Summary
|
||||
|
||||
Snapshot testing provides a powerful way to ensure API responses remain consistent. Combined with traditional assertions, it creates a robust test suite that catches regressions while remaining maintainable.
|
||||
|
||||
Remember: Good tests are as important as good code!
|
||||
@@ -21,6 +21,7 @@ logger = logging.getLogger(__name__)
|
||||
class FalModel(str, Enum):
|
||||
MOCHI = "fal-ai/mochi-v1"
|
||||
LUMA = "fal-ai/luma-dream-machine"
|
||||
VEO3 = "fal-ai/veo3"
|
||||
|
||||
|
||||
class AIVideoGeneratorBlock(Block):
|
||||
@@ -102,6 +103,8 @@ class AIVideoGeneratorBlock(Block):
|
||||
# Submit generation request
|
||||
submit_url = f"{base_url}/{input_data.model.value}"
|
||||
submit_data = {"prompt": input_data.prompt}
|
||||
if input_data.model == FalModel.VEO3:
|
||||
submit_data["generate_audio"] = True # type: ignore
|
||||
|
||||
seen_logs = set()
|
||||
|
||||
|
||||
@@ -1047,8 +1047,8 @@ class AITextSummarizerBlock(AIBlockBase):
|
||||
def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
for output in self._run(input_data, credentials):
|
||||
yield output
|
||||
for output_name, output_data in self._run(input_data, credentials):
|
||||
yield output_name, output_data
|
||||
|
||||
def _run(self, input_data: Input, credentials: APIKeyCredentials) -> BlockOutput:
|
||||
chunks = self._split_text(
|
||||
|
||||
@@ -38,7 +38,7 @@ from backend.integrations.credentials_store import (
|
||||
# =============== Configure the cost for each LLM Model call =============== #
|
||||
|
||||
MODEL_COST: dict[LlmModel, int] = {
|
||||
LlmModel.O3: 7,
|
||||
LlmModel.O3: 4,
|
||||
LlmModel.O3_MINI: 2, # $1.10 / $4.40
|
||||
LlmModel.O1: 16, # $15 / $60
|
||||
LlmModel.O1_PREVIEW: 16,
|
||||
|
||||
@@ -3,6 +3,7 @@ from collections import defaultdict
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from enum import Enum
|
||||
from multiprocessing import Manager
|
||||
from queue import Empty
|
||||
from typing import (
|
||||
Annotated,
|
||||
Any,
|
||||
@@ -621,14 +622,25 @@ async def update_graph_execution_stats(
|
||||
return GraphExecution.from_db(graph_exec)
|
||||
|
||||
|
||||
async def update_node_execution_stats(node_exec_id: str, stats: NodeExecutionStats):
|
||||
async def update_node_execution_stats(
|
||||
node_exec_id: str, stats: NodeExecutionStats
|
||||
) -> NodeExecutionResult:
|
||||
data = stats.model_dump()
|
||||
if isinstance(data["error"], Exception):
|
||||
data["error"] = str(data["error"])
|
||||
await AgentNodeExecution.prisma().update(
|
||||
|
||||
res = await AgentNodeExecution.prisma().update(
|
||||
where={"id": node_exec_id},
|
||||
data={"stats": Json(data)},
|
||||
data={
|
||||
"stats": Json(data),
|
||||
"endedTime": datetime.now(tz=timezone.utc),
|
||||
},
|
||||
include=EXECUTION_RESULT_INCLUDE,
|
||||
)
|
||||
if not res:
|
||||
raise ValueError(f"Node execution {node_exec_id} not found.")
|
||||
|
||||
return NodeExecutionResult.from_db(res)
|
||||
|
||||
|
||||
async def update_node_execution_status_batch(
|
||||
@@ -702,6 +714,16 @@ async def delete_graph_execution(
|
||||
)
|
||||
|
||||
|
||||
async def get_node_execution(node_exec_id: str) -> NodeExecutionResult | None:
|
||||
execution = await AgentNodeExecution.prisma().find_first(
|
||||
where={"id": node_exec_id},
|
||||
include=EXECUTION_RESULT_INCLUDE,
|
||||
)
|
||||
if not execution:
|
||||
return None
|
||||
return NodeExecutionResult.from_db(execution)
|
||||
|
||||
|
||||
async def get_node_executions(
|
||||
graph_exec_id: str,
|
||||
node_id: str | None = None,
|
||||
@@ -794,6 +816,12 @@ class ExecutionQueue(Generic[T]):
|
||||
def empty(self) -> bool:
|
||||
return self.queue.empty()
|
||||
|
||||
def get_or_none(self) -> T | None:
|
||||
try:
|
||||
return self.queue.get_nowait()
|
||||
except Empty:
|
||||
return None
|
||||
|
||||
|
||||
# --------------------- Event Bus --------------------- #
|
||||
|
||||
|
||||
@@ -12,6 +12,7 @@ from prisma.types import (
|
||||
AgentGraphWhereInput,
|
||||
AgentNodeCreateInput,
|
||||
AgentNodeLinkCreateInput,
|
||||
StoreListingVersionWhereInput,
|
||||
)
|
||||
from pydantic import create_model
|
||||
from pydantic.fields import computed_field
|
||||
@@ -712,23 +713,24 @@ async def get_graph(
|
||||
include=AGENT_GRAPH_INCLUDE,
|
||||
order={"version": "desc"},
|
||||
)
|
||||
|
||||
# For access, the graph must be owned by the user or listed in the store
|
||||
if graph is None or (
|
||||
graph.userId != user_id
|
||||
and not (
|
||||
await StoreListingVersion.prisma().find_first(
|
||||
where={
|
||||
"agentGraphId": graph_id,
|
||||
"agentGraphVersion": version or graph.version,
|
||||
"isDeleted": False,
|
||||
"submissionStatus": SubmissionStatus.APPROVED,
|
||||
}
|
||||
)
|
||||
)
|
||||
):
|
||||
if graph is None:
|
||||
return None
|
||||
|
||||
if graph.userId != user_id:
|
||||
store_listing_filter: StoreListingVersionWhereInput = {
|
||||
"agentGraphId": graph_id,
|
||||
"isDeleted": False,
|
||||
"submissionStatus": SubmissionStatus.APPROVED,
|
||||
}
|
||||
if version is not None:
|
||||
store_listing_filter["agentGraphVersion"] = version
|
||||
|
||||
# For access, the graph must be owned by the user or listed in the store
|
||||
if not await StoreListingVersion.prisma().find_first(
|
||||
where=store_listing_filter, order={"agentGraphVersion": "desc"}
|
||||
):
|
||||
return None
|
||||
|
||||
if include_subgraphs or for_export:
|
||||
sub_graphs = await get_sub_graphs(graph)
|
||||
return GraphModel.from_db(
|
||||
|
||||
@@ -9,6 +9,7 @@ from backend.data.execution import (
|
||||
get_graph_execution_meta,
|
||||
get_graph_executions,
|
||||
get_latest_node_execution,
|
||||
get_node_execution,
|
||||
get_node_executions,
|
||||
update_graph_execution_start_time,
|
||||
update_graph_execution_stats,
|
||||
@@ -91,6 +92,7 @@ class DatabaseManager(AppService):
|
||||
get_graph_executions = _(get_graph_executions)
|
||||
get_graph_execution_meta = _(get_graph_execution_meta)
|
||||
create_graph_execution = _(create_graph_execution)
|
||||
get_node_execution = _(get_node_execution)
|
||||
get_node_executions = _(get_node_executions)
|
||||
get_latest_node_execution = _(get_latest_node_execution)
|
||||
update_node_execution_status = _(update_node_execution_status)
|
||||
@@ -149,6 +151,7 @@ class DatabaseManagerClient(AppServiceClient):
|
||||
get_graph_executions = _(d.get_graph_executions)
|
||||
get_graph_execution_meta = _(d.get_graph_execution_meta)
|
||||
create_graph_execution = _(d.create_graph_execution)
|
||||
get_node_execution = _(d.get_node_execution)
|
||||
get_node_executions = _(d.get_node_executions)
|
||||
get_latest_node_execution = _(d.get_latest_node_execution)
|
||||
update_node_execution_status = _(d.update_node_execution_status)
|
||||
|
||||
@@ -5,10 +5,12 @@ import os
|
||||
import signal
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
from collections import defaultdict
|
||||
from concurrent.futures import Future, ProcessPoolExecutor
|
||||
from contextlib import contextmanager
|
||||
from multiprocessing.pool import AsyncResult, Pool
|
||||
from typing import TYPE_CHECKING, Any, Generator, Optional, TypeVar, cast
|
||||
from multiprocessing.pool import Pool
|
||||
from typing import TYPE_CHECKING, Optional, TypeVar, cast
|
||||
|
||||
from pika.adapters.blocking_connection import BlockingChannel
|
||||
from pika.spec import Basic, BasicProperties
|
||||
@@ -40,7 +42,13 @@ from prometheus_client import Gauge, start_http_server
|
||||
|
||||
from backend.blocks.agent import AgentExecutorBlock
|
||||
from backend.data import redis
|
||||
from backend.data.block import BlockData, BlockInput, BlockSchema, get_block
|
||||
from backend.data.block import (
|
||||
BlockData,
|
||||
BlockInput,
|
||||
BlockOutput,
|
||||
BlockSchema,
|
||||
get_block,
|
||||
)
|
||||
from backend.data.credit import UsageTransactionMetadata
|
||||
from backend.data.execution import (
|
||||
ExecutionQueue,
|
||||
@@ -55,6 +63,8 @@ from backend.executor.utils import (
|
||||
GRAPH_EXECUTION_CANCEL_QUEUE_NAME,
|
||||
GRAPH_EXECUTION_QUEUE_NAME,
|
||||
CancelExecutionEvent,
|
||||
ExecutionOutputEntry,
|
||||
NodeExecutionProgress,
|
||||
block_usage_cost,
|
||||
execution_usage_cost,
|
||||
get_execution_event_bus,
|
||||
@@ -117,18 +127,17 @@ class LogMetadata(TruncatedLogger):
|
||||
|
||||
|
||||
T = TypeVar("T")
|
||||
ExecutionStream = Generator[NodeExecutionEntry, None, None]
|
||||
|
||||
|
||||
def execute_node(
|
||||
db_client: "DatabaseManagerClient",
|
||||
node: Node,
|
||||
creds_manager: IntegrationCredentialsManager,
|
||||
data: NodeExecutionEntry,
|
||||
execution_stats: NodeExecutionStats | None = None,
|
||||
node_credentials_input_map: Optional[
|
||||
dict[str, dict[str, CredentialsMetaInput]]
|
||||
] = None,
|
||||
) -> ExecutionStream:
|
||||
) -> BlockOutput:
|
||||
"""
|
||||
Execute a node in the graph. This will trigger a block execution on a node,
|
||||
persist the execution result, and return the subsequent node to be executed.
|
||||
@@ -147,24 +156,8 @@ def execute_node(
|
||||
graph_id = data.graph_id
|
||||
node_exec_id = data.node_exec_id
|
||||
node_id = data.node_id
|
||||
|
||||
def update_execution_status(status: ExecutionStatus) -> NodeExecutionResult:
|
||||
"""Sets status and fetches+broadcasts the latest state of the node execution"""
|
||||
exec_update = db_client.update_node_execution_status(node_exec_id, status)
|
||||
send_execution_update(exec_update)
|
||||
return exec_update
|
||||
|
||||
node = db_client.get_node(node_id)
|
||||
|
||||
node_block = node.block
|
||||
|
||||
def push_output(output_name: str, output_data: Any) -> None:
|
||||
db_client.upsert_execution_output(
|
||||
node_exec_id=node_exec_id,
|
||||
output_name=output_name,
|
||||
output_data=output_data,
|
||||
)
|
||||
|
||||
log_metadata = LogMetadata(
|
||||
user_id=user_id,
|
||||
graph_eid=graph_exec_id,
|
||||
@@ -178,8 +171,7 @@ def execute_node(
|
||||
input_data, error = validate_exec(node, data.inputs, resolve_input=False)
|
||||
if input_data is None:
|
||||
log_metadata.error(f"Skip execution, input validation error: {error}")
|
||||
push_output("error", error)
|
||||
update_execution_status(ExecutionStatus.FAILED)
|
||||
yield "error", error
|
||||
return
|
||||
|
||||
# Re-shape the input data for agent block.
|
||||
@@ -196,7 +188,6 @@ def execute_node(
|
||||
input_data_str = json.dumps(input_data)
|
||||
input_size = len(input_data_str)
|
||||
log_metadata.debug("Executed node with input", input=input_data_str)
|
||||
update_execution_status(ExecutionStatus.RUNNING)
|
||||
|
||||
# Inject extra execution arguments for the blocks via kwargs
|
||||
extra_exec_kwargs: dict = {
|
||||
@@ -220,47 +211,19 @@ def execute_node(
|
||||
|
||||
output_size = 0
|
||||
try:
|
||||
outputs: dict[str, Any] = {}
|
||||
for output_name, output_data in node_block.execute(
|
||||
input_data, **extra_exec_kwargs
|
||||
):
|
||||
output_data = json.convert_pydantic_to_json(output_data)
|
||||
output_size += len(json.dumps(output_data))
|
||||
log_metadata.debug("Node produced output", **{output_name: output_data})
|
||||
push_output(output_name, output_data)
|
||||
outputs[output_name] = output_data
|
||||
for execution in _enqueue_next_nodes(
|
||||
db_client=db_client,
|
||||
node=node,
|
||||
output=(output_name, output_data),
|
||||
user_id=user_id,
|
||||
graph_exec_id=graph_exec_id,
|
||||
graph_id=graph_id,
|
||||
log_metadata=log_metadata,
|
||||
node_credentials_input_map=node_credentials_input_map,
|
||||
):
|
||||
yield execution
|
||||
|
||||
update_execution_status(ExecutionStatus.COMPLETED)
|
||||
yield output_name, output_data
|
||||
|
||||
except Exception as e:
|
||||
error_msg = str(e)
|
||||
push_output("error", error_msg)
|
||||
update_execution_status(ExecutionStatus.FAILED)
|
||||
|
||||
for execution in _enqueue_next_nodes(
|
||||
db_client=db_client,
|
||||
node=node,
|
||||
output=("error", error_msg),
|
||||
user_id=user_id,
|
||||
graph_exec_id=graph_exec_id,
|
||||
graph_id=graph_id,
|
||||
log_metadata=log_metadata,
|
||||
node_credentials_input_map=node_credentials_input_map,
|
||||
):
|
||||
yield execution
|
||||
|
||||
yield "error", error_msg
|
||||
raise e
|
||||
|
||||
finally:
|
||||
# Ensure credentials are released even if execution fails
|
||||
if creds_lock and creds_lock.locked() and creds_lock.owned():
|
||||
@@ -291,10 +254,12 @@ def _enqueue_next_nodes(
|
||||
def add_enqueued_execution(
|
||||
node_exec_id: str, node_id: str, block_id: str, data: BlockInput
|
||||
) -> NodeExecutionEntry:
|
||||
exec_update = db_client.update_node_execution_status(
|
||||
node_exec_id, ExecutionStatus.QUEUED, data
|
||||
update_node_execution_status(
|
||||
db_client=db_client,
|
||||
exec_id=node_exec_id,
|
||||
status=ExecutionStatus.QUEUED,
|
||||
execution_data=data,
|
||||
)
|
||||
send_execution_update(exec_update)
|
||||
return NodeExecutionEntry(
|
||||
user_id=user_id,
|
||||
graph_exec_id=graph_exec_id,
|
||||
@@ -306,6 +271,13 @@ def _enqueue_next_nodes(
|
||||
)
|
||||
|
||||
def register_next_executions(node_link: Link) -> list[NodeExecutionEntry]:
|
||||
try:
|
||||
return _register_next_executions(node_link)
|
||||
except Exception as e:
|
||||
log_metadata.exception(f"Failed to register next executions: {e}")
|
||||
return []
|
||||
|
||||
def _register_next_executions(node_link: Link) -> list[NodeExecutionEntry]:
|
||||
enqueued_executions = []
|
||||
next_output_name = node_link.source_name
|
||||
next_input_name = node_link.sink_name
|
||||
@@ -328,6 +300,11 @@ def _enqueue_next_nodes(
|
||||
input_name=next_input_name,
|
||||
input_data=next_data,
|
||||
)
|
||||
update_node_execution_status(
|
||||
db_client=db_client,
|
||||
exec_id=next_node_exec_id,
|
||||
status=ExecutionStatus.INCOMPLETE,
|
||||
)
|
||||
|
||||
# Complete missing static input pins data using the last execution input.
|
||||
static_link_names = {
|
||||
@@ -487,7 +464,7 @@ class Executor:
|
||||
@error_logged
|
||||
def on_node_execution(
|
||||
cls,
|
||||
q: ExecutionQueue[NodeExecutionEntry],
|
||||
q: ExecutionQueue[ExecutionOutputEntry],
|
||||
node_exec: NodeExecutionEntry,
|
||||
node_credentials_input_map: Optional[
|
||||
dict[str, dict[str, CredentialsMetaInput]]
|
||||
@@ -501,6 +478,7 @@ class Executor:
|
||||
node_id=node_exec.node_id,
|
||||
block_name="-",
|
||||
)
|
||||
node = cls.db_client.get_node(node_exec.node_id)
|
||||
|
||||
moderate_block_content(
|
||||
graph_id=node_exec.graph_id,
|
||||
@@ -513,24 +491,31 @@ class Executor:
|
||||
|
||||
execution_stats = NodeExecutionStats()
|
||||
timing_info, _ = cls._on_node_execution(
|
||||
q, node_exec, log_metadata, execution_stats, node_credentials_input_map
|
||||
q=q,
|
||||
node_exec=node_exec,
|
||||
node=node,
|
||||
log_metadata=log_metadata,
|
||||
stats=execution_stats,
|
||||
node_credentials_input_map=node_credentials_input_map,
|
||||
)
|
||||
execution_stats.walltime = timing_info.wall_time
|
||||
execution_stats.cputime = timing_info.cpu_time
|
||||
|
||||
if isinstance(execution_stats.error, Exception):
|
||||
execution_stats.error = str(execution_stats.error)
|
||||
cls.db_client.update_node_execution_stats(
|
||||
exec_update = cls.db_client.update_node_execution_stats(
|
||||
node_exec.node_exec_id, execution_stats
|
||||
)
|
||||
send_execution_update(exec_update)
|
||||
return execution_stats
|
||||
|
||||
@classmethod
|
||||
@time_measured
|
||||
def _on_node_execution(
|
||||
cls,
|
||||
q: ExecutionQueue[NodeExecutionEntry],
|
||||
q: ExecutionQueue[ExecutionOutputEntry],
|
||||
node_exec: NodeExecutionEntry,
|
||||
node: Node,
|
||||
log_metadata: LogMetadata,
|
||||
stats: NodeExecutionStats | None = None,
|
||||
node_credentials_input_map: Optional[
|
||||
@@ -539,14 +524,26 @@ class Executor:
|
||||
):
|
||||
try:
|
||||
log_metadata.info(f"Start node execution {node_exec.node_exec_id}")
|
||||
for execution in execute_node(
|
||||
update_node_execution_status(
|
||||
db_client=cls.db_client,
|
||||
exec_id=node_exec.node_exec_id,
|
||||
status=ExecutionStatus.RUNNING,
|
||||
)
|
||||
|
||||
for output_name, output_data in execute_node(
|
||||
node=node,
|
||||
creds_manager=cls.creds_manager,
|
||||
data=node_exec,
|
||||
execution_stats=stats,
|
||||
node_credentials_input_map=node_credentials_input_map,
|
||||
):
|
||||
q.add(execution)
|
||||
q.add(
|
||||
ExecutionOutputEntry(
|
||||
node=node,
|
||||
node_exec_id=node_exec.node_exec_id,
|
||||
data=(output_name, output_data),
|
||||
)
|
||||
)
|
||||
log_metadata.info(f"Finished node execution {node_exec.node_exec_id}")
|
||||
except Exception as e:
|
||||
# Avoid user error being marked as an actual error.
|
||||
@@ -712,6 +709,51 @@ class Executor:
|
||||
error = None
|
||||
finished = False
|
||||
|
||||
def drain_output_queue():
|
||||
while output := output_queue.get_or_none():
|
||||
log_metadata.debug(
|
||||
f"Received output for {output.node.id} - {output.node_exec_id}: {output.data}"
|
||||
)
|
||||
running_executions[output.node.id].add_output(output)
|
||||
|
||||
def drain_done_task(node_exec_id: str, result: object):
|
||||
if not isinstance(result, NodeExecutionStats):
|
||||
log_metadata.error(f"Unexpected result #{node_exec_id}: {type(result)}")
|
||||
return
|
||||
|
||||
nonlocal execution_stats
|
||||
execution_stats.node_count += 1
|
||||
execution_stats.nodes_cputime += result.cputime
|
||||
execution_stats.nodes_walltime += result.walltime
|
||||
if (err := result.error) and isinstance(err, Exception):
|
||||
execution_stats.node_error_count += 1
|
||||
update_node_execution_status(
|
||||
db_client=cls.db_client,
|
||||
exec_id=node_exec_id,
|
||||
status=ExecutionStatus.FAILED,
|
||||
)
|
||||
else:
|
||||
update_node_execution_status(
|
||||
db_client=cls.db_client,
|
||||
exec_id=node_exec_id,
|
||||
status=ExecutionStatus.COMPLETED,
|
||||
)
|
||||
|
||||
if _graph_exec := cls.db_client.update_graph_execution_stats(
|
||||
graph_exec_id=graph_exec.graph_exec_id,
|
||||
status=execution_status,
|
||||
stats=execution_stats,
|
||||
):
|
||||
send_execution_update(_graph_exec)
|
||||
else:
|
||||
logger.error(
|
||||
"Callback for "
|
||||
f"finished node execution #{node_exec_id} "
|
||||
"could not update execution stats "
|
||||
f"for graph execution #{graph_exec.graph_exec_id}; "
|
||||
f"triggered while graph exec status = {execution_status}"
|
||||
)
|
||||
|
||||
def cancel_handler():
|
||||
nonlocal execution_status
|
||||
|
||||
@@ -736,61 +778,27 @@ class Executor:
|
||||
amount=1,
|
||||
)
|
||||
|
||||
queue = ExecutionQueue[NodeExecutionEntry]()
|
||||
output_queue = ExecutionQueue[ExecutionOutputEntry]()
|
||||
execution_queue = ExecutionQueue[NodeExecutionEntry]()
|
||||
for node_exec in cls.db_client.get_node_executions(
|
||||
graph_exec.graph_exec_id,
|
||||
statuses=[ExecutionStatus.RUNNING, ExecutionStatus.QUEUED],
|
||||
):
|
||||
queue.add(node_exec.to_node_execution_entry())
|
||||
execution_queue.add(node_exec.to_node_execution_entry())
|
||||
|
||||
running_executions: dict[str, AsyncResult] = {}
|
||||
running_executions: dict[str, NodeExecutionProgress] = defaultdict(
|
||||
lambda: NodeExecutionProgress(
|
||||
drain_output_queue=drain_output_queue,
|
||||
drain_done_task=drain_done_task,
|
||||
)
|
||||
)
|
||||
|
||||
def make_exec_callback(exec_data: NodeExecutionEntry):
|
||||
def callback(result: object):
|
||||
running_executions.pop(exec_data.node_id)
|
||||
|
||||
if not isinstance(result, NodeExecutionStats):
|
||||
return
|
||||
|
||||
nonlocal execution_stats
|
||||
execution_stats.node_count += 1
|
||||
execution_stats.nodes_cputime += result.cputime
|
||||
execution_stats.nodes_walltime += result.walltime
|
||||
if (err := result.error) and isinstance(err, Exception):
|
||||
execution_stats.node_error_count += 1
|
||||
|
||||
if _graph_exec := cls.db_client.update_graph_execution_stats(
|
||||
graph_exec_id=exec_data.graph_exec_id,
|
||||
status=execution_status,
|
||||
stats=execution_stats,
|
||||
):
|
||||
send_execution_update(_graph_exec)
|
||||
else:
|
||||
logger.error(
|
||||
"Callback for "
|
||||
f"finished node execution #{exec_data.node_exec_id} "
|
||||
"could not update execution stats "
|
||||
f"for graph execution #{exec_data.graph_exec_id}; "
|
||||
f"triggered while graph exec status = {execution_status}"
|
||||
)
|
||||
|
||||
return callback
|
||||
|
||||
while not queue.empty():
|
||||
while not execution_queue.empty():
|
||||
if cancel.is_set():
|
||||
execution_status = ExecutionStatus.TERMINATED
|
||||
return execution_stats, execution_status, error
|
||||
|
||||
queued_node_exec = queue.get()
|
||||
|
||||
# Avoid parallel execution of the same node.
|
||||
execution = running_executions.get(queued_node_exec.node_id)
|
||||
if execution and not execution.ready():
|
||||
# TODO (performance improvement):
|
||||
# Wait for the completion of the same node execution is blocking.
|
||||
# To improve this we need a separate queue for each node.
|
||||
# Re-enqueueing the data back to the queue will disrupt the order.
|
||||
execution.wait()
|
||||
queued_node_exec = execution_queue.get()
|
||||
|
||||
log_metadata.debug(
|
||||
f"Dispatching node execution {queued_node_exec.node_exec_id} "
|
||||
@@ -810,12 +818,12 @@ class Executor:
|
||||
output_name="error",
|
||||
output_data=str(error),
|
||||
)
|
||||
|
||||
execution_status = ExecutionStatus.FAILED
|
||||
exec_update = cls.db_client.update_node_execution_status(
|
||||
node_exec_id, execution_status
|
||||
update_node_execution_status(
|
||||
db_client=cls.db_client,
|
||||
exec_id=node_exec_id,
|
||||
status=ExecutionStatus.FAILED,
|
||||
)
|
||||
send_execution_update(exec_update)
|
||||
execution_status = ExecutionStatus.FAILED
|
||||
|
||||
cls._handle_low_balance_notif(
|
||||
graph_exec.user_id,
|
||||
@@ -838,34 +846,57 @@ class Executor:
|
||||
)
|
||||
|
||||
# Initiate node execution
|
||||
running_executions[queued_node_exec.node_id] = cls.executor.apply_async(
|
||||
cls.on_node_execution,
|
||||
(queue, queued_node_exec, node_creds_map),
|
||||
callback=make_exec_callback(queued_node_exec),
|
||||
running_executions[queued_node_exec.node_id].add_task(
|
||||
queued_node_exec.node_exec_id,
|
||||
cls.executor.apply_async(
|
||||
cls.on_node_execution,
|
||||
(output_queue, queued_node_exec, node_creds_map),
|
||||
),
|
||||
)
|
||||
|
||||
# Avoid terminating graph execution when some nodes are still running.
|
||||
while queue.empty() and running_executions:
|
||||
while execution_queue.empty() and running_executions:
|
||||
log_metadata.debug(
|
||||
f"Queue empty; running nodes: {list(running_executions.keys())}"
|
||||
)
|
||||
|
||||
# Register next node executions from running_executions.
|
||||
for node_id, execution in list(running_executions.items()):
|
||||
if cancel.is_set():
|
||||
execution_status = ExecutionStatus.TERMINATED
|
||||
return execution_stats, execution_status, error
|
||||
|
||||
if not queue.empty():
|
||||
break # yield to parent loop to execute new queue items
|
||||
|
||||
log_metadata.debug(f"Waiting on execution of node {node_id}")
|
||||
execution.wait(3)
|
||||
while output := execution.pop_output():
|
||||
cls._process_node_output(
|
||||
output=output,
|
||||
node_id=node_id,
|
||||
graph_exec=graph_exec,
|
||||
log_metadata=log_metadata,
|
||||
node_creds_map=node_creds_map,
|
||||
execution_queue=execution_queue,
|
||||
)
|
||||
if not execution_queue.empty():
|
||||
break # Prioritize executing next nodes than enqueuing outputs
|
||||
|
||||
if execution.is_done():
|
||||
running_executions.pop(node_id)
|
||||
|
||||
if not execution_queue.empty():
|
||||
continue # Make sure each not is checked once
|
||||
|
||||
if execution_queue.empty() and running_executions:
|
||||
log_metadata.debug(
|
||||
"No more nodes to execute, waiting for outputs..."
|
||||
)
|
||||
time.sleep(0.1)
|
||||
|
||||
log_metadata.info(f"Finished graph execution {graph_exec.graph_exec_id}")
|
||||
execution_status = ExecutionStatus.COMPLETED
|
||||
|
||||
except Exception as e:
|
||||
error = e
|
||||
log_metadata.error(
|
||||
log_metadata.exception(
|
||||
f"Failed graph execution {graph_exec.graph_exec_id}: {error}"
|
||||
)
|
||||
execution_status = ExecutionStatus.FAILED
|
||||
@@ -878,6 +909,61 @@ class Executor:
|
||||
clean_exec_files(graph_exec.graph_exec_id)
|
||||
return execution_stats, execution_status, error
|
||||
|
||||
@classmethod
|
||||
def _process_node_output(
|
||||
cls,
|
||||
output: ExecutionOutputEntry,
|
||||
node_id: str,
|
||||
graph_exec: GraphExecutionEntry,
|
||||
log_metadata: LogMetadata,
|
||||
node_creds_map: Optional[dict[str, dict[str, CredentialsMetaInput]]],
|
||||
execution_queue: ExecutionQueue[NodeExecutionEntry],
|
||||
) -> None:
|
||||
"""Process a node's output, update its status, and enqueue next nodes.
|
||||
|
||||
Args:
|
||||
output: The execution output entry to process
|
||||
node_id: The ID of the node that produced the output
|
||||
graph_exec: The graph execution entry
|
||||
log_metadata: Logger metadata for consistent logging
|
||||
node_creds_map: Optional map of node credentials
|
||||
execution_queue: Queue to add next executions to
|
||||
"""
|
||||
try:
|
||||
name, data = output.data
|
||||
cls.db_client.upsert_execution_output(
|
||||
node_exec_id=output.node_exec_id,
|
||||
output_name=name,
|
||||
output_data=data,
|
||||
)
|
||||
if exec_update := cls.db_client.get_node_execution(output.node_exec_id):
|
||||
send_execution_update(exec_update)
|
||||
|
||||
log_metadata.debug(f"Enqueue nodes for {node_id}: {output}")
|
||||
for next_execution in _enqueue_next_nodes(
|
||||
db_client=cls.db_client,
|
||||
node=output.node,
|
||||
output=output.data,
|
||||
user_id=graph_exec.user_id,
|
||||
graph_exec_id=graph_exec.graph_exec_id,
|
||||
graph_id=graph_exec.graph_id,
|
||||
log_metadata=log_metadata,
|
||||
node_credentials_input_map=node_creds_map,
|
||||
):
|
||||
execution_queue.add(next_execution)
|
||||
except Exception as e:
|
||||
log_metadata.exception(f"Failed to process node output: {e}")
|
||||
cls.db_client.upsert_execution_output(
|
||||
node_exec_id=output.node_exec_id,
|
||||
output_name="error",
|
||||
output_data=str(e),
|
||||
)
|
||||
update_node_execution_status(
|
||||
db_client=cls.db_client,
|
||||
exec_id=output.node_exec_id,
|
||||
status=ExecutionStatus.FAILED,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def _handle_agent_run_notif(
|
||||
cls,
|
||||
@@ -1142,6 +1228,20 @@ def send_execution_update(entry: GraphExecution | NodeExecutionResult | None):
|
||||
return get_execution_event_bus().publish(entry)
|
||||
|
||||
|
||||
def update_node_execution_status(
|
||||
db_client: "DatabaseManagerClient",
|
||||
exec_id: str,
|
||||
status: ExecutionStatus,
|
||||
execution_data: BlockInput | None = None,
|
||||
) -> NodeExecutionResult:
|
||||
"""Sets status and fetches+broadcasts the latest state of the node execution"""
|
||||
exec_update = db_client.update_node_execution_status(
|
||||
exec_id, status, execution_data
|
||||
)
|
||||
send_execution_update(exec_update)
|
||||
return exec_update
|
||||
|
||||
|
||||
@contextmanager
|
||||
def synchronized(key: str, timeout: int = 60):
|
||||
lock: RedisLock = redis.get_redis().lock(f"lock:{key}", timeout=timeout)
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Any, Optional, cast
|
||||
from collections import defaultdict
|
||||
from multiprocessing.pool import AsyncResult
|
||||
from typing import TYPE_CHECKING, Any, Callable, Optional, cast
|
||||
|
||||
from autogpt_libs.utils.cache import thread_cached
|
||||
from pydantic import BaseModel
|
||||
@@ -174,68 +176,195 @@ def _is_cost_filter_match(cost_filter: BlockInput, input_data: BlockInput) -> bo
|
||||
|
||||
# ============ Execution Input Helpers ============ #
|
||||
|
||||
# --------------------------------------------------------------------------- #
|
||||
# Delimiters
|
||||
# --------------------------------------------------------------------------- #
|
||||
|
||||
LIST_SPLIT = "_$_"
|
||||
DICT_SPLIT = "_#_"
|
||||
OBJC_SPLIT = "_@_"
|
||||
|
||||
_DELIMS = (LIST_SPLIT, DICT_SPLIT, OBJC_SPLIT)
|
||||
|
||||
# --------------------------------------------------------------------------- #
|
||||
# Tokenisation utilities
|
||||
# --------------------------------------------------------------------------- #
|
||||
|
||||
|
||||
def _next_delim(s: str) -> tuple[str | None, int]:
|
||||
"""
|
||||
Return the *earliest* delimiter appearing in `s` and its index.
|
||||
|
||||
If none present → (None, -1).
|
||||
"""
|
||||
first: str | None = None
|
||||
pos = len(s) # sentinel: larger than any real index
|
||||
for d in _DELIMS:
|
||||
i = s.find(d)
|
||||
if 0 <= i < pos:
|
||||
first, pos = d, i
|
||||
return first, (pos if first else -1)
|
||||
|
||||
|
||||
def _tokenise(path: str) -> list[tuple[str, str]] | None:
|
||||
"""
|
||||
Convert the raw path string (starting with a delimiter) into
|
||||
[ (delimiter, identifier), … ] or None if the syntax is malformed.
|
||||
"""
|
||||
tokens: list[tuple[str, str]] = []
|
||||
while path:
|
||||
# 1. Which delimiter starts this chunk?
|
||||
delim = next((d for d in _DELIMS if path.startswith(d)), None)
|
||||
if delim is None:
|
||||
return None # invalid syntax
|
||||
|
||||
# 2. Slice off the delimiter, then up to the next delimiter (or EOS)
|
||||
path = path[len(delim) :]
|
||||
nxt_delim, pos = _next_delim(path)
|
||||
token, path = (
|
||||
path[: pos if pos != -1 else len(path)],
|
||||
path[pos if pos != -1 else len(path) :],
|
||||
)
|
||||
if token == "":
|
||||
return None # empty identifier is invalid
|
||||
tokens.append((delim, token))
|
||||
return tokens
|
||||
|
||||
|
||||
# --------------------------------------------------------------------------- #
|
||||
# Public API – parsing (flattened ➜ concrete)
|
||||
# --------------------------------------------------------------------------- #
|
||||
|
||||
|
||||
def parse_execution_output(output: BlockData, name: str) -> Any | None:
|
||||
"""
|
||||
Extracts partial output data by name from a given BlockData.
|
||||
Retrieve a nested value out of `output` using the flattened *name*.
|
||||
|
||||
The function supports extracting data from lists, dictionaries, and objects
|
||||
using specific naming conventions:
|
||||
- For lists: <output_name>_$_<index>
|
||||
- For dictionaries: <output_name>_#_<key>
|
||||
- For objects: <output_name>_@_<attribute>
|
||||
|
||||
Args:
|
||||
output (BlockData): A tuple containing the output name and data.
|
||||
name (str): The name used to extract specific data from the output.
|
||||
|
||||
Returns:
|
||||
Any | None: The extracted data if found, otherwise None.
|
||||
|
||||
Examples:
|
||||
>>> output = ("result", [10, 20, 30])
|
||||
>>> parse_execution_output(output, "result_$_1")
|
||||
20
|
||||
|
||||
>>> output = ("config", {"key1": "value1", "key2": "value2"})
|
||||
>>> parse_execution_output(output, "config_#_key1")
|
||||
'value1'
|
||||
|
||||
>>> class Sample:
|
||||
... attr1 = "value1"
|
||||
... attr2 = "value2"
|
||||
>>> output = ("object", Sample())
|
||||
>>> parse_execution_output(output, "object_@_attr1")
|
||||
'value1'
|
||||
On any failure (wrong name, wrong type, out-of-range, bad path)
|
||||
returns **None**.
|
||||
"""
|
||||
output_name, output_data = output
|
||||
base_name, data = output
|
||||
|
||||
if name == output_name:
|
||||
return output_data
|
||||
# Exact match → whole object
|
||||
if name == base_name:
|
||||
return data
|
||||
|
||||
if name.startswith(f"{output_name}{LIST_SPLIT}"):
|
||||
index = int(name.split(LIST_SPLIT)[1])
|
||||
if not isinstance(output_data, list) or len(output_data) <= index:
|
||||
return None
|
||||
return output_data[int(name.split(LIST_SPLIT)[1])]
|
||||
# Must start with the expected name
|
||||
if not name.startswith(base_name):
|
||||
return None
|
||||
path = name[len(base_name) :]
|
||||
if not path:
|
||||
return None # nothing left to parse
|
||||
|
||||
if name.startswith(f"{output_name}{DICT_SPLIT}"):
|
||||
index = name.split(DICT_SPLIT)[1]
|
||||
if not isinstance(output_data, dict) or index not in output_data:
|
||||
return None
|
||||
return output_data[index]
|
||||
|
||||
if name.startswith(f"{output_name}{OBJC_SPLIT}"):
|
||||
index = name.split(OBJC_SPLIT)[1]
|
||||
if isinstance(output_data, object) and hasattr(output_data, index):
|
||||
return getattr(output_data, index)
|
||||
tokens = _tokenise(path)
|
||||
if tokens is None:
|
||||
return None
|
||||
|
||||
return None
|
||||
cur: Any = data
|
||||
for delim, ident in tokens:
|
||||
if delim == LIST_SPLIT:
|
||||
# list[index]
|
||||
try:
|
||||
idx = int(ident)
|
||||
except ValueError:
|
||||
return None
|
||||
if not isinstance(cur, list) or idx >= len(cur):
|
||||
return None
|
||||
cur = cur[idx]
|
||||
|
||||
elif delim == DICT_SPLIT:
|
||||
if not isinstance(cur, dict) or ident not in cur:
|
||||
return None
|
||||
cur = cur[ident]
|
||||
|
||||
elif delim == OBJC_SPLIT:
|
||||
if not hasattr(cur, ident):
|
||||
return None
|
||||
cur = getattr(cur, ident)
|
||||
|
||||
else:
|
||||
return None # unreachable
|
||||
|
||||
return cur
|
||||
|
||||
|
||||
def _assign(container: Any, tokens: list[tuple[str, str]], value: Any) -> Any:
|
||||
"""
|
||||
Recursive helper that *returns* the (possibly new) container with
|
||||
`value` assigned along the remaining `tokens` path.
|
||||
"""
|
||||
if not tokens:
|
||||
return value # leaf reached
|
||||
|
||||
delim, ident = tokens[0]
|
||||
rest = tokens[1:]
|
||||
|
||||
# ---------- list ----------
|
||||
if delim == LIST_SPLIT:
|
||||
try:
|
||||
idx = int(ident)
|
||||
except ValueError:
|
||||
raise ValueError("index must be an integer")
|
||||
|
||||
if container is None:
|
||||
container = []
|
||||
elif not isinstance(container, list):
|
||||
container = list(container) if hasattr(container, "__iter__") else []
|
||||
|
||||
while len(container) <= idx:
|
||||
container.append(None)
|
||||
container[idx] = _assign(container[idx], rest, value)
|
||||
return container
|
||||
|
||||
# ---------- dict ----------
|
||||
if delim == DICT_SPLIT:
|
||||
if container is None:
|
||||
container = {}
|
||||
elif not isinstance(container, dict):
|
||||
container = dict(container) if hasattr(container, "items") else {}
|
||||
container[ident] = _assign(container.get(ident), rest, value)
|
||||
return container
|
||||
|
||||
# ---------- object ----------
|
||||
if delim == OBJC_SPLIT:
|
||||
if container is None or not isinstance(container, MockObject):
|
||||
container = MockObject()
|
||||
setattr(
|
||||
container,
|
||||
ident,
|
||||
_assign(getattr(container, ident, None), rest, value),
|
||||
)
|
||||
return container
|
||||
|
||||
return value # unreachable
|
||||
|
||||
|
||||
def merge_execution_input(data: BlockInput) -> BlockInput:
|
||||
"""
|
||||
Reconstruct nested objects from a *flattened* dict of key → value.
|
||||
|
||||
Raises ValueError on syntactically invalid list indices.
|
||||
"""
|
||||
merged: BlockInput = {}
|
||||
|
||||
for key, value in data.items():
|
||||
# Split off the base name (before the first delimiter, if any)
|
||||
delim, pos = _next_delim(key)
|
||||
if delim is None:
|
||||
merged[key] = value
|
||||
continue
|
||||
|
||||
base, path = key[:pos], key[pos:]
|
||||
tokens = _tokenise(path)
|
||||
if tokens is None:
|
||||
# Invalid key; treat as scalar under the raw name
|
||||
merged[key] = value
|
||||
continue
|
||||
|
||||
merged[base] = _assign(merged.get(base), tokens, value)
|
||||
|
||||
data.update(merged)
|
||||
return data
|
||||
|
||||
|
||||
def validate_exec(
|
||||
@@ -292,77 +421,6 @@ def validate_exec(
|
||||
return data, node_block.name
|
||||
|
||||
|
||||
def merge_execution_input(data: BlockInput) -> BlockInput:
|
||||
"""
|
||||
Merges dynamic input pins into a single list, dictionary, or object based on naming patterns.
|
||||
|
||||
This function processes input keys that follow specific patterns to merge them into a unified structure:
|
||||
- `<input_name>_$_<index>` for list inputs.
|
||||
- `<input_name>_#_<index>` for dictionary inputs.
|
||||
- `<input_name>_@_<index>` for object inputs.
|
||||
|
||||
Args:
|
||||
data (BlockInput): A dictionary containing input keys and their corresponding values.
|
||||
|
||||
Returns:
|
||||
BlockInput: A dictionary with merged inputs.
|
||||
|
||||
Raises:
|
||||
ValueError: If a list index is not an integer.
|
||||
|
||||
Examples:
|
||||
>>> data = {
|
||||
... "list_$_0": "a",
|
||||
... "list_$_1": "b",
|
||||
... "dict_#_key1": "value1",
|
||||
... "dict_#_key2": "value2",
|
||||
... "object_@_attr1": "value1",
|
||||
... "object_@_attr2": "value2"
|
||||
... }
|
||||
>>> merge_execution_input(data)
|
||||
{
|
||||
"list": ["a", "b"],
|
||||
"dict": {"key1": "value1", "key2": "value2"},
|
||||
"object": <MockObject attr1="value1" attr2="value2">
|
||||
}
|
||||
"""
|
||||
|
||||
# Merge all input with <input_name>_$_<index> into a single list.
|
||||
items = list(data.items())
|
||||
|
||||
for key, value in items:
|
||||
if LIST_SPLIT not in key:
|
||||
continue
|
||||
name, index = key.split(LIST_SPLIT)
|
||||
if not index.isdigit():
|
||||
raise ValueError(f"Invalid key: {key}, #{index} index must be an integer.")
|
||||
|
||||
data[name] = data.get(name, [])
|
||||
if int(index) >= len(data[name]):
|
||||
# Pad list with empty string on missing indices.
|
||||
data[name].extend([""] * (int(index) - len(data[name]) + 1))
|
||||
data[name][int(index)] = value
|
||||
|
||||
# Merge all input with <input_name>_#_<index> into a single dict.
|
||||
for key, value in items:
|
||||
if DICT_SPLIT not in key:
|
||||
continue
|
||||
name, index = key.split(DICT_SPLIT)
|
||||
data[name] = data.get(name, {})
|
||||
data[name][index] = value
|
||||
|
||||
# Merge all input with <input_name>_@_<index> into a single object.
|
||||
for key, value in items:
|
||||
if OBJC_SPLIT not in key:
|
||||
continue
|
||||
name, index = key.split(OBJC_SPLIT)
|
||||
if name not in data or not isinstance(data[name], object):
|
||||
data[name] = MockObject()
|
||||
setattr(data[name], index, value)
|
||||
|
||||
return data
|
||||
|
||||
|
||||
def _validate_node_input_credentials(
|
||||
graph: GraphModel,
|
||||
user_id: str,
|
||||
@@ -748,3 +806,79 @@ def add_graph_execution(
|
||||
stats=GraphExecutionStats(error=str(e)),
|
||||
)
|
||||
raise
|
||||
|
||||
|
||||
# ============ Execution Output Helpers ============ #
|
||||
|
||||
|
||||
class ExecutionOutputEntry(BaseModel):
|
||||
node: Node
|
||||
node_exec_id: str
|
||||
data: BlockData
|
||||
|
||||
|
||||
class NodeExecutionProgress:
|
||||
def __init__(
|
||||
self,
|
||||
drain_output_queue: Callable[[], None],
|
||||
drain_done_task: Callable[[str, object], None],
|
||||
):
|
||||
self.output: dict[str, list[ExecutionOutputEntry]] = defaultdict(list)
|
||||
self.tasks: dict[str, AsyncResult] = {}
|
||||
self.drain_output_queue = drain_output_queue
|
||||
self.drain_done_task = drain_done_task
|
||||
|
||||
def add_task(self, node_exec_id: str, task: AsyncResult):
|
||||
self.tasks[node_exec_id] = task
|
||||
|
||||
def add_output(self, output: ExecutionOutputEntry):
|
||||
self.output[output.node_exec_id].append(output)
|
||||
|
||||
def pop_output(self) -> ExecutionOutputEntry | None:
|
||||
exec_id = self._next_exec()
|
||||
if not exec_id:
|
||||
return None
|
||||
|
||||
if self._pop_done_task(exec_id):
|
||||
return self.pop_output()
|
||||
|
||||
if next_output := self.output[exec_id]:
|
||||
return next_output.pop(0)
|
||||
|
||||
return None
|
||||
|
||||
def is_done(self, wait_time: float = 0.0) -> bool:
|
||||
exec_id = self._next_exec()
|
||||
if not exec_id:
|
||||
return True
|
||||
|
||||
if self._pop_done_task(exec_id):
|
||||
return self.is_done(wait_time)
|
||||
|
||||
if wait_time <= 0:
|
||||
return False
|
||||
|
||||
self.tasks[exec_id].wait(wait_time)
|
||||
return self.is_done(0)
|
||||
|
||||
def _pop_done_task(self, exec_id: str) -> bool:
|
||||
task = self.tasks.get(exec_id)
|
||||
if not task:
|
||||
return True
|
||||
|
||||
if not task.ready():
|
||||
return False
|
||||
|
||||
self.drain_output_queue()
|
||||
if self.output[exec_id]:
|
||||
return False
|
||||
|
||||
if task := self.tasks.pop(exec_id):
|
||||
self.drain_done_task(exec_id, task.get())
|
||||
|
||||
return True
|
||||
|
||||
def _next_exec(self) -> str | None:
|
||||
if not self.tasks:
|
||||
return None
|
||||
return next(iter(self.tasks.keys()))
|
||||
|
||||
17
autogpt_platform/backend/backend/server/conftest.py
Normal file
17
autogpt_platform/backend/backend/server/conftest.py
Normal file
@@ -0,0 +1,17 @@
|
||||
"""Common test fixtures for server tests."""
|
||||
|
||||
import pytest
|
||||
from pytest_snapshot.plugin import Snapshot
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def configured_snapshot(snapshot: Snapshot) -> Snapshot:
|
||||
"""Pre-configured snapshot fixture with standard settings."""
|
||||
snapshot.snapshot_dir = "snapshots"
|
||||
return snapshot
|
||||
|
||||
|
||||
# Test ID constants
|
||||
TEST_USER_ID = "test-user-id"
|
||||
ADMIN_USER_ID = "admin-user-id"
|
||||
TARGET_USER_ID = "target-user-id"
|
||||
@@ -120,9 +120,17 @@ def callback(
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Code->Token exchange failed for provider {provider.value}: {e}")
|
||||
logger.exception(
|
||||
"OAuth callback for provider %s failed during code exchange: %s. Confirm provider credentials.",
|
||||
provider.value,
|
||||
e,
|
||||
)
|
||||
raise HTTPException(
|
||||
status_code=400, detail=f"Failed to exchange code for tokens: {str(e)}"
|
||||
status_code=400,
|
||||
detail={
|
||||
"message": str(e),
|
||||
"hint": "Verify OAuth configuration and try again.",
|
||||
},
|
||||
)
|
||||
|
||||
# TODO: Allow specifying `title` to set on `credentials`
|
||||
@@ -286,9 +294,13 @@ async def webhook_ingress_generic(
|
||||
try:
|
||||
webhook = await get_webhook(webhook_id)
|
||||
except NotFoundError as e:
|
||||
logger.warning(f"Webhook payload received for unknown webhook: {e}")
|
||||
logger.warning(
|
||||
"Webhook payload received for unknown webhook %s. Confirm the webhook ID.",
|
||||
webhook_id,
|
||||
)
|
||||
raise HTTPException(
|
||||
status_code=HTTP_404_NOT_FOUND, detail=f"Webhook #{webhook_id} not found"
|
||||
status_code=HTTP_404_NOT_FOUND,
|
||||
detail={"message": str(e), "hint": "Check if the webhook ID is correct."},
|
||||
) from e
|
||||
logger.debug(f"Webhook #{webhook_id}: {webhook}")
|
||||
payload, event_type = await webhook_manager.validate_payload(webhook, request)
|
||||
@@ -398,11 +410,16 @@ def _get_provider_oauth_handler(
|
||||
client_id = getattr(settings.secrets, f"{provider_name.value}_client_id")
|
||||
client_secret = getattr(settings.secrets, f"{provider_name.value}_client_secret")
|
||||
if not (client_id and client_secret):
|
||||
logger.error(
|
||||
"OAuth credentials for provider %s are missing. Check environment configuration.",
|
||||
provider_name.value,
|
||||
)
|
||||
raise HTTPException(
|
||||
status_code=501,
|
||||
detail=(
|
||||
f"Integration with provider '{provider_name.value}' is not configured"
|
||||
),
|
||||
detail={
|
||||
"message": f"Integration with provider '{provider_name.value}' is not configured",
|
||||
"hint": "Set client ID and secret in the environment.",
|
||||
},
|
||||
)
|
||||
|
||||
handler_class = HANDLERS_BY_NAME[provider_name]
|
||||
|
||||
@@ -5,6 +5,7 @@ from typing import Any, Optional
|
||||
import autogpt_libs.auth.models
|
||||
import fastapi
|
||||
import fastapi.responses
|
||||
import pydantic
|
||||
import starlette.middleware.cors
|
||||
import uvicorn
|
||||
from autogpt_libs.feature_flag.client import (
|
||||
@@ -12,6 +13,7 @@ from autogpt_libs.feature_flag.client import (
|
||||
shutdown_launchdarkly,
|
||||
)
|
||||
from autogpt_libs.logging.utils import generate_uvicorn_config
|
||||
from fastapi.exceptions import RequestValidationError
|
||||
|
||||
import backend.data.block
|
||||
import backend.data.db
|
||||
@@ -87,11 +89,23 @@ app = fastapi.FastAPI(
|
||||
def handle_internal_http_error(status_code: int = 500, log_error: bool = True):
|
||||
def handler(request: fastapi.Request, exc: Exception):
|
||||
if log_error:
|
||||
logger.exception(f"{request.method} {request.url.path} failed: {exc}")
|
||||
logger.exception(
|
||||
"%s %s failed. Investigate and resolve the underlying issue: %s",
|
||||
request.method,
|
||||
request.url.path,
|
||||
exc,
|
||||
)
|
||||
|
||||
hint = (
|
||||
"Adjust the request and retry."
|
||||
if status_code < 500
|
||||
else "Check server logs and dependent services."
|
||||
)
|
||||
return fastapi.responses.JSONResponse(
|
||||
content={
|
||||
"message": f"{request.method} {request.url.path} failed",
|
||||
"message": f"Failed to process {request.method} {request.url.path}",
|
||||
"detail": str(exc),
|
||||
"hint": hint,
|
||||
},
|
||||
status_code=status_code,
|
||||
)
|
||||
@@ -99,6 +113,32 @@ def handle_internal_http_error(status_code: int = 500, log_error: bool = True):
|
||||
return handler
|
||||
|
||||
|
||||
async def validation_error_handler(
|
||||
request: fastapi.Request, exc: Exception
|
||||
) -> fastapi.responses.JSONResponse:
|
||||
logger.error(
|
||||
"Validation failed for %s %s: %s. Fix the request payload and try again.",
|
||||
request.method,
|
||||
request.url.path,
|
||||
exc,
|
||||
)
|
||||
errors: list | str
|
||||
if hasattr(exc, "errors"):
|
||||
errors = exc.errors() # type: ignore[call-arg]
|
||||
else:
|
||||
errors = str(exc)
|
||||
return fastapi.responses.JSONResponse(
|
||||
status_code=422,
|
||||
content={
|
||||
"message": f"Invalid data for {request.method} {request.url.path}",
|
||||
"detail": errors,
|
||||
"hint": "Ensure the request matches the API schema.",
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
app.add_exception_handler(RequestValidationError, validation_error_handler)
|
||||
app.add_exception_handler(pydantic.ValidationError, validation_error_handler)
|
||||
app.add_exception_handler(ValueError, handle_internal_http_error(400))
|
||||
app.add_exception_handler(Exception, handle_internal_http_error(500))
|
||||
app.include_router(backend.server.routers.v1.v1_router, tags=["v1"], prefix="/api")
|
||||
@@ -216,7 +256,7 @@ class AgentServer(backend.util.service.AppProcess):
|
||||
|
||||
@staticmethod
|
||||
async def test_get_presets(user_id: str, page: int = 1, page_size: int = 10):
|
||||
return await backend.server.v2.library.routes.presets.get_presets(
|
||||
return await backend.server.v2.library.routes.presets.list_presets(
|
||||
user_id=user_id, page=page, page_size=page_size
|
||||
)
|
||||
|
||||
@@ -228,7 +268,7 @@ class AgentServer(backend.util.service.AppProcess):
|
||||
|
||||
@staticmethod
|
||||
async def test_create_preset(
|
||||
preset: backend.server.v2.library.model.CreateLibraryAgentPresetRequest,
|
||||
preset: backend.server.v2.library.model.LibraryAgentPresetCreatable,
|
||||
user_id: str,
|
||||
):
|
||||
return await backend.server.v2.library.routes.presets.create_preset(
|
||||
@@ -238,7 +278,7 @@ class AgentServer(backend.util.service.AppProcess):
|
||||
@staticmethod
|
||||
async def test_update_preset(
|
||||
preset_id: str,
|
||||
preset: backend.server.v2.library.model.CreateLibraryAgentPresetRequest,
|
||||
preset: backend.server.v2.library.model.LibraryAgentPresetUpdatable,
|
||||
user_id: str,
|
||||
):
|
||||
return await backend.server.v2.library.routes.presets.update_preset(
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
"""Analytics API"""
|
||||
|
||||
import logging
|
||||
from typing import Annotated
|
||||
|
||||
import fastapi
|
||||
@@ -8,6 +9,7 @@ import backend.data.analytics
|
||||
from backend.server.utils import get_user_id
|
||||
|
||||
router = fastapi.APIRouter()
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@router.post(path="/log_raw_metric")
|
||||
@@ -17,13 +19,25 @@ async def log_raw_metric(
|
||||
metric_value: Annotated[float, fastapi.Body(..., embed=True)],
|
||||
data_string: Annotated[str, fastapi.Body(..., embed=True)],
|
||||
):
|
||||
result = await backend.data.analytics.log_raw_metric(
|
||||
user_id=user_id,
|
||||
metric_name=metric_name,
|
||||
metric_value=metric_value,
|
||||
data_string=data_string,
|
||||
)
|
||||
return result.id
|
||||
try:
|
||||
result = await backend.data.analytics.log_raw_metric(
|
||||
user_id=user_id,
|
||||
metric_name=metric_name,
|
||||
metric_value=metric_value,
|
||||
data_string=data_string,
|
||||
)
|
||||
return result.id
|
||||
except Exception as e:
|
||||
logger.exception(
|
||||
"Failed to log metric %s for user %s: %s", metric_name, user_id, e
|
||||
)
|
||||
raise fastapi.HTTPException(
|
||||
status_code=500,
|
||||
detail={
|
||||
"message": str(e),
|
||||
"hint": "Check analytics service connection and retry.",
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@router.post("/log_raw_analytics")
|
||||
@@ -43,7 +57,14 @@ async def log_raw_analytics(
|
||||
),
|
||||
],
|
||||
):
|
||||
result = await backend.data.analytics.log_raw_analytics(
|
||||
user_id, type, data, data_index
|
||||
)
|
||||
return result.id
|
||||
try:
|
||||
result = await backend.data.analytics.log_raw_analytics(
|
||||
user_id, type, data, data_index
|
||||
)
|
||||
return result.id
|
||||
except Exception as e:
|
||||
logger.exception("Failed to log analytics for user %s: %s", user_id, e)
|
||||
raise fastapi.HTTPException(
|
||||
status_code=500,
|
||||
detail={"message": str(e), "hint": "Ensure analytics DB is reachable."},
|
||||
)
|
||||
|
||||
@@ -0,0 +1,139 @@
|
||||
"""Example of analytics tests with improved error handling and assertions."""
|
||||
|
||||
import json
|
||||
from unittest.mock import AsyncMock, Mock
|
||||
|
||||
import fastapi
|
||||
import fastapi.testclient
|
||||
import pytest_mock
|
||||
from pytest_snapshot.plugin import Snapshot
|
||||
|
||||
import backend.server.routers.analytics as analytics_routes
|
||||
from backend.server.conftest import TEST_USER_ID
|
||||
from backend.server.test_helpers import (
|
||||
assert_error_response_structure,
|
||||
assert_mock_called_with_partial,
|
||||
assert_response_status,
|
||||
safe_parse_json,
|
||||
)
|
||||
from backend.server.utils import get_user_id
|
||||
|
||||
app = fastapi.FastAPI()
|
||||
app.include_router(analytics_routes.router)
|
||||
|
||||
client = fastapi.testclient.TestClient(app)
|
||||
|
||||
|
||||
def override_get_user_id() -> str:
|
||||
"""Override get_user_id for testing"""
|
||||
return TEST_USER_ID
|
||||
|
||||
|
||||
app.dependency_overrides[get_user_id] = override_get_user_id
|
||||
|
||||
|
||||
def test_log_raw_metric_success_improved(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
configured_snapshot: Snapshot,
|
||||
) -> None:
|
||||
"""Test successful raw metric logging with improved assertions."""
|
||||
# Mock the analytics function
|
||||
mock_result = Mock(id="metric-123-uuid")
|
||||
|
||||
mock_log_metric = mocker.patch(
|
||||
"backend.data.analytics.log_raw_metric",
|
||||
new_callable=AsyncMock,
|
||||
return_value=mock_result,
|
||||
)
|
||||
|
||||
request_data = {
|
||||
"metric_name": "page_load_time",
|
||||
"metric_value": 2.5,
|
||||
"data_string": "/dashboard",
|
||||
}
|
||||
|
||||
response = client.post("/log_raw_metric", json=request_data)
|
||||
|
||||
# Improved assertions with better error messages
|
||||
assert_response_status(response, 200, "Metric logging should succeed")
|
||||
response_data = safe_parse_json(response, "Metric response parsing")
|
||||
|
||||
assert response_data == "metric-123-uuid", f"Unexpected response: {response_data}"
|
||||
|
||||
# Verify the function was called with correct parameters
|
||||
assert_mock_called_with_partial(
|
||||
mock_log_metric,
|
||||
user_id=TEST_USER_ID,
|
||||
metric_name="page_load_time",
|
||||
metric_value=2.5,
|
||||
data_string="/dashboard",
|
||||
)
|
||||
|
||||
# Snapshot test the response
|
||||
configured_snapshot.assert_match(
|
||||
json.dumps({"metric_id": response_data}, indent=2, sort_keys=True),
|
||||
"analytics_log_metric_success_improved",
|
||||
)
|
||||
|
||||
|
||||
def test_log_raw_metric_invalid_request_improved() -> None:
|
||||
"""Test invalid metric request with improved error assertions."""
|
||||
# Test missing required fields
|
||||
response = client.post("/log_raw_metric", json={})
|
||||
|
||||
error_data = assert_error_response_structure(
|
||||
response, expected_status=422, expected_error_fields=["loc", "msg", "type"]
|
||||
)
|
||||
|
||||
# Verify specific error details
|
||||
detail = error_data["detail"]
|
||||
assert isinstance(detail, list), "Error detail should be a list"
|
||||
assert len(detail) > 0, "Should have at least one error"
|
||||
|
||||
# Check that required fields are mentioned in errors
|
||||
error_fields = [error["loc"][-1] for error in detail if "loc" in error]
|
||||
assert "metric_name" in error_fields, "Should report missing metric_name"
|
||||
assert "metric_value" in error_fields, "Should report missing metric_value"
|
||||
assert "data_string" in error_fields, "Should report missing data_string"
|
||||
|
||||
|
||||
def test_log_raw_metric_type_validation_improved() -> None:
|
||||
"""Test metric type validation with improved assertions."""
|
||||
invalid_requests = [
|
||||
{
|
||||
"data": {
|
||||
"metric_name": "test",
|
||||
"metric_value": "not_a_number", # Invalid type
|
||||
"data_string": "test",
|
||||
},
|
||||
"expected_error": "Input should be a valid number",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"metric_name": "", # Empty string
|
||||
"metric_value": 1.0,
|
||||
"data_string": "test",
|
||||
},
|
||||
"expected_error": "String should have at least 1 character",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"metric_name": "test",
|
||||
"metric_value": float("inf"), # Infinity
|
||||
"data_string": "test",
|
||||
},
|
||||
"expected_error": "ensure this value is finite",
|
||||
},
|
||||
]
|
||||
|
||||
for test_case in invalid_requests:
|
||||
response = client.post("/log_raw_metric", json=test_case["data"])
|
||||
|
||||
error_data = assert_error_response_structure(response, expected_status=422)
|
||||
|
||||
# Check that expected error is in the response
|
||||
error_text = json.dumps(error_data)
|
||||
assert (
|
||||
test_case["expected_error"] in error_text
|
||||
or test_case["expected_error"].lower() in error_text.lower()
|
||||
), f"Expected error '{test_case['expected_error']}' not found in: {error_text}"
|
||||
@@ -0,0 +1,107 @@
|
||||
"""Example of parametrized tests for analytics endpoints."""
|
||||
|
||||
import json
|
||||
from unittest.mock import AsyncMock, Mock
|
||||
|
||||
import fastapi
|
||||
import fastapi.testclient
|
||||
import pytest
|
||||
import pytest_mock
|
||||
from pytest_snapshot.plugin import Snapshot
|
||||
|
||||
import backend.server.routers.analytics as analytics_routes
|
||||
from backend.server.conftest import TEST_USER_ID
|
||||
from backend.server.utils import get_user_id
|
||||
|
||||
app = fastapi.FastAPI()
|
||||
app.include_router(analytics_routes.router)
|
||||
|
||||
client = fastapi.testclient.TestClient(app)
|
||||
|
||||
|
||||
def override_get_user_id() -> str:
|
||||
"""Override get_user_id for testing"""
|
||||
return TEST_USER_ID
|
||||
|
||||
|
||||
app.dependency_overrides[get_user_id] = override_get_user_id
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"metric_value,metric_name,data_string,test_id",
|
||||
[
|
||||
(100, "api_calls_count", "external_api", "integer_value"),
|
||||
(0, "error_count", "no_errors", "zero_value"),
|
||||
(-5.2, "temperature_delta", "cooling", "negative_value"),
|
||||
(1.23456789, "precision_test", "float_precision", "float_precision"),
|
||||
(999999999, "large_number", "max_value", "large_number"),
|
||||
(0.0000001, "tiny_number", "min_value", "tiny_number"),
|
||||
],
|
||||
)
|
||||
def test_log_raw_metric_values_parametrized(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
configured_snapshot: Snapshot,
|
||||
metric_value: float,
|
||||
metric_name: str,
|
||||
data_string: str,
|
||||
test_id: str,
|
||||
) -> None:
|
||||
"""Test raw metric logging with various metric values using parametrize."""
|
||||
# Mock the analytics function
|
||||
mock_result = Mock(id=f"metric-{test_id}-uuid")
|
||||
|
||||
mocker.patch(
|
||||
"backend.data.analytics.log_raw_metric",
|
||||
new_callable=AsyncMock,
|
||||
return_value=mock_result,
|
||||
)
|
||||
|
||||
request_data = {
|
||||
"metric_name": metric_name,
|
||||
"metric_value": metric_value,
|
||||
"data_string": data_string,
|
||||
}
|
||||
|
||||
response = client.post("/log_raw_metric", json=request_data)
|
||||
|
||||
# Better error handling
|
||||
assert response.status_code == 200, f"Failed for {test_id}: {response.text}"
|
||||
response_data = response.json()
|
||||
|
||||
# Snapshot test the response
|
||||
configured_snapshot.assert_match(
|
||||
json.dumps(
|
||||
{"metric_id": response_data, "test_case": test_id}, indent=2, sort_keys=True
|
||||
),
|
||||
f"analytics_metric_{test_id}",
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"invalid_data,expected_error",
|
||||
[
|
||||
({}, "Field required"), # Missing all fields
|
||||
({"metric_name": "test"}, "Field required"), # Missing metric_value
|
||||
(
|
||||
{"metric_name": "test", "metric_value": "not_a_number"},
|
||||
"Input should be a valid number",
|
||||
), # Invalid type
|
||||
(
|
||||
{"metric_name": "", "metric_value": 1.0, "data_string": "test"},
|
||||
"String should have at least 1 character",
|
||||
), # Empty name
|
||||
],
|
||||
)
|
||||
def test_log_raw_metric_invalid_requests_parametrized(
|
||||
invalid_data: dict,
|
||||
expected_error: str,
|
||||
) -> None:
|
||||
"""Test invalid metric requests with parametrize."""
|
||||
response = client.post("/log_raw_metric", json=invalid_data)
|
||||
|
||||
assert response.status_code == 422
|
||||
error_detail = response.json()
|
||||
assert "detail" in error_detail
|
||||
# Verify error message contains expected error
|
||||
error_text = json.dumps(error_detail)
|
||||
assert expected_error in error_text or expected_error.lower() in error_text.lower()
|
||||
@@ -0,0 +1,281 @@
|
||||
import json
|
||||
from unittest.mock import AsyncMock, Mock
|
||||
|
||||
import fastapi
|
||||
import fastapi.testclient
|
||||
import pytest_mock
|
||||
from pytest_snapshot.plugin import Snapshot
|
||||
|
||||
import backend.server.routers.analytics as analytics_routes
|
||||
from backend.server.conftest import TEST_USER_ID
|
||||
from backend.server.utils import get_user_id
|
||||
|
||||
app = fastapi.FastAPI()
|
||||
app.include_router(analytics_routes.router)
|
||||
|
||||
client = fastapi.testclient.TestClient(app)
|
||||
|
||||
|
||||
def override_get_user_id() -> str:
|
||||
"""Override get_user_id for testing"""
|
||||
return TEST_USER_ID
|
||||
|
||||
|
||||
app.dependency_overrides[get_user_id] = override_get_user_id
|
||||
|
||||
|
||||
def test_log_raw_metric_success(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
configured_snapshot: Snapshot,
|
||||
) -> None:
|
||||
"""Test successful raw metric logging"""
|
||||
|
||||
# Mock the analytics function
|
||||
mock_result = Mock(id="metric-123-uuid")
|
||||
|
||||
mock_log_metric = mocker.patch(
|
||||
"backend.data.analytics.log_raw_metric",
|
||||
new_callable=AsyncMock,
|
||||
return_value=mock_result,
|
||||
)
|
||||
|
||||
request_data = {
|
||||
"metric_name": "page_load_time",
|
||||
"metric_value": 2.5,
|
||||
"data_string": "/dashboard",
|
||||
}
|
||||
|
||||
response = client.post("/log_raw_metric", json=request_data)
|
||||
|
||||
assert response.status_code == 200
|
||||
response_data = response.json()
|
||||
assert response_data == "metric-123-uuid"
|
||||
|
||||
# Verify the function was called with correct parameters
|
||||
mock_log_metric.assert_called_once_with(
|
||||
user_id=TEST_USER_ID,
|
||||
metric_name="page_load_time",
|
||||
metric_value=2.5,
|
||||
data_string="/dashboard",
|
||||
)
|
||||
|
||||
# Snapshot test the response
|
||||
configured_snapshot.assert_match(
|
||||
json.dumps({"metric_id": response.json()}, indent=2, sort_keys=True),
|
||||
"analytics_log_metric_success",
|
||||
)
|
||||
|
||||
|
||||
def test_log_raw_metric_various_values(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
configured_snapshot: Snapshot,
|
||||
) -> None:
|
||||
"""Test raw metric logging with various metric values"""
|
||||
|
||||
# Mock the analytics function
|
||||
mock_result = Mock(id="metric-456-uuid")
|
||||
|
||||
mocker.patch(
|
||||
"backend.data.analytics.log_raw_metric",
|
||||
new_callable=AsyncMock,
|
||||
return_value=mock_result,
|
||||
)
|
||||
|
||||
# Test with integer value
|
||||
request_data = {
|
||||
"metric_name": "api_calls_count",
|
||||
"metric_value": 100,
|
||||
"data_string": "external_api",
|
||||
}
|
||||
|
||||
response = client.post("/log_raw_metric", json=request_data)
|
||||
assert response.status_code == 200
|
||||
|
||||
# Test with zero value
|
||||
request_data = {
|
||||
"metric_name": "error_count",
|
||||
"metric_value": 0,
|
||||
"data_string": "no_errors",
|
||||
}
|
||||
|
||||
response = client.post("/log_raw_metric", json=request_data)
|
||||
assert response.status_code == 200
|
||||
|
||||
# Test with negative value
|
||||
request_data = {
|
||||
"metric_name": "temperature_delta",
|
||||
"metric_value": -5.2,
|
||||
"data_string": "cooling",
|
||||
}
|
||||
|
||||
response = client.post("/log_raw_metric", json=request_data)
|
||||
assert response.status_code == 200
|
||||
|
||||
# Snapshot the last response
|
||||
configured_snapshot.assert_match(
|
||||
json.dumps({"metric_id": response.json()}, indent=2, sort_keys=True),
|
||||
"analytics_log_metric_various_values",
|
||||
)
|
||||
|
||||
|
||||
def test_log_raw_analytics_success(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
configured_snapshot: Snapshot,
|
||||
) -> None:
|
||||
"""Test successful raw analytics logging"""
|
||||
|
||||
# Mock the analytics function
|
||||
mock_result = Mock(id="analytics-789-uuid")
|
||||
|
||||
mock_log_analytics = mocker.patch(
|
||||
"backend.data.analytics.log_raw_analytics",
|
||||
new_callable=AsyncMock,
|
||||
return_value=mock_result,
|
||||
)
|
||||
|
||||
request_data = {
|
||||
"type": "user_action",
|
||||
"data": {
|
||||
"action": "button_click",
|
||||
"button_id": "submit_form",
|
||||
"timestamp": "2023-01-01T00:00:00Z",
|
||||
"metadata": {
|
||||
"form_type": "registration",
|
||||
"fields_filled": 5,
|
||||
},
|
||||
},
|
||||
"data_index": "button_click_submit_form",
|
||||
}
|
||||
|
||||
response = client.post("/log_raw_analytics", json=request_data)
|
||||
|
||||
assert response.status_code == 200
|
||||
response_data = response.json()
|
||||
assert response_data == "analytics-789-uuid"
|
||||
|
||||
# Verify the function was called with correct parameters
|
||||
mock_log_analytics.assert_called_once_with(
|
||||
TEST_USER_ID,
|
||||
"user_action",
|
||||
request_data["data"],
|
||||
"button_click_submit_form",
|
||||
)
|
||||
|
||||
# Snapshot test the response
|
||||
configured_snapshot.assert_match(
|
||||
json.dumps({"analytics_id": response_data}, indent=2, sort_keys=True),
|
||||
"analytics_log_analytics_success",
|
||||
)
|
||||
|
||||
|
||||
def test_log_raw_analytics_complex_data(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
configured_snapshot: Snapshot,
|
||||
) -> None:
|
||||
"""Test raw analytics logging with complex nested data"""
|
||||
|
||||
# Mock the analytics function
|
||||
mock_result = Mock(id="analytics-complex-uuid")
|
||||
|
||||
mocker.patch(
|
||||
"backend.data.analytics.log_raw_analytics",
|
||||
new_callable=AsyncMock,
|
||||
return_value=mock_result,
|
||||
)
|
||||
|
||||
request_data = {
|
||||
"type": "agent_execution",
|
||||
"data": {
|
||||
"agent_id": "agent_123",
|
||||
"execution_id": "exec_456",
|
||||
"status": "completed",
|
||||
"duration_ms": 3500,
|
||||
"nodes_executed": 15,
|
||||
"blocks_used": [
|
||||
{"block_id": "llm_block", "count": 3},
|
||||
{"block_id": "http_block", "count": 5},
|
||||
{"block_id": "code_block", "count": 2},
|
||||
],
|
||||
"errors": [],
|
||||
"metadata": {
|
||||
"trigger": "manual",
|
||||
"user_tier": "premium",
|
||||
"environment": "production",
|
||||
},
|
||||
},
|
||||
"data_index": "agent_123_exec_456",
|
||||
}
|
||||
|
||||
response = client.post("/log_raw_analytics", json=request_data)
|
||||
|
||||
assert response.status_code == 200
|
||||
response_data = response.json()
|
||||
|
||||
# Snapshot test the complex data structure
|
||||
configured_snapshot.assert_match(
|
||||
json.dumps(
|
||||
{
|
||||
"analytics_id": response_data,
|
||||
"logged_data": request_data["data"],
|
||||
},
|
||||
indent=2,
|
||||
sort_keys=True,
|
||||
),
|
||||
"analytics_log_analytics_complex_data",
|
||||
)
|
||||
|
||||
|
||||
def test_log_raw_metric_invalid_request() -> None:
|
||||
"""Test raw metric logging with invalid request data"""
|
||||
# Missing required fields
|
||||
response = client.post("/log_raw_metric", json={})
|
||||
assert response.status_code == 422
|
||||
|
||||
# Invalid metric_value type
|
||||
response = client.post(
|
||||
"/log_raw_metric",
|
||||
json={
|
||||
"metric_name": "test",
|
||||
"metric_value": "not_a_number",
|
||||
"data_string": "test",
|
||||
},
|
||||
)
|
||||
assert response.status_code == 422
|
||||
|
||||
# Missing data_string
|
||||
response = client.post(
|
||||
"/log_raw_metric",
|
||||
json={
|
||||
"metric_name": "test",
|
||||
"metric_value": 1.0,
|
||||
},
|
||||
)
|
||||
assert response.status_code == 422
|
||||
|
||||
|
||||
def test_log_raw_analytics_invalid_request() -> None:
|
||||
"""Test raw analytics logging with invalid request data"""
|
||||
# Missing required fields
|
||||
response = client.post("/log_raw_analytics", json={})
|
||||
assert response.status_code == 422
|
||||
|
||||
# Invalid data type (should be dict)
|
||||
response = client.post(
|
||||
"/log_raw_analytics",
|
||||
json={
|
||||
"type": "test",
|
||||
"data": "not_a_dict",
|
||||
"data_index": "test",
|
||||
},
|
||||
)
|
||||
assert response.status_code == 422
|
||||
|
||||
# Missing data_index
|
||||
response = client.post(
|
||||
"/log_raw_analytics",
|
||||
json={
|
||||
"type": "test",
|
||||
"data": {"key": "value"},
|
||||
},
|
||||
)
|
||||
assert response.status_code == 422
|
||||
@@ -2,7 +2,7 @@ import logging
|
||||
from typing import Annotated
|
||||
|
||||
from autogpt_libs.auth.middleware import APIKeyValidator
|
||||
from fastapi import APIRouter, Body, Depends, Query
|
||||
from fastapi import APIRouter, Body, Depends, HTTPException, Query
|
||||
from fastapi.responses import JSONResponse
|
||||
|
||||
from backend.data.user import (
|
||||
@@ -40,8 +40,11 @@ async def unsubscribe_via_one_click(token: Annotated[str, Query()]):
|
||||
try:
|
||||
await unsubscribe_user_by_token(token)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to unsubscribe user by token {token}: {e}")
|
||||
raise e
|
||||
logger.exception("Unsubscribe token %s failed: %s", token, e)
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail={"message": str(e), "hint": "Verify Postmark token settings."},
|
||||
)
|
||||
return JSONResponse(status_code=200, content={"status": "ok"})
|
||||
|
||||
|
||||
@@ -67,7 +70,10 @@ async def postmark_webhook_handler(
|
||||
case PostmarkSubscriptionChangeWebhook():
|
||||
subscription_handler(webhook)
|
||||
case _:
|
||||
logger.warning(f"Unknown webhook type: {type(webhook)}")
|
||||
logger.warning(
|
||||
"Unhandled Postmark webhook type %s. Update handler mappings.",
|
||||
type(webhook),
|
||||
)
|
||||
return
|
||||
|
||||
|
||||
@@ -85,7 +91,10 @@ async def bounce_handler(event: PostmarkBounceWebhook):
|
||||
logger.info(f"{event.Email=}")
|
||||
user = await get_user_by_email(event.Email)
|
||||
if not user:
|
||||
logger.error(f"User not found for email: {event.Email}")
|
||||
logger.warning(
|
||||
"Received bounce for unknown email %s. Ensure user records are current.",
|
||||
event.Email,
|
||||
)
|
||||
return
|
||||
await set_user_email_verification(user.id, False)
|
||||
logger.debug(f"Setting email verification to false for user: {user.id}")
|
||||
|
||||
@@ -575,6 +575,13 @@ async def execute_graph(
|
||||
graph_version: Optional[int] = None,
|
||||
preset_id: Optional[str] = None,
|
||||
) -> ExecuteGraphResponse:
|
||||
current_balance = await _user_credit_model.get_credits(user_id)
|
||||
if current_balance <= 0:
|
||||
raise HTTPException(
|
||||
status_code=402,
|
||||
detail="Insufficient balance to execute the agent. Please top up your account.",
|
||||
)
|
||||
|
||||
graph_exec = await execution_utils.add_graph_execution_async(
|
||||
graph_id=graph_id,
|
||||
user_id=user_id,
|
||||
@@ -817,8 +824,15 @@ async def create_api_key(
|
||||
)
|
||||
return CreateAPIKeyResponse(api_key=api_key, plain_text_key=plain_text)
|
||||
except APIKeyError as e:
|
||||
logger.error(f"Failed to create API key: {str(e)}")
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
logger.error(
|
||||
"Could not create API key for user %s: %s. Review input and permissions.",
|
||||
user_id,
|
||||
e,
|
||||
)
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail={"message": str(e), "hint": "Verify request payload and try again."},
|
||||
)
|
||||
|
||||
|
||||
@v1_router.get(
|
||||
@@ -834,8 +848,11 @@ async def get_api_keys(
|
||||
try:
|
||||
return await list_user_api_keys(user_id)
|
||||
except APIKeyError as e:
|
||||
logger.error(f"Failed to list API keys: {str(e)}")
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
logger.error("Failed to list API keys for user %s: %s", user_id, e)
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail={"message": str(e), "hint": "Check API key service availability."},
|
||||
)
|
||||
|
||||
|
||||
@v1_router.get(
|
||||
@@ -854,8 +871,11 @@ async def get_api_key(
|
||||
raise HTTPException(status_code=404, detail="API key not found")
|
||||
return api_key
|
||||
except APIKeyError as e:
|
||||
logger.error(f"Failed to get API key: {str(e)}")
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
logger.error("Error retrieving API key %s for user %s: %s", key_id, user_id, e)
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail={"message": str(e), "hint": "Ensure the key ID is correct."},
|
||||
)
|
||||
|
||||
|
||||
@v1_router.delete(
|
||||
@@ -876,8 +896,14 @@ async def delete_api_key(
|
||||
except APIKeyPermissionError:
|
||||
raise HTTPException(status_code=403, detail="Permission denied")
|
||||
except APIKeyError as e:
|
||||
logger.error(f"Failed to revoke API key: {str(e)}")
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
logger.error("Failed to revoke API key %s for user %s: %s", key_id, user_id, e)
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail={
|
||||
"message": str(e),
|
||||
"hint": "Verify permissions or try again later.",
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@v1_router.post(
|
||||
@@ -898,8 +924,11 @@ async def suspend_key(
|
||||
except APIKeyPermissionError:
|
||||
raise HTTPException(status_code=403, detail="Permission denied")
|
||||
except APIKeyError as e:
|
||||
logger.error(f"Failed to suspend API key: {str(e)}")
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
logger.error("Failed to suspend API key %s for user %s: %s", key_id, user_id, e)
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail={"message": str(e), "hint": "Check user permissions and retry."},
|
||||
)
|
||||
|
||||
|
||||
@v1_router.put(
|
||||
@@ -922,5 +951,13 @@ async def update_permissions(
|
||||
except APIKeyPermissionError:
|
||||
raise HTTPException(status_code=403, detail="Permission denied")
|
||||
except APIKeyError as e:
|
||||
logger.error(f"Failed to update API key permissions: {str(e)}")
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
logger.error(
|
||||
"Failed to update permissions for API key %s of user %s: %s",
|
||||
key_id,
|
||||
user_id,
|
||||
e,
|
||||
)
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail={"message": str(e), "hint": "Ensure permissions list is valid."},
|
||||
)
|
||||
|
||||
391
autogpt_platform/backend/backend/server/routers/v1_test.py
Normal file
391
autogpt_platform/backend/backend/server/routers/v1_test.py
Normal file
@@ -0,0 +1,391 @@
|
||||
import json
|
||||
from unittest.mock import AsyncMock, Mock
|
||||
|
||||
import autogpt_libs.auth.depends
|
||||
import fastapi
|
||||
import fastapi.testclient
|
||||
import pytest_mock
|
||||
from pytest_snapshot.plugin import Snapshot
|
||||
|
||||
import backend.server.routers.v1 as v1_routes
|
||||
from backend.data.credit import AutoTopUpConfig
|
||||
from backend.data.graph import GraphModel
|
||||
from backend.server.conftest import TEST_USER_ID
|
||||
from backend.server.utils import get_user_id
|
||||
|
||||
app = fastapi.FastAPI()
|
||||
app.include_router(v1_routes.v1_router)
|
||||
|
||||
client = fastapi.testclient.TestClient(app)
|
||||
|
||||
|
||||
def override_auth_middleware(request: fastapi.Request) -> dict[str, str]:
|
||||
"""Override auth middleware for testing"""
|
||||
return {"sub": TEST_USER_ID, "role": "user", "email": "test@example.com"}
|
||||
|
||||
|
||||
def override_get_user_id() -> str:
|
||||
"""Override get_user_id for testing"""
|
||||
return TEST_USER_ID
|
||||
|
||||
|
||||
app.dependency_overrides[autogpt_libs.auth.middleware.auth_middleware] = (
|
||||
override_auth_middleware
|
||||
)
|
||||
app.dependency_overrides[get_user_id] = override_get_user_id
|
||||
|
||||
|
||||
# Auth endpoints tests
|
||||
def test_get_or_create_user_route(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
configured_snapshot: Snapshot,
|
||||
) -> None:
|
||||
"""Test get or create user endpoint"""
|
||||
mock_user = Mock()
|
||||
mock_user.model_dump.return_value = {
|
||||
"id": TEST_USER_ID,
|
||||
"email": "test@example.com",
|
||||
"name": "Test User",
|
||||
}
|
||||
|
||||
mocker.patch(
|
||||
"backend.server.routers.v1.get_or_create_user",
|
||||
return_value=mock_user,
|
||||
)
|
||||
|
||||
response = client.post("/auth/user")
|
||||
|
||||
assert response.status_code == 200
|
||||
response_data = response.json()
|
||||
|
||||
configured_snapshot.assert_match(
|
||||
json.dumps(response_data, indent=2, sort_keys=True),
|
||||
"auth_user",
|
||||
)
|
||||
|
||||
|
||||
def test_update_user_email_route(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """POST /auth/user/email updates the email and echoes it back."""
    mocker.patch(
        "backend.server.routers.v1.update_user_email",
        return_value=None,
    )

    new_email = "newemail@example.com"
    response = client.post("/auth/user/email", json=new_email)
    assert response.status_code == 200

    body = response.json()
    assert body["email"] == new_email

    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "auth_email",
    )
|
||||
|
||||
|
||||
# Blocks endpoints tests
|
||||
def test_get_graph_blocks(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """GET /blocks lists available (non-disabled) blocks with their costs."""
    block_stub = Mock()
    block_stub.id = "test-block"
    block_stub.disabled = False
    block_stub.to_dict.return_value = {
        "id": "test-block",
        "name": "Test Block",
        "description": "A test block",
        "disabled": False,
    }

    # The route resolves blocks via a factory mapping plus a cost table.
    mocker.patch(
        "backend.server.routers.v1.get_blocks",
        return_value={"test-block": lambda: block_stub},
    )
    mocker.patch(
        "backend.server.routers.v1.get_block_costs",
        return_value={"test-block": [{"cost": 10, "type": "credit"}]},
    )

    response = client.get("/blocks")
    assert response.status_code == 200

    body = response.json()
    assert len(body) == 1
    assert body[0]["id"] == "test-block"

    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "blks_all",
    )
|
||||
|
||||
|
||||
def test_execute_graph_block(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """POST /blocks/{id}/execute returns the block's named outputs."""
    block_stub = Mock()
    block_stub.execute.return_value = [
        ("output1", {"data": "result1"}),
        ("output2", {"data": "result2"}),
    ]
    mocker.patch(
        "backend.server.routers.v1.get_block",
        return_value=block_stub,
    )

    payload = {
        "input_name": "test_input",
        "input_value": "test_value",
    }
    response = client.post("/blocks/test-block/execute", json=payload)
    assert response.status_code == 200

    body = response.json()
    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "blks_exec",
    )
|
||||
|
||||
|
||||
def test_execute_graph_block_not_found(
    mocker: pytest_mock.MockFixture,
) -> None:
    """Executing an unknown block id yields a 404 with a helpful detail."""
    mocker.patch(
        "backend.server.routers.v1.get_block",
        return_value=None,
    )

    resp = client.post("/blocks/nonexistent-block/execute", json={})

    assert resp.status_code == 404
    detail = resp.json()["detail"]
    assert "not found" in detail
|
||||
|
||||
|
||||
# Credits endpoints tests
|
||||
def test_get_user_credits(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """GET /credits reports the user's current credit balance."""
    credit_model = mocker.patch("backend.server.routers.v1._user_credit_model")
    credit_model.get_credits = AsyncMock(return_value=1000)

    response = client.get("/credits")
    assert response.status_code == 200

    body = response.json()
    assert body["credits"] == 1000

    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "cred_bal",
    )
|
||||
|
||||
|
||||
def test_request_top_up(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """POST /credits returns a checkout URL for the requested top-up."""
    credit_model = mocker.patch("backend.server.routers.v1._user_credit_model")
    credit_model.top_up_intent = AsyncMock(
        return_value="https://checkout.example.com/session123"
    )

    response = client.post("/credits", json={"credit_amount": 500})
    assert response.status_code == 200

    body = response.json()
    assert "checkout_url" in body

    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "cred_topup_req",
    )
|
||||
|
||||
|
||||
def test_get_auto_top_up(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """GET /credits/auto-top-up returns the stored auto top-up settings."""
    mocker.patch(
        "backend.server.routers.v1.get_auto_top_up",
        return_value=AutoTopUpConfig(threshold=100, amount=500),
    )

    response = client.get("/credits/auto-top-up")
    assert response.status_code == 200

    body = response.json()
    assert body["threshold"] == 100
    assert body["amount"] == 500

    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "cred_topup_cfg",
    )
|
||||
|
||||
|
||||
# Graphs endpoints tests
|
||||
def test_get_graphs(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """GET /graphs lists the user's graphs."""
    graph_stub = GraphModel(
        id="graph-123",
        version=1,
        is_active=True,
        name="Test Graph",
        description="A test graph",
        user_id="test-user-id",
    )
    mocker.patch(
        "backend.server.routers.v1.graph_db.get_graphs",
        return_value=[graph_stub],
    )

    response = client.get("/graphs")
    assert response.status_code == 200

    body = response.json()
    assert len(body) == 1
    assert body[0]["id"] == "graph-123"

    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "grphs_all",
    )
|
||||
|
||||
|
||||
def test_get_graph(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """GET /graphs/{id} returns the requested graph."""
    graph_stub = GraphModel(
        id="graph-123",
        version=1,
        is_active=True,
        name="Test Graph",
        description="A test graph",
        user_id="test-user-id",
    )
    mocker.patch(
        "backend.server.routers.v1.graph_db.get_graph",
        return_value=graph_stub,
    )

    response = client.get("/graphs/graph-123")
    assert response.status_code == 200

    body = response.json()
    assert body["id"] == "graph-123"

    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "grph_single",
    )
|
||||
|
||||
|
||||
def test_get_graph_not_found(
    mocker: pytest_mock.MockFixture,
) -> None:
    """GET /graphs/{id} yields a 404 for an unknown graph id."""
    mocker.patch(
        "backend.server.routers.v1.graph_db.get_graph",
        return_value=None,
    )

    resp = client.get("/graphs/nonexistent-graph")

    assert resp.status_code == 404
    detail = resp.json()["detail"]
    assert "not found" in detail
|
||||
|
||||
|
||||
def test_delete_graph(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """DELETE /graphs/{id} deactivates the graph and reports deleted versions."""
    # The route first fetches the graph (active → needs deactivation).
    active_graph = GraphModel(
        id="graph-123",
        version=1,
        is_active=True,
        name="Test Graph",
        description="A test graph",
        user_id="test-user-id",
    )
    mocker.patch(
        "backend.server.routers.v1.graph_db.get_graph",
        return_value=active_graph,
    )
    mocker.patch(
        "backend.server.routers.v1.on_graph_deactivate",
        return_value=None,
    )
    mocker.patch(
        "backend.server.routers.v1.graph_db.delete_graph",
        return_value=3,  # number of versions deleted
    )

    response = client.delete("/graphs/graph-123")
    assert response.status_code == 200

    body = response.json()
    assert body["version_counts"] == 3

    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "grphs_del",
    )
|
||||
|
||||
|
||||
# Invalid request tests
|
||||
def test_invalid_json_request() -> None:
    """Malformed JSON bodies are rejected with a 422 validation error."""
    resp = client.post(
        "/auth/user/email",
        content="invalid json",
        headers={"Content-Type": "application/json"},
    )
    assert resp.status_code == 422
|
||||
|
||||
|
||||
def test_missing_required_field() -> None:
    """Omitting a required field (credit_amount) is a 422 validation error."""
    resp = client.post("/credits", json={})
    assert resp.status_code == 422
|
||||
139
autogpt_platform/backend/backend/server/test_fixtures.py
Normal file
139
autogpt_platform/backend/backend/server/test_fixtures.py
Normal file
@@ -0,0 +1,139 @@
|
||||
"""Common test fixtures with proper setup and teardown."""
|
||||
|
||||
from contextlib import asynccontextmanager
|
||||
from typing import AsyncGenerator
|
||||
from unittest.mock import Mock, patch
|
||||
|
||||
import pytest
|
||||
from prisma import Prisma
|
||||
|
||||
|
||||
@pytest.fixture
async def test_db_connection() -> AsyncGenerator[Prisma, None]:
    """Yield a connected Prisma client, always disconnecting afterwards.

    The disconnect runs in a finally block, so it happens even when the
    connect call or the test body raises.
    """
    database = Prisma()
    try:
        await database.connect()
        yield database
    finally:
        await database.disconnect()
|
||||
|
||||
|
||||
@pytest.fixture
def mock_transaction():
    """Patch locked_transaction with a no-op async context manager."""

    @asynccontextmanager
    async def _noop_transaction(*args, **kwargs):
        yield None

    with patch(
        "backend.data.db.locked_transaction", side_effect=_noop_transaction
    ) as patched:
        yield patched
|
||||
|
||||
|
||||
@pytest.fixture
def isolated_app_state():
    """Placeholder fixture for isolating FastAPI app state between tests.

    Intended pattern (guidance only, not yet active):
      1. snapshot app.dependency_overrides before the test,
      2. yield the app,
      3. restore the snapshot in a finally block.

    Currently a no-op that yields None.
    """
    yield None
|
||||
|
||||
|
||||
@pytest.fixture
def cleanup_files():
    """Track file paths created during a test and delete them afterwards.

    Yields:
        track_file: call with a path string to register it for cleanup.

    Teardown is best-effort: missing files are skipped, and removal
    failures are reported as warnings instead of failing the teardown.
    """
    import os  # local import keeps the fixture self-contained

    created_files: list[str] = []

    def track_file(filepath: str) -> None:
        created_files.append(filepath)

    yield track_file

    # Teardown: remove everything registered during the test.
    for filepath in created_files:
        try:
            if os.path.exists(filepath):
                os.remove(filepath)
        except OSError as e:
            # Narrowed from bare Exception: filesystem errors are the only
            # expected failure mode of exists()/remove().
            print(f"Warning: Failed to cleanup {filepath}: {e}")
|
||||
|
||||
|
||||
@pytest.fixture
async def async_mock_with_cleanup():
    """Yield a Mock factory; every created mock is reset after the test."""
    registry: list[Mock] = []

    def create_mock(**kwargs):
        new_mock = Mock(**kwargs)
        registry.append(new_mock)
        return new_mock

    yield create_mock

    # Teardown: clear call history on every mock handed out.
    for tracked in registry:
        tracked.reset_mock()
|
||||
|
||||
|
||||
class TestDatabaseIsolation:
    """Example of proper test isolation with database operations."""

    @pytest.fixture(autouse=True)
    async def setup_and_teardown(self, test_db_connection):
        """Purge users with @test.example emails before and after each test."""
        where_clause = {"email": {"contains": "@test.example"}}
        await test_db_connection.user.delete_many(where=where_clause)
        yield
        await test_db_connection.user.delete_many(where=where_clause)

    async def test_create_user(self, test_db_connection):
        """Creating a user succeeds against the isolated database."""
        created = await test_db_connection.user.create(
            data={"email": "test@test.example", "name": "Test User"}
        )
        assert created.email == "test@test.example"
        # Cleanup happens automatically via setup_and_teardown.
|
||||
|
||||
|
||||
@pytest.fixture(scope="function")  # explicit function scope
def reset_singleton_state():
    """Placeholder fixture for resetting singleton state between tests.

    Intended pattern (guidance only, not yet active): save the singleton's
    _instance attribute, set it to None for the test, and restore the saved
    value in a finally block.

    Currently a no-op that yields None.
    """
    yield None
|
||||
109
autogpt_platform/backend/backend/server/test_helpers.py
Normal file
109
autogpt_platform/backend/backend/server/test_helpers.py
Normal file
@@ -0,0 +1,109 @@
|
||||
"""Helper functions for improved test assertions and error handling."""
|
||||
|
||||
import json
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
|
||||
def assert_response_status(
    response: Any, expected_status: int = 200, error_context: Optional[str] = None
) -> None:
    """Fail with a descriptive AssertionError unless the status code matches.

    Args:
        response: HTTP response exposing .status_code, .json(), and .text.
        expected_status: status code the caller expects (default 200).
        error_context: optional prefix for the failure message.
    """
    if response.status_code == expected_status:
        return

    message = f"Expected status {expected_status}, got {response.status_code}"
    if error_context:
        message = f"{error_context}: {message}"

    # Append the body so failures are debuggable; fall back to the raw
    # text when the body is not valid JSON.
    try:
        message += f"\nResponse body: {json.dumps(response.json(), indent=2)}"
    except Exception:
        message += f"\nResponse text: {response.text}"

    raise AssertionError(message)
|
||||
|
||||
|
||||
def safe_parse_json(
    response: Any, error_context: Optional[str] = None
) -> Dict[str, Any]:
    """Parse a JSON response body with a descriptive failure message.

    Args:
        response: HTTP response exposing .json() and .text.
        error_context: optional prefix for the failure message.

    Returns:
        The parsed JSON payload.

    Raises:
        AssertionError: if the body is not valid JSON. The first 500
            characters of the raw body are included for debugging, and the
            original decode error is chained as the cause.
    """
    try:
        return response.json()
    except Exception as e:
        error_msg = f"Failed to parse JSON response: {e}"
        if error_context:
            error_msg = f"{error_context}: {error_msg}"
        error_msg += f"\nResponse text: {response.text[:500]}"
        # Chain the decode error explicitly so the root cause survives.
        raise AssertionError(error_msg) from e
|
||||
|
||||
|
||||
def assert_error_response_structure(
    response: Any,
    expected_status: int = 422,
    expected_error_fields: Optional[list[str]] = None,
) -> Dict[str, Any]:
    """Validate the shape of an error response and return its parsed body.

    Args:
        response: HTTP response object.
        expected_status: status code the error should carry (default 422).
        expected_error_fields: when given and the detail is a list, each
            FastAPI-style validation entry is checked for loc/msg/type keys.

    Returns:
        The parsed error payload.
    """
    assert_response_status(response, expected_status, "Error response check")
    error_data = safe_parse_json(response, "Error response parsing")

    # Every error payload must at least carry a 'detail' field.
    assert "detail" in error_data, f"Missing 'detail' in error response: {error_data}"

    if expected_error_fields:
        detail = error_data["detail"]
        if isinstance(detail, list):
            # FastAPI validation errors: a list of dicts with loc/msg/type.
            for error in detail:
                for required_key in ("loc", "msg", "type"):
                    assert (
                        required_key in error
                    ), f"Missing '{required_key}' in error: {error}"

    return error_data
|
||||
|
||||
|
||||
def assert_mock_called_with_partial(mock_obj: Any, **expected_kwargs: Any) -> None:
    """Check that a mock's most recent call included the given kwargs.

    Only the supplied keys are compared; extra kwargs on the actual call
    are ignored.

    Args:
        mock_obj: the mock to inspect.
        **expected_kwargs: keyword arguments the call must have included.
    """
    assert mock_obj.called, f"Mock {mock_obj} was not called"

    call_kwargs = mock_obj.call_args.kwargs if mock_obj.call_args else {}

    for key, expected_value in expected_kwargs.items():
        assert (
            key in call_kwargs
        ), f"Missing key '{key}' in mock call. Actual keys: {list(call_kwargs.keys())}"
        actual_value = call_kwargs[key]
        assert (
            actual_value == expected_value
        ), f"Mock called with {key}={actual_value}, expected {expected_value}"
|
||||
74
autogpt_platform/backend/backend/server/test_utils.py
Normal file
74
autogpt_platform/backend/backend/server/test_utils.py
Normal file
@@ -0,0 +1,74 @@
|
||||
"""Common test utilities and constants for server tests."""
|
||||
|
||||
from typing import Any, Dict
|
||||
from unittest.mock import Mock
|
||||
|
||||
import pytest
|
||||
|
||||
# Test ID constants
# Fixed user ids used by the server tests in this package so that
# snapshot output stays deterministic across runs.
TEST_USER_ID = "test-user-id"
ADMIN_USER_ID = "admin-user-id"
TARGET_USER_ID = "target-user-id"

# Common test data constants
# Deterministic timestamp/UUID-like strings for use wherever a stable
# value is needed in assertions or snapshot comparisons.
FIXED_TIMESTAMP = "2024-01-01T00:00:00Z"
TRANSACTION_UUID = "transaction-123-uuid"
METRIC_UUID = "metric-123-uuid"
ANALYTICS_UUID = "analytics-123-uuid"
|
||||
|
||||
|
||||
def create_mock_with_id(mock_id: str) -> Mock:
    """Build a Mock whose .id attribute is preset.

    Args:
        mock_id: value to expose as the mock's id attribute.

    Returns:
        A Mock with its id attribute configured to mock_id.
    """
    mock_obj = Mock()
    mock_obj.id = mock_id
    return mock_obj
|
||||
|
||||
|
||||
def assert_status_and_parse_json(
    response: Any, expected_status: int = 200
) -> Dict[str, Any]:
    """Assert the response status code, then return the parsed JSON body.

    Args:
        response: HTTP response object.
        expected_status: expected status code (default: 200).

    Returns:
        Parsed JSON response data.

    Raises:
        AssertionError: when the status code differs; the raw body text
            is included in the failure message for debugging.
    """
    actual_status = response.status_code
    assert (
        actual_status == expected_status
    ), f"Expected status {expected_status}, got {actual_status}: {response.text}"
    return response.json()
|
||||
|
||||
|
||||
def parametrized_metric_values_decorator(func):
    """Apply the shared metric-value parametrization to a test function.

    Parametrizes (metric_value, metric_name, data_string) over representative
    cases: positive int, zero, negative float, high-precision float, and a
    large integer.

    Note: a stray @pytest.mark.parametrize decorator previously applied to
    this helper itself was removed — marks on a non-test helper are never
    collected by pytest, and it duplicated the parametrization below.
    """
    return pytest.mark.parametrize(
        "metric_value,metric_name,data_string",
        [
            (100, "api_calls_count", "external_api"),
            (0, "error_count", "no_errors"),
            (-5.2, "temperature_delta", "cooling"),
            (1.23456789, "precision_test", "float_precision"),
            (999999999, "large_number", "max_value"),
        ],
    )(func)
|
||||
@@ -0,0 +1,331 @@
|
||||
import json
|
||||
from unittest.mock import AsyncMock
|
||||
|
||||
import autogpt_libs.auth
|
||||
import autogpt_libs.auth.depends
|
||||
import fastapi
|
||||
import fastapi.testclient
|
||||
import prisma.enums
|
||||
import pytest_mock
|
||||
from prisma import Json
|
||||
from pytest_snapshot.plugin import Snapshot
|
||||
|
||||
import backend.server.v2.admin.credit_admin_routes as credit_admin_routes
|
||||
import backend.server.v2.admin.model as admin_model
|
||||
from backend.data.model import UserTransaction
|
||||
from backend.server.conftest import ADMIN_USER_ID, TARGET_USER_ID
|
||||
from backend.server.model import Pagination
|
||||
|
||||
# Standalone FastAPI app that mounts only the admin credit routes under test.
app = fastapi.FastAPI()
app.include_router(credit_admin_routes.router)

# Shared TestClient for every test in this module; the dependency
# overrides installed below apply to all requests it makes.
client = fastapi.testclient.TestClient(app)
|
||||
|
||||
|
||||
def override_requires_admin_user() -> dict[str, str]:
    """Test-only replacement for the admin-user check dependency."""
    claims = {"sub": ADMIN_USER_ID, "role": "admin"}
    return claims
|
||||
|
||||
|
||||
def override_get_user_id() -> str:
    """Test-only replacement for get_user_id; resolves to the admin user."""
    admin_id = ADMIN_USER_ID
    return admin_id
|
||||
|
||||
|
||||
# Replace the real auth dependencies so every request runs as a fixed
# admin user.
app.dependency_overrides[autogpt_libs.auth.requires_admin_user] = (
    override_requires_admin_user
)
app.dependency_overrides[autogpt_libs.auth.depends.get_user_id] = override_get_user_id
|
||||
|
||||
|
||||
def test_add_user_credits_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Admin can grant credits; the transaction records who granted and why."""
    credit_model = mocker.patch(
        "backend.server.v2.admin.credit_admin_routes._user_credit_model"
    )
    credit_model._add_transaction = AsyncMock(
        return_value=(1500, "transaction-123-uuid")
    )

    payload = {
        "user_id": TARGET_USER_ID,
        "amount": 500,
        "comments": "Test credit grant for debugging",
    }
    response = client.post("/admin/add_credits", json=payload)

    assert response.status_code == 200
    body = response.json()
    assert body["new_balance"] == 1500
    assert body["transaction_key"] == "transaction-123-uuid"

    # The route should forward user/amount positionally, mark the
    # transaction as a GRANT, and stamp the acting admin into metadata.
    credit_model._add_transaction.assert_called_once()
    args, kwargs = credit_model._add_transaction.call_args
    assert args == (TARGET_USER_ID, 500)
    assert kwargs["transaction_type"] == prisma.enums.CreditTransactionType.GRANT
    assert isinstance(kwargs["metadata"], Json)
    assert kwargs["metadata"] == Json(
        {"admin_id": ADMIN_USER_ID, "reason": "Test credit grant for debugging"}
    )

    configured_snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "admin_add_credits_success",
    )
|
||||
|
||||
|
||||
def test_add_user_credits_negative_amount(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """Admin can deduct credits by submitting a negative amount."""
    credit_model = mocker.patch(
        "backend.server.v2.admin.credit_admin_routes._user_credit_model"
    )
    credit_model._add_transaction = AsyncMock(
        return_value=(200, "transaction-456-uuid")
    )

    payload = {
        "user_id": "target-user-id",
        "amount": -100,
        "comments": "Refund adjustment",
    }
    response = client.post("/admin/add_credits", json=payload)

    assert response.status_code == 200
    body = response.json()
    assert body["new_balance"] == 200

    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "adm_add_cred_neg",
    )
|
||||
|
||||
|
||||
def test_get_user_history_success(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """GET /admin/users_history returns transactions plus pagination info."""
    stub_response = admin_model.UserHistoryResponse(
        history=[
            UserTransaction(
                user_id="user-1",
                user_email="user1@example.com",
                amount=1000,
                reason="Initial grant",
                transaction_type=prisma.enums.CreditTransactionType.GRANT,
            ),
            UserTransaction(
                user_id="user-2",
                user_email="user2@example.com",
                amount=-50,
                reason="Usage",
                transaction_type=prisma.enums.CreditTransactionType.USAGE,
            ),
        ],
        pagination=Pagination(
            total_items=2,
            total_pages=1,
            current_page=1,
            page_size=20,
        ),
    )
    mocker.patch(
        "backend.server.v2.admin.credit_admin_routes.admin_get_user_history",
        return_value=stub_response,
    )

    response = client.get("/admin/users_history")

    assert response.status_code == 200
    body = response.json()
    assert len(body["history"]) == 2
    assert body["pagination"]["total_items"] == 2

    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "adm_usr_hist_ok",
    )
|
||||
|
||||
|
||||
def test_get_user_history_with_filters(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """Search/pagination/filter query params are forwarded to the data layer."""
    stub_response = admin_model.UserHistoryResponse(
        history=[
            UserTransaction(
                user_id="user-3",
                user_email="test@example.com",
                amount=500,
                reason="Top up",
                transaction_type=prisma.enums.CreditTransactionType.TOP_UP,
            ),
        ],
        pagination=Pagination(
            total_items=1,
            total_pages=1,
            current_page=1,
            page_size=10,
        ),
    )
    get_history_mock = mocker.patch(
        "backend.server.v2.admin.credit_admin_routes.admin_get_user_history",
        return_value=stub_response,
    )

    response = client.get(
        "/admin/users_history",
        params={
            "search": "test@example.com",
            "page": 1,
            "page_size": 10,
            "transaction_filter": "TOP_UP",
        },
    )

    assert response.status_code == 200
    body = response.json()
    assert len(body["history"]) == 1
    assert body["history"][0]["transaction_type"] == "TOP_UP"

    # The query string should arrive as typed keyword arguments.
    get_history_mock.assert_called_once_with(
        page=1,
        page_size=10,
        search="test@example.com",
        transaction_filter=prisma.enums.CreditTransactionType.TOP_UP,
    )

    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "adm_usr_hist_filt",
    )
|
||||
|
||||
|
||||
def test_get_user_history_empty_results(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """A search with no matches returns an empty history and zero totals."""
    empty_response = admin_model.UserHistoryResponse(
        history=[],
        pagination=Pagination(
            total_items=0,
            total_pages=0,
            current_page=1,
            page_size=20,
        ),
    )
    mocker.patch(
        "backend.server.v2.admin.credit_admin_routes.admin_get_user_history",
        return_value=empty_response,
    )

    response = client.get("/admin/users_history", params={"search": "nonexistent"})

    assert response.status_code == 200
    body = response.json()
    assert len(body["history"]) == 0
    assert body["pagination"]["total_items"] == 0

    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "adm_usr_hist_empty",
    )
|
||||
|
||||
|
||||
def test_add_credits_invalid_request() -> None:
    """Invalid payloads to /admin/add_credits are rejected with 422."""
    # Missing required fields entirely.
    assert client.post("/admin/add_credits", json={}).status_code == 422

    # Amount must be numeric.
    bad_amount = {
        "user_id": "test",
        "amount": "not_a_number",
        "comments": "test",
    }
    assert client.post("/admin/add_credits", json=bad_amount).status_code == 422

    # Comments are required.
    no_comments = {
        "user_id": "test",
        "amount": 100,
    }
    assert client.post("/admin/add_credits", json=no_comments).status_code == 422
|
||||
|
||||
|
||||
def test_admin_endpoints_require_admin_role(mocker: pytest_mock.MockFixture) -> None:
    """Without the admin override in place, admin endpoints reject requests.

    The module-level dependency overrides are cleared for the duration of
    this test and restored in a finally block, so a failing assertion
    cannot leak the cleared state into other tests in this module.
    """
    # Clear the admin override to test authorization.
    app.dependency_overrides.clear()
    try:
        # Make the real admin check fail outright.
        mocker.patch(
            "autogpt_libs.auth.requires_admin_user",
            side_effect=fastapi.HTTPException(
                status_code=403, detail="Admin access required"
            ),
        )

        # Test add_credits endpoint
        response = client.post(
            "/admin/add_credits",
            json={
                "user_id": "test",
                "amount": 100,
                "comments": "test",
            },
        )
        assert (
            response.status_code == 401
        )  # Auth middleware returns 401 when auth is disabled

        # Test users_history endpoint
        response = client.get("/admin/users_history")
        assert (
            response.status_code == 401
        )  # Auth middleware returns 401 when auth is disabled
    finally:
        # Restore the overrides regardless of the test outcome.
        app.dependency_overrides[autogpt_libs.auth.requires_admin_user] = (
            override_requires_admin_user
        )
        app.dependency_overrides[autogpt_libs.auth.depends.get_user_id] = (
            override_get_user_id
        )
|
||||
@@ -16,9 +16,11 @@ import backend.server.v2.store.media as store_media
|
||||
from backend.data import db
|
||||
from backend.data import graph as graph_db
|
||||
from backend.data.db import locked_transaction
|
||||
from backend.data.execution import get_graph_execution
|
||||
from backend.data.includes import library_agent_include
|
||||
from backend.integrations.creds_manager import IntegrationCredentialsManager
|
||||
from backend.integrations.webhooks.graph_lifecycle_hooks import on_graph_activate
|
||||
from backend.util.exceptions import NotFoundError
|
||||
from backend.util.settings import Config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -145,7 +147,7 @@ async def get_library_agent(id: str, user_id: str) -> library_model.LibraryAgent
|
||||
Get a specific agent from the user's library.
|
||||
|
||||
Args:
|
||||
library_agent_id: ID of the library agent to retrieve.
|
||||
id: ID of the library agent to retrieve.
|
||||
user_id: ID of the authenticated user.
|
||||
|
||||
Returns:
|
||||
@@ -275,7 +277,7 @@ async def create_library_agent(
|
||||
isCreatedByUser=(user_id == graph.user_id),
|
||||
useGraphIsActiveVersion=True,
|
||||
User={"connect": {"id": user_id}},
|
||||
# Creator={"connect": {"id": agent.userId}},
|
||||
# Creator={"connect": {"id": graph.user_id}},
|
||||
AgentGraph={
|
||||
"connect": {
|
||||
"graphVersionId": {"id": graph.id, "version": graph.version}
|
||||
@@ -438,11 +440,13 @@ async def add_store_agent_to_library(
|
||||
|
||||
# Check if user already has this agent
|
||||
existing_library_agent = (
|
||||
await prisma.models.LibraryAgent.prisma().find_first(
|
||||
await prisma.models.LibraryAgent.prisma().find_unique(
|
||||
where={
|
||||
"userId": user_id,
|
||||
"agentGraphId": graph.id,
|
||||
"agentGraphVersion": graph.version,
|
||||
"userId_agentGraphId_agentGraphVersion": {
|
||||
"userId": user_id,
|
||||
"agentGraphId": graph.id,
|
||||
"agentGraphVersion": graph.version,
|
||||
}
|
||||
},
|
||||
include={"AgentGraph": True},
|
||||
)
|
||||
@@ -450,13 +454,13 @@ async def add_store_agent_to_library(
|
||||
if existing_library_agent:
|
||||
if existing_library_agent.isDeleted:
|
||||
# Even if agent exists it needs to be marked as not deleted
|
||||
await set_is_deleted_for_library_agent(
|
||||
user_id, graph.id, graph.version, False
|
||||
await update_library_agent(
|
||||
existing_library_agent.id, user_id, is_deleted=False
|
||||
)
|
||||
else:
|
||||
logger.debug(
|
||||
f"User #{user_id} already has graph #{graph.id} "
|
||||
"in their library"
|
||||
f"v{graph.version} in their library"
|
||||
)
|
||||
return library_model.LibraryAgent.from_db(existing_library_agent)
|
||||
|
||||
@@ -464,8 +468,11 @@ async def add_store_agent_to_library(
|
||||
added_agent = await prisma.models.LibraryAgent.prisma().create(
|
||||
data=prisma.types.LibraryAgentCreateInput(
|
||||
userId=user_id,
|
||||
agentGraphId=graph.id,
|
||||
agentGraphVersion=graph.version,
|
||||
AgentGraph={
|
||||
"connect": {
|
||||
"graphVersionId": {"id": graph.id, "version": graph.version}
|
||||
}
|
||||
},
|
||||
isCreatedByUser=False,
|
||||
),
|
||||
include=library_agent_include(user_id),
|
||||
@@ -485,60 +492,22 @@ async def add_store_agent_to_library(
|
||||
raise store_exceptions.DatabaseError("Failed to add agent to library") from e
|
||||
|
||||
|
||||
async def set_is_deleted_for_library_agent(
|
||||
user_id: str, agent_id: str, agent_version: int, is_deleted: bool
|
||||
) -> None:
|
||||
"""
|
||||
Changes the isDeleted flag for a library agent.
|
||||
|
||||
Args:
|
||||
user_id: The user's library from which the agent is being removed.
|
||||
agent_id: The ID of the agent to remove.
|
||||
agent_version: The version of the agent to remove.
|
||||
is_deleted: Whether the agent is being marked as deleted.
|
||||
|
||||
Raises:
|
||||
DatabaseError: If there's an issue updating the Library
|
||||
"""
|
||||
logger.debug(
|
||||
f"Setting isDeleted={is_deleted} for agent {agent_id} v{agent_version} "
|
||||
f"in library for user {user_id}"
|
||||
)
|
||||
try:
|
||||
logger.warning(
|
||||
f"Setting isDeleted={is_deleted} for agent {agent_id} v{agent_version} in library for user {user_id}"
|
||||
)
|
||||
count = await prisma.models.LibraryAgent.prisma().update_many(
|
||||
where={
|
||||
"userId": user_id,
|
||||
"agentGraphId": agent_id,
|
||||
"agentGraphVersion": agent_version,
|
||||
},
|
||||
data={"isDeleted": is_deleted},
|
||||
)
|
||||
logger.warning(f"Updated {count} isDeleted library agents")
|
||||
except prisma.errors.PrismaError as e:
|
||||
logger.error(f"Database error setting agent isDeleted: {e}")
|
||||
raise store_exceptions.DatabaseError(
|
||||
"Failed to set agent isDeleted in library"
|
||||
) from e
|
||||
|
||||
|
||||
##############################################
|
||||
########### Presets DB Functions #############
|
||||
##############################################
|
||||
|
||||
|
||||
async def get_presets(
|
||||
user_id: str, page: int, page_size: int
|
||||
async def list_presets(
|
||||
user_id: str, page: int, page_size: int, graph_id: Optional[str] = None
|
||||
) -> library_model.LibraryAgentPresetResponse:
|
||||
"""
|
||||
Retrieves a paginated list of AgentPresets for the specified user.
|
||||
|
||||
Args:
|
||||
user_id: The user ID whose presets are being retrieved.
|
||||
page: The current page index (0-based or 1-based, clarify in your domain).
|
||||
page: The current page index (1-based).
|
||||
page_size: Number of items to retrieve per page.
|
||||
graph_id: Agent Graph ID to filter by.
|
||||
|
||||
Returns:
|
||||
A LibraryAgentPresetResponse containing a list of presets and pagination info.
|
||||
@@ -550,21 +519,24 @@ async def get_presets(
|
||||
f"Fetching presets for user #{user_id}, page={page}, page_size={page_size}"
|
||||
)
|
||||
|
||||
if page < 0 or page_size < 1:
|
||||
if page < 1 or page_size < 1:
|
||||
logger.warning(
|
||||
"Invalid pagination input: page=%d, page_size=%d", page, page_size
|
||||
)
|
||||
raise store_exceptions.DatabaseError("Invalid pagination parameters")
|
||||
|
||||
query_filter: prisma.types.AgentPresetWhereInput = {"userId": user_id}
|
||||
if graph_id:
|
||||
query_filter["agentGraphId"] = graph_id
|
||||
|
||||
try:
|
||||
presets_records = await prisma.models.AgentPreset.prisma().find_many(
|
||||
where={"userId": user_id},
|
||||
skip=page * page_size,
|
||||
where=query_filter,
|
||||
skip=(page - 1) * page_size,
|
||||
take=page_size,
|
||||
include={"InputPresets": True},
|
||||
)
|
||||
total_items = await prisma.models.AgentPreset.prisma().count(
|
||||
where={"userId": user_id}
|
||||
)
|
||||
total_items = await prisma.models.AgentPreset.prisma().count(where=query_filter)
|
||||
total_pages = (total_items + page_size - 1) // page_size
|
||||
|
||||
presets = [
|
||||
@@ -617,69 +589,142 @@ async def get_preset(
|
||||
raise store_exceptions.DatabaseError("Failed to fetch preset") from e
|
||||
|
||||
|
||||
async def upsert_preset(
|
||||
async def create_preset(
|
||||
user_id: str,
|
||||
preset: library_model.CreateLibraryAgentPresetRequest,
|
||||
preset_id: Optional[str] = None,
|
||||
preset: library_model.LibraryAgentPresetCreatable,
|
||||
) -> library_model.LibraryAgentPreset:
|
||||
"""
|
||||
Creates or updates an AgentPreset for a user.
|
||||
Creates a new AgentPreset for a user.
|
||||
|
||||
Args:
|
||||
user_id: The ID of the user creating/updating the preset.
|
||||
preset: The preset data used for creation or update.
|
||||
preset_id: An optional preset ID to update; if None, a new preset is created.
|
||||
user_id: The ID of the user creating the preset.
|
||||
preset: The preset data used for creation.
|
||||
|
||||
Returns:
|
||||
The newly created or updated LibraryAgentPreset.
|
||||
The newly created LibraryAgentPreset.
|
||||
|
||||
Raises:
|
||||
DatabaseError: If there's a database error in creating or updating the preset.
|
||||
DatabaseError: If there's a database error in creating the preset.
|
||||
"""
|
||||
logger.debug(
|
||||
f"Creating preset ({repr(preset.name)}) for user #{user_id}",
|
||||
)
|
||||
try:
|
||||
new_preset = await prisma.models.AgentPreset.prisma().create(
|
||||
data=prisma.types.AgentPresetCreateInput(
|
||||
userId=user_id,
|
||||
name=preset.name,
|
||||
description=preset.description,
|
||||
agentGraphId=preset.graph_id,
|
||||
agentGraphVersion=preset.graph_version,
|
||||
isActive=preset.is_active,
|
||||
InputPresets={
|
||||
"create": [
|
||||
prisma.types.AgentNodeExecutionInputOutputCreateWithoutRelationsInput( # noqa
|
||||
name=name, data=prisma.fields.Json(data)
|
||||
)
|
||||
for name, data in preset.inputs.items()
|
||||
]
|
||||
},
|
||||
),
|
||||
include={"InputPresets": True},
|
||||
)
|
||||
return library_model.LibraryAgentPreset.from_db(new_preset)
|
||||
except prisma.errors.PrismaError as e:
|
||||
logger.error(f"Database error creating preset: {e}")
|
||||
raise store_exceptions.DatabaseError("Failed to create preset") from e
|
||||
|
||||
|
||||
async def create_preset_from_graph_execution(
|
||||
user_id: str,
|
||||
create_request: library_model.LibraryAgentPresetCreatableFromGraphExecution,
|
||||
) -> library_model.LibraryAgentPreset:
|
||||
"""
|
||||
Creates a new AgentPreset from an AgentGraphExecution.
|
||||
|
||||
Params:
|
||||
user_id: The ID of the user creating the preset.
|
||||
create_request: The data used for creation.
|
||||
|
||||
Returns:
|
||||
The newly created LibraryAgentPreset.
|
||||
|
||||
Raises:
|
||||
DatabaseError: If there's a database error in creating the preset.
|
||||
"""
|
||||
graph_exec_id = create_request.graph_execution_id
|
||||
graph_execution = await get_graph_execution(user_id, graph_exec_id)
|
||||
if not graph_execution:
|
||||
raise NotFoundError(f"Graph execution #{graph_exec_id} not found")
|
||||
|
||||
logger.debug(
|
||||
f"Creating preset for user #{user_id} from graph execution #{graph_exec_id}",
|
||||
)
|
||||
return await create_preset(
|
||||
user_id=user_id,
|
||||
preset=library_model.LibraryAgentPresetCreatable(
|
||||
inputs=graph_execution.inputs,
|
||||
graph_id=graph_execution.graph_id,
|
||||
graph_version=graph_execution.graph_version,
|
||||
name=create_request.name,
|
||||
description=create_request.description,
|
||||
is_active=create_request.is_active,
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
async def update_preset(
|
||||
user_id: str,
|
||||
preset_id: str,
|
||||
preset: library_model.LibraryAgentPresetUpdatable,
|
||||
) -> library_model.LibraryAgentPreset:
|
||||
"""
|
||||
Updates an existing AgentPreset for a user.
|
||||
|
||||
Args:
|
||||
user_id: The ID of the user updating the preset.
|
||||
preset_id: The ID of the preset to update.
|
||||
preset: The preset data used for the update.
|
||||
|
||||
Returns:
|
||||
The updated LibraryAgentPreset.
|
||||
|
||||
Raises:
|
||||
DatabaseError: If there's a database error in updating the preset.
|
||||
ValueError: If attempting to update a non-existent preset.
|
||||
"""
|
||||
logger.debug(
|
||||
f"Upserting preset #{preset_id} ({repr(preset.name)}) for user #{user_id}",
|
||||
f"Updating preset #{preset_id} ({repr(preset.name)}) for user #{user_id}",
|
||||
)
|
||||
try:
|
||||
inputs = [
|
||||
prisma.types.AgentNodeExecutionInputOutputCreateWithoutRelationsInput(
|
||||
name=name, data=prisma.fields.Json(data)
|
||||
)
|
||||
for name, data in preset.inputs.items()
|
||||
]
|
||||
if preset_id:
|
||||
# Update existing preset
|
||||
updated = await prisma.models.AgentPreset.prisma().update(
|
||||
where={"id": preset_id},
|
||||
data={
|
||||
"name": preset.name,
|
||||
"description": preset.description,
|
||||
"isActive": preset.is_active,
|
||||
"InputPresets": {"create": inputs},
|
||||
},
|
||||
include={"InputPresets": True},
|
||||
)
|
||||
if not updated:
|
||||
raise ValueError(f"AgentPreset #{preset_id} not found")
|
||||
return library_model.LibraryAgentPreset.from_db(updated)
|
||||
else:
|
||||
# Create new preset
|
||||
new_preset = await prisma.models.AgentPreset.prisma().create(
|
||||
data=prisma.types.AgentPresetCreateInput(
|
||||
userId=user_id,
|
||||
name=preset.name,
|
||||
description=preset.description,
|
||||
agentGraphId=preset.graph_id,
|
||||
agentGraphVersion=preset.graph_version,
|
||||
isActive=preset.is_active,
|
||||
InputPresets={"create": inputs},
|
||||
),
|
||||
include={"InputPresets": True},
|
||||
)
|
||||
return library_model.LibraryAgentPreset.from_db(new_preset)
|
||||
update_data: prisma.types.AgentPresetUpdateInput = {}
|
||||
if preset.name:
|
||||
update_data["name"] = preset.name
|
||||
if preset.description:
|
||||
update_data["description"] = preset.description
|
||||
if preset.inputs:
|
||||
update_data["InputPresets"] = {
|
||||
"create": [
|
||||
prisma.types.AgentNodeExecutionInputOutputCreateWithoutRelationsInput( # noqa
|
||||
name=name, data=prisma.fields.Json(data)
|
||||
)
|
||||
for name, data in preset.inputs.items()
|
||||
]
|
||||
}
|
||||
if preset.is_active:
|
||||
update_data["isActive"] = preset.is_active
|
||||
|
||||
updated = await prisma.models.AgentPreset.prisma().update(
|
||||
where={"id": preset_id},
|
||||
data=update_data,
|
||||
include={"InputPresets": True},
|
||||
)
|
||||
if not updated:
|
||||
raise ValueError(f"AgentPreset #{preset_id} not found")
|
||||
return library_model.LibraryAgentPreset.from_db(updated)
|
||||
except prisma.errors.PrismaError as e:
|
||||
logger.error(f"Database error upserting preset: {e}")
|
||||
raise store_exceptions.DatabaseError("Failed to create preset") from e
|
||||
logger.error(f"Database error updating preset: {e}")
|
||||
raise store_exceptions.DatabaseError("Failed to update preset") from e
|
||||
|
||||
|
||||
async def delete_preset(user_id: str, preset_id: str) -> None:
|
||||
|
||||
@@ -3,6 +3,7 @@ from datetime import datetime
|
||||
import prisma.enums
|
||||
import prisma.errors
|
||||
import prisma.models
|
||||
import prisma.types
|
||||
import pytest
|
||||
|
||||
import backend.server.v2.library.db as db
|
||||
@@ -84,6 +85,11 @@ async def test_get_library_agents(mocker):
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_add_agent_to_library(mocker):
|
||||
await connect()
|
||||
|
||||
# Mock the transaction context
|
||||
mock_transaction = mocker.patch("backend.server.v2.library.db.locked_transaction")
|
||||
mock_transaction.return_value.__aenter__ = mocker.AsyncMock(return_value=None)
|
||||
mock_transaction.return_value.__aexit__ = mocker.AsyncMock(return_value=None)
|
||||
# Mock data
|
||||
mock_store_listing_data = prisma.models.StoreListingVersion(
|
||||
id="version123",
|
||||
@@ -142,6 +148,10 @@ async def test_add_agent_to_library(mocker):
|
||||
return_value=mock_library_agent_data
|
||||
)
|
||||
|
||||
# Mock the model conversion
|
||||
mock_from_db = mocker.patch("backend.server.v2.library.model.LibraryAgent.from_db")
|
||||
mock_from_db.return_value = mocker.Mock()
|
||||
|
||||
# Call function
|
||||
await db.add_store_agent_to_library("version123", "test-user")
|
||||
|
||||
|
||||
@@ -168,27 +168,62 @@ class LibraryAgentResponse(pydantic.BaseModel):
|
||||
pagination: server_model.Pagination
|
||||
|
||||
|
||||
class LibraryAgentPreset(pydantic.BaseModel):
|
||||
class LibraryAgentPresetCreatable(pydantic.BaseModel):
|
||||
"""
|
||||
Request model used when creating a new preset for a library agent.
|
||||
"""
|
||||
|
||||
graph_id: str
|
||||
graph_version: int
|
||||
|
||||
inputs: block_model.BlockInput
|
||||
|
||||
name: str
|
||||
description: str
|
||||
|
||||
is_active: bool = True
|
||||
|
||||
|
||||
class LibraryAgentPresetCreatableFromGraphExecution(pydantic.BaseModel):
|
||||
"""
|
||||
Request model used when creating a new preset for a library agent.
|
||||
"""
|
||||
|
||||
graph_execution_id: str
|
||||
|
||||
name: str
|
||||
description: str
|
||||
|
||||
is_active: bool = True
|
||||
|
||||
|
||||
class LibraryAgentPresetUpdatable(pydantic.BaseModel):
|
||||
"""
|
||||
Request model used when updating a preset for a library agent.
|
||||
"""
|
||||
|
||||
inputs: Optional[block_model.BlockInput] = None
|
||||
|
||||
name: Optional[str] = None
|
||||
description: Optional[str] = None
|
||||
|
||||
is_active: Optional[bool] = None
|
||||
|
||||
|
||||
class LibraryAgentPreset(LibraryAgentPresetCreatable):
|
||||
"""Represents a preset configuration for a library agent."""
|
||||
|
||||
id: str
|
||||
updated_at: datetime.datetime
|
||||
|
||||
graph_id: str
|
||||
graph_version: int
|
||||
|
||||
name: str
|
||||
description: str
|
||||
|
||||
is_active: bool
|
||||
|
||||
inputs: block_model.BlockInput
|
||||
|
||||
@classmethod
|
||||
def from_db(cls, preset: prisma.models.AgentPreset) -> "LibraryAgentPreset":
|
||||
if preset.InputPresets is None:
|
||||
raise ValueError("Input values must be included in object")
|
||||
|
||||
input_data: block_model.BlockInput = {}
|
||||
|
||||
for preset_input in preset.InputPresets or []:
|
||||
for preset_input in preset.InputPresets:
|
||||
input_data[preset_input.name] = preset_input.data
|
||||
|
||||
return cls(
|
||||
@@ -210,19 +245,6 @@ class LibraryAgentPresetResponse(pydantic.BaseModel):
|
||||
pagination: server_model.Pagination
|
||||
|
||||
|
||||
class CreateLibraryAgentPresetRequest(pydantic.BaseModel):
|
||||
"""
|
||||
Request model used when creating a new preset for a library agent.
|
||||
"""
|
||||
|
||||
name: str
|
||||
description: str
|
||||
inputs: block_model.BlockInput
|
||||
graph_id: str
|
||||
graph_version: int
|
||||
is_active: bool
|
||||
|
||||
|
||||
class LibraryAgentFilter(str, Enum):
|
||||
"""Possible filters for searching library agents."""
|
||||
|
||||
|
||||
@@ -70,10 +70,10 @@ async def list_library_agents(
|
||||
page_size=page_size,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Could not fetch library agents: {e}")
|
||||
logger.exception("Listing library agents failed for user %s: %s", user_id, e)
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to get library agents",
|
||||
detail={"message": str(e), "hint": "Inspect database connectivity."},
|
||||
) from e
|
||||
|
||||
|
||||
@@ -102,10 +102,17 @@ async def get_library_agent_by_store_listing_version_id(
|
||||
store_listing_version_id, user_id
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Could not fetch library agent from store version ID: {e}")
|
||||
logger.exception(
|
||||
"Retrieving library agent by store version failed for user %s: %s",
|
||||
user_id,
|
||||
e,
|
||||
)
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to add agent to library",
|
||||
detail={
|
||||
"message": str(e),
|
||||
"hint": "Check if the store listing ID is valid.",
|
||||
},
|
||||
) from e
|
||||
|
||||
|
||||
@@ -143,22 +150,31 @@ async def add_marketplace_agent_to_library(
|
||||
)
|
||||
|
||||
except store_exceptions.AgentNotFoundError:
|
||||
logger.warning(f"Agent not found: {store_listing_version_id}")
|
||||
logger.warning(
|
||||
"Store listing version %s not found when adding to library",
|
||||
store_listing_version_id,
|
||||
)
|
||||
raise HTTPException(
|
||||
status_code=404,
|
||||
detail=f"Store listing version {store_listing_version_id} not found",
|
||||
detail={
|
||||
"message": f"Store listing version {store_listing_version_id} not found",
|
||||
"hint": "Confirm the ID provided.",
|
||||
},
|
||||
)
|
||||
except store_exceptions.DatabaseError as e:
|
||||
logger.error(f"Database error occurred whilst adding agent to library: {e}")
|
||||
logger.exception("Database error whilst adding agent to library: %s", e)
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to add agent to library",
|
||||
detail={"message": str(e), "hint": "Inspect DB logs for details."},
|
||||
) from e
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected error while adding agent: {e}")
|
||||
logger.exception("Unexpected error while adding agent to library: %s", e)
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to add agent to library",
|
||||
detail={
|
||||
"message": str(e),
|
||||
"hint": "Check server logs for more information.",
|
||||
},
|
||||
) from e
|
||||
|
||||
|
||||
@@ -203,16 +219,16 @@ async def update_library_agent(
|
||||
content={"message": "Agent updated successfully"},
|
||||
)
|
||||
except store_exceptions.DatabaseError as e:
|
||||
logger.exception(f"Database error while updating library agent: {e}")
|
||||
logger.exception("Database error while updating library agent: %s", e)
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to update library agent",
|
||||
detail={"message": str(e), "hint": "Verify DB connection."},
|
||||
) from e
|
||||
except Exception as e:
|
||||
logger.exception(f"Unexpected error while updating library agent: {e}")
|
||||
logger.exception("Unexpected error while updating library agent: %s", e)
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to update library agent",
|
||||
detail={"message": str(e), "hint": "Check server logs."},
|
||||
) from e
|
||||
|
||||
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
import logging
|
||||
from typing import Annotated, Any
|
||||
from typing import Annotated, Any, Optional
|
||||
|
||||
import autogpt_libs.auth as autogpt_auth_lib
|
||||
from fastapi import APIRouter, Body, Depends, HTTPException, status
|
||||
from fastapi import APIRouter, Body, Depends, HTTPException, Query, status
|
||||
|
||||
import backend.server.v2.library.db as db
|
||||
import backend.server.v2.library.model as models
|
||||
from backend.executor.utils import add_graph_execution_async
|
||||
from backend.util.exceptions import NotFoundError
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -18,10 +19,13 @@ router = APIRouter()
|
||||
summary="List presets",
|
||||
description="Retrieve a paginated list of presets for the current user.",
|
||||
)
|
||||
async def get_presets(
|
||||
async def list_presets(
|
||||
user_id: str = Depends(autogpt_auth_lib.depends.get_user_id),
|
||||
page: int = 1,
|
||||
page_size: int = 10,
|
||||
page: int = Query(default=1, ge=1),
|
||||
page_size: int = Query(default=10, ge=1),
|
||||
graph_id: Optional[str] = Query(
|
||||
description="Allows to filter presets by a specific agent graph"
|
||||
),
|
||||
) -> models.LibraryAgentPresetResponse:
|
||||
"""
|
||||
Retrieve a paginated list of presets for the current user.
|
||||
@@ -30,17 +34,26 @@ async def get_presets(
|
||||
user_id (str): ID of the authenticated user.
|
||||
page (int): Page number for pagination.
|
||||
page_size (int): Number of items per page.
|
||||
graph_id: Allows to filter presets by a specific agent graph.
|
||||
|
||||
Returns:
|
||||
models.LibraryAgentPresetResponse: A response containing the list of presets.
|
||||
"""
|
||||
try:
|
||||
return await db.get_presets(user_id, page, page_size)
|
||||
return await db.list_presets(
|
||||
user_id=user_id,
|
||||
graph_id=graph_id,
|
||||
page=page,
|
||||
page_size=page_size,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.exception(f"Exception occurred while getting presets: {e}")
|
||||
logger.exception("Failed to list presets for user %s: %s", user_id, e)
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to get presets",
|
||||
detail={
|
||||
"message": str(e),
|
||||
"hint": "Ensure the presets DB table is accessible.",
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@@ -75,10 +88,12 @@ async def get_preset(
|
||||
)
|
||||
return preset
|
||||
except Exception as e:
|
||||
logger.exception(f"Exception occurred whilst getting preset: {e}")
|
||||
logger.exception(
|
||||
"Error retrieving preset %s for user %s: %s", preset_id, user_id, e
|
||||
)
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to get preset",
|
||||
detail={"message": str(e), "hint": "Validate preset ID and retry."},
|
||||
)
|
||||
|
||||
|
||||
@@ -88,14 +103,17 @@ async def get_preset(
|
||||
description="Create a new preset for the current user.",
|
||||
)
|
||||
async def create_preset(
|
||||
preset: models.CreateLibraryAgentPresetRequest,
|
||||
preset: (
|
||||
models.LibraryAgentPresetCreatable
|
||||
| models.LibraryAgentPresetCreatableFromGraphExecution
|
||||
),
|
||||
user_id: str = Depends(autogpt_auth_lib.depends.get_user_id),
|
||||
) -> models.LibraryAgentPreset:
|
||||
"""
|
||||
Create a new library agent preset. Automatically corrects node_input format if needed.
|
||||
|
||||
Args:
|
||||
preset (models.CreateLibraryAgentPresetRequest): The preset data to create.
|
||||
preset (models.LibraryAgentPresetCreatable): The preset data to create.
|
||||
user_id (str): ID of the authenticated user.
|
||||
|
||||
Returns:
|
||||
@@ -105,31 +123,36 @@ async def create_preset(
|
||||
HTTPException: If an error occurs while creating the preset.
|
||||
"""
|
||||
try:
|
||||
return await db.upsert_preset(user_id, preset)
|
||||
if isinstance(preset, models.LibraryAgentPresetCreatable):
|
||||
return await db.create_preset(user_id, preset)
|
||||
else:
|
||||
return await db.create_preset_from_graph_execution(user_id, preset)
|
||||
except NotFoundError as e:
|
||||
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(e))
|
||||
except Exception as e:
|
||||
logger.exception(f"Exception occurred while creating preset: {e}")
|
||||
logger.exception("Preset creation failed for user %s: %s", user_id, e)
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to create preset",
|
||||
detail={"message": str(e), "hint": "Check preset payload format."},
|
||||
)
|
||||
|
||||
|
||||
@router.put(
|
||||
@router.patch(
|
||||
"/presets/{preset_id}",
|
||||
summary="Update an existing preset",
|
||||
description="Update an existing preset by its ID.",
|
||||
)
|
||||
async def update_preset(
|
||||
preset_id: str,
|
||||
preset: models.CreateLibraryAgentPresetRequest,
|
||||
preset: models.LibraryAgentPresetUpdatable,
|
||||
user_id: str = Depends(autogpt_auth_lib.depends.get_user_id),
|
||||
) -> models.LibraryAgentPreset:
|
||||
"""
|
||||
Update an existing library agent preset. If the preset doesn't exist, it may be created.
|
||||
Update an existing library agent preset.
|
||||
|
||||
Args:
|
||||
preset_id (str): ID of the preset to update.
|
||||
preset (models.CreateLibraryAgentPresetRequest): The preset data to update.
|
||||
preset (models.LibraryAgentPresetUpdatable): The preset data to update.
|
||||
user_id (str): ID of the authenticated user.
|
||||
|
||||
Returns:
|
||||
@@ -139,12 +162,14 @@ async def update_preset(
|
||||
HTTPException: If an error occurs while updating the preset.
|
||||
"""
|
||||
try:
|
||||
return await db.upsert_preset(user_id, preset, preset_id)
|
||||
return await db.update_preset(
|
||||
user_id=user_id, preset_id=preset_id, preset=preset
|
||||
)
|
||||
except Exception as e:
|
||||
logger.exception(f"Exception occurred whilst updating preset: {e}")
|
||||
logger.exception("Preset update failed for user %s: %s", user_id, e)
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to update preset",
|
||||
detail={"message": str(e), "hint": "Check preset data and try again."},
|
||||
)
|
||||
|
||||
|
||||
@@ -171,10 +196,12 @@ async def delete_preset(
|
||||
try:
|
||||
await db.delete_preset(user_id, preset_id)
|
||||
except Exception as e:
|
||||
logger.exception(f"Exception occurred whilst deleting preset: {e}")
|
||||
logger.exception(
|
||||
"Error deleting preset %s for user %s: %s", preset_id, user_id, e
|
||||
)
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to delete preset",
|
||||
detail={"message": str(e), "hint": "Ensure preset exists before deleting."},
|
||||
)
|
||||
|
||||
|
||||
@@ -232,8 +259,11 @@ async def execute_preset(
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.exception(f"Exception occurred while executing preset: {e}")
|
||||
logger.exception("Preset execution failed for user %s: %s", user_id, e)
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail=str(e),
|
||||
detail={
|
||||
"message": str(e),
|
||||
"hint": "Review preset configuration and graph ID.",
|
||||
},
|
||||
)
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
import datetime
|
||||
import json
|
||||
|
||||
import autogpt_libs.auth as autogpt_auth_lib
|
||||
import fastapi.testclient
|
||||
import pytest
|
||||
import pytest_mock
|
||||
from pytest_snapshot.plugin import Snapshot
|
||||
|
||||
import backend.server.model as server_model
|
||||
import backend.server.v2.library.model as library_model
|
||||
@@ -14,6 +16,8 @@ app.include_router(library_router)
|
||||
|
||||
client = fastapi.testclient.TestClient(app)
|
||||
|
||||
FIXED_NOW = datetime.datetime(2023, 1, 1, 0, 0, 0)
|
||||
|
||||
|
||||
def override_auth_middleware():
|
||||
"""Override auth middleware for testing"""
|
||||
@@ -30,7 +34,10 @@ app.dependency_overrides[autogpt_auth_lib.depends.get_user_id] = override_get_us
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_get_library_agents_success(mocker: pytest_mock.MockFixture):
|
||||
async def test_get_library_agents_success(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
snapshot: Snapshot,
|
||||
) -> None:
|
||||
mocked_value = library_model.LibraryAgentResponse(
|
||||
agents=[
|
||||
library_model.LibraryAgent(
|
||||
@@ -82,6 +89,10 @@ async def test_get_library_agents_success(mocker: pytest_mock.MockFixture):
|
||||
assert data.agents[0].can_access_graph is True
|
||||
assert data.agents[1].graph_id == "test-agent-2"
|
||||
assert data.agents[1].can_access_graph is False
|
||||
|
||||
snapshot.snapshot_dir = "snapshots"
|
||||
snapshot.assert_match(json.dumps(response.json(), indent=2), "lib_agts_search")
|
||||
|
||||
mock_db_call.assert_called_once_with(
|
||||
user_id="test-user-id",
|
||||
search_term="test",
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import logging
|
||||
|
||||
from autogpt_libs.auth.middleware import auth_middleware
|
||||
from fastapi import APIRouter, Depends
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
|
||||
from backend.server.utils import get_user_id
|
||||
|
||||
@@ -23,4 +23,12 @@ async def proxy_otto_request(
|
||||
Proxy requests to Otto API while adding necessary security headers and logging.
|
||||
Requires an authenticated user.
|
||||
"""
|
||||
return await OttoService.ask(request, user_id)
|
||||
logger.debug("Forwarding request to Otto for user %s", user_id)
|
||||
try:
|
||||
return await OttoService.ask(request, user_id)
|
||||
except Exception as e:
|
||||
logger.exception("Otto request failed for user %s: %s", user_id, e)
|
||||
raise HTTPException(
|
||||
status_code=502,
|
||||
detail={"message": str(e), "hint": "Check Otto service status."},
|
||||
)
|
||||
|
||||
271
autogpt_platform/backend/backend/server/v2/otto/routes_test.py
Normal file
271
autogpt_platform/backend/backend/server/v2/otto/routes_test.py
Normal file
@@ -0,0 +1,271 @@
|
||||
import json
|
||||
|
||||
import autogpt_libs.auth.depends
|
||||
import autogpt_libs.auth.middleware
|
||||
import fastapi
|
||||
import fastapi.testclient
|
||||
import pytest_mock
|
||||
from pytest_snapshot.plugin import Snapshot
|
||||
|
||||
import backend.server.v2.otto.models as otto_models
|
||||
import backend.server.v2.otto.routes as otto_routes
|
||||
from backend.server.utils import get_user_id
|
||||
from backend.server.v2.otto.service import OttoService
|
||||
|
||||
app = fastapi.FastAPI()
|
||||
app.include_router(otto_routes.router)
|
||||
|
||||
client = fastapi.testclient.TestClient(app)
|
||||
|
||||
|
||||
def override_auth_middleware():
|
||||
"""Override auth middleware for testing"""
|
||||
return {"sub": "test-user-id"}
|
||||
|
||||
|
||||
def override_get_user_id():
|
||||
"""Override get_user_id for testing"""
|
||||
return "test-user-id"
|
||||
|
||||
|
||||
app.dependency_overrides[autogpt_libs.auth.middleware.auth_middleware] = (
|
||||
override_auth_middleware
|
||||
)
|
||||
app.dependency_overrides[get_user_id] = override_get_user_id
|
||||
|
||||
|
||||
def test_ask_otto_success(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
snapshot: Snapshot,
|
||||
) -> None:
|
||||
"""Test successful Otto API request"""
|
||||
# Mock the OttoService.ask method
|
||||
mock_response = otto_models.ApiResponse(
|
||||
answer="This is Otto's response to your query.",
|
||||
documents=[
|
||||
otto_models.Document(
|
||||
url="https://example.com/doc1",
|
||||
relevance_score=0.95,
|
||||
),
|
||||
otto_models.Document(
|
||||
url="https://example.com/doc2",
|
||||
relevance_score=0.87,
|
||||
),
|
||||
],
|
||||
success=True,
|
||||
)
|
||||
|
||||
mocker.patch.object(
|
||||
OttoService,
|
||||
"ask",
|
||||
return_value=mock_response,
|
||||
)
|
||||
|
||||
request_data = {
|
||||
"query": "How do I create an agent?",
|
||||
"conversation_history": [
|
||||
{
|
||||
"query": "What is AutoGPT?",
|
||||
"response": "AutoGPT is an AI agent platform.",
|
||||
}
|
||||
],
|
||||
"message_id": "msg_123",
|
||||
"include_graph_data": False,
|
||||
}
|
||||
|
||||
response = client.post("/ask", json=request_data)
|
||||
|
||||
assert response.status_code == 200
|
||||
response_data = response.json()
|
||||
assert response_data["success"] is True
|
||||
assert response_data["answer"] == "This is Otto's response to your query."
|
||||
assert len(response_data["documents"]) == 2
|
||||
|
||||
# Snapshot test the response
|
||||
snapshot.snapshot_dir = "snapshots"
|
||||
snapshot.assert_match(
|
||||
json.dumps(response_data, indent=2, sort_keys=True),
|
||||
"otto_ok",
|
||||
)
|
||||
|
||||
|
||||
def test_ask_otto_with_graph_data(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
snapshot: Snapshot,
|
||||
) -> None:
|
||||
"""Test Otto API request with graph data included"""
|
||||
# Mock the OttoService.ask method
|
||||
mock_response = otto_models.ApiResponse(
|
||||
answer="Here's information about your graph.",
|
||||
documents=[
|
||||
otto_models.Document(
|
||||
url="https://example.com/graph-doc",
|
||||
relevance_score=0.92,
|
||||
),
|
||||
],
|
||||
success=True,
|
||||
)
|
||||
|
||||
mocker.patch.object(
|
||||
OttoService,
|
||||
"ask",
|
||||
return_value=mock_response,
|
||||
)
|
||||
|
||||
request_data = {
|
||||
"query": "Tell me about my graph",
|
||||
"conversation_history": [],
|
||||
"message_id": "msg_456",
|
||||
"include_graph_data": True,
|
||||
"graph_id": "graph_123",
|
||||
}
|
||||
|
||||
response = client.post("/ask", json=request_data)
|
||||
|
||||
assert response.status_code == 200
|
||||
response_data = response.json()
|
||||
assert response_data["success"] is True
|
||||
|
||||
# Snapshot test the response
|
||||
snapshot.snapshot_dir = "snapshots"
|
||||
snapshot.assert_match(
|
||||
json.dumps(response_data, indent=2, sort_keys=True),
|
||||
"otto_grph",
|
||||
)
|
||||
|
||||
|
||||
def test_ask_otto_empty_conversation(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
snapshot: Snapshot,
|
||||
) -> None:
|
||||
"""Test Otto API request with empty conversation history"""
|
||||
# Mock the OttoService.ask method
|
||||
mock_response = otto_models.ApiResponse(
|
||||
answer="Welcome! How can I help you?",
|
||||
documents=[],
|
||||
success=True,
|
||||
)
|
||||
|
||||
mocker.patch.object(
|
||||
OttoService,
|
||||
"ask",
|
||||
return_value=mock_response,
|
||||
)
|
||||
|
||||
request_data = {
|
||||
"query": "Hello",
|
||||
"conversation_history": [],
|
||||
"message_id": "msg_789",
|
||||
"include_graph_data": False,
|
||||
}
|
||||
|
||||
response = client.post("/ask", json=request_data)
|
||||
|
||||
assert response.status_code == 200
|
||||
response_data = response.json()
|
||||
assert response_data["success"] is True
|
||||
assert len(response_data["documents"]) == 0
|
||||
|
||||
# Snapshot test the response
|
||||
snapshot.snapshot_dir = "snapshots"
|
||||
snapshot.assert_match(
|
||||
json.dumps(response_data, indent=2, sort_keys=True),
|
||||
"otto_empty",
|
||||
)
|
||||
|
||||
|
||||
def test_ask_otto_service_error(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
snapshot: Snapshot,
|
||||
) -> None:
|
||||
"""Test Otto API request when service returns error"""
|
||||
# Mock the OttoService.ask method to return failure
|
||||
mock_response = otto_models.ApiResponse(
|
||||
answer="An error occurred while processing your request.",
|
||||
documents=[],
|
||||
success=False,
|
||||
)
|
||||
|
||||
mocker.patch.object(
|
||||
OttoService,
|
||||
"ask",
|
||||
return_value=mock_response,
|
||||
)
|
||||
|
||||
request_data = {
|
||||
"query": "Test query",
|
||||
"conversation_history": [],
|
||||
"message_id": "msg_error",
|
||||
"include_graph_data": False,
|
||||
}
|
||||
|
||||
response = client.post("/ask", json=request_data)
|
||||
|
||||
assert response.status_code == 200
|
||||
response_data = response.json()
|
||||
assert response_data["success"] is False
|
||||
|
||||
# Snapshot test the response
|
||||
snapshot.snapshot_dir = "snapshots"
|
||||
snapshot.assert_match(
|
||||
json.dumps(response_data, indent=2, sort_keys=True),
|
||||
"otto_err",
|
||||
)
|
||||
|
||||
|
||||
def test_ask_otto_invalid_request() -> None:
|
||||
"""Test Otto API with invalid request data"""
|
||||
# Missing required fields
|
||||
response = client.post("/ask", json={})
|
||||
assert response.status_code == 422
|
||||
|
||||
# Invalid conversation history format
|
||||
response = client.post(
|
||||
"/ask",
|
||||
json={
|
||||
"query": "Test",
|
||||
"conversation_history": "not a list",
|
||||
"message_id": "123",
|
||||
},
|
||||
)
|
||||
assert response.status_code == 422
|
||||
|
||||
# Missing message_id
|
||||
response = client.post(
|
||||
"/ask",
|
||||
json={
|
||||
"query": "Test",
|
||||
"conversation_history": [],
|
||||
},
|
||||
)
|
||||
assert response.status_code == 422
|
||||
|
||||
|
||||
def test_ask_otto_unauthenticated(mocker: pytest_mock.MockFixture) -> None:
|
||||
"""Test Otto API request without authentication"""
|
||||
# Remove the auth override to test unauthenticated access
|
||||
app.dependency_overrides.clear()
|
||||
|
||||
# Mock auth_middleware to raise an exception
|
||||
mocker.patch(
|
||||
"autogpt_libs.auth.middleware.auth_middleware",
|
||||
side_effect=fastapi.HTTPException(status_code=401, detail="Unauthorized"),
|
||||
)
|
||||
|
||||
request_data = {
|
||||
"query": "Test",
|
||||
"conversation_history": [],
|
||||
"message_id": "123",
|
||||
}
|
||||
|
||||
response = client.post("/ask", json=request_data)
|
||||
# When auth is disabled and Otto API URL is not configured, we get 503
|
||||
assert response.status_code == 503
|
||||
|
||||
# Restore the override
|
||||
app.dependency_overrides[autogpt_libs.auth.middleware.auth_middleware] = (
|
||||
override_auth_middleware
|
||||
)
|
||||
app.dependency_overrides[autogpt_libs.auth.depends.get_user_id] = (
|
||||
override_get_user_id
|
||||
)
|
||||
@@ -48,11 +48,14 @@ async def get_profile(
|
||||
content={"detail": "Profile not found"},
|
||||
)
|
||||
return profile
|
||||
except Exception:
|
||||
logger.exception("Exception occurred whilst getting user profile")
|
||||
except Exception as e:
|
||||
logger.exception("Failed to fetch user profile for %s: %s", user_id, e)
|
||||
return fastapi.responses.JSONResponse(
|
||||
status_code=500,
|
||||
content={"detail": "An error occurred while retrieving the user profile"},
|
||||
content={
|
||||
"detail": "Failed to retrieve user profile",
|
||||
"hint": "Check database connection.",
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@@ -86,11 +89,14 @@ async def update_or_create_profile(
|
||||
user_id=user_id, profile=profile
|
||||
)
|
||||
return updated_profile
|
||||
except Exception:
|
||||
logger.exception("Exception occurred whilst updating profile")
|
||||
except Exception as e:
|
||||
logger.exception("Failed to update profile for user %s: %s", user_id, e)
|
||||
return fastapi.responses.JSONResponse(
|
||||
status_code=500,
|
||||
content={"detail": "An error occurred while updating the user profile"},
|
||||
content={
|
||||
"detail": "Failed to update user profile",
|
||||
"hint": "Validate request data.",
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@@ -160,11 +166,14 @@ async def get_agents(
|
||||
page_size=page_size,
|
||||
)
|
||||
return agents
|
||||
except Exception:
|
||||
logger.exception("Exception occured whilst getting store agents")
|
||||
except Exception as e:
|
||||
logger.exception("Failed to retrieve store agents: %s", e)
|
||||
return fastapi.responses.JSONResponse(
|
||||
status_code=500,
|
||||
content={"detail": "An error occurred while retrieving the store agents"},
|
||||
content={
|
||||
"detail": "Failed to retrieve store agents",
|
||||
"hint": "Check database or search parameters.",
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import datetime
|
||||
import json
|
||||
|
||||
import autogpt_libs.auth.depends
|
||||
import autogpt_libs.auth.middleware
|
||||
@@ -6,22 +7,27 @@ import fastapi
|
||||
import fastapi.testclient
|
||||
import prisma.enums
|
||||
import pytest_mock
|
||||
from pytest_snapshot.plugin import Snapshot
|
||||
|
||||
import backend.server.v2.store.model
|
||||
import backend.server.v2.store.routes
|
||||
|
||||
# Using a fixed timestamp for reproducible tests
|
||||
# 2023 date is intentionally used to ensure tests work regardless of current year
|
||||
FIXED_NOW = datetime.datetime(2023, 1, 1, 0, 0, 0)
|
||||
|
||||
app = fastapi.FastAPI()
|
||||
app.include_router(backend.server.v2.store.routes.router)
|
||||
|
||||
client = fastapi.testclient.TestClient(app)
|
||||
|
||||
|
||||
def override_auth_middleware():
|
||||
def override_auth_middleware() -> dict[str, str]:
|
||||
"""Override auth middleware for testing"""
|
||||
return {"sub": "test-user-id"}
|
||||
|
||||
|
||||
def override_get_user_id():
|
||||
def override_get_user_id() -> str:
|
||||
"""Override get_user_id for testing"""
|
||||
return "test-user-id"
|
||||
|
||||
@@ -32,7 +38,10 @@ app.dependency_overrides[autogpt_libs.auth.middleware.auth_middleware] = (
|
||||
app.dependency_overrides[autogpt_libs.auth.depends.get_user_id] = override_get_user_id
|
||||
|
||||
|
||||
def test_get_agents_defaults(mocker: pytest_mock.MockFixture):
|
||||
def test_get_agents_defaults(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
snapshot: Snapshot,
|
||||
) -> None:
|
||||
mocked_value = backend.server.v2.store.model.StoreAgentsResponse(
|
||||
agents=[],
|
||||
pagination=backend.server.v2.store.model.Pagination(
|
||||
@@ -52,6 +61,9 @@ def test_get_agents_defaults(mocker: pytest_mock.MockFixture):
|
||||
)
|
||||
assert data.pagination.total_pages == 0
|
||||
assert data.agents == []
|
||||
|
||||
snapshot.snapshot_dir = "snapshots"
|
||||
snapshot.assert_match(json.dumps(response.json(), indent=2), "def_agts")
|
||||
mock_db_call.assert_called_once_with(
|
||||
featured=False,
|
||||
creator=None,
|
||||
@@ -63,7 +75,10 @@ def test_get_agents_defaults(mocker: pytest_mock.MockFixture):
|
||||
)
|
||||
|
||||
|
||||
def test_get_agents_featured(mocker: pytest_mock.MockFixture):
|
||||
def test_get_agents_featured(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
snapshot: Snapshot,
|
||||
) -> None:
|
||||
mocked_value = backend.server.v2.store.model.StoreAgentsResponse(
|
||||
agents=[
|
||||
backend.server.v2.store.model.StoreAgent(
|
||||
@@ -94,6 +109,8 @@ def test_get_agents_featured(mocker: pytest_mock.MockFixture):
|
||||
)
|
||||
assert len(data.agents) == 1
|
||||
assert data.agents[0].slug == "featured-agent"
|
||||
snapshot.snapshot_dir = "snapshots"
|
||||
snapshot.assert_match(json.dumps(response.json(), indent=2), "feat_agts")
|
||||
mock_db_call.assert_called_once_with(
|
||||
featured=True,
|
||||
creator=None,
|
||||
@@ -105,7 +122,10 @@ def test_get_agents_featured(mocker: pytest_mock.MockFixture):
|
||||
)
|
||||
|
||||
|
||||
def test_get_agents_by_creator(mocker: pytest_mock.MockFixture):
|
||||
def test_get_agents_by_creator(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
snapshot: Snapshot,
|
||||
) -> None:
|
||||
mocked_value = backend.server.v2.store.model.StoreAgentsResponse(
|
||||
agents=[
|
||||
backend.server.v2.store.model.StoreAgent(
|
||||
@@ -136,6 +156,8 @@ def test_get_agents_by_creator(mocker: pytest_mock.MockFixture):
|
||||
)
|
||||
assert len(data.agents) == 1
|
||||
assert data.agents[0].creator == "specific-creator"
|
||||
snapshot.snapshot_dir = "snapshots"
|
||||
snapshot.assert_match(json.dumps(response.json(), indent=2), "agts_by_creator")
|
||||
mock_db_call.assert_called_once_with(
|
||||
featured=False,
|
||||
creator="specific-creator",
|
||||
@@ -147,7 +169,10 @@ def test_get_agents_by_creator(mocker: pytest_mock.MockFixture):
|
||||
)
|
||||
|
||||
|
||||
def test_get_agents_sorted(mocker: pytest_mock.MockFixture):
|
||||
def test_get_agents_sorted(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
snapshot: Snapshot,
|
||||
) -> None:
|
||||
mocked_value = backend.server.v2.store.model.StoreAgentsResponse(
|
||||
agents=[
|
||||
backend.server.v2.store.model.StoreAgent(
|
||||
@@ -178,6 +203,8 @@ def test_get_agents_sorted(mocker: pytest_mock.MockFixture):
|
||||
)
|
||||
assert len(data.agents) == 1
|
||||
assert data.agents[0].runs == 1000
|
||||
snapshot.snapshot_dir = "snapshots"
|
||||
snapshot.assert_match(json.dumps(response.json(), indent=2), "agts_sorted")
|
||||
mock_db_call.assert_called_once_with(
|
||||
featured=False,
|
||||
creator=None,
|
||||
@@ -189,7 +216,10 @@ def test_get_agents_sorted(mocker: pytest_mock.MockFixture):
|
||||
)
|
||||
|
||||
|
||||
def test_get_agents_search(mocker: pytest_mock.MockFixture):
|
||||
def test_get_agents_search(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
snapshot: Snapshot,
|
||||
) -> None:
|
||||
mocked_value = backend.server.v2.store.model.StoreAgentsResponse(
|
||||
agents=[
|
||||
backend.server.v2.store.model.StoreAgent(
|
||||
@@ -220,6 +250,8 @@ def test_get_agents_search(mocker: pytest_mock.MockFixture):
|
||||
)
|
||||
assert len(data.agents) == 1
|
||||
assert "specific" in data.agents[0].description.lower()
|
||||
snapshot.snapshot_dir = "snapshots"
|
||||
snapshot.assert_match(json.dumps(response.json(), indent=2), "agts_search")
|
||||
mock_db_call.assert_called_once_with(
|
||||
featured=False,
|
||||
creator=None,
|
||||
@@ -231,7 +263,10 @@ def test_get_agents_search(mocker: pytest_mock.MockFixture):
|
||||
)
|
||||
|
||||
|
||||
def test_get_agents_category(mocker: pytest_mock.MockFixture):
|
||||
def test_get_agents_category(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
snapshot: Snapshot,
|
||||
) -> None:
|
||||
mocked_value = backend.server.v2.store.model.StoreAgentsResponse(
|
||||
agents=[
|
||||
backend.server.v2.store.model.StoreAgent(
|
||||
@@ -261,6 +296,8 @@ def test_get_agents_category(mocker: pytest_mock.MockFixture):
|
||||
response.json()
|
||||
)
|
||||
assert len(data.agents) == 1
|
||||
snapshot.snapshot_dir = "snapshots"
|
||||
snapshot.assert_match(json.dumps(response.json(), indent=2), "agts_category")
|
||||
mock_db_call.assert_called_once_with(
|
||||
featured=False,
|
||||
creator=None,
|
||||
@@ -272,7 +309,10 @@ def test_get_agents_category(mocker: pytest_mock.MockFixture):
|
||||
)
|
||||
|
||||
|
||||
def test_get_agents_pagination(mocker: pytest_mock.MockFixture):
|
||||
def test_get_agents_pagination(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
snapshot: Snapshot,
|
||||
) -> None:
|
||||
mocked_value = backend.server.v2.store.model.StoreAgentsResponse(
|
||||
agents=[
|
||||
backend.server.v2.store.model.StoreAgent(
|
||||
@@ -305,6 +345,8 @@ def test_get_agents_pagination(mocker: pytest_mock.MockFixture):
|
||||
assert len(data.agents) == 5
|
||||
assert data.pagination.current_page == 2
|
||||
assert data.pagination.page_size == 5
|
||||
snapshot.snapshot_dir = "snapshots"
|
||||
snapshot.assert_match(json.dumps(response.json(), indent=2), "agts_pagination")
|
||||
mock_db_call.assert_called_once_with(
|
||||
featured=False,
|
||||
creator=None,
|
||||
@@ -334,7 +376,10 @@ def test_get_agents_malformed_request(mocker: pytest_mock.MockFixture):
|
||||
mock_db_call.assert_not_called()
|
||||
|
||||
|
||||
def test_get_agent_details(mocker: pytest_mock.MockFixture):
|
||||
def test_get_agent_details(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
snapshot: Snapshot,
|
||||
) -> None:
|
||||
mocked_value = backend.server.v2.store.model.StoreAgentDetails(
|
||||
store_listing_version_id="test-version-id",
|
||||
slug="test-agent",
|
||||
@@ -349,7 +394,7 @@ def test_get_agent_details(mocker: pytest_mock.MockFixture):
|
||||
runs=100,
|
||||
rating=4.5,
|
||||
versions=["1.0.0", "1.1.0"],
|
||||
last_updated=datetime.datetime.now(),
|
||||
last_updated=FIXED_NOW,
|
||||
)
|
||||
mock_db_call = mocker.patch("backend.server.v2.store.db.get_store_agent_details")
|
||||
mock_db_call.return_value = mocked_value
|
||||
@@ -362,10 +407,15 @@ def test_get_agent_details(mocker: pytest_mock.MockFixture):
|
||||
)
|
||||
assert data.agent_name == "Test Agent"
|
||||
assert data.creator == "creator1"
|
||||
snapshot.snapshot_dir = "snapshots"
|
||||
snapshot.assert_match(json.dumps(response.json(), indent=2), "agt_details")
|
||||
mock_db_call.assert_called_once_with(username="creator1", agent_name="test-agent")
|
||||
|
||||
|
||||
def test_get_creators_defaults(mocker: pytest_mock.MockFixture):
|
||||
def test_get_creators_defaults(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
snapshot: Snapshot,
|
||||
) -> None:
|
||||
mocked_value = backend.server.v2.store.model.CreatorsResponse(
|
||||
creators=[],
|
||||
pagination=backend.server.v2.store.model.Pagination(
|
||||
@@ -386,12 +436,17 @@ def test_get_creators_defaults(mocker: pytest_mock.MockFixture):
|
||||
)
|
||||
assert data.pagination.total_pages == 0
|
||||
assert data.creators == []
|
||||
snapshot.snapshot_dir = "snapshots"
|
||||
snapshot.assert_match(json.dumps(response.json(), indent=2), "def_creators")
|
||||
mock_db_call.assert_called_once_with(
|
||||
featured=False, search_query=None, sorted_by=None, page=1, page_size=20
|
||||
)
|
||||
|
||||
|
||||
def test_get_creators_pagination(mocker: pytest_mock.MockFixture):
|
||||
def test_get_creators_pagination(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
snapshot: Snapshot,
|
||||
) -> None:
|
||||
mocked_value = backend.server.v2.store.model.CreatorsResponse(
|
||||
creators=[
|
||||
backend.server.v2.store.model.Creator(
|
||||
@@ -425,6 +480,8 @@ def test_get_creators_pagination(mocker: pytest_mock.MockFixture):
|
||||
assert len(data.creators) == 5
|
||||
assert data.pagination.current_page == 2
|
||||
assert data.pagination.page_size == 5
|
||||
snapshot.snapshot_dir = "snapshots"
|
||||
snapshot.assert_match(json.dumps(response.json(), indent=2), "creators_pagination")
|
||||
mock_db_call.assert_called_once_with(
|
||||
featured=False, search_query=None, sorted_by=None, page=2, page_size=5
|
||||
)
|
||||
@@ -448,7 +505,10 @@ def test_get_creators_malformed_request(mocker: pytest_mock.MockFixture):
|
||||
mock_db_call.assert_not_called()
|
||||
|
||||
|
||||
def test_get_creator_details(mocker: pytest_mock.MockFixture):
|
||||
def test_get_creator_details(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
snapshot: Snapshot,
|
||||
) -> None:
|
||||
mocked_value = backend.server.v2.store.model.CreatorDetails(
|
||||
name="Test User",
|
||||
username="creator1",
|
||||
@@ -468,17 +528,22 @@ def test_get_creator_details(mocker: pytest_mock.MockFixture):
|
||||
data = backend.server.v2.store.model.CreatorDetails.model_validate(response.json())
|
||||
assert data.username == "creator1"
|
||||
assert data.name == "Test User"
|
||||
snapshot.snapshot_dir = "snapshots"
|
||||
snapshot.assert_match(json.dumps(response.json(), indent=2), "creator_details")
|
||||
mock_db_call.assert_called_once_with(username="creator1")
|
||||
|
||||
|
||||
def test_get_submissions_success(mocker: pytest_mock.MockFixture):
|
||||
def test_get_submissions_success(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
snapshot: Snapshot,
|
||||
) -> None:
|
||||
mocked_value = backend.server.v2.store.model.StoreSubmissionsResponse(
|
||||
submissions=[
|
||||
backend.server.v2.store.model.StoreSubmission(
|
||||
name="Test Agent",
|
||||
description="Test agent description",
|
||||
image_urls=["test.jpg"],
|
||||
date_submitted=datetime.datetime.now(),
|
||||
date_submitted=FIXED_NOW,
|
||||
status=prisma.enums.SubmissionStatus.APPROVED,
|
||||
runs=50,
|
||||
rating=4.2,
|
||||
@@ -507,10 +572,15 @@ def test_get_submissions_success(mocker: pytest_mock.MockFixture):
|
||||
assert len(data.submissions) == 1
|
||||
assert data.submissions[0].name == "Test Agent"
|
||||
assert data.pagination.current_page == 1
|
||||
snapshot.snapshot_dir = "snapshots"
|
||||
snapshot.assert_match(json.dumps(response.json(), indent=2), "sub_success")
|
||||
mock_db_call.assert_called_once_with(user_id="test-user-id", page=1, page_size=20)
|
||||
|
||||
|
||||
def test_get_submissions_pagination(mocker: pytest_mock.MockFixture):
|
||||
def test_get_submissions_pagination(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
snapshot: Snapshot,
|
||||
) -> None:
|
||||
mocked_value = backend.server.v2.store.model.StoreSubmissionsResponse(
|
||||
submissions=[],
|
||||
pagination=backend.server.v2.store.model.Pagination(
|
||||
@@ -531,6 +601,8 @@ def test_get_submissions_pagination(mocker: pytest_mock.MockFixture):
|
||||
)
|
||||
assert data.pagination.current_page == 2
|
||||
assert data.pagination.page_size == 5
|
||||
snapshot.snapshot_dir = "snapshots"
|
||||
snapshot.assert_match(json.dumps(response.json(), indent=2), "sub_pagination")
|
||||
mock_db_call.assert_called_once_with(user_id="test-user-id", page=2, page_size=5)
|
||||
|
||||
|
||||
|
||||
@@ -35,7 +35,9 @@ async def verify_token(request: TurnstileVerifyRequest) -> TurnstileVerifyRespon
|
||||
turnstile_verify_url = settings.secrets.turnstile_verify_url
|
||||
|
||||
if not turnstile_secret_key:
|
||||
logger.error("Turnstile secret key is not configured")
|
||||
logger.error(
|
||||
"Turnstile secret key missing. Set TURNSTILE_SECRET_KEY to enable verification."
|
||||
)
|
||||
return TurnstileVerifyResponse(
|
||||
success=False,
|
||||
error="CONFIGURATION_ERROR",
|
||||
|
||||
@@ -0,0 +1,32 @@
|
||||
import fastapi
|
||||
import fastapi.testclient
|
||||
import pytest_mock
|
||||
|
||||
import backend.server.v2.turnstile.routes as turnstile_routes
|
||||
|
||||
app = fastapi.FastAPI()
|
||||
app.include_router(turnstile_routes.router)
|
||||
|
||||
client = fastapi.testclient.TestClient(app)
|
||||
|
||||
|
||||
def test_verify_turnstile_token_no_secret_key(mocker: pytest_mock.MockFixture) -> None:
|
||||
"""Test token verification without secret key configured"""
|
||||
# Mock the settings with no secret key
|
||||
mock_settings = mocker.patch("backend.server.v2.turnstile.routes.settings")
|
||||
mock_settings.secrets.turnstile_secret_key = None
|
||||
|
||||
request_data = {"token": "test_token", "action": "login"}
|
||||
response = client.post("/verify", json=request_data)
|
||||
|
||||
assert response.status_code == 200
|
||||
response_data = response.json()
|
||||
assert response_data["success"] is False
|
||||
assert response_data["error"] == "CONFIGURATION_ERROR"
|
||||
|
||||
|
||||
def test_verify_turnstile_token_invalid_request() -> None:
|
||||
"""Test token verification with invalid request data"""
|
||||
# Missing token
|
||||
response = client.post("/verify", json={"action": "login"})
|
||||
assert response.status_code == 422
|
||||
@@ -3,6 +3,7 @@ import logging
|
||||
from contextlib import asynccontextmanager
|
||||
from typing import Protocol
|
||||
|
||||
import pydantic
|
||||
import uvicorn
|
||||
from autogpt_libs.auth import parse_jwt_token
|
||||
from autogpt_libs.logging.utils import generate_uvicorn_config
|
||||
@@ -51,7 +52,11 @@ async def event_broadcaster(manager: ConnectionManager):
|
||||
async for event in event_queue.listen("*"):
|
||||
await manager.send_execution_update(event)
|
||||
except Exception as e:
|
||||
logger.exception(f"Event broadcaster error: {e}")
|
||||
logger.exception(
|
||||
"Event broadcaster stopped due to error: %s. "
|
||||
"Verify the Redis connection and restart the service.",
|
||||
e,
|
||||
)
|
||||
raise
|
||||
|
||||
|
||||
@@ -221,7 +226,22 @@ async def websocket_router(
|
||||
try:
|
||||
while True:
|
||||
data = await websocket.receive_text()
|
||||
message = WSMessage.model_validate_json(data)
|
||||
try:
|
||||
message = WSMessage.model_validate_json(data)
|
||||
except pydantic.ValidationError as e:
|
||||
logger.error(
|
||||
"Invalid WebSocket message from user #%s: %s",
|
||||
user_id,
|
||||
e,
|
||||
)
|
||||
await websocket.send_text(
|
||||
WSMessage(
|
||||
method=WSMethod.ERROR,
|
||||
success=False,
|
||||
error=("Invalid message format. Review the schema and retry"),
|
||||
).model_dump_json()
|
||||
)
|
||||
continue
|
||||
|
||||
try:
|
||||
if message.method in _MSG_HANDLERS:
|
||||
@@ -232,6 +252,21 @@ async def websocket_router(
|
||||
message=message,
|
||||
)
|
||||
continue
|
||||
except pydantic.ValidationError as e:
|
||||
logger.error(
|
||||
"Validation error while handling '%s' for user #%s: %s",
|
||||
message.method.value,
|
||||
user_id,
|
||||
e,
|
||||
)
|
||||
await websocket.send_text(
|
||||
WSMessage(
|
||||
method=WSMethod.ERROR,
|
||||
success=False,
|
||||
error="Invalid message data. Refer to the API schema",
|
||||
).model_dump_json()
|
||||
)
|
||||
continue
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Error while handling '{message.method.value}' message "
|
||||
|
||||
@@ -2,16 +2,17 @@ services:
|
||||
postgres-test:
|
||||
image: ankane/pgvector:latest
|
||||
environment:
|
||||
- POSTGRES_USER=${DB_USER}
|
||||
- POSTGRES_PASSWORD=${DB_PASS}
|
||||
- POSTGRES_DB=${DB_NAME}
|
||||
- POSTGRES_USER=${DB_USER:-postgres}
|
||||
- POSTGRES_PASSWORD=${DB_PASS:-postgres}
|
||||
- POSTGRES_DB=${DB_NAME:-postgres}
|
||||
- POSTGRES_PORT=${DB_PORT:-5432}
|
||||
healthcheck:
|
||||
test: pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
ports:
|
||||
- "${DB_PORT}:5432"
|
||||
- "${DB_PORT:-5432}:5432"
|
||||
networks:
|
||||
- app-network-test
|
||||
redis-test:
|
||||
|
||||
3382
autogpt_platform/backend/poetry.lock
generated
3382
autogpt_platform/backend/poetry.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -37,7 +37,7 @@ ollama = "^0.4.8"
|
||||
openai = "^1.78.1"
|
||||
pika = "^1.3.2"
|
||||
pinecone = "^5.3.1"
|
||||
poetry = "^2.1.3"
|
||||
poetry = "2.1.1" # CHECK DEPENDABOT SUPPORT BEFORE UPGRADING
|
||||
postmarker = "^1.0"
|
||||
praw = "~7.8.1"
|
||||
prisma = "^0.15.0"
|
||||
@@ -65,6 +65,7 @@ websockets = "^14.2"
|
||||
youtube-transcript-api = "^0.6.2"
|
||||
zerobouncesdk = "^1.1.1"
|
||||
# NOTE: please insert new dependencies in their alphabetical location
|
||||
pytest-snapshot = "^0.9.0"
|
||||
|
||||
[tool.poetry.group.dev.dependencies]
|
||||
aiohappyeyeballs = "^2.6.1"
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
@@ -59,11 +60,52 @@ def test():
|
||||
run_command(["docker", "compose", "-f", "docker-compose.test.yaml", "down"])
|
||||
sys.exit(1)
|
||||
|
||||
# Run Prisma migrations
|
||||
run_command(["prisma", "migrate", "dev"])
|
||||
# IMPORTANT: Set test database environment variables to prevent accidentally
|
||||
# resetting the developer's local database.
|
||||
#
|
||||
# This script spins up a separate test database container (postgres-test) using
|
||||
# docker-compose.test.yaml. We explicitly set DATABASE_URL and DIRECT_URL to point
|
||||
# to this test database to ensure that:
|
||||
# 1. The prisma migrate reset command only affects the test database
|
||||
# 2. Tests run against the test database, not the developer's local database
|
||||
# 3. Any database operations during testing are isolated from development data
|
||||
#
|
||||
# Without this, if a developer has DATABASE_URL set in their environment pointing
|
||||
# to their development database, running tests would wipe their local data!
|
||||
test_env = os.environ.copy()
|
||||
|
||||
# Run the tests
|
||||
result = subprocess.run(["pytest"] + sys.argv[1:], check=False)
|
||||
# Use environment variables if set, otherwise use defaults that match docker-compose.test.yaml
|
||||
db_user = os.getenv("DB_USER", "postgres")
|
||||
db_pass = os.getenv("DB_PASS", "postgres")
|
||||
db_name = os.getenv("DB_NAME", "postgres")
|
||||
db_port = os.getenv("DB_PORT", "5432")
|
||||
|
||||
# Construct the test database URL - this ensures we're always pointing to the test container
|
||||
test_env["DATABASE_URL"] = (
|
||||
f"postgresql://{db_user}:{db_pass}@localhost:{db_port}/{db_name}"
|
||||
)
|
||||
test_env["DIRECT_URL"] = test_env["DATABASE_URL"]
|
||||
|
||||
test_env["DB_PORT"] = db_port
|
||||
test_env["DB_NAME"] = db_name
|
||||
test_env["DB_PASS"] = db_pass
|
||||
test_env["DB_USER"] = db_user
|
||||
|
||||
# Run Prisma migrations with test database
|
||||
# First, reset the database to ensure clean state for tests
|
||||
# This is safe because we've explicitly set DATABASE_URL to the test database above
|
||||
subprocess.run(
|
||||
["prisma", "migrate", "reset", "--force", "--skip-seed"],
|
||||
env=test_env,
|
||||
check=False,
|
||||
)
|
||||
# Then apply migrations to get the test database schema up to date
|
||||
subprocess.run(["prisma", "migrate", "deploy"], env=test_env, check=True)
|
||||
|
||||
# Run the tests with test database environment
|
||||
# This ensures all database connections in the tests use the test database,
|
||||
# not any database that might be configured in the developer's environment
|
||||
result = subprocess.run(["pytest"] + sys.argv[1:], env=test_env, check=False)
|
||||
|
||||
run_command(["docker", "compose", "-f", "docker-compose.test.yaml", "down"])
|
||||
|
||||
|
||||
4
autogpt_platform/backend/snapshots/adm_add_cred_neg
Normal file
4
autogpt_platform/backend/snapshots/adm_add_cred_neg
Normal file
@@ -0,0 +1,4 @@
|
||||
{
|
||||
"new_balance": 200,
|
||||
"transaction_key": "transaction-456-uuid"
|
||||
}
|
||||
4
autogpt_platform/backend/snapshots/adm_add_cred_ok
Normal file
4
autogpt_platform/backend/snapshots/adm_add_cred_ok
Normal file
@@ -0,0 +1,4 @@
|
||||
{
|
||||
"new_balance": 1500,
|
||||
"transaction_key": "transaction-123-uuid"
|
||||
}
|
||||
9
autogpt_platform/backend/snapshots/adm_usr_hist_empty
Normal file
9
autogpt_platform/backend/snapshots/adm_usr_hist_empty
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"history": [],
|
||||
"pagination": {
|
||||
"current_page": 1,
|
||||
"page_size": 20,
|
||||
"total_items": 0,
|
||||
"total_pages": 0
|
||||
}
|
||||
}
|
||||
28
autogpt_platform/backend/snapshots/adm_usr_hist_filt
Normal file
28
autogpt_platform/backend/snapshots/adm_usr_hist_filt
Normal file
@@ -0,0 +1,28 @@
|
||||
{
|
||||
"history": [
|
||||
{
|
||||
"admin_email": null,
|
||||
"amount": 500,
|
||||
"current_balance": 0,
|
||||
"description": null,
|
||||
"extra_data": null,
|
||||
"reason": "Top up",
|
||||
"running_balance": 0,
|
||||
"transaction_key": "",
|
||||
"transaction_time": "0001-01-01T00:00:00Z",
|
||||
"transaction_type": "TOP_UP",
|
||||
"usage_execution_id": null,
|
||||
"usage_graph_id": null,
|
||||
"usage_node_count": 0,
|
||||
"usage_start_time": "9999-12-31T23:59:59.999999Z",
|
||||
"user_email": "test@example.com",
|
||||
"user_id": "user-3"
|
||||
}
|
||||
],
|
||||
"pagination": {
|
||||
"current_page": 1,
|
||||
"page_size": 10,
|
||||
"total_items": 1,
|
||||
"total_pages": 1
|
||||
}
|
||||
}
|
||||
46
autogpt_platform/backend/snapshots/adm_usr_hist_ok
Normal file
46
autogpt_platform/backend/snapshots/adm_usr_hist_ok
Normal file
@@ -0,0 +1,46 @@
|
||||
{
|
||||
"history": [
|
||||
{
|
||||
"admin_email": null,
|
||||
"amount": 1000,
|
||||
"current_balance": 0,
|
||||
"description": null,
|
||||
"extra_data": null,
|
||||
"reason": "Initial grant",
|
||||
"running_balance": 0,
|
||||
"transaction_key": "",
|
||||
"transaction_time": "0001-01-01T00:00:00Z",
|
||||
"transaction_type": "GRANT",
|
||||
"usage_execution_id": null,
|
||||
"usage_graph_id": null,
|
||||
"usage_node_count": 0,
|
||||
"usage_start_time": "9999-12-31T23:59:59.999999Z",
|
||||
"user_email": "user1@example.com",
|
||||
"user_id": "user-1"
|
||||
},
|
||||
{
|
||||
"admin_email": null,
|
||||
"amount": -50,
|
||||
"current_balance": 0,
|
||||
"description": null,
|
||||
"extra_data": null,
|
||||
"reason": "Usage",
|
||||
"running_balance": 0,
|
||||
"transaction_key": "",
|
||||
"transaction_time": "0001-01-01T00:00:00Z",
|
||||
"transaction_type": "USAGE",
|
||||
"usage_execution_id": null,
|
||||
"usage_graph_id": null,
|
||||
"usage_node_count": 0,
|
||||
"usage_start_time": "9999-12-31T23:59:59.999999Z",
|
||||
"user_email": "user2@example.com",
|
||||
"user_id": "user-2"
|
||||
}
|
||||
],
|
||||
"pagination": {
|
||||
"current_page": 1,
|
||||
"page_size": 20,
|
||||
"total_items": 2,
|
||||
"total_pages": 1
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,4 @@
|
||||
{
|
||||
"new_balance": 1500,
|
||||
"transaction_key": "transaction-123-uuid"
|
||||
}
|
||||
27
autogpt_platform/backend/snapshots/agt_details
Normal file
27
autogpt_platform/backend/snapshots/agt_details
Normal file
@@ -0,0 +1,27 @@
|
||||
{
|
||||
"store_listing_version_id": "test-version-id",
|
||||
"slug": "test-agent",
|
||||
"agent_name": "Test Agent",
|
||||
"agent_video": "video.mp4",
|
||||
"agent_image": [
|
||||
"image1.jpg",
|
||||
"image2.jpg"
|
||||
],
|
||||
"creator": "creator1",
|
||||
"creator_avatar": "avatar1.jpg",
|
||||
"sub_heading": "Test agent subheading",
|
||||
"description": "Test agent description",
|
||||
"categories": [
|
||||
"category1",
|
||||
"category2"
|
||||
],
|
||||
"runs": 100,
|
||||
"rating": 4.5,
|
||||
"versions": [
|
||||
"1.0.0",
|
||||
"1.1.0"
|
||||
],
|
||||
"last_updated": "2023-01-01T00:00:00",
|
||||
"active_version_id": null,
|
||||
"has_approved_version": false
|
||||
}
|
||||
21
autogpt_platform/backend/snapshots/agts_by_creator
Normal file
21
autogpt_platform/backend/snapshots/agts_by_creator
Normal file
@@ -0,0 +1,21 @@
|
||||
{
|
||||
"agents": [
|
||||
{
|
||||
"slug": "creator-agent",
|
||||
"agent_name": "Creator Agent",
|
||||
"agent_image": "agent.jpg",
|
||||
"creator": "specific-creator",
|
||||
"creator_avatar": "avatar.jpg",
|
||||
"sub_heading": "Creator agent subheading",
|
||||
"description": "Creator agent description",
|
||||
"runs": 50,
|
||||
"rating": 4.0
|
||||
}
|
||||
],
|
||||
"pagination": {
|
||||
"total_items": 1,
|
||||
"total_pages": 1,
|
||||
"current_page": 1,
|
||||
"page_size": 20
|
||||
}
|
||||
}
|
||||
21
autogpt_platform/backend/snapshots/agts_category
Normal file
21
autogpt_platform/backend/snapshots/agts_category
Normal file
@@ -0,0 +1,21 @@
|
||||
{
|
||||
"agents": [
|
||||
{
|
||||
"slug": "category-agent",
|
||||
"agent_name": "Category Agent",
|
||||
"agent_image": "category.jpg",
|
||||
"creator": "creator1",
|
||||
"creator_avatar": "avatar1.jpg",
|
||||
"sub_heading": "Category agent subheading",
|
||||
"description": "Category agent description",
|
||||
"runs": 60,
|
||||
"rating": 4.1
|
||||
}
|
||||
],
|
||||
"pagination": {
|
||||
"total_items": 1,
|
||||
"total_pages": 1,
|
||||
"current_page": 1,
|
||||
"page_size": 20
|
||||
}
|
||||
}
|
||||
65
autogpt_platform/backend/snapshots/agts_pagination
Normal file
65
autogpt_platform/backend/snapshots/agts_pagination
Normal file
@@ -0,0 +1,65 @@
|
||||
{
|
||||
"agents": [
|
||||
{
|
||||
"slug": "agent-0",
|
||||
"agent_name": "Agent 0",
|
||||
"agent_image": "agent0.jpg",
|
||||
"creator": "creator1",
|
||||
"creator_avatar": "avatar1.jpg",
|
||||
"sub_heading": "Agent 0 subheading",
|
||||
"description": "Agent 0 description",
|
||||
"runs": 0,
|
||||
"rating": 4.0
|
||||
},
|
||||
{
|
||||
"slug": "agent-1",
|
||||
"agent_name": "Agent 1",
|
||||
"agent_image": "agent1.jpg",
|
||||
"creator": "creator1",
|
||||
"creator_avatar": "avatar1.jpg",
|
||||
"sub_heading": "Agent 1 subheading",
|
||||
"description": "Agent 1 description",
|
||||
"runs": 10,
|
||||
"rating": 4.0
|
||||
},
|
||||
{
|
||||
"slug": "agent-2",
|
||||
"agent_name": "Agent 2",
|
||||
"agent_image": "agent2.jpg",
|
||||
"creator": "creator1",
|
||||
"creator_avatar": "avatar1.jpg",
|
||||
"sub_heading": "Agent 2 subheading",
|
||||
"description": "Agent 2 description",
|
||||
"runs": 20,
|
||||
"rating": 4.0
|
||||
},
|
||||
{
|
||||
"slug": "agent-3",
|
||||
"agent_name": "Agent 3",
|
||||
"agent_image": "agent3.jpg",
|
||||
"creator": "creator1",
|
||||
"creator_avatar": "avatar1.jpg",
|
||||
"sub_heading": "Agent 3 subheading",
|
||||
"description": "Agent 3 description",
|
||||
"runs": 30,
|
||||
"rating": 4.0
|
||||
},
|
||||
{
|
||||
"slug": "agent-4",
|
||||
"agent_name": "Agent 4",
|
||||
"agent_image": "agent4.jpg",
|
||||
"creator": "creator1",
|
||||
"creator_avatar": "avatar1.jpg",
|
||||
"sub_heading": "Agent 4 subheading",
|
||||
"description": "Agent 4 description",
|
||||
"runs": 40,
|
||||
"rating": 4.0
|
||||
}
|
||||
],
|
||||
"pagination": {
|
||||
"total_items": 15,
|
||||
"total_pages": 3,
|
||||
"current_page": 2,
|
||||
"page_size": 5
|
||||
}
|
||||
}
|
||||
21
autogpt_platform/backend/snapshots/agts_search
Normal file
21
autogpt_platform/backend/snapshots/agts_search
Normal file
@@ -0,0 +1,21 @@
|
||||
{
|
||||
"agents": [
|
||||
{
|
||||
"slug": "search-agent",
|
||||
"agent_name": "Search Agent",
|
||||
"agent_image": "search.jpg",
|
||||
"creator": "creator1",
|
||||
"creator_avatar": "avatar1.jpg",
|
||||
"sub_heading": "Search agent subheading",
|
||||
"description": "Specific search term description",
|
||||
"runs": 75,
|
||||
"rating": 4.2
|
||||
}
|
||||
],
|
||||
"pagination": {
|
||||
"total_items": 1,
|
||||
"total_pages": 1,
|
||||
"current_page": 1,
|
||||
"page_size": 20
|
||||
}
|
||||
}
|
||||
21
autogpt_platform/backend/snapshots/agts_sorted
Normal file
21
autogpt_platform/backend/snapshots/agts_sorted
Normal file
@@ -0,0 +1,21 @@
|
||||
{
|
||||
"agents": [
|
||||
{
|
||||
"slug": "top-agent",
|
||||
"agent_name": "Top Agent",
|
||||
"agent_image": "top.jpg",
|
||||
"creator": "creator1",
|
||||
"creator_avatar": "avatar1.jpg",
|
||||
"sub_heading": "Top agent subheading",
|
||||
"description": "Top agent description",
|
||||
"runs": 1000,
|
||||
"rating": 5.0
|
||||
}
|
||||
],
|
||||
"pagination": {
|
||||
"total_items": 1,
|
||||
"total_pages": 1,
|
||||
"current_page": 1,
|
||||
"page_size": 20
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,30 @@
|
||||
{
|
||||
"analytics_id": "analytics-complex-uuid",
|
||||
"logged_data": {
|
||||
"agent_id": "agent_123",
|
||||
"blocks_used": [
|
||||
{
|
||||
"block_id": "llm_block",
|
||||
"count": 3
|
||||
},
|
||||
{
|
||||
"block_id": "http_block",
|
||||
"count": 5
|
||||
},
|
||||
{
|
||||
"block_id": "code_block",
|
||||
"count": 2
|
||||
}
|
||||
],
|
||||
"duration_ms": 3500,
|
||||
"errors": [],
|
||||
"execution_id": "exec_456",
|
||||
"metadata": {
|
||||
"environment": "production",
|
||||
"trigger": "manual",
|
||||
"user_tier": "premium"
|
||||
},
|
||||
"nodes_executed": 15,
|
||||
"status": "completed"
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,3 @@
|
||||
{
|
||||
"analytics_id": "analytics-789-uuid"
|
||||
}
|
||||
@@ -0,0 +1,3 @@
|
||||
{
|
||||
"metric_id": "metric-123-uuid"
|
||||
}
|
||||
@@ -0,0 +1,3 @@
|
||||
{
|
||||
"metric_id": "metric-456-uuid"
|
||||
}
|
||||
3
autogpt_platform/backend/snapshots/auth_email
Normal file
3
autogpt_platform/backend/snapshots/auth_email
Normal file
@@ -0,0 +1,3 @@
|
||||
{
|
||||
"email": "newemail@example.com"
|
||||
}
|
||||
5
autogpt_platform/backend/snapshots/auth_user
Normal file
5
autogpt_platform/backend/snapshots/auth_user
Normal file
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"email": "test@example.com",
|
||||
"id": "test-user-id",
|
||||
"name": "Test User"
|
||||
}
|
||||
14
autogpt_platform/backend/snapshots/blks_all
Normal file
14
autogpt_platform/backend/snapshots/blks_all
Normal file
@@ -0,0 +1,14 @@
|
||||
[
|
||||
{
|
||||
"costs": [
|
||||
{
|
||||
"cost": 10,
|
||||
"type": "credit"
|
||||
}
|
||||
],
|
||||
"description": "A test block",
|
||||
"disabled": false,
|
||||
"id": "test-block",
|
||||
"name": "Test Block"
|
||||
}
|
||||
]
|
||||
12
autogpt_platform/backend/snapshots/blks_exec
Normal file
12
autogpt_platform/backend/snapshots/blks_exec
Normal file
@@ -0,0 +1,12 @@
|
||||
{
|
||||
"output1": [
|
||||
{
|
||||
"data": "result1"
|
||||
}
|
||||
],
|
||||
"output2": [
|
||||
{
|
||||
"data": "result2"
|
||||
}
|
||||
]
|
||||
}
|
||||
16
autogpt_platform/backend/snapshots/creator_details
Normal file
16
autogpt_platform/backend/snapshots/creator_details
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"name": "Test User",
|
||||
"username": "creator1",
|
||||
"description": "Test creator description",
|
||||
"links": [
|
||||
"link1.com",
|
||||
"link2.com"
|
||||
],
|
||||
"avatar_url": "avatar.jpg",
|
||||
"agent_rating": 4.8,
|
||||
"agent_runs": 1000,
|
||||
"top_categories": [
|
||||
"category1",
|
||||
"category2"
|
||||
]
|
||||
}
|
||||
60
autogpt_platform/backend/snapshots/creators_pagination
Normal file
60
autogpt_platform/backend/snapshots/creators_pagination
Normal file
@@ -0,0 +1,60 @@
|
||||
{
|
||||
"creators": [
|
||||
{
|
||||
"name": "Creator 0",
|
||||
"username": "creator0",
|
||||
"description": "Creator 0 description",
|
||||
"avatar_url": "avatar0.jpg",
|
||||
"num_agents": 1,
|
||||
"agent_rating": 4.5,
|
||||
"agent_runs": 100,
|
||||
"is_featured": false
|
||||
},
|
||||
{
|
||||
"name": "Creator 1",
|
||||
"username": "creator1",
|
||||
"description": "Creator 1 description",
|
||||
"avatar_url": "avatar1.jpg",
|
||||
"num_agents": 1,
|
||||
"agent_rating": 4.5,
|
||||
"agent_runs": 100,
|
||||
"is_featured": false
|
||||
},
|
||||
{
|
||||
"name": "Creator 2",
|
||||
"username": "creator2",
|
||||
"description": "Creator 2 description",
|
||||
"avatar_url": "avatar2.jpg",
|
||||
"num_agents": 1,
|
||||
"agent_rating": 4.5,
|
||||
"agent_runs": 100,
|
||||
"is_featured": false
|
||||
},
|
||||
{
|
||||
"name": "Creator 3",
|
||||
"username": "creator3",
|
||||
"description": "Creator 3 description",
|
||||
"avatar_url": "avatar3.jpg",
|
||||
"num_agents": 1,
|
||||
"agent_rating": 4.5,
|
||||
"agent_runs": 100,
|
||||
"is_featured": false
|
||||
},
|
||||
{
|
||||
"name": "Creator 4",
|
||||
"username": "creator4",
|
||||
"description": "Creator 4 description",
|
||||
"avatar_url": "avatar4.jpg",
|
||||
"num_agents": 1,
|
||||
"agent_rating": 4.5,
|
||||
"agent_runs": 100,
|
||||
"is_featured": false
|
||||
}
|
||||
],
|
||||
"pagination": {
|
||||
"total_items": 15,
|
||||
"total_pages": 3,
|
||||
"current_page": 2,
|
||||
"page_size": 5
|
||||
}
|
||||
}
|
||||
3
autogpt_platform/backend/snapshots/cred_bal
Normal file
3
autogpt_platform/backend/snapshots/cred_bal
Normal file
@@ -0,0 +1,3 @@
|
||||
{
|
||||
"credits": 1000
|
||||
}
|
||||
4
autogpt_platform/backend/snapshots/cred_topup_cfg
Normal file
4
autogpt_platform/backend/snapshots/cred_topup_cfg
Normal file
@@ -0,0 +1,4 @@
|
||||
{
|
||||
"amount": 500,
|
||||
"threshold": 100
|
||||
}
|
||||
3
autogpt_platform/backend/snapshots/cred_topup_req
Normal file
3
autogpt_platform/backend/snapshots/cred_topup_req
Normal file
@@ -0,0 +1,3 @@
|
||||
{
|
||||
"checkout_url": "https://checkout.example.com/session123"
|
||||
}
|
||||
9
autogpt_platform/backend/snapshots/def_agts
Normal file
9
autogpt_platform/backend/snapshots/def_agts
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"agents": [],
|
||||
"pagination": {
|
||||
"total_items": 0,
|
||||
"total_pages": 0,
|
||||
"current_page": 0,
|
||||
"page_size": 10
|
||||
}
|
||||
}
|
||||
9
autogpt_platform/backend/snapshots/def_creators
Normal file
9
autogpt_platform/backend/snapshots/def_creators
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"creators": [],
|
||||
"pagination": {
|
||||
"total_items": 0,
|
||||
"total_pages": 0,
|
||||
"current_page": 0,
|
||||
"page_size": 10
|
||||
}
|
||||
}
|
||||
21
autogpt_platform/backend/snapshots/feat_agts
Normal file
21
autogpt_platform/backend/snapshots/feat_agts
Normal file
@@ -0,0 +1,21 @@
|
||||
{
|
||||
"agents": [
|
||||
{
|
||||
"slug": "featured-agent",
|
||||
"agent_name": "Featured Agent",
|
||||
"agent_image": "featured.jpg",
|
||||
"creator": "creator1",
|
||||
"creator_avatar": "avatar1.jpg",
|
||||
"sub_heading": "Featured agent subheading",
|
||||
"description": "Featured agent description",
|
||||
"runs": 100,
|
||||
"rating": 4.5
|
||||
}
|
||||
],
|
||||
"pagination": {
|
||||
"total_items": 1,
|
||||
"total_pages": 1,
|
||||
"current_page": 1,
|
||||
"page_size": 20
|
||||
}
|
||||
}
|
||||
20
autogpt_platform/backend/snapshots/grph_in_schm
Normal file
20
autogpt_platform/backend/snapshots/grph_in_schm
Normal file
@@ -0,0 +1,20 @@
|
||||
{
|
||||
"properties": {
|
||||
"in_key_a": {
|
||||
"advanced": true,
|
||||
"default": "A",
|
||||
"secret": false,
|
||||
"title": "Key A"
|
||||
},
|
||||
"in_key_b": {
|
||||
"advanced": false,
|
||||
"secret": false,
|
||||
"title": "in_key_b"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"in_key_b"
|
||||
],
|
||||
"title": "ExpectedInputSchema",
|
||||
"type": "object"
|
||||
}
|
||||
15
autogpt_platform/backend/snapshots/grph_out_schm
Normal file
15
autogpt_platform/backend/snapshots/grph_out_schm
Normal file
@@ -0,0 +1,15 @@
|
||||
{
|
||||
"properties": {
|
||||
"out_key": {
|
||||
"advanced": false,
|
||||
"description": "This is an output key",
|
||||
"secret": false,
|
||||
"title": "out_key"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"out_key"
|
||||
],
|
||||
"title": "ExpectedOutputSchema",
|
||||
"type": "object"
|
||||
}
|
||||
29
autogpt_platform/backend/snapshots/grph_single
Normal file
29
autogpt_platform/backend/snapshots/grph_single
Normal file
@@ -0,0 +1,29 @@
|
||||
{
|
||||
"credentials_input_schema": {
|
||||
"properties": {},
|
||||
"title": "TestGraphCredentialsInputSchema",
|
||||
"type": "object"
|
||||
},
|
||||
"description": "A test graph",
|
||||
"forked_from_id": null,
|
||||
"forked_from_version": null,
|
||||
"has_webhook_trigger": false,
|
||||
"id": "graph-123",
|
||||
"input_schema": {
|
||||
"properties": {},
|
||||
"required": [],
|
||||
"type": "object"
|
||||
},
|
||||
"is_active": true,
|
||||
"links": [],
|
||||
"name": "Test Graph",
|
||||
"nodes": [],
|
||||
"output_schema": {
|
||||
"properties": {},
|
||||
"required": [],
|
||||
"type": "object"
|
||||
},
|
||||
"sub_graphs": [],
|
||||
"user_id": "test-user-id",
|
||||
"version": 1
|
||||
}
|
||||
17
autogpt_platform/backend/snapshots/grph_struct
Normal file
17
autogpt_platform/backend/snapshots/grph_struct
Normal file
@@ -0,0 +1,17 @@
|
||||
{
|
||||
"description": "Test graph",
|
||||
"link_structure": [
|
||||
{
|
||||
"sink_name": "name",
|
||||
"source_name": "output"
|
||||
}
|
||||
],
|
||||
"links_count": 1,
|
||||
"name": "TestGraph",
|
||||
"node_blocks": [
|
||||
"1ff065e9-88e8-4358-9d82-8dc91f622ba9",
|
||||
"c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
|
||||
"1ff065e9-88e8-4358-9d82-8dc91f622ba9"
|
||||
],
|
||||
"nodes_count": 3
|
||||
}
|
||||
31
autogpt_platform/backend/snapshots/grphs_all
Normal file
31
autogpt_platform/backend/snapshots/grphs_all
Normal file
@@ -0,0 +1,31 @@
|
||||
[
|
||||
{
|
||||
"credentials_input_schema": {
|
||||
"properties": {},
|
||||
"title": "TestGraphCredentialsInputSchema",
|
||||
"type": "object"
|
||||
},
|
||||
"description": "A test graph",
|
||||
"forked_from_id": null,
|
||||
"forked_from_version": null,
|
||||
"has_webhook_trigger": false,
|
||||
"id": "graph-123",
|
||||
"input_schema": {
|
||||
"properties": {},
|
||||
"required": [],
|
||||
"type": "object"
|
||||
},
|
||||
"is_active": true,
|
||||
"links": [],
|
||||
"name": "Test Graph",
|
||||
"nodes": [],
|
||||
"output_schema": {
|
||||
"properties": {},
|
||||
"required": [],
|
||||
"type": "object"
|
||||
},
|
||||
"sub_graphs": [],
|
||||
"user_id": "test-user-id",
|
||||
"version": 1
|
||||
}
|
||||
]
|
||||
3
autogpt_platform/backend/snapshots/grphs_del
Normal file
3
autogpt_platform/backend/snapshots/grphs_del
Normal file
@@ -0,0 +1,3 @@
|
||||
{
|
||||
"version_counts": 3
|
||||
}
|
||||
48
autogpt_platform/backend/snapshots/lib_agts_search
Normal file
48
autogpt_platform/backend/snapshots/lib_agts_search
Normal file
@@ -0,0 +1,48 @@
|
||||
{
|
||||
"agents": [
|
||||
{
|
||||
"id": "test-agent-1",
|
||||
"graph_id": "test-agent-1",
|
||||
"graph_version": 1,
|
||||
"image_url": null,
|
||||
"creator_name": "Test Creator",
|
||||
"creator_image_url": "",
|
||||
"status": "COMPLETED",
|
||||
"updated_at": "2023-01-01T00:00:00",
|
||||
"name": "Test Agent 1",
|
||||
"description": "Test Description 1",
|
||||
"input_schema": {
|
||||
"type": "object",
|
||||
"properties": {}
|
||||
},
|
||||
"new_output": false,
|
||||
"can_access_graph": true,
|
||||
"is_latest_version": true
|
||||
},
|
||||
{
|
||||
"id": "test-agent-2",
|
||||
"graph_id": "test-agent-2",
|
||||
"graph_version": 1,
|
||||
"image_url": null,
|
||||
"creator_name": "Test Creator",
|
||||
"creator_image_url": "",
|
||||
"status": "COMPLETED",
|
||||
"updated_at": "2023-01-01T00:00:00",
|
||||
"name": "Test Agent 2",
|
||||
"description": "Test Description 2",
|
||||
"input_schema": {
|
||||
"type": "object",
|
||||
"properties": {}
|
||||
},
|
||||
"new_output": false,
|
||||
"can_access_graph": false,
|
||||
"is_latest_version": true
|
||||
}
|
||||
],
|
||||
"pagination": {
|
||||
"total_items": 2,
|
||||
"total_pages": 1,
|
||||
"current_page": 1,
|
||||
"page_size": 50
|
||||
}
|
||||
}
|
||||
30
autogpt_platform/backend/snapshots/log_anlyt_cplx
Normal file
30
autogpt_platform/backend/snapshots/log_anlyt_cplx
Normal file
@@ -0,0 +1,30 @@
|
||||
{
|
||||
"analytics_id": "analytics-complex-uuid",
|
||||
"logged_data": {
|
||||
"agent_id": "agent_123",
|
||||
"blocks_used": [
|
||||
{
|
||||
"block_id": "llm_block",
|
||||
"count": 3
|
||||
},
|
||||
{
|
||||
"block_id": "http_block",
|
||||
"count": 5
|
||||
},
|
||||
{
|
||||
"block_id": "code_block",
|
||||
"count": 2
|
||||
}
|
||||
],
|
||||
"duration_ms": 3500,
|
||||
"errors": [],
|
||||
"execution_id": "exec_456",
|
||||
"metadata": {
|
||||
"environment": "production",
|
||||
"trigger": "manual",
|
||||
"user_tier": "premium"
|
||||
},
|
||||
"nodes_executed": 15,
|
||||
"status": "completed"
|
||||
}
|
||||
}
|
||||
3
autogpt_platform/backend/snapshots/log_anlyt_ok
Normal file
3
autogpt_platform/backend/snapshots/log_anlyt_ok
Normal file
@@ -0,0 +1,3 @@
|
||||
{
|
||||
"analytics_id": "analytics-789-uuid"
|
||||
}
|
||||
3
autogpt_platform/backend/snapshots/log_metric_ok
Normal file
3
autogpt_platform/backend/snapshots/log_metric_ok
Normal file
@@ -0,0 +1,3 @@
|
||||
{
|
||||
"metric_id": "metric-123-uuid"
|
||||
}
|
||||
3
autogpt_platform/backend/snapshots/log_metric_vals
Normal file
3
autogpt_platform/backend/snapshots/log_metric_vals
Normal file
@@ -0,0 +1,3 @@
|
||||
{
|
||||
"metric_id": "metric-456-uuid"
|
||||
}
|
||||
5
autogpt_platform/backend/snapshots/otto_empty
Normal file
5
autogpt_platform/backend/snapshots/otto_empty
Normal file
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"answer": "Welcome! How can I help you?",
|
||||
"documents": [],
|
||||
"success": true
|
||||
}
|
||||
5
autogpt_platform/backend/snapshots/otto_err
Normal file
5
autogpt_platform/backend/snapshots/otto_err
Normal file
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"answer": "An error occurred while processing your request.",
|
||||
"documents": [],
|
||||
"success": false
|
||||
}
|
||||
10
autogpt_platform/backend/snapshots/otto_grph
Normal file
10
autogpt_platform/backend/snapshots/otto_grph
Normal file
@@ -0,0 +1,10 @@
|
||||
{
|
||||
"answer": "Here's information about your graph.",
|
||||
"documents": [
|
||||
{
|
||||
"relevance_score": 0.92,
|
||||
"url": "https://example.com/graph-doc"
|
||||
}
|
||||
],
|
||||
"success": true
|
||||
}
|
||||
14
autogpt_platform/backend/snapshots/otto_ok
Normal file
14
autogpt_platform/backend/snapshots/otto_ok
Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"answer": "This is Otto's response to your query.",
|
||||
"documents": [
|
||||
{
|
||||
"relevance_score": 0.95,
|
||||
"url": "https://example.com/doc1"
|
||||
},
|
||||
{
|
||||
"relevance_score": 0.87,
|
||||
"url": "https://example.com/doc2"
|
||||
}
|
||||
],
|
||||
"success": true
|
||||
}
|
||||
7
autogpt_platform/backend/snapshots/sub
Normal file
7
autogpt_platform/backend/snapshots/sub
Normal file
@@ -0,0 +1,7 @@
|
||||
{
|
||||
"channel": "3e53486c-cf57-477e-ba2a-cb02dc828e1a|graph_exec#test-graph-exec-1",
|
||||
"data": null,
|
||||
"error": null,
|
||||
"method": "subscribe_graph_execution",
|
||||
"success": true
|
||||
}
|
||||
9
autogpt_platform/backend/snapshots/sub_pagination
Normal file
9
autogpt_platform/backend/snapshots/sub_pagination
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"submissions": [],
|
||||
"pagination": {
|
||||
"total_items": 10,
|
||||
"total_pages": 2,
|
||||
"current_page": 2,
|
||||
"page_size": 5
|
||||
}
|
||||
}
|
||||
32
autogpt_platform/backend/snapshots/sub_success
Normal file
32
autogpt_platform/backend/snapshots/sub_success
Normal file
@@ -0,0 +1,32 @@
|
||||
{
|
||||
"submissions": [
|
||||
{
|
||||
"agent_id": "test-agent-id",
|
||||
"agent_version": 1,
|
||||
"name": "Test Agent",
|
||||
"sub_heading": "Test agent subheading",
|
||||
"slug": "test-agent",
|
||||
"description": "Test agent description",
|
||||
"image_urls": [
|
||||
"test.jpg"
|
||||
],
|
||||
"date_submitted": "2023-01-01T00:00:00",
|
||||
"status": "APPROVED",
|
||||
"runs": 50,
|
||||
"rating": 4.2,
|
||||
"store_listing_version_id": null,
|
||||
"version": null,
|
||||
"reviewer_id": null,
|
||||
"review_comments": null,
|
||||
"internal_comments": null,
|
||||
"reviewed_at": null,
|
||||
"changes_summary": null
|
||||
}
|
||||
],
|
||||
"pagination": {
|
||||
"total_items": 1,
|
||||
"total_pages": 1,
|
||||
"current_page": 1,
|
||||
"page_size": 20
|
||||
}
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user