Merge branch 'dev' into fix/untrusted-origins

This commit is contained in:
Swifty
2025-06-11 11:07:56 +02:00
committed by GitHub
130 changed files with 6621 additions and 2561 deletions

47
.github/workflows/claude.yml vendored Normal file
View File

@@ -0,0 +1,47 @@
name: Claude Code
on:
issue_comment:
types: [created]
pull_request_review_comment:
types: [created]
issues:
types: [opened, assigned]
pull_request_review:
types: [submitted]
jobs:
claude:
if: |
(
(github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
(github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
(github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
(github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
) && (
github.event.comment.author_association == 'OWNER' ||
github.event.comment.author_association == 'MEMBER' ||
github.event.comment.author_association == 'COLLABORATOR' ||
github.event.review.author_association == 'OWNER' ||
github.event.review.author_association == 'MEMBER' ||
github.event.review.author_association == 'COLLABORATOR' ||
github.event.issue.author_association == 'OWNER' ||
github.event.issue.author_association == 'MEMBER' ||
github.event.issue.author_association == 'COLLABORATOR'
)
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: read
issues: read
id-token: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Run Claude Code
id: claude
uses: anthropics/claude-code-action@beta
with:
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}

View File

@@ -1,282 +1,51 @@
name: AutoGPT Platform - Dev Deploy PR Event Dispatcher
name: AutoGPT Platform - Deploy Dev Environment
on:
pull_request:
types: [closed]
issue_comment:
types: [created]
push:
branches: [ dev ]
paths:
- 'autogpt_platform/**'
permissions:
issues: write
pull-requests: write
contents: 'read'
id-token: 'write'
jobs:
dispatch:
migrate:
environment: develop
name: Run migrations for AutoGPT Platform
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Install Python dependencies
run: |
python -m pip install --upgrade pip
pip install prisma
- name: Run Backend Migrations
working-directory: ./autogpt_platform/backend
run: |
python -m prisma migrate deploy
env:
DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}
DIRECT_URL: ${{ secrets.BACKEND_DATABASE_URL }}
trigger:
needs: migrate
runs-on: ubuntu-latest
steps:
- name: Check comment permissions and deployment status
id: check_status
if: github.event_name == 'issue_comment' && github.event.issue.pull_request
uses: actions/github-script@v7
- name: Trigger deploy workflow
uses: peter-evans/repository-dispatch@v3
with:
script: |
const commentBody = context.payload.comment.body.trim();
const commentUser = context.payload.comment.user.login;
const prAuthor = context.payload.issue.user.login;
const authorAssociation = context.payload.comment.author_association;
const triggeringCommentId = context.payload.comment.id;
// Check permissions
const hasPermission = (
authorAssociation === 'OWNER' ||
authorAssociation === 'MEMBER' ||
authorAssociation === 'COLLABORATOR'
);
core.setOutput('comment_body', commentBody);
core.setOutput('has_permission', hasPermission);
if (!hasPermission && (commentBody === '!deploy' || commentBody === '!undeploy')) {
core.setOutput('permission_denied', 'true');
return;
}
if (commentBody !== '!deploy' && commentBody !== '!undeploy') {
return;
}
// Get all comments to check deployment status
const commentsResponse = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
per_page: 100
});
// Filter out the triggering comment
const commentsData = commentsResponse.data.filter(comment => comment.id !== triggeringCommentId);
// Find the last deploy and undeploy commands
let lastDeployIndex = -2;
let lastUndeployIndex = -1;
console.log(`Found ${commentsResponse.data.length} total comments, using ${commentsData.length} for status check after filtering`);
// Iterate through comments in reverse to find the most recent commands
for (let i = commentsData.length - 1; i >= 0; i--) {
const currentCommentBody = commentsData[i].body.trim();
console.log(`Processing comment ${i}: ${currentCommentBody}`);
if (currentCommentBody === '!deploy' && lastDeployIndex === -2) {
lastDeployIndex = i;
} else if (currentCommentBody === '!undeploy' && lastUndeployIndex === -1) {
lastUndeployIndex = i;
}
// Break early if we found both
if (lastDeployIndex !== -2 && lastUndeployIndex !== -1) {
break;
}
}
console.log(`Last deploy index: ${lastDeployIndex}`);
console.log(`Last undeploy index: ${lastUndeployIndex}`);
// Currently deployed if there's a deploy command after the last undeploy
const isCurrentlyDeployed = lastDeployIndex > lastUndeployIndex;
// Determine actions based on current state and requested command
if (commentBody === '!deploy') {
if (isCurrentlyDeployed) {
core.setOutput('deploy_blocked', 'already_deployed');
} else {
core.setOutput('should_deploy', 'true');
}
} else if (commentBody === '!undeploy') {
if (!isCurrentlyDeployed) {
// Check if there was ever a deploy
const hasEverDeployed = lastDeployIndex !== -2;
core.setOutput('undeploy_blocked', hasEverDeployed ? 'already_undeployed' : 'never_deployed');
} else {
core.setOutput('should_undeploy', 'true');
}
}
core.setOutput('has_active_deployment', isCurrentlyDeployed);
- name: Post permission denied comment
if: steps.check_status.outputs.permission_denied == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `❌ **Permission denied**: Only the repository owners, members, or collaborators can use deployment commands.`
});
- name: Post deploy blocked comment
if: steps.check_status.outputs.deploy_blocked == 'already_deployed'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `⚠️ **Deploy skipped**: This PR already has an active deployment. Use \`!undeploy\` first if you want to redeploy.`
});
- name: Post undeploy blocked comment
if: steps.check_status.outputs.undeploy_blocked != ''
uses: actions/github-script@v7
with:
script: |
const reason = '${{ steps.check_status.outputs.undeploy_blocked }}';
let message;
if (reason === 'never_deployed') {
message = `⚠️ **Undeploy skipped**: This PR has never been deployed. Use \`!deploy\` first.`;
} else if (reason === 'already_undeployed') {
message = `⚠️ **Undeploy skipped**: This PR is already undeployed.`;
}
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: message
});
- name: Get PR details for deployment
id: pr_details
if: steps.check_status.outputs.should_deploy == 'true' || steps.check_status.outputs.should_undeploy == 'true'
uses: actions/github-script@v7
with:
script: |
const pr = await github.rest.pulls.get({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: context.issue.number
});
core.setOutput('pr_number', pr.data.number);
core.setOutput('pr_title', pr.data.title);
core.setOutput('pr_state', pr.data.state);
- name: Dispatch Deploy Event
if: steps.check_status.outputs.should_deploy == 'true'
uses: peter-evans/repository-dispatch@v2
with:
token: ${{ secrets.DISPATCH_TOKEN }}
token: ${{ secrets.DEPLOY_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: pr-event
client-payload: |
{
"action": "deploy",
"pr_number": "${{ steps.pr_details.outputs.pr_number }}",
"pr_title": "${{ steps.pr_details.outputs.pr_title }}",
"pr_state": "${{ steps.pr_details.outputs.pr_state }}",
"repo": "${{ github.repository }}"
}
- name: Post deploy success comment
if: steps.check_status.outputs.should_deploy == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `🚀 **Deploying PR #${{ steps.pr_details.outputs.pr_number }}** to development environment...`
});
- name: Dispatch Undeploy Event (from comment)
if: steps.check_status.outputs.should_undeploy == 'true'
uses: peter-evans/repository-dispatch@v2
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: pr-event
client-payload: |
{
"action": "undeploy",
"pr_number": "${{ steps.pr_details.outputs.pr_number }}",
"pr_title": "${{ steps.pr_details.outputs.pr_title }}",
"pr_state": "${{ steps.pr_details.outputs.pr_state }}",
"repo": "${{ github.repository }}"
}
- name: Post undeploy success comment
if: steps.check_status.outputs.should_undeploy == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `🗑️ **Undeploying PR #${{ steps.pr_details.outputs.pr_number }}** from development environment...`
});
- name: Check deployment status on PR close
id: check_pr_close
if: github.event_name == 'pull_request' && github.event.action == 'closed'
uses: actions/github-script@v7
with:
script: |
const comments = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number
});
let lastDeployIndex = -1;
let lastUndeployIndex = -1;
comments.data.forEach((comment, index) => {
if (comment.body.trim() === '!deploy') {
lastDeployIndex = index;
} else if (comment.body.trim() === '!undeploy') {
lastUndeployIndex = index;
}
});
// Should undeploy if there's a !deploy without a subsequent !undeploy
const shouldUndeploy = lastDeployIndex !== -1 && lastDeployIndex > lastUndeployIndex;
core.setOutput('should_undeploy', shouldUndeploy);
- name: Dispatch Undeploy Event (PR closed with active deployment)
if: >-
github.event_name == 'pull_request' &&
github.event.action == 'closed' &&
steps.check_pr_close.outputs.should_undeploy == 'true'
uses: peter-evans/repository-dispatch@v2
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: pr-event
client-payload: |
{
"action": "undeploy",
"pr_number": "${{ github.event.pull_request.number }}",
"pr_title": "${{ github.event.pull_request.title }}",
"pr_state": "${{ github.event.pull_request.state }}",
"repo": "${{ github.repository }}"
}
- name: Post PR close undeploy comment
if: >-
github.event_name == 'pull_request' &&
github.event.action == 'closed' &&
steps.check_pr_close.outputs.should_undeploy == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `🧹 **Auto-undeploying**: PR closed with active deployment. Cleaning up development environment for PR #${{ github.event.pull_request.number }}.`
});
event-type: build_deploy_dev
client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "repository": "${{ github.repository }}"}'

View File

@@ -82,7 +82,7 @@ jobs:
- name: Dispatch Deploy Event
if: steps.check_status.outputs.should_deploy == 'true'
uses: peter-evans/repository-dispatch@v2
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
@@ -110,7 +110,7 @@ jobs:
- name: Dispatch Undeploy Event (from comment)
if: steps.check_status.outputs.should_undeploy == 'true'
uses: peter-evans/repository-dispatch@v2
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
@@ -168,7 +168,7 @@ jobs:
github.event_name == 'pull_request' &&
github.event.action == 'closed' &&
steps.check_pr_close.outputs.should_undeploy == 'true'
uses: peter-evans/repository-dispatch@v2
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure

132
autogpt_platform/CLAUDE.md Normal file
View File

@@ -0,0 +1,132 @@
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
## Repository Overview
AutoGPT Platform is a monorepo containing:
- **Backend** (`/backend`): Python FastAPI server with async support
- **Frontend** (`/frontend`): Next.js React application
- **Shared Libraries** (`/autogpt_libs`): Common Python utilities
## Essential Commands
### Backend Development
```bash
# Install dependencies
cd backend && poetry install
# Run database migrations
poetry run prisma migrate dev
# Start all services (database, redis, rabbitmq)
docker compose up -d
# Run the backend server
poetry run serve
# Run tests
poetry run test
# Run specific test
poetry run pytest path/to/test_file.py::test_function_name
# Lint and format
poetry run format # Black + isort
poetry run lint # ruff
```
More details can be found in TESTING.md
#### Creating/Updating Snapshots
When you first write a test or when the expected output changes:
```bash
poetry run pytest path/to/test.py --snapshot-update
```
⚠️ **Important**: Always review snapshot changes before committing! Use `git diff` to verify the changes are expected.
### Frontend Development
```bash
# Install dependencies
cd frontend && npm install
# Start development server
npm run dev
# Run E2E tests
npm run test
# Run Storybook for component development
npm run storybook
# Build production
npm run build
# Type checking
npm run type-check
```
## Architecture Overview
### Backend Architecture
- **API Layer**: FastAPI with REST and WebSocket endpoints
- **Database**: PostgreSQL with Prisma ORM, includes pgvector for embeddings
- **Queue System**: RabbitMQ for async task processing
- **Execution Engine**: Separate executor service processes agent workflows
- **Authentication**: JWT-based with Supabase integration
### Frontend Architecture
- **Framework**: Next.js App Router with React Server Components
- **State Management**: React hooks + Supabase client for real-time updates
- **Workflow Builder**: Visual graph editor using @xyflow/react
- **UI Components**: Radix UI primitives with Tailwind CSS styling
- **Feature Flags**: LaunchDarkly integration
### Key Concepts
1. **Agent Graphs**: Workflow definitions stored as JSON, executed by the backend
2. **Blocks**: Reusable components in `/backend/blocks/` that perform specific tasks
3. **Integrations**: OAuth and API connections stored per user
4. **Store**: Marketplace for sharing agent templates
### Testing Approach
- Backend uses pytest with snapshot testing for API responses
- Test files are colocated with source files (`*_test.py`)
- Frontend uses Playwright for E2E tests
- Component testing via Storybook
### Database Schema
Key models (defined in `/backend/schema.prisma`):
- `User`: Authentication and profile data
- `AgentGraph`: Workflow definitions with version control
- `AgentGraphExecution`: Execution history and results
- `AgentNode`: Individual nodes in a workflow
- `StoreListing`: Marketplace listings for sharing agents
### Environment Configuration
- Backend: `.env` file in `/backend`
- Frontend: `.env.local` file in `/frontend`
- Both require Supabase credentials and API keys for various services
### Common Development Tasks
**Adding a new block:**
1. Create new file in `/backend/backend/blocks/`
2. Inherit from `Block` base class
3. Define input/output schemas
4. Implement `run` method
5. Register in block registry
**Modifying the API:**
1. Update route in `/backend/backend/server/routers/`
2. Add/update Pydantic models in same directory
3. Write tests alongside the route file
4. Run `poetry run test` to verify
**Frontend feature development:**
1. Components go in `/frontend/src/components/`
2. Use existing UI components from `/frontend/src/components/ui/`
3. Add Storybook stories for new components
4. Test with Playwright if user-facing

View File

@@ -0,0 +1,237 @@
# Backend Testing Guide
This guide covers testing practices for the AutoGPT Platform backend, with a focus on snapshot testing for API endpoints.
## Table of Contents
- [Overview](#overview)
- [Running Tests](#running-tests)
- [Snapshot Testing](#snapshot-testing)
- [Writing Tests for API Routes](#writing-tests-for-api-routes)
- [Best Practices](#best-practices)
## Overview
The backend uses pytest for testing with the following key libraries:
- `pytest` - Test framework
- `pytest-asyncio` - Async test support
- `pytest-mock` - Mocking support
- `pytest-snapshot` - Snapshot testing for API responses
## Running Tests
### Run all tests
```bash
poetry run test
```
### Run specific test file
```bash
poetry run pytest path/to/test_file.py
```
### Run with verbose output
```bash
poetry run pytest -v
```
### Run with coverage
```bash
poetry run pytest --cov=backend
```
## Snapshot Testing
Snapshot testing captures the output of your code and compares it against previously saved snapshots. This is particularly useful for testing API responses.
### How Snapshot Testing Works
1. First run: Creates snapshot files in `snapshots/` directories
2. Subsequent runs: Compares output against saved snapshots
3. Changes detected: Test fails if output differs from snapshot
### Creating/Updating Snapshots
When you first write a test or when the expected output changes:
```bash
poetry run pytest path/to/test.py --snapshot-update
```
⚠️ **Important**: Always review snapshot changes before committing! Use `git diff` to verify the changes are expected.
### Snapshot Test Example
```python
import json
from pytest_snapshot.plugin import Snapshot
def test_api_endpoint(snapshot: Snapshot):
response = client.get("/api/endpoint")
# Snapshot the response
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(response.json(), indent=2, sort_keys=True),
"endpoint_response"
)
```
### Best Practices for Snapshots
1. **Use descriptive names**: `"user_list_response"` not `"response1"`
2. **Sort JSON keys**: Ensures consistent snapshots
3. **Format JSON**: Use `indent=2` for readable diffs
4. **Exclude dynamic data**: Remove timestamps, IDs, etc. that change between runs
Example of excluding dynamic data:
```python
response_data = response.json()
# Remove dynamic fields for snapshot
response_data.pop("created_at", None)
response_data.pop("id", None)
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(response_data, indent=2, sort_keys=True),
"static_response_data"
)
```
## Writing Tests for API Routes
### Basic Structure
```python
import json
import fastapi
import fastapi.testclient
import pytest
from pytest_snapshot.plugin import Snapshot
from backend.server.v2.myroute import router
app = fastapi.FastAPI()
app.include_router(router)
client = fastapi.testclient.TestClient(app)
def test_endpoint_success(snapshot: Snapshot):
response = client.get("/endpoint")
assert response.status_code == 200
# Test specific fields
data = response.json()
assert data["status"] == "success"
# Snapshot the full response
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(data, indent=2, sort_keys=True),
"endpoint_success_response"
)
```
### Testing with Authentication
```python
def override_auth_middleware():
return {"sub": "test-user-id"}
def override_get_user_id():
return "test-user-id"
app.dependency_overrides[auth_middleware] = override_auth_middleware
app.dependency_overrides[get_user_id] = override_get_user_id
```
### Mocking External Services
```python
def test_external_api_call(mocker, snapshot):
# Mock external service
mock_response = {"external": "data"}
mocker.patch(
"backend.services.external_api.call",
return_value=mock_response
)
response = client.post("/api/process")
assert response.status_code == 200
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(response.json(), indent=2, sort_keys=True),
"process_with_external_response"
)
```
## Best Practices
### 1. Test Organization
- Place tests next to the code: `routes.py``routes_test.py`
- Use descriptive test names: `test_create_user_with_invalid_email`
- Group related tests in classes when appropriate
### 2. Test Coverage
- Test happy path and error cases
- Test edge cases (empty data, invalid formats)
- Test authentication and authorization
### 3. Snapshot Testing Guidelines
- Review all snapshot changes carefully
- Don't snapshot sensitive data
- Keep snapshots focused and minimal
- Update snapshots intentionally, not accidentally
### 4. Async Testing
- Use regular `def` for FastAPI TestClient tests
- Use `async def` with `@pytest.mark.asyncio` for testing async functions directly
### 5. Fixtures
Create reusable fixtures for common test data:
```python
@pytest.fixture
def sample_user():
return {
"email": "test@example.com",
"name": "Test User"
}
def test_create_user(sample_user, snapshot):
response = client.post("/users", json=sample_user)
# ... test implementation
```
## CI/CD Integration
The GitHub Actions workflow automatically runs tests on:
- Pull requests
- Pushes to main branch
Snapshot tests work in CI by:
1. Committing snapshot files to the repository
2. CI compares against committed snapshots
3. Fails if snapshots don't match
## Troubleshooting
### Snapshot Mismatches
- Review the diff carefully
- If changes are expected: `poetry run pytest --snapshot-update`
- If changes are unexpected: Fix the code causing the difference
### Async Test Issues
- Ensure async functions use `@pytest.mark.asyncio`
- Use `AsyncMock` for mocking async functions
- FastAPI TestClient handles async automatically
### Import Errors
- Check that all dependencies are in `pyproject.toml`
- Run `poetry install` to ensure dependencies are installed
- Verify import paths are correct
## Summary
Snapshot testing provides a powerful way to ensure API responses remain consistent. Combined with traditional assertions, it creates a robust test suite that catches regressions while remaining maintainable.
Remember: Good tests are as important as good code!

View File

@@ -21,6 +21,7 @@ logger = logging.getLogger(__name__)
class FalModel(str, Enum):
MOCHI = "fal-ai/mochi-v1"
LUMA = "fal-ai/luma-dream-machine"
VEO3 = "fal-ai/veo3"
class AIVideoGeneratorBlock(Block):
@@ -102,6 +103,8 @@ class AIVideoGeneratorBlock(Block):
# Submit generation request
submit_url = f"{base_url}/{input_data.model.value}"
submit_data = {"prompt": input_data.prompt}
if input_data.model == FalModel.VEO3:
submit_data["generate_audio"] = True # type: ignore
seen_logs = set()

View File

@@ -38,7 +38,7 @@ from backend.integrations.credentials_store import (
# =============== Configure the cost for each LLM Model call =============== #
MODEL_COST: dict[LlmModel, int] = {
LlmModel.O3: 7,
LlmModel.O3: 4,
LlmModel.O3_MINI: 2, # $1.10 / $4.40
LlmModel.O1: 16, # $15 / $60
LlmModel.O1_PREVIEW: 16,

View File

@@ -628,15 +628,11 @@ async def update_node_execution_stats(
data = stats.model_dump()
if isinstance(data["error"], Exception):
data["error"] = str(data["error"])
execution_status = ExecutionStatus.FAILED
else:
execution_status = ExecutionStatus.COMPLETED
res = await AgentNodeExecution.prisma().update(
where={"id": node_exec_id},
data={
"stats": Json(data),
"executionStatus": execution_status,
"endedTime": datetime.now(tz=timezone.utc),
},
include=EXECUTION_RESULT_INCLUDE,

View File

@@ -706,6 +706,44 @@ class Executor:
)
running_executions[output.node.id].add_output(output)
def drain_done_task(node_exec_id: str, result: object):
if not isinstance(result, NodeExecutionStats):
log_metadata.error(f"Unexpected result #{node_exec_id}: {type(result)}")
return
nonlocal execution_stats
execution_stats.node_count += 1
execution_stats.nodes_cputime += result.cputime
execution_stats.nodes_walltime += result.walltime
if (err := result.error) and isinstance(err, Exception):
execution_stats.node_error_count += 1
update_node_execution_status(
db_client=cls.db_client,
exec_id=node_exec_id,
status=ExecutionStatus.FAILED,
)
else:
update_node_execution_status(
db_client=cls.db_client,
exec_id=node_exec_id,
status=ExecutionStatus.COMPLETED,
)
if _graph_exec := cls.db_client.update_graph_execution_stats(
graph_exec_id=graph_exec.graph_exec_id,
status=execution_status,
stats=execution_stats,
):
send_execution_update(_graph_exec)
else:
logger.error(
"Callback for "
f"finished node execution #{node_exec_id} "
"could not update execution stats "
f"for graph execution #{graph_exec.graph_exec_id}; "
f"triggered while graph exec status = {execution_status}"
)
def cancel_handler():
nonlocal execution_status
@@ -739,38 +777,12 @@ class Executor:
execution_queue.add(node_exec.to_node_execution_entry())
running_executions: dict[str, NodeExecutionProgress] = defaultdict(
lambda: NodeExecutionProgress(drain_output_queue)
lambda: NodeExecutionProgress(
drain_output_queue=drain_output_queue,
drain_done_task=drain_done_task,
)
)
def make_exec_callback(exec_data: NodeExecutionEntry):
def callback(result: object):
if not isinstance(result, NodeExecutionStats):
return
nonlocal execution_stats
execution_stats.node_count += 1
execution_stats.nodes_cputime += result.cputime
execution_stats.nodes_walltime += result.walltime
if (err := result.error) and isinstance(err, Exception):
execution_stats.node_error_count += 1
if _graph_exec := cls.db_client.update_graph_execution_stats(
graph_exec_id=exec_data.graph_exec_id,
status=execution_status,
stats=execution_stats,
):
send_execution_update(_graph_exec)
else:
logger.error(
"Callback for "
f"finished node execution #{exec_data.node_exec_id} "
"could not update execution stats "
f"for graph execution #{exec_data.graph_exec_id}; "
f"triggered while graph exec status = {execution_status}"
)
return callback
while not execution_queue.empty():
if cancel.is_set():
execution_status = ExecutionStatus.TERMINATED
@@ -829,7 +841,6 @@ class Executor:
cls.executor.apply_async(
cls.on_node_execution,
(output_queue, queued_node_exec, node_creds_map),
callback=make_exec_callback(queued_node_exec),
),
)
@@ -845,9 +856,6 @@ class Executor:
execution_status = ExecutionStatus.TERMINATED
return execution_stats, execution_status, error
if not execution_queue.empty():
break # yield to parent loop to execute new queue items
log_metadata.debug(f"Waiting on execution of node {node_id}")
while output := execution.pop_output():
cls._process_node_output(
@@ -858,11 +866,20 @@ class Executor:
node_creds_map=node_creds_map,
execution_queue=execution_queue,
)
if not execution_queue.empty():
break # Prioritize executing next nodes than enqueuing outputs
if execution.is_done(1):
if execution.is_done():
running_executions.pop(node_id)
else:
time.sleep(0.1)
if not execution_queue.empty():
continue # Make sure each not is checked once
if execution_queue.empty() and running_executions:
log_metadata.debug(
"No more nodes to execute, waiting for outputs..."
)
time.sleep(0.1)
log_metadata.info(f"Finished graph execution {graph_exec.graph_exec_id}")
execution_status = ExecutionStatus.COMPLETED

View File

@@ -818,10 +818,15 @@ class ExecutionOutputEntry(BaseModel):
class NodeExecutionProgress:
def __init__(self, drain_output_queue: Callable[[], None]):
def __init__(
self,
drain_output_queue: Callable[[], None],
drain_done_task: Callable[[str, object], None],
):
self.output: dict[str, list[ExecutionOutputEntry]] = defaultdict(list)
self.tasks: dict[str, AsyncResult] = {}
self.drain_output_queue = drain_output_queue
self.drain_done_task = drain_done_task
def add_task(self, node_exec_id: str, task: AsyncResult):
self.tasks[node_exec_id] = task
@@ -868,7 +873,9 @@ class NodeExecutionProgress:
if self.output[exec_id]:
return False
self.tasks.pop(exec_id)
if task := self.tasks.pop(exec_id):
self.drain_done_task(exec_id, task.get())
return True
def _next_exec(self) -> str | None:

View File

@@ -0,0 +1,17 @@
"""Common test fixtures for server tests."""
import pytest
from pytest_snapshot.plugin import Snapshot
@pytest.fixture
def configured_snapshot(snapshot: Snapshot) -> Snapshot:
"""Pre-configured snapshot fixture with standard settings."""
snapshot.snapshot_dir = "snapshots"
return snapshot
# Test ID constants
TEST_USER_ID = "test-user-id"
ADMIN_USER_ID = "admin-user-id"
TARGET_USER_ID = "target-user-id"

View File

@@ -120,9 +120,17 @@ def callback(
)
except Exception as e:
logger.error(f"Code->Token exchange failed for provider {provider.value}: {e}")
logger.exception(
"OAuth callback for provider %s failed during code exchange: %s. Confirm provider credentials.",
provider.value,
e,
)
raise HTTPException(
status_code=400, detail=f"Failed to exchange code for tokens: {str(e)}"
status_code=400,
detail={
"message": str(e),
"hint": "Verify OAuth configuration and try again.",
},
)
# TODO: Allow specifying `title` to set on `credentials`
@@ -286,9 +294,13 @@ async def webhook_ingress_generic(
try:
webhook = await get_webhook(webhook_id)
except NotFoundError as e:
logger.warning(f"Webhook payload received for unknown webhook: {e}")
logger.warning(
"Webhook payload received for unknown webhook %s. Confirm the webhook ID.",
webhook_id,
)
raise HTTPException(
status_code=HTTP_404_NOT_FOUND, detail=f"Webhook #{webhook_id} not found"
status_code=HTTP_404_NOT_FOUND,
detail={"message": str(e), "hint": "Check if the webhook ID is correct."},
) from e
logger.debug(f"Webhook #{webhook_id}: {webhook}")
payload, event_type = await webhook_manager.validate_payload(webhook, request)
@@ -398,11 +410,16 @@ def _get_provider_oauth_handler(
client_id = getattr(settings.secrets, f"{provider_name.value}_client_id")
client_secret = getattr(settings.secrets, f"{provider_name.value}_client_secret")
if not (client_id and client_secret):
logger.error(
"OAuth credentials for provider %s are missing. Check environment configuration.",
provider_name.value,
)
raise HTTPException(
status_code=501,
detail=(
f"Integration with provider '{provider_name.value}' is not configured"
),
detail={
"message": f"Integration with provider '{provider_name.value}' is not configured",
"hint": "Set client ID and secret in the environment.",
},
)
handler_class = HANDLERS_BY_NAME[provider_name]

View File

@@ -5,6 +5,7 @@ from typing import Any, Optional
import autogpt_libs.auth.models
import fastapi
import fastapi.responses
import pydantic
import starlette.middleware.cors
import uvicorn
from autogpt_libs.feature_flag.client import (
@@ -12,6 +13,7 @@ from autogpt_libs.feature_flag.client import (
shutdown_launchdarkly,
)
from autogpt_libs.logging.utils import generate_uvicorn_config
from fastapi.exceptions import RequestValidationError
import backend.data.block
import backend.data.db
@@ -86,11 +88,23 @@ app = fastapi.FastAPI(
def handle_internal_http_error(status_code: int = 500, log_error: bool = True):
def handler(request: fastapi.Request, exc: Exception):
if log_error:
logger.exception(f"{request.method} {request.url.path} failed: {exc}")
logger.exception(
"%s %s failed. Investigate and resolve the underlying issue: %s",
request.method,
request.url.path,
exc,
)
hint = (
"Adjust the request and retry."
if status_code < 500
else "Check server logs and dependent services."
)
return fastapi.responses.JSONResponse(
content={
"message": f"{request.method} {request.url.path} failed",
"message": f"Failed to process {request.method} {request.url.path}",
"detail": str(exc),
"hint": hint,
},
status_code=status_code,
)
@@ -98,6 +112,32 @@ def handle_internal_http_error(status_code: int = 500, log_error: bool = True):
return handler
async def validation_error_handler(
    request: fastapi.Request, exc: Exception
) -> fastapi.responses.JSONResponse:
    """Render request/model validation failures as a structured 422 response."""
    logger.error(
        "Validation failed for %s %s: %s. Fix the request payload and try again.",
        request.method,
        request.url.path,
        exc,
    )
    # FastAPI/pydantic validation errors expose structured details via .errors();
    # anything else is reported as its string form.
    errors: list | str = (
        exc.errors() if hasattr(exc, "errors") else str(exc)  # type: ignore[call-arg]
    )
    return fastapi.responses.JSONResponse(
        status_code=422,
        content={
            "message": f"Invalid data for {request.method} {request.url.path}",
            "detail": errors,
            "hint": "Ensure the request matches the API schema.",
        },
    )
app.add_exception_handler(RequestValidationError, validation_error_handler)
app.add_exception_handler(pydantic.ValidationError, validation_error_handler)
app.add_exception_handler(ValueError, handle_internal_http_error(400))
app.add_exception_handler(Exception, handle_internal_http_error(500))
app.include_router(backend.server.routers.v1.v1_router, tags=["v1"], prefix="/api")

View File

@@ -1,5 +1,6 @@
"""Analytics API"""
import logging
from typing import Annotated
import fastapi
@@ -8,6 +9,7 @@ import backend.data.analytics
from backend.server.utils import get_user_id
router = fastapi.APIRouter()
logger = logging.getLogger(__name__)
@router.post(path="/log_raw_metric")
@@ -17,13 +19,25 @@ async def log_raw_metric(
metric_value: Annotated[float, fastapi.Body(..., embed=True)],
data_string: Annotated[str, fastapi.Body(..., embed=True)],
):
result = await backend.data.analytics.log_raw_metric(
user_id=user_id,
metric_name=metric_name,
metric_value=metric_value,
data_string=data_string,
)
return result.id
try:
result = await backend.data.analytics.log_raw_metric(
user_id=user_id,
metric_name=metric_name,
metric_value=metric_value,
data_string=data_string,
)
return result.id
except Exception as e:
logger.exception(
"Failed to log metric %s for user %s: %s", metric_name, user_id, e
)
raise fastapi.HTTPException(
status_code=500,
detail={
"message": str(e),
"hint": "Check analytics service connection and retry.",
},
)
@router.post("/log_raw_analytics")
@@ -43,7 +57,14 @@ async def log_raw_analytics(
),
],
):
result = await backend.data.analytics.log_raw_analytics(
user_id, type, data, data_index
)
return result.id
try:
result = await backend.data.analytics.log_raw_analytics(
user_id, type, data, data_index
)
return result.id
except Exception as e:
logger.exception("Failed to log analytics for user %s: %s", user_id, e)
raise fastapi.HTTPException(
status_code=500,
detail={"message": str(e), "hint": "Ensure analytics DB is reachable."},
)

View File

@@ -0,0 +1,139 @@
"""Example of analytics tests with improved error handling and assertions."""
import json
from unittest.mock import AsyncMock, Mock
import fastapi
import fastapi.testclient
import pytest_mock
from pytest_snapshot.plugin import Snapshot
import backend.server.routers.analytics as analytics_routes
from backend.server.conftest import TEST_USER_ID
from backend.server.test_helpers import (
assert_error_response_structure,
assert_mock_called_with_partial,
assert_response_status,
safe_parse_json,
)
from backend.server.utils import get_user_id
# Minimal in-process app hosting only the analytics router under test.
app = fastapi.FastAPI()
app.include_router(analytics_routes.router)
client = fastapi.testclient.TestClient(app)


def override_get_user_id() -> str:
    """Override get_user_id for testing"""
    return TEST_USER_ID


# Every request in this module is attributed to TEST_USER_ID via DI override.
app.dependency_overrides[get_user_id] = override_get_user_id
def test_log_raw_metric_success_improved(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Happy path: POST /log_raw_metric returns the stored metric's id."""
    # Stub out the persistence layer so no database is touched.
    log_metric_stub = mocker.patch(
        "backend.data.analytics.log_raw_metric",
        new_callable=AsyncMock,
        return_value=Mock(id="metric-123-uuid"),
    )

    payload = {
        "metric_name": "page_load_time",
        "metric_value": 2.5,
        "data_string": "/dashboard",
    }
    response = client.post("/log_raw_metric", json=payload)

    assert_response_status(response, 200, "Metric logging should succeed")
    body = safe_parse_json(response, "Metric response parsing")
    assert body == "metric-123-uuid", f"Unexpected response: {body}"

    # The route must forward the authenticated user's id plus the payload.
    assert_mock_called_with_partial(
        log_metric_stub,
        user_id=TEST_USER_ID,
        metric_name="page_load_time",
        metric_value=2.5,
        data_string="/dashboard",
    )

    configured_snapshot.assert_match(
        json.dumps({"metric_id": body}, indent=2, sort_keys=True),
        "analytics_log_metric_success_improved",
    )
def test_log_raw_metric_invalid_request_improved() -> None:
    """An empty body must yield a 422 naming every missing required field."""
    response = client.post("/log_raw_metric", json={})

    error_data = assert_error_response_structure(
        response, expected_status=422, expected_error_fields=["loc", "msg", "type"]
    )

    detail = error_data["detail"]
    assert isinstance(detail, list), "Error detail should be a list"
    assert len(detail) > 0, "Should have at least one error"

    # Collect the innermost location segment of each reported error.
    reported_fields = [err["loc"][-1] for err in detail if "loc" in err]
    for field in ("metric_name", "metric_value", "data_string"):
        assert field in reported_fields, f"Should report missing {field}"
def test_log_raw_metric_type_validation_improved() -> None:
    """Test metric type validation with improved assertions."""
    # Each case posts an invalid payload and expects a 422 whose body
    # mentions the expected error text.
    # NOTE(review): the expected messages mix wordings — "Input should be a
    # valid number" / "String should have at least 1 character" are
    # pydantic-v2 style while "ensure this value is finite" reads as
    # pydantic-v1; confirm against the installed pydantic version.
    # NOTE(review): float("inf") is not representable in strict JSON; the
    # client may serialize it as `Infinity`, so that case can fail at
    # serialization rather than validation — verify intent.
    invalid_requests = [
        {
            "data": {
                "metric_name": "test",
                "metric_value": "not_a_number",  # Invalid type
                "data_string": "test",
            },
            "expected_error": "Input should be a valid number",
        },
        {
            "data": {
                "metric_name": "",  # Empty string
                "metric_value": 1.0,
                "data_string": "test",
            },
            "expected_error": "String should have at least 1 character",
        },
        {
            "data": {
                "metric_name": "test",
                "metric_value": float("inf"),  # Infinity
                "data_string": "test",
            },
            "expected_error": "ensure this value is finite",
        },
    ]
    for test_case in invalid_requests:
        response = client.post("/log_raw_metric", json=test_case["data"])
        error_data = assert_error_response_structure(response, expected_status=422)
        # Check that expected error is in the response (case-insensitive fallback)
        error_text = json.dumps(error_data)
        assert (
            test_case["expected_error"] in error_text
            or test_case["expected_error"].lower() in error_text.lower()
        ), f"Expected error '{test_case['expected_error']}' not found in: {error_text}"

View File

@@ -0,0 +1,107 @@
"""Example of parametrized tests for analytics endpoints."""
import json
from unittest.mock import AsyncMock, Mock
import fastapi
import fastapi.testclient
import pytest
import pytest_mock
from pytest_snapshot.plugin import Snapshot
import backend.server.routers.analytics as analytics_routes
from backend.server.conftest import TEST_USER_ID
from backend.server.utils import get_user_id
# Minimal in-process app hosting only the analytics router under test.
app = fastapi.FastAPI()
app.include_router(analytics_routes.router)
client = fastapi.testclient.TestClient(app)


def override_get_user_id() -> str:
    """Override get_user_id for testing"""
    return TEST_USER_ID


# Every request in this module is attributed to TEST_USER_ID via DI override.
app.dependency_overrides[get_user_id] = override_get_user_id
@pytest.mark.parametrize(
    "metric_value,metric_name,data_string,test_id",
    [
        (100, "api_calls_count", "external_api", "integer_value"),
        (0, "error_count", "no_errors", "zero_value"),
        (-5.2, "temperature_delta", "cooling", "negative_value"),
        (1.23456789, "precision_test", "float_precision", "float_precision"),
        (999999999, "large_number", "max_value", "large_number"),
        (0.0000001, "tiny_number", "min_value", "tiny_number"),
    ],
)
def test_log_raw_metric_values_parametrized(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
    metric_value: float,
    metric_name: str,
    data_string: str,
    test_id: str,
) -> None:
    """Each numeric edge case must be accepted and return the stubbed id."""
    # Persistence is stubbed; only request handling is under test.
    mocker.patch(
        "backend.data.analytics.log_raw_metric",
        new_callable=AsyncMock,
        return_value=Mock(id=f"metric-{test_id}-uuid"),
    )

    response = client.post(
        "/log_raw_metric",
        json={
            "metric_name": metric_name,
            "metric_value": metric_value,
            "data_string": data_string,
        },
    )
    assert response.status_code == 200, f"Failed for {test_id}: {response.text}"

    configured_snapshot.assert_match(
        json.dumps(
            {"metric_id": response.json(), "test_case": test_id},
            indent=2,
            sort_keys=True,
        ),
        f"analytics_metric_{test_id}",
    )
@pytest.mark.parametrize(
    "invalid_data,expected_error",
    [
        ({}, "Field required"),  # Missing all fields
        ({"metric_name": "test"}, "Field required"),  # Missing metric_value
        (
            {"metric_name": "test", "metric_value": "not_a_number"},
            "Input should be a valid number",
        ),  # Invalid type
        (
            {"metric_name": "", "metric_value": 1.0, "data_string": "test"},
            "String should have at least 1 character",
        ),  # Empty name
    ],
)
def test_log_raw_metric_invalid_requests_parametrized(
    invalid_data: dict,
    expected_error: str,
) -> None:
    """Every malformed payload must be rejected with a 422 naming the error."""
    response = client.post("/log_raw_metric", json=invalid_data)
    assert response.status_code == 422

    body = response.json()
    assert "detail" in body

    # Accept an exact or case-insensitive match of the expected message.
    serialized = json.dumps(body)
    assert expected_error in serialized or expected_error.lower() in serialized.lower()

View File

@@ -0,0 +1,281 @@
import json
from unittest.mock import AsyncMock, Mock
import fastapi
import fastapi.testclient
import pytest_mock
from pytest_snapshot.plugin import Snapshot
import backend.server.routers.analytics as analytics_routes
from backend.server.conftest import TEST_USER_ID
from backend.server.utils import get_user_id
# Minimal in-process app hosting only the analytics router under test.
app = fastapi.FastAPI()
app.include_router(analytics_routes.router)
client = fastapi.testclient.TestClient(app)


def override_get_user_id() -> str:
    """Override get_user_id for testing"""
    return TEST_USER_ID


# Every request in this module is attributed to TEST_USER_ID via DI override.
app.dependency_overrides[get_user_id] = override_get_user_id
def test_log_raw_metric_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test successful raw metric logging"""
    # Stub the persistence layer; the route should return the stored id.
    log_metric_stub = mocker.patch(
        "backend.data.analytics.log_raw_metric",
        new_callable=AsyncMock,
        return_value=Mock(id="metric-123-uuid"),
    )

    payload = {
        "metric_name": "page_load_time",
        "metric_value": 2.5,
        "data_string": "/dashboard",
    }
    response = client.post("/log_raw_metric", json=payload)

    assert response.status_code == 200
    body = response.json()
    assert body == "metric-123-uuid"

    # Verify the function was called with correct parameters
    log_metric_stub.assert_called_once_with(
        user_id=TEST_USER_ID,
        metric_name="page_load_time",
        metric_value=2.5,
        data_string="/dashboard",
    )

    configured_snapshot.assert_match(
        json.dumps({"metric_id": body}, indent=2, sort_keys=True),
        "analytics_log_metric_success",
    )
def test_log_raw_metric_various_values(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test raw metric logging with various metric values.

    Posts integer, zero, and negative metric values; each must be accepted
    with a 200, and the final response is snapshot-checked.
    """
    # Stub the persistence layer; only request handling is under test.
    mocker.patch(
        "backend.data.analytics.log_raw_metric",
        new_callable=AsyncMock,
        return_value=Mock(id="metric-456-uuid"),
    )

    # (metric_name, metric_value, data_string): int, zero, and negative cases.
    cases = [
        ("api_calls_count", 100, "external_api"),
        ("error_count", 0, "no_errors"),
        ("temperature_delta", -5.2, "cooling"),
    ]
    response = None
    for metric_name, metric_value, data_string in cases:
        response = client.post(
            "/log_raw_metric",
            json={
                "metric_name": metric_name,
                "metric_value": metric_value,
                "data_string": data_string,
            },
        )
        assert response.status_code == 200

    # Snapshot the last response
    assert response is not None
    configured_snapshot.assert_match(
        json.dumps({"metric_id": response.json()}, indent=2, sort_keys=True),
        "analytics_log_metric_various_values",
    )
def test_log_raw_analytics_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test successful raw analytics logging"""
    # Stub the persistence layer; the route should return the stored id.
    log_analytics_stub = mocker.patch(
        "backend.data.analytics.log_raw_analytics",
        new_callable=AsyncMock,
        return_value=Mock(id="analytics-789-uuid"),
    )

    payload = {
        "type": "user_action",
        "data": {
            "action": "button_click",
            "button_id": "submit_form",
            "timestamp": "2023-01-01T00:00:00Z",
            "metadata": {
                "form_type": "registration",
                "fields_filled": 5,
            },
        },
        "data_index": "button_click_submit_form",
    }
    response = client.post("/log_raw_analytics", json=payload)

    assert response.status_code == 200
    body = response.json()
    assert body == "analytics-789-uuid"

    # The route forwards user id, type, raw data, and index positionally.
    log_analytics_stub.assert_called_once_with(
        TEST_USER_ID,
        "user_action",
        payload["data"],
        "button_click_submit_form",
    )

    configured_snapshot.assert_match(
        json.dumps({"analytics_id": body}, indent=2, sort_keys=True),
        "analytics_log_analytics_success",
    )
def test_log_raw_analytics_complex_data(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test raw analytics logging with complex nested data"""
    # Mock the analytics function so no database is touched.
    mock_result = Mock(id="analytics-complex-uuid")
    mocker.patch(
        "backend.data.analytics.log_raw_analytics",
        new_callable=AsyncMock,
        return_value=mock_result,
    )
    # Deeply nested payload: lists of dicts, empty list, nested metadata —
    # exercises JSON round-tripping of arbitrary analytics structures.
    request_data = {
        "type": "agent_execution",
        "data": {
            "agent_id": "agent_123",
            "execution_id": "exec_456",
            "status": "completed",
            "duration_ms": 3500,
            "nodes_executed": 15,
            "blocks_used": [
                {"block_id": "llm_block", "count": 3},
                {"block_id": "http_block", "count": 5},
                {"block_id": "code_block", "count": 2},
            ],
            "errors": [],
            "metadata": {
                "trigger": "manual",
                "user_tier": "premium",
                "environment": "production",
            },
        },
        "data_index": "agent_123_exec_456",
    }
    response = client.post("/log_raw_analytics", json=request_data)
    assert response.status_code == 200
    response_data = response.json()
    # Snapshot test the complex data structure
    configured_snapshot.assert_match(
        json.dumps(
            {
                "analytics_id": response_data,
                "logged_data": request_data["data"],
            },
            indent=2,
            sort_keys=True,
        ),
        "analytics_log_analytics_complex_data",
    )
def test_log_raw_metric_invalid_request() -> None:
    """Malformed /log_raw_metric payloads must all be rejected with 422."""
    bad_payloads = [
        {},  # missing every required field
        {  # metric_value has the wrong type
            "metric_name": "test",
            "metric_value": "not_a_number",
            "data_string": "test",
        },
        {  # data_string is absent
            "metric_name": "test",
            "metric_value": 1.0,
        },
    ]
    for payload in bad_payloads:
        response = client.post("/log_raw_metric", json=payload)
        assert response.status_code == 422
def test_log_raw_analytics_invalid_request() -> None:
    """Malformed /log_raw_analytics payloads must all be rejected with 422."""
    bad_payloads = [
        {},  # missing every required field
        {  # data must be an object, not a string
            "type": "test",
            "data": "not_a_dict",
            "data_index": "test",
        },
        {  # data_index is absent
            "type": "test",
            "data": {"key": "value"},
        },
    ]
    for payload in bad_payloads:
        response = client.post("/log_raw_analytics", json=payload)
        assert response.status_code == 422

View File

@@ -2,7 +2,7 @@ import logging
from typing import Annotated
from autogpt_libs.auth.middleware import APIKeyValidator
from fastapi import APIRouter, Body, Depends, Query
from fastapi import APIRouter, Body, Depends, HTTPException, Query
from fastapi.responses import JSONResponse
from backend.data.user import (
@@ -40,8 +40,11 @@ async def unsubscribe_via_one_click(token: Annotated[str, Query()]):
try:
await unsubscribe_user_by_token(token)
except Exception as e:
logger.error(f"Failed to unsubscribe user by token {token}: {e}")
raise e
logger.exception("Unsubscribe token %s failed: %s", token, e)
raise HTTPException(
status_code=500,
detail={"message": str(e), "hint": "Verify Postmark token settings."},
)
return JSONResponse(status_code=200, content={"status": "ok"})
@@ -67,7 +70,10 @@ async def postmark_webhook_handler(
case PostmarkSubscriptionChangeWebhook():
subscription_handler(webhook)
case _:
logger.warning(f"Unknown webhook type: {type(webhook)}")
logger.warning(
"Unhandled Postmark webhook type %s. Update handler mappings.",
type(webhook),
)
return
@@ -85,7 +91,10 @@ async def bounce_handler(event: PostmarkBounceWebhook):
logger.info(f"{event.Email=}")
user = await get_user_by_email(event.Email)
if not user:
logger.error(f"User not found for email: {event.Email}")
logger.warning(
"Received bounce for unknown email %s. Ensure user records are current.",
event.Email,
)
return
await set_user_email_verification(user.id, False)
logger.debug(f"Setting email verification to false for user: {user.id}")

View File

@@ -575,6 +575,13 @@ async def execute_graph(
graph_version: Optional[int] = None,
preset_id: Optional[str] = None,
) -> ExecuteGraphResponse:
current_balance = await _user_credit_model.get_credits(user_id)
if current_balance <= 0:
raise HTTPException(
status_code=402,
detail="Insufficient balance to execute the agent. Please top up your account.",
)
graph_exec = await execution_utils.add_graph_execution_async(
graph_id=graph_id,
user_id=user_id,
@@ -817,8 +824,15 @@ async def create_api_key(
)
return CreateAPIKeyResponse(api_key=api_key, plain_text_key=plain_text)
except APIKeyError as e:
logger.error(f"Failed to create API key: {str(e)}")
raise HTTPException(status_code=400, detail=str(e))
logger.error(
"Could not create API key for user %s: %s. Review input and permissions.",
user_id,
e,
)
raise HTTPException(
status_code=400,
detail={"message": str(e), "hint": "Verify request payload and try again."},
)
@v1_router.get(
@@ -834,8 +848,11 @@ async def get_api_keys(
try:
return await list_user_api_keys(user_id)
except APIKeyError as e:
logger.error(f"Failed to list API keys: {str(e)}")
raise HTTPException(status_code=400, detail=str(e))
logger.error("Failed to list API keys for user %s: %s", user_id, e)
raise HTTPException(
status_code=400,
detail={"message": str(e), "hint": "Check API key service availability."},
)
@v1_router.get(
@@ -854,8 +871,11 @@ async def get_api_key(
raise HTTPException(status_code=404, detail="API key not found")
return api_key
except APIKeyError as e:
logger.error(f"Failed to get API key: {str(e)}")
raise HTTPException(status_code=400, detail=str(e))
logger.error("Error retrieving API key %s for user %s: %s", key_id, user_id, e)
raise HTTPException(
status_code=400,
detail={"message": str(e), "hint": "Ensure the key ID is correct."},
)
@v1_router.delete(
@@ -876,8 +896,14 @@ async def delete_api_key(
except APIKeyPermissionError:
raise HTTPException(status_code=403, detail="Permission denied")
except APIKeyError as e:
logger.error(f"Failed to revoke API key: {str(e)}")
raise HTTPException(status_code=400, detail=str(e))
logger.error("Failed to revoke API key %s for user %s: %s", key_id, user_id, e)
raise HTTPException(
status_code=400,
detail={
"message": str(e),
"hint": "Verify permissions or try again later.",
},
)
@v1_router.post(
@@ -898,8 +924,11 @@ async def suspend_key(
except APIKeyPermissionError:
raise HTTPException(status_code=403, detail="Permission denied")
except APIKeyError as e:
logger.error(f"Failed to suspend API key: {str(e)}")
raise HTTPException(status_code=400, detail=str(e))
logger.error("Failed to suspend API key %s for user %s: %s", key_id, user_id, e)
raise HTTPException(
status_code=400,
detail={"message": str(e), "hint": "Check user permissions and retry."},
)
@v1_router.put(
@@ -922,5 +951,13 @@ async def update_permissions(
except APIKeyPermissionError:
raise HTTPException(status_code=403, detail="Permission denied")
except APIKeyError as e:
logger.error(f"Failed to update API key permissions: {str(e)}")
raise HTTPException(status_code=400, detail=str(e))
logger.error(
"Failed to update permissions for API key %s of user %s: %s",
key_id,
user_id,
e,
)
raise HTTPException(
status_code=400,
detail={"message": str(e), "hint": "Ensure permissions list is valid."},
)

View File

@@ -0,0 +1,391 @@
import json
from unittest.mock import AsyncMock, Mock
import autogpt_libs.auth.depends
import fastapi
import fastapi.testclient
import pytest_mock
from pytest_snapshot.plugin import Snapshot
import backend.server.routers.v1 as v1_routes
from backend.data.credit import AutoTopUpConfig
from backend.data.graph import GraphModel
from backend.server.conftest import TEST_USER_ID
from backend.server.utils import get_user_id
# Minimal in-process app hosting the v1 router under test.
app = fastapi.FastAPI()
app.include_router(v1_routes.v1_router)
client = fastapi.testclient.TestClient(app)


def override_auth_middleware(request: fastapi.Request) -> dict[str, str]:
    """Override auth middleware for testing"""
    return {"sub": TEST_USER_ID, "role": "user", "email": "test@example.com"}


def override_get_user_id() -> str:
    """Override get_user_id for testing"""
    return TEST_USER_ID


# NOTE(review): only `autogpt_libs.auth.depends` is imported above; accessing
# `autogpt_libs.auth.middleware` here relies on that submodule being imported
# as a side effect — confirm, or import it explicitly.
app.dependency_overrides[autogpt_libs.auth.middleware.auth_middleware] = (
    override_auth_middleware
)
app.dependency_overrides[get_user_id] = override_get_user_id
# Auth endpoints tests
def test_get_or_create_user_route(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """POST /auth/user returns the (created) user's serialized profile."""
    fake_user = Mock()
    fake_user.model_dump.return_value = {
        "id": TEST_USER_ID,
        "email": "test@example.com",
        "name": "Test User",
    }
    mocker.patch(
        "backend.server.routers.v1.get_or_create_user",
        return_value=fake_user,
    )

    response = client.post("/auth/user")

    assert response.status_code == 200
    configured_snapshot.assert_match(
        json.dumps(response.json(), indent=2, sort_keys=True),
        "auth_user",
    )
def test_update_user_email_route(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """POST /auth/user/email echoes back the updated email address."""
    mocker.patch(
        "backend.server.routers.v1.update_user_email",
        return_value=None,
    )

    response = client.post("/auth/user/email", json="newemail@example.com")

    assert response.status_code == 200
    body = response.json()
    assert body["email"] == "newemail@example.com"

    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "auth_email",
    )
# Blocks endpoints tests
def test_get_graph_blocks(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """GET /blocks lists every enabled block."""
    block_stub = Mock()
    block_stub.to_dict.return_value = {
        "id": "test-block",
        "name": "Test Block",
        "description": "A test block",
        "disabled": False,
    }
    block_stub.id = "test-block"
    block_stub.disabled = False

    # The registry maps block id -> factory callable.
    mocker.patch(
        "backend.server.routers.v1.get_blocks",
        return_value={"test-block": lambda: block_stub},
    )
    mocker.patch(
        "backend.server.routers.v1.get_block_costs",
        return_value={"test-block": [{"cost": 10, "type": "credit"}]},
    )

    response = client.get("/blocks")

    assert response.status_code == 200
    body = response.json()
    assert len(body) == 1
    assert body[0]["id"] == "test-block"

    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "blks_all",
    )
def test_execute_graph_block(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """POST /blocks/{id}/execute returns the block's output tuples."""
    block_stub = Mock()
    block_stub.execute.return_value = [
        ("output1", {"data": "result1"}),
        ("output2", {"data": "result2"}),
    ]
    mocker.patch(
        "backend.server.routers.v1.get_block",
        return_value=block_stub,
    )

    response = client.post(
        "/blocks/test-block/execute",
        json={"input_name": "test_input", "input_value": "test_value"},
    )

    assert response.status_code == 200
    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(response.json(), indent=2, sort_keys=True),
        "blks_exec",
    )
def test_execute_graph_block_not_found(
    mocker: pytest_mock.MockFixture,
) -> None:
    """Executing an unknown block id yields a 404 with 'not found' detail."""
    mocker.patch("backend.server.routers.v1.get_block", return_value=None)

    response = client.post("/blocks/nonexistent-block/execute", json={})

    assert response.status_code == 404
    assert "not found" in response.json()["detail"]
# Credits endpoints tests
def test_get_user_credits(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """GET /credits reports the user's current balance."""
    credit_model_stub = mocker.patch("backend.server.routers.v1._user_credit_model")
    credit_model_stub.get_credits = AsyncMock(return_value=1000)

    response = client.get("/credits")

    assert response.status_code == 200
    body = response.json()
    assert body["credits"] == 1000

    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "cred_bal",
    )
def test_request_top_up(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """POST /credits returns a checkout URL for the top-up intent."""
    credit_model_stub = mocker.patch("backend.server.routers.v1._user_credit_model")
    credit_model_stub.top_up_intent = AsyncMock(
        return_value="https://checkout.example.com/session123"
    )

    response = client.post("/credits", json={"credit_amount": 500})

    assert response.status_code == 200
    body = response.json()
    assert "checkout_url" in body

    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "cred_topup_req",
    )
def test_get_auto_top_up(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """GET /credits/auto-top-up reports the stored threshold and amount."""
    mocker.patch(
        "backend.server.routers.v1.get_auto_top_up",
        return_value=AutoTopUpConfig(threshold=100, amount=500),
    )

    response = client.get("/credits/auto-top-up")

    assert response.status_code == 200
    body = response.json()
    assert body["threshold"] == 100
    assert body["amount"] == 500

    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "cred_topup_cfg",
    )
# Graphs endpoints tests
def test_get_graphs(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """GET /graphs lists the user's graphs."""
    stub_graph = GraphModel(
        id="graph-123",
        version=1,
        is_active=True,
        name="Test Graph",
        description="A test graph",
        user_id="test-user-id",
    )
    mocker.patch(
        "backend.server.routers.v1.graph_db.get_graphs",
        return_value=[stub_graph],
    )

    response = client.get("/graphs")

    assert response.status_code == 200
    body = response.json()
    assert len(body) == 1
    assert body[0]["id"] == "graph-123"

    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "grphs_all",
    )
def test_get_graph(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """GET /graphs/{id} returns the requested graph."""
    stub_graph = GraphModel(
        id="graph-123",
        version=1,
        is_active=True,
        name="Test Graph",
        description="A test graph",
        user_id="test-user-id",
    )
    mocker.patch(
        "backend.server.routers.v1.graph_db.get_graph",
        return_value=stub_graph,
    )

    response = client.get("/graphs/graph-123")

    assert response.status_code == 200
    body = response.json()
    assert body["id"] == "graph-123"

    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "grph_single",
    )
def test_get_graph_not_found(
    mocker: pytest_mock.MockFixture,
) -> None:
    """Fetching an unknown graph id yields a 404 with 'not found' detail."""
    mocker.patch("backend.server.routers.v1.graph_db.get_graph", return_value=None)

    response = client.get("/graphs/nonexistent-graph")

    assert response.status_code == 404
    assert "not found" in response.json()["detail"]
def test_delete_graph(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """DELETE /graphs/{id} deactivates the graph and reports versions removed."""
    # An active graph is returned so the deactivation path is exercised.
    stub_graph = GraphModel(
        id="graph-123",
        version=1,
        is_active=True,
        name="Test Graph",
        description="A test graph",
        user_id="test-user-id",
    )
    mocker.patch(
        "backend.server.routers.v1.graph_db.get_graph",
        return_value=stub_graph,
    )
    mocker.patch(
        "backend.server.routers.v1.on_graph_deactivate",
        return_value=None,
    )
    mocker.patch(
        "backend.server.routers.v1.graph_db.delete_graph",
        return_value=3,  # Number of versions deleted
    )

    response = client.delete("/graphs/graph-123")

    assert response.status_code == 200
    body = response.json()
    assert body["version_counts"] == 3

    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "grphs_del",
    )
# Invalid request tests
def test_invalid_json_request() -> None:
    """A syntactically invalid JSON body is rejected with 422."""
    response = client.post(
        "/auth/user/email",
        content="invalid json",
        headers={"Content-Type": "application/json"},
    )
    assert response.status_code == 422
def test_missing_required_field() -> None:
    """Omitting credit_amount from a top-up request is a validation error."""
    response = client.post("/credits", json={})  # Missing credit_amount
    assert response.status_code == 422

View File

@@ -0,0 +1,139 @@
"""Common test fixtures with proper setup and teardown."""
from contextlib import asynccontextmanager
from typing import AsyncGenerator
from unittest.mock import Mock, patch
import pytest
from prisma import Prisma
@pytest.fixture
async def test_db_connection() -> AsyncGenerator[Prisma, None]:
    """Provide a test database connection with proper cleanup.

    This fixture ensures the database connection is properly
    closed after the test, even if the test fails.
    """
    db = Prisma()
    try:
        await db.connect()
        yield db
    finally:
        # NOTE(review): if connect() itself fails, disconnect() still runs on a
        # never-connected client — confirm Prisma tolerates that.
        await db.disconnect()
@pytest.fixture
def mock_transaction():
    """Mock database transaction with proper async context manager."""

    @asynccontextmanager
    async def fake_transaction(*args, **kwargs):
        # The real helper yields inside a locked transaction; tests need no handle.
        yield None

    with patch(
        "backend.data.db.locked_transaction", side_effect=fake_transaction
    ) as patched:
        yield patched
@pytest.fixture
def isolated_app_state():
    """Fixture that ensures app state is isolated between tests.

    Currently a documented template: the commented code shows how to save
    and restore FastAPI dependency overrides around a test.
    """
    # Example: Save original state
    # from backend.server.app import app
    # original_overrides = app.dependency_overrides.copy()
    # try:
    #     yield app
    # finally:
    #     # Restore original state
    #     app.dependency_overrides = original_overrides
    # For now, just yield None as this is an example
    yield None
@pytest.fixture
def cleanup_files():
    """Fixture to track and cleanup files created during tests.

    Yields a callable that tests use to register file paths; every
    registered path is deleted (best-effort) during teardown.
    """
    # Import at the top of the fixture instead of mid-teardown for clarity.
    import os

    created_files: list[str] = []

    def track_file(filepath: str) -> None:
        created_files.append(filepath)

    yield track_file

    # Cleanup: a failed delete must not fail the test itself.
    for filepath in created_files:
        try:
            if os.path.exists(filepath):
                os.remove(filepath)
        except Exception as e:
            print(f"Warning: Failed to cleanup {filepath}: {e}")
@pytest.fixture
async def async_mock_with_cleanup():
    """Create async mocks that are properly cleaned up."""
    registry = []

    def create_mock(**kwargs):
        created = Mock(**kwargs)
        registry.append(created)
        return created

    yield create_mock

    # Teardown: clear recorded calls/state on every mock handed out.
    for created in registry:
        created.reset_mock()
class TestDatabaseIsolation:
    """Example of proper test isolation with database operations."""

    @pytest.fixture(autouse=True)
    async def setup_and_teardown(self, test_db_connection):
        """Setup and teardown for each test method."""
        # Setup: remove residue from earlier runs before the test body runs.
        await test_db_connection.user.delete_many(
            where={"email": {"contains": "@test.example"}}
        )
        yield
        # Teardown: Clear test data again so later tests start clean.
        await test_db_connection.user.delete_many(
            where={"email": {"contains": "@test.example"}}
        )

    async def test_create_user(self, test_db_connection):
        """Test that demonstrates proper isolation."""
        # This test has access to a clean database
        user = await test_db_connection.user.create(
            data={"email": "test@test.example", "name": "Test User"}
        )
        assert user.email == "test@test.example"
        # User will be cleaned up automatically by setup_and_teardown
@pytest.fixture(scope="function")  # Explicitly use function scope
def reset_singleton_state():
    """Reset singleton state between tests (illustrative placeholder)."""
    # Intended pattern once a real singleton needs isolation:
    #   from backend.data.some_singleton import SingletonClass
    #   saved = getattr(SingletonClass, "_instance", None)
    #   try:
    #       SingletonClass._instance = None
    #       yield
    #   finally:
    #       SingletonClass._instance = saved
    yield None  # placeholder until a concrete singleton is targeted

View File

@@ -0,0 +1,109 @@
"""Helper functions for improved test assertions and error handling."""
import json
from typing import Any, Dict, Optional
def assert_response_status(
    response: Any, expected_status: int = 200, error_context: Optional[str] = None
) -> None:
    """Fail with a descriptive message unless the response has the given status.

    Args:
        response: HTTP response object exposing ``status_code``, ``json()``
            and ``text``.
        expected_status: Status code the caller expects.
        error_context: Optional prefix for the failure message.

    Raises:
        AssertionError: If the status code does not match; the message
            includes the response body (JSON if parseable, raw text otherwise).
    """
    if response.status_code == expected_status:
        return

    message = f"Expected status {expected_status}, got {response.status_code}"
    if error_context:
        message = f"{error_context}: {message}"
    # Prefer a pretty-printed JSON body; fall back to raw text.
    try:
        message += f"\nResponse body: {json.dumps(response.json(), indent=2)}"
    except Exception:
        message += f"\nResponse text: {response.text}"
    raise AssertionError(message)
def safe_parse_json(
    response: Any, error_context: Optional[str] = None
) -> Dict[str, Any]:
    """Parse a response body as JSON, raising a helpful AssertionError on failure.

    Args:
        response: HTTP response object exposing ``json()`` and ``text``.
        error_context: Optional prefix for the failure message.

    Returns:
        The parsed JSON payload.

    Raises:
        AssertionError: If parsing fails; includes up to 500 chars of raw text.
    """
    try:
        return response.json()
    except Exception as exc:
        parts = []
        if error_context:
            parts.append(error_context)
        parts.append(f"Failed to parse JSON response: {exc}")
        # Truncate raw text so giant bodies do not flood the test output.
        message = ": ".join(parts) + f"\nResponse text: {response.text[:500]}"
        raise AssertionError(message)
def assert_error_response_structure(
    response: Any,
    expected_status: int = 422,
    expected_error_fields: Optional[list[str]] = None,
) -> Dict[str, Any]:
    """Validate the shape of an error response and return its parsed body.

    Args:
        response: HTTP response object.
        expected_status: Expected error status code (defaults to 422).
        expected_error_fields: When given, each FastAPI validation error entry
            is checked for the standard ``loc``/``msg``/``type`` fields.

    Returns:
        The parsed error response body.
    """
    assert_response_status(response, expected_status, "Error response check")
    error_data = safe_parse_json(response, "Error response parsing")

    # Every error body must at least carry a 'detail' entry.
    assert "detail" in error_data, f"Missing 'detail' in error response: {error_data}"

    if expected_error_fields:
        detail = error_data["detail"]
        if isinstance(detail, list):
            # FastAPI validation errors: each entry carries loc/msg/type.
            for entry in detail:
                assert "loc" in entry, f"Missing 'loc' in error: {entry}"
                assert "msg" in entry, f"Missing 'msg' in error: {entry}"
                assert "type" in entry, f"Missing 'type' in error: {entry}"
    return error_data
def assert_mock_called_with_partial(mock_obj: Any, **expected_kwargs: Any) -> None:
    """Assert the mock's last call included the given kwargs (partial match).

    Only keyword arguments are inspected; positional arguments are ignored.

    Args:
        mock_obj: The mock object to check.
        **expected_kwargs: Keyword arguments the call must have included.
    """
    assert mock_obj.called, f"Mock {mock_obj} was not called"
    call_kwargs = mock_obj.call_args.kwargs if mock_obj.call_args else {}
    for name, want in expected_kwargs.items():
        assert (
            name in call_kwargs
        ), f"Missing key '{name}' in mock call. Actual keys: {list(call_kwargs.keys())}"
        got = call_kwargs[name]
        assert got == want, f"Mock called with {name}={got}, expected {want}"

View File

@@ -0,0 +1,74 @@
"""Common test utilities and constants for server tests."""
from typing import Any, Dict
from unittest.mock import Mock
import pytest
# Test ID constants
# Shared across server test modules so request payloads, dependency
# overrides, and mock assertions all agree on the same user identities.
TEST_USER_ID = "test-user-id"
ADMIN_USER_ID = "admin-user-id"
TARGET_USER_ID = "target-user-id"

# Common test data constants
# Fixed values keep snapshot tests reproducible across runs.
FIXED_TIMESTAMP = "2024-01-01T00:00:00Z"
TRANSACTION_UUID = "transaction-123-uuid"
METRIC_UUID = "metric-123-uuid"
ANALYTICS_UUID = "analytics-123-uuid"
def create_mock_with_id(mock_id: str) -> Mock:
    """Build a Mock whose ``id`` attribute is preset.

    Args:
        mock_id: Value to expose as the mock's ``id``.

    Returns:
        A Mock with ``id`` set to ``mock_id``.
    """
    stub = Mock()
    stub.id = mock_id
    return stub
def assert_status_and_parse_json(
    response: Any, expected_status: int = 200
) -> Dict[str, Any]:
    """Check the response status code, then return its parsed JSON payload.

    Args:
        response: The HTTP response object.
        expected_status: Expected status code (default: 200).

    Returns:
        Parsed JSON response data.

    Raises:
        AssertionError: If the status code differs; the raw body text is
            included in the failure message for easier debugging.
    """
    actual = response.status_code
    assert (
        actual == expected_status
    ), f"Expected status {expected_status}, got {actual}: {response.text}"
    return response.json()
# Shared metric test cases: (metric_value, metric_name, data_string).
# Single source of truth for the decorator below — previously this list was
# duplicated in a stray module-level @pytest.mark.parametrize that was
# applied to the decorator factory itself (marking a non-test helper and
# doing nothing useful), which has been removed.
METRIC_VALUE_CASES = [
    (100, "api_calls_count", "external_api"),
    (0, "error_count", "no_errors"),
    (-5.2, "temperature_delta", "cooling"),
    (1.23456789, "precision_test", "float_precision"),
    (999999999, "large_number", "max_value"),
]


def parametrized_metric_values_decorator(func):
    """Decorator for parametrized metric value tests.

    Applies ``pytest.mark.parametrize`` over ``METRIC_VALUE_CASES`` so that
    every decorated test shares the same set of metric inputs.

    Args:
        func: The test function to parametrize.

    Returns:
        The test function wrapped with the parametrize mark.
    """
    return pytest.mark.parametrize(
        "metric_value,metric_name,data_string",
        METRIC_VALUE_CASES,
    )(func)

View File

@@ -0,0 +1,331 @@
import json
from unittest.mock import AsyncMock
import autogpt_libs.auth
import autogpt_libs.auth.depends
import fastapi
import fastapi.testclient
import prisma.enums
import pytest_mock
from prisma import Json
from pytest_snapshot.plugin import Snapshot
import backend.server.v2.admin.credit_admin_routes as credit_admin_routes
import backend.server.v2.admin.model as admin_model
from backend.data.model import UserTransaction
from backend.server.conftest import ADMIN_USER_ID, TARGET_USER_ID
from backend.server.model import Pagination
# Standalone app under test: only the admin credit router is mounted.
app = fastapi.FastAPI()
app.include_router(credit_admin_routes.router)

client = fastapi.testclient.TestClient(app)


def override_requires_admin_user() -> dict[str, str]:
    """Override admin user check for testing"""
    return {"sub": ADMIN_USER_ID, "role": "admin"}


def override_get_user_id() -> str:
    """Override get_user_id for testing"""
    return ADMIN_USER_ID


# Bypass real authentication: every request in this module is treated as
# coming from the admin test user unless a test clears these overrides.
app.dependency_overrides[autogpt_libs.auth.requires_admin_user] = (
    override_requires_admin_user
)
app.dependency_overrides[autogpt_libs.auth.depends.get_user_id] = override_get_user_id
def test_add_user_credits_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test successful credit addition by admin"""
    # Mock the credit model
    # Patches the module-level credit model so no real transaction is written.
    mock_credit_model = mocker.patch(
        "backend.server.v2.admin.credit_admin_routes._user_credit_model"
    )
    mock_credit_model._add_transaction = AsyncMock(
        return_value=(1500, "transaction-123-uuid")
    )

    request_data = {
        "user_id": TARGET_USER_ID,
        "amount": 500,
        "comments": "Test credit grant for debugging",
    }
    response = client.post("/admin/add_credits", json=request_data)

    assert response.status_code == 200
    response_data = response.json()
    assert response_data["new_balance"] == 1500
    assert response_data["transaction_key"] == "transaction-123-uuid"

    # Verify the function was called with correct parameters
    mock_credit_model._add_transaction.assert_called_once()
    call_args = mock_credit_model._add_transaction.call_args
    # call_args[0] holds positional args, call_args[1] holds keyword args.
    assert call_args[0] == (TARGET_USER_ID, 500)
    assert call_args[1]["transaction_type"] == prisma.enums.CreditTransactionType.GRANT
    # Check that metadata is a Json object with the expected content
    assert isinstance(call_args[1]["metadata"], Json)
    assert call_args[1]["metadata"] == Json(
        {"admin_id": ADMIN_USER_ID, "reason": "Test credit grant for debugging"}
    )

    # Snapshot test the response
    configured_snapshot.assert_match(
        json.dumps(response_data, indent=2, sort_keys=True),
        "admin_add_credits_success",
    )
def test_add_user_credits_negative_amount(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """Test credit deduction by admin (negative amount)"""
    # Stub the credit model so no real DB transaction occurs.
    mock_credit_model = mocker.patch(
        "backend.server.v2.admin.credit_admin_routes._user_credit_model"
    )
    mock_credit_model._add_transaction = AsyncMock(
        return_value=(200, "transaction-456-uuid")
    )

    payload = {
        "user_id": "target-user-id",
        "amount": -100,
        "comments": "Refund adjustment",
    }
    response = client.post("/admin/add_credits", json=payload)

    assert response.status_code == 200
    body = response.json()
    assert body["new_balance"] == 200

    # Snapshot the full body for regression detection.
    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "adm_add_cred_neg",
    )
def test_get_user_history_success(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """Test successful retrieval of user credit history"""
    # Mock the admin_get_user_history function
    # Two transactions (one grant, one usage) exercise both sign directions.
    mock_history_response = admin_model.UserHistoryResponse(
        history=[
            UserTransaction(
                user_id="user-1",
                user_email="user1@example.com",
                amount=1000,
                reason="Initial grant",
                transaction_type=prisma.enums.CreditTransactionType.GRANT,
            ),
            UserTransaction(
                user_id="user-2",
                user_email="user2@example.com",
                amount=-50,
                reason="Usage",
                transaction_type=prisma.enums.CreditTransactionType.USAGE,
            ),
        ],
        pagination=Pagination(
            total_items=2,
            total_pages=1,
            current_page=1,
            page_size=20,
        ),
    )
    mocker.patch(
        "backend.server.v2.admin.credit_admin_routes.admin_get_user_history",
        return_value=mock_history_response,
    )

    response = client.get("/admin/users_history")

    assert response.status_code == 200
    response_data = response.json()
    assert len(response_data["history"]) == 2
    assert response_data["pagination"]["total_items"] == 2

    # Snapshot test the response
    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(response_data, indent=2, sort_keys=True),
        "adm_usr_hist_ok",
    )
def test_get_user_history_with_filters(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """Test user credit history with search and filter parameters"""
    # Mock the admin_get_user_history function
    mock_history_response = admin_model.UserHistoryResponse(
        history=[
            UserTransaction(
                user_id="user-3",
                user_email="test@example.com",
                amount=500,
                reason="Top up",
                transaction_type=prisma.enums.CreditTransactionType.TOP_UP,
            ),
        ],
        pagination=Pagination(
            total_items=1,
            total_pages=1,
            current_page=1,
            page_size=10,
        ),
    )
    mock_get_history = mocker.patch(
        "backend.server.v2.admin.credit_admin_routes.admin_get_user_history",
        return_value=mock_history_response,
    )

    response = client.get(
        "/admin/users_history",
        params={
            "search": "test@example.com",
            "page": 1,
            "page_size": 10,
            "transaction_filter": "TOP_UP",
        },
    )

    assert response.status_code == 200
    response_data = response.json()
    assert len(response_data["history"]) == 1
    assert response_data["history"][0]["transaction_type"] == "TOP_UP"

    # Verify the function was called with correct parameters
    # Confirms the route converts the "TOP_UP" query string into the enum.
    mock_get_history.assert_called_once_with(
        page=1,
        page_size=10,
        search="test@example.com",
        transaction_filter=prisma.enums.CreditTransactionType.TOP_UP,
    )

    # Snapshot test the response
    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(response_data, indent=2, sort_keys=True),
        "adm_usr_hist_filt",
    )
def test_get_user_history_empty_results(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """Test user credit history with no results"""
    # No matching transactions: empty page with zeroed pagination counters.
    empty_page = admin_model.UserHistoryResponse(
        history=[],
        pagination=Pagination(
            total_items=0,
            total_pages=0,
            current_page=1,
            page_size=20,
        ),
    )
    mocker.patch(
        "backend.server.v2.admin.credit_admin_routes.admin_get_user_history",
        return_value=empty_page,
    )

    response = client.get("/admin/users_history", params={"search": "nonexistent"})

    assert response.status_code == 200
    body = response.json()
    assert len(body["history"]) == 0
    assert body["pagination"]["total_items"] == 0

    # Snapshot the empty-result shape as well.
    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "adm_usr_hist_empty",
    )
def test_add_credits_invalid_request() -> None:
    """Test credit addition with invalid request data"""
    # Each payload is invalid for a different reason; all must be rejected
    # with 422 by request validation before any handler logic runs.
    invalid_payloads = [
        # Missing required fields
        {},
        # Invalid amount type
        {"user_id": "test", "amount": "not_a_number", "comments": "test"},
        # Missing comments
        {"user_id": "test", "amount": 100},
    ]
    for payload in invalid_payloads:
        response = client.post("/admin/add_credits", json=payload)
        assert response.status_code == 422
def test_admin_endpoints_require_admin_role(mocker: pytest_mock.MockFixture) -> None:
    """Test that admin endpoints require admin role.

    Temporarily clears the shared app's dependency overrides so the real
    auth path runs. Restoration happens in ``finally`` so a failing
    assertion cannot leak the cleared overrides into subsequent tests
    (previously the restore code was unreachable on failure).
    """
    # Clear the admin override to test authorization
    app.dependency_overrides.clear()
    try:
        # Mock requires_admin_user to raise an exception
        mocker.patch(
            "autogpt_libs.auth.requires_admin_user",
            side_effect=fastapi.HTTPException(
                status_code=403, detail="Admin access required"
            ),
        )

        # Test add_credits endpoint
        response = client.post(
            "/admin/add_credits",
            json={
                "user_id": "test",
                "amount": 100,
                "comments": "test",
            },
        )
        assert (
            response.status_code == 401
        )  # Auth middleware returns 401 when auth is disabled

        # Test users_history endpoint
        response = client.get("/admin/users_history")
        assert (
            response.status_code == 401
        )  # Auth middleware returns 401 when auth is disabled
    finally:
        # Restore the overrides regardless of assertion outcome.
        app.dependency_overrides[autogpt_libs.auth.requires_admin_user] = (
            override_requires_admin_user
        )
        app.dependency_overrides[autogpt_libs.auth.depends.get_user_id] = (
            override_get_user_id
        )

View File

@@ -3,6 +3,7 @@ from datetime import datetime
import prisma.enums
import prisma.errors
import prisma.models
import prisma.types
import pytest
import backend.server.v2.library.db as db
@@ -84,6 +85,11 @@ async def test_get_library_agents(mocker):
@pytest.mark.asyncio(loop_scope="session")
async def test_add_agent_to_library(mocker):
await connect()
# Mock the transaction context
mock_transaction = mocker.patch("backend.server.v2.library.db.locked_transaction")
mock_transaction.return_value.__aenter__ = mocker.AsyncMock(return_value=None)
mock_transaction.return_value.__aexit__ = mocker.AsyncMock(return_value=None)
# Mock data
mock_store_listing_data = prisma.models.StoreListingVersion(
id="version123",
@@ -142,6 +148,10 @@ async def test_add_agent_to_library(mocker):
return_value=mock_library_agent_data
)
# Mock the model conversion
mock_from_db = mocker.patch("backend.server.v2.library.model.LibraryAgent.from_db")
mock_from_db.return_value = mocker.Mock()
# Call function
await db.add_store_agent_to_library("version123", "test-user")

View File

@@ -70,10 +70,10 @@ async def list_library_agents(
page_size=page_size,
)
except Exception as e:
logger.error(f"Could not fetch library agents: {e}")
logger.exception("Listing library agents failed for user %s: %s", user_id, e)
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Failed to get library agents",
detail={"message": str(e), "hint": "Inspect database connectivity."},
) from e
@@ -102,10 +102,17 @@ async def get_library_agent_by_store_listing_version_id(
store_listing_version_id, user_id
)
except Exception as e:
logger.error(f"Could not fetch library agent from store version ID: {e}")
logger.exception(
"Retrieving library agent by store version failed for user %s: %s",
user_id,
e,
)
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Failed to add agent to library",
detail={
"message": str(e),
"hint": "Check if the store listing ID is valid.",
},
) from e
@@ -143,22 +150,31 @@ async def add_marketplace_agent_to_library(
)
except store_exceptions.AgentNotFoundError:
logger.warning(f"Agent not found: {store_listing_version_id}")
logger.warning(
"Store listing version %s not found when adding to library",
store_listing_version_id,
)
raise HTTPException(
status_code=404,
detail=f"Store listing version {store_listing_version_id} not found",
detail={
"message": f"Store listing version {store_listing_version_id} not found",
"hint": "Confirm the ID provided.",
},
)
except store_exceptions.DatabaseError as e:
logger.error(f"Database error occurred whilst adding agent to library: {e}")
logger.exception("Database error whilst adding agent to library: %s", e)
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Failed to add agent to library",
detail={"message": str(e), "hint": "Inspect DB logs for details."},
) from e
except Exception as e:
logger.error(f"Unexpected error while adding agent: {e}")
logger.exception("Unexpected error while adding agent to library: %s", e)
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Failed to add agent to library",
detail={
"message": str(e),
"hint": "Check server logs for more information.",
},
) from e
@@ -203,16 +219,16 @@ async def update_library_agent(
content={"message": "Agent updated successfully"},
)
except store_exceptions.DatabaseError as e:
logger.exception(f"Database error while updating library agent: {e}")
logger.exception("Database error while updating library agent: %s", e)
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Failed to update library agent",
detail={"message": str(e), "hint": "Verify DB connection."},
) from e
except Exception as e:
logger.exception(f"Unexpected error while updating library agent: {e}")
logger.exception("Unexpected error while updating library agent: %s", e)
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Failed to update library agent",
detail={"message": str(e), "hint": "Check server logs."},
) from e

View File

@@ -47,10 +47,13 @@ async def list_presets(
page_size=page_size,
)
except Exception as e:
logger.exception(f"Exception occurred while getting presets: {e}")
logger.exception("Failed to list presets for user %s: %s", user_id, e)
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Failed to get presets",
detail={
"message": str(e),
"hint": "Ensure the presets DB table is accessible.",
},
)
@@ -85,10 +88,12 @@ async def get_preset(
)
return preset
except Exception as e:
logger.exception(f"Exception occurred whilst getting preset: {e}")
logger.exception(
"Error retrieving preset %s for user %s: %s", preset_id, user_id, e
)
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Failed to get preset",
detail={"message": str(e), "hint": "Validate preset ID and retry."},
)
@@ -125,10 +130,10 @@ async def create_preset(
except NotFoundError as e:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(e))
except Exception as e:
logger.exception(f"Exception occurred while creating preset: {e}")
logger.exception("Preset creation failed for user %s: %s", user_id, e)
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Failed to create preset",
detail={"message": str(e), "hint": "Check preset payload format."},
)
@@ -161,10 +166,10 @@ async def update_preset(
user_id=user_id, preset_id=preset_id, preset=preset
)
except Exception as e:
logger.exception(f"Exception occurred whilst updating preset: {e}")
logger.exception("Preset update failed for user %s: %s", user_id, e)
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Failed to update preset",
detail={"message": str(e), "hint": "Check preset data and try again."},
)
@@ -191,10 +196,12 @@ async def delete_preset(
try:
await db.delete_preset(user_id, preset_id)
except Exception as e:
logger.exception(f"Exception occurred whilst deleting preset: {e}")
logger.exception(
"Error deleting preset %s for user %s: %s", preset_id, user_id, e
)
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Failed to delete preset",
detail={"message": str(e), "hint": "Ensure preset exists before deleting."},
)
@@ -252,8 +259,11 @@ async def execute_preset(
except HTTPException:
raise
except Exception as e:
logger.exception(f"Exception occurred while executing preset: {e}")
logger.exception("Preset execution failed for user %s: %s", user_id, e)
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=str(e),
detail={
"message": str(e),
"hint": "Review preset configuration and graph ID.",
},
)

View File

@@ -1,9 +1,11 @@
import datetime
import json
import autogpt_libs.auth as autogpt_auth_lib
import fastapi.testclient
import pytest
import pytest_mock
from pytest_snapshot.plugin import Snapshot
import backend.server.model as server_model
import backend.server.v2.library.model as library_model
@@ -14,6 +16,8 @@ app.include_router(library_router)
client = fastapi.testclient.TestClient(app)
FIXED_NOW = datetime.datetime(2023, 1, 1, 0, 0, 0)
def override_auth_middleware():
"""Override auth middleware for testing"""
@@ -30,7 +34,10 @@ app.dependency_overrides[autogpt_auth_lib.depends.get_user_id] = override_get_us
@pytest.mark.asyncio
async def test_get_library_agents_success(mocker: pytest_mock.MockFixture):
async def test_get_library_agents_success(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = library_model.LibraryAgentResponse(
agents=[
library_model.LibraryAgent(
@@ -82,6 +89,10 @@ async def test_get_library_agents_success(mocker: pytest_mock.MockFixture):
assert data.agents[0].can_access_graph is True
assert data.agents[1].graph_id == "test-agent-2"
assert data.agents[1].can_access_graph is False
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "lib_agts_search")
mock_db_call.assert_called_once_with(
user_id="test-user-id",
search_term="test",

View File

@@ -1,7 +1,7 @@
import logging
from autogpt_libs.auth.middleware import auth_middleware
from fastapi import APIRouter, Depends
from fastapi import APIRouter, Depends, HTTPException
from backend.server.utils import get_user_id
@@ -23,4 +23,12 @@ async def proxy_otto_request(
Proxy requests to Otto API while adding necessary security headers and logging.
Requires an authenticated user.
"""
return await OttoService.ask(request, user_id)
logger.debug("Forwarding request to Otto for user %s", user_id)
try:
return await OttoService.ask(request, user_id)
except Exception as e:
logger.exception("Otto request failed for user %s: %s", user_id, e)
raise HTTPException(
status_code=502,
detail={"message": str(e), "hint": "Check Otto service status."},
)

View File

@@ -0,0 +1,271 @@
import json
import autogpt_libs.auth.depends
import autogpt_libs.auth.middleware
import fastapi
import fastapi.testclient
import pytest_mock
from pytest_snapshot.plugin import Snapshot
import backend.server.v2.otto.models as otto_models
import backend.server.v2.otto.routes as otto_routes
from backend.server.utils import get_user_id
from backend.server.v2.otto.service import OttoService
# Standalone app under test: only the Otto router is mounted.
app = fastapi.FastAPI()
app.include_router(otto_routes.router)

client = fastapi.testclient.TestClient(app)


def override_auth_middleware():
    """Override auth middleware for testing"""
    return {"sub": "test-user-id"}


def override_get_user_id():
    """Override get_user_id for testing"""
    return "test-user-id"


# Bypass real authentication for all tests in this module. Note the user-id
# override keys off backend.server.utils.get_user_id (imported above).
app.dependency_overrides[autogpt_libs.auth.middleware.auth_middleware] = (
    override_auth_middleware
)
app.dependency_overrides[get_user_id] = override_get_user_id
def test_ask_otto_success(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """Test successful Otto API request"""
    # Mock the OttoService.ask method
    # Canned service reply with two scored documents; no network call happens.
    mock_response = otto_models.ApiResponse(
        answer="This is Otto's response to your query.",
        documents=[
            otto_models.Document(
                url="https://example.com/doc1",
                relevance_score=0.95,
            ),
            otto_models.Document(
                url="https://example.com/doc2",
                relevance_score=0.87,
            ),
        ],
        success=True,
    )
    mocker.patch.object(
        OttoService,
        "ask",
        return_value=mock_response,
    )

    request_data = {
        "query": "How do I create an agent?",
        "conversation_history": [
            {
                "query": "What is AutoGPT?",
                "response": "AutoGPT is an AI agent platform.",
            }
        ],
        "message_id": "msg_123",
        "include_graph_data": False,
    }
    response = client.post("/ask", json=request_data)

    assert response.status_code == 200
    response_data = response.json()
    assert response_data["success"] is True
    assert response_data["answer"] == "This is Otto's response to your query."
    assert len(response_data["documents"]) == 2

    # Snapshot test the response
    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(response_data, indent=2, sort_keys=True),
        "otto_ok",
    )
def test_ask_otto_with_graph_data(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """Test Otto API request with graph data included"""
    # Canned service reply containing a single graph-related document.
    canned = otto_models.ApiResponse(
        answer="Here's information about your graph.",
        documents=[
            otto_models.Document(
                url="https://example.com/graph-doc",
                relevance_score=0.92,
            ),
        ],
        success=True,
    )
    mocker.patch.object(OttoService, "ask", return_value=canned)

    payload = {
        "query": "Tell me about my graph",
        "conversation_history": [],
        "message_id": "msg_456",
        "include_graph_data": True,
        "graph_id": "graph_123",
    }
    response = client.post("/ask", json=payload)

    assert response.status_code == 200
    body = response.json()
    assert body["success"] is True

    # Snapshot the full body for regression detection.
    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "otto_grph",
    )
def test_ask_otto_empty_conversation(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """Test Otto API request with empty conversation history"""
    # First-message scenario: no prior history, no documents returned.
    canned = otto_models.ApiResponse(
        answer="Welcome! How can I help you?",
        documents=[],
        success=True,
    )
    mocker.patch.object(OttoService, "ask", return_value=canned)

    payload = {
        "query": "Hello",
        "conversation_history": [],
        "message_id": "msg_789",
        "include_graph_data": False,
    }
    response = client.post("/ask", json=payload)

    assert response.status_code == 200
    body = response.json()
    assert body["success"] is True
    assert len(body["documents"]) == 0

    # Snapshot the full body for regression detection.
    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "otto_empty",
    )
def test_ask_otto_service_error(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """Test Otto API request when service returns error"""
    # Mock the OttoService.ask method to return failure
    # The route still responds 200; failure is signaled via success=False.
    mock_response = otto_models.ApiResponse(
        answer="An error occurred while processing your request.",
        documents=[],
        success=False,
    )
    mocker.patch.object(
        OttoService,
        "ask",
        return_value=mock_response,
    )

    request_data = {
        "query": "Test query",
        "conversation_history": [],
        "message_id": "msg_error",
        "include_graph_data": False,
    }
    response = client.post("/ask", json=request_data)

    assert response.status_code == 200
    response_data = response.json()
    assert response_data["success"] is False

    # Snapshot test the response
    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(response_data, indent=2, sort_keys=True),
        "otto_err",
    )
def test_ask_otto_invalid_request() -> None:
    """Test Otto API with invalid request data"""
    # Each payload is invalid for a different reason; all must be rejected
    # with 422 by request validation.
    bad_requests = [
        # Missing required fields
        {},
        # Invalid conversation history format
        {"query": "Test", "conversation_history": "not a list", "message_id": "123"},
        # Missing message_id
        {"query": "Test", "conversation_history": []},
    ]
    for payload in bad_requests:
        response = client.post("/ask", json=payload)
        assert response.status_code == 422
def test_ask_otto_unauthenticated(mocker: pytest_mock.MockFixture) -> None:
    """Test Otto API request without authentication.

    Clears the shared app's dependency overrides to exercise the
    unauthenticated path, then restores them in ``finally`` so a failing
    assertion cannot leave later tests without auth overrides.

    Fixes from the original:
      * restoration was unreachable when an assertion failed (no try/finally);
      * the user-id override was restored under the wrong key
        (``autogpt_libs.auth.depends.get_user_id``) while module setup
        registered it under ``backend.server.utils.get_user_id``, so the
        original override was never actually reinstated.
    """
    # Remove the auth override to test unauthenticated access
    app.dependency_overrides.clear()
    try:
        # Mock auth_middleware to raise an exception
        mocker.patch(
            "autogpt_libs.auth.middleware.auth_middleware",
            side_effect=fastapi.HTTPException(status_code=401, detail="Unauthorized"),
        )

        request_data = {
            "query": "Test",
            "conversation_history": [],
            "message_id": "123",
        }
        response = client.post("/ask", json=request_data)

        # When auth is disabled and Otto API URL is not configured, we get 503
        assert response.status_code == 503
    finally:
        # Restore the overrides under the same keys module setup used.
        app.dependency_overrides[autogpt_libs.auth.middleware.auth_middleware] = (
            override_auth_middleware
        )
        app.dependency_overrides[get_user_id] = override_get_user_id

View File

@@ -48,11 +48,14 @@ async def get_profile(
content={"detail": "Profile not found"},
)
return profile
except Exception:
logger.exception("Exception occurred whilst getting user profile")
except Exception as e:
logger.exception("Failed to fetch user profile for %s: %s", user_id, e)
return fastapi.responses.JSONResponse(
status_code=500,
content={"detail": "An error occurred while retrieving the user profile"},
content={
"detail": "Failed to retrieve user profile",
"hint": "Check database connection.",
},
)
@@ -86,11 +89,14 @@ async def update_or_create_profile(
user_id=user_id, profile=profile
)
return updated_profile
except Exception:
logger.exception("Exception occurred whilst updating profile")
except Exception as e:
logger.exception("Failed to update profile for user %s: %s", user_id, e)
return fastapi.responses.JSONResponse(
status_code=500,
content={"detail": "An error occurred while updating the user profile"},
content={
"detail": "Failed to update user profile",
"hint": "Validate request data.",
},
)
@@ -160,11 +166,14 @@ async def get_agents(
page_size=page_size,
)
return agents
except Exception:
logger.exception("Exception occured whilst getting store agents")
except Exception as e:
logger.exception("Failed to retrieve store agents: %s", e)
return fastapi.responses.JSONResponse(
status_code=500,
content={"detail": "An error occurred while retrieving the store agents"},
content={
"detail": "Failed to retrieve store agents",
"hint": "Check database or search parameters.",
},
)

View File

@@ -1,4 +1,5 @@
import datetime
import json
import autogpt_libs.auth.depends
import autogpt_libs.auth.middleware
@@ -6,22 +7,27 @@ import fastapi
import fastapi.testclient
import prisma.enums
import pytest_mock
from pytest_snapshot.plugin import Snapshot
import backend.server.v2.store.model
import backend.server.v2.store.routes
# Using a fixed timestamp for reproducible tests
# 2023 date is intentionally used to ensure tests work regardless of current year
FIXED_NOW = datetime.datetime(2023, 1, 1, 0, 0, 0)
app = fastapi.FastAPI()
app.include_router(backend.server.v2.store.routes.router)
client = fastapi.testclient.TestClient(app)
def override_auth_middleware():
def override_auth_middleware() -> dict[str, str]:
"""Override auth middleware for testing"""
return {"sub": "test-user-id"}
def override_get_user_id():
def override_get_user_id() -> str:
"""Override get_user_id for testing"""
return "test-user-id"
@@ -32,7 +38,10 @@ app.dependency_overrides[autogpt_libs.auth.middleware.auth_middleware] = (
app.dependency_overrides[autogpt_libs.auth.depends.get_user_id] = override_get_user_id
def test_get_agents_defaults(mocker: pytest_mock.MockFixture):
def test_get_agents_defaults(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.StoreAgentsResponse(
agents=[],
pagination=backend.server.v2.store.model.Pagination(
@@ -52,6 +61,9 @@ def test_get_agents_defaults(mocker: pytest_mock.MockFixture):
)
assert data.pagination.total_pages == 0
assert data.agents == []
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "def_agts")
mock_db_call.assert_called_once_with(
featured=False,
creator=None,
@@ -63,7 +75,10 @@ def test_get_agents_defaults(mocker: pytest_mock.MockFixture):
)
def test_get_agents_featured(mocker: pytest_mock.MockFixture):
def test_get_agents_featured(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.StoreAgentsResponse(
agents=[
backend.server.v2.store.model.StoreAgent(
@@ -94,6 +109,8 @@ def test_get_agents_featured(mocker: pytest_mock.MockFixture):
)
assert len(data.agents) == 1
assert data.agents[0].slug == "featured-agent"
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "feat_agts")
mock_db_call.assert_called_once_with(
featured=True,
creator=None,
@@ -105,7 +122,10 @@ def test_get_agents_featured(mocker: pytest_mock.MockFixture):
)
def test_get_agents_by_creator(mocker: pytest_mock.MockFixture):
def test_get_agents_by_creator(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.StoreAgentsResponse(
agents=[
backend.server.v2.store.model.StoreAgent(
@@ -136,6 +156,8 @@ def test_get_agents_by_creator(mocker: pytest_mock.MockFixture):
)
assert len(data.agents) == 1
assert data.agents[0].creator == "specific-creator"
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "agts_by_creator")
mock_db_call.assert_called_once_with(
featured=False,
creator="specific-creator",
@@ -147,7 +169,10 @@ def test_get_agents_by_creator(mocker: pytest_mock.MockFixture):
)
def test_get_agents_sorted(mocker: pytest_mock.MockFixture):
def test_get_agents_sorted(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.StoreAgentsResponse(
agents=[
backend.server.v2.store.model.StoreAgent(
@@ -178,6 +203,8 @@ def test_get_agents_sorted(mocker: pytest_mock.MockFixture):
)
assert len(data.agents) == 1
assert data.agents[0].runs == 1000
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "agts_sorted")
mock_db_call.assert_called_once_with(
featured=False,
creator=None,
@@ -189,7 +216,10 @@ def test_get_agents_sorted(mocker: pytest_mock.MockFixture):
)
def test_get_agents_search(mocker: pytest_mock.MockFixture):
def test_get_agents_search(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.StoreAgentsResponse(
agents=[
backend.server.v2.store.model.StoreAgent(
@@ -220,6 +250,8 @@ def test_get_agents_search(mocker: pytest_mock.MockFixture):
)
assert len(data.agents) == 1
assert "specific" in data.agents[0].description.lower()
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "agts_search")
mock_db_call.assert_called_once_with(
featured=False,
creator=None,
@@ -231,7 +263,10 @@ def test_get_agents_search(mocker: pytest_mock.MockFixture):
)
def test_get_agents_category(mocker: pytest_mock.MockFixture):
def test_get_agents_category(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.StoreAgentsResponse(
agents=[
backend.server.v2.store.model.StoreAgent(
@@ -261,6 +296,8 @@ def test_get_agents_category(mocker: pytest_mock.MockFixture):
response.json()
)
assert len(data.agents) == 1
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "agts_category")
mock_db_call.assert_called_once_with(
featured=False,
creator=None,
@@ -272,7 +309,10 @@ def test_get_agents_category(mocker: pytest_mock.MockFixture):
)
def test_get_agents_pagination(mocker: pytest_mock.MockFixture):
def test_get_agents_pagination(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.StoreAgentsResponse(
agents=[
backend.server.v2.store.model.StoreAgent(
@@ -305,6 +345,8 @@ def test_get_agents_pagination(mocker: pytest_mock.MockFixture):
assert len(data.agents) == 5
assert data.pagination.current_page == 2
assert data.pagination.page_size == 5
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "agts_pagination")
mock_db_call.assert_called_once_with(
featured=False,
creator=None,
@@ -334,7 +376,10 @@ def test_get_agents_malformed_request(mocker: pytest_mock.MockFixture):
mock_db_call.assert_not_called()
def test_get_agent_details(mocker: pytest_mock.MockFixture):
def test_get_agent_details(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.StoreAgentDetails(
store_listing_version_id="test-version-id",
slug="test-agent",
@@ -349,7 +394,7 @@ def test_get_agent_details(mocker: pytest_mock.MockFixture):
runs=100,
rating=4.5,
versions=["1.0.0", "1.1.0"],
last_updated=datetime.datetime.now(),
last_updated=FIXED_NOW,
)
mock_db_call = mocker.patch("backend.server.v2.store.db.get_store_agent_details")
mock_db_call.return_value = mocked_value
@@ -362,10 +407,15 @@ def test_get_agent_details(mocker: pytest_mock.MockFixture):
)
assert data.agent_name == "Test Agent"
assert data.creator == "creator1"
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "agt_details")
mock_db_call.assert_called_once_with(username="creator1", agent_name="test-agent")
def test_get_creators_defaults(mocker: pytest_mock.MockFixture):
def test_get_creators_defaults(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.CreatorsResponse(
creators=[],
pagination=backend.server.v2.store.model.Pagination(
@@ -386,12 +436,17 @@ def test_get_creators_defaults(mocker: pytest_mock.MockFixture):
)
assert data.pagination.total_pages == 0
assert data.creators == []
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "def_creators")
mock_db_call.assert_called_once_with(
featured=False, search_query=None, sorted_by=None, page=1, page_size=20
)
def test_get_creators_pagination(mocker: pytest_mock.MockFixture):
def test_get_creators_pagination(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.CreatorsResponse(
creators=[
backend.server.v2.store.model.Creator(
@@ -425,6 +480,8 @@ def test_get_creators_pagination(mocker: pytest_mock.MockFixture):
assert len(data.creators) == 5
assert data.pagination.current_page == 2
assert data.pagination.page_size == 5
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "creators_pagination")
mock_db_call.assert_called_once_with(
featured=False, search_query=None, sorted_by=None, page=2, page_size=5
)
@@ -448,7 +505,10 @@ def test_get_creators_malformed_request(mocker: pytest_mock.MockFixture):
mock_db_call.assert_not_called()
def test_get_creator_details(mocker: pytest_mock.MockFixture):
def test_get_creator_details(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.CreatorDetails(
name="Test User",
username="creator1",
@@ -468,17 +528,22 @@ def test_get_creator_details(mocker: pytest_mock.MockFixture):
data = backend.server.v2.store.model.CreatorDetails.model_validate(response.json())
assert data.username == "creator1"
assert data.name == "Test User"
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "creator_details")
mock_db_call.assert_called_once_with(username="creator1")
def test_get_submissions_success(mocker: pytest_mock.MockFixture):
def test_get_submissions_success(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.StoreSubmissionsResponse(
submissions=[
backend.server.v2.store.model.StoreSubmission(
name="Test Agent",
description="Test agent description",
image_urls=["test.jpg"],
date_submitted=datetime.datetime.now(),
date_submitted=FIXED_NOW,
status=prisma.enums.SubmissionStatus.APPROVED,
runs=50,
rating=4.2,
@@ -507,10 +572,15 @@ def test_get_submissions_success(mocker: pytest_mock.MockFixture):
assert len(data.submissions) == 1
assert data.submissions[0].name == "Test Agent"
assert data.pagination.current_page == 1
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "sub_success")
mock_db_call.assert_called_once_with(user_id="test-user-id", page=1, page_size=20)
def test_get_submissions_pagination(mocker: pytest_mock.MockFixture):
def test_get_submissions_pagination(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.StoreSubmissionsResponse(
submissions=[],
pagination=backend.server.v2.store.model.Pagination(
@@ -531,6 +601,8 @@ def test_get_submissions_pagination(mocker: pytest_mock.MockFixture):
)
assert data.pagination.current_page == 2
assert data.pagination.page_size == 5
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "sub_pagination")
mock_db_call.assert_called_once_with(user_id="test-user-id", page=2, page_size=5)

View File

@@ -35,7 +35,9 @@ async def verify_token(request: TurnstileVerifyRequest) -> TurnstileVerifyRespon
turnstile_verify_url = settings.secrets.turnstile_verify_url
if not turnstile_secret_key:
logger.error("Turnstile secret key is not configured")
logger.error(
"Turnstile secret key missing. Set TURNSTILE_SECRET_KEY to enable verification."
)
return TurnstileVerifyResponse(
success=False,
error="CONFIGURATION_ERROR",

View File

@@ -0,0 +1,32 @@
import fastapi
import fastapi.testclient
import pytest_mock
import backend.server.v2.turnstile.routes as turnstile_routes
app = fastapi.FastAPI()
app.include_router(turnstile_routes.router)
client = fastapi.testclient.TestClient(app)
def test_verify_turnstile_token_no_secret_key(mocker: pytest_mock.MockFixture) -> None:
"""Test token verification without secret key configured"""
# Mock the settings with no secret key
mock_settings = mocker.patch("backend.server.v2.turnstile.routes.settings")
mock_settings.secrets.turnstile_secret_key = None
request_data = {"token": "test_token", "action": "login"}
response = client.post("/verify", json=request_data)
assert response.status_code == 200
response_data = response.json()
assert response_data["success"] is False
assert response_data["error"] == "CONFIGURATION_ERROR"
def test_verify_turnstile_token_invalid_request() -> None:
"""Test token verification with invalid request data"""
# Missing token
response = client.post("/verify", json={"action": "login"})
assert response.status_code == 422

View File

@@ -3,6 +3,7 @@ import logging
from contextlib import asynccontextmanager
from typing import Protocol
import pydantic
import uvicorn
from autogpt_libs.auth import parse_jwt_token
from autogpt_libs.logging.utils import generate_uvicorn_config
@@ -51,7 +52,11 @@ async def event_broadcaster(manager: ConnectionManager):
async for event in event_queue.listen("*"):
await manager.send_execution_update(event)
except Exception as e:
logger.exception(f"Event broadcaster error: {e}")
logger.exception(
"Event broadcaster stopped due to error: %s. "
"Verify the Redis connection and restart the service.",
e,
)
raise
@@ -221,7 +226,22 @@ async def websocket_router(
try:
while True:
data = await websocket.receive_text()
message = WSMessage.model_validate_json(data)
try:
message = WSMessage.model_validate_json(data)
except pydantic.ValidationError as e:
logger.error(
"Invalid WebSocket message from user #%s: %s",
user_id,
e,
)
await websocket.send_text(
WSMessage(
method=WSMethod.ERROR,
success=False,
error=("Invalid message format. Review the schema and retry"),
).model_dump_json()
)
continue
try:
if message.method in _MSG_HANDLERS:
@@ -232,6 +252,21 @@ async def websocket_router(
message=message,
)
continue
except pydantic.ValidationError as e:
logger.error(
"Validation error while handling '%s' for user #%s: %s",
message.method.value,
user_id,
e,
)
await websocket.send_text(
WSMessage(
method=WSMethod.ERROR,
success=False,
error="Invalid message data. Refer to the API schema",
).model_dump_json()
)
continue
except Exception as e:
logger.error(
f"Error while handling '{message.method.value}' message "

View File

@@ -2,16 +2,17 @@ services:
postgres-test:
image: ankane/pgvector:latest
environment:
- POSTGRES_USER=${DB_USER}
- POSTGRES_PASSWORD=${DB_PASS}
- POSTGRES_DB=${DB_NAME}
- POSTGRES_USER=${DB_USER:-postgres}
- POSTGRES_PASSWORD=${DB_PASS:-postgres}
- POSTGRES_DB=${DB_NAME:-postgres}
- POSTGRES_PORT=${DB_PORT:-5432}
healthcheck:
test: pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB
interval: 10s
timeout: 5s
retries: 5
ports:
- "${DB_PORT}:5432"
- "${DB_PORT:-5432}:5432"
networks:
- app-network-test
redis-test:

File diff suppressed because it is too large Load Diff

View File

@@ -65,6 +65,7 @@ websockets = "^14.2"
youtube-transcript-api = "^0.6.2"
zerobouncesdk = "^1.1.1"
# NOTE: please insert new dependencies in their alphabetical location
pytest-snapshot = "^0.9.0"
[tool.poetry.group.dev.dependencies]
aiohappyeyeballs = "^2.6.1"

View File

@@ -1,3 +1,4 @@
import os
import subprocess
import sys
import time
@@ -59,11 +60,52 @@ def test():
run_command(["docker", "compose", "-f", "docker-compose.test.yaml", "down"])
sys.exit(1)
# Run Prisma migrations
run_command(["prisma", "migrate", "dev"])
# IMPORTANT: Set test database environment variables to prevent accidentally
# resetting the developer's local database.
#
# This script spins up a separate test database container (postgres-test) using
# docker-compose.test.yaml. We explicitly set DATABASE_URL and DIRECT_URL to point
# to this test database to ensure that:
# 1. The prisma migrate reset command only affects the test database
# 2. Tests run against the test database, not the developer's local database
# 3. Any database operations during testing are isolated from development data
#
# Without this, if a developer has DATABASE_URL set in their environment pointing
# to their development database, running tests would wipe their local data!
test_env = os.environ.copy()
# Run the tests
result = subprocess.run(["pytest"] + sys.argv[1:], check=False)
# Use environment variables if set, otherwise use defaults that match docker-compose.test.yaml
db_user = os.getenv("DB_USER", "postgres")
db_pass = os.getenv("DB_PASS", "postgres")
db_name = os.getenv("DB_NAME", "postgres")
db_port = os.getenv("DB_PORT", "5432")
# Construct the test database URL - this ensures we're always pointing to the test container
test_env["DATABASE_URL"] = (
f"postgresql://{db_user}:{db_pass}@localhost:{db_port}/{db_name}"
)
test_env["DIRECT_URL"] = test_env["DATABASE_URL"]
test_env["DB_PORT"] = db_port
test_env["DB_NAME"] = db_name
test_env["DB_PASS"] = db_pass
test_env["DB_USER"] = db_user
# Run Prisma migrations with test database
# First, reset the database to ensure clean state for tests
# This is safe because we've explicitly set DATABASE_URL to the test database above
subprocess.run(
["prisma", "migrate", "reset", "--force", "--skip-seed"],
env=test_env,
check=False,
)
# Then apply migrations to get the test database schema up to date
subprocess.run(["prisma", "migrate", "deploy"], env=test_env, check=True)
# Run the tests with test database environment
# This ensures all database connections in the tests use the test database,
# not any database that might be configured in the developer's environment
result = subprocess.run(["pytest"] + sys.argv[1:], env=test_env, check=False)
run_command(["docker", "compose", "-f", "docker-compose.test.yaml", "down"])

View File

@@ -0,0 +1,4 @@
{
"new_balance": 200,
"transaction_key": "transaction-456-uuid"
}

View File

@@ -0,0 +1,4 @@
{
"new_balance": 1500,
"transaction_key": "transaction-123-uuid"
}

View File

@@ -0,0 +1,9 @@
{
"history": [],
"pagination": {
"current_page": 1,
"page_size": 20,
"total_items": 0,
"total_pages": 0
}
}

View File

@@ -0,0 +1,28 @@
{
"history": [
{
"admin_email": null,
"amount": 500,
"current_balance": 0,
"description": null,
"extra_data": null,
"reason": "Top up",
"running_balance": 0,
"transaction_key": "",
"transaction_time": "0001-01-01T00:00:00Z",
"transaction_type": "TOP_UP",
"usage_execution_id": null,
"usage_graph_id": null,
"usage_node_count": 0,
"usage_start_time": "9999-12-31T23:59:59.999999Z",
"user_email": "test@example.com",
"user_id": "user-3"
}
],
"pagination": {
"current_page": 1,
"page_size": 10,
"total_items": 1,
"total_pages": 1
}
}

View File

@@ -0,0 +1,46 @@
{
"history": [
{
"admin_email": null,
"amount": 1000,
"current_balance": 0,
"description": null,
"extra_data": null,
"reason": "Initial grant",
"running_balance": 0,
"transaction_key": "",
"transaction_time": "0001-01-01T00:00:00Z",
"transaction_type": "GRANT",
"usage_execution_id": null,
"usage_graph_id": null,
"usage_node_count": 0,
"usage_start_time": "9999-12-31T23:59:59.999999Z",
"user_email": "user1@example.com",
"user_id": "user-1"
},
{
"admin_email": null,
"amount": -50,
"current_balance": 0,
"description": null,
"extra_data": null,
"reason": "Usage",
"running_balance": 0,
"transaction_key": "",
"transaction_time": "0001-01-01T00:00:00Z",
"transaction_type": "USAGE",
"usage_execution_id": null,
"usage_graph_id": null,
"usage_node_count": 0,
"usage_start_time": "9999-12-31T23:59:59.999999Z",
"user_email": "user2@example.com",
"user_id": "user-2"
}
],
"pagination": {
"current_page": 1,
"page_size": 20,
"total_items": 2,
"total_pages": 1
}
}

View File

@@ -0,0 +1,4 @@
{
"new_balance": 1500,
"transaction_key": "transaction-123-uuid"
}

View File

@@ -0,0 +1,27 @@
{
"store_listing_version_id": "test-version-id",
"slug": "test-agent",
"agent_name": "Test Agent",
"agent_video": "video.mp4",
"agent_image": [
"image1.jpg",
"image2.jpg"
],
"creator": "creator1",
"creator_avatar": "avatar1.jpg",
"sub_heading": "Test agent subheading",
"description": "Test agent description",
"categories": [
"category1",
"category2"
],
"runs": 100,
"rating": 4.5,
"versions": [
"1.0.0",
"1.1.0"
],
"last_updated": "2023-01-01T00:00:00",
"active_version_id": null,
"has_approved_version": false
}

View File

@@ -0,0 +1,21 @@
{
"agents": [
{
"slug": "creator-agent",
"agent_name": "Creator Agent",
"agent_image": "agent.jpg",
"creator": "specific-creator",
"creator_avatar": "avatar.jpg",
"sub_heading": "Creator agent subheading",
"description": "Creator agent description",
"runs": 50,
"rating": 4.0
}
],
"pagination": {
"total_items": 1,
"total_pages": 1,
"current_page": 1,
"page_size": 20
}
}

View File

@@ -0,0 +1,21 @@
{
"agents": [
{
"slug": "category-agent",
"agent_name": "Category Agent",
"agent_image": "category.jpg",
"creator": "creator1",
"creator_avatar": "avatar1.jpg",
"sub_heading": "Category agent subheading",
"description": "Category agent description",
"runs": 60,
"rating": 4.1
}
],
"pagination": {
"total_items": 1,
"total_pages": 1,
"current_page": 1,
"page_size": 20
}
}

View File

@@ -0,0 +1,65 @@
{
"agents": [
{
"slug": "agent-0",
"agent_name": "Agent 0",
"agent_image": "agent0.jpg",
"creator": "creator1",
"creator_avatar": "avatar1.jpg",
"sub_heading": "Agent 0 subheading",
"description": "Agent 0 description",
"runs": 0,
"rating": 4.0
},
{
"slug": "agent-1",
"agent_name": "Agent 1",
"agent_image": "agent1.jpg",
"creator": "creator1",
"creator_avatar": "avatar1.jpg",
"sub_heading": "Agent 1 subheading",
"description": "Agent 1 description",
"runs": 10,
"rating": 4.0
},
{
"slug": "agent-2",
"agent_name": "Agent 2",
"agent_image": "agent2.jpg",
"creator": "creator1",
"creator_avatar": "avatar1.jpg",
"sub_heading": "Agent 2 subheading",
"description": "Agent 2 description",
"runs": 20,
"rating": 4.0
},
{
"slug": "agent-3",
"agent_name": "Agent 3",
"agent_image": "agent3.jpg",
"creator": "creator1",
"creator_avatar": "avatar1.jpg",
"sub_heading": "Agent 3 subheading",
"description": "Agent 3 description",
"runs": 30,
"rating": 4.0
},
{
"slug": "agent-4",
"agent_name": "Agent 4",
"agent_image": "agent4.jpg",
"creator": "creator1",
"creator_avatar": "avatar1.jpg",
"sub_heading": "Agent 4 subheading",
"description": "Agent 4 description",
"runs": 40,
"rating": 4.0
}
],
"pagination": {
"total_items": 15,
"total_pages": 3,
"current_page": 2,
"page_size": 5
}
}

View File

@@ -0,0 +1,21 @@
{
"agents": [
{
"slug": "search-agent",
"agent_name": "Search Agent",
"agent_image": "search.jpg",
"creator": "creator1",
"creator_avatar": "avatar1.jpg",
"sub_heading": "Search agent subheading",
"description": "Specific search term description",
"runs": 75,
"rating": 4.2
}
],
"pagination": {
"total_items": 1,
"total_pages": 1,
"current_page": 1,
"page_size": 20
}
}

View File

@@ -0,0 +1,21 @@
{
"agents": [
{
"slug": "top-agent",
"agent_name": "Top Agent",
"agent_image": "top.jpg",
"creator": "creator1",
"creator_avatar": "avatar1.jpg",
"sub_heading": "Top agent subheading",
"description": "Top agent description",
"runs": 1000,
"rating": 5.0
}
],
"pagination": {
"total_items": 1,
"total_pages": 1,
"current_page": 1,
"page_size": 20
}
}

View File

@@ -0,0 +1,30 @@
{
"analytics_id": "analytics-complex-uuid",
"logged_data": {
"agent_id": "agent_123",
"blocks_used": [
{
"block_id": "llm_block",
"count": 3
},
{
"block_id": "http_block",
"count": 5
},
{
"block_id": "code_block",
"count": 2
}
],
"duration_ms": 3500,
"errors": [],
"execution_id": "exec_456",
"metadata": {
"environment": "production",
"trigger": "manual",
"user_tier": "premium"
},
"nodes_executed": 15,
"status": "completed"
}
}

View File

@@ -0,0 +1,3 @@
{
"analytics_id": "analytics-789-uuid"
}

View File

@@ -0,0 +1,3 @@
{
"metric_id": "metric-123-uuid"
}

View File

@@ -0,0 +1,3 @@
{
"metric_id": "metric-456-uuid"
}

View File

@@ -0,0 +1,3 @@
{
"email": "newemail@example.com"
}

View File

@@ -0,0 +1,5 @@
{
"email": "test@example.com",
"id": "test-user-id",
"name": "Test User"
}

View File

@@ -0,0 +1,14 @@
[
{
"costs": [
{
"cost": 10,
"type": "credit"
}
],
"description": "A test block",
"disabled": false,
"id": "test-block",
"name": "Test Block"
}
]

View File

@@ -0,0 +1,12 @@
{
"output1": [
{
"data": "result1"
}
],
"output2": [
{
"data": "result2"
}
]
}

View File

@@ -0,0 +1,16 @@
{
"name": "Test User",
"username": "creator1",
"description": "Test creator description",
"links": [
"link1.com",
"link2.com"
],
"avatar_url": "avatar.jpg",
"agent_rating": 4.8,
"agent_runs": 1000,
"top_categories": [
"category1",
"category2"
]
}

View File

@@ -0,0 +1,60 @@
{
"creators": [
{
"name": "Creator 0",
"username": "creator0",
"description": "Creator 0 description",
"avatar_url": "avatar0.jpg",
"num_agents": 1,
"agent_rating": 4.5,
"agent_runs": 100,
"is_featured": false
},
{
"name": "Creator 1",
"username": "creator1",
"description": "Creator 1 description",
"avatar_url": "avatar1.jpg",
"num_agents": 1,
"agent_rating": 4.5,
"agent_runs": 100,
"is_featured": false
},
{
"name": "Creator 2",
"username": "creator2",
"description": "Creator 2 description",
"avatar_url": "avatar2.jpg",
"num_agents": 1,
"agent_rating": 4.5,
"agent_runs": 100,
"is_featured": false
},
{
"name": "Creator 3",
"username": "creator3",
"description": "Creator 3 description",
"avatar_url": "avatar3.jpg",
"num_agents": 1,
"agent_rating": 4.5,
"agent_runs": 100,
"is_featured": false
},
{
"name": "Creator 4",
"username": "creator4",
"description": "Creator 4 description",
"avatar_url": "avatar4.jpg",
"num_agents": 1,
"agent_rating": 4.5,
"agent_runs": 100,
"is_featured": false
}
],
"pagination": {
"total_items": 15,
"total_pages": 3,
"current_page": 2,
"page_size": 5
}
}

View File

@@ -0,0 +1,3 @@
{
"credits": 1000
}

View File

@@ -0,0 +1,4 @@
{
"amount": 500,
"threshold": 100
}

View File

@@ -0,0 +1,3 @@
{
"checkout_url": "https://checkout.example.com/session123"
}

View File

@@ -0,0 +1,9 @@
{
"agents": [],
"pagination": {
"total_items": 0,
"total_pages": 0,
"current_page": 0,
"page_size": 10
}
}

View File

@@ -0,0 +1,9 @@
{
"creators": [],
"pagination": {
"total_items": 0,
"total_pages": 0,
"current_page": 0,
"page_size": 10
}
}

View File

@@ -0,0 +1,21 @@
{
"agents": [
{
"slug": "featured-agent",
"agent_name": "Featured Agent",
"agent_image": "featured.jpg",
"creator": "creator1",
"creator_avatar": "avatar1.jpg",
"sub_heading": "Featured agent subheading",
"description": "Featured agent description",
"runs": 100,
"rating": 4.5
}
],
"pagination": {
"total_items": 1,
"total_pages": 1,
"current_page": 1,
"page_size": 20
}
}

View File

@@ -0,0 +1,20 @@
{
"properties": {
"in_key_a": {
"advanced": true,
"default": "A",
"secret": false,
"title": "Key A"
},
"in_key_b": {
"advanced": false,
"secret": false,
"title": "in_key_b"
}
},
"required": [
"in_key_b"
],
"title": "ExpectedInputSchema",
"type": "object"
}

View File

@@ -0,0 +1,15 @@
{
"properties": {
"out_key": {
"advanced": false,
"description": "This is an output key",
"secret": false,
"title": "out_key"
}
},
"required": [
"out_key"
],
"title": "ExpectedOutputSchema",
"type": "object"
}

View File

@@ -0,0 +1,29 @@
{
"credentials_input_schema": {
"properties": {},
"title": "TestGraphCredentialsInputSchema",
"type": "object"
},
"description": "A test graph",
"forked_from_id": null,
"forked_from_version": null,
"has_webhook_trigger": false,
"id": "graph-123",
"input_schema": {
"properties": {},
"required": [],
"type": "object"
},
"is_active": true,
"links": [],
"name": "Test Graph",
"nodes": [],
"output_schema": {
"properties": {},
"required": [],
"type": "object"
},
"sub_graphs": [],
"user_id": "test-user-id",
"version": 1
}

View File

@@ -0,0 +1,17 @@
{
"description": "Test graph",
"link_structure": [
{
"sink_name": "name",
"source_name": "output"
}
],
"links_count": 1,
"name": "TestGraph",
"node_blocks": [
"1ff065e9-88e8-4358-9d82-8dc91f622ba9",
"c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
"1ff065e9-88e8-4358-9d82-8dc91f622ba9"
],
"nodes_count": 3
}

View File

@@ -0,0 +1,31 @@
[
{
"credentials_input_schema": {
"properties": {},
"title": "TestGraphCredentialsInputSchema",
"type": "object"
},
"description": "A test graph",
"forked_from_id": null,
"forked_from_version": null,
"has_webhook_trigger": false,
"id": "graph-123",
"input_schema": {
"properties": {},
"required": [],
"type": "object"
},
"is_active": true,
"links": [],
"name": "Test Graph",
"nodes": [],
"output_schema": {
"properties": {},
"required": [],
"type": "object"
},
"sub_graphs": [],
"user_id": "test-user-id",
"version": 1
}
]

View File

@@ -0,0 +1,3 @@
{
"version_counts": 3
}

View File

@@ -0,0 +1,48 @@
{
"agents": [
{
"id": "test-agent-1",
"graph_id": "test-agent-1",
"graph_version": 1,
"image_url": null,
"creator_name": "Test Creator",
"creator_image_url": "",
"status": "COMPLETED",
"updated_at": "2023-01-01T00:00:00",
"name": "Test Agent 1",
"description": "Test Description 1",
"input_schema": {
"type": "object",
"properties": {}
},
"new_output": false,
"can_access_graph": true,
"is_latest_version": true
},
{
"id": "test-agent-2",
"graph_id": "test-agent-2",
"graph_version": 1,
"image_url": null,
"creator_name": "Test Creator",
"creator_image_url": "",
"status": "COMPLETED",
"updated_at": "2023-01-01T00:00:00",
"name": "Test Agent 2",
"description": "Test Description 2",
"input_schema": {
"type": "object",
"properties": {}
},
"new_output": false,
"can_access_graph": false,
"is_latest_version": true
}
],
"pagination": {
"total_items": 2,
"total_pages": 1,
"current_page": 1,
"page_size": 50
}
}

View File

@@ -0,0 +1,30 @@
{
"analytics_id": "analytics-complex-uuid",
"logged_data": {
"agent_id": "agent_123",
"blocks_used": [
{
"block_id": "llm_block",
"count": 3
},
{
"block_id": "http_block",
"count": 5
},
{
"block_id": "code_block",
"count": 2
}
],
"duration_ms": 3500,
"errors": [],
"execution_id": "exec_456",
"metadata": {
"environment": "production",
"trigger": "manual",
"user_tier": "premium"
},
"nodes_executed": 15,
"status": "completed"
}
}

View File

@@ -0,0 +1,3 @@
{
"analytics_id": "analytics-789-uuid"
}

View File

@@ -0,0 +1,3 @@
{
"metric_id": "metric-123-uuid"
}

View File

@@ -0,0 +1,3 @@
{
"metric_id": "metric-456-uuid"
}

View File

@@ -0,0 +1,5 @@
{
"answer": "Welcome! How can I help you?",
"documents": [],
"success": true
}

View File

@@ -0,0 +1,5 @@
{
"answer": "An error occurred while processing your request.",
"documents": [],
"success": false
}

View File

@@ -0,0 +1,10 @@
{
"answer": "Here's information about your graph.",
"documents": [
{
"relevance_score": 0.92,
"url": "https://example.com/graph-doc"
}
],
"success": true
}

View File

@@ -0,0 +1,14 @@
{
"answer": "This is Otto's response to your query.",
"documents": [
{
"relevance_score": 0.95,
"url": "https://example.com/doc1"
},
{
"relevance_score": 0.87,
"url": "https://example.com/doc2"
}
],
"success": true
}

View File

@@ -0,0 +1,7 @@
{
"channel": "3e53486c-cf57-477e-ba2a-cb02dc828e1a|graph_exec#test-graph-exec-1",
"data": null,
"error": null,
"method": "subscribe_graph_execution",
"success": true
}

View File

@@ -0,0 +1,9 @@
{
"submissions": [],
"pagination": {
"total_items": 10,
"total_pages": 2,
"current_page": 2,
"page_size": 5
}
}

View File

@@ -0,0 +1,32 @@
{
"submissions": [
{
"agent_id": "test-agent-id",
"agent_version": 1,
"name": "Test Agent",
"sub_heading": "Test agent subheading",
"slug": "test-agent",
"description": "Test agent description",
"image_urls": [
"test.jpg"
],
"date_submitted": "2023-01-01T00:00:00",
"status": "APPROVED",
"runs": 50,
"rating": 4.2,
"store_listing_version_id": null,
"version": null,
"reviewer_id": null,
"review_comments": null,
"internal_comments": null,
"reviewed_at": null,
"changes_summary": null
}
],
"pagination": {
"total_items": 1,
"total_pages": 1,
"current_page": 1,
"page_size": 20
}
}

View File

@@ -0,0 +1,7 @@
{
"channel": "3e53486c-cf57-477e-ba2a-cb02dc828e1a|graph_exec#test-graph-exec-1",
"data": null,
"error": null,
"method": "unsubscribe",
"success": true
}

View File

@@ -1,9 +1,11 @@
import json
from typing import Any
from uuid import UUID
import autogpt_libs.auth.models
import fastapi.exceptions
import pytest
from pytest_snapshot.plugin import Snapshot
import backend.server.v2.store.model as store
from backend.blocks.basic import StoreValueBlock
@@ -18,7 +20,7 @@ from backend.util.test import SpinTestServer
@pytest.mark.asyncio(loop_scope="session")
async def test_graph_creation(server: SpinTestServer):
async def test_graph_creation(server: SpinTestServer, snapshot: Snapshot):
"""
Test the creation of a graph with nodes and links.
@@ -70,9 +72,27 @@ async def test_graph_creation(server: SpinTestServer):
assert links[0].source_id in {nodes[0].id, nodes[1].id, nodes[2].id}
assert links[0].sink_id in {nodes[0].id, nodes[1].id, nodes[2].id}
# Create a serializable version of the graph for snapshot testing
# Remove dynamic IDs to make snapshots reproducible
graph_data = {
"name": created_graph.name,
"description": created_graph.description,
"nodes_count": len(created_graph.nodes),
"links_count": len(created_graph.links),
"node_blocks": [node.block_id for node in created_graph.nodes],
"link_structure": [
{"source_name": link.source_name, "sink_name": link.sink_name}
for link in created_graph.links
],
}
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(graph_data, indent=2, sort_keys=True), "grph_struct"
)
@pytest.mark.asyncio(loop_scope="session")
async def test_get_input_schema(server: SpinTestServer):
async def test_get_input_schema(server: SpinTestServer, snapshot: Snapshot):
"""
Test the get_input_schema method of a created graph.
@@ -162,10 +182,22 @@ async def test_get_input_schema(server: SpinTestServer):
input_schema["title"] = "ExpectedInputSchema"
assert input_schema == ExpectedInputSchema.jsonschema()
# Add snapshot testing for the schemas
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(input_schema, indent=2, sort_keys=True), "grph_in_schm"
)
output_schema = created_graph.output_schema
output_schema["title"] = "ExpectedOutputSchema"
assert output_schema == ExpectedOutputSchema.jsonschema()
# Add snapshot testing for the output schema
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(output_schema, indent=2, sort_keys=True), "grph_out_schm"
)
@pytest.mark.asyncio(loop_scope="session")
async def test_clean_graph(server: SpinTestServer):

View File

@@ -1,8 +1,10 @@
import json
from typing import cast
from unittest.mock import AsyncMock
import pytest
from fastapi import WebSocket, WebSocketDisconnect
from pytest_snapshot.plugin import Snapshot
from backend.data.user import DEFAULT_USER_ID
from backend.server.conn_manager import ConnectionManager
@@ -27,7 +29,7 @@ def mock_manager() -> AsyncMock:
@pytest.mark.asyncio
async def test_websocket_router_subscribe(
mock_websocket: AsyncMock, mock_manager: AsyncMock
mock_websocket: AsyncMock, mock_manager: AsyncMock, snapshot: Snapshot
) -> None:
mock_websocket.receive_text.side_effect = [
WSMessage(
@@ -56,12 +58,19 @@ async def test_websocket_router_subscribe(
in mock_websocket.send_text.call_args[0][0]
)
assert '"success":true' in mock_websocket.send_text.call_args[0][0]
# Capture and snapshot the WebSocket response message
sent_message = mock_websocket.send_text.call_args[0][0]
parsed_message = json.loads(sent_message)
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(parsed_message, indent=2, sort_keys=True), "sub")
mock_manager.disconnect_socket.assert_called_once_with(mock_websocket)
@pytest.mark.asyncio
async def test_websocket_router_unsubscribe(
mock_websocket: AsyncMock, mock_manager: AsyncMock
mock_websocket: AsyncMock, mock_manager: AsyncMock, snapshot: Snapshot
) -> None:
mock_websocket.receive_text.side_effect = [
WSMessage(
@@ -87,6 +96,13 @@ async def test_websocket_router_unsubscribe(
mock_websocket.send_text.assert_called_once()
assert '"method":"unsubscribe"' in mock_websocket.send_text.call_args[0][0]
assert '"success":true' in mock_websocket.send_text.call_args[0][0]
# Capture and snapshot the WebSocket response message
sent_message = mock_websocket.send_text.call_args[0][0]
parsed_message = json.loads(sent_message)
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(parsed_message, indent=2, sort_keys=True), "unsub")
mock_manager.disconnect_socket.assert_called_once_with(mock_websocket)

View File

@@ -32,4 +32,4 @@ NEXT_PUBLIC_SHOW_BILLING_PAGE=false
## Get these from the Cloudflare Turnstile dashboard: https://dash.cloudflare.com/?to=/:account/turnstile
## This is the frontend site key
NEXT_PUBLIC_CLOUDFLARE_TURNSTILE_SITE_KEY=
NEXT_PUBLIC_DISABLE_TURNSTILE=false
NEXT_PUBLIC_TURNSTILE=disabled

View File

@@ -8,13 +8,14 @@ const config: StorybookConfig = {
"@storybook/addon-links",
"@storybook/addon-essentials",
"@storybook/addon-interactions",
"@storybook/addon-docs",
],
features: {
experimentalRSC: true,
},
framework: {
name: "@storybook/nextjs",
options: {},
options: { builder: { useSWC: true } },
},
staticDirs: ["../public"],
};

View File

@@ -3,6 +3,15 @@ import type { Preview } from "@storybook/react";
import { initialize, mswLoader } from "msw-storybook-addon";
import "../src/app/globals.css";
import "../src/components/styles/fonts.css";
import {
Controls,
Description,
Primary,
Source,
Stories,
Subtitle,
Title,
} from "@storybook/blocks";
// Initialize MSW
initialize();
@@ -12,19 +21,26 @@ const preview: Preview = {
nextjs: {
appDirectory: true,
},
controls: {
matchers: {
color: /(background|color)$/i,
date: /Date$/i,
},
docs: {
page: () => (
<>
<Title />
<Subtitle />
<Description />
<Primary />
<Source />
<Stories />
<Controls />
</>
),
},
},
loaders: [mswLoader],
decorators: [
(Story) => (
<>
<div className="bg-background p-8">
<Story />
</>
</div>
),
],
};

View File

@@ -2,61 +2,64 @@
// The config you add here will be used whenever a users loads a page in their browser.
// https://docs.sentry.io/platforms/javascript/guides/nextjs/
import { getEnvironmentStr } from "@/lib/utils";
import { BehaveAs, getBehaveAs, getEnvironmentStr } from "@/lib/utils";
import * as Sentry from "@sentry/nextjs";
if (process.env.NODE_ENV === "production") {
Sentry.init({
dsn: "https://fe4e4aa4a283391808a5da396da20159@o4505260022104064.ingest.us.sentry.io/4507946746380288",
const isProductionCloud =
process.env.NODE_ENV === "production" && getBehaveAs() === BehaveAs.CLOUD;
environment: getEnvironmentStr(),
Sentry.init({
dsn: "https://fe4e4aa4a283391808a5da396da20159@o4505260022104064.ingest.us.sentry.io/4507946746380288",
// Add optional integrations for additional features
integrations: [
Sentry.replayIntegration(),
Sentry.httpClientIntegration(),
Sentry.replayCanvasIntegration(),
Sentry.reportingObserverIntegration(),
Sentry.browserProfilingIntegration(),
// Sentry.feedbackIntegration({
// // Additional SDK configuration goes in here, for example:
// colorScheme: "system",
// }),
],
environment: getEnvironmentStr(),
// Define how likely traces are sampled. Adjust this value in production, or use tracesSampler for greater control.
tracesSampleRate: 1,
enabled: isProductionCloud,
// Set `tracePropagationTargets` to control for which URLs trace propagation should be enabled
tracePropagationTargets: [
"localhost",
"localhost:8006",
/^https:\/\/dev\-builder\.agpt\.co\/api/,
/^https:\/\/.*\.agpt\.co\/api/,
],
// Add optional integrations for additional features
integrations: [
Sentry.replayIntegration(),
Sentry.httpClientIntegration(),
Sentry.replayCanvasIntegration(),
Sentry.reportingObserverIntegration(),
Sentry.browserProfilingIntegration(),
// Sentry.feedbackIntegration({
// // Additional SDK configuration goes in here, for example:
// colorScheme: "system",
// }),
],
// Define how likely Replay events are sampled.
// This sets the sample rate to be 10%. You may want this to be 100% while
// in development and sample at a lower rate in production
replaysSessionSampleRate: 0.1,
// Define how likely traces are sampled. Adjust this value in production, or use tracesSampler for greater control.
tracesSampleRate: 1,
// Define how likely Replay events are sampled when an error occurs.
replaysOnErrorSampleRate: 1.0,
// Set `tracePropagationTargets` to control for which URLs trace propagation should be enabled
tracePropagationTargets: [
"localhost",
"localhost:8006",
/^https:\/\/dev\-builder\.agpt\.co\/api/,
/^https:\/\/.*\.agpt\.co\/api/,
],
// Setting this option to true will print useful information to the console while you're setting up Sentry.
debug: false,
// Define how likely Replay events are sampled.
// This sets the sample rate to be 10%. You may want this to be 100% while
// in development and sample at a lower rate in production
replaysSessionSampleRate: 0.1,
// Set profilesSampleRate to 1.0 to profile every transaction.
// Since profilesSampleRate is relative to tracesSampleRate,
// the final profiling rate can be computed as tracesSampleRate * profilesSampleRate
// For example, a tracesSampleRate of 0.5 and profilesSampleRate of 0.5 would
// result in 25% of transactions being profiled (0.5*0.5=0.25)
profilesSampleRate: 1.0,
_experiments: {
// Enable logs to be sent to Sentry.
enableLogs: true,
},
});
}
// Define how likely Replay events are sampled when an error occurs.
replaysOnErrorSampleRate: 1.0,
// Setting this option to true will print useful information to the console while you're setting up Sentry.
debug: false,
// Set profilesSampleRate to 1.0 to profile every transaction.
// Since profilesSampleRate is relative to tracesSampleRate,
// the final profiling rate can be computed as tracesSampleRate * profilesSampleRate
// For example, a tracesSampleRate of 0.5 and profilesSampleRate of 0.5 would
// result in 25% of transactions being profiled (0.5*0.5=0.25)
profilesSampleRate: 1.0,
_experiments: {
// Enable logs to be sent to Sentry.
enableLogs: true,
},
});
export const onRouterTransitionStart = Sentry.captureRouterTransitionStart;

View File

@@ -25,7 +25,7 @@
],
"dependencies": {
"@faker-js/faker": "9.8.0",
"@hookform/resolvers": "3.10.0",
"@hookform/resolvers": "5.1.1",
"@next/third-parties": "15.3.3",
"@radix-ui/react-alert-dialog": "1.1.14",
"@radix-ui/react-avatar": "1.1.10",
@@ -89,9 +89,10 @@
"zod": "3.25.51"
},
"devDependencies": {
"@chromatic-com/storybook": "3.2.4",
"@chromatic-com/storybook": "3.2.6",
"@playwright/test": "1.52.0",
"@storybook/addon-a11y": "8.6.14",
"@storybook/addon-docs": "8.6.14",
"@storybook/addon-essentials": "8.6.14",
"@storybook/addon-interactions": "8.6.14",
"@storybook/addon-links": "8.6.14",
@@ -100,11 +101,11 @@
"@storybook/nextjs": "8.6.14",
"@storybook/react": "8.6.14",
"@storybook/test": "8.6.14",
"@storybook/test-runner": "0.22.0",
"@storybook/test-runner": "0.22.1",
"@types/canvas-confetti": "1.9.0",
"@types/lodash": "4.17.17",
"@types/negotiator": "0.6.3",
"@types/node": "22.15.29",
"@types/negotiator": "0.6.4",
"@types/node": "22.15.30",
"@types/react": "18.3.17",
"@types/react-dom": "18.3.5",
"@types/react-modal": "3.16.3",
@@ -114,11 +115,13 @@
"eslint": "8.57.1",
"eslint-config-next": "15.3.3",
"eslint-plugin-storybook": "0.12.0",
"msw": "2.9.0",
"msw-storybook-addon": "2.0.4",
"import-in-the-middle": "1.14.0",
"msw": "2.10.2",
"msw-storybook-addon": "2.0.5",
"postcss": "8.5.4",
"prettier": "3.5.3",
"prettier-plugin-tailwindcss": "0.6.12",
"require-in-the-middle": "7.5.2",
"storybook": "8.6.14",
"tailwindcss": "3.4.17",
"typescript": "5.8.3"

View File

@@ -12,8 +12,8 @@ importers:
specifier: 9.8.0
version: 9.8.0
'@hookform/resolvers':
specifier: 3.10.0
version: 3.10.0(react-hook-form@7.57.0(react@18.3.1))
specifier: 5.1.1
version: 5.1.1(react-hook-form@7.57.0(react@18.3.1))
'@next/third-parties':
specifier: 15.3.3
version: 15.3.3(next@15.3.3(@babel/core@7.27.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.52.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)
@@ -199,14 +199,17 @@ importers:
version: 3.25.51
devDependencies:
'@chromatic-com/storybook':
specifier: 3.2.4
version: 3.2.4(react@18.3.1)(storybook@8.6.14(prettier@3.5.3))
specifier: 3.2.6
version: 3.2.6(react@18.3.1)(storybook@8.6.14(prettier@3.5.3))
'@playwright/test':
specifier: 1.52.0
version: 1.52.0
'@storybook/addon-a11y':
specifier: 8.6.14
version: 8.6.14(storybook@8.6.14(prettier@3.5.3))
'@storybook/addon-docs':
specifier: 8.6.14
version: 8.6.14(@types/react@18.3.17)(storybook@8.6.14(prettier@3.5.3))
'@storybook/addon-essentials':
specifier: 8.6.14
version: 8.6.14(@types/react@18.3.17)(storybook@8.6.14(prettier@3.5.3))
@@ -232,8 +235,8 @@ importers:
specifier: 8.6.14
version: 8.6.14(storybook@8.6.14(prettier@3.5.3))
'@storybook/test-runner':
specifier: 0.22.0
version: 0.22.0(@types/node@22.15.29)(storybook@8.6.14(prettier@3.5.3))
specifier: 0.22.1
version: 0.22.1(@types/node@22.15.30)(storybook@8.6.14(prettier@3.5.3))
'@types/canvas-confetti':
specifier: 1.9.0
version: 1.9.0
@@ -241,11 +244,11 @@ importers:
specifier: 4.17.17
version: 4.17.17
'@types/negotiator':
specifier: 0.6.3
version: 0.6.3
specifier: 0.6.4
version: 0.6.4
'@types/node':
specifier: 22.15.29
version: 22.15.29
specifier: 22.15.30
version: 22.15.30
'@types/react':
specifier: 18.3.17
version: 18.3.17
@@ -273,12 +276,15 @@ importers:
eslint-plugin-storybook:
specifier: 0.12.0
version: 0.12.0(eslint@8.57.1)(typescript@5.8.3)
import-in-the-middle:
specifier: 1.14.0
version: 1.14.0
msw:
specifier: 2.9.0
version: 2.9.0(@types/node@22.15.29)(typescript@5.8.3)
specifier: 2.10.2
version: 2.10.2(@types/node@22.15.30)(typescript@5.8.3)
msw-storybook-addon:
specifier: 2.0.4
version: 2.0.4(msw@2.9.0(@types/node@22.15.29)(typescript@5.8.3))
specifier: 2.0.5
version: 2.0.5(msw@2.10.2(@types/node@22.15.30)(typescript@5.8.3))
postcss:
specifier: 8.5.4
version: 8.5.4
@@ -288,6 +294,9 @@ importers:
prettier-plugin-tailwindcss:
specifier: 0.6.12
version: 0.6.12(prettier@3.5.3)
require-in-the-middle:
specifier: 7.5.2
version: 7.5.2
storybook:
specifier: 8.6.14
version: 8.6.14(prettier@3.5.3)
@@ -946,8 +955,8 @@ packages:
'@bundled-es-modules/tough-cookie@0.1.6':
resolution: {integrity: sha512-dvMHbL464C0zI+Yqxbz6kZ5TOEp7GLW+pry/RWndAR8MJQAXZ2rPmIs8tziTZjeIyhSNZgZbCePtfSbdWqStJw==}
'@chromatic-com/storybook@3.2.4':
resolution: {integrity: sha512-5/bOOYxfwZ2BktXeqcCpOVAoR6UCoeART5t9FVy22hoo8F291zOuX4y3SDgm10B1GVU/ZTtJWPT2X9wZFlxYLg==}
'@chromatic-com/storybook@3.2.6':
resolution: {integrity: sha512-FDmn5Ry2DzQdik+eq2sp/kJMMT36Ewe7ONXUXM2Izd97c7r6R/QyGli8eyh/F0iyqVvbLveNYFyF0dBOJNwLqw==}
engines: {node: '>=16.0.0', yarn: '>=1.22.18'}
peerDependencies:
storybook: ^8.2.0 || ^8.3.0-0 || ^8.4.0-0 || ^8.5.0-0 || ^8.6.0-0
@@ -1166,10 +1175,10 @@ packages:
'@hapi/topo@5.1.0':
resolution: {integrity: sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==}
'@hookform/resolvers@3.10.0':
resolution: {integrity: sha512-79Dv+3mDF7i+2ajj7SkypSKHhl1cbln1OGavqrsF7p6mbUv11xpqpacPsGDCTRvCSjEEIez2ef1NveSVL3b0Ag==}
'@hookform/resolvers@5.1.1':
resolution: {integrity: sha512-J/NVING3LMAEvexJkyTLjruSm7aOFx7QX21pzkiJfMoNG0wl5aFEjLTl7ay7IQb9EWY6AkrBy7tHL2Alijpdcg==}
peerDependencies:
react-hook-form: ^7.0.0
react-hook-form: ^7.55.0
'@humanwhocodes/config-array@0.13.0':
resolution: {integrity: sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==}
@@ -1545,8 +1554,8 @@ packages:
'@types/react': '>=16'
react: '>=16'
'@mswjs/interceptors@0.38.7':
resolution: {integrity: sha512-Jkb27iSn7JPdkqlTqKfhncFfnEZsIJVYxsFbUSWEkxdIPdsyngrhoDBk0/BGD2FQcRH99vlRrkHpNTyKqI+0/w==}
'@mswjs/interceptors@0.39.2':
resolution: {integrity: sha512-RuzCup9Ct91Y7V79xwCb146RaBRHZ7NBbrIUySumd1rpKqHL5OonaqrGIbug5hNwP/fRyxFMA6ISgw4FTtYFYg==}
engines: {node: '>=18'}
'@napi-rs/wasm-runtime@0.2.10':
@@ -2633,6 +2642,9 @@ packages:
'@sinonjs/fake-timers@10.3.0':
resolution: {integrity: sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA==}
'@standard-schema/utils@0.3.0':
resolution: {integrity: sha512-e7Mew686owMaPJVNNLs55PUvgz371nKgwsc4vxE49zsODpJEnxgxRo2y/OKrqueavXgZNMDVj3DdHFlaSAeU8g==}
'@storybook/addon-a11y@8.6.14':
resolution: {integrity: sha512-fozv6enO9IgpWq2U8qqS8MZ21Nt+MVHiRQe3CjnCpBOejTyo/ATm690PeYYRVHVG6M/15TVePb0h3ngKQbrrzQ==}
peerDependencies:
@@ -2835,8 +2847,8 @@ packages:
typescript:
optional: true
'@storybook/test-runner@0.22.0':
resolution: {integrity: sha512-fKY6MTE/bcvMaulKXy+z0fPmRXJx1REkYMOMcGn8zn6uffyBigGgaVf/sZ+AZfibwvjzg/StWhJ9HvAM8pc14g==}
'@storybook/test-runner@0.22.1':
resolution: {integrity: sha512-F5omZH0Pj2Y0UXSqShl1RuPrnhLBbb/yPFnZbVWDSPWZHDSX+dfBuu1T2zVfJplNKd04RzJuMbWHPFtZ0mimSw==}
engines: {node: ^16.10.0 || ^18.0.0 || >=20.0.0}
hasBin: true
peerDependencies:
@@ -2960,8 +2972,8 @@ packages:
peerDependencies:
'@swc/core': '*'
'@swc/types@0.1.21':
resolution: {integrity: sha512-2YEtj5HJVbKivud9N4bpPBAyZhj4S2Ipe5LkUG94alTpr7in/GU/EARgPAd3BwU+YOmFVJC2+kjqhGRi3r0ZpQ==}
'@swc/types@0.1.22':
resolution: {integrity: sha512-D13mY/ZA4PPEFSy6acki9eBT/3WgjMoRqNcdpIvjaYLQ44Xk5BdaL7UkDxAh6Z9UOe7tCCp67BVmZCojYp9owg==}
'@tanstack/react-table@8.21.3':
resolution: {integrity: sha512-5nNMTSETP4ykGegmVkhjcS8tTLW6Vl4axfEGQN3v0zdHYbK4UfoqfPChclTrJ4EoK9QynqAu9oUf8VEmrpZ5Ww==}
@@ -3120,11 +3132,11 @@ packages:
'@types/mysql@2.15.26':
resolution: {integrity: sha512-DSLCOXhkvfS5WNNPbfn2KdICAmk8lLc+/PNvnPnF7gOdMZCxopXduqv0OQ13y/yA/zXTSikZZqVgybUxOEg6YQ==}
'@types/negotiator@0.6.3':
resolution: {integrity: sha512-JkXTOdKs5MF086b/pt8C3+yVp3iDUwG635L7oCH6HvJvvr6lSUU5oe/gLXnPEfYRROHjJIPgCV6cuAg8gGkntQ==}
'@types/negotiator@0.6.4':
resolution: {integrity: sha512-elf6BsTq+AkyNsb2h5cGNst2Mc7dPliVoAPm1fXglC/BM3f2pFA40BaSSv3E5lyHteEawVKLP+8TwiY1DMNb3A==}
'@types/node@22.15.29':
resolution: {integrity: sha512-LNdjOkUDlU1RZb8e1kOIUpN1qQUlzGkEtbVNo53vbrwDg5om6oduhm4SiUaPW5ASTXhAiP0jInWG8Qx9fVlOeQ==}
'@types/node@22.15.30':
resolution: {integrity: sha512-6Q7lr06bEHdlfplU6YRbgG1SFBdlsfNC4/lX+SkhiTs0cpJkOElmWls8PxDFv4yY/xKb8Y6SO0OmSX4wgqTZbA==}
'@types/parse-json@4.0.2':
resolution: {integrity: sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==}
@@ -3164,8 +3176,8 @@ packages:
'@types/stack-utils@2.0.3':
resolution: {integrity: sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==}
'@types/statuses@2.0.5':
resolution: {integrity: sha512-jmIUGWrAiwu3dZpxntxieC+1n/5c3mjrImkmOSQ2NC5uP6cYO4aAZDdSmRcI5C1oiTmqlZGHC+/NmJrKogbP5A==}
'@types/statuses@2.0.6':
resolution: {integrity: sha512-xMAgYwceFhRA2zY+XbEA7mxYbA093wdiW8Vu6gZPGWy9cmOyU9XesH1tNcEWsKFd5Vzrqx5T3D38PWx1FIIXkA==}
'@types/stylis@4.2.5':
resolution: {integrity: sha512-1Xve+NMN7FWjY14vLoY5tL3BVEQ/n42YLwaqJIPYhotZ9uBHt87VceMwWQpzmdEt2TNXIorIFG+YeCUUW7RInw==}
@@ -4795,8 +4807,8 @@ packages:
typescript: '>3.6.0'
webpack: ^5.11.0
form-data@4.0.2:
resolution: {integrity: sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w==}
form-data@4.0.3:
resolution: {integrity: sha512-qsITQPfmvMOSAdeyZ+12I1c+CKSstAFAwu+97zrnWAbIr5u8wfsExUzCesVLC8NgHuRUqNN4Zy6UPWUTRGslcA==}
engines: {node: '>= 6'}
forwarded-parse@2.1.2:
@@ -5895,13 +5907,13 @@ packages:
ms@2.1.3:
resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==}
msw-storybook-addon@2.0.4:
resolution: {integrity: sha512-rstO8+r01sRMg6PPP7OxM8LG5/6r4+wmp2uapHeHvm9TQQRHvpPXOU/Y9/Somysz8Oi4Ea1aummXH3JlnP2LIA==}
msw-storybook-addon@2.0.5:
resolution: {integrity: sha512-uum2gtprDBoUb8GV/rPMwPytHmB8+AUr25BQUY0MpjYey5/ujaew2Edt+4oHiXpLTd0ThyMqmEvGy/sRpDV4lg==}
peerDependencies:
msw: ^2.0.0
msw@2.9.0:
resolution: {integrity: sha512-fNyrJ11YNbe2zl64EwtxM5OFkInFPAw5vipOljMsf9lY2ep9B2BslqQrS8EC9pB9961K61FqTUi0wsdqk6hwow==}
msw@2.10.2:
resolution: {integrity: sha512-RCKM6IZseZQCWcSWlutdf590M8nVfRHG1ImwzOtwz8IYxgT4zhUO0rfTcTvDGiaFE0Rhcc+h43lcF3Jc9gFtwQ==}
engines: {node: '>=18'}
hasBin: true
peerDependencies:
@@ -6989,8 +7001,8 @@ packages:
resolution: {integrity: sha512-WjlahMgHmCJpqzU8bIBy4qtsZdU9lRlcZE3Lvyej6t4tuOuv1vk57OW3MBrj6hXBFx/nNoC9MPMTcr5YA7NQbg==}
engines: {node: '>=6'}
statuses@2.0.1:
resolution: {integrity: sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==}
statuses@2.0.2:
resolution: {integrity: sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==}
engines: {node: '>= 0.8'}
stop-iteration-iterator@1.1.0:
@@ -8523,14 +8535,14 @@ snapshots:
'@bundled-es-modules/statuses@1.0.1':
dependencies:
statuses: 2.0.1
statuses: 2.0.2
'@bundled-es-modules/tough-cookie@0.1.6':
dependencies:
'@types/tough-cookie': 4.0.5
tough-cookie: 4.1.4
'@chromatic-com/storybook@3.2.4(react@18.3.1)(storybook@8.6.14(prettier@3.5.3))':
'@chromatic-com/storybook@3.2.6(react@18.3.1)(storybook@8.6.14(prettier@3.5.3))':
dependencies:
chromatic: 11.25.2
filesize: 10.1.6
@@ -8692,8 +8704,9 @@ snapshots:
dependencies:
'@hapi/hoek': 9.3.0
'@hookform/resolvers@3.10.0(react-hook-form@7.57.0(react@18.3.1))':
'@hookform/resolvers@5.1.1(react-hook-form@7.57.0(react@18.3.1))':
dependencies:
'@standard-schema/utils': 0.3.0
react-hook-form: 7.57.0(react@18.3.1)
'@humanwhocodes/config-array@0.13.0':
@@ -8864,17 +8877,17 @@ snapshots:
'@img/sharp-win32-x64@0.34.2':
optional: true
'@inquirer/confirm@5.1.12(@types/node@22.15.29)':
'@inquirer/confirm@5.1.12(@types/node@22.15.30)':
dependencies:
'@inquirer/core': 10.1.13(@types/node@22.15.29)
'@inquirer/type': 3.0.7(@types/node@22.15.29)
'@inquirer/core': 10.1.13(@types/node@22.15.30)
'@inquirer/type': 3.0.7(@types/node@22.15.30)
optionalDependencies:
'@types/node': 22.15.29
'@types/node': 22.15.30
'@inquirer/core@10.1.13(@types/node@22.15.29)':
'@inquirer/core@10.1.13(@types/node@22.15.30)':
dependencies:
'@inquirer/figures': 1.0.12
'@inquirer/type': 3.0.7(@types/node@22.15.29)
'@inquirer/type': 3.0.7(@types/node@22.15.30)
ansi-escapes: 4.3.2
cli-width: 4.1.0
mute-stream: 2.0.0
@@ -8882,13 +8895,13 @@ snapshots:
wrap-ansi: 6.2.0
yoctocolors-cjs: 2.1.2
optionalDependencies:
'@types/node': 22.15.29
'@types/node': 22.15.30
'@inquirer/figures@1.0.12': {}
'@inquirer/type@3.0.7(@types/node@22.15.29)':
'@inquirer/type@3.0.7(@types/node@22.15.30)':
optionalDependencies:
'@types/node': 22.15.29
'@types/node': 22.15.30
'@isaacs/cliui@8.0.2':
dependencies:
@@ -8912,7 +8925,7 @@ snapshots:
'@jest/console@29.7.0':
dependencies:
'@jest/types': 29.6.3
'@types/node': 22.15.29
'@types/node': 22.15.30
chalk: 4.1.2
jest-message-util: 29.7.0
jest-util: 29.7.0
@@ -8925,14 +8938,14 @@ snapshots:
'@jest/test-result': 29.7.0
'@jest/transform': 29.7.0
'@jest/types': 29.6.3
'@types/node': 22.15.29
'@types/node': 22.15.30
ansi-escapes: 4.3.2
chalk: 4.1.2
ci-info: 3.9.0
exit: 0.1.2
graceful-fs: 4.2.11
jest-changed-files: 29.7.0
jest-config: 29.7.0(@types/node@22.15.29)
jest-config: 29.7.0(@types/node@22.15.30)
jest-haste-map: 29.7.0
jest-message-util: 29.7.0
jest-regex-util: 29.6.3
@@ -8961,7 +8974,7 @@ snapshots:
dependencies:
'@jest/fake-timers': 29.7.0
'@jest/types': 29.6.3
'@types/node': 22.15.29
'@types/node': 22.15.30
jest-mock: 29.7.0
'@jest/expect-utils@29.7.0':
@@ -8979,7 +8992,7 @@ snapshots:
dependencies:
'@jest/types': 29.6.3
'@sinonjs/fake-timers': 10.3.0
'@types/node': 22.15.29
'@types/node': 22.15.30
jest-message-util: 29.7.0
jest-mock: 29.7.0
jest-util: 29.7.0
@@ -9001,7 +9014,7 @@ snapshots:
'@jest/transform': 29.7.0
'@jest/types': 29.6.3
'@jridgewell/trace-mapping': 0.3.25
'@types/node': 22.15.29
'@types/node': 22.15.30
chalk: 4.1.2
collect-v8-coverage: 1.0.2
exit: 0.1.2
@@ -9071,7 +9084,7 @@ snapshots:
'@jest/schemas': 29.6.3
'@types/istanbul-lib-coverage': 2.0.6
'@types/istanbul-reports': 3.0.4
'@types/node': 22.15.29
'@types/node': 22.15.30
'@types/yargs': 17.0.33
chalk: 4.1.2
@@ -9103,7 +9116,7 @@ snapshots:
'@types/react': 18.3.17
react: 18.3.1
'@mswjs/interceptors@0.38.7':
'@mswjs/interceptors@0.39.2':
dependencies:
'@open-draft/deferred-promise': 2.2.0
'@open-draft/logger': 0.3.0
@@ -10271,6 +10284,8 @@ snapshots:
dependencies:
'@sinonjs/commons': 3.0.1
'@standard-schema/utils@0.3.0': {}
'@storybook/addon-a11y@8.6.14(storybook@8.6.14(prettier@3.5.3))':
dependencies:
'@storybook/addon-highlight': 8.6.14(storybook@8.6.14(prettier@3.5.3))
@@ -10609,7 +10624,7 @@ snapshots:
'@storybook/test': 8.6.14(storybook@8.6.14(prettier@3.5.3))
typescript: 5.8.3
'@storybook/test-runner@0.22.0(@types/node@22.15.29)(storybook@8.6.14(prettier@3.5.3))':
'@storybook/test-runner@0.22.1(@types/node@22.15.30)(storybook@8.6.14(prettier@3.5.3))':
dependencies:
'@babel/core': 7.27.4
'@babel/generator': 7.27.5
@@ -10620,14 +10635,14 @@ snapshots:
'@swc/core': 1.11.31
'@swc/jest': 0.2.38(@swc/core@1.11.31)
expect-playwright: 0.8.0
jest: 29.7.0(@types/node@22.15.29)
jest: 29.7.0(@types/node@22.15.30)
jest-circus: 29.7.0
jest-environment-node: 29.7.0
jest-junit: 16.0.0
jest-playwright-preset: 4.0.0(jest-circus@29.7.0)(jest-environment-node@29.7.0)(jest-runner@29.7.0)(jest@29.7.0(@types/node@22.15.29))
jest-playwright-preset: 4.0.0(jest-circus@29.7.0)(jest-environment-node@29.7.0)(jest-runner@29.7.0)(jest@29.7.0(@types/node@22.15.30))
jest-runner: 29.7.0
jest-serializer-html: 7.1.0
jest-watch-typeahead: 2.2.2(jest@29.7.0(@types/node@22.15.29))
jest-watch-typeahead: 2.2.2(jest@29.7.0(@types/node@22.15.30))
nyc: 15.1.0
playwright: 1.52.0
storybook: 8.6.14(prettier@3.5.3)
@@ -10735,7 +10750,7 @@ snapshots:
'@swc/core@1.11.31':
dependencies:
'@swc/counter': 0.1.3
'@swc/types': 0.1.21
'@swc/types': 0.1.22
optionalDependencies:
'@swc/core-darwin-arm64': 1.11.31
'@swc/core-darwin-x64': 1.11.31
@@ -10761,7 +10776,7 @@ snapshots:
'@swc/counter': 0.1.3
jsonc-parser: 3.3.1
'@swc/types@0.1.21':
'@swc/types@0.1.22':
dependencies:
'@swc/counter': 0.1.3
@@ -10830,7 +10845,7 @@ snapshots:
'@types/connect@3.4.38':
dependencies:
'@types/node': 22.15.29
'@types/node': 22.15.30
'@types/cookie@0.6.0': {}
@@ -10899,7 +10914,7 @@ snapshots:
'@types/graceful-fs@4.1.9':
dependencies:
'@types/node': 22.15.29
'@types/node': 22.15.30
'@types/hast@3.0.4':
dependencies:
@@ -10937,11 +10952,11 @@ snapshots:
'@types/mysql@2.15.26':
dependencies:
'@types/node': 22.15.29
'@types/node': 22.15.30
'@types/negotiator@0.6.3': {}
'@types/negotiator@0.6.4': {}
'@types/node@22.15.29':
'@types/node@22.15.30':
dependencies:
undici-types: 6.21.0
@@ -10953,7 +10968,7 @@ snapshots:
'@types/pg@8.6.1':
dependencies:
'@types/node': 22.15.29
'@types/node': 22.15.30
pg-protocol: 1.10.0
pg-types: 2.2.0
@@ -10982,13 +10997,13 @@ snapshots:
'@types/stack-utils@2.0.3': {}
'@types/statuses@2.0.5': {}
'@types/statuses@2.0.6': {}
'@types/stylis@4.2.5': {}
'@types/tedious@4.0.14':
dependencies:
'@types/node': 22.15.29
'@types/node': 22.15.30
'@types/tough-cookie@4.0.5': {}
@@ -11000,11 +11015,11 @@ snapshots:
'@types/wait-on@5.3.4':
dependencies:
'@types/node': 22.15.29
'@types/node': 22.15.30
'@types/ws@8.18.1':
dependencies:
'@types/node': 22.15.29
'@types/node': 22.15.30
'@types/yargs-parser@21.0.3': {}
@@ -11522,7 +11537,7 @@ snapshots:
axios@1.9.0:
dependencies:
follow-redirects: 1.15.9
form-data: 4.0.2
form-data: 4.0.3
proxy-from-env: 1.1.0
transitivePeerDependencies:
- debug
@@ -11998,13 +12013,13 @@ snapshots:
safe-buffer: 5.2.1
sha.js: 2.4.11
create-jest@29.7.0(@types/node@22.15.29):
create-jest@29.7.0(@types/node@22.15.30):
dependencies:
'@jest/types': 29.6.3
chalk: 4.1.2
exit: 0.1.2
graceful-fs: 4.2.11
jest-config: 29.7.0(@types/node@22.15.29)
jest-config: 29.7.0(@types/node@22.15.30)
jest-util: 29.7.0
prompts: 2.4.2
transitivePeerDependencies:
@@ -12927,11 +12942,12 @@ snapshots:
typescript: 5.8.3
webpack: 5.99.9(@swc/core@1.11.31)(esbuild@0.24.2)
form-data@4.0.2:
form-data@4.0.3:
dependencies:
asynckit: 0.4.0
combined-stream: 1.0.8
es-set-tostringtag: 2.1.0
hasown: 2.0.2
mime-types: 2.1.35
forwarded-parse@2.1.2: {}
@@ -13542,7 +13558,7 @@ snapshots:
'@jest/expect': 29.7.0
'@jest/test-result': 29.7.0
'@jest/types': 29.6.3
'@types/node': 22.15.29
'@types/node': 22.15.30
chalk: 4.1.2
co: 4.6.0
dedent: 1.6.0
@@ -13562,16 +13578,16 @@ snapshots:
- babel-plugin-macros
- supports-color
jest-cli@29.7.0(@types/node@22.15.29):
jest-cli@29.7.0(@types/node@22.15.30):
dependencies:
'@jest/core': 29.7.0
'@jest/test-result': 29.7.0
'@jest/types': 29.6.3
chalk: 4.1.2
create-jest: 29.7.0(@types/node@22.15.29)
create-jest: 29.7.0(@types/node@22.15.30)
exit: 0.1.2
import-local: 3.2.0
jest-config: 29.7.0(@types/node@22.15.29)
jest-config: 29.7.0(@types/node@22.15.30)
jest-util: 29.7.0
jest-validate: 29.7.0
yargs: 17.7.2
@@ -13581,7 +13597,7 @@ snapshots:
- supports-color
- ts-node
jest-config@29.7.0(@types/node@22.15.29):
jest-config@29.7.0(@types/node@22.15.30):
dependencies:
'@babel/core': 7.27.4
'@jest/test-sequencer': 29.7.0
@@ -13606,7 +13622,7 @@ snapshots:
slash: 3.0.0
strip-json-comments: 3.1.1
optionalDependencies:
'@types/node': 22.15.29
'@types/node': 22.15.30
transitivePeerDependencies:
- babel-plugin-macros
- supports-color
@@ -13635,7 +13651,7 @@ snapshots:
'@jest/environment': 29.7.0
'@jest/fake-timers': 29.7.0
'@jest/types': 29.6.3
'@types/node': 22.15.29
'@types/node': 22.15.30
jest-mock: 29.7.0
jest-util: 29.7.0
@@ -13645,7 +13661,7 @@ snapshots:
dependencies:
'@jest/types': 29.6.3
'@types/graceful-fs': 4.1.9
'@types/node': 22.15.29
'@types/node': 22.15.30
anymatch: 3.1.3
fb-watchman: 2.0.2
graceful-fs: 4.2.11
@@ -13691,13 +13707,13 @@ snapshots:
jest-mock@29.7.0:
dependencies:
'@jest/types': 29.6.3
'@types/node': 22.15.29
'@types/node': 22.15.30
jest-util: 29.7.0
jest-playwright-preset@4.0.0(jest-circus@29.7.0)(jest-environment-node@29.7.0)(jest-runner@29.7.0)(jest@29.7.0(@types/node@22.15.29)):
jest-playwright-preset@4.0.0(jest-circus@29.7.0)(jest-environment-node@29.7.0)(jest-runner@29.7.0)(jest@29.7.0(@types/node@22.15.30)):
dependencies:
expect-playwright: 0.8.0
jest: 29.7.0(@types/node@22.15.29)
jest: 29.7.0(@types/node@22.15.30)
jest-circus: 29.7.0
jest-environment-node: 29.7.0
jest-process-manager: 0.4.0
@@ -13758,7 +13774,7 @@ snapshots:
'@jest/test-result': 29.7.0
'@jest/transform': 29.7.0
'@jest/types': 29.6.3
'@types/node': 22.15.29
'@types/node': 22.15.30
chalk: 4.1.2
emittery: 0.13.1
graceful-fs: 4.2.11
@@ -13786,7 +13802,7 @@ snapshots:
'@jest/test-result': 29.7.0
'@jest/transform': 29.7.0
'@jest/types': 29.6.3
'@types/node': 22.15.29
'@types/node': 22.15.30
chalk: 4.1.2
cjs-module-lexer: 1.4.3
collect-v8-coverage: 1.0.2
@@ -13836,7 +13852,7 @@ snapshots:
jest-util@29.7.0:
dependencies:
'@jest/types': 29.6.3
'@types/node': 22.15.29
'@types/node': 22.15.30
chalk: 4.1.2
ci-info: 3.9.0
graceful-fs: 4.2.11
@@ -13851,11 +13867,11 @@ snapshots:
leven: 3.1.0
pretty-format: 29.7.0
jest-watch-typeahead@2.2.2(jest@29.7.0(@types/node@22.15.29)):
jest-watch-typeahead@2.2.2(jest@29.7.0(@types/node@22.15.30)):
dependencies:
ansi-escapes: 6.2.1
chalk: 5.4.1
jest: 29.7.0(@types/node@22.15.29)
jest: 29.7.0(@types/node@22.15.30)
jest-regex-util: 29.6.3
jest-watcher: 29.7.0
slash: 5.1.0
@@ -13866,7 +13882,7 @@ snapshots:
dependencies:
'@jest/test-result': 29.7.0
'@jest/types': 29.6.3
'@types/node': 22.15.29
'@types/node': 22.15.30
ansi-escapes: 4.3.2
chalk: 4.1.2
emittery: 0.13.1
@@ -13875,23 +13891,23 @@ snapshots:
jest-worker@27.5.1:
dependencies:
'@types/node': 22.15.29
'@types/node': 22.15.30
merge-stream: 2.0.0
supports-color: 8.1.1
jest-worker@29.7.0:
dependencies:
'@types/node': 22.15.29
'@types/node': 22.15.30
jest-util: 29.7.0
merge-stream: 2.0.0
supports-color: 8.1.1
jest@29.7.0(@types/node@22.15.29):
jest@29.7.0(@types/node@22.15.30):
dependencies:
'@jest/core': 29.7.0
'@jest/types': 29.6.3
import-local: 3.2.0
jest-cli: 29.7.0(@types/node@22.15.29)
jest-cli: 29.7.0(@types/node@22.15.30)
transitivePeerDependencies:
- '@types/node'
- babel-plugin-macros
@@ -14382,22 +14398,22 @@ snapshots:
ms@2.1.3: {}
msw-storybook-addon@2.0.4(msw@2.9.0(@types/node@22.15.29)(typescript@5.8.3)):
msw-storybook-addon@2.0.5(msw@2.10.2(@types/node@22.15.30)(typescript@5.8.3)):
dependencies:
is-node-process: 1.2.0
msw: 2.9.0(@types/node@22.15.29)(typescript@5.8.3)
msw: 2.10.2(@types/node@22.15.30)(typescript@5.8.3)
msw@2.9.0(@types/node@22.15.29)(typescript@5.8.3):
msw@2.10.2(@types/node@22.15.30)(typescript@5.8.3):
dependencies:
'@bundled-es-modules/cookie': 2.0.1
'@bundled-es-modules/statuses': 1.0.1
'@bundled-es-modules/tough-cookie': 0.1.6
'@inquirer/confirm': 5.1.12(@types/node@22.15.29)
'@mswjs/interceptors': 0.38.7
'@inquirer/confirm': 5.1.12(@types/node@22.15.30)
'@mswjs/interceptors': 0.39.2
'@open-draft/deferred-promise': 2.2.0
'@open-draft/until': 2.1.0
'@types/cookie': 0.6.0
'@types/statuses': 2.0.5
'@types/statuses': 2.0.6
graphql: 16.11.0
headers-polyfill: 4.0.3
is-node-process: 1.2.0
@@ -15281,7 +15297,7 @@ snapshots:
dependencies:
debug: 4.4.1
module-details-from-path: 1.0.4
resolve: 1.22.8
resolve: 1.22.10
transitivePeerDependencies:
- supports-color
@@ -15628,7 +15644,7 @@ snapshots:
dependencies:
type-fest: 0.7.1
statuses@2.0.1: {}
statuses@2.0.2: {}
stop-iteration-iterator@1.1.0:
dependencies:

View File

@@ -4,13 +4,18 @@
// https://docs.sentry.io/platforms/javascript/guides/nextjs/
import * as Sentry from "@sentry/nextjs";
import { getEnvironmentStr } from "./src/lib/utils";
import { BehaveAs, getBehaveAs, getEnvironmentStr } from "./src/lib/utils";
const isProductionCloud =
process.env.NODE_ENV === "production" && getBehaveAs() === BehaveAs.CLOUD;
Sentry.init({
dsn: "https://fe4e4aa4a283391808a5da396da20159@o4505260022104064.ingest.us.sentry.io/4507946746380288",
environment: getEnvironmentStr(),
enabled: isProductionCloud,
// Define how likely traces are sampled. Adjust this value in production, or use tracesSampler for greater control.
tracesSampleRate: 1,
tracePropagationTargets: [

View File

@@ -2,15 +2,20 @@
// The config you add here will be used whenever the server handles a request.
// https://docs.sentry.io/platforms/javascript/guides/nextjs/
import { getEnvironmentStr } from "@/lib/utils";
import { BehaveAs, getBehaveAs, getEnvironmentStr } from "@/lib/utils";
import * as Sentry from "@sentry/nextjs";
// import { NodeProfilingIntegration } from "@sentry/profiling-node";
const isProductionCloud =
process.env.NODE_ENV === "production" && getBehaveAs() === BehaveAs.CLOUD;
Sentry.init({
dsn: "https://fe4e4aa4a283391808a5da396da20159@o4505260022104064.ingest.us.sentry.io/4507946746380288",
environment: getEnvironmentStr(),
enabled: isProductionCloud,
// Define how likely traces are sampled. Adjust this value in production, or use tracesSampler for greater control.
tracesSampleRate: 1,
tracePropagationTargets: [

View File

@@ -1,5 +1,15 @@
import getServerSupabase from "@/lib/supabase/getServerSupabase";
import BackendAPI from "@/lib/autogpt-server-api";
import { NextResponse } from "next/server";
import { revalidatePath } from "next/cache";
async function shouldShowOnboarding() {
const api = new BackendAPI();
return (
(await api.isOnboardingEnabled()) &&
!(await api.getUserOnboarding()).completedSteps.includes("CONGRATS")
);
}
// Validate redirect URL to prevent open redirect attacks
function validateRedirectUrl(url: string): string {
@@ -15,13 +25,14 @@ function validateRedirectUrl(url: string): string {
export async function GET(request: Request) {
const { searchParams, origin } = new URL(request.url);
const code = searchParams.get("code");
// if "next" is in param, use it as the redirect URL
const nextParam = searchParams.get("next") ?? "/";
// Validate redirect URL to prevent open redirect attacks
const next = validateRedirectUrl(nextParam);
if (code) {
const supabase = getServerSupabase();
const supabase = await getServerSupabase();
if (!supabase) {
return NextResponse.redirect(`${origin}/error`);
@@ -30,6 +41,21 @@ export async function GET(request: Request) {
const { data, error } = await supabase.auth.exchangeCodeForSession(code);
// data.session?.refresh_token is available if you need to store it for later use
if (!error) {
try {
const api = new BackendAPI();
await api.createUser();
if (await shouldShowOnboarding()) {
next = "/onboarding";
revalidatePath("/onboarding", "layout");
} else {
revalidatePath("/", "layout");
}
} catch (createUserError) {
console.error("Error creating user:", createUserError);
// Continue with redirect even if createUser fails
}
const forwardedHost = request.headers.get("x-forwarded-host"); // original origin before load balancer
const isLocalEnv = process.env.NODE_ENV === "development";
if (isLocalEnv) {

View File

@@ -12,7 +12,7 @@ export async function GET(request: NextRequest) {
const next = searchParams.get("next") ?? "/";
if (token_hash && type) {
const supabase = getServerSupabase();
const supabase = await getServerSupabase();
if (!supabase) {
redirect("/error");

View File

@@ -21,7 +21,7 @@ export async function login(
turnstileToken: string,
) {
return await Sentry.withServerActionInstrumentation("login", {}, async () => {
const supabase = getServerSupabase();
const supabase = await getServerSupabase();
const api = new BackendAPI();
if (!supabase) {
@@ -60,14 +60,13 @@ export async function providerLogin(provider: LoginProvider) {
"providerLogin",
{},
async () => {
const supabase = getServerSupabase();
const api = new BackendAPI();
const supabase = await getServerSupabase();
if (!supabase) {
redirect("/error");
}
const { error } = await supabase!.auth.signInWithOAuth({
const { data, error } = await supabase!.auth.signInWithOAuth({
provider: provider,
options: {
redirectTo:
@@ -81,12 +80,13 @@ export async function providerLogin(provider: LoginProvider) {
return error.message;
}
await api.createUser();
// Don't onboard if disabled or already onboarded
if (await shouldShowOnboarding()) {
revalidatePath("/onboarding", "layout");
redirect("/onboarding");
// Redirect to the OAuth provider's URL
if (data?.url) {
redirect(data.url);
}
// Note: api.createUser() and onboarding check happen in the callback handler
// after the session is established. See `auth/callback/route.ts`.
},
);
}

View File

@@ -1,5 +1,4 @@
"use client";
import { login, providerLogin } from "./actions";
import {
Form,
FormControl,
@@ -8,14 +7,8 @@ import {
FormLabel,
FormMessage,
} from "@/components/ui/form";
import { useForm } from "react-hook-form";
import { Input } from "@/components/ui/input";
import { z } from "zod";
import { zodResolver } from "@hookform/resolvers/zod";
import { useCallback, useState } from "react";
import { useRouter } from "next/navigation";
import Link from "next/link";
import useSupabase from "@/lib/supabase/useSupabase";
import LoadingBox from "@/components/ui/loading";
import {
AuthCard,
@@ -23,86 +16,34 @@ import {
AuthButton,
AuthFeedback,
AuthBottomText,
GoogleOAuthButton,
PasswordInput,
Turnstile,
} from "@/components/auth";
import { loginFormSchema } from "@/types/auth";
import { getBehaveAs } from "@/lib/utils";
import { useTurnstile } from "@/hooks/useTurnstile";
import { useLoginPage } from "./useLoginPage";
export default function LoginPage() {
const { supabase, user, isUserLoading } = useSupabase();
const [feedback, setFeedback] = useState<string | null>(null);
const router = useRouter();
const [isLoading, setIsLoading] = useState(false);
const {
form,
feedback,
turnstile,
captchaKey,
isLoading,
isCloudEnv,
isLoggedIn,
isUserLoading,
isGoogleLoading,
isSupabaseAvailable,
handleSubmit,
handleProviderLogin,
} = useLoginPage();
const turnstile = useTurnstile({
action: "login",
autoVerify: false,
resetOnError: true,
});
const form = useForm<z.infer<typeof loginFormSchema>>({
resolver: zodResolver(loginFormSchema),
defaultValues: {
email: "",
password: "",
},
});
// TODO: uncomment when we enable social login
// const onProviderLogin = useCallback(async (
// provider: LoginProvider,
// ) => {
// setIsLoading(true);
// const error = await providerLogin(provider);
// setIsLoading(false);
// if (error) {
// setFeedback(error);
// return;
// }
// setFeedback(null);
// }, [supabase]);
const onLogin = useCallback(
async (data: z.infer<typeof loginFormSchema>) => {
setIsLoading(true);
if (!(await form.trigger())) {
setIsLoading(false);
return;
}
if (!turnstile.verified) {
setFeedback("Please complete the CAPTCHA challenge.");
setIsLoading(false);
return;
}
const error = await login(data, turnstile.token as string);
await supabase?.auth.refreshSession();
setIsLoading(false);
if (error) {
setFeedback(error);
// Always reset the turnstile on any error
turnstile.reset();
return;
}
setFeedback(null);
},
[form, turnstile, supabase],
);
if (user) {
console.debug("User exists, redirecting to /");
router.push("/");
}
if (isUserLoading || user) {
if (isUserLoading || isLoggedIn) {
return <LoadingBox className="h-[80vh]" />;
}
if (!supabase) {
if (!isSupabaseAvailable) {
return (
<div>
User accounts are disabled because Supabase client is unavailable
@@ -113,8 +54,26 @@ export default function LoginPage() {
return (
<AuthCard className="mx-auto">
<AuthHeader>Login to your account</AuthHeader>
{isCloudEnv ? (
<>
<div className="mb-6">
<GoogleOAuthButton
onClick={() => handleProviderLogin("google")}
isLoading={isGoogleLoading}
disabled={isLoading}
/>
</div>
<div className="mb-6 flex items-center">
<div className="flex-1 border-t border-gray-300"></div>
<span className="mx-3 text-sm text-gray-500">or</span>
<div className="flex-1 border-t border-gray-300"></div>
</div>
</>
) : null}
<Form {...form}>
<form onSubmit={form.handleSubmit(onLogin)}>
<form onSubmit={handleSubmit}>
<FormField
control={form.control}
name="email"
@@ -160,6 +119,7 @@ export default function LoginPage() {
{/* Turnstile CAPTCHA Component */}
<Turnstile
key={captchaKey}
siteKey={turnstile.siteKey}
onVerify={turnstile.handleVerify}
onExpire={turnstile.handleExpire}
@@ -169,11 +129,7 @@ export default function LoginPage() {
shouldRender={turnstile.shouldRender}
/>
<AuthButton
onClick={() => onLogin(form.getValues())}
isLoading={isLoading}
type="submit"
>
<AuthButton isLoading={isLoading} type="submit">
Login
</AuthButton>
</form>

Some files were not shown because too many files have changed in this diff Show More