Merge branch 'dev' into swiftyos/sdk

This commit is contained in:
Swifty
2025-06-10 12:08:15 +02:00
committed by GitHub
101 changed files with 5497 additions and 2637 deletions

View File

@@ -1,282 +1,51 @@
name: AutoGPT Platform - Dev Deploy PR Event Dispatcher
name: AutoGPT Platform - Deploy Dev Environment
on:
pull_request:
types: [closed]
issue_comment:
types: [created]
push:
branches: [ dev ]
paths:
- 'autogpt_platform/**'
permissions:
issues: write
pull-requests: write
contents: 'read'
id-token: 'write'
jobs:
dispatch:
migrate:
environment: develop
name: Run migrations for AutoGPT Platform
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Install Python dependencies
run: |
python -m pip install --upgrade pip
pip install prisma
- name: Run Backend Migrations
working-directory: ./autogpt_platform/backend
run: |
python -m prisma migrate deploy
env:
DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}
DIRECT_URL: ${{ secrets.BACKEND_DATABASE_URL }}
trigger:
needs: migrate
runs-on: ubuntu-latest
steps:
- name: Check comment permissions and deployment status
id: check_status
if: github.event_name == 'issue_comment' && github.event.issue.pull_request
uses: actions/github-script@v7
- name: Trigger deploy workflow
uses: peter-evans/repository-dispatch@v3
with:
script: |
const commentBody = context.payload.comment.body.trim();
const commentUser = context.payload.comment.user.login;
const prAuthor = context.payload.issue.user.login;
const authorAssociation = context.payload.comment.author_association;
const triggeringCommentId = context.payload.comment.id;
// Check permissions
const hasPermission = (
authorAssociation === 'OWNER' ||
authorAssociation === 'MEMBER' ||
authorAssociation === 'COLLABORATOR'
);
core.setOutput('comment_body', commentBody);
core.setOutput('has_permission', hasPermission);
if (!hasPermission && (commentBody === '!deploy' || commentBody === '!undeploy')) {
core.setOutput('permission_denied', 'true');
return;
}
if (commentBody !== '!deploy' && commentBody !== '!undeploy') {
return;
}
// Get all comments to check deployment status
const commentsResponse = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
per_page: 100
});
// Filter out the triggering comment
const commentsData = commentsResponse.data.filter(comment => comment.id !== triggeringCommentId);
// Find the last deploy and undeploy commands
let lastDeployIndex = -2;
let lastUndeployIndex = -1;
console.log(`Found ${commentsResponse.data.length} total comments, using ${commentsData.length} for status check after filtering`);
// Iterate through comments in reverse to find the most recent commands
for (let i = commentsData.length - 1; i >= 0; i--) {
const currentCommentBody = commentsData[i].body.trim();
console.log(`Processing comment ${i}: ${currentCommentBody}`);
if (currentCommentBody === '!deploy' && lastDeployIndex === -2) {
lastDeployIndex = i;
} else if (currentCommentBody === '!undeploy' && lastUndeployIndex === -1) {
lastUndeployIndex = i;
}
// Break early if we found both
if (lastDeployIndex !== -2 && lastUndeployIndex !== -1) {
break;
}
}
console.log(`Last deploy index: ${lastDeployIndex}`);
console.log(`Last undeploy index: ${lastUndeployIndex}`);
// Currently deployed if there's a deploy command after the last undeploy
const isCurrentlyDeployed = lastDeployIndex > lastUndeployIndex;
// Determine actions based on current state and requested command
if (commentBody === '!deploy') {
if (isCurrentlyDeployed) {
core.setOutput('deploy_blocked', 'already_deployed');
} else {
core.setOutput('should_deploy', 'true');
}
} else if (commentBody === '!undeploy') {
if (!isCurrentlyDeployed) {
// Check if there was ever a deploy
const hasEverDeployed = lastDeployIndex !== -2;
core.setOutput('undeploy_blocked', hasEverDeployed ? 'already_undeployed' : 'never_deployed');
} else {
core.setOutput('should_undeploy', 'true');
}
}
core.setOutput('has_active_deployment', isCurrentlyDeployed);
- name: Post permission denied comment
if: steps.check_status.outputs.permission_denied == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `❌ **Permission denied**: Only the repository owners, members, or collaborators can use deployment commands.`
});
- name: Post deploy blocked comment
if: steps.check_status.outputs.deploy_blocked == 'already_deployed'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `⚠️ **Deploy skipped**: This PR already has an active deployment. Use \`!undeploy\` first if you want to redeploy.`
});
- name: Post undeploy blocked comment
if: steps.check_status.outputs.undeploy_blocked != ''
uses: actions/github-script@v7
with:
script: |
const reason = '${{ steps.check_status.outputs.undeploy_blocked }}';
let message;
if (reason === 'never_deployed') {
message = `⚠️ **Undeploy skipped**: This PR has never been deployed. Use \`!deploy\` first.`;
} else if (reason === 'already_undeployed') {
message = `⚠️ **Undeploy skipped**: This PR is already undeployed.`;
}
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: message
});
- name: Get PR details for deployment
id: pr_details
if: steps.check_status.outputs.should_deploy == 'true' || steps.check_status.outputs.should_undeploy == 'true'
uses: actions/github-script@v7
with:
script: |
const pr = await github.rest.pulls.get({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: context.issue.number
});
core.setOutput('pr_number', pr.data.number);
core.setOutput('pr_title', pr.data.title);
core.setOutput('pr_state', pr.data.state);
- name: Dispatch Deploy Event
if: steps.check_status.outputs.should_deploy == 'true'
uses: peter-evans/repository-dispatch@v2
with:
token: ${{ secrets.DISPATCH_TOKEN }}
token: ${{ secrets.DEPLOY_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: pr-event
client-payload: |
{
"action": "deploy",
"pr_number": "${{ steps.pr_details.outputs.pr_number }}",
"pr_title": "${{ steps.pr_details.outputs.pr_title }}",
"pr_state": "${{ steps.pr_details.outputs.pr_state }}",
"repo": "${{ github.repository }}"
}
- name: Post deploy success comment
if: steps.check_status.outputs.should_deploy == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `🚀 **Deploying PR #${{ steps.pr_details.outputs.pr_number }}** to development environment...`
});
- name: Dispatch Undeploy Event (from comment)
if: steps.check_status.outputs.should_undeploy == 'true'
uses: peter-evans/repository-dispatch@v2
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: pr-event
client-payload: |
{
"action": "undeploy",
"pr_number": "${{ steps.pr_details.outputs.pr_number }}",
"pr_title": "${{ steps.pr_details.outputs.pr_title }}",
"pr_state": "${{ steps.pr_details.outputs.pr_state }}",
"repo": "${{ github.repository }}"
}
- name: Post undeploy success comment
if: steps.check_status.outputs.should_undeploy == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `🗑️ **Undeploying PR #${{ steps.pr_details.outputs.pr_number }}** from development environment...`
});
- name: Check deployment status on PR close
id: check_pr_close
if: github.event_name == 'pull_request' && github.event.action == 'closed'
uses: actions/github-script@v7
with:
script: |
const comments = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number
});
let lastDeployIndex = -1;
let lastUndeployIndex = -1;
comments.data.forEach((comment, index) => {
if (comment.body.trim() === '!deploy') {
lastDeployIndex = index;
} else if (comment.body.trim() === '!undeploy') {
lastUndeployIndex = index;
}
});
// Should undeploy if there's a !deploy without a subsequent !undeploy
const shouldUndeploy = lastDeployIndex !== -1 && lastDeployIndex > lastUndeployIndex;
core.setOutput('should_undeploy', shouldUndeploy);
- name: Dispatch Undeploy Event (PR closed with active deployment)
if: >-
github.event_name == 'pull_request' &&
github.event.action == 'closed' &&
steps.check_pr_close.outputs.should_undeploy == 'true'
uses: peter-evans/repository-dispatch@v2
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: pr-event
client-payload: |
{
"action": "undeploy",
"pr_number": "${{ github.event.pull_request.number }}",
"pr_title": "${{ github.event.pull_request.title }}",
"pr_state": "${{ github.event.pull_request.state }}",
"repo": "${{ github.repository }}"
}
- name: Post PR close undeploy comment
if: >-
github.event_name == 'pull_request' &&
github.event.action == 'closed' &&
steps.check_pr_close.outputs.should_undeploy == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `🧹 **Auto-undeploying**: PR closed with active deployment. Cleaning up development environment for PR #${{ github.event.pull_request.number }}.`
});
event-type: build_deploy_dev
client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "repository": "${{ github.repository }}"}'

View File

@@ -82,7 +82,7 @@ jobs:
- name: Dispatch Deploy Event
if: steps.check_status.outputs.should_deploy == 'true'
uses: peter-evans/repository-dispatch@v2
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
@@ -110,7 +110,7 @@ jobs:
- name: Dispatch Undeploy Event (from comment)
if: steps.check_status.outputs.should_undeploy == 'true'
uses: peter-evans/repository-dispatch@v2
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
@@ -168,7 +168,7 @@ jobs:
github.event_name == 'pull_request' &&
github.event.action == 'closed' &&
steps.check_pr_close.outputs.should_undeploy == 'true'
uses: peter-evans/repository-dispatch@v2
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure

View File

@@ -1,402 +1,131 @@
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
## Repository Overview
## Block Development with SDK
AutoGPT Platform is a monorepo containing:
- **Backend** (`/backend`): Python FastAPI server with async support
- **Frontend** (`/frontend`): Next.js React application
- **Shared Libraries** (`/autogpt_libs`): Common Python utilities
The AutoGPT Platform now includes a comprehensive SDK that dramatically simplifies block creation. Blocks can be fully self-contained with zero external configuration required.
### Quick Start - Creating a New Block
```python
from backend.sdk import *
@provider("my-service") # Auto-registers new provider
@cost_config(
BlockCost(cost_amount=5, cost_type=BlockCostType.RUN),
BlockCost(cost_amount=1, cost_type=BlockCostType.BYTE)
)
@default_credentials(
APIKeyCredentials(
id="my-service-default",
provider="my-service",
api_key=SecretStr("default-api-key"),
title="My Service Default API Key"
)
)
class MyServiceBlock(Block):
class Input(BlockSchema):
credentials: CredentialsMetaInput = CredentialsField(
provider="my-service",
supported_credential_types={"api_key"}
)
text: String = SchemaField(description="Input text")
class Output(BlockSchema):
result: String = SchemaField(description="Output result")
error: String = SchemaField(description="Error message", default="")
def __init__(self):
super().__init__(
id="my-service-block-12345678-1234-1234-1234-123456789012",
description="Process text using My Service",
categories={BlockCategory.TEXT},
input_schema=MyServiceBlock.Input,
output_schema=MyServiceBlock.Output,
)
def run(self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs) -> BlockOutput:
try:
api_key = credentials.api_key.get_secret_value()
# Process with API
yield "result", f"Processed: {input_data.text}"
except Exception as e:
yield "error", str(e)
```
### Key Features
1. **Single Import**: `from backend.sdk import *` provides everything needed
2. **Auto-Registration**: No manual configuration files to update
3. **Dynamic Providers**: Any string works as a provider name
4. **Self-Contained**: All configuration via decorators
### Available Decorators
- `@provider("name")` - Register new provider
- `@cost_config(...)` - Set block execution costs
- `@default_credentials(...)` - Provide default API credentials
- `@webhook_config("provider", ManagerClass)` - Register webhook manager
- `@oauth_config("provider", HandlerClass)` - Register OAuth handler
### Creating Blocks with Webhooks
```python
from backend.sdk import *
# First, create webhook manager
class MyWebhookManager(BaseWebhooksManager):
PROVIDER_NAME = "my-service"
class WebhookType(str, Enum):
DATA_UPDATE = "data_update"
async def validate_payload(self, webhook, request) -> tuple[dict, str]:
payload = await request.json()
event_type = request.headers.get("X-MyService-Event", "unknown")
return payload, event_type
async def _register_webhook(self, webhook, credentials) -> tuple[str, dict]:
# Register with external service
return "webhook-id", {"status": "registered"}
async def _deregister_webhook(self, webhook, credentials) -> None:
# Deregister from external service
pass
# Then create webhook block
@provider("my-service")
@webhook_config("my-service", MyWebhookManager)
class MyWebhookBlock(Block):
class Input(BlockSchema):
events: BaseModel = SchemaField(
description="Events to listen for",
default={"data_update": True}
)
payload: Dict = SchemaField(
description="Webhook payload",
default={},
hidden=True
)
class Output(BlockSchema):
event_type: String = SchemaField(description="Event type")
event_data: Dict = SchemaField(description="Event data")
def __init__(self):
super().__init__(
id="my-webhook-block-12345678-1234-1234-1234-123456789012",
description="Listen for My Service webhooks",
categories={BlockCategory.INPUT},
input_schema=MyWebhookBlock.Input,
output_schema=MyWebhookBlock.Output,
block_type=BlockType.WEBHOOK,
webhook_config=BlockWebhookConfig(
provider="my-service",
webhook_type="data_update",
event_filter_input="events",
),
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
payload = input_data.payload
yield "event_type", payload.get("type", "unknown")
yield "event_data", payload
```
### Creating Blocks with OAuth
```python
from backend.sdk import *
# First, create OAuth handler
class MyServiceOAuthHandler(BaseOAuthHandler):
PROVIDER_NAME = "my-service"
DEFAULT_SCOPES = ["read", "write"]
def get_login_url(self, scopes: list[str], state: str, code_challenge: Optional[str]) -> str:
# Build OAuth authorization URL
return f"https://my-service.com/oauth/authorize?..."
def exchange_code_for_tokens(self, code: str, scopes: list[str], code_verifier: Optional[str]) -> OAuth2Credentials:
# Exchange authorization code for tokens
return OAuth2Credentials(
provider="my-service",
access_token=SecretStr("access-token"),
refresh_token=SecretStr("refresh-token"),
scopes=scopes,
access_token_expires_at=int(time.time() + 3600)
)
# Then create OAuth-enabled block
@provider("my-service")
@oauth_config("my-service", MyServiceOAuthHandler)
class MyOAuthBlock(Block):
class Input(BlockSchema):
credentials: CredentialsMetaInput = CredentialsField(
provider="my-service",
supported_credential_types={"oauth2"},
required_scopes={"read", "write"}
)
action: String = SchemaField(description="Action to perform")
class Output(BlockSchema):
result: Dict = SchemaField(description="API response")
error: String = SchemaField(description="Error message", default="")
def __init__(self):
super().__init__(
id="my-oauth-block-12345678-1234-1234-1234-123456789012",
description="Interact with My Service using OAuth",
categories={BlockCategory.DEVELOPER_TOOLS},
input_schema=MyOAuthBlock.Input,
output_schema=MyOAuthBlock.Output,
)
def run(self, input_data: Input, *, credentials: OAuth2Credentials, **kwargs) -> BlockOutput:
try:
headers = {"Authorization": f"Bearer {credentials.access_token.get_secret_value()}"}
# Make API call with OAuth token
yield "result", {"status": "success", "action": input_data.action}
except Exception as e:
yield "error", str(e)
```
### SDK Components Available
The SDK provides 68+ components via `from backend.sdk import *`:
**Core Block Components:**
- `Block`, `BlockSchema`, `BlockOutput`, `BlockCategory`, `BlockType`
- `SchemaField`, `CredentialsField`, `CredentialsMetaInput`
**Credential Types:**
- `APIKeyCredentials`, `OAuth2Credentials`, `UserPasswordCredentials`
**Cost System:**
- `BlockCost`, `BlockCostType`, `NodeExecutionStats`
**Type Aliases:**
- `String`, `Integer`, `Float`, `Boolean` (cleaner than str, int, etc.)
**Common Types:**
- `List`, `Dict`, `Optional`, `Any`, `Union`, `BaseModel`, `SecretStr`
**Utilities:**
- `json`, `logging`, `store_media_file`, `MediaFileType`
### Best Practices
1. **Use UUID for Block ID**: Generate a unique UUID for each block
2. **Handle Errors**: Always include error handling in the run method
3. **Yield All Outputs**: Ensure all output schema fields are yielded
4. **Test Your Block**: Include test_input and test_output in __init__
5. **Document Well**: Provide clear descriptions for the block and all fields
### No Manual Configuration Needed
With the SDK, you never need to manually update these files:
- `backend/data/block_cost_config.py`
- `backend/integrations/credentials_store.py`
- `backend/integrations/providers.py`
- `backend/integrations/oauth/__init__.py`
- `backend/integrations/webhooks/__init__.py`
Everything is handled automatically by the decorators!
## Project Architecture
The AutoGPT Platform is a microservice-based system for creating and running AI-powered agent workflows. It consists of three main components:
### Core Components
- **Backend** (`backend/`): Python FastAPI microservices with Redis, RabbitMQ, and PostgreSQL
- **Frontend** (`frontend/`): Next.js 14 application with TypeScript and Radix UI components
- **Shared Libraries** (`autogpt_libs/`): Common Python utilities for auth, logging, rate limiting
### Service Architecture
The backend runs multiple services that communicate via Redis and RabbitMQ:
- **REST API Server** (port 8006-8007): Main HTTP API endpoints
- **WebSocket Server** (port 8001): Real-time communication for frontend
- **Executor** (port 8002): Handles workflow execution with block-based architecture
- **Scheduler** (port 8003): Manages scheduled agent runs
- **Database Manager**: Handles migrations and database connections
- **Notification Manager**: Email notifications and user alerts
### Data Model
- **AgentGraph**: Core workflow definition with nodes and links
- **AgentGraphExecution**: Runtime execution instances with status tracking
- **User**: Authentication via Supabase with credit system and integrations
- **Block**: Individual workflow components (400+ integrations supported)
- **LibraryAgent**: Reusable agent templates
- **StoreListing**: Marketplace for sharing agents
## Development Commands
## Essential Commands
### Backend Development
```bash
cd backend
poetry install
poetry run app # All services
poetry run rest # REST API only
poetry run ws # WebSocket only
poetry run executor # Executor only
poetry run scheduler # Scheduler only
poetry run format # Black + isort formatting
poetry run lint # Ruff linting
poetry run test # Run tests with Docker PostgreSQL
# Install dependencies
cd backend && poetry install
# Run database migrations
poetry run prisma migrate dev
# Start all services (database, redis, rabbitmq)
docker compose up -d
# Run the backend server
poetry run serve
# Run tests
poetry run test
# Run specific test
poetry run pytest path/to/test_file.py::test_function_name
# Lint and format
poetry run format # Black + isort
poetry run lint # ruff
```
More details can be found in TESTING.md
#### Creating/Updating Snapshots
When you first write a test or when the expected output changes:
```bash
poetry run pytest path/to/test.py --snapshot-update
```
⚠️ **Important**: Always review snapshot changes before committing! Use `git diff` to verify the changes are expected.
### Frontend Development
```bash
cd frontend
npm install
npm run dev # Development server (port 3000)
npm run build # Production build
npm run lint # ESLint + Prettier
npm run format # Prettier only
npm run type-check # TypeScript checking
npm run test # Playwright E2E tests
npm run test-ui # Playwright UI mode
npm run storybook # Component development (port 6006)
# Install dependencies
cd frontend && npm install
# Start development server
npm run dev
# Run E2E tests
npm run test
# Run Storybook for component development
npm run storybook
# Build production
npm run build
# Type checking
npm run type-check
```
### Docker Operations
```bash
docker compose up -d # Start all backend services
docker compose stop # Stop services
docker compose down # Stop and remove containers
docker compose logs -f <service> # View service logs
docker compose build <service> # Rebuild specific service
```
## Architecture Overview
### Database Management
```bash
cd backend
poetry run prisma migrate dev # Apply migrations
poetry run prisma generate # Generate Prisma client
poetry run prisma db push # Push schema changes
```
## Code Architecture Patterns
### Block System
The core execution model uses a block-based architecture where each block represents an atomic operation:
- Blocks inherit from `backend.blocks.block.Block`
- Input/Output schemas defined using Pydantic models
- Blocks are auto-discovered and registered at runtime
- Each block has a unique UUID and category classification
### Data Layer
- **Prisma ORM** for PostgreSQL with Python async client
- **Repository pattern** in `backend/data/` modules
- **Pydantic models** for API serialization in `backend/data/model.py`
- **Database connection pooling** via `backend/data/db.py`
### API Architecture
- **FastAPI** with automatic OpenAPI generation
- **WebSocket support** for real-time execution updates
- **Supabase integration** for authentication and row-level security
- **Middleware** for auth, CORS, rate limiting in `autogpt_libs/`
### Backend Architecture
- **API Layer**: FastAPI with REST and WebSocket endpoints
- **Database**: PostgreSQL with Prisma ORM, includes pgvector for embeddings
- **Queue System**: RabbitMQ for async task processing
- **Execution Engine**: Separate executor service processes agent workflows
- **Authentication**: JWT-based with Supabase integration
### Frontend Architecture
- **Next.js App Router** with TypeScript
- **React Flow** for visual workflow builder (`@xyflow/react`)
- **Zustand/React Context** for state management
- **Radix UI** components with Tailwind CSS styling
- **Supabase client** for auth and real-time subscriptions
- **Framework**: Next.js App Router with React Server Components
- **State Management**: React hooks + Supabase client for real-time updates
- **Workflow Builder**: Visual graph editor using @xyflow/react
- **UI Components**: Radix UI primitives with Tailwind CSS styling
- **Feature Flags**: LaunchDarkly integration
## Environment Setup
### Key Concepts
1. **Agent Graphs**: Workflow definitions stored as JSON, executed by the backend
2. **Blocks**: Reusable components in `/backend/blocks/` that perform specific tasks
3. **Integrations**: OAuth and API connections stored per user
4. **Store**: Marketplace for sharing agent templates
### Required Environment Variables
### Testing Approach
- Backend uses pytest with snapshot testing for API responses
- Test files are colocated with source files (`*_test.py`)
- Frontend uses Playwright for E2E tests
- Component testing via Storybook
**Backend (.env)**:
- `DATABASE_URL` - PostgreSQL connection string
- `REDIS_HOST` - Redis server for caching/sessions
- `RABBITMQ_HOST` - RabbitMQ for async messaging
- `SUPABASE_URL` + `SUPABASE_JWT_SECRET` - Authentication
- `ENABLE_AUTH=true` - Enable Supabase authentication
### Database Schema
Key models (defined in `/backend/schema.prisma`):
- `User`: Authentication and profile data
- `AgentGraph`: Workflow definitions with version control
- `AgentGraphExecution`: Execution history and results
- `AgentNode`: Individual nodes in a workflow
- `StoreListing`: Marketplace listings for sharing agents
**Frontend (.env.local)**:
- `NEXT_PUBLIC_AGPT_SERVER_URL` - Backend REST API URL
- `NEXT_PUBLIC_AGPT_WS_SERVER_URL` - Backend WebSocket URL
- `NEXT_PUBLIC_SUPABASE_URL` + `NEXT_PUBLIC_SUPABASE_ANON_KEY` - Auth
### Environment Configuration
- Backend: `.env` file in `/backend`
- Frontend: `.env.local` file in `/frontend`
- Both require Supabase credentials and API keys for various services
### Integration Setup
The platform supports 400+ integrations requiring various API keys:
- **AI Providers**: OpenAI, Anthropic, Groq, Replicate
- **OAuth Providers**: GitHub, Google, Linear, Twitter, Todoist
- **Business Tools**: Stripe, HubSpot, Discord, Reddit
### Common Development Tasks
## Testing Strategy
**Adding a new block:**
1. Create new file in `/backend/backend/blocks/`
2. Inherit from `Block` base class
3. Define input/output schemas
4. Implement `run` method
5. Register in block registry
### Backend Testing
- **pytest** with async support for unit/integration tests
- **Docker PostgreSQL** instance for database tests
- **Faker** for test data generation
- Run tests: `poetry run test`
**Modifying the API:**
1. Update route in `/backend/backend/server/routers/`
2. Add/update Pydantic models in same directory
3. Write tests alongside the route file
4. Run `poetry run test` to verify
### Frontend Testing
- **Playwright** for end-to-end testing
- **Storybook** for component testing and documentation
- **TypeScript** strict mode for compile-time safety
- Run tests: `npm run test` or `npm run test-ui`
## Development Workflow
1. **Start backend services**: `docker compose up -d`
2. **Start frontend**: `cd frontend && npm run dev`
3. **Access application**: http://localhost:3000
4. **View Storybook**: http://localhost:6006
5. **Monitor logs**: `docker compose logs -f <service>`
### Code Quality
- **Backend**: Use `poetry run format` then `poetry run lint` before commits
- **Frontend**: Use `npm run format` then `npm run lint` before commits
- **Type checking**: Run `npm run type-check` for frontend TypeScript validation
### Database Changes
1. Edit `schema.prisma` file
2. Run `poetry run prisma migrate dev --name <migration_name>`
3. Commit both schema and migration files
## Performance Considerations
- **Executor scaling**: Use `docker compose up -d --scale executor=3` for high load
- **Redis caching**: Implemented for user sessions and API responses
- **Database indexing**: Key indexes on user_id, execution_id, created_at fields
- **Frontend optimization**: Next.js build includes automatic code splitting
**Frontend feature development:**
1. Components go in `/frontend/src/components/`
2. Use existing UI components from `/frontend/src/components/ui/`
3. Add Storybook stories for new components
4. Test with Playwright if user-facing

View File

@@ -0,0 +1,237 @@
# Backend Testing Guide
This guide covers testing practices for the AutoGPT Platform backend, with a focus on snapshot testing for API endpoints.
## Table of Contents
- [Overview](#overview)
- [Running Tests](#running-tests)
- [Snapshot Testing](#snapshot-testing)
- [Writing Tests for API Routes](#writing-tests-for-api-routes)
- [Best Practices](#best-practices)
## Overview
The backend uses pytest for testing with the following key libraries:
- `pytest` - Test framework
- `pytest-asyncio` - Async test support
- `pytest-mock` - Mocking support
- `pytest-snapshot` - Snapshot testing for API responses
## Running Tests
### Run all tests
```bash
poetry run test
```
### Run specific test file
```bash
poetry run pytest path/to/test_file.py
```
### Run with verbose output
```bash
poetry run pytest -v
```
### Run with coverage
```bash
poetry run pytest --cov=backend
```
## Snapshot Testing
Snapshot testing captures the output of your code and compares it against previously saved snapshots. This is particularly useful for testing API responses.
### How Snapshot Testing Works
1. First run: Creates snapshot files in `snapshots/` directories
2. Subsequent runs: Compares output against saved snapshots
3. Changes detected: Test fails if output differs from snapshot
### Creating/Updating Snapshots
When you first write a test or when the expected output changes:
```bash
poetry run pytest path/to/test.py --snapshot-update
```
⚠️ **Important**: Always review snapshot changes before committing! Use `git diff` to verify the changes are expected.
### Snapshot Test Example
```python
import json
from pytest_snapshot.plugin import Snapshot
def test_api_endpoint(snapshot: Snapshot):
response = client.get("/api/endpoint")
# Snapshot the response
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(response.json(), indent=2, sort_keys=True),
"endpoint_response"
)
```
### Best Practices for Snapshots
1. **Use descriptive names**: `"user_list_response"` not `"response1"`
2. **Sort JSON keys**: Ensures consistent snapshots
3. **Format JSON**: Use `indent=2` for readable diffs
4. **Exclude dynamic data**: Remove timestamps, IDs, etc. that change between runs
Example of excluding dynamic data:
```python
response_data = response.json()
# Remove dynamic fields for snapshot
response_data.pop("created_at", None)
response_data.pop("id", None)
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(response_data, indent=2, sort_keys=True),
"static_response_data"
)
```
## Writing Tests for API Routes
### Basic Structure
```python
import json
import fastapi
import fastapi.testclient
import pytest
from pytest_snapshot.plugin import Snapshot
from backend.server.v2.myroute import router
app = fastapi.FastAPI()
app.include_router(router)
client = fastapi.testclient.TestClient(app)
def test_endpoint_success(snapshot: Snapshot):
response = client.get("/endpoint")
assert response.status_code == 200
# Test specific fields
data = response.json()
assert data["status"] == "success"
# Snapshot the full response
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(data, indent=2, sort_keys=True),
"endpoint_success_response"
)
```
### Testing with Authentication
```python
def override_auth_middleware():
return {"sub": "test-user-id"}
def override_get_user_id():
return "test-user-id"
app.dependency_overrides[auth_middleware] = override_auth_middleware
app.dependency_overrides[get_user_id] = override_get_user_id
```
### Mocking External Services
```python
def test_external_api_call(mocker, snapshot):
# Mock external service
mock_response = {"external": "data"}
mocker.patch(
"backend.services.external_api.call",
return_value=mock_response
)
response = client.post("/api/process")
assert response.status_code == 200
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(response.json(), indent=2, sort_keys=True),
"process_with_external_response"
)
```
## Best Practices
### 1. Test Organization
- Place tests next to the code: `routes.py` → `routes_test.py`
- Use descriptive test names: `test_create_user_with_invalid_email`
- Group related tests in classes when appropriate
### 2. Test Coverage
- Test happy path and error cases
- Test edge cases (empty data, invalid formats)
- Test authentication and authorization
### 3. Snapshot Testing Guidelines
- Review all snapshot changes carefully
- Don't snapshot sensitive data
- Keep snapshots focused and minimal
- Update snapshots intentionally, not accidentally
### 4. Async Testing
- Use regular `def` for FastAPI TestClient tests
- Use `async def` with `@pytest.mark.asyncio` for testing async functions directly
### 5. Fixtures
Create reusable fixtures for common test data:
```python
@pytest.fixture
def sample_user():
return {
"email": "test@example.com",
"name": "Test User"
}
def test_create_user(sample_user, snapshot):
response = client.post("/users", json=sample_user)
# ... test implementation
```
## CI/CD Integration
The GitHub Actions workflow automatically runs tests on:
- Pull requests
- Pushes to main branch
Snapshot tests work in CI as follows:
1. Snapshot files are committed to the repository
2. CI compares test output against the committed snapshots
3. The build fails if the snapshots don't match
## Troubleshooting
### Snapshot Mismatches
- Review the diff carefully
- If changes are expected: `poetry run pytest --snapshot-update`
- If changes are unexpected: Fix the code causing the difference
### Async Test Issues
- Ensure async functions use `@pytest.mark.asyncio`
- Use `AsyncMock` for mocking async functions
- FastAPI TestClient handles async automatically
### Import Errors
- Check that all dependencies are in `pyproject.toml`
- Run `poetry install` to ensure dependencies are installed
- Verify import paths are correct
## Summary
Snapshot testing provides a powerful way to ensure API responses remain consistent. Combined with traditional assertions, it creates a robust test suite that catches regressions while remaining maintainable.
Remember: Good tests are as important as good code!

View File

@@ -0,0 +1,17 @@
"""Common test fixtures for server tests."""
import pytest
from pytest_snapshot.plugin import Snapshot
@pytest.fixture
def configured_snapshot(snapshot: Snapshot) -> Snapshot:
    """Pre-configured snapshot fixture with standard settings.

    Points pytest-snapshot at the shared "snapshots" directory so individual
    tests don't each have to set ``snapshot.snapshot_dir`` themselves.
    """
    snapshot.snapshot_dir = "snapshots"
    return snapshot
# Test ID constants shared across server tests (importable from this conftest).
TEST_USER_ID = "test-user-id"
ADMIN_USER_ID = "admin-user-id"
TARGET_USER_ID = "target-user-id"

View File

@@ -0,0 +1,139 @@
"""Example of analytics tests with improved error handling and assertions."""
import json
from unittest.mock import AsyncMock, Mock
import fastapi
import fastapi.testclient
import pytest_mock
from pytest_snapshot.plugin import Snapshot
import backend.server.routers.analytics as analytics_routes
from backend.server.conftest import TEST_USER_ID
from backend.server.test_helpers import (
assert_error_response_structure,
assert_mock_called_with_partial,
assert_response_status,
safe_parse_json,
)
from backend.server.utils import get_user_id
app = fastapi.FastAPI()
app.include_router(analytics_routes.router)
client = fastapi.testclient.TestClient(app)
def override_get_user_id() -> str:
    """Override get_user_id for testing: every request runs as the fixed test user."""
    return TEST_USER_ID
# Replace the real auth dependency so requests need no token.
app.dependency_overrides[get_user_id] = override_get_user_id
def test_log_raw_metric_success_improved(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test successful raw metric logging with improved assertions."""
    # Mock the analytics function; the route responds with the created record's id.
    mock_result = Mock(id="metric-123-uuid")
    mock_log_metric = mocker.patch(
        "backend.data.analytics.log_raw_metric",
        new_callable=AsyncMock,
        return_value=mock_result,
    )
    request_data = {
        "metric_name": "page_load_time",
        "metric_value": 2.5,
        "data_string": "/dashboard",
    }
    response = client.post("/log_raw_metric", json=request_data)
    # Improved assertions with better error messages
    assert_response_status(response, 200, "Metric logging should succeed")
    response_data = safe_parse_json(response, "Metric response parsing")
    assert response_data == "metric-123-uuid", f"Unexpected response: {response_data}"
    # Verify the function was called with correct parameters
    # (partial match tolerates extra kwargs the route may add later).
    assert_mock_called_with_partial(
        mock_log_metric,
        user_id=TEST_USER_ID,
        metric_name="page_load_time",
        metric_value=2.5,
        data_string="/dashboard",
    )
    # Snapshot test the response
    configured_snapshot.assert_match(
        json.dumps({"metric_id": response_data}, indent=2, sort_keys=True),
        "analytics_log_metric_success_improved",
    )
def test_log_raw_metric_invalid_request_improved() -> None:
    """Test invalid metric request with improved error assertions."""
    # Test missing required fields
    response = client.post("/log_raw_metric", json={})
    error_data = assert_error_response_structure(
        response, expected_status=422, expected_error_fields=["loc", "msg", "type"]
    )
    # Verify specific error details
    detail = error_data["detail"]
    assert isinstance(detail, list), "Error detail should be a list"
    assert len(detail) > 0, "Should have at least one error"
    # Check that required fields are mentioned in errors
    # (in FastAPI validation errors the last element of `loc` is the field name)
    error_fields = [error["loc"][-1] for error in detail if "loc" in error]
    assert "metric_name" in error_fields, "Should report missing metric_name"
    assert "metric_value" in error_fields, "Should report missing metric_value"
    assert "data_string" in error_fields, "Should report missing data_string"
def test_log_raw_metric_type_validation_improved() -> None:
    """Test metric type validation with improved assertions."""
    invalid_requests = [
        {
            "data": {
                "metric_name": "test",
                "metric_value": "not_a_number",  # Invalid type
                "data_string": "test",
            },
            "expected_error": "Input should be a valid number",
        },
        {
            "data": {
                "metric_name": "",  # Empty string
                "metric_value": 1.0,
                "data_string": "test",
            },
            "expected_error": "String should have at least 1 character",
        },
        {
            "data": {
                "metric_name": "test",
                "metric_value": float("inf"),  # Infinity
                "data_string": "test",
            },
            # NOTE(review): json.dumps encodes float("inf") as the non-standard
            # token `Infinity`, so the server may reject the body as a JSON
            # decode error rather than with this finiteness message; the wording
            # also looks like pydantic v1 while the cases above use v2 wording
            # — confirm against the deployed pydantic version.
            "expected_error": "ensure this value is finite",
        },
    ]
    for test_case in invalid_requests:
        response = client.post("/log_raw_metric", json=test_case["data"])
        error_data = assert_error_response_structure(response, expected_status=422)
        # Check that expected error is in the response
        error_text = json.dumps(error_data)
        assert (
            test_case["expected_error"] in error_text
            or test_case["expected_error"].lower() in error_text.lower()
        ), f"Expected error '{test_case['expected_error']}' not found in: {error_text}"

View File

@@ -0,0 +1,107 @@
"""Example of parametrized tests for analytics endpoints."""
import json
from unittest.mock import AsyncMock, Mock
import fastapi
import fastapi.testclient
import pytest
import pytest_mock
from pytest_snapshot.plugin import Snapshot
import backend.server.routers.analytics as analytics_routes
from backend.server.conftest import TEST_USER_ID
from backend.server.utils import get_user_id
app = fastapi.FastAPI()
app.include_router(analytics_routes.router)
client = fastapi.testclient.TestClient(app)
def override_get_user_id() -> str:
"""Override get_user_id for testing"""
return TEST_USER_ID
app.dependency_overrides[get_user_id] = override_get_user_id
@pytest.mark.parametrize(
"metric_value,metric_name,data_string,test_id",
[
(100, "api_calls_count", "external_api", "integer_value"),
(0, "error_count", "no_errors", "zero_value"),
(-5.2, "temperature_delta", "cooling", "negative_value"),
(1.23456789, "precision_test", "float_precision", "float_precision"),
(999999999, "large_number", "max_value", "large_number"),
(0.0000001, "tiny_number", "min_value", "tiny_number"),
],
)
def test_log_raw_metric_values_parametrized(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
    metric_value: float,
    metric_name: str,
    data_string: str,
    test_id: str,
) -> None:
    """Test raw metric logging with various metric values using parametrize."""
    # Mock the analytics function; embed the case id in the mocked record id
    # so each parametrized case produces a distinct snapshot.
    mock_result = Mock(id=f"metric-{test_id}-uuid")
    mocker.patch(
        "backend.data.analytics.log_raw_metric",
        new_callable=AsyncMock,
        return_value=mock_result,
    )
    request_data = {
        "metric_name": metric_name,
        "metric_value": metric_value,
        "data_string": data_string,
    }
    response = client.post("/log_raw_metric", json=request_data)
    # Better error handling
    assert response.status_code == 200, f"Failed for {test_id}: {response.text}"
    response_data = response.json()
    # Snapshot test the response (one snapshot file per case)
    configured_snapshot.assert_match(
        json.dumps(
            {"metric_id": response_data, "test_case": test_id}, indent=2, sort_keys=True
        ),
        f"analytics_metric_{test_id}",
    )
@pytest.mark.parametrize(
"invalid_data,expected_error",
[
({}, "Field required"), # Missing all fields
({"metric_name": "test"}, "Field required"), # Missing metric_value
(
{"metric_name": "test", "metric_value": "not_a_number"},
"Input should be a valid number",
), # Invalid type
(
{"metric_name": "", "metric_value": 1.0, "data_string": "test"},
"String should have at least 1 character",
), # Empty name
],
)
def test_log_raw_metric_invalid_requests_parametrized(
    invalid_data: dict,
    expected_error: str,
) -> None:
    """Test invalid metric requests with parametrize."""
    response = client.post("/log_raw_metric", json=invalid_data)
    assert response.status_code == 422
    error_detail = response.json()
    assert "detail" in error_detail
    # Verify error message contains expected error; the case-insensitive
    # fallback tolerates capitalization differences in validation wording.
    error_text = json.dumps(error_detail)
    assert expected_error in error_text or expected_error.lower() in error_text.lower()

View File

@@ -0,0 +1,281 @@
import json
from unittest.mock import AsyncMock, Mock
import fastapi
import fastapi.testclient
import pytest_mock
from pytest_snapshot.plugin import Snapshot
import backend.server.routers.analytics as analytics_routes
from backend.server.conftest import TEST_USER_ID
from backend.server.utils import get_user_id
app = fastapi.FastAPI()
app.include_router(analytics_routes.router)
client = fastapi.testclient.TestClient(app)
def override_get_user_id() -> str:
"""Override get_user_id for testing"""
return TEST_USER_ID
app.dependency_overrides[get_user_id] = override_get_user_id
def test_log_raw_metric_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test successful raw metric logging"""
    # Mock the analytics function; the route responds with the created record's id.
    mock_result = Mock(id="metric-123-uuid")
    mock_log_metric = mocker.patch(
        "backend.data.analytics.log_raw_metric",
        new_callable=AsyncMock,
        return_value=mock_result,
    )
    request_data = {
        "metric_name": "page_load_time",
        "metric_value": 2.5,
        "data_string": "/dashboard",
    }
    response = client.post("/log_raw_metric", json=request_data)
    assert response.status_code == 200
    response_data = response.json()
    assert response_data == "metric-123-uuid"
    # Verify the function was called with correct parameters (exact match:
    # any extra or missing kwarg fails the test)
    mock_log_metric.assert_called_once_with(
        user_id=TEST_USER_ID,
        metric_name="page_load_time",
        metric_value=2.5,
        data_string="/dashboard",
    )
    # Snapshot test the response
    configured_snapshot.assert_match(
        json.dumps({"metric_id": response.json()}, indent=2, sort_keys=True),
        "analytics_log_metric_success",
    )
def test_log_raw_metric_various_values(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test raw metric logging with various metric values"""
    # One mock serves all three requests below.
    mocker.patch(
        "backend.data.analytics.log_raw_metric",
        new_callable=AsyncMock,
        return_value=Mock(id="metric-456-uuid"),
    )
    # Integer, zero, and negative metric values must all be accepted.
    payloads = [
        {
            "metric_name": "api_calls_count",
            "metric_value": 100,
            "data_string": "external_api",
        },
        {
            "metric_name": "error_count",
            "metric_value": 0,
            "data_string": "no_errors",
        },
        {
            "metric_name": "temperature_delta",
            "metric_value": -5.2,
            "data_string": "cooling",
        },
    ]
    response = None
    for payload in payloads:
        response = client.post("/log_raw_metric", json=payload)
        assert response.status_code == 200
    # Snapshot only the final (negative-value) response, as before.
    configured_snapshot.assert_match(
        json.dumps({"metric_id": response.json()}, indent=2, sort_keys=True),
        "analytics_log_metric_various_values",
    )
def test_log_raw_analytics_success(
mocker: pytest_mock.MockFixture,
configured_snapshot: Snapshot,
) -> None:
"""Test successful raw analytics logging"""
# Mock the analytics function
mock_result = Mock(id="analytics-789-uuid")
mock_log_analytics = mocker.patch(
"backend.data.analytics.log_raw_analytics",
new_callable=AsyncMock,
return_value=mock_result,
)
request_data = {
"type": "user_action",
"data": {
"action": "button_click",
"button_id": "submit_form",
"timestamp": "2023-01-01T00:00:00Z",
"metadata": {
"form_type": "registration",
"fields_filled": 5,
},
},
"data_index": "button_click_submit_form",
}
response = client.post("/log_raw_analytics", json=request_data)
assert response.status_code == 200
response_data = response.json()
assert response_data == "analytics-789-uuid"
# Verify the function was called with correct parameters
mock_log_analytics.assert_called_once_with(
TEST_USER_ID,
"user_action",
request_data["data"],
"button_click_submit_form",
)
# Snapshot test the response
configured_snapshot.assert_match(
json.dumps({"analytics_id": response_data}, indent=2, sort_keys=True),
"analytics_log_analytics_success",
)
def test_log_raw_analytics_complex_data(
mocker: pytest_mock.MockFixture,
configured_snapshot: Snapshot,
) -> None:
"""Test raw analytics logging with complex nested data"""
# Mock the analytics function
mock_result = Mock(id="analytics-complex-uuid")
mocker.patch(
"backend.data.analytics.log_raw_analytics",
new_callable=AsyncMock,
return_value=mock_result,
)
request_data = {
"type": "agent_execution",
"data": {
"agent_id": "agent_123",
"execution_id": "exec_456",
"status": "completed",
"duration_ms": 3500,
"nodes_executed": 15,
"blocks_used": [
{"block_id": "llm_block", "count": 3},
{"block_id": "http_block", "count": 5},
{"block_id": "code_block", "count": 2},
],
"errors": [],
"metadata": {
"trigger": "manual",
"user_tier": "premium",
"environment": "production",
},
},
"data_index": "agent_123_exec_456",
}
response = client.post("/log_raw_analytics", json=request_data)
assert response.status_code == 200
response_data = response.json()
# Snapshot test the complex data structure
configured_snapshot.assert_match(
json.dumps(
{
"analytics_id": response_data,
"logged_data": request_data["data"],
},
indent=2,
sort_keys=True,
),
"analytics_log_analytics_complex_data",
)
def test_log_raw_metric_invalid_request() -> None:
    """Test raw metric logging with invalid request data"""
    # Each payload is missing or mistypes a required field and must be
    # rejected by request validation with 422.
    invalid_payloads = [
        {},  # missing every required field
        {
            "metric_name": "test",
            "metric_value": "not_a_number",  # invalid metric_value type
            "data_string": "test",
        },
        {
            "metric_name": "test",
            "metric_value": 1.0,
            # data_string intentionally absent
        },
    ]
    for payload in invalid_payloads:
        response = client.post("/log_raw_metric", json=payload)
        assert response.status_code == 422
def test_log_raw_analytics_invalid_request() -> None:
    """Test raw analytics logging with invalid request data"""
    # Each payload violates the request schema and must be rejected with 422.
    invalid_payloads = [
        {},  # missing every required field
        {
            "type": "test",
            "data": "not_a_dict",  # data must be an object
            "data_index": "test",
        },
        {
            "type": "test",
            "data": {"key": "value"},
            # data_index intentionally absent
        },
    ]
    for payload in invalid_payloads:
        response = client.post("/log_raw_analytics", json=payload)
        assert response.status_code == 422

View File

@@ -575,6 +575,13 @@ async def execute_graph(
graph_version: Optional[int] = None,
preset_id: Optional[str] = None,
) -> ExecuteGraphResponse:
current_balance = await _user_credit_model.get_credits(user_id)
if current_balance <= 0:
raise HTTPException(
status_code=402,
detail="Insufficient balance to execute the agent. Please top up your account.",
)
graph_exec = await execution_utils.add_graph_execution_async(
graph_id=graph_id,
user_id=user_id,

View File

@@ -0,0 +1,391 @@
import json
from unittest.mock import AsyncMock, Mock
import autogpt_libs.auth.depends
import fastapi
import fastapi.testclient
import pytest_mock
from pytest_snapshot.plugin import Snapshot
import backend.server.routers.v1 as v1_routes
from backend.data.credit import AutoTopUpConfig
from backend.data.graph import GraphModel
from backend.server.conftest import TEST_USER_ID
from backend.server.utils import get_user_id
app = fastapi.FastAPI()
app.include_router(v1_routes.v1_router)
client = fastapi.testclient.TestClient(app)
def override_auth_middleware(request: fastapi.Request) -> dict[str, str]:
    """Override auth middleware for testing: returns a fixed user payload."""
    return {"sub": TEST_USER_ID, "role": "user", "email": "test@example.com"}
def override_get_user_id() -> str:
    """Override get_user_id for testing"""
    return TEST_USER_ID
# NOTE(review): only `autogpt_libs.auth.depends` is imported above, yet this
# line accesses `autogpt_libs.auth.middleware`; that attribute exists only if
# the package __init__ (or an earlier import) loads the submodule — confirm.
app.dependency_overrides[autogpt_libs.auth.middleware.auth_middleware] = (
    override_auth_middleware
)
app.dependency_overrides[get_user_id] = override_get_user_id
# Auth endpoints tests
def test_get_or_create_user_route(
mocker: pytest_mock.MockFixture,
configured_snapshot: Snapshot,
) -> None:
"""Test get or create user endpoint"""
mock_user = Mock()
mock_user.model_dump.return_value = {
"id": TEST_USER_ID,
"email": "test@example.com",
"name": "Test User",
}
mocker.patch(
"backend.server.routers.v1.get_or_create_user",
return_value=mock_user,
)
response = client.post("/auth/user")
assert response.status_code == 200
response_data = response.json()
configured_snapshot.assert_match(
json.dumps(response_data, indent=2, sort_keys=True),
"auth_user",
)
def test_update_user_email_route(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
"""Test update user email endpoint"""
mocker.patch(
"backend.server.routers.v1.update_user_email",
return_value=None,
)
response = client.post("/auth/user/email", json="newemail@example.com")
assert response.status_code == 200
response_data = response.json()
assert response_data["email"] == "newemail@example.com"
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(response_data, indent=2, sort_keys=True),
"auth_email",
)
# Blocks endpoints tests
def test_get_graph_blocks(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
"""Test get blocks endpoint"""
# Mock block
mock_block = Mock()
mock_block.to_dict.return_value = {
"id": "test-block",
"name": "Test Block",
"description": "A test block",
"disabled": False,
}
mock_block.id = "test-block"
mock_block.disabled = False
# Mock get_blocks
mocker.patch(
"backend.server.routers.v1.get_blocks",
return_value={"test-block": lambda: mock_block},
)
# Mock block costs
mocker.patch(
"backend.server.routers.v1.get_block_costs",
return_value={"test-block": [{"cost": 10, "type": "credit"}]},
)
response = client.get("/blocks")
assert response.status_code == 200
response_data = response.json()
assert len(response_data) == 1
assert response_data[0]["id"] == "test-block"
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(response_data, indent=2, sort_keys=True),
"blks_all",
)
def test_execute_graph_block(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
"""Test execute block endpoint"""
# Mock block
mock_block = Mock()
mock_block.execute.return_value = [
("output1", {"data": "result1"}),
("output2", {"data": "result2"}),
]
mocker.patch(
"backend.server.routers.v1.get_block",
return_value=mock_block,
)
request_data = {
"input_name": "test_input",
"input_value": "test_value",
}
response = client.post("/blocks/test-block/execute", json=request_data)
assert response.status_code == 200
response_data = response.json()
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(response_data, indent=2, sort_keys=True),
"blks_exec",
)
def test_execute_graph_block_not_found(
mocker: pytest_mock.MockFixture,
) -> None:
"""Test execute block with non-existent block"""
mocker.patch(
"backend.server.routers.v1.get_block",
return_value=None,
)
response = client.post("/blocks/nonexistent-block/execute", json={})
assert response.status_code == 404
assert "not found" in response.json()["detail"]
# Credits endpoints tests
def test_get_user_credits(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
"""Test get user credits endpoint"""
mock_credit_model = mocker.patch("backend.server.routers.v1._user_credit_model")
mock_credit_model.get_credits = AsyncMock(return_value=1000)
response = client.get("/credits")
assert response.status_code == 200
response_data = response.json()
assert response_data["credits"] == 1000
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(response_data, indent=2, sort_keys=True),
"cred_bal",
)
def test_request_top_up(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
"""Test request top up endpoint"""
mock_credit_model = mocker.patch("backend.server.routers.v1._user_credit_model")
mock_credit_model.top_up_intent = AsyncMock(
return_value="https://checkout.example.com/session123"
)
request_data = {"credit_amount": 500}
response = client.post("/credits", json=request_data)
assert response.status_code == 200
response_data = response.json()
assert "checkout_url" in response_data
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(response_data, indent=2, sort_keys=True),
"cred_topup_req",
)
def test_get_auto_top_up(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
"""Test get auto top-up configuration endpoint"""
mock_config = AutoTopUpConfig(threshold=100, amount=500)
mocker.patch(
"backend.server.routers.v1.get_auto_top_up",
return_value=mock_config,
)
response = client.get("/credits/auto-top-up")
assert response.status_code == 200
response_data = response.json()
assert response_data["threshold"] == 100
assert response_data["amount"] == 500
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(response_data, indent=2, sort_keys=True),
"cred_topup_cfg",
)
# Graphs endpoints tests
def test_get_graphs(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
"""Test get graphs endpoint"""
mock_graph = GraphModel(
id="graph-123",
version=1,
is_active=True,
name="Test Graph",
description="A test graph",
user_id="test-user-id",
)
mocker.patch(
"backend.server.routers.v1.graph_db.get_graphs",
return_value=[mock_graph],
)
response = client.get("/graphs")
assert response.status_code == 200
response_data = response.json()
assert len(response_data) == 1
assert response_data[0]["id"] == "graph-123"
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(response_data, indent=2, sort_keys=True),
"grphs_all",
)
def test_get_graph(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
"""Test get single graph endpoint"""
mock_graph = GraphModel(
id="graph-123",
version=1,
is_active=True,
name="Test Graph",
description="A test graph",
user_id="test-user-id",
)
mocker.patch(
"backend.server.routers.v1.graph_db.get_graph",
return_value=mock_graph,
)
response = client.get("/graphs/graph-123")
assert response.status_code == 200
response_data = response.json()
assert response_data["id"] == "graph-123"
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(response_data, indent=2, sort_keys=True),
"grph_single",
)
def test_get_graph_not_found(
mocker: pytest_mock.MockFixture,
) -> None:
"""Test get graph with non-existent ID"""
mocker.patch(
"backend.server.routers.v1.graph_db.get_graph",
return_value=None,
)
response = client.get("/graphs/nonexistent-graph")
assert response.status_code == 404
assert "not found" in response.json()["detail"]
def test_delete_graph(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
"""Test delete graph endpoint"""
# Mock active graph for deactivation
mock_graph = GraphModel(
id="graph-123",
version=1,
is_active=True,
name="Test Graph",
description="A test graph",
user_id="test-user-id",
)
mocker.patch(
"backend.server.routers.v1.graph_db.get_graph",
return_value=mock_graph,
)
mocker.patch(
"backend.server.routers.v1.on_graph_deactivate",
return_value=None,
)
mocker.patch(
"backend.server.routers.v1.graph_db.delete_graph",
return_value=3, # Number of versions deleted
)
response = client.delete("/graphs/graph-123")
assert response.status_code == 200
response_data = response.json()
assert response_data["version_counts"] == 3
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(response_data, indent=2, sort_keys=True),
"grphs_del",
)
# Invalid request tests
def test_invalid_json_request() -> None:
    """Test endpoint with invalid JSON"""
    # A malformed body sent with a JSON content type must fail validation (422).
    response = client.post(
        "/auth/user/email",
        content="invalid json",
        headers={"Content-Type": "application/json"},
    )
    assert response.status_code == 422
def test_missing_required_field() -> None:
    """Test endpoint with missing required field"""
    # Body validation should flag the absent credit_amount with 422.
    response = client.post("/credits", json={})  # Missing credit_amount
    assert response.status_code == 422

View File

@@ -0,0 +1,139 @@
"""Common test fixtures with proper setup and teardown."""
from contextlib import asynccontextmanager
from typing import AsyncGenerator
from unittest.mock import Mock, patch
import pytest
from prisma import Prisma
@pytest.fixture
async def test_db_connection() -> AsyncGenerator[Prisma, None]:
    """Provide a test database connection with proper cleanup.

    This fixture ensures the database connection is properly
    closed after the test, even if the test fails.
    """
    db = Prisma()
    try:
        await db.connect()
        yield db
    finally:
        # Runs even when the test body raises, so connections never leak.
        await db.disconnect()
@pytest.fixture
def mock_transaction():
    """Mock database transaction with proper async context manager."""
    # locked_transaction is entered with `async with`, so the stand-in must
    # also be an async context manager; it yields without taking any lock.
    @asynccontextmanager
    async def mock_context(*args, **kwargs):
        yield None
    with patch("backend.data.db.locked_transaction", side_effect=mock_context) as mock:
        yield mock
@pytest.fixture
def isolated_app_state():
"""Fixture that ensures app state is isolated between tests."""
# Example: Save original state
# from backend.server.app import app
# original_overrides = app.dependency_overrides.copy()
# try:
# yield app
# finally:
# # Restore original state
# app.dependency_overrides = original_overrides
# For now, just yield None as this is an example
yield None
@pytest.fixture
def cleanup_files():
    """Fixture to track and cleanup files created during tests.

    Yields a callback; the test registers each file path it creates, and the
    fixture removes those paths after the test finishes.
    """
    created_files = []
    def track_file(filepath: str):
        # Register a path for post-test removal.
        created_files.append(filepath)
    yield track_file
    # Cleanup
    import os
    for filepath in created_files:
        try:
            if os.path.exists(filepath):
                os.remove(filepath)
        except Exception as e:
            # Best-effort: a failed removal warns but must not fail the test.
            print(f"Warning: Failed to cleanup {filepath}: {e}")
@pytest.fixture
async def async_mock_with_cleanup():
"""Create async mocks that are properly cleaned up."""
mocks = []
def create_mock(**kwargs):
mock = Mock(**kwargs)
mocks.append(mock)
return mock
yield create_mock
# Reset all mocks
for mock in mocks:
mock.reset_mock()
class TestDatabaseIsolation:
"""Example of proper test isolation with database operations."""
@pytest.fixture(autouse=True)
async def setup_and_teardown(self, test_db_connection):
"""Setup and teardown for each test method."""
# Setup: Clear test data
await test_db_connection.user.delete_many(
where={"email": {"contains": "@test.example"}}
)
yield
# Teardown: Clear test data again
await test_db_connection.user.delete_many(
where={"email": {"contains": "@test.example"}}
)
async def test_create_user(self, test_db_connection):
"""Test that demonstrates proper isolation."""
# This test has access to a clean database
user = await test_db_connection.user.create(
data={"email": "test@test.example", "name": "Test User"}
)
assert user.email == "test@test.example"
# User will be cleaned up automatically
@pytest.fixture(scope="function") # Explicitly use function scope
def reset_singleton_state():
"""Reset singleton state between tests."""
# Example: Reset a singleton instance
# from backend.data.some_singleton import SingletonClass
# # Save original state
# original_instance = getattr(SingletonClass, "_instance", None)
# try:
# # Clear singleton
# SingletonClass._instance = None
# yield
# finally:
# # Restore original state
# SingletonClass._instance = original_instance
# For now, just yield None as this is an example
yield None

View File

@@ -0,0 +1,109 @@
"""Helper functions for improved test assertions and error handling."""
import json
from typing import Any, Dict, Optional
def assert_response_status(
    response: Any, expected_status: int = 200, error_context: Optional[str] = None
) -> None:
    """Assert response status with helpful error message.

    Args:
        response: The HTTP response object
        expected_status: Expected status code
        error_context: Optional context to include in error message

    Raises:
        AssertionError: If the status differs; the message includes the
            response body (pretty-printed JSON when parseable, raw text
            otherwise) to speed up debugging.
    """
    if response.status_code == expected_status:
        return
    message = f"Expected status {expected_status}, got {response.status_code}"
    if error_context:
        message = f"{error_context}: {message}"
    # Attach the response body for context; fall back to raw text when the
    # body is not valid JSON.
    try:
        payload = response.json()
    except Exception:
        message += f"\nResponse text: {response.text}"
    else:
        message += f"\nResponse body: {json.dumps(payload, indent=2)}"
    raise AssertionError(message)
def safe_parse_json(
    response: Any, error_context: Optional[str] = None
) -> Dict[str, Any]:
    """Safely parse JSON response with error handling.

    Args:
        response: The HTTP response object
        error_context: Optional context for error messages

    Returns:
        Parsed JSON data

    Raises:
        AssertionError: If JSON parsing fails; includes up to the first 500
            characters of the raw body for debugging.
    """
    try:
        return response.json()
    except Exception as exc:
        prefix = f"{error_context}: " if error_context else ""
        raise AssertionError(
            f"{prefix}Failed to parse JSON response: {exc}"
            f"\nResponse text: {response.text[:500]}"
        )
def assert_error_response_structure(
    response: Any,
    expected_status: int = 422,
    expected_error_fields: Optional[list[str]] = None,
) -> Dict[str, Any]:
    """Assert error response has expected structure.

    Args:
        response: The HTTP response object
        expected_status: Expected error status code
        expected_error_fields: List of field names each validation-error entry
            must contain (e.g. ["loc", "msg", "type"]). Previously this list
            was accepted but ignored and "loc"/"msg"/"type" were hard-coded;
            the requested fields are now actually checked.

    Returns:
        Parsed error response
    """
    assert_response_status(response, expected_status, "Error response check")
    error_data = safe_parse_json(response, "Error response parsing")
    # Check basic error structure
    assert "detail" in error_data, f"Missing 'detail' in error response: {error_data}"
    # Check specific error fields if provided
    if expected_error_fields:
        detail = error_data["detail"]
        if isinstance(detail, list):
            # FastAPI validation errors: each entry must carry every
            # requested field.
            for error in detail:
                for field in expected_error_fields:
                    assert field in error, f"Missing '{field}' in error: {error}"
    return error_data
def assert_mock_called_with_partial(mock_obj: Any, **expected_kwargs: Any) -> None:
    """Assert mock was called with expected kwargs (partial match).

    Only the listed kwargs are checked; any extra kwargs in the actual call
    are ignored.

    Args:
        mock_obj: The mock object to check
        **expected_kwargs: Expected keyword arguments
    """
    assert mock_obj.called, f"Mock {mock_obj} was not called"
    last_call = mock_obj.call_args
    actual_kwargs = last_call.kwargs if last_call else {}
    for key, wanted in expected_kwargs.items():
        if key not in actual_kwargs:
            raise AssertionError(
                f"Missing key '{key}' in mock call. "
                f"Actual keys: {list(actual_kwargs.keys())}"
            )
        got = actual_kwargs[key]
        if got != wanted:
            raise AssertionError(f"Mock called with {key}={got}, expected {wanted}")

View File

@@ -0,0 +1,74 @@
"""Common test utilities and constants for server tests."""
from typing import Any, Dict
from unittest.mock import Mock
import pytest
# Test ID constants
TEST_USER_ID = "test-user-id"
ADMIN_USER_ID = "admin-user-id"
TARGET_USER_ID = "target-user-id"
# Common test data constants
FIXED_TIMESTAMP = "2024-01-01T00:00:00Z"
TRANSACTION_UUID = "transaction-123-uuid"
METRIC_UUID = "metric-123-uuid"
ANALYTICS_UUID = "analytics-123-uuid"
def create_mock_with_id(mock_id: str) -> Mock:
    """Create a mock object with an id attribute.

    Args:
        mock_id: The ID value to set on the mock

    Returns:
        Mock object with id attribute set
    """
    stub = Mock()
    stub.id = mock_id
    return stub
def assert_status_and_parse_json(
    response: Any, expected_status: int = 200
) -> Dict[str, Any]:
    """Assert response status and return parsed JSON.

    Args:
        response: The HTTP response object
        expected_status: Expected status code (default: 200)

    Returns:
        Parsed JSON response data

    Raises:
        AssertionError: If status code doesn't match expected; the message
            includes the raw response text for debugging.
    """
    actual_status = response.status_code
    if actual_status != expected_status:
        raise AssertionError(
            f"Expected status {expected_status}, got {actual_status}: {response.text}"
        )
    return response.json()
def parametrized_metric_values_decorator(func):
    """Decorator for parametrized metric value tests.

    Wraps ``func`` with a shared ``pytest.mark.parametrize`` so multiple test
    modules can reuse the same metric-value matrix without repeating it.

    Fix: a stray module-level ``@pytest.mark.parametrize(...)`` previously
    decorated this factory itself, duplicating the parameter matrix and
    marking a non-test helper; it has been removed.

    Args:
        func: Test function accepting ``metric_value``, ``metric_name`` and
            ``data_string`` arguments.

    Returns:
        The test function with the parametrize mark applied.
    """
    return pytest.mark.parametrize(
        "metric_value,metric_name,data_string",
        [
            (100, "api_calls_count", "external_api"),
            (0, "error_count", "no_errors"),
            (-5.2, "temperature_delta", "cooling"),
            (1.23456789, "precision_test", "float_precision"),
            (999999999, "large_number", "max_value"),
        ],
    )(func)

View File

@@ -0,0 +1,331 @@
import json
from unittest.mock import AsyncMock
import autogpt_libs.auth
import autogpt_libs.auth.depends
import fastapi
import fastapi.testclient
import prisma.enums
import pytest_mock
from prisma import Json
from pytest_snapshot.plugin import Snapshot
import backend.server.v2.admin.credit_admin_routes as credit_admin_routes
import backend.server.v2.admin.model as admin_model
from backend.data.model import UserTransaction
from backend.server.conftest import ADMIN_USER_ID, TARGET_USER_ID
from backend.server.model import Pagination
# Module-level test app: mount the admin credit routes on a bare FastAPI app
# and drive it with TestClient so no real server is needed.
app = fastapi.FastAPI()
app.include_router(credit_admin_routes.router)
client = fastapi.testclient.TestClient(app)


def override_requires_admin_user() -> dict[str, str]:
    """Override admin user check for testing"""
    return {"sub": ADMIN_USER_ID, "role": "admin"}


def override_get_user_id() -> str:
    """Override get_user_id for testing"""
    return ADMIN_USER_ID


# Bypass real authentication/authorization: every request in this module is
# attributed to ADMIN_USER_ID. Tests that exercise the unauthenticated path
# temporarily clear these overrides and must restore them afterwards.
app.dependency_overrides[autogpt_libs.auth.requires_admin_user] = (
    override_requires_admin_user
)
app.dependency_overrides[autogpt_libs.auth.depends.get_user_id] = override_get_user_id
def test_add_user_credits_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test successful credit addition by admin"""
    # Replace the credit model so no real transaction is written.
    credit_model = mocker.patch(
        "backend.server.v2.admin.credit_admin_routes._user_credit_model"
    )
    credit_model._add_transaction = AsyncMock(
        return_value=(1500, "transaction-123-uuid")
    )

    payload = {
        "user_id": TARGET_USER_ID,
        "amount": 500,
        "comments": "Test credit grant for debugging",
    }
    response = client.post("/admin/add_credits", json=payload)

    assert response.status_code == 200
    body = response.json()
    assert body["new_balance"] == 1500
    assert body["transaction_key"] == "transaction-123-uuid"

    # The route must forward user, amount, transaction type, and admin-audit
    # metadata to the credit model.
    credit_model._add_transaction.assert_called_once()
    args, kwargs = credit_model._add_transaction.call_args
    assert args == (TARGET_USER_ID, 500)
    assert kwargs["transaction_type"] == prisma.enums.CreditTransactionType.GRANT
    # Metadata must be a Json wrapper carrying the acting admin and reason.
    assert isinstance(kwargs["metadata"], Json)
    assert kwargs["metadata"] == Json(
        {"admin_id": ADMIN_USER_ID, "reason": "Test credit grant for debugging"}
    )

    # Snapshot test the response
    configured_snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "admin_add_credits_success",
    )
def test_add_user_credits_negative_amount(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """Test credit deduction by admin (negative amount).

    A negative ``amount`` represents a deduction/refund; the route should pass
    it through unchanged and report the reduced balance.
    """
    # Mock the credit model so no real transaction is written.
    mock_credit_model = mocker.patch(
        "backend.server.v2.admin.credit_admin_routes._user_credit_model"
    )
    mock_credit_model._add_transaction = AsyncMock(
        return_value=(200, "transaction-456-uuid")
    )
    request_data = {
        # Use the shared constant (same value, consistent with sibling tests).
        "user_id": TARGET_USER_ID,
        "amount": -100,
        "comments": "Refund adjustment",
    }
    response = client.post("/admin/add_credits", json=request_data)
    assert response.status_code == 200
    response_data = response.json()
    assert response_data["new_balance"] == 200
    # Also pin the transaction key, matching the success-path test.
    assert response_data["transaction_key"] == "transaction-456-uuid"
    # Snapshot test the response
    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(response_data, indent=2, sort_keys=True),
        "adm_add_cred_neg",
    )
def test_get_user_history_success(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """Test successful retrieval of user credit history"""
    # Stub the DB-facing helper with a two-entry history page.
    stub_response = admin_model.UserHistoryResponse(
        history=[
            UserTransaction(
                user_id="user-1",
                user_email="user1@example.com",
                amount=1000,
                reason="Initial grant",
                transaction_type=prisma.enums.CreditTransactionType.GRANT,
            ),
            UserTransaction(
                user_id="user-2",
                user_email="user2@example.com",
                amount=-50,
                reason="Usage",
                transaction_type=prisma.enums.CreditTransactionType.USAGE,
            ),
        ],
        pagination=Pagination(
            total_items=2,
            total_pages=1,
            current_page=1,
            page_size=20,
        ),
    )
    mocker.patch(
        "backend.server.v2.admin.credit_admin_routes.admin_get_user_history",
        return_value=stub_response,
    )

    response = client.get("/admin/users_history")

    assert response.status_code == 200
    body = response.json()
    assert len(body["history"]) == 2
    assert body["pagination"]["total_items"] == 2

    # Snapshot test the response
    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "adm_usr_hist_ok",
    )
def test_get_user_history_with_filters(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """Test user credit history with search and filter parameters"""
    # Stub the DB-facing helper with a single TOP_UP entry matching the search.
    stub_response = admin_model.UserHistoryResponse(
        history=[
            UserTransaction(
                user_id="user-3",
                user_email="test@example.com",
                amount=500,
                reason="Top up",
                transaction_type=prisma.enums.CreditTransactionType.TOP_UP,
            ),
        ],
        pagination=Pagination(
            total_items=1,
            total_pages=1,
            current_page=1,
            page_size=10,
        ),
    )
    history_mock = mocker.patch(
        "backend.server.v2.admin.credit_admin_routes.admin_get_user_history",
        return_value=stub_response,
    )

    query = {
        "search": "test@example.com",
        "page": 1,
        "page_size": 10,
        "transaction_filter": "TOP_UP",
    }
    response = client.get("/admin/users_history", params=query)

    assert response.status_code == 200
    body = response.json()
    assert len(body["history"]) == 1
    assert body["history"][0]["transaction_type"] == "TOP_UP"

    # The query string must be translated into typed keyword arguments.
    history_mock.assert_called_once_with(
        page=1,
        page_size=10,
        search="test@example.com",
        transaction_filter=prisma.enums.CreditTransactionType.TOP_UP,
    )

    # Snapshot test the response
    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "adm_usr_hist_filt",
    )
def test_get_user_history_empty_results(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """Test user credit history with no results"""
    # Stub the DB-facing helper with an empty page of results.
    empty_page = admin_model.UserHistoryResponse(
        history=[],
        pagination=Pagination(
            total_items=0,
            total_pages=0,
            current_page=1,
            page_size=20,
        ),
    )
    mocker.patch(
        "backend.server.v2.admin.credit_admin_routes.admin_get_user_history",
        return_value=empty_page,
    )

    response = client.get("/admin/users_history", params={"search": "nonexistent"})

    assert response.status_code == 200
    body = response.json()
    assert len(body["history"]) == 0
    assert body["pagination"]["total_items"] == 0

    # Snapshot test the response
    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "adm_usr_hist_empty",
    )
def test_add_credits_invalid_request() -> None:
    """Test credit addition with invalid request data"""
    # Each payload violates the request schema in a different way; all must be
    # rejected with 422 Unprocessable Entity by FastAPI's validation layer.
    invalid_payloads = [
        # Missing required fields
        {},
        # Invalid amount type
        {
            "user_id": "test",
            "amount": "not_a_number",
            "comments": "test",
        },
        # Missing comments
        {
            "user_id": "test",
            "amount": 100,
        },
    ]
    for payload in invalid_payloads:
        response = client.post("/admin/add_credits", json=payload)
        assert response.status_code == 422
def test_admin_endpoints_require_admin_role(mocker: pytest_mock.MockFixture) -> None:
    """Test that admin endpoints require admin role.

    Temporarily removes the auth dependency overrides so the real (failing)
    auth path is exercised. The restore is in a ``finally`` block so a failed
    assertion cannot leak cleared overrides into subsequent tests in this
    module (the original restored them only after all assertions passed).
    """
    # Clear the admin override to test authorization
    app.dependency_overrides.clear()
    try:
        # Mock requires_admin_user to raise an exception
        mocker.patch(
            "autogpt_libs.auth.requires_admin_user",
            side_effect=fastapi.HTTPException(
                status_code=403, detail="Admin access required"
            ),
        )
        # Test add_credits endpoint
        response = client.post(
            "/admin/add_credits",
            json={
                "user_id": "test",
                "amount": 100,
                "comments": "test",
            },
        )
        assert (
            response.status_code == 401
        )  # Auth middleware returns 401 when auth is disabled
        # Test users_history endpoint
        response = client.get("/admin/users_history")
        assert (
            response.status_code == 401
        )  # Auth middleware returns 401 when auth is disabled
    finally:
        # Restore the overrides even when an assertion above fails, so later
        # tests still run with the admin user stubbed in.
        app.dependency_overrides[autogpt_libs.auth.requires_admin_user] = (
            override_requires_admin_user
        )
        app.dependency_overrides[autogpt_libs.auth.depends.get_user_id] = (
            override_get_user_id
        )

View File

@@ -3,6 +3,7 @@ from datetime import datetime
import prisma.enums
import prisma.errors
import prisma.models
import prisma.types
import pytest
import backend.server.v2.library.db as db
@@ -84,6 +85,11 @@ async def test_get_library_agents(mocker):
@pytest.mark.asyncio(loop_scope="session")
async def test_add_agent_to_library(mocker):
await connect()
# Mock the transaction context
mock_transaction = mocker.patch("backend.server.v2.library.db.locked_transaction")
mock_transaction.return_value.__aenter__ = mocker.AsyncMock(return_value=None)
mock_transaction.return_value.__aexit__ = mocker.AsyncMock(return_value=None)
# Mock data
mock_store_listing_data = prisma.models.StoreListingVersion(
id="version123",
@@ -142,6 +148,10 @@ async def test_add_agent_to_library(mocker):
return_value=mock_library_agent_data
)
# Mock the model conversion
mock_from_db = mocker.patch("backend.server.v2.library.model.LibraryAgent.from_db")
mock_from_db.return_value = mocker.Mock()
# Call function
await db.add_store_agent_to_library("version123", "test-user")

View File

@@ -1,9 +1,11 @@
import datetime
import json
import autogpt_libs.auth as autogpt_auth_lib
import fastapi.testclient
import pytest
import pytest_mock
from pytest_snapshot.plugin import Snapshot
import backend.server.model as server_model
import backend.server.v2.library.model as library_model
@@ -14,6 +16,8 @@ app.include_router(library_router)
client = fastapi.testclient.TestClient(app)
FIXED_NOW = datetime.datetime(2023, 1, 1, 0, 0, 0)
def override_auth_middleware():
"""Override auth middleware for testing"""
@@ -30,7 +34,10 @@ app.dependency_overrides[autogpt_auth_lib.depends.get_user_id] = override_get_us
@pytest.mark.asyncio
async def test_get_library_agents_success(mocker: pytest_mock.MockFixture):
async def test_get_library_agents_success(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = library_model.LibraryAgentResponse(
agents=[
library_model.LibraryAgent(
@@ -82,6 +89,10 @@ async def test_get_library_agents_success(mocker: pytest_mock.MockFixture):
assert data.agents[0].can_access_graph is True
assert data.agents[1].graph_id == "test-agent-2"
assert data.agents[1].can_access_graph is False
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "lib_agts_search")
mock_db_call.assert_called_once_with(
user_id="test-user-id",
search_term="test",

View File

@@ -0,0 +1,271 @@
import json
import autogpt_libs.auth.depends
import autogpt_libs.auth.middleware
import fastapi
import fastapi.testclient
import pytest_mock
from pytest_snapshot.plugin import Snapshot
import backend.server.v2.otto.models as otto_models
import backend.server.v2.otto.routes as otto_routes
from backend.server.utils import get_user_id
from backend.server.v2.otto.service import OttoService
# Module-level test app: mount the Otto routes on a bare FastAPI app and drive
# them with TestClient so no real server is needed.
app = fastapi.FastAPI()
app.include_router(otto_routes.router)
client = fastapi.testclient.TestClient(app)


def override_auth_middleware():
    """Override auth middleware for testing"""
    return {"sub": "test-user-id"}


def override_get_user_id():
    """Override get_user_id for testing"""
    return "test-user-id"


# Bypass real authentication: every request is attributed to "test-user-id".
# NOTE(review): the user-id override is keyed on `get_user_id` imported from
# backend.server.utils — tests that clear and later restore these overrides
# should use this same key, or the restore is a no-op. TODO confirm whether
# backend.server.utils.get_user_id is a re-export of
# autogpt_libs.auth.depends.get_user_id.
app.dependency_overrides[autogpt_libs.auth.middleware.auth_middleware] = (
    override_auth_middleware
)
app.dependency_overrides[get_user_id] = override_get_user_id
def test_ask_otto_success(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """Test successful Otto API request"""
    # Stub OttoService.ask so no external Otto backend is contacted.
    stub_answer = otto_models.ApiResponse(
        answer="This is Otto's response to your query.",
        documents=[
            otto_models.Document(
                url="https://example.com/doc1",
                relevance_score=0.95,
            ),
            otto_models.Document(
                url="https://example.com/doc2",
                relevance_score=0.87,
            ),
        ],
        success=True,
    )
    mocker.patch.object(OttoService, "ask", return_value=stub_answer)

    payload = {
        "query": "How do I create an agent?",
        "conversation_history": [
            {
                "query": "What is AutoGPT?",
                "response": "AutoGPT is an AI agent platform.",
            }
        ],
        "message_id": "msg_123",
        "include_graph_data": False,
    }
    response = client.post("/ask", json=payload)

    assert response.status_code == 200
    body = response.json()
    assert body["success"] is True
    assert body["answer"] == "This is Otto's response to your query."
    assert len(body["documents"]) == 2

    # Snapshot test the response
    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "otto_ok",
    )
def test_ask_otto_with_graph_data(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """Test Otto API request with graph data included"""
    # Stub OttoService.ask so no external Otto backend is contacted.
    stub_answer = otto_models.ApiResponse(
        answer="Here's information about your graph.",
        documents=[
            otto_models.Document(
                url="https://example.com/graph-doc",
                relevance_score=0.92,
            ),
        ],
        success=True,
    )
    mocker.patch.object(OttoService, "ask", return_value=stub_answer)

    # include_graph_data=True requires a graph_id alongside the query.
    payload = {
        "query": "Tell me about my graph",
        "conversation_history": [],
        "message_id": "msg_456",
        "include_graph_data": True,
        "graph_id": "graph_123",
    }
    response = client.post("/ask", json=payload)

    assert response.status_code == 200
    body = response.json()
    assert body["success"] is True

    # Snapshot test the response
    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "otto_grph",
    )
def test_ask_otto_empty_conversation(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """Test Otto API request with empty conversation history"""
    # Stub OttoService.ask with a greeting and no supporting documents.
    stub_answer = otto_models.ApiResponse(
        answer="Welcome! How can I help you?",
        documents=[],
        success=True,
    )
    mocker.patch.object(OttoService, "ask", return_value=stub_answer)

    payload = {
        "query": "Hello",
        "conversation_history": [],
        "message_id": "msg_789",
        "include_graph_data": False,
    }
    response = client.post("/ask", json=payload)

    assert response.status_code == 200
    body = response.json()
    assert body["success"] is True
    assert len(body["documents"]) == 0

    # Snapshot test the response
    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "otto_empty",
    )
def test_ask_otto_service_error(
    mocker: pytest_mock.MockFixture,
    snapshot: Snapshot,
) -> None:
    """Test Otto API request when service returns error"""
    # Stub OttoService.ask to report a failure from the service layer.
    stub_answer = otto_models.ApiResponse(
        answer="An error occurred while processing your request.",
        documents=[],
        success=False,
    )
    mocker.patch.object(OttoService, "ask", return_value=stub_answer)

    payload = {
        "query": "Test query",
        "conversation_history": [],
        "message_id": "msg_error",
        "include_graph_data": False,
    }
    response = client.post("/ask", json=payload)

    # The HTTP layer still returns 200; the failure is signalled in the body.
    assert response.status_code == 200
    body = response.json()
    assert body["success"] is False

    # Snapshot test the response
    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(body, indent=2, sort_keys=True),
        "otto_err",
    )
def test_ask_otto_invalid_request() -> None:
    """Test Otto API with invalid request data"""
    # Each payload violates the request schema in a different way; all must be
    # rejected with 422 Unprocessable Entity by FastAPI's validation layer.
    bad_payloads = [
        # Missing required fields
        {},
        # Invalid conversation history format
        {
            "query": "Test",
            "conversation_history": "not a list",
            "message_id": "123",
        },
        # Missing message_id
        {
            "query": "Test",
            "conversation_history": [],
        },
    ]
    for payload in bad_payloads:
        response = client.post("/ask", json=payload)
        assert response.status_code == 422
def test_ask_otto_unauthenticated(mocker: pytest_mock.MockFixture) -> None:
    """Test Otto API request without authentication.

    Clears the dependency overrides to exercise the unauthenticated path.
    Fixes over the original: (1) the restore now runs in ``finally`` so a
    failed assertion cannot leak cleared overrides into later tests;
    (2) the user-id override is restored under the same key it was installed
    with at module setup (`get_user_id` from backend.server.utils) — the
    original restored it under `autogpt_libs.auth.depends.get_user_id`,
    which is not the key the setup used.
    """
    # Remove the auth override to test unauthenticated access
    app.dependency_overrides.clear()
    try:
        # Mock auth_middleware to raise an exception
        mocker.patch(
            "autogpt_libs.auth.middleware.auth_middleware",
            side_effect=fastapi.HTTPException(status_code=401, detail="Unauthorized"),
        )
        request_data = {
            "query": "Test",
            "conversation_history": [],
            "message_id": "123",
        }
        response = client.post("/ask", json=request_data)
        # When auth is disabled and Otto API URL is not configured, we get 503
        assert response.status_code == 503
    finally:
        # Restore the overrides (same keys as module setup) even on failure.
        app.dependency_overrides[autogpt_libs.auth.middleware.auth_middleware] = (
            override_auth_middleware
        )
        app.dependency_overrides[get_user_id] = override_get_user_id

View File

@@ -1,4 +1,5 @@
import datetime
import json
import autogpt_libs.auth.depends
import autogpt_libs.auth.middleware
@@ -6,22 +7,27 @@ import fastapi
import fastapi.testclient
import prisma.enums
import pytest_mock
from pytest_snapshot.plugin import Snapshot
import backend.server.v2.store.model
import backend.server.v2.store.routes
# Using a fixed timestamp for reproducible tests
# 2023 date is intentionally used to ensure tests work regardless of current year
FIXED_NOW = datetime.datetime(2023, 1, 1, 0, 0, 0)
app = fastapi.FastAPI()
app.include_router(backend.server.v2.store.routes.router)
client = fastapi.testclient.TestClient(app)
def override_auth_middleware():
def override_auth_middleware() -> dict[str, str]:
"""Override auth middleware for testing"""
return {"sub": "test-user-id"}
def override_get_user_id():
def override_get_user_id() -> str:
"""Override get_user_id for testing"""
return "test-user-id"
@@ -32,7 +38,10 @@ app.dependency_overrides[autogpt_libs.auth.middleware.auth_middleware] = (
app.dependency_overrides[autogpt_libs.auth.depends.get_user_id] = override_get_user_id
def test_get_agents_defaults(mocker: pytest_mock.MockFixture):
def test_get_agents_defaults(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.StoreAgentsResponse(
agents=[],
pagination=backend.server.v2.store.model.Pagination(
@@ -52,6 +61,9 @@ def test_get_agents_defaults(mocker: pytest_mock.MockFixture):
)
assert data.pagination.total_pages == 0
assert data.agents == []
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "def_agts")
mock_db_call.assert_called_once_with(
featured=False,
creator=None,
@@ -63,7 +75,10 @@ def test_get_agents_defaults(mocker: pytest_mock.MockFixture):
)
def test_get_agents_featured(mocker: pytest_mock.MockFixture):
def test_get_agents_featured(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.StoreAgentsResponse(
agents=[
backend.server.v2.store.model.StoreAgent(
@@ -94,6 +109,8 @@ def test_get_agents_featured(mocker: pytest_mock.MockFixture):
)
assert len(data.agents) == 1
assert data.agents[0].slug == "featured-agent"
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "feat_agts")
mock_db_call.assert_called_once_with(
featured=True,
creator=None,
@@ -105,7 +122,10 @@ def test_get_agents_featured(mocker: pytest_mock.MockFixture):
)
def test_get_agents_by_creator(mocker: pytest_mock.MockFixture):
def test_get_agents_by_creator(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.StoreAgentsResponse(
agents=[
backend.server.v2.store.model.StoreAgent(
@@ -136,6 +156,8 @@ def test_get_agents_by_creator(mocker: pytest_mock.MockFixture):
)
assert len(data.agents) == 1
assert data.agents[0].creator == "specific-creator"
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "agts_by_creator")
mock_db_call.assert_called_once_with(
featured=False,
creator="specific-creator",
@@ -147,7 +169,10 @@ def test_get_agents_by_creator(mocker: pytest_mock.MockFixture):
)
def test_get_agents_sorted(mocker: pytest_mock.MockFixture):
def test_get_agents_sorted(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.StoreAgentsResponse(
agents=[
backend.server.v2.store.model.StoreAgent(
@@ -178,6 +203,8 @@ def test_get_agents_sorted(mocker: pytest_mock.MockFixture):
)
assert len(data.agents) == 1
assert data.agents[0].runs == 1000
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "agts_sorted")
mock_db_call.assert_called_once_with(
featured=False,
creator=None,
@@ -189,7 +216,10 @@ def test_get_agents_sorted(mocker: pytest_mock.MockFixture):
)
def test_get_agents_search(mocker: pytest_mock.MockFixture):
def test_get_agents_search(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.StoreAgentsResponse(
agents=[
backend.server.v2.store.model.StoreAgent(
@@ -220,6 +250,8 @@ def test_get_agents_search(mocker: pytest_mock.MockFixture):
)
assert len(data.agents) == 1
assert "specific" in data.agents[0].description.lower()
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "agts_search")
mock_db_call.assert_called_once_with(
featured=False,
creator=None,
@@ -231,7 +263,10 @@ def test_get_agents_search(mocker: pytest_mock.MockFixture):
)
def test_get_agents_category(mocker: pytest_mock.MockFixture):
def test_get_agents_category(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.StoreAgentsResponse(
agents=[
backend.server.v2.store.model.StoreAgent(
@@ -261,6 +296,8 @@ def test_get_agents_category(mocker: pytest_mock.MockFixture):
response.json()
)
assert len(data.agents) == 1
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "agts_category")
mock_db_call.assert_called_once_with(
featured=False,
creator=None,
@@ -272,7 +309,10 @@ def test_get_agents_category(mocker: pytest_mock.MockFixture):
)
def test_get_agents_pagination(mocker: pytest_mock.MockFixture):
def test_get_agents_pagination(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.StoreAgentsResponse(
agents=[
backend.server.v2.store.model.StoreAgent(
@@ -305,6 +345,8 @@ def test_get_agents_pagination(mocker: pytest_mock.MockFixture):
assert len(data.agents) == 5
assert data.pagination.current_page == 2
assert data.pagination.page_size == 5
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "agts_pagination")
mock_db_call.assert_called_once_with(
featured=False,
creator=None,
@@ -334,7 +376,10 @@ def test_get_agents_malformed_request(mocker: pytest_mock.MockFixture):
mock_db_call.assert_not_called()
def test_get_agent_details(mocker: pytest_mock.MockFixture):
def test_get_agent_details(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.StoreAgentDetails(
store_listing_version_id="test-version-id",
slug="test-agent",
@@ -349,7 +394,7 @@ def test_get_agent_details(mocker: pytest_mock.MockFixture):
runs=100,
rating=4.5,
versions=["1.0.0", "1.1.0"],
last_updated=datetime.datetime.now(),
last_updated=FIXED_NOW,
)
mock_db_call = mocker.patch("backend.server.v2.store.db.get_store_agent_details")
mock_db_call.return_value = mocked_value
@@ -362,10 +407,15 @@ def test_get_agent_details(mocker: pytest_mock.MockFixture):
)
assert data.agent_name == "Test Agent"
assert data.creator == "creator1"
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "agt_details")
mock_db_call.assert_called_once_with(username="creator1", agent_name="test-agent")
def test_get_creators_defaults(mocker: pytest_mock.MockFixture):
def test_get_creators_defaults(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.CreatorsResponse(
creators=[],
pagination=backend.server.v2.store.model.Pagination(
@@ -386,12 +436,17 @@ def test_get_creators_defaults(mocker: pytest_mock.MockFixture):
)
assert data.pagination.total_pages == 0
assert data.creators == []
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "def_creators")
mock_db_call.assert_called_once_with(
featured=False, search_query=None, sorted_by=None, page=1, page_size=20
)
def test_get_creators_pagination(mocker: pytest_mock.MockFixture):
def test_get_creators_pagination(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.CreatorsResponse(
creators=[
backend.server.v2.store.model.Creator(
@@ -425,6 +480,8 @@ def test_get_creators_pagination(mocker: pytest_mock.MockFixture):
assert len(data.creators) == 5
assert data.pagination.current_page == 2
assert data.pagination.page_size == 5
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "creators_pagination")
mock_db_call.assert_called_once_with(
featured=False, search_query=None, sorted_by=None, page=2, page_size=5
)
@@ -448,7 +505,10 @@ def test_get_creators_malformed_request(mocker: pytest_mock.MockFixture):
mock_db_call.assert_not_called()
def test_get_creator_details(mocker: pytest_mock.MockFixture):
def test_get_creator_details(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.CreatorDetails(
name="Test User",
username="creator1",
@@ -468,17 +528,22 @@ def test_get_creator_details(mocker: pytest_mock.MockFixture):
data = backend.server.v2.store.model.CreatorDetails.model_validate(response.json())
assert data.username == "creator1"
assert data.name == "Test User"
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "creator_details")
mock_db_call.assert_called_once_with(username="creator1")
def test_get_submissions_success(mocker: pytest_mock.MockFixture):
def test_get_submissions_success(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.StoreSubmissionsResponse(
submissions=[
backend.server.v2.store.model.StoreSubmission(
name="Test Agent",
description="Test agent description",
image_urls=["test.jpg"],
date_submitted=datetime.datetime.now(),
date_submitted=FIXED_NOW,
status=prisma.enums.SubmissionStatus.APPROVED,
runs=50,
rating=4.2,
@@ -507,10 +572,15 @@ def test_get_submissions_success(mocker: pytest_mock.MockFixture):
assert len(data.submissions) == 1
assert data.submissions[0].name == "Test Agent"
assert data.pagination.current_page == 1
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "sub_success")
mock_db_call.assert_called_once_with(user_id="test-user-id", page=1, page_size=20)
def test_get_submissions_pagination(mocker: pytest_mock.MockFixture):
def test_get_submissions_pagination(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
) -> None:
mocked_value = backend.server.v2.store.model.StoreSubmissionsResponse(
submissions=[],
pagination=backend.server.v2.store.model.Pagination(
@@ -531,6 +601,8 @@ def test_get_submissions_pagination(mocker: pytest_mock.MockFixture):
)
assert data.pagination.current_page == 2
assert data.pagination.page_size == 5
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(response.json(), indent=2), "sub_pagination")
mock_db_call.assert_called_once_with(user_id="test-user-id", page=2, page_size=5)

View File

@@ -0,0 +1,32 @@
import fastapi
import fastapi.testclient
import pytest_mock
import backend.server.v2.turnstile.routes as turnstile_routes
# Module-level test app: mount the Turnstile verification routes on a bare
# FastAPI app and drive them with TestClient so no real server is needed.
app = fastapi.FastAPI()
app.include_router(turnstile_routes.router)
client = fastapi.testclient.TestClient(app)
def test_verify_turnstile_token_no_secret_key(mocker: pytest_mock.MockFixture) -> None:
    """Test token verification without secret key configured"""
    # Simulate a deployment where the Turnstile secret was never configured.
    settings_stub = mocker.patch("backend.server.v2.turnstile.routes.settings")
    settings_stub.secrets.turnstile_secret_key = None

    payload = {"token": "test_token", "action": "login"}
    response = client.post("/verify", json=payload)

    # The endpoint answers 200 but reports the misconfiguration in the body.
    assert response.status_code == 200
    body = response.json()
    assert body["success"] is False
    assert body["error"] == "CONFIGURATION_ERROR"
def test_verify_turnstile_token_invalid_request() -> None:
    """Test token verification with invalid request data"""
    # A body lacking the required "token" field must fail schema validation.
    payload = {"action": "login"}
    response = client.post("/verify", json=payload)
    assert response.status_code == 422

View File

@@ -2,16 +2,17 @@ services:
postgres-test:
image: ankane/pgvector:latest
environment:
- POSTGRES_USER=${DB_USER}
- POSTGRES_PASSWORD=${DB_PASS}
- POSTGRES_DB=${DB_NAME}
- POSTGRES_USER=${DB_USER:-postgres}
- POSTGRES_PASSWORD=${DB_PASS:-postgres}
- POSTGRES_DB=${DB_NAME:-postgres}
- POSTGRES_PORT=${DB_PORT:-5432}
healthcheck:
test: pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB
interval: 10s
timeout: 5s
retries: 5
ports:
- "${DB_PORT}:5432"
- "${DB_PORT:-5432}:5432"
networks:
- app-network-test
redis-test:

File diff suppressed because it is too large Load Diff

View File

@@ -65,6 +65,7 @@ websockets = "^14.2"
youtube-transcript-api = "^0.6.2"
zerobouncesdk = "^1.1.1"
# NOTE: please insert new dependencies in their alphabetical location
pytest-snapshot = "^0.9.0"
[tool.poetry.group.dev.dependencies]
aiohappyeyeballs = "^2.6.1"

View File

@@ -1,3 +1,4 @@
import os
import subprocess
import sys
import time
@@ -59,11 +60,52 @@ def test():
run_command(["docker", "compose", "-f", "docker-compose.test.yaml", "down"])
sys.exit(1)
# Run Prisma migrations
run_command(["prisma", "migrate", "dev"])
# IMPORTANT: Set test database environment variables to prevent accidentally
# resetting the developer's local database.
#
# This script spins up a separate test database container (postgres-test) using
# docker-compose.test.yaml. We explicitly set DATABASE_URL and DIRECT_URL to point
# to this test database to ensure that:
# 1. The prisma migrate reset command only affects the test database
# 2. Tests run against the test database, not the developer's local database
# 3. Any database operations during testing are isolated from development data
#
# Without this, if a developer has DATABASE_URL set in their environment pointing
# to their development database, running tests would wipe their local data!
test_env = os.environ.copy()
# Run the tests
result = subprocess.run(["pytest"] + sys.argv[1:], check=False)
# Use environment variables if set, otherwise use defaults that match docker-compose.test.yaml
db_user = os.getenv("DB_USER", "postgres")
db_pass = os.getenv("DB_PASS", "postgres")
db_name = os.getenv("DB_NAME", "postgres")
db_port = os.getenv("DB_PORT", "5432")
# Construct the test database URL - this ensures we're always pointing to the test container
test_env["DATABASE_URL"] = (
f"postgresql://{db_user}:{db_pass}@localhost:{db_port}/{db_name}"
)
test_env["DIRECT_URL"] = test_env["DATABASE_URL"]
test_env["DB_PORT"] = db_port
test_env["DB_NAME"] = db_name
test_env["DB_PASS"] = db_pass
test_env["DB_USER"] = db_user
# Run Prisma migrations with test database
# First, reset the database to ensure clean state for tests
# This is safe because we've explicitly set DATABASE_URL to the test database above
subprocess.run(
["prisma", "migrate", "reset", "--force", "--skip-seed"],
env=test_env,
check=False,
)
# Then apply migrations to get the test database schema up to date
subprocess.run(["prisma", "migrate", "deploy"], env=test_env, check=True)
# Run the tests with test database environment
# This ensures all database connections in the tests use the test database,
# not any database that might be configured in the developer's environment
result = subprocess.run(["pytest"] + sys.argv[1:], env=test_env, check=False)
run_command(["docker", "compose", "-f", "docker-compose.test.yaml", "down"])

View File

@@ -0,0 +1,4 @@
{
"new_balance": 200,
"transaction_key": "transaction-456-uuid"
}

View File

@@ -0,0 +1,4 @@
{
"new_balance": 1500,
"transaction_key": "transaction-123-uuid"
}

View File

@@ -0,0 +1,9 @@
{
"history": [],
"pagination": {
"current_page": 1,
"page_size": 20,
"total_items": 0,
"total_pages": 0
}
}

View File

@@ -0,0 +1,28 @@
{
"history": [
{
"admin_email": null,
"amount": 500,
"current_balance": 0,
"description": null,
"extra_data": null,
"reason": "Top up",
"running_balance": 0,
"transaction_key": "",
"transaction_time": "0001-01-01T00:00:00Z",
"transaction_type": "TOP_UP",
"usage_execution_id": null,
"usage_graph_id": null,
"usage_node_count": 0,
"usage_start_time": "9999-12-31T23:59:59.999999Z",
"user_email": "test@example.com",
"user_id": "user-3"
}
],
"pagination": {
"current_page": 1,
"page_size": 10,
"total_items": 1,
"total_pages": 1
}
}

View File

@@ -0,0 +1,46 @@
{
"history": [
{
"admin_email": null,
"amount": 1000,
"current_balance": 0,
"description": null,
"extra_data": null,
"reason": "Initial grant",
"running_balance": 0,
"transaction_key": "",
"transaction_time": "0001-01-01T00:00:00Z",
"transaction_type": "GRANT",
"usage_execution_id": null,
"usage_graph_id": null,
"usage_node_count": 0,
"usage_start_time": "9999-12-31T23:59:59.999999Z",
"user_email": "user1@example.com",
"user_id": "user-1"
},
{
"admin_email": null,
"amount": -50,
"current_balance": 0,
"description": null,
"extra_data": null,
"reason": "Usage",
"running_balance": 0,
"transaction_key": "",
"transaction_time": "0001-01-01T00:00:00Z",
"transaction_type": "USAGE",
"usage_execution_id": null,
"usage_graph_id": null,
"usage_node_count": 0,
"usage_start_time": "9999-12-31T23:59:59.999999Z",
"user_email": "user2@example.com",
"user_id": "user-2"
}
],
"pagination": {
"current_page": 1,
"page_size": 20,
"total_items": 2,
"total_pages": 1
}
}

View File

@@ -0,0 +1,4 @@
{
"new_balance": 1500,
"transaction_key": "transaction-123-uuid"
}

View File

@@ -0,0 +1,27 @@
{
"store_listing_version_id": "test-version-id",
"slug": "test-agent",
"agent_name": "Test Agent",
"agent_video": "video.mp4",
"agent_image": [
"image1.jpg",
"image2.jpg"
],
"creator": "creator1",
"creator_avatar": "avatar1.jpg",
"sub_heading": "Test agent subheading",
"description": "Test agent description",
"categories": [
"category1",
"category2"
],
"runs": 100,
"rating": 4.5,
"versions": [
"1.0.0",
"1.1.0"
],
"last_updated": "2023-01-01T00:00:00",
"active_version_id": null,
"has_approved_version": false
}

View File

@@ -0,0 +1,21 @@
{
"agents": [
{
"slug": "creator-agent",
"agent_name": "Creator Agent",
"agent_image": "agent.jpg",
"creator": "specific-creator",
"creator_avatar": "avatar.jpg",
"sub_heading": "Creator agent subheading",
"description": "Creator agent description",
"runs": 50,
"rating": 4.0
}
],
"pagination": {
"total_items": 1,
"total_pages": 1,
"current_page": 1,
"page_size": 20
}
}

View File

@@ -0,0 +1,21 @@
{
"agents": [
{
"slug": "category-agent",
"agent_name": "Category Agent",
"agent_image": "category.jpg",
"creator": "creator1",
"creator_avatar": "avatar1.jpg",
"sub_heading": "Category agent subheading",
"description": "Category agent description",
"runs": 60,
"rating": 4.1
}
],
"pagination": {
"total_items": 1,
"total_pages": 1,
"current_page": 1,
"page_size": 20
}
}

View File

@@ -0,0 +1,65 @@
{
"agents": [
{
"slug": "agent-0",
"agent_name": "Agent 0",
"agent_image": "agent0.jpg",
"creator": "creator1",
"creator_avatar": "avatar1.jpg",
"sub_heading": "Agent 0 subheading",
"description": "Agent 0 description",
"runs": 0,
"rating": 4.0
},
{
"slug": "agent-1",
"agent_name": "Agent 1",
"agent_image": "agent1.jpg",
"creator": "creator1",
"creator_avatar": "avatar1.jpg",
"sub_heading": "Agent 1 subheading",
"description": "Agent 1 description",
"runs": 10,
"rating": 4.0
},
{
"slug": "agent-2",
"agent_name": "Agent 2",
"agent_image": "agent2.jpg",
"creator": "creator1",
"creator_avatar": "avatar1.jpg",
"sub_heading": "Agent 2 subheading",
"description": "Agent 2 description",
"runs": 20,
"rating": 4.0
},
{
"slug": "agent-3",
"agent_name": "Agent 3",
"agent_image": "agent3.jpg",
"creator": "creator1",
"creator_avatar": "avatar1.jpg",
"sub_heading": "Agent 3 subheading",
"description": "Agent 3 description",
"runs": 30,
"rating": 4.0
},
{
"slug": "agent-4",
"agent_name": "Agent 4",
"agent_image": "agent4.jpg",
"creator": "creator1",
"creator_avatar": "avatar1.jpg",
"sub_heading": "Agent 4 subheading",
"description": "Agent 4 description",
"runs": 40,
"rating": 4.0
}
],
"pagination": {
"total_items": 15,
"total_pages": 3,
"current_page": 2,
"page_size": 5
}
}

View File

@@ -0,0 +1,21 @@
{
"agents": [
{
"slug": "search-agent",
"agent_name": "Search Agent",
"agent_image": "search.jpg",
"creator": "creator1",
"creator_avatar": "avatar1.jpg",
"sub_heading": "Search agent subheading",
"description": "Specific search term description",
"runs": 75,
"rating": 4.2
}
],
"pagination": {
"total_items": 1,
"total_pages": 1,
"current_page": 1,
"page_size": 20
}
}

View File

@@ -0,0 +1,21 @@
{
"agents": [
{
"slug": "top-agent",
"agent_name": "Top Agent",
"agent_image": "top.jpg",
"creator": "creator1",
"creator_avatar": "avatar1.jpg",
"sub_heading": "Top agent subheading",
"description": "Top agent description",
"runs": 1000,
"rating": 5.0
}
],
"pagination": {
"total_items": 1,
"total_pages": 1,
"current_page": 1,
"page_size": 20
}
}

View File

@@ -0,0 +1,30 @@
{
"analytics_id": "analytics-complex-uuid",
"logged_data": {
"agent_id": "agent_123",
"blocks_used": [
{
"block_id": "llm_block",
"count": 3
},
{
"block_id": "http_block",
"count": 5
},
{
"block_id": "code_block",
"count": 2
}
],
"duration_ms": 3500,
"errors": [],
"execution_id": "exec_456",
"metadata": {
"environment": "production",
"trigger": "manual",
"user_tier": "premium"
},
"nodes_executed": 15,
"status": "completed"
}
}

View File

@@ -0,0 +1,3 @@
{
"analytics_id": "analytics-789-uuid"
}

View File

@@ -0,0 +1,3 @@
{
"metric_id": "metric-123-uuid"
}

View File

@@ -0,0 +1,3 @@
{
"metric_id": "metric-456-uuid"
}

View File

@@ -0,0 +1,3 @@
{
"email": "newemail@example.com"
}

View File

@@ -0,0 +1,5 @@
{
"email": "test@example.com",
"id": "test-user-id",
"name": "Test User"
}

View File

@@ -0,0 +1,14 @@
[
{
"costs": [
{
"cost": 10,
"type": "credit"
}
],
"description": "A test block",
"disabled": false,
"id": "test-block",
"name": "Test Block"
}
]

View File

@@ -0,0 +1,12 @@
{
"output1": [
{
"data": "result1"
}
],
"output2": [
{
"data": "result2"
}
]
}

View File

@@ -0,0 +1,16 @@
{
"name": "Test User",
"username": "creator1",
"description": "Test creator description",
"links": [
"link1.com",
"link2.com"
],
"avatar_url": "avatar.jpg",
"agent_rating": 4.8,
"agent_runs": 1000,
"top_categories": [
"category1",
"category2"
]
}

View File

@@ -0,0 +1,60 @@
{
"creators": [
{
"name": "Creator 0",
"username": "creator0",
"description": "Creator 0 description",
"avatar_url": "avatar0.jpg",
"num_agents": 1,
"agent_rating": 4.5,
"agent_runs": 100,
"is_featured": false
},
{
"name": "Creator 1",
"username": "creator1",
"description": "Creator 1 description",
"avatar_url": "avatar1.jpg",
"num_agents": 1,
"agent_rating": 4.5,
"agent_runs": 100,
"is_featured": false
},
{
"name": "Creator 2",
"username": "creator2",
"description": "Creator 2 description",
"avatar_url": "avatar2.jpg",
"num_agents": 1,
"agent_rating": 4.5,
"agent_runs": 100,
"is_featured": false
},
{
"name": "Creator 3",
"username": "creator3",
"description": "Creator 3 description",
"avatar_url": "avatar3.jpg",
"num_agents": 1,
"agent_rating": 4.5,
"agent_runs": 100,
"is_featured": false
},
{
"name": "Creator 4",
"username": "creator4",
"description": "Creator 4 description",
"avatar_url": "avatar4.jpg",
"num_agents": 1,
"agent_rating": 4.5,
"agent_runs": 100,
"is_featured": false
}
],
"pagination": {
"total_items": 15,
"total_pages": 3,
"current_page": 2,
"page_size": 5
}
}

View File

@@ -0,0 +1,3 @@
{
"credits": 1000
}

View File

@@ -0,0 +1,4 @@
{
"amount": 500,
"threshold": 100
}

View File

@@ -0,0 +1,3 @@
{
"checkout_url": "https://checkout.example.com/session123"
}

View File

@@ -0,0 +1,9 @@
{
"agents": [],
"pagination": {
"total_items": 0,
"total_pages": 0,
"current_page": 0,
"page_size": 10
}
}

View File

@@ -0,0 +1,9 @@
{
"creators": [],
"pagination": {
"total_items": 0,
"total_pages": 0,
"current_page": 0,
"page_size": 10
}
}

View File

@@ -0,0 +1,21 @@
{
"agents": [
{
"slug": "featured-agent",
"agent_name": "Featured Agent",
"agent_image": "featured.jpg",
"creator": "creator1",
"creator_avatar": "avatar1.jpg",
"sub_heading": "Featured agent subheading",
"description": "Featured agent description",
"runs": 100,
"rating": 4.5
}
],
"pagination": {
"total_items": 1,
"total_pages": 1,
"current_page": 1,
"page_size": 20
}
}

View File

@@ -0,0 +1,20 @@
{
"properties": {
"in_key_a": {
"advanced": true,
"default": "A",
"secret": false,
"title": "Key A"
},
"in_key_b": {
"advanced": false,
"secret": false,
"title": "in_key_b"
}
},
"required": [
"in_key_b"
],
"title": "ExpectedInputSchema",
"type": "object"
}

View File

@@ -0,0 +1,15 @@
{
"properties": {
"out_key": {
"advanced": false,
"description": "This is an output key",
"secret": false,
"title": "out_key"
}
},
"required": [
"out_key"
],
"title": "ExpectedOutputSchema",
"type": "object"
}

View File

@@ -0,0 +1,29 @@
{
"credentials_input_schema": {
"properties": {},
"title": "TestGraphCredentialsInputSchema",
"type": "object"
},
"description": "A test graph",
"forked_from_id": null,
"forked_from_version": null,
"has_webhook_trigger": false,
"id": "graph-123",
"input_schema": {
"properties": {},
"required": [],
"type": "object"
},
"is_active": true,
"links": [],
"name": "Test Graph",
"nodes": [],
"output_schema": {
"properties": {},
"required": [],
"type": "object"
},
"sub_graphs": [],
"user_id": "test-user-id",
"version": 1
}

View File

@@ -0,0 +1,17 @@
{
"description": "Test graph",
"link_structure": [
{
"sink_name": "name",
"source_name": "output"
}
],
"links_count": 1,
"name": "TestGraph",
"node_blocks": [
"1ff065e9-88e8-4358-9d82-8dc91f622ba9",
"c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
"1ff065e9-88e8-4358-9d82-8dc91f622ba9"
],
"nodes_count": 3
}

View File

@@ -0,0 +1,31 @@
[
{
"credentials_input_schema": {
"properties": {},
"title": "TestGraphCredentialsInputSchema",
"type": "object"
},
"description": "A test graph",
"forked_from_id": null,
"forked_from_version": null,
"has_webhook_trigger": false,
"id": "graph-123",
"input_schema": {
"properties": {},
"required": [],
"type": "object"
},
"is_active": true,
"links": [],
"name": "Test Graph",
"nodes": [],
"output_schema": {
"properties": {},
"required": [],
"type": "object"
},
"sub_graphs": [],
"user_id": "test-user-id",
"version": 1
}
]

View File

@@ -0,0 +1,3 @@
{
"version_counts": 3
}

View File

@@ -0,0 +1,48 @@
{
"agents": [
{
"id": "test-agent-1",
"graph_id": "test-agent-1",
"graph_version": 1,
"image_url": null,
"creator_name": "Test Creator",
"creator_image_url": "",
"status": "COMPLETED",
"updated_at": "2023-01-01T00:00:00",
"name": "Test Agent 1",
"description": "Test Description 1",
"input_schema": {
"type": "object",
"properties": {}
},
"new_output": false,
"can_access_graph": true,
"is_latest_version": true
},
{
"id": "test-agent-2",
"graph_id": "test-agent-2",
"graph_version": 1,
"image_url": null,
"creator_name": "Test Creator",
"creator_image_url": "",
"status": "COMPLETED",
"updated_at": "2023-01-01T00:00:00",
"name": "Test Agent 2",
"description": "Test Description 2",
"input_schema": {
"type": "object",
"properties": {}
},
"new_output": false,
"can_access_graph": false,
"is_latest_version": true
}
],
"pagination": {
"total_items": 2,
"total_pages": 1,
"current_page": 1,
"page_size": 50
}
}

View File

@@ -0,0 +1,30 @@
{
"analytics_id": "analytics-complex-uuid",
"logged_data": {
"agent_id": "agent_123",
"blocks_used": [
{
"block_id": "llm_block",
"count": 3
},
{
"block_id": "http_block",
"count": 5
},
{
"block_id": "code_block",
"count": 2
}
],
"duration_ms": 3500,
"errors": [],
"execution_id": "exec_456",
"metadata": {
"environment": "production",
"trigger": "manual",
"user_tier": "premium"
},
"nodes_executed": 15,
"status": "completed"
}
}

View File

@@ -0,0 +1,3 @@
{
"analytics_id": "analytics-789-uuid"
}

View File

@@ -0,0 +1,3 @@
{
"metric_id": "metric-123-uuid"
}

View File

@@ -0,0 +1,3 @@
{
"metric_id": "metric-456-uuid"
}

View File

@@ -0,0 +1,5 @@
{
"answer": "Welcome! How can I help you?",
"documents": [],
"success": true
}

View File

@@ -0,0 +1,5 @@
{
"answer": "An error occurred while processing your request.",
"documents": [],
"success": false
}

View File

@@ -0,0 +1,10 @@
{
"answer": "Here's information about your graph.",
"documents": [
{
"relevance_score": 0.92,
"url": "https://example.com/graph-doc"
}
],
"success": true
}

View File

@@ -0,0 +1,14 @@
{
"answer": "This is Otto's response to your query.",
"documents": [
{
"relevance_score": 0.95,
"url": "https://example.com/doc1"
},
{
"relevance_score": 0.87,
"url": "https://example.com/doc2"
}
],
"success": true
}

View File

@@ -0,0 +1,7 @@
{
"channel": "3e53486c-cf57-477e-ba2a-cb02dc828e1a|graph_exec#test-graph-exec-1",
"data": null,
"error": null,
"method": "subscribe_graph_execution",
"success": true
}

View File

@@ -0,0 +1,9 @@
{
"submissions": [],
"pagination": {
"total_items": 10,
"total_pages": 2,
"current_page": 2,
"page_size": 5
}
}

View File

@@ -0,0 +1,32 @@
{
"submissions": [
{
"agent_id": "test-agent-id",
"agent_version": 1,
"name": "Test Agent",
"sub_heading": "Test agent subheading",
"slug": "test-agent",
"description": "Test agent description",
"image_urls": [
"test.jpg"
],
"date_submitted": "2023-01-01T00:00:00",
"status": "APPROVED",
"runs": 50,
"rating": 4.2,
"store_listing_version_id": null,
"version": null,
"reviewer_id": null,
"review_comments": null,
"internal_comments": null,
"reviewed_at": null,
"changes_summary": null
}
],
"pagination": {
"total_items": 1,
"total_pages": 1,
"current_page": 1,
"page_size": 20
}
}

View File

@@ -0,0 +1,7 @@
{
"channel": "3e53486c-cf57-477e-ba2a-cb02dc828e1a|graph_exec#test-graph-exec-1",
"data": null,
"error": null,
"method": "unsubscribe",
"success": true
}

View File

@@ -1,9 +1,11 @@
import json
from typing import Any
from uuid import UUID
import autogpt_libs.auth.models
import fastapi.exceptions
import pytest
from pytest_snapshot.plugin import Snapshot
import backend.server.v2.store.model as store
from backend.blocks.basic import StoreValueBlock
@@ -18,7 +20,7 @@ from backend.util.test import SpinTestServer
@pytest.mark.asyncio(loop_scope="session")
async def test_graph_creation(server: SpinTestServer):
async def test_graph_creation(server: SpinTestServer, snapshot: Snapshot):
"""
Test the creation of a graph with nodes and links.
@@ -70,9 +72,27 @@ async def test_graph_creation(server: SpinTestServer):
assert links[0].source_id in {nodes[0].id, nodes[1].id, nodes[2].id}
assert links[0].sink_id in {nodes[0].id, nodes[1].id, nodes[2].id}
# Create a serializable version of the graph for snapshot testing
# Remove dynamic IDs to make snapshots reproducible
graph_data = {
"name": created_graph.name,
"description": created_graph.description,
"nodes_count": len(created_graph.nodes),
"links_count": len(created_graph.links),
"node_blocks": [node.block_id for node in created_graph.nodes],
"link_structure": [
{"source_name": link.source_name, "sink_name": link.sink_name}
for link in created_graph.links
],
}
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(graph_data, indent=2, sort_keys=True), "grph_struct"
)
@pytest.mark.asyncio(loop_scope="session")
async def test_get_input_schema(server: SpinTestServer):
async def test_get_input_schema(server: SpinTestServer, snapshot: Snapshot):
"""
Test the get_input_schema method of a created graph.
@@ -162,10 +182,22 @@ async def test_get_input_schema(server: SpinTestServer):
input_schema["title"] = "ExpectedInputSchema"
assert input_schema == ExpectedInputSchema.jsonschema()
# Add snapshot testing for the schemas
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(input_schema, indent=2, sort_keys=True), "grph_in_schm"
)
output_schema = created_graph.output_schema
output_schema["title"] = "ExpectedOutputSchema"
assert output_schema == ExpectedOutputSchema.jsonschema()
# Add snapshot testing for the output schema
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(output_schema, indent=2, sort_keys=True), "grph_out_schm"
)
@pytest.mark.asyncio(loop_scope="session")
async def test_clean_graph(server: SpinTestServer):

View File

@@ -1,8 +1,10 @@
import json
from typing import cast
from unittest.mock import AsyncMock
import pytest
from fastapi import WebSocket, WebSocketDisconnect
from pytest_snapshot.plugin import Snapshot
from backend.data.user import DEFAULT_USER_ID
from backend.server.conn_manager import ConnectionManager
@@ -27,7 +29,7 @@ def mock_manager() -> AsyncMock:
@pytest.mark.asyncio
async def test_websocket_router_subscribe(
mock_websocket: AsyncMock, mock_manager: AsyncMock
mock_websocket: AsyncMock, mock_manager: AsyncMock, snapshot: Snapshot
) -> None:
mock_websocket.receive_text.side_effect = [
WSMessage(
@@ -56,12 +58,19 @@ async def test_websocket_router_subscribe(
in mock_websocket.send_text.call_args[0][0]
)
assert '"success":true' in mock_websocket.send_text.call_args[0][0]
# Capture and snapshot the WebSocket response message
sent_message = mock_websocket.send_text.call_args[0][0]
parsed_message = json.loads(sent_message)
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(parsed_message, indent=2, sort_keys=True), "sub")
mock_manager.disconnect_socket.assert_called_once_with(mock_websocket)
@pytest.mark.asyncio
async def test_websocket_router_unsubscribe(
mock_websocket: AsyncMock, mock_manager: AsyncMock
mock_websocket: AsyncMock, mock_manager: AsyncMock, snapshot: Snapshot
) -> None:
mock_websocket.receive_text.side_effect = [
WSMessage(
@@ -87,6 +96,13 @@ async def test_websocket_router_unsubscribe(
mock_websocket.send_text.assert_called_once()
assert '"method":"unsubscribe"' in mock_websocket.send_text.call_args[0][0]
assert '"success":true' in mock_websocket.send_text.call_args[0][0]
# Capture and snapshot the WebSocket response message
sent_message = mock_websocket.send_text.call_args[0][0]
parsed_message = json.loads(sent_message)
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(json.dumps(parsed_message, indent=2, sort_keys=True), "unsub")
mock_manager.disconnect_socket.assert_called_once_with(mock_websocket)

View File

@@ -32,4 +32,4 @@ NEXT_PUBLIC_SHOW_BILLING_PAGE=false
## Get these from the Cloudflare Turnstile dashboard: https://dash.cloudflare.com/?to=/:account/turnstile
## This is the frontend site key
NEXT_PUBLIC_CLOUDFLARE_TURNSTILE_SITE_KEY=
NEXT_PUBLIC_DISABLE_TURNSTILE=false
NEXT_PUBLIC_TURNSTILE=disabled

View File

@@ -1,15 +1,26 @@
import getServerSupabase from "@/lib/supabase/getServerSupabase";
import BackendAPI from "@/lib/autogpt-server-api";
import { NextResponse } from "next/server";
import { revalidatePath } from "next/cache";
async function shouldShowOnboarding() {
const api = new BackendAPI();
return (
(await api.isOnboardingEnabled()) &&
!(await api.getUserOnboarding()).completedSteps.includes("CONGRATS")
);
}
// Handle the callback to complete the user session login
export async function GET(request: Request) {
const { searchParams, origin } = new URL(request.url);
const code = searchParams.get("code");
// if "next" is in param, use it as the redirect URL
const next = searchParams.get("next") ?? "/";
let next = searchParams.get("next") ?? "/";
if (code) {
const supabase = getServerSupabase();
const supabase = await getServerSupabase();
if (!supabase) {
return NextResponse.redirect(`${origin}/error`);
@@ -18,6 +29,21 @@ export async function GET(request: Request) {
const { data, error } = await supabase.auth.exchangeCodeForSession(code);
// data.session?.refresh_token is available if you need to store it for later use
if (!error) {
try {
const api = new BackendAPI();
await api.createUser();
if (await shouldShowOnboarding()) {
next = "/onboarding";
revalidatePath("/onboarding", "layout");
} else {
revalidatePath("/", "layout");
}
} catch (createUserError) {
console.error("Error creating user:", createUserError);
// Continue with redirect even if createUser fails
}
const forwardedHost = request.headers.get("x-forwarded-host"); // original origin before load balancer
const isLocalEnv = process.env.NODE_ENV === "development";
if (isLocalEnv) {

View File

@@ -12,7 +12,7 @@ export async function GET(request: NextRequest) {
const next = searchParams.get("next") ?? "/";
if (token_hash && type) {
const supabase = getServerSupabase();
const supabase = await getServerSupabase();
if (!supabase) {
redirect("/error");

View File

@@ -21,7 +21,7 @@ export async function login(
turnstileToken: string,
) {
return await Sentry.withServerActionInstrumentation("login", {}, async () => {
const supabase = getServerSupabase();
const supabase = await getServerSupabase();
const api = new BackendAPI();
if (!supabase) {
@@ -60,14 +60,13 @@ export async function providerLogin(provider: LoginProvider) {
"providerLogin",
{},
async () => {
const supabase = getServerSupabase();
const api = new BackendAPI();
const supabase = await getServerSupabase();
if (!supabase) {
redirect("/error");
}
const { error } = await supabase!.auth.signInWithOAuth({
const { data, error } = await supabase!.auth.signInWithOAuth({
provider: provider,
options: {
redirectTo:
@@ -81,12 +80,13 @@ export async function providerLogin(provider: LoginProvider) {
return error.message;
}
await api.createUser();
// Don't onboard if disabled or already onboarded
if (await shouldShowOnboarding()) {
revalidatePath("/onboarding", "layout");
redirect("/onboarding");
// Redirect to the OAuth provider's URL
if (data?.url) {
redirect(data.url);
}
// Note: api.createUser() and onboarding check happen in the callback handler
// after the session is established. See `auth/callback/route.ts`.
},
);
}

View File

@@ -1,5 +1,4 @@
"use client";
import { login, providerLogin } from "./actions";
import {
Form,
FormControl,
@@ -8,14 +7,8 @@ import {
FormLabel,
FormMessage,
} from "@/components/ui/form";
import { useForm } from "react-hook-form";
import { Input } from "@/components/ui/input";
import { z } from "zod";
import { zodResolver } from "@hookform/resolvers/zod";
import { useCallback, useState } from "react";
import { useRouter } from "next/navigation";
import Link from "next/link";
import useSupabase from "@/lib/supabase/useSupabase";
import LoadingBox from "@/components/ui/loading";
import {
AuthCard,
@@ -23,86 +16,34 @@ import {
AuthButton,
AuthFeedback,
AuthBottomText,
GoogleOAuthButton,
PasswordInput,
Turnstile,
} from "@/components/auth";
import { loginFormSchema } from "@/types/auth";
import { getBehaveAs } from "@/lib/utils";
import { useTurnstile } from "@/hooks/useTurnstile";
import { useLoginPage } from "./useLoginPage";
export default function LoginPage() {
const { supabase, user, isUserLoading } = useSupabase();
const [feedback, setFeedback] = useState<string | null>(null);
const router = useRouter();
const [isLoading, setIsLoading] = useState(false);
const {
form,
feedback,
turnstile,
captchaKey,
isLoading,
isCloudEnv,
isLoggedIn,
isUserLoading,
isGoogleLoading,
isSupabaseAvailable,
handleSubmit,
handleProviderLogin,
} = useLoginPage();
const turnstile = useTurnstile({
action: "login",
autoVerify: false,
resetOnError: true,
});
const form = useForm<z.infer<typeof loginFormSchema>>({
resolver: zodResolver(loginFormSchema),
defaultValues: {
email: "",
password: "",
},
});
// TODO: uncomment when we enable social login
// const onProviderLogin = useCallback(async (
// provider: LoginProvider,
// ) => {
// setIsLoading(true);
// const error = await providerLogin(provider);
// setIsLoading(false);
// if (error) {
// setFeedback(error);
// return;
// }
// setFeedback(null);
// }, [supabase]);
const onLogin = useCallback(
async (data: z.infer<typeof loginFormSchema>) => {
setIsLoading(true);
if (!(await form.trigger())) {
setIsLoading(false);
return;
}
if (!turnstile.verified) {
setFeedback("Please complete the CAPTCHA challenge.");
setIsLoading(false);
return;
}
const error = await login(data, turnstile.token as string);
await supabase?.auth.refreshSession();
setIsLoading(false);
if (error) {
setFeedback(error);
// Always reset the turnstile on any error
turnstile.reset();
return;
}
setFeedback(null);
},
[form, turnstile, supabase],
);
if (user) {
console.debug("User exists, redirecting to /");
router.push("/");
}
if (isUserLoading || user) {
if (isUserLoading || isLoggedIn) {
return <LoadingBox className="h-[80vh]" />;
}
if (!supabase) {
if (!isSupabaseAvailable) {
return (
<div>
User accounts are disabled because Supabase client is unavailable
@@ -113,8 +54,26 @@ export default function LoginPage() {
return (
<AuthCard className="mx-auto">
<AuthHeader>Login to your account</AuthHeader>
{isCloudEnv ? (
<>
<div className="mb-6">
<GoogleOAuthButton
onClick={() => handleProviderLogin("google")}
isLoading={isGoogleLoading}
disabled={isLoading}
/>
</div>
<div className="mb-6 flex items-center">
<div className="flex-1 border-t border-gray-300"></div>
<span className="mx-3 text-sm text-gray-500">or</span>
<div className="flex-1 border-t border-gray-300"></div>
</div>
</>
) : null}
<Form {...form}>
<form onSubmit={form.handleSubmit(onLogin)}>
<form onSubmit={handleSubmit}>
<FormField
control={form.control}
name="email"
@@ -160,6 +119,7 @@ export default function LoginPage() {
{/* Turnstile CAPTCHA Component */}
<Turnstile
key={captchaKey}
siteKey={turnstile.siteKey}
onVerify={turnstile.handleVerify}
onExpire={turnstile.handleExpire}
@@ -169,11 +129,7 @@ export default function LoginPage() {
shouldRender={turnstile.shouldRender}
/>
<AuthButton
onClick={() => onLogin(form.getValues())}
isLoading={isLoading}
type="submit"
>
<AuthButton isLoading={isLoading} type="submit">
Login
</AuthButton>
</form>

View File

@@ -0,0 +1,102 @@
import { useTurnstile } from "@/hooks/useTurnstile";
import useSupabase from "@/lib/supabase/useSupabase";
import { loginFormSchema, LoginProvider } from "@/types/auth";
import { zodResolver } from "@hookform/resolvers/zod";
import { useRouter } from "next/navigation";
import { useCallback, useEffect, useState } from "react";
import { useForm } from "react-hook-form";
import { login, providerLogin } from "./actions";
import z from "zod";
import { BehaveAs } from "@/lib/utils";
import { getBehaveAs } from "@/lib/utils";
export function useLoginPage() {
const { supabase, user, isUserLoading } = useSupabase();
const [feedback, setFeedback] = useState<string | null>(null);
const [captchaKey, setCaptchaKey] = useState(0);
const router = useRouter();
const [isLoading, setIsLoading] = useState(false);
const [isGoogleLoading, setIsGoogleLoading] = useState(false);
const isCloudEnv = getBehaveAs() === BehaveAs.CLOUD;
const turnstile = useTurnstile({
action: "login",
autoVerify: false,
resetOnError: true,
});
const form = useForm<z.infer<typeof loginFormSchema>>({
resolver: zodResolver(loginFormSchema),
defaultValues: {
email: "",
password: "",
},
});
const resetCaptcha = useCallback(() => {
setCaptchaKey((k) => k + 1);
turnstile.reset();
}, [turnstile]);
useEffect(() => {
if (user) router.push("/");
}, [user]);
async function handleProviderLogin(provider: LoginProvider) {
setIsGoogleLoading(true);
try {
const error = await providerLogin(provider);
if (error) throw error;
setFeedback(null);
} catch (error) {
resetCaptcha();
setFeedback(JSON.stringify(error));
} finally {
setIsGoogleLoading(false);
}
}
async function handleLogin(data: z.infer<typeof loginFormSchema>) {
setIsLoading(true);
if (!turnstile.verified) {
setFeedback("Please complete the CAPTCHA challenge.");
setIsLoading(false);
resetCaptcha();
return;
}
if (data.email.includes("@agpt.co")) {
setFeedback("Please use Google SSO to login using an AutoGPT email.");
setIsLoading(false);
resetCaptcha();
return;
}
const error = await login(data, turnstile.token as string);
await supabase?.auth.refreshSession();
setIsLoading(false);
if (error) {
setFeedback(error);
resetCaptcha();
// Always reset the turnstile on any error
turnstile.reset();
return;
}
setFeedback(null);
}
return {
form,
feedback,
turnstile,
captchaKey,
isLoggedIn: !!user,
isLoading,
isCloudEnv,
isUserLoading,
isGoogleLoading,
isSupabaseAvailable: !!supabase,
handleSubmit: form.handleSubmit(handleLogin),
handleProviderLogin,
};
}

View File

@@ -6,7 +6,7 @@ import BackendApi from "@/lib/autogpt-server-api";
import { NotificationPreferenceDTO } from "@/lib/autogpt-server-api/types";
export async function updateSettings(formData: FormData) {
const supabase = getServerSupabase();
const supabase = await getServerSupabase();
const {
data: { user },
} = await supabase.auth.getUser();

View File

@@ -9,7 +9,7 @@ export async function sendResetEmail(email: string, turnstileToken: string) {
"sendResetEmail",
{},
async () => {
const supabase = getServerSupabase();
const supabase = await getServerSupabase();
const origin = process.env.FRONTEND_BASE_URL || "http://localhost:3000";
if (!supabase) {
@@ -42,7 +42,7 @@ export async function changePassword(password: string, turnstileToken: string) {
"changePassword",
{},
async () => {
const supabase = getServerSupabase();
const supabase = await getServerSupabase();
if (!supabase) {
redirect("/error");

View File

@@ -34,6 +34,8 @@ export default function ResetPasswordPage() {
const [feedback, setFeedback] = useState<string | null>(null);
const [isError, setIsError] = useState(false);
const [disabled, setDisabled] = useState(false);
const [sendEmailCaptchaKey, setSendEmailCaptchaKey] = useState(0);
const [changePasswordCaptchaKey, setChangePasswordCaptchaKey] = useState(0);
const sendEmailTurnstile = useTurnstile({
action: "reset_password",
@@ -62,6 +64,16 @@ export default function ResetPasswordPage() {
},
});
const resetSendEmailCaptcha = useCallback(() => {
setSendEmailCaptchaKey((k) => k + 1);
sendEmailTurnstile.reset();
}, [sendEmailTurnstile]);
const resetChangePasswordCaptcha = useCallback(() => {
setChangePasswordCaptchaKey((k) => k + 1);
changePasswordTurnstile.reset();
}, [changePasswordTurnstile]);
const onSendEmail = useCallback(
async (data: z.infer<typeof sendEmailFormSchema>) => {
setIsLoading(true);
@@ -76,6 +88,7 @@ export default function ResetPasswordPage() {
setFeedback("Please complete the CAPTCHA challenge.");
setIsError(true);
setIsLoading(false);
resetSendEmailCaptcha();
return;
}
@@ -87,7 +100,7 @@ export default function ResetPasswordPage() {
if (error) {
setFeedback(error);
setIsError(true);
sendEmailTurnstile.reset();
resetSendEmailCaptcha();
return;
}
setDisabled(true);
@@ -96,7 +109,7 @@ export default function ResetPasswordPage() {
);
setIsError(false);
},
[sendEmailForm, sendEmailTurnstile],
[sendEmailForm, sendEmailTurnstile, resetSendEmailCaptcha],
);
const onChangePassword = useCallback(
@@ -113,6 +126,7 @@ export default function ResetPasswordPage() {
setFeedback("Please complete the CAPTCHA challenge.");
setIsError(true);
setIsLoading(false);
resetChangePasswordCaptcha();
return;
}
@@ -124,13 +138,13 @@ export default function ResetPasswordPage() {
if (error) {
setFeedback(error);
setIsError(true);
changePasswordTurnstile.reset();
resetChangePasswordCaptcha();
return;
}
setFeedback("Password changed successfully. Redirecting to login.");
setIsError(false);
},
[changePasswordForm, changePasswordTurnstile],
[changePasswordForm, changePasswordTurnstile, resetChangePasswordCaptcha],
);
if (isUserLoading) {
@@ -184,6 +198,7 @@ export default function ResetPasswordPage() {
{/* Turnstile CAPTCHA Component for password change */}
<Turnstile
key={changePasswordCaptchaKey}
siteKey={changePasswordTurnstile.siteKey}
onVerify={changePasswordTurnstile.handleVerify}
onExpire={changePasswordTurnstile.handleExpire}
@@ -227,6 +242,7 @@ export default function ResetPasswordPage() {
{/* Turnstile CAPTCHA Component for reset email */}
<Turnstile
key={sendEmailCaptchaKey}
siteKey={sendEmailTurnstile.siteKey}
onVerify={sendEmailTurnstile.handleVerify}
onExpire={sendEmailTurnstile.handleExpire}

View File

@@ -17,7 +17,7 @@ export async function signup(
"signup",
{},
async () => {
const supabase = getServerSupabase();
const supabase = await getServerSupabase();
if (!supabase) {
redirect("/error");

View File

@@ -1,5 +1,4 @@
"use client";
import { signup } from "./actions";
import {
Form,
FormControl,
@@ -9,95 +8,44 @@ import {
FormLabel,
FormMessage,
} from "@/components/ui/form";
import { useForm } from "react-hook-form";
import { Input } from "@/components/ui/input";
import type { z } from "zod";
import { zodResolver } from "@hookform/resolvers/zod";
import { useCallback, useState } from "react";
import { useRouter } from "next/navigation";
import Link from "next/link";
import { Checkbox } from "@/components/ui/checkbox";
import useSupabase from "@/lib/supabase/useSupabase";
import LoadingBox from "@/components/ui/loading";
import {
AuthCard,
AuthHeader,
AuthButton,
AuthBottomText,
GoogleOAuthButton,
PasswordInput,
Turnstile,
} from "@/components/auth";
import AuthFeedback from "@/components/auth/AuthFeedback";
import { signupFormSchema } from "@/types/auth";
import { getBehaveAs } from "@/lib/utils";
import { useTurnstile } from "@/hooks/useTurnstile";
import { useSignupPage } from "./useSignupPage";
export default function SignupPage() {
const { supabase, user, isUserLoading } = useSupabase();
const [feedback, setFeedback] = useState<string | null>(null);
const router = useRouter();
const [isLoading, setIsLoading] = useState(false);
//TODO: Remove after closed beta
const {
form,
feedback,
turnstile,
captchaKey,
isLoggedIn,
isLoading,
isCloudEnv,
isUserLoading,
isGoogleLoading,
isSupabaseAvailable,
handleSubmit,
handleProviderSignup,
} = useSignupPage();
const turnstile = useTurnstile({
action: "signup",
autoVerify: false,
resetOnError: true,
});
const form = useForm<z.infer<typeof signupFormSchema>>({
resolver: zodResolver(signupFormSchema),
defaultValues: {
email: "",
password: "",
confirmPassword: "",
agreeToTerms: false,
},
});
const onSignup = useCallback(
async (data: z.infer<typeof signupFormSchema>) => {
setIsLoading(true);
if (!(await form.trigger())) {
setIsLoading(false);
return;
}
if (!turnstile.verified) {
setFeedback("Please complete the CAPTCHA challenge.");
setIsLoading(false);
return;
}
const error = await signup(data, turnstile.token as string);
setIsLoading(false);
if (error) {
if (error === "user_already_exists") {
setFeedback("User with this email already exists");
turnstile.reset();
return;
} else {
setFeedback(error);
turnstile.reset();
}
return;
}
setFeedback(null);
},
[form, turnstile],
);
if (user) {
console.debug("User exists, redirecting to /");
router.push("/");
}
if (isUserLoading || user) {
if (isUserLoading || isLoggedIn) {
return <LoadingBox className="h-[80vh]" />;
}
if (!supabase) {
if (!isSupabaseAvailable) {
return (
<div>
User accounts are disabled because Supabase client is unavailable
@@ -108,8 +56,26 @@ export default function SignupPage() {
return (
<AuthCard className="mx-auto mt-12">
<AuthHeader>Create a new account</AuthHeader>
{isCloudEnv ? (
<>
<div className="mb-6">
<GoogleOAuthButton
onClick={() => handleProviderSignup("google")}
isLoading={isGoogleLoading}
disabled={isLoading}
/>
</div>
<div className="mb-6 flex items-center">
<div className="flex-1 border-t border-gray-300"></div>
<span className="mx-3 text-sm text-gray-500">or</span>
<div className="flex-1 border-t border-gray-300"></div>
</div>
</>
) : null}
<Form {...form}>
<form onSubmit={form.handleSubmit(onSignup)}>
<form onSubmit={handleSubmit}>
<FormField
control={form.control}
name="email"
@@ -160,6 +126,7 @@ export default function SignupPage() {
{/* Turnstile CAPTCHA Component */}
<Turnstile
key={captchaKey}
siteKey={turnstile.siteKey}
onVerify={turnstile.handleVerify}
onExpire={turnstile.handleExpire}
@@ -169,11 +136,7 @@ export default function SignupPage() {
shouldRender={turnstile.shouldRender}
/>
<AuthButton
onClick={() => onSignup(form.getValues())}
isLoading={isLoading}
type="submit"
>
<AuthButton isLoading={isLoading} type="submit">
Sign up
</AuthButton>
<FormField

View File

@@ -0,0 +1,110 @@
import { useTurnstile } from "@/hooks/useTurnstile";
import useSupabase from "@/lib/supabase/useSupabase";
import { signupFormSchema, LoginProvider } from "@/types/auth";
import { zodResolver } from "@hookform/resolvers/zod";
import { useRouter } from "next/navigation";
import { useCallback, useEffect, useState } from "react";
import { useForm } from "react-hook-form";
import { signup } from "./actions";
import { providerLogin } from "../login/actions";
import z from "zod";
import { BehaveAs, getBehaveAs } from "@/lib/utils";
export function useSignupPage() {
const { supabase, user, isUserLoading } = useSupabase();
const [feedback, setFeedback] = useState<string | null>(null);
const [captchaKey, setCaptchaKey] = useState(0);
const router = useRouter();
const [isLoading, setIsLoading] = useState(false);
const [isGoogleLoading, setIsGoogleLoading] = useState(false);
const isCloudEnv = getBehaveAs() === BehaveAs.CLOUD;
const turnstile = useTurnstile({
action: "signup",
autoVerify: false,
resetOnError: true,
});
const resetCaptcha = useCallback(() => {
setCaptchaKey((k) => k + 1);
turnstile.reset();
}, [turnstile]);
const form = useForm<z.infer<typeof signupFormSchema>>({
resolver: zodResolver(signupFormSchema),
defaultValues: {
email: "",
password: "",
confirmPassword: "",
agreeToTerms: false,
},
});
useEffect(() => {
if (user) router.push("/");
}, [user]);
async function handleProviderSignup(provider: LoginProvider) {
setIsGoogleLoading(true);
const error = await providerLogin(provider);
setIsGoogleLoading(false);
if (error) {
resetCaptcha();
setFeedback(error);
return;
}
setFeedback(null);
}
async function handleSignup(data: z.infer<typeof signupFormSchema>) {
setIsLoading(true);
if (!turnstile.verified) {
setFeedback("Please complete the CAPTCHA challenge.");
setIsLoading(false);
resetCaptcha();
return;
}
if (data.email.includes("@agpt.co")) {
setFeedback(
"Please use Google SSO to create an account using an AutoGPT email.",
);
setIsLoading(false);
resetCaptcha();
return;
}
const error = await signup(data, turnstile.token as string);
setIsLoading(false);
if (error) {
if (error === "user_already_exists") {
setFeedback("User with this email already exists");
turnstile.reset();
return;
} else {
setFeedback(error);
resetCaptcha();
turnstile.reset();
}
return;
}
setFeedback(null);
}
return {
form,
feedback,
turnstile,
captchaKey,
isLoggedIn: !!user,
isLoading,
isCloudEnv,
isUserLoading,
isGoogleLoading,
isSupabaseAvailable: !!supabase,
handleSubmit: form.handleSubmit(handleSignup),
handleProviderSignup,
};
}

View File

@@ -82,6 +82,7 @@ export type CustomNodeData = {
executionResults?: {
execId: string;
data: NodeExecutionResult["output_data"];
status: NodeExecutionResult["status"];
}[];
block_id: string;
backend_id?: string;

View File

@@ -14,7 +14,7 @@ const buttonVariants = cva(
destructive:
"bg-red-600 text-neutral-50 border border-red-500/50 hover:bg-red-500/90 dark:bg-red-700 dark:text-neutral-50 dark:hover:bg-red-600",
accent: "bg-accent text-accent-foreground hover:bg-violet-500",
primary: "bg-neutral-800 text-white hover:bg-black/60",
primary: "bg-zinc-700 text-white hover:bg-zinc-800 text-white",
outline:
"border border-black/50 text-neutral-800 hover:bg-neutral-100 dark:bg-neutral-800 dark:text-neutral-100 dark:hover:bg-neutral-700",
secondary:

View File

@@ -1,25 +1,26 @@
import { ReactNode } from "react";
import { Button } from "../ui/button";
import { FaSpinner } from "react-icons/fa";
import { Button } from "../ui/button";
interface Props {
children?: ReactNode;
onClick: () => void;
isLoading?: boolean;
disabled?: boolean;
type?: "button" | "submit" | "reset";
onClick?: () => void;
}
export default function AuthButton({
children,
onClick,
isLoading = false,
disabled = false,
type = "button",
onClick,
}: Props) {
return (
<Button
className="mt-2 w-full self-stretch rounded-md bg-slate-900 px-4 py-2"
className="mt-2 w-full px-4 py-2 text-zinc-800"
variant="outline"
type={type}
disabled={isLoading || disabled}
onClick={onClick}
@@ -27,9 +28,7 @@ export default function AuthButton({
{isLoading ? (
<FaSpinner className="animate-spin" />
) : (
<div className="text-sm font-medium leading-normal text-slate-50">
{children}
</div>
<div className="text-sm font-medium">{children}</div>
)}
</Button>
);

View File

@@ -0,0 +1,33 @@
import { useState } from "react";
import { FaGoogle, FaSpinner } from "react-icons/fa";
import { Button } from "../ui/button";
interface GoogleOAuthButtonProps {
onClick: () => void;
isLoading?: boolean;
disabled?: boolean;
}
export default function GoogleOAuthButton({
onClick,
isLoading = false,
disabled = false,
}: GoogleOAuthButtonProps) {
return (
<Button
type="button"
className="w-full border bg-zinc-700 py-2 text-white disabled:opacity-50"
disabled={isLoading || disabled}
onClick={onClick}
>
{isLoading ? (
<FaSpinner className="mr-2 h-4 w-4 animate-spin" />
) : (
<FaGoogle className="mr-2 h-4 w-4" />
)}
<span className="text-sm font-medium">
{isLoading ? "Signing in..." : "Continue with Google"}
</span>
</Button>
);
}

View File

@@ -3,6 +3,7 @@ import AuthButton from "./AuthButton";
import AuthCard from "./AuthCard";
import AuthFeedback from "./AuthFeedback";
import AuthHeader from "./AuthHeader";
import GoogleOAuthButton from "./GoogleOAuthButton";
import { PasswordInput } from "./PasswordInput";
import Turnstile from "./Turnstile";
@@ -12,6 +13,7 @@ export {
AuthCard,
AuthFeedback,
AuthHeader,
GoogleOAuthButton,
PasswordInput,
Turnstile,
};

View File

@@ -9,19 +9,6 @@
margin-bottom: 1rem;
}
.custom-node input:not([type="checkbox"]):not([type="file"]),
.custom-node textarea,
.custom-node select,
.custom-node [data-id^="date-picker"],
.custom-node [data-list-container],
.custom-node [data-add-item],
.custom-node [data-content-settings] .array-item-container {
display: flex;
align-items: center;
min-width: calc(100% - 2.5rem);
max-width: 100%;
}
.custom-node .custom-switch {
padding: 0.5rem 1.25rem;
display: flex;

View File

@@ -229,7 +229,6 @@ const NodeFileInput: FC<{
const handleFileChange = useCallback(
(event: React.ChangeEvent<HTMLInputElement>) => {
const file = event.target.files?.[0];
console.log(">>> file", file);
if (!file) return;
const reader = new FileReader();

View File

@@ -354,6 +354,55 @@ export default function useAgentGraph(
[getFrontendId, nodes],
);
const addExecutionDataToNode = useCallback(
(node: CustomNode, executionData: NodeExecutionResult) => {
if (!executionData.output_data) {
console.warn(
`Execution data for node ${executionData.node_id} is empty, skipping update`,
);
return node;
}
const executionResults = [
// Execution updates are not cumulative, so we need to filter out the old ones.
...(node.data.executionResults?.filter(
(result) => result.execId !== executionData.node_exec_id,
) || []),
{
execId: executionData.node_exec_id,
data: {
"[Input]": [executionData.input_data],
...executionData.output_data,
},
status: executionData.status,
},
];
const statusRank = {
RUNNING: 0,
QUEUED: 1,
INCOMPLETE: 2,
TERMINATED: 3,
COMPLETED: 4,
FAILED: 5,
};
const status = executionResults
.map((v) => v.status)
.reduce((a, b) => (statusRank[a] < statusRank[b] ? a : b));
return {
...node,
data: {
...node.data,
status,
executionResults,
isOutputOpen: true,
},
};
},
[],
);
const updateNodesWithExecutionData = useCallback(
(executionData: NodeExecutionResult) => {
if (!executionData.node_id) return;
@@ -374,31 +423,7 @@ export default function useAgentGraph(
}
return nodes.map((node) =>
node.id === nodeId
? {
...node,
data: {
...node.data,
status: executionData.status,
executionResults:
Object.keys(executionData.output_data).length > 0
? [
// Execution updates are not cumulative, so we need to filter out the old ones.
...(node.data.executionResults?.filter(
(result) =>
result.execId !== executionData.node_exec_id,
) || []),
{
execId: executionData.node_exec_id,
data: {
"[Input]": [executionData.input_data],
...executionData.output_data,
},
},
]
: node.data.executionResults,
isOutputOpen: true,
},
}
? addExecutionDataToNode(node, executionData)
: node,
);
});
@@ -694,20 +719,17 @@ export default function useAgentGraph(
return [...prev, ...execution.node_executions];
});
// Track execution until completed
const pendingNodeExecutions: Set<string> = new Set();
const cancelExecListener = api.onWebSocketMessage(
"node_execution_event",
(nodeResult) => {
// We are racing the server here, since we need the ID to filter events
if (nodeResult.graph_exec_id != flowExecutionID) {
const cancelGraphExecListener = api.onWebSocketMessage(
"graph_execution_event",
(graphExec) => {
if (graphExec.id != flowExecutionID) {
return;
}
if (
nodeResult.status === "FAILED" &&
nodeResult.output_data?.error?.[0]
.toLowerCase()
.includes("insufficient balance")
graphExec.status === "FAILED" &&
graphExec?.stats?.error
?.toLowerCase()
?.includes("insufficient balance")
) {
// Show no credits toast if user has low credits
toast({
@@ -731,17 +753,11 @@ export default function useAgentGraph(
});
}
if (
!["COMPLETED", "TERMINATED", "FAILED"].includes(nodeResult.status)
graphExec.status === "COMPLETED" ||
graphExec.status === "TERMINATED" ||
graphExec.status === "FAILED"
) {
pendingNodeExecutions.add(nodeResult.node_exec_id);
} else {
pendingNodeExecutions.delete(nodeResult.node_exec_id);
}
if (pendingNodeExecutions.size == 0) {
// Assuming the first event is always a QUEUED node, and
// following nodes are QUEUED before all preceding nodes are COMPLETED,
// an empty set means the graph has finished running.
cancelExecListener();
cancelGraphExecListener();
setSaveRunRequest({ request: "none", state: "none" });
incrementRuns();
}

View File

@@ -47,8 +47,7 @@ export function useTurnstile({
useEffect(() => {
const behaveAs = getBehaveAs();
const hasTurnstileKey = !!TURNSTILE_SITE_KEY;
const turnstileDisabled =
process.env.NEXT_PUBLIC_DISABLE_TURNSTILE === "true";
const turnstileDisabled = process.env.NEXT_PUBLIC_TURNSTILE !== "enabled";
// Only render Turnstile in cloud environment if not explicitly disabled
setShouldRender(

View File

@@ -89,21 +89,22 @@ export default class BackendAPI {
this.wsUrl = wsUrl;
}
private get supabaseClient(): SupabaseClient | null {
private async getSupabaseClient(): Promise<SupabaseClient | null> {
return isClient
? createBrowserClient(
process.env.NEXT_PUBLIC_SUPABASE_URL!,
process.env.NEXT_PUBLIC_SUPABASE_ANON_KEY!,
{ isSingleton: true },
)
: getServerSupabase();
: await getServerSupabase();
}
async isAuthenticated(): Promise<boolean> {
if (!this.supabaseClient) return false;
const supabaseClient = await this.getSupabaseClient();
if (!supabaseClient) return false;
const {
data: { session },
} = await this.supabaseClient.auth.getSession();
} = await supabaseClient.auth.getSession();
return session != null;
}
@@ -750,9 +751,10 @@ export default class BackendAPI {
const maxRetries = 3;
while (retryCount < maxRetries) {
const supabaseClient = await this.getSupabaseClient();
const {
data: { session },
} = (await this.supabaseClient?.auth.getSession()) || {
} = (await supabaseClient?.auth.getSession()) || {
data: { session: null },
};
@@ -803,9 +805,10 @@ export default class BackendAPI {
const maxRetries = 3;
while (retryCount < maxRetries) {
const supabaseClient = await this.getSupabaseClient();
const {
data: { session },
} = (await this.supabaseClient?.auth.getSession()) || {
} = (await supabaseClient?.auth.getSession()) || {
data: { session: null },
};
@@ -980,8 +983,9 @@ export default class BackendAPI {
async connectWebSocket(): Promise<void> {
return (this.wsConnecting ??= new Promise(async (resolve, reject) => {
try {
const supabaseClient = await this.getSupabaseClient();
const token =
(await this.supabaseClient?.auth.getSession())?.data.session
(await supabaseClient?.auth.getSession())?.data.session
?.access_token || "";
const wsUrlWithToken = `${this.wsUrl}?token=${token}`;
this.webSocket = new WebSocket(wsUrlWithToken);

View File

@@ -281,6 +281,7 @@ export type GraphExecutionMeta = {
started_at: Date;
ended_at: Date;
stats?: {
error?: string;
cost: number;
duration: number;
duration_cpu_only: number;

View File

@@ -1,10 +1,10 @@
import type { UnsafeUnwrappedCookies } from "next/headers";
import { createServerClient } from "@supabase/ssr";
export default function getServerSupabase() {
export default async function getServerSupabase() {
// Need require here, so Next.js doesn't complain about importing this on client side
const { cookies } = require("next/headers");
const cookieStore = cookies() as UnsafeUnwrappedCookies;
const cookieStore = await cookies();
try {
const supabase = createServerClient(

View File

@@ -1,7 +1,7 @@
import getServerSupabase from "./getServerSupabase";
const getServerUser = async () => {
const supabase = getServerSupabase();
const supabase = await getServerSupabase();
if (!supabase) {
return { user: null, error: "Failed to create Supabase client" };

View File

@@ -7,8 +7,8 @@ export async function verifyTurnstileToken(
token: string,
action?: string,
): Promise<boolean> {
// Skip verification if explicitly disabled via environment variable
if (process.env.NEXT_PUBLIC_DISABLE_TURNSTILE === "true") {
// Skip verification unless explicitly enabled via environment variable
if (process.env.NEXT_PUBLIC_TURNSTILE !== "enabled") {
return true;
}

View File

@@ -342,6 +342,12 @@ To run the tests:
poetry run test
```
To update stored snapshots after intentional API changes:
```sh
pytest --snapshot-update
```
## Project Outline
The current project has the following main modules:

View File

@@ -13,13 +13,31 @@ Follow these steps to set up and run Ollama with the AutoGPT platform.
### 1. Launch Ollama
Open a new terminal and execute:
To properly set up Ollama for network access, follow these steps:
```bash
ollama run llama3.2
```
1. **Set the host environment variable:**
> **Note**: This will download the [llama3.2](https://ollama.com/library/llama3.2) model and start the service. Keep this terminal running in the background.
**Windows (Command Prompt):**
```
set OLLAMA_HOST=0.0.0.0:11434
```
**Linux/macOS (Terminal):**
```bash
export OLLAMA_HOST=0.0.0.0:11434
```
2. Start the Ollama server:
```
ollama serve
```
3. **Open a new terminal/command window** and download your desired model:
```
ollama pull llama3.2
```
> **Note**: This will download the [llama3.2](https://ollama.com/library/llama3.2) model. Keep the terminal with `ollama serve` running in the background throughout your session.
### 2. Start the Backend
@@ -53,7 +71,38 @@ Now that both Ollama and the AutoGPT platform are running we can move onto using
2. In the "LLM Model" dropdown, select "llama3.2" (This is the model we downloaded earlier)
![Select Ollama Model](../imgs/ollama/Ollama-Select-Llama32.png)
3. Now we need to add some prompts then save and then run the graph:
> **Compatible Models**: Not all models work with Ollama in AutoGPT. Here are the models that are confirmed to work:
> - `llama3.2`
> - `llama3`
> - `llama3.1:405b`
> - `dolphin-mistral:latest`
3. **Set your local IP address** in the "Ollama Host" field:
**To find your local IP address:**
**Windows (Command Prompt):**
```
ipconfig
```
**Linux/macOS (Terminal):**
```bash
ip addr show
```
or
```bash
ipconfig
```
Look for your IPv4 address (e.g., `192.168.0.39`), then enter it with port `11434` in the "Ollama Host" field:
```
192.168.0.39:11434
```
![Ollama Remote Host](../imgs/ollama/Ollama-Remote-Host.png)
4. Now we need to add some prompts then save and then run the graph:
![Add Prompt](../imgs/ollama/Ollama-Add-Prompts.png)
That's it! You've successfully setup the AutoGPT platform and made a LLM call to Ollama.
@@ -61,7 +110,30 @@ That's it! You've successfully setup the AutoGPT platform and made a LLM call to
### Using Ollama on a Remote Server with AutoGPT
For running Ollama on a remote server, simply make sure the Ollama server is running and is accessible from other devices on your network/remotely through the port 11434, then you can use the same steps above but you need to add the Ollama servers IP address to the "Ollama Host" field in the block settings like so:
For running Ollama on a remote server, simply make sure the Ollama server is running and is accessible from other devices on your network/remotely through the port 11434.
**To find your local IP address of the system running Ollama:**
**Windows (Command Prompt):**
```
ipconfig
```
**Linux/macOS (Terminal):**
```bash
ip addr show
```
or
```bash
ipconfig
```
Look for your IPv4 address (e.g., `192.168.0.39`).
Then you can use the same steps above but you need to add the Ollama server's IP address to the "Ollama Host" field in the block settings like so:
```
192.168.0.39:11434
```
![Ollama Remote Host](../imgs/ollama/Ollama-Remote-Host.png)

Some files were not shown because too many files have changed in this diff Show More