Mirror of https://github.com/Significant-Gravitas/AutoGPT.git
Synced 2026-01-12 00:28:31 -05:00

Compare commits: fix/schedu… → update-ins… (46 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 82bddd885b |  |
|  | c6247f265e |  |
|  | 38610d1e7a |  |
|  | ebfbf31c73 |  |
|  | 4abe37396c |  |
|  | fa14bf461b |  |
|  | e2c33e3d2a |  |
|  | 650be0d1f7 |  |
|  | 35bd7f7f7a |  |
|  | 312cb0227f |  |
|  | a8feb3c8d0 |  |
|  | 5da5c2ecd6 |  |
|  | ba65fee862 |  |
|  | 908dcd7b4b |  |
|  | 542f951dd8 |  |
|  | 72938590f2 |  |
|  | 5d364e13f6 |  |
|  | 32513b26ab |  |
|  | bf92e7dbc8 |  |
|  | 6fce3a09ea |  |
|  | 9158d4b6a2 |  |
|  | 2403931c2e |  |
|  | af58b316a2 |  |
|  | 03e3e2ea9a |  |
|  | 6bb6a081a2 |  |
|  | df20b70f44 |  |
|  | 21faf1b677 |  |
|  | b53c373a59 |  |
|  | 4bfeddc03d |  |
|  | af7d56612d |  |
|  | 0dd30e275c |  |
|  | c71406af8b |  |
|  | a135f09336 |  |
|  | 2d436caa84 |  |
|  | 34dd218a91 |  |
|  | 41f500790f |  |
|  | 793de77e76 |  |
|  | a2059c6023 |  |
|  | b9c3920227 |  |
|  | abba10b649 |  |
|  | 6c34790b42 |  |
|  | c168277b1d |  |
|  | 89eb5d1189 |  |
|  | 468d1af802 |  |
|  | a2c88c7786 |  |
|  | e79b7a95dc |  |
@@ -15,6 +15,7 @@
!autogpt_platform/backend/pyproject.toml
!autogpt_platform/backend/poetry.lock
!autogpt_platform/backend/README.md
!autogpt_platform/backend/.env

# Platform - Market
!autogpt_platform/market/market/
@@ -27,6 +28,7 @@
# Platform - Frontend
!autogpt_platform/frontend/src/
!autogpt_platform/frontend/public/
!autogpt_platform/frontend/scripts/
!autogpt_platform/frontend/package.json
!autogpt_platform/frontend/pnpm-lock.yaml
!autogpt_platform/frontend/tsconfig.json
@@ -34,6 +36,7 @@
## config
!autogpt_platform/frontend/*.config.*
!autogpt_platform/frontend/.env.*
!autogpt_platform/frontend/.env

# Classic - AutoGPT
!classic/original_autogpt/autogpt/
3  .github/PULL_REQUEST_TEMPLATE.md  vendored
@@ -24,7 +24,8 @@
</details>

#### For configuration changes:
- [ ] `.env.example` is updated or already compatible with my changes
- [ ] `.env.default` is updated or already compatible with my changes
- [ ] `docker-compose.yml` is updated or already compatible with my changes
- [ ] I have included a list of my configuration changes in the PR description (under **Changes**)
46  .github/workflows/platform-frontend-ci.yml  vendored
@@ -82,37 +82,6 @@ jobs:
      - name: Run lint
        run: pnpm lint

  type-check:
    runs-on: ubuntu-latest
    needs: setup

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "21"

      - name: Enable corepack
        run: corepack enable

      - name: Restore dependencies cache
        uses: actions/cache@v4
        with:
          path: ~/.pnpm-store
          key: ${{ needs.setup.outputs.cache-key }}
          restore-keys: |
            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
            ${{ runner.os }}-pnpm-

      - name: Install dependencies
        run: pnpm install --frozen-lockfile

      - name: Run tsc check
        run: pnpm type-check

  chromatic:
    runs-on: ubuntu-latest
    needs: setup
@@ -176,11 +145,7 @@ jobs:

      - name: Copy default supabase .env
        run: |
          cp ../.env.example ../.env

      - name: Copy backend .env
        run: |
          cp ../backend/.env.example ../backend/.env
          cp ../.env.default ../.env

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
@@ -252,15 +217,6 @@ jobs:
      - name: Install dependencies
        run: pnpm install --frozen-lockfile

      - name: Setup .env
        run: cp .env.example .env

      - name: Build frontend
        run: pnpm build --turbo
        # uses Turbopack, much faster and safe enough for a test pipeline
        env:
          NEXT_PUBLIC_PW_TEST: true

      - name: Install Browser 'chromium'
        run: pnpm playwright install --with-deps chromium
132  .github/workflows/platform-fullstack-ci.yml  vendored  Normal file
@@ -0,0 +1,132 @@
name: AutoGPT Platform - Frontend CI

on:
  push:
    branches: [master, dev]
    paths:
      - ".github/workflows/platform-fullstack-ci.yml"
      - "autogpt_platform/**"
  pull_request:
    paths:
      - ".github/workflows/platform-fullstack-ci.yml"
      - "autogpt_platform/**"
  merge_group:

defaults:
  run:
    shell: bash
    working-directory: autogpt_platform/frontend

jobs:
  setup:
    runs-on: ubuntu-latest
    outputs:
      cache-key: ${{ steps.cache-key.outputs.key }}

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "21"

      - name: Enable corepack
        run: corepack enable

      - name: Generate cache key
        id: cache-key
        run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}" >> $GITHUB_OUTPUT

      - name: Cache dependencies
        uses: actions/cache@v4
        with:
          path: ~/.pnpm-store
          key: ${{ steps.cache-key.outputs.key }}
          restore-keys: |
            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
            ${{ runner.os }}-pnpm-

      - name: Install dependencies
        run: pnpm install --frozen-lockfile

  types:
    runs-on: ubuntu-latest
    needs: setup
    strategy:
      fail-fast: false

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          submodules: recursive

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "21"

      - name: Enable corepack
        run: corepack enable

      - name: Copy default supabase .env
        run: |
          cp ../.env.default ../.env

      - name: Copy backend .env
        run: |
          cp ../backend/.env.default ../backend/.env

      - name: Run docker compose
        run: |
          docker compose -f ../docker-compose.yml --profile local --profile deps_backend up -d

      - name: Restore dependencies cache
        uses: actions/cache@v4
        with:
          path: ~/.pnpm-store
          key: ${{ needs.setup.outputs.cache-key }}
          restore-keys: |
            ${{ runner.os }}-pnpm-

      - name: Install dependencies
        run: pnpm install --frozen-lockfile

      - name: Setup .env
        run: cp .env.default .env

      - name: Wait for services to be ready
        run: |
          echo "Waiting for rest_server to be ready..."
          timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
          echo "Waiting for database to be ready..."
          timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."

      - name: Generate API queries
        run: pnpm generate:api:force

      - name: Check for API schema changes
        run: |
          if ! git diff --exit-code src/app/api/openapi.json; then
            echo "❌ API schema changes detected in src/app/api/openapi.json"
            echo ""
            echo "The openapi.json file has been modified after running 'pnpm generate:api-all'."
            echo "This usually means changes have been made in the BE endpoints without updating the Frontend."
            echo "The API schema is now out of sync with the Front-end queries."
            echo ""
            echo "To fix this:"
            echo "1. Pull the backend: 'docker compose pull && docker compose up -d --build --force-recreate'"
            echo "2. Run 'pnpm generate:api' locally"
            echo "3. Run 'pnpm types' locally"
            echo "4. Fix any TypeScript errors that may have been introduced"
            echo "5. Commit and push your changes"
            echo ""
            exit 1
          else
            echo "✅ No API schema changes detected"
          fi

      - name: Run Typescript checks
        run: pnpm types
3  .gitignore  vendored
@@ -5,6 +5,8 @@ classic/original_autogpt/*.json
auto_gpt_workspace/*
*.mpeg
.env
# Root .env files
/.env
azure.yaml
.vscode
.idea/*
@@ -121,7 +123,6 @@ celerybeat.pid

# Environments
.direnv/
.env
.venv
env/
venv*/
.pre-commit-config.yaml
@@ -235,7 +235,7 @@ repos:
  hooks:
    - id: tsc
      name: Typecheck - AutoGPT Platform - Frontend
      entry: bash -c 'cd autogpt_platform/frontend && pnpm type-check'
      entry: bash -c 'cd autogpt_platform/frontend && pnpm types'
      files: ^autogpt_platform/frontend/
      types: [file]
      language: system
10  README.md
@@ -3,6 +3,16 @@
[](https://discord.gg/autogpt)  
[](https://twitter.com/Auto_GPT)  

<!-- Keep these links. Translations will automatically update with the README. -->
[Deutsch](https://zdoc.app/de/Significant-Gravitas/AutoGPT) |
[Español](https://zdoc.app/es/Significant-Gravitas/AutoGPT) |
[français](https://zdoc.app/fr/Significant-Gravitas/AutoGPT) |
[日本語](https://zdoc.app/ja/Significant-Gravitas/AutoGPT) |
[한국어](https://zdoc.app/ko/Significant-Gravitas/AutoGPT) |
[Português](https://zdoc.app/pt/Significant-Gravitas/AutoGPT) |
[Русский](https://zdoc.app/ru/Significant-Gravitas/AutoGPT) |
[中文](https://zdoc.app/zh/Significant-Gravitas/AutoGPT)

**AutoGPT** is a powerful platform that allows you to create, deploy, and manage continuous AI agents that automate complex workflows.

## Hosting Options
@@ -1,9 +1,11 @@
# CLAUDE.md

This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.

## Repository Overview

AutoGPT Platform is a monorepo containing:

- **Backend** (`/backend`): Python FastAPI server with async support
- **Frontend** (`/frontend`): Next.js React application
- **Shared Libraries** (`/autogpt_libs`): Common Python utilities
@@ -11,6 +13,7 @@ AutoGPT Platform is a monorepo containing:
## Essential Commands

### Backend Development

```bash
# Install dependencies
cd backend && poetry install
@@ -41,6 +44,7 @@ poetry run pytest 'backend/blocks/test/test_block.py::test_available_blocks[GetC
poetry run format  # Black + isort
poetry run lint    # ruff
```

More details can be found in TESTING.md

#### Creating/Updating Snapshots
@@ -53,8 +57,8 @@ poetry run pytest path/to/test.py --snapshot-update

⚠️ **Important**: Always review snapshot changes before committing! Use `git diff` to verify the changes are expected.


### Frontend Development

```bash
# Install dependencies
cd frontend && npm install
@@ -72,12 +76,13 @@ npm run storybook
npm run build

# Type checking
npm run type-check
npm run types
```

## Architecture Overview

### Backend Architecture

- **API Layer**: FastAPI with REST and WebSocket endpoints
- **Database**: PostgreSQL with Prisma ORM, includes pgvector for embeddings
- **Queue System**: RabbitMQ for async task processing
@@ -86,6 +91,7 @@ npm run type-check
- **Security**: Cache protection middleware prevents sensitive data caching in browsers/proxies

### Frontend Architecture

- **Framework**: Next.js App Router with React Server Components
- **State Management**: React hooks + Supabase client for real-time updates
- **Workflow Builder**: Visual graph editor using @xyflow/react
@@ -93,6 +99,7 @@ npm run type-check
- **Feature Flags**: LaunchDarkly integration

### Key Concepts

1. **Agent Graphs**: Workflow definitions stored as JSON, executed by the backend
2. **Blocks**: Reusable components in `/backend/blocks/` that perform specific tasks
3. **Integrations**: OAuth and API connections stored per user
@@ -100,13 +107,16 @@ npm run type-check
5. **Virus Scanning**: ClamAV integration for file upload security

### Testing Approach

- Backend uses pytest with snapshot testing for API responses
- Test files are colocated with source files (`*_test.py`)
- Frontend uses Playwright for E2E tests
- Component testing via Storybook

### Database Schema

Key models (defined in `/backend/schema.prisma`):

- `User`: Authentication and profile data
- `AgentGraph`: Workflow definitions with version control
- `AgentGraphExecution`: Execution history and results
@@ -114,13 +124,31 @@ Key models (defined in `/backend/schema.prisma`):
- `StoreListing`: Marketplace listings for sharing agents

### Environment Configuration
- Backend: `.env` file in `/backend`
- Frontend: `.env.local` file in `/frontend`
- Both require Supabase credentials and API keys for various services

#### Configuration Files

- **Backend**: `/backend/.env.default` (defaults) → `/backend/.env` (user overrides)
- **Frontend**: `/frontend/.env.default` (defaults) → `/frontend/.env` (user overrides)
- **Platform**: `/.env.default` (Supabase/shared defaults) → `/.env` (user overrides)

#### Docker Environment Loading Order

1. `.env.default` files provide base configuration (tracked in git)
2. `.env` files provide user-specific overrides (gitignored)
3. Docker Compose `environment:` sections provide service-specific overrides
4. Shell environment variables have highest precedence

#### Key Points

- All services use hardcoded defaults in docker-compose files (no `${VARIABLE}` substitutions)
- The `env_file` directive loads variables INTO containers at runtime
- Backend/Frontend services use YAML anchors for consistent configuration
- Supabase services (`db/docker/docker-compose.yml`) follow the same pattern

### Common Development Tasks

**Adding a new block:**

1. Create new file in `/backend/backend/blocks/`
2. Inherit from `Block` base class
3. Define input/output schemas
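To make these steps concrete, here is a rough sketch of what such a block might look like. The import path, the `BlockSchema`/`BlockOutput` names, and the constructor arguments are assumptions; copy the structure of an existing file in `/backend/backend/blocks/` rather than this sketch.

```python
# Hypothetical sketch only; mirror a real block in /backend/backend/blocks/ for the actual API.
from backend.data.block import Block, BlockOutput, BlockSchema  # assumed import path


class GreetingBlock(Block):
    """Example block that turns a name into a greeting."""

    class Input(BlockSchema):
        name: str

    class Output(BlockSchema):
        greeting: str

    def __init__(self):
        super().__init__(
            id="00000000-0000-0000-0000-000000000000",  # placeholder UUID
            input_schema=GreetingBlock.Input,
            output_schema=GreetingBlock.Output,
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        # Blocks emit (output_name, value) pairs for the graph executor
        yield "greeting", f"Hello, {input_data.name}!"
```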
@@ -132,12 +160,14 @@ Note: when making many new blocks analyze the interfaces for each of these blocks
ex: do the inputs and outputs tie well together?

**Modifying the API:**

1. Update route in `/backend/backend/server/routers/`
2. Add/update Pydantic models in same directory
3. Write tests alongside the route file
4. Run `poetry run test` to verify

**Frontend feature development:**

1. Components go in `/frontend/src/components/`
2. Use existing UI components from `/frontend/src/components/ui/`
3. Add Storybook stories for new components
@@ -146,6 +176,7 @@ ex: do the inputs and outputs tie well together?
### Security Implementation

**Cache Protection Middleware:**

- Located in `/backend/backend/server/middleware/security.py`
- Default behavior: Disables caching for ALL endpoints with `Cache-Control: no-store, no-cache, must-revalidate, private`
- Uses an allow-list approach - only explicitly permitted paths can be cached
@@ -154,14 +185,20 @@ ex: do the inputs and outputs tie well together?
- To allow caching for a new endpoint, add it to `CACHEABLE_PATHS` in the middleware
- Applied to both main API server and external API applications
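A minimal sketch of the default-deny, allow-list pattern described above, written against Starlette's middleware API. The contents of `CACHEABLE_PATHS` here are placeholders; the real list and logic live in `/backend/backend/server/middleware/security.py`.

```python
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request
from starlette.responses import Response

CACHEABLE_PATHS = {"/health", "/static"}  # illustrative allow list


class CacheProtectionMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next) -> Response:
        response = await call_next(request)
        # Default-deny: anything not explicitly allow-listed must not be cached
        if not any(request.url.path.startswith(p) for p in CACHEABLE_PATHS):
            response.headers["Cache-Control"] = (
                "no-store, no-cache, must-revalidate, private"
            )
        return response
```

Registering it on a FastAPI app would be `app.add_middleware(CacheProtectionMiddleware)`.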

### Creating Pull Requests

- Create the PR against the `dev` branch of the repository.
- Ensure the branch name is descriptive (e.g., `feature/add-new-block`).
- Use conventional commit messages (see below).
- Fill out the .github/PULL_REQUEST_TEMPLATE.md template as the PR description.
- Run the GitHub pre-commit hooks to ensure code quality.

### Reviewing/Revising Pull Requests

- When the user runs /pr-comments or tries to fetch them, also run `gh api /repos/Significant-Gravitas/AutoGPT/pulls/[issuenum]/reviews` to get the reviews
- Use `gh api /repos/Significant-Gravitas/AutoGPT/pulls/[issuenum]/reviews/[review_id]/comments` to get the review contents
- Use `gh api /repos/Significant-Gravitas/AutoGPT/issues/9924/comments` to get the PR-specific comments

### Conventional Commits

Use this format for commit messages and Pull Request titles:
@@ -8,7 +8,6 @@ Welcome to the AutoGPT Platform - a powerful system for creating and running AI
- Docker
- Docker Compose V2 (comes with Docker Desktop, or can be installed separately)
- Node.js & NPM (for running the frontend application)

### Running the System

@@ -24,10 +23,10 @@ To run the AutoGPT Platform, follow these steps:
2. Run the following command:

   ```
   cp .env.example .env
   cp .env.default .env
   ```

   This command will copy the `.env.example` file to `.env`. You can modify the `.env` file to add your own environment variables.
   This command will copy the `.env.default` file to `.env`. You can modify the `.env` file to add your own environment variables.

3. Run the following command:

@@ -37,44 +36,7 @@ To run the AutoGPT Platform, follow these steps:

   This command will start all the necessary backend services defined in the `docker-compose.yml` file in detached mode.

4. Navigate to `frontend` within the `autogpt_platform` directory:

   ```
   cd frontend
   ```

   You will need to run your frontend application separately on your local machine.

5. Run the following command:

   ```
   cp .env.example .env.local
   ```

   This command will copy the `.env.example` file to `.env.local` in the `frontend` directory. You can modify the `.env.local` within this folder to add your own environment variables for the frontend application.

6. Run the following command:

   Enable corepack and install dependencies by running:

   ```
   corepack enable
   pnpm i
   ```

   Generate the API client (this step is required before running the frontend):

   ```
   pnpm generate:api-client
   ```

   Then start the frontend application in development mode:

   ```
   pnpm dev
   ```

7. Open your browser and navigate to `http://localhost:3000` to access the AutoGPT Platform frontend.
4. After all the services are ready, open your browser and navigate to `http://localhost:3000` to access the AutoGPT Platform frontend.

### Docker Compose Commands

@@ -177,20 +139,21 @@ The platform includes scripts for generating and managing the API client:

- `pnpm fetch:openapi`: Fetches the OpenAPI specification from the backend service (requires the backend to be running on port 8006)
- `pnpm generate:api-client`: Generates the TypeScript API client from the OpenAPI specification using Orval
- `pnpm generate:api-all`: Runs both fetch and generate commands in sequence
- `pnpm generate:api`: Runs both fetch and generate commands in sequence

#### Manual API Client Updates

If you need to update the API client after making changes to the backend API:

1. Ensure the backend services are running:

   ```
   docker compose up -d
   ```

2. Generate the updated API client:

   ```
   pnpm generate:api-all
   pnpm generate:api
   ```

This will fetch the latest OpenAPI specification and regenerate the TypeScript client code.
@@ -1,196 +0,0 @@
import asyncio
import contextlib
import logging
from functools import wraps
from typing import Any, Awaitable, Callable, Dict, Optional, TypeVar, Union, cast

import ldclient
from fastapi import HTTPException
from ldclient import Context, LDClient
from ldclient.config import Config
from typing_extensions import ParamSpec

from .config import SETTINGS

logger = logging.getLogger(__name__)

P = ParamSpec("P")
T = TypeVar("T")

_is_initialized = False


def get_client() -> LDClient:
    """Get the LaunchDarkly client singleton."""
    if not _is_initialized:
        initialize_launchdarkly()
    return ldclient.get()


def initialize_launchdarkly() -> None:
    sdk_key = SETTINGS.launch_darkly_sdk_key
    logger.debug(
        f"Initializing LaunchDarkly with SDK key: {'present' if sdk_key else 'missing'}"
    )

    if not sdk_key:
        logger.warning("LaunchDarkly SDK key not configured")
        return

    config = Config(sdk_key)
    ldclient.set_config(config)

    if ldclient.get().is_initialized():
        global _is_initialized
        _is_initialized = True
        logger.info("LaunchDarkly client initialized successfully")
    else:
        logger.error("LaunchDarkly client failed to initialize")


def shutdown_launchdarkly() -> None:
    """Shutdown the LaunchDarkly client."""
    if ldclient.get().is_initialized():
        ldclient.get().close()
        logger.info("LaunchDarkly client closed successfully")


def create_context(
    user_id: str, additional_attributes: Optional[Dict[str, Any]] = None
) -> Context:
    """Create LaunchDarkly context with optional additional attributes."""
    builder = Context.builder(str(user_id)).kind("user")
    if additional_attributes:
        for key, value in additional_attributes.items():
            builder.set(key, value)
    return builder.build()


def is_feature_enabled(flag_key: str, user_id: str, default: bool = False) -> bool:
    """
    Simple helper to check if a feature flag is enabled for a user.

    Args:
        flag_key: The LaunchDarkly feature flag key
        user_id: The user ID to evaluate the flag for
        default: Default value if LaunchDarkly is unavailable or flag evaluation fails

    Returns:
        True if feature is enabled, False otherwise
    """
    try:
        client = get_client()
        context = create_context(str(user_id))
        return client.variation(flag_key, context, default)
    except Exception as e:
        logger.debug(
            f"LaunchDarkly flag evaluation failed for {flag_key}: {e}, using default={default}"
        )
        return default


def feature_flag(
    flag_key: str,
    default: bool = False,
) -> Callable[
    [Callable[P, Union[T, Awaitable[T]]]], Callable[P, Union[T, Awaitable[T]]]
]:
    """
    Decorator for feature flag protected endpoints.
    """

    def decorator(
        func: Callable[P, Union[T, Awaitable[T]]],
    ) -> Callable[P, Union[T, Awaitable[T]]]:
        @wraps(func)
        async def async_wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
            try:
                user_id = kwargs.get("user_id")
                if not user_id:
                    raise ValueError("user_id is required")

                if not get_client().is_initialized():
                    logger.warning(
                        f"LaunchDarkly not initialized, using default={default}"
                    )
                    is_enabled = default
                else:
                    context = create_context(str(user_id))
                    is_enabled = get_client().variation(flag_key, context, default)

                if not is_enabled:
                    raise HTTPException(status_code=404, detail="Feature not available")

                result = func(*args, **kwargs)
                if asyncio.iscoroutine(result):
                    return await result
                return cast(T, result)
            except Exception as e:
                logger.error(f"Error evaluating feature flag {flag_key}: {e}")
                raise

        @wraps(func)
        def sync_wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
            try:
                user_id = kwargs.get("user_id")
                if not user_id:
                    raise ValueError("user_id is required")

                if not get_client().is_initialized():
                    logger.warning(
                        f"LaunchDarkly not initialized, using default={default}"
                    )
                    is_enabled = default
                else:
                    context = create_context(str(user_id))
                    is_enabled = get_client().variation(flag_key, context, default)

                if not is_enabled:
                    raise HTTPException(status_code=404, detail="Feature not available")

                return cast(T, func(*args, **kwargs))
            except Exception as e:
                logger.error(f"Error evaluating feature flag {flag_key}: {e}")
                raise

        return cast(
            Callable[P, Union[T, Awaitable[T]]],
            async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper,
        )

    return decorator


def percentage_rollout(
    flag_key: str,
    default: bool = False,
) -> Callable[
    [Callable[P, Union[T, Awaitable[T]]]], Callable[P, Union[T, Awaitable[T]]]
]:
    """Decorator for percentage-based rollouts."""
    return feature_flag(flag_key, default)


def beta_feature(
    flag_key: Optional[str] = None,
    unauthorized_response: Any = {"message": "Not available in beta"},
) -> Callable[
    [Callable[P, Union[T, Awaitable[T]]]], Callable[P, Union[T, Awaitable[T]]]
]:
    """Decorator for beta features."""
    actual_key = f"beta-{flag_key}" if flag_key else "beta"
    return feature_flag(actual_key, False)


@contextlib.contextmanager
def mock_flag_variation(flag_key: str, return_value: Any):
    """Context manager for testing feature flags."""
    original_variation = get_client().variation
    get_client().variation = lambda key, context, default: (
        return_value if key == flag_key else original_variation(key, context, default)
    )
    try:
        yield
    finally:
        get_client().variation = original_variation
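For context on how the removed decorator was consumed, a minimal usage sketch (the route path and flag key are invented): applied to a FastAPI endpoint, the wrapper raises the 404 seen above whenever the flag evaluates false for the calling user.

```python
from fastapi import FastAPI

from autogpt_libs.feature_flag.client import feature_flag

app = FastAPI()


@app.get("/beta/agent-insights")  # invented route
@feature_flag("agent-insights", default=False)  # invented flag key
async def agent_insights(user_id: str):
    # Reached only when the LaunchDarkly flag is on for user_id;
    # otherwise the decorator raises HTTPException(404).
    return {"enabled": True}
```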
@@ -1,84 +0,0 @@
import pytest
from ldclient import LDClient

from autogpt_libs.feature_flag.client import (
    feature_flag,
    is_feature_enabled,
    mock_flag_variation,
)


@pytest.fixture
def ld_client(mocker):
    client = mocker.Mock(spec=LDClient)
    mocker.patch("ldclient.get", return_value=client)
    client.is_initialized.return_value = True
    return client


@pytest.mark.asyncio
async def test_feature_flag_enabled(ld_client):
    ld_client.variation.return_value = True

    @feature_flag("test-flag")
    async def test_function(user_id: str):
        return "success"

    result = test_function(user_id="test-user")
    assert result == "success"
    ld_client.variation.assert_called_once()


@pytest.mark.asyncio
async def test_feature_flag_unauthorized_response(ld_client):
    ld_client.variation.return_value = False

    @feature_flag("test-flag")
    async def test_function(user_id: str):
        return "success"

    result = test_function(user_id="test-user")
    assert result == {"error": "disabled"}


def test_mock_flag_variation(ld_client):
    with mock_flag_variation("test-flag", True):
        assert ld_client.variation("test-flag", None, False)

    with mock_flag_variation("test-flag", False):
        assert ld_client.variation("test-flag", None, False)


def test_is_feature_enabled(ld_client):
    """Test the is_feature_enabled helper function."""
    ld_client.is_initialized.return_value = True
    ld_client.variation.return_value = True

    result = is_feature_enabled("test-flag", "user123", default=False)
    assert result is True

    ld_client.variation.assert_called_once()
    call_args = ld_client.variation.call_args
    assert call_args[0][0] == "test-flag"  # flag_key
    assert call_args[0][2] is False  # default value


def test_is_feature_enabled_not_initialized(ld_client):
    """Test is_feature_enabled when LaunchDarkly is not initialized."""
    ld_client.is_initialized.return_value = False

    result = is_feature_enabled("test-flag", "user123", default=True)
    assert result is True  # Should return default

    ld_client.variation.assert_not_called()


def test_is_feature_enabled_exception(mocker):
    """Test is_feature_enabled when get_client() raises an exception."""
    mocker.patch(
        "autogpt_libs.feature_flag.client.get_client",
        side_effect=Exception("Client error"),
    )

    result = is_feature_enabled("test-flag", "user123", default=True)
    assert result is True  # Should return default
@@ -1,15 +0,0 @@
from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict


class Settings(BaseSettings):
    launch_darkly_sdk_key: str = Field(
        default="",
        description="The Launch Darkly SDK key",
        validation_alias="LAUNCH_DARKLY_SDK_KEY",
    )

    model_config = SettingsConfigDict(case_sensitive=True, extra="ignore")


SETTINGS = Settings()
@@ -1,6 +1,8 @@
"""Logging module for Auto-GPT."""

import logging
import os
import socket
import sys
from pathlib import Path

@@ -10,6 +12,15 @@ from pydantic_settings import BaseSettings, SettingsConfigDict
from .filters import BelowLevelFilter
from .formatters import AGPTFormatter

# Configure global socket timeout and gRPC keepalive to prevent deadlocks
# This must be done at import time before any gRPC connections are established
socket.setdefaulttimeout(30)  # 30-second socket timeout

# Enable gRPC keepalive to detect dead connections faster
os.environ.setdefault("GRPC_KEEPALIVE_TIME_MS", "30000")  # 30 seconds
os.environ.setdefault("GRPC_KEEPALIVE_TIMEOUT_MS", "5000")  # 5 seconds
os.environ.setdefault("GRPC_KEEPALIVE_PERMIT_WITHOUT_CALLS", "true")

LOG_DIR = Path(__file__).parent.parent.parent.parent / "logs"
LOG_FILE = "activity.log"
DEBUG_LOG_FILE = "debug.log"
@@ -79,7 +90,6 @@ def configure_logging(force_cloud_logging: bool = False) -> None:
    Note: This function is typically called at the start of the application
    to set up the logging infrastructure.
    """

    config = LoggingConfig()
    log_handlers: list[logging.Handler] = []

@@ -105,13 +115,17 @@ def configure_logging(force_cloud_logging: bool = False) -> None:
    if config.enable_cloud_logging or force_cloud_logging:
        import google.cloud.logging
        from google.cloud.logging.handlers import CloudLoggingHandler
        from google.cloud.logging_v2.handlers.transports.sync import SyncTransport
        from google.cloud.logging_v2.handlers.transports import (
            BackgroundThreadTransport,
        )

        client = google.cloud.logging.Client()
        # Use BackgroundThreadTransport to prevent blocking the main thread
        # and deadlocks when gRPC calls to Google Cloud Logging hang
        cloud_handler = CloudLoggingHandler(
            client,
            name="autogpt_logs",
            transport=SyncTransport,
            transport=BackgroundThreadTransport,
        )
        cloud_handler.setLevel(config.level)
        log_handlers.append(cloud_handler)
@@ -1,39 +1,5 @@
import logging
import re
from typing import Any

import uvicorn.config
from colorama import Fore


def remove_color_codes(s: str) -> str:
    return re.sub(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])", "", s)


def fmt_kwargs(kwargs: dict) -> str:
    return ", ".join(f"{n}={repr(v)}" for n, v in kwargs.items())


def print_attribute(
    title: str, value: Any, title_color: str = Fore.GREEN, value_color: str = ""
) -> None:
    logger = logging.getLogger()
    logger.info(
        str(value),
        extra={
            "title": f"{title.rstrip(':')}:",
            "title_color": title_color,
            "color": value_color,
        },
    )


def generate_uvicorn_config():
    """
    Generates a uvicorn logging config that silences uvicorn's default logging and tells it to use the native logging module.
    """
    log_config = dict(uvicorn.config.LOGGING_CONFIG)
    log_config["loggers"]["uvicorn"] = {"handlers": []}
    log_config["loggers"]["uvicorn.error"] = {"handlers": []}
    log_config["loggers"]["uvicorn.access"] = {"handlers": []}
    return log_config
@@ -1,17 +1,34 @@
import inspect
import logging
import threading
from typing import Awaitable, Callable, ParamSpec, TypeVar, cast, overload
import time
from functools import wraps
from typing import (
    Awaitable,
    Callable,
    ParamSpec,
    Protocol,
    Tuple,
    TypeVar,
    cast,
    overload,
    runtime_checkable,
)

P = ParamSpec("P")
R = TypeVar("R")


@overload
def thread_cached(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[R]]: ...
logger = logging.getLogger(__name__)


@overload
def thread_cached(func: Callable[P, R]) -> Callable[P, R]: ...
def thread_cached(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[R]]:
    pass


@overload
def thread_cached(func: Callable[P, R]) -> Callable[P, R]:
    pass


def thread_cached(
@@ -57,3 +74,193 @@ def thread_cached(
def clear_thread_cache(func: Callable) -> None:
    if clear := getattr(func, "clear_cache", None):
        clear()


FuncT = TypeVar("FuncT")


R_co = TypeVar("R_co", covariant=True)


@runtime_checkable
class AsyncCachedFunction(Protocol[P, R_co]):
    """Protocol for async functions with cache management methods."""

    def cache_clear(self) -> None:
        """Clear all cached entries."""
        return None

    def cache_info(self) -> dict[str, int | None]:
        """Get cache statistics."""
        return {}

    async def __call__(self, *args: P.args, **kwargs: P.kwargs) -> R_co:
        """Call the cached function."""
        return None  # type: ignore


def async_ttl_cache(
    maxsize: int = 128, ttl_seconds: int | None = None
) -> Callable[[Callable[P, Awaitable[R]]], AsyncCachedFunction[P, R]]:
    """
    TTL (Time To Live) cache decorator for async functions.

    Similar to functools.lru_cache but works with async functions and includes optional TTL.

    Args:
        maxsize: Maximum number of cached entries
        ttl_seconds: Time to live in seconds. If None, entries never expire (like lru_cache)

    Returns:
        Decorator function

    Example:
        # With TTL
        @async_ttl_cache(maxsize=1000, ttl_seconds=300)
        async def api_call(param: str) -> dict:
            return {"result": param}

        # Without TTL (permanent cache like lru_cache)
        @async_ttl_cache(maxsize=1000)
        async def expensive_computation(param: str) -> dict:
            return {"result": param}
    """

    def decorator(
        async_func: Callable[P, Awaitable[R]],
    ) -> AsyncCachedFunction[P, R]:
        # Cache storage - use union type to handle both cases
        cache_storage: dict[tuple, R | Tuple[R, float]] = {}

        @wraps(async_func)
        async def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
            # Create cache key from arguments
            key = (args, tuple(sorted(kwargs.items())))
            current_time = time.time()

            # Check if we have a valid cached entry
            if key in cache_storage:
                if ttl_seconds is None:
                    # No TTL - return cached result directly
                    logger.debug(
                        f"Cache hit for {async_func.__name__} with key: {str(key)[:50]}"
                    )
                    return cast(R, cache_storage[key])
                else:
                    # With TTL - check expiration
                    cached_data = cache_storage[key]
                    if isinstance(cached_data, tuple):
                        result, timestamp = cached_data
                        if current_time - timestamp < ttl_seconds:
                            logger.debug(
                                f"Cache hit for {async_func.__name__} with key: {str(key)[:50]}"
                            )
                            return cast(R, result)
                        else:
                            # Expired entry
                            del cache_storage[key]
                            logger.debug(
                                f"Cache entry expired for {async_func.__name__}"
                            )

            # Cache miss or expired - fetch fresh data
            logger.debug(
                f"Cache miss for {async_func.__name__} with key: {str(key)[:50]}"
            )
            result = await async_func(*args, **kwargs)

            # Store in cache
            if ttl_seconds is None:
                cache_storage[key] = result
            else:
                cache_storage[key] = (result, current_time)

            # Simple cleanup when cache gets too large
            if len(cache_storage) > maxsize:
                # Remove oldest entries (simple FIFO cleanup)
                cutoff = maxsize // 2
                oldest_keys = list(cache_storage.keys())[:-cutoff] if cutoff > 0 else []
                for old_key in oldest_keys:
                    cache_storage.pop(old_key, None)
                logger.debug(
                    f"Cache cleanup: removed {len(oldest_keys)} entries for {async_func.__name__}"
                )

            return result

        # Add cache management methods (similar to functools.lru_cache)
        def cache_clear() -> None:
            cache_storage.clear()

        def cache_info() -> dict[str, int | None]:
            return {
                "size": len(cache_storage),
                "maxsize": maxsize,
                "ttl_seconds": ttl_seconds,
            }

        # Attach methods to wrapper
        setattr(wrapper, "cache_clear", cache_clear)
        setattr(wrapper, "cache_info", cache_info)

        return cast(AsyncCachedFunction[P, R], wrapper)

    return decorator


@overload
def async_cache(
    func: Callable[P, Awaitable[R]],
) -> AsyncCachedFunction[P, R]:
    pass


@overload
def async_cache(
    func: None = None,
    *,
    maxsize: int = 128,
) -> Callable[[Callable[P, Awaitable[R]]], AsyncCachedFunction[P, R]]:
    pass


def async_cache(
    func: Callable[P, Awaitable[R]] | None = None,
    *,
    maxsize: int = 128,
) -> (
    AsyncCachedFunction[P, R]
    | Callable[[Callable[P, Awaitable[R]]], AsyncCachedFunction[P, R]]
):
    """
    Process-level cache decorator for async functions (no TTL).

    Similar to functools.lru_cache but works with async functions.
    This is a convenience wrapper around async_ttl_cache with ttl_seconds=None.

    Args:
        func: The async function to cache (when used without parentheses)
        maxsize: Maximum number of cached entries

    Returns:
        Decorated function or decorator

    Example:
        # Without parentheses (uses default maxsize=128)
        @async_cache
        async def get_data(param: str) -> dict:
            return {"result": param}

        # With parentheses and custom maxsize
        @async_cache(maxsize=1000)
        async def expensive_computation(param: str) -> dict:
            # Expensive computation here
            return {"result": param}
    """
    if func is None:
        # Called with parentheses @async_cache() or @async_cache(maxsize=...)
        return async_ttl_cache(maxsize=maxsize, ttl_seconds=None)
    else:
        # Called without parentheses @async_cache
        decorator = async_ttl_cache(maxsize=maxsize, ttl_seconds=None)
        return decorator(func)
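A quick usage sketch of `thread_cached` and `clear_thread_cache` from the hunk above. The cached function is invented; the semantics assumed here (per-thread memoization, cleared per function via the `clear_cache` attribute the code looks up) follow the names in the diff.

```python
import threading

from autogpt_libs.utils.cache import clear_thread_cache, thread_cached


@thread_cached
def expensive_lookup(key: str) -> str:  # hypothetical function
    print(f"computing {key!r} on thread {threading.get_ident()}")
    return key.upper()


expensive_lookup("a")  # computes
expensive_lookup("a")  # served from this thread's cache
clear_thread_cache(expensive_lookup)
expensive_lookup("a")  # computes again
```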
@@ -16,7 +16,12 @@ from unittest.mock import Mock

import pytest

from autogpt_libs.utils.cache import clear_thread_cache, thread_cached
from autogpt_libs.utils.cache import (
    async_cache,
    async_ttl_cache,
    clear_thread_cache,
    thread_cached,
)


class TestThreadCached:
@@ -323,3 +328,378 @@ class TestThreadCached:

        assert function_using_mock(2) == 42
        assert mock.call_count == 2


class TestAsyncTTLCache:
    """Tests for the @async_ttl_cache decorator."""

    @pytest.mark.asyncio
    async def test_basic_caching(self):
        """Test basic caching functionality."""
        call_count = 0

        @async_ttl_cache(maxsize=10, ttl_seconds=60)
        async def cached_function(x: int, y: int = 0) -> int:
            nonlocal call_count
            call_count += 1
            await asyncio.sleep(0.01)  # Simulate async work
            return x + y

        # First call
        result1 = await cached_function(1, 2)
        assert result1 == 3
        assert call_count == 1

        # Second call with same args - should use cache
        result2 = await cached_function(1, 2)
        assert result2 == 3
        assert call_count == 1  # No additional call

        # Different args - should call function again
        result3 = await cached_function(2, 3)
        assert result3 == 5
        assert call_count == 2

    @pytest.mark.asyncio
    async def test_ttl_expiration(self):
        """Test that cache entries expire after TTL."""
        call_count = 0

        @async_ttl_cache(maxsize=10, ttl_seconds=1)  # Short TTL
        async def short_lived_cache(x: int) -> int:
            nonlocal call_count
            call_count += 1
            return x * 2

        # First call
        result1 = await short_lived_cache(5)
        assert result1 == 10
        assert call_count == 1

        # Second call immediately - should use cache
        result2 = await short_lived_cache(5)
        assert result2 == 10
        assert call_count == 1

        # Wait for TTL to expire
        await asyncio.sleep(1.1)

        # Third call after expiration - should call function again
        result3 = await short_lived_cache(5)
        assert result3 == 10
        assert call_count == 2

    @pytest.mark.asyncio
    async def test_cache_info(self):
        """Test cache info functionality."""

        @async_ttl_cache(maxsize=5, ttl_seconds=300)
        async def info_test_function(x: int) -> int:
            return x * 3

        # Check initial cache info
        info = info_test_function.cache_info()
        assert info["size"] == 0
        assert info["maxsize"] == 5
        assert info["ttl_seconds"] == 300

        # Add an entry
        await info_test_function(1)
        info = info_test_function.cache_info()
        assert info["size"] == 1

    @pytest.mark.asyncio
    async def test_cache_clear(self):
        """Test cache clearing functionality."""
        call_count = 0

        @async_ttl_cache(maxsize=10, ttl_seconds=60)
        async def clearable_function(x: int) -> int:
            nonlocal call_count
            call_count += 1
            return x * 4

        # First call
        result1 = await clearable_function(2)
        assert result1 == 8
        assert call_count == 1

        # Second call - should use cache
        result2 = await clearable_function(2)
        assert result2 == 8
        assert call_count == 1

        # Clear cache
        clearable_function.cache_clear()

        # Third call after clear - should call function again
        result3 = await clearable_function(2)
        assert result3 == 8
        assert call_count == 2

    @pytest.mark.asyncio
    async def test_maxsize_cleanup(self):
        """Test that cache cleans up when maxsize is exceeded."""
        call_count = 0

        @async_ttl_cache(maxsize=3, ttl_seconds=60)
        async def size_limited_function(x: int) -> int:
            nonlocal call_count
            call_count += 1
            return x**2

        # Fill cache to maxsize
        await size_limited_function(1)  # call_count: 1
        await size_limited_function(2)  # call_count: 2
        await size_limited_function(3)  # call_count: 3

        info = size_limited_function.cache_info()
        assert info["size"] == 3

        # Add one more entry - should trigger cleanup
        await size_limited_function(4)  # call_count: 4

        # Cache size should be reduced (cleanup removes oldest entries)
        info = size_limited_function.cache_info()
        assert info["size"] is not None and info["size"] <= 3  # Should be cleaned up

    @pytest.mark.asyncio
    async def test_argument_variations(self):
        """Test caching with different argument patterns."""
        call_count = 0

        @async_ttl_cache(maxsize=10, ttl_seconds=60)
        async def arg_test_function(a: int, b: str = "default", *, c: int = 100) -> str:
            nonlocal call_count
            call_count += 1
            return f"{a}-{b}-{c}"

        # Different ways to call with same logical arguments
        result1 = await arg_test_function(1, "test", c=200)
        assert call_count == 1

        # Same arguments, same order - should use cache
        result2 = await arg_test_function(1, "test", c=200)
        assert call_count == 1
        assert result1 == result2

        # Different arguments - should call function
        result3 = await arg_test_function(2, "test", c=200)
        assert call_count == 2
        assert result1 != result3

    @pytest.mark.asyncio
    async def test_exception_handling(self):
        """Test that exceptions are not cached."""
        call_count = 0

        @async_ttl_cache(maxsize=10, ttl_seconds=60)
        async def exception_function(x: int) -> int:
            nonlocal call_count
            call_count += 1
            if x < 0:
                raise ValueError("Negative value not allowed")
            return x * 2

        # Successful call - should be cached
        result1 = await exception_function(5)
        assert result1 == 10
        assert call_count == 1

        # Same successful call - should use cache
        result2 = await exception_function(5)
        assert result2 == 10
        assert call_count == 1

        # Exception call - should not be cached
        with pytest.raises(ValueError):
            await exception_function(-1)
        assert call_count == 2

        # Same exception call - should call again (not cached)
        with pytest.raises(ValueError):
            await exception_function(-1)
        assert call_count == 3

    @pytest.mark.asyncio
    async def test_concurrent_calls(self):
        """Test caching behavior with concurrent calls."""
        call_count = 0

        @async_ttl_cache(maxsize=10, ttl_seconds=60)
        async def concurrent_function(x: int) -> int:
            nonlocal call_count
            call_count += 1
            await asyncio.sleep(0.05)  # Simulate work
            return x * x

        # Launch concurrent calls with same arguments
        tasks = [concurrent_function(3) for _ in range(5)]
        results = await asyncio.gather(*tasks)

        # All results should be the same
        assert all(result == 9 for result in results)

        # Note: Due to race conditions, call_count might be up to 5 for concurrent calls
        # This tests that the cache doesn't break under concurrent access
        assert 1 <= call_count <= 5
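As the final comment in `test_concurrent_calls` notes, this cache does not coalesce concurrent misses: five simultaneous callers may each execute the function once. Where that matters, a single-flight wrapper can serialize the miss path per key. A sketch under that assumption (not part of the repo):

```python
import asyncio
from functools import wraps


def async_single_flight(func):
    """Coalesce concurrent calls with identical arguments into one execution."""
    locks: dict[tuple, asyncio.Lock] = {}
    results: dict[tuple, object] = {}

    @wraps(func)
    async def wrapper(*args, **kwargs):
        key = (args, tuple(sorted(kwargs.items())))
        lock = locks.setdefault(key, asyncio.Lock())
        async with lock:
            if key not in results:  # only the first caller computes
                results[key] = await func(*args, **kwargs)
        return results[key]

    return wrapper
```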

class TestAsyncCache:
    """Tests for the @async_cache decorator (no TTL)."""

    @pytest.mark.asyncio
    async def test_basic_caching_no_ttl(self):
        """Test basic caching functionality without TTL."""
        call_count = 0

        @async_cache(maxsize=10)
        async def cached_function(x: int, y: int = 0) -> int:
            nonlocal call_count
            call_count += 1
            await asyncio.sleep(0.01)  # Simulate async work
            return x + y

        # First call
        result1 = await cached_function(1, 2)
        assert result1 == 3
        assert call_count == 1

        # Second call with same args - should use cache
        result2 = await cached_function(1, 2)
        assert result2 == 3
        assert call_count == 1  # No additional call

        # Third call after some time - should still use cache (no TTL)
        await asyncio.sleep(0.05)
        result3 = await cached_function(1, 2)
        assert result3 == 3
        assert call_count == 1  # Still no additional call

        # Different args - should call function again
        result4 = await cached_function(2, 3)
        assert result4 == 5
        assert call_count == 2

    @pytest.mark.asyncio
    async def test_no_ttl_vs_ttl_behavior(self):
        """Test the difference between TTL and no-TTL caching."""
        ttl_call_count = 0
        no_ttl_call_count = 0

        @async_ttl_cache(maxsize=10, ttl_seconds=1)  # Short TTL
        async def ttl_function(x: int) -> int:
            nonlocal ttl_call_count
            ttl_call_count += 1
            return x * 2

        @async_cache(maxsize=10)  # No TTL
        async def no_ttl_function(x: int) -> int:
            nonlocal no_ttl_call_count
            no_ttl_call_count += 1
            return x * 2

        # First calls
        await ttl_function(5)
        await no_ttl_function(5)
        assert ttl_call_count == 1
        assert no_ttl_call_count == 1

        # Wait for TTL to expire
        await asyncio.sleep(1.1)

        # Second calls after TTL expiry
        await ttl_function(5)  # Should call function again (TTL expired)
        await no_ttl_function(5)  # Should use cache (no TTL)
        assert ttl_call_count == 2  # TTL function called again
        assert no_ttl_call_count == 1  # No-TTL function still cached

    @pytest.mark.asyncio
    async def test_async_cache_info(self):
        """Test cache info for no-TTL cache."""

        @async_cache(maxsize=5)
        async def info_test_function(x: int) -> int:
            return x * 3

        # Check initial cache info
        info = info_test_function.cache_info()
        assert info["size"] == 0
        assert info["maxsize"] == 5
        assert info["ttl_seconds"] is None  # No TTL

        # Add an entry
        await info_test_function(1)
        info = info_test_function.cache_info()
        assert info["size"] == 1


class TestTTLOptional:
    """Tests for optional TTL functionality."""

    @pytest.mark.asyncio
    async def test_ttl_none_behavior(self):
        """Test that ttl_seconds=None works like no TTL."""
        call_count = 0

        @async_ttl_cache(maxsize=10, ttl_seconds=None)
        async def no_ttl_via_none(x: int) -> int:
            nonlocal call_count
            call_count += 1
            return x**2

        # First call
        result1 = await no_ttl_via_none(3)
        assert result1 == 9
        assert call_count == 1

        # Wait (would expire if there was TTL)
        await asyncio.sleep(0.1)

        # Second call - should still use cache
        result2 = await no_ttl_via_none(3)
        assert result2 == 9
        assert call_count == 1  # No additional call

        # Check cache info
        info = no_ttl_via_none.cache_info()
        assert info["ttl_seconds"] is None

    @pytest.mark.asyncio
    async def test_cache_options_comparison(self):
        """Test different cache options work as expected."""
        ttl_calls = 0
        no_ttl_calls = 0

        @async_ttl_cache(maxsize=10, ttl_seconds=1)  # With TTL
        async def ttl_function(x: int) -> int:
            nonlocal ttl_calls
            ttl_calls += 1
            return x * 10

        @async_cache(maxsize=10)  # Process-level cache (no TTL)
        async def process_function(x: int) -> int:
            nonlocal no_ttl_calls
            no_ttl_calls += 1
            return x * 10

        # Both should cache initially
        await ttl_function(3)
        await process_function(3)
        assert ttl_calls == 1
        assert no_ttl_calls == 1

        # Immediate second calls - both should use cache
        await ttl_function(3)
        await process_function(3)
        assert ttl_calls == 1
        assert no_ttl_calls == 1

        # Wait for TTL to expire
        await asyncio.sleep(1.1)

        # After TTL expiry
        await ttl_function(3)  # Should call function again
        await process_function(3)  # Should still use cache
        assert ttl_calls == 2  # TTL cache expired, called again
        assert no_ttl_calls == 1  # Process cache never expires
52  autogpt_platform/backend/.dockerignore  Normal file
@@ -0,0 +1,52 @@
# Development and testing files
**/__pycache__
**/*.pyc
**/*.pyo
**/*.pyd
**/.Python
**/env/
**/venv/
**/.venv/
**/pip-log.txt
**/.pytest_cache/
**/test-results/
**/snapshots/
**/test/

# IDE and editor files
**/.vscode/
**/.idea/
**/*.swp
**/*.swo
*~

# OS files
.DS_Store
Thumbs.db

# Logs
**/*.log
**/logs/

# Git
.git/
.gitignore

# Documentation
**/*.md
!README.md

# Local development files
.env
.env.local
**/.env.test

# Build artifacts
**/dist/
**/build/
**/target/

# Docker files (avoid recursion)
Dockerfile*
docker-compose*
.dockerignore
@@ -1,3 +1,9 @@
# Backend Configuration
# This file contains environment variables that MUST be set for the AutoGPT platform
# Variables with working defaults in settings.py are not included here

## ===== REQUIRED DATABASE CONFIGURATION ===== ##
# PostgreSQL Database Connection
DB_USER=postgres
DB_PASS=your-super-secret-and-long-postgres-password
DB_NAME=postgres
@@ -10,72 +16,50 @@ DB_SCHEMA=platform
DATABASE_URL="postgresql://${DB_USER}:${DB_PASS}@${DB_HOST}:${DB_PORT}/${DB_NAME}?schema=${DB_SCHEMA}&connect_timeout=${DB_CONNECT_TIMEOUT}"
DIRECT_URL="postgresql://${DB_USER}:${DB_PASS}@${DB_HOST}:${DB_PORT}/${DB_NAME}?schema=${DB_SCHEMA}&connect_timeout=${DB_CONNECT_TIMEOUT}"
PRISMA_SCHEMA="postgres/schema.prisma"
ENABLE_AUTH=true

# EXECUTOR
NUM_GRAPH_WORKERS=10

BACKEND_CORS_ALLOW_ORIGINS=["http://localhost:3000"]

# generate using `from cryptography.fernet import Fernet;Fernet.generate_key().decode()`
ENCRYPTION_KEY='dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw='
UNSUBSCRIBE_SECRET_KEY = 'HlP8ivStJjmbf6NKi78m_3FnOogut0t5ckzjsIqeaio='

## ===== REQUIRED SERVICE CREDENTIALS ===== ##
# Redis Configuration
REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_PASSWORD=password

ENABLE_CREDIT=false
STRIPE_API_KEY=
STRIPE_WEBHOOK_SECRET=
# RabbitMQ Credentials
RABBITMQ_DEFAULT_USER=rabbitmq_user_default
RABBITMQ_DEFAULT_PASS=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7

# What environment things should be logged under: local dev or prod
APP_ENV=local
# What environment to behave as: "local" or "cloud"
BEHAVE_AS=local
PYRO_HOST=localhost
SENTRY_DSN=

# Email For Postmark so we can send emails
POSTMARK_SERVER_API_TOKEN=
POSTMARK_SENDER_EMAIL=invalid@invalid.com
POSTMARK_WEBHOOK_TOKEN=

## User auth with Supabase is required for any of the 3rd party integrations with auth to work.
ENABLE_AUTH=true
# Supabase Authentication
SUPABASE_URL=http://localhost:8000
SUPABASE_SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
SUPABASE_JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long

# RabbitMQ credentials -- Used for communication between services
RABBITMQ_HOST=localhost
RABBITMQ_PORT=5672
RABBITMQ_DEFAULT_USER=rabbitmq_user_default
RABBITMQ_DEFAULT_PASS=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7
## ===== REQUIRED SECURITY KEYS ===== ##
# Generate using: from cryptography.fernet import Fernet;Fernet.generate_key().decode()
ENCRYPTION_KEY=dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw=
UNSUBSCRIBE_SECRET_KEY=HlP8ivStJjmbf6NKi78m_3FnOogut0t5ckzjsIqeaio=
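Both keys above are Fernet keys. As the comment notes, fresh values can be generated with the cryptography package:

from cryptography.fernet import Fernet

# Run once per key and paste the output into ENCRYPTION_KEY / UNSUBSCRIBE_SECRET_KEY.
print(Fernet.generate_key().decode())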

## GCS bucket is required for marketplace and library functionality
## ===== IMPORTANT OPTIONAL CONFIGURATION ===== ##
# Platform URLs (set these for webhooks and OAuth to work)
PLATFORM_BASE_URL=http://localhost:8000
FRONTEND_BASE_URL=http://localhost:3000

# Media Storage (required for marketplace and library functionality)
MEDIA_GCS_BUCKET_NAME=

## For local development, you may need to set FRONTEND_BASE_URL for the OAuth flow
## for integrations to work. Defaults to the value of PLATFORM_BASE_URL if not set.
# FRONTEND_BASE_URL=http://localhost:3000
## ===== API KEYS AND OAUTH CREDENTIALS ===== ##
# All API keys below are optional - only add what you need

## PLATFORM_BASE_URL must be set to a *publicly accessible* URL pointing to your backend
## to use the platform's webhook-related functionality.
## If you are developing locally, you can use something like ngrok to get a public URL
## and tunnel it to your locally running backend.
PLATFORM_BASE_URL=http://localhost:3000

## Cloudflare Turnstile (CAPTCHA) Configuration
## Get these from the Cloudflare Turnstile dashboard: https://dash.cloudflare.com/?to=/:account/turnstile
## This is the backend secret key
TURNSTILE_SECRET_KEY=
## This is the verify URL
TURNSTILE_VERIFY_URL=https://challenges.cloudflare.com/turnstile/v0/siteverify

## == INTEGRATION CREDENTIALS == ##
# Each set of server side credentials is required for the corresponding 3rd party
# integration to work.
# AI/LLM Services
OPENAI_API_KEY=
ANTHROPIC_API_KEY=
GROQ_API_KEY=
LLAMA_API_KEY=
AIML_API_KEY=
V0_API_KEY=
OPEN_ROUTER_API_KEY=
NVIDIA_API_KEY=

# OAuth Credentials
# For the OAuth callback URL, use <your_frontend_url>/auth/integrations/oauth_callback,
# e.g. http://localhost:3000/auth/integrations/oauth_callback

@@ -85,7 +69,6 @@ GITHUB_CLIENT_SECRET=

# Google OAuth App server credentials - https://console.cloud.google.com/apis/credentials, and enable gmail api and set scopes
# https://console.cloud.google.com/apis/credentials/consent ?project=<your_project_id>

# You'll need to add/enable the following scopes (minimum):
# https://console.developers.google.com/apis/api/gmail.googleapis.com/overview ?project=<your_project_id>
# https://console.cloud.google.com/apis/library/sheets.googleapis.com/ ?project=<your_project_id>
@@ -121,104 +104,66 @@ LINEAR_CLIENT_SECRET=
TODOIST_CLIENT_ID=
TODOIST_CLIENT_SECRET=

## ===== OPTIONAL API KEYS ===== ##

# LLM
OPENAI_API_KEY=
ANTHROPIC_API_KEY=
AIML_API_KEY=
GROQ_API_KEY=
OPEN_ROUTER_API_KEY=
LLAMA_API_KEY=

# Reddit
# Go to https://www.reddit.com/prefs/apps and create a new app
# Choose "script" for the type
# Fill in the redirect uri as <your_frontend_url>/auth/integrations/oauth_callback, e.g. http://localhost:3000/auth/integrations/oauth_callback
NOTION_CLIENT_ID=
NOTION_CLIENT_SECRET=
REDDIT_CLIENT_ID=
REDDIT_CLIENT_SECRET=
REDDIT_USER_AGENT="AutoGPT:1.0 (by /u/autogpt)"

# Discord
DISCORD_BOT_TOKEN=
# Payment Processing
STRIPE_API_KEY=
STRIPE_WEBHOOK_SECRET=

# SMTP/Email
SMTP_SERVER=
SMTP_PORT=
SMTP_USERNAME=
SMTP_PASSWORD=
# Email Service (for sending notifications and confirmations)
POSTMARK_SERVER_API_TOKEN=
POSTMARK_SENDER_EMAIL=invalid@invalid.com
POSTMARK_WEBHOOK_TOKEN=

# D-ID
# Error Tracking
SENTRY_DSN=

# Cloudflare Turnstile (CAPTCHA) Configuration
# Get these from the Cloudflare Turnstile dashboard: https://dash.cloudflare.com/?to=/:account/turnstile
# This is the backend secret key
TURNSTILE_SECRET_KEY=
# This is the verify URL
TURNSTILE_VERIFY_URL=https://challenges.cloudflare.com/turnstile/v0/siteverify

# Feature Flags
LAUNCH_DARKLY_SDK_KEY=

# Content Generation & Media
DID_API_KEY=
FAL_API_KEY=
IDEOGRAM_API_KEY=
REPLICATE_API_KEY=
REVID_API_KEY=
SCREENSHOTONE_API_KEY=
UNREAL_SPEECH_API_KEY=

# Open Weather Map
# Data & Search Services
E2B_API_KEY=
EXA_API_KEY=
JINA_API_KEY=
MEM0_API_KEY=
OPENWEATHERMAP_API_KEY=

# SMTP
SMTP_SERVER=
SMTP_PORT=
SMTP_USERNAME=
SMTP_PASSWORD=

# Medium
MEDIUM_API_KEY=
MEDIUM_AUTHOR_ID=

# Google Maps
GOOGLE_MAPS_API_KEY=

# Replicate
REPLICATE_API_KEY=
# Communication Services
DISCORD_BOT_TOKEN=
MEDIUM_API_KEY=
MEDIUM_AUTHOR_ID=
SMTP_SERVER=
SMTP_PORT=
SMTP_USERNAME=
SMTP_PASSWORD=

# Ideogram
IDEOGRAM_API_KEY=

# Fal
FAL_API_KEY=

# Exa
EXA_API_KEY=

# E2B
E2B_API_KEY=

# Mem0
MEM0_API_KEY=

# Nvidia
NVIDIA_API_KEY=

# Apollo
# Business & Marketing Tools
APOLLO_API_KEY=

# SmartLead
SMARTLEAD_API_KEY=

# ZeroBounce
ZEROBOUNCE_API_KEY=

# Ayrshare
ENRICHLAYER_API_KEY=
AYRSHARE_API_KEY=
AYRSHARE_JWT_KEY=
SMARTLEAD_API_KEY=
ZEROBOUNCE_API_KEY=

## ===== OPTIONAL API KEYS END ===== ##

# Block Error Rate Monitoring
BLOCK_ERROR_RATE_THRESHOLD=0.5
BLOCK_ERROR_RATE_CHECK_INTERVAL_SECS=86400

# Logging Configuration
LOG_LEVEL=INFO
ENABLE_CLOUD_LOGGING=false
ENABLE_FILE_LOGGING=false
# Use to manually set the log directory
# LOG_DIR=./logs

# Example Blocks Configuration
# Set to true to enable example blocks in development
# These blocks are disabled by default in production
ENABLE_EXAMPLE_BLOCKS=false

# Cloud Storage Configuration
# Cleanup interval for expired files (hours between cleanup runs, 1-24 hours)
CLOUD_STORAGE_CLEANUP_INTERVAL_HOURS=6
# Other Services
AUTOMOD_API_KEY=
1
autogpt_platform/backend/.gitignore
vendored
@@ -1,3 +1,4 @@
.env
database.db
database.db-journal
dev.db

@@ -8,14 +8,14 @@ WORKDIR /app

RUN echo 'Acquire::http::Pipeline-Depth 0;\nAcquire::http::No-Cache true;\nAcquire::BrokenProxy true;\n' > /etc/apt/apt.conf.d/99fixbadproxy

RUN apt-get update --allow-releaseinfo-change --fix-missing

# Install build dependencies
RUN apt-get install -y build-essential
RUN apt-get install -y libpq5
RUN apt-get install -y libz-dev
RUN apt-get install -y libssl-dev
RUN apt-get install -y postgresql-client
# Update package list and install build dependencies in a single layer
RUN apt-get update --allow-releaseinfo-change --fix-missing \
    && apt-get install -y \
    build-essential \
    libpq5 \
    libz-dev \
    libssl-dev \
    postgresql-client

ENV POETRY_HOME=/opt/poetry
ENV POETRY_NO_INTERACTION=1
@@ -68,6 +68,12 @@ COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.tom

WORKDIR /app/autogpt_platform/backend

FROM server_dependencies AS migrate

# Migration stage only needs schema and migrations - much lighter than full backend
COPY autogpt_platform/backend/schema.prisma /app/autogpt_platform/backend/
COPY autogpt_platform/backend/migrations /app/autogpt_platform/backend/migrations

FROM server_dependencies AS server

COPY autogpt_platform/backend /app/autogpt_platform/backend

408
autogpt_platform/backend/backend/blocks/enrichlayer/_api.py
Normal file
@@ -0,0 +1,408 @@
"""
API module for Enrichlayer integration.

This module provides a client for interacting with the Enrichlayer API,
which allows fetching LinkedIn profile data and related information.
"""

import datetime
import enum
import logging
from json import JSONDecodeError
from typing import Any, Optional, TypeVar

from pydantic import BaseModel, Field

from backend.data.model import APIKeyCredentials
from backend.util.request import Requests

logger = logging.getLogger(__name__)

T = TypeVar("T")


class EnrichlayerAPIException(Exception):
    """Exception raised for Enrichlayer API errors."""

    def __init__(self, message: str, status_code: int):
        super().__init__(message)
        self.status_code = status_code


class FallbackToCache(enum.Enum):
    ON_ERROR = "on-error"
    NEVER = "never"


class UseCache(enum.Enum):
    IF_PRESENT = "if-present"
    NEVER = "never"


class SocialMediaProfiles(BaseModel):
    """Social media profiles model."""

    twitter: Optional[str] = None
    facebook: Optional[str] = None
    github: Optional[str] = None


class Experience(BaseModel):
    """Experience model for LinkedIn profiles."""

    company: Optional[str] = None
    title: Optional[str] = None
    description: Optional[str] = None
    location: Optional[str] = None
    starts_at: Optional[dict[str, int]] = None
    ends_at: Optional[dict[str, int]] = None
    company_linkedin_profile_url: Optional[str] = None


class Education(BaseModel):
    """Education model for LinkedIn profiles."""

    school: Optional[str] = None
    degree_name: Optional[str] = None
    field_of_study: Optional[str] = None
    starts_at: Optional[dict[str, int]] = None
    ends_at: Optional[dict[str, int]] = None
    school_linkedin_profile_url: Optional[str] = None


class PersonProfileResponse(BaseModel):
    """Response model for LinkedIn person profile.

    This model represents the response from Enrichlayer's LinkedIn profile API.
    The API returns comprehensive profile data including work experience,
    education, skills, and contact information (when available).

    Example API Response:
        {
            "public_identifier": "johnsmith",
            "full_name": "John Smith",
            "occupation": "Software Engineer at Tech Corp",
            "experiences": [
                {
                    "company": "Tech Corp",
                    "title": "Software Engineer",
                    "starts_at": {"year": 2020, "month": 1}
                }
            ],
            "education": [...],
            "skills": ["Python", "JavaScript", ...]
        }
    """

    public_identifier: Optional[str] = None
    profile_pic_url: Optional[str] = None
    full_name: Optional[str] = None
    first_name: Optional[str] = None
    last_name: Optional[str] = None
    occupation: Optional[str] = None
    headline: Optional[str] = None
    summary: Optional[str] = None
    country: Optional[str] = None
    country_full_name: Optional[str] = None
    city: Optional[str] = None
    state: Optional[str] = None
    experiences: Optional[list[Experience]] = None
    education: Optional[list[Education]] = None
    languages: Optional[list[str]] = None
    skills: Optional[list[str]] = None
    inferred_salary: Optional[dict[str, Any]] = None
    personal_email: Optional[str] = None
    personal_contact_number: Optional[str] = None
    social_media_profiles: Optional[SocialMediaProfiles] = None
    extra: Optional[dict[str, Any]] = None


class SimilarProfile(BaseModel):
    """Similar profile model for LinkedIn person lookup."""

    similarity: float
    linkedin_profile_url: str


class PersonLookupResponse(BaseModel):
    """Response model for LinkedIn person lookup.

    This model represents the response from Enrichlayer's person lookup API.
    The API returns a LinkedIn profile URL and similarity scores when
    searching for a person by name and company.

    Example API Response:
        {
            "url": "https://www.linkedin.com/in/johnsmith/",
            "name_similarity_score": 0.95,
            "company_similarity_score": 0.88,
            "title_similarity_score": 0.75,
            "location_similarity_score": 0.60
        }
    """

    url: str | None = None
    name_similarity_score: float | None
    company_similarity_score: float | None
    title_similarity_score: float | None
    location_similarity_score: float | None
    last_updated: datetime.datetime | None = None
    profile: PersonProfileResponse | None = None


class RoleLookupResponse(BaseModel):
    """Response model for LinkedIn role lookup.

    This model represents the response from Enrichlayer's role lookup API.
    The API returns LinkedIn profile data for a specific role at a company.

    Example API Response:
        {
            "linkedin_profile_url": "https://www.linkedin.com/in/johnsmith/",
            "profile_data": {...}  // Full PersonProfileResponse data when enrich_profile=True
        }
    """

    linkedin_profile_url: Optional[str] = None
    profile_data: Optional[PersonProfileResponse] = None


class ProfilePictureResponse(BaseModel):
    """Response model for LinkedIn profile picture.

    This model represents the response from Enrichlayer's profile picture API.
    The API returns a URL to the person's LinkedIn profile picture.

    Example API Response:
        {
            "tmp_profile_pic_url": "https://media.licdn.com/dms/image/..."
        }
    """

    tmp_profile_pic_url: str = Field(
        ..., description="URL of the profile picture", alias="tmp_profile_pic_url"
    )

    @property
    def profile_picture_url(self) -> str:
        """Backward compatibility property for profile_picture_url."""
        return self.tmp_profile_pic_url


class EnrichlayerClient:
    """Client for interacting with the Enrichlayer API."""

    API_BASE_URL = "https://enrichlayer.com/api/v2"

    def __init__(
        self,
        credentials: Optional[APIKeyCredentials] = None,
        custom_requests: Optional[Requests] = None,
    ):
        """
        Initialize the Enrichlayer client.

        Args:
            credentials: The credentials to use for authentication.
            custom_requests: Custom Requests instance for testing.
        """
        if custom_requests:
            self._requests = custom_requests
        else:
            headers: dict[str, str] = {
                "Content-Type": "application/json",
            }
            if credentials:
                headers["Authorization"] = (
                    f"Bearer {credentials.api_key.get_secret_value()}"
                )

            self._requests = Requests(
                extra_headers=headers,
                raise_for_status=False,
            )

    async def _handle_response(self, response) -> Any:
        """
        Handle API response and check for errors.

        Args:
            response: The response object from the request.

        Returns:
            The response data.

        Raises:
            EnrichlayerAPIException: If the API request fails.
        """
        if not response.ok:
            try:
                error_data = response.json()
                error_message = error_data.get("message", "")
            except JSONDecodeError:
                error_message = response.text

            raise EnrichlayerAPIException(
                f"Enrichlayer API request failed ({response.status_code}): {error_message}",
                response.status_code,
            )

        return response.json()

    async def fetch_profile(
        self,
        linkedin_url: str,
        fallback_to_cache: FallbackToCache = FallbackToCache.ON_ERROR,
        use_cache: UseCache = UseCache.IF_PRESENT,
        include_skills: bool = False,
        include_inferred_salary: bool = False,
        include_personal_email: bool = False,
        include_personal_contact_number: bool = False,
        include_social_media: bool = False,
        include_extra: bool = False,
    ) -> PersonProfileResponse:
        """
        Fetch a LinkedIn profile with optional parameters.

        Args:
            linkedin_url: The LinkedIn profile URL to fetch.
            fallback_to_cache: Cache usage if live fetch fails ('on-error' or 'never').
            use_cache: Cache utilization ('if-present' or 'never').
            include_skills: Whether to include skills data.
            include_inferred_salary: Whether to include inferred salary data.
            include_personal_email: Whether to include personal email.
            include_personal_contact_number: Whether to include personal contact number.
            include_social_media: Whether to include social media profiles.
            include_extra: Whether to include additional data.

        Returns:
            The LinkedIn profile data.

        Raises:
            EnrichlayerAPIException: If the API request fails.
        """
        params = {
            "url": linkedin_url,
            "fallback_to_cache": fallback_to_cache.value.lower(),
            "use_cache": use_cache.value.lower(),
        }

        if include_skills:
            params["skills"] = "include"
        if include_inferred_salary:
            params["inferred_salary"] = "include"
        if include_personal_email:
            params["personal_email"] = "include"
        if include_personal_contact_number:
            params["personal_contact_number"] = "include"
        if include_social_media:
            params["twitter_profile_id"] = "include"
            params["facebook_profile_id"] = "include"
            params["github_profile_id"] = "include"
        if include_extra:
            params["extra"] = "include"

        response = await self._requests.get(
            f"{self.API_BASE_URL}/profile", params=params
        )
        return PersonProfileResponse(**await self._handle_response(response))

    async def lookup_person(
        self,
        first_name: str,
        company_domain: str,
        last_name: str | None = None,
        location: Optional[str] = None,
        title: Optional[str] = None,
        include_similarity_checks: bool = False,
        enrich_profile: bool = False,
    ) -> PersonLookupResponse:
        """
        Look up a LinkedIn profile by person's information.

        Args:
            first_name: The person's first name.
            last_name: The person's last name.
            company_domain: The domain of the company they work for.
            location: The person's location.
            title: The person's job title.
            include_similarity_checks: Whether to include similarity checks.
            enrich_profile: Whether to enrich the profile.

        Returns:
            The LinkedIn profile lookup result.

        Raises:
            EnrichlayerAPIException: If the API request fails.
        """
        params = {"first_name": first_name, "company_domain": company_domain}

        if last_name:
            params["last_name"] = last_name
        if location:
            params["location"] = location
        if title:
            params["title"] = title
        if include_similarity_checks:
            params["similarity_checks"] = "include"
        if enrich_profile:
            params["enrich_profile"] = "enrich"

        response = await self._requests.get(
            f"{self.API_BASE_URL}/profile/resolve", params=params
        )
        return PersonLookupResponse(**await self._handle_response(response))

    async def lookup_role(
        self, role: str, company_name: str, enrich_profile: bool = False
    ) -> RoleLookupResponse:
        """
        Look up a LinkedIn profile by role in a company.

        Args:
            role: The role title (e.g., CEO, CTO).
            company_name: The name of the company.
            enrich_profile: Whether to enrich the profile.

        Returns:
            The LinkedIn profile lookup result.

        Raises:
            EnrichlayerAPIException: If the API request fails.
        """
        params = {
            "role": role,
            "company_name": company_name,
        }

        if enrich_profile:
            params["enrich_profile"] = "enrich"

        response = await self._requests.get(
            f"{self.API_BASE_URL}/find/company/role", params=params
        )
        return RoleLookupResponse(**await self._handle_response(response))

    async def get_profile_picture(
        self, linkedin_profile_url: str
    ) -> ProfilePictureResponse:
        """
        Get a LinkedIn profile picture URL.

        Args:
            linkedin_profile_url: The LinkedIn profile URL.

        Returns:
            The profile picture URL.

        Raises:
            EnrichlayerAPIException: If the API request fails.
        """
        params = {
            "linkedin_person_profile_url": linkedin_profile_url,
        }

        response = await self._requests.get(
            f"{self.API_BASE_URL}/person/profile-picture", params=params
        )
        return ProfilePictureResponse(**await self._handle_response(response))
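A minimal usage sketch for the client above (the key value is a placeholder; everything else uses only names defined in this module or imported by it):

import asyncio

from pydantic import SecretStr

from backend.data.model import APIKeyCredentials


async def main():
    creds = APIKeyCredentials(
        # id omitted on the assumption the model supplies a default; pass one explicitly otherwise
        provider="enrichlayer",
        api_key=SecretStr("your-enrichlayer-api-key"),  # placeholder, not a real key
        title="Enrichlayer API key",
        expires_at=None,
    )
    client = EnrichlayerClient(credentials=creds)
    profile = await client.fetch_profile(
        "https://www.linkedin.com/in/williamhgates/", include_skills=True
    )
    print(profile.full_name, profile.skills)


asyncio.run(main())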
34
autogpt_platform/backend/backend/blocks/enrichlayer/_auth.py
Normal file
@@ -0,0 +1,34 @@
"""
Authentication module for Enrichlayer API integration.

This module provides credential types and test credentials for the Enrichlayer API.
"""

from typing import Literal

from pydantic import SecretStr

from backend.data.model import APIKeyCredentials, CredentialsMetaInput
from backend.integrations.providers import ProviderName

# Define the type of credentials input expected for Enrichlayer API
EnrichlayerCredentialsInput = CredentialsMetaInput[
    Literal[ProviderName.ENRICHLAYER], Literal["api_key"]
]

# Mock credentials for testing Enrichlayer API integration
TEST_CREDENTIALS = APIKeyCredentials(
    id="1234a567-89bc-4def-ab12-3456cdef7890",
    provider="enrichlayer",
    api_key=SecretStr("mock-enrichlayer-api-key"),
    title="Mock Enrichlayer API key",
    expires_at=None,
)

# Dictionary representation of test credentials for input fields
TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.title,
}
527
autogpt_platform/backend/backend/blocks/enrichlayer/linkedin.py
Normal file
@@ -0,0 +1,527 @@
"""
Block definitions for Enrichlayer API integration.

This module implements blocks for interacting with the Enrichlayer API,
which provides access to LinkedIn profile data and related information.
"""

import logging
from typing import Optional

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import APIKeyCredentials, CredentialsField, SchemaField
from backend.util.type import MediaFileType

from ._api import (
    EnrichlayerClient,
    Experience,
    FallbackToCache,
    PersonLookupResponse,
    PersonProfileResponse,
    RoleLookupResponse,
    UseCache,
)
from ._auth import TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, EnrichlayerCredentialsInput

logger = logging.getLogger(__name__)


class GetLinkedinProfileBlock(Block):
    """Block to fetch LinkedIn profile data using Enrichlayer API."""

    class Input(BlockSchema):
        """Input schema for GetLinkedinProfileBlock."""

        linkedin_url: str = SchemaField(
            description="LinkedIn profile URL to fetch data from",
            placeholder="https://www.linkedin.com/in/username/",
        )
        fallback_to_cache: FallbackToCache = SchemaField(
            description="Cache usage if live fetch fails",
            default=FallbackToCache.ON_ERROR,
            advanced=True,
        )
        use_cache: UseCache = SchemaField(
            description="Cache utilization strategy",
            default=UseCache.IF_PRESENT,
            advanced=True,
        )
        include_skills: bool = SchemaField(
            description="Include skills data",
            default=False,
            advanced=True,
        )
        include_inferred_salary: bool = SchemaField(
            description="Include inferred salary data",
            default=False,
            advanced=True,
        )
        include_personal_email: bool = SchemaField(
            description="Include personal email",
            default=False,
            advanced=True,
        )
        include_personal_contact_number: bool = SchemaField(
            description="Include personal contact number",
            default=False,
            advanced=True,
        )
        include_social_media: bool = SchemaField(
            description="Include social media profiles",
            default=False,
            advanced=True,
        )
        include_extra: bool = SchemaField(
            description="Include additional data",
            default=False,
            advanced=True,
        )
        credentials: EnrichlayerCredentialsInput = CredentialsField(
            description="Enrichlayer API credentials"
        )

    class Output(BlockSchema):
        """Output schema for GetLinkedinProfileBlock."""

        profile: PersonProfileResponse = SchemaField(
            description="LinkedIn profile data"
        )
        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        """Initialize GetLinkedinProfileBlock."""
        super().__init__(
            id="f6e0ac73-4f1d-4acb-b4b7-b67066c5984e",
            description="Fetch LinkedIn profile data using Enrichlayer",
            categories={BlockCategory.SOCIAL},
            input_schema=GetLinkedinProfileBlock.Input,
            output_schema=GetLinkedinProfileBlock.Output,
            test_input={
                "linkedin_url": "https://www.linkedin.com/in/williamhgates/",
                "include_skills": True,
                "include_social_media": True,
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_output=[
                (
                    "profile",
                    PersonProfileResponse(
                        public_identifier="williamhgates",
                        full_name="Bill Gates",
                        occupation="Co-chair at Bill & Melinda Gates Foundation",
                        experiences=[
                            Experience(
                                company="Bill & Melinda Gates Foundation",
                                title="Co-chair",
                                starts_at={"year": 2000},
                            )
                        ],
                    ),
                )
            ],
            test_credentials=TEST_CREDENTIALS,
            test_mock={
                "_fetch_profile": lambda *args, **kwargs: PersonProfileResponse(
                    public_identifier="williamhgates",
                    full_name="Bill Gates",
                    occupation="Co-chair at Bill & Melinda Gates Foundation",
                    experiences=[
                        Experience(
                            company="Bill & Melinda Gates Foundation",
                            title="Co-chair",
                            starts_at={"year": 2000},
                        )
                    ],
                ),
            },
        )

    @staticmethod
    async def _fetch_profile(
        credentials: APIKeyCredentials,
        linkedin_url: str,
        fallback_to_cache: FallbackToCache = FallbackToCache.ON_ERROR,
        use_cache: UseCache = UseCache.IF_PRESENT,
        include_skills: bool = False,
        include_inferred_salary: bool = False,
        include_personal_email: bool = False,
        include_personal_contact_number: bool = False,
        include_social_media: bool = False,
        include_extra: bool = False,
    ):
        client = EnrichlayerClient(credentials)
        profile = await client.fetch_profile(
            linkedin_url=linkedin_url,
            fallback_to_cache=fallback_to_cache,
            use_cache=use_cache,
            include_skills=include_skills,
            include_inferred_salary=include_inferred_salary,
            include_personal_email=include_personal_email,
            include_personal_contact_number=include_personal_contact_number,
            include_social_media=include_social_media,
            include_extra=include_extra,
        )
        return profile

    async def run(
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        """
        Run the block to fetch LinkedIn profile data.

        Args:
            input_data: Input parameters for the block
            credentials: API key credentials for Enrichlayer
            **kwargs: Additional keyword arguments

        Yields:
            Tuples of (output_name, output_value)
        """
        try:
            profile = await self._fetch_profile(
                credentials=credentials,
                linkedin_url=input_data.linkedin_url,
                fallback_to_cache=input_data.fallback_to_cache,
                use_cache=input_data.use_cache,
                include_skills=input_data.include_skills,
                include_inferred_salary=input_data.include_inferred_salary,
                include_personal_email=input_data.include_personal_email,
                include_personal_contact_number=input_data.include_personal_contact_number,
                include_social_media=input_data.include_social_media,
                include_extra=input_data.include_extra,
            )
            yield "profile", profile
        except Exception as e:
            logger.error(f"Error fetching LinkedIn profile: {str(e)}")
            yield "error", str(e)


class LinkedinPersonLookupBlock(Block):
    """Block to look up LinkedIn profiles by person's information using Enrichlayer API."""

    class Input(BlockSchema):
        """Input schema for LinkedinPersonLookupBlock."""

        first_name: str = SchemaField(
            description="Person's first name",
            placeholder="John",
            advanced=False,
        )
        last_name: str | None = SchemaField(
            description="Person's last name",
            placeholder="Doe",
            default=None,
            advanced=False,
        )
        company_domain: str = SchemaField(
            description="Domain of the company they work for (optional)",
            placeholder="example.com",
            advanced=False,
        )
        location: Optional[str] = SchemaField(
            description="Person's location (optional)",
            placeholder="San Francisco",
            default=None,
        )
        title: Optional[str] = SchemaField(
            description="Person's job title (optional)",
            placeholder="CEO",
            default=None,
        )
        include_similarity_checks: bool = SchemaField(
            description="Include similarity checks",
            default=False,
            advanced=True,
        )
        enrich_profile: bool = SchemaField(
            description="Enrich the profile with additional data",
            default=False,
            advanced=True,
        )
        credentials: EnrichlayerCredentialsInput = CredentialsField(
            description="Enrichlayer API credentials"
        )

    class Output(BlockSchema):
        """Output schema for LinkedinPersonLookupBlock."""

        lookup_result: PersonLookupResponse = SchemaField(
            description="LinkedIn profile lookup result"
        )
        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        """Initialize LinkedinPersonLookupBlock."""
        super().__init__(
            id="d237a98a-5c4b-4a1c-b9e3-e6f9a6c81df7",
            description="Look up LinkedIn profiles by person information using Enrichlayer",
            categories={BlockCategory.SOCIAL},
            input_schema=LinkedinPersonLookupBlock.Input,
            output_schema=LinkedinPersonLookupBlock.Output,
            test_input={
                "first_name": "Bill",
                "last_name": "Gates",
                "company_domain": "gatesfoundation.org",
                "include_similarity_checks": True,
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_output=[
                (
                    "lookup_result",
                    PersonLookupResponse(
                        url="https://www.linkedin.com/in/williamhgates/",
                        name_similarity_score=0.93,
                        company_similarity_score=0.83,
                        title_similarity_score=0.3,
                        location_similarity_score=0.20,
                    ),
                )
            ],
            test_credentials=TEST_CREDENTIALS,
            test_mock={
                "_lookup_person": lambda *args, **kwargs: PersonLookupResponse(
                    url="https://www.linkedin.com/in/williamhgates/",
                    name_similarity_score=0.93,
                    company_similarity_score=0.83,
                    title_similarity_score=0.3,
                    location_similarity_score=0.20,
                )
            },
        )

    @staticmethod
    async def _lookup_person(
        credentials: APIKeyCredentials,
        first_name: str,
        company_domain: str,
        last_name: str | None = None,
        location: Optional[str] = None,
        title: Optional[str] = None,
        include_similarity_checks: bool = False,
        enrich_profile: bool = False,
    ):
        client = EnrichlayerClient(credentials=credentials)
        lookup_result = await client.lookup_person(
            first_name=first_name,
            last_name=last_name,
            company_domain=company_domain,
            location=location,
            title=title,
            include_similarity_checks=include_similarity_checks,
            enrich_profile=enrich_profile,
        )
        return lookup_result

    async def run(
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        """
        Run the block to look up LinkedIn profiles.

        Args:
            input_data: Input parameters for the block
            credentials: API key credentials for Enrichlayer
            **kwargs: Additional keyword arguments

        Yields:
            Tuples of (output_name, output_value)
        """
        try:
            lookup_result = await self._lookup_person(
                credentials=credentials,
                first_name=input_data.first_name,
                last_name=input_data.last_name,
                company_domain=input_data.company_domain,
                location=input_data.location,
                title=input_data.title,
                include_similarity_checks=input_data.include_similarity_checks,
                enrich_profile=input_data.enrich_profile,
            )
            yield "lookup_result", lookup_result
        except Exception as e:
            logger.error(f"Error looking up LinkedIn profile: {str(e)}")
            yield "error", str(e)


class LinkedinRoleLookupBlock(Block):
    """Block to look up LinkedIn profiles by role in a company using Enrichlayer API."""

    class Input(BlockSchema):
        """Input schema for LinkedinRoleLookupBlock."""

        role: str = SchemaField(
            description="Role title (e.g., CEO, CTO)",
            placeholder="CEO",
        )
        company_name: str = SchemaField(
            description="Name of the company",
            placeholder="Microsoft",
        )
        enrich_profile: bool = SchemaField(
            description="Enrich the profile with additional data",
            default=False,
            advanced=True,
        )
        credentials: EnrichlayerCredentialsInput = CredentialsField(
            description="Enrichlayer API credentials"
        )

    class Output(BlockSchema):
        """Output schema for LinkedinRoleLookupBlock."""

        role_lookup_result: RoleLookupResponse = SchemaField(
            description="LinkedIn role lookup result"
        )
        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        """Initialize LinkedinRoleLookupBlock."""
        super().__init__(
            id="3b9fc742-06d4-49c7-b5ce-7e302dd7c8a7",
            description="Look up LinkedIn profiles by role in a company using Enrichlayer",
            categories={BlockCategory.SOCIAL},
            input_schema=LinkedinRoleLookupBlock.Input,
            output_schema=LinkedinRoleLookupBlock.Output,
            test_input={
                "role": "Co-chair",
                "company_name": "Gates Foundation",
                "enrich_profile": True,
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_output=[
                (
                    "role_lookup_result",
                    RoleLookupResponse(
                        linkedin_profile_url="https://www.linkedin.com/in/williamhgates/",
                    ),
                )
            ],
            test_credentials=TEST_CREDENTIALS,
            test_mock={
                "_lookup_role": lambda *args, **kwargs: RoleLookupResponse(
                    linkedin_profile_url="https://www.linkedin.com/in/williamhgates/",
                ),
            },
        )

    @staticmethod
    async def _lookup_role(
        credentials: APIKeyCredentials,
        role: str,
        company_name: str,
        enrich_profile: bool = False,
    ):
        client = EnrichlayerClient(credentials=credentials)
        role_lookup_result = await client.lookup_role(
            role=role,
            company_name=company_name,
            enrich_profile=enrich_profile,
        )
        return role_lookup_result

    async def run(
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        """
        Run the block to look up LinkedIn profiles by role.

        Args:
            input_data: Input parameters for the block
            credentials: API key credentials for Enrichlayer
            **kwargs: Additional keyword arguments

        Yields:
            Tuples of (output_name, output_value)
        """
        try:
            role_lookup_result = await self._lookup_role(
                credentials=credentials,
                role=input_data.role,
                company_name=input_data.company_name,
                enrich_profile=input_data.enrich_profile,
            )
            yield "role_lookup_result", role_lookup_result
        except Exception as e:
            logger.error(f"Error looking up role in company: {str(e)}")
            yield "error", str(e)


class GetLinkedinProfilePictureBlock(Block):
    """Block to get LinkedIn profile pictures using Enrichlayer API."""

    class Input(BlockSchema):
        """Input schema for GetLinkedinProfilePictureBlock."""

        linkedin_profile_url: str = SchemaField(
            description="LinkedIn profile URL",
            placeholder="https://www.linkedin.com/in/username/",
        )
        credentials: EnrichlayerCredentialsInput = CredentialsField(
            description="Enrichlayer API credentials"
        )

    class Output(BlockSchema):
        """Output schema for GetLinkedinProfilePictureBlock."""

        profile_picture_url: MediaFileType = SchemaField(
            description="LinkedIn profile picture URL"
        )
        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        """Initialize GetLinkedinProfilePictureBlock."""
        super().__init__(
            id="68d5a942-9b3f-4e9a-b7c1-d96ea4321f0d",
            description="Get LinkedIn profile pictures using Enrichlayer",
            categories={BlockCategory.SOCIAL},
            input_schema=GetLinkedinProfilePictureBlock.Input,
            output_schema=GetLinkedinProfilePictureBlock.Output,
            test_input={
                "linkedin_profile_url": "https://www.linkedin.com/in/williamhgates/",
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_output=[
                (
                    "profile_picture_url",
                    "https://media.licdn.com/dms/image/C4D03AQFj-xjuXrLFSQ/profile-displayphoto-shrink_800_800/0/1576881858598?e=1686787200&v=beta&t=zrQC76QwsfQQIWthfOnrKRBMZ5D-qIAvzLXLmWgYvTk",
                )
            ],
            test_credentials=TEST_CREDENTIALS,
            test_mock={
                "_get_profile_picture": lambda *args, **kwargs: "https://media.licdn.com/dms/image/C4D03AQFj-xjuXrLFSQ/profile-displayphoto-shrink_800_800/0/1576881858598?e=1686787200&v=beta&t=zrQC76QwsfQQIWthfOnrKRBMZ5D-qIAvzLXLmWgYvTk",
            },
        )

    @staticmethod
    async def _get_profile_picture(
        credentials: APIKeyCredentials, linkedin_profile_url: str
    ):
        client = EnrichlayerClient(credentials=credentials)
        profile_picture_response = await client.get_profile_picture(
            linkedin_profile_url=linkedin_profile_url,
        )
        return profile_picture_response.profile_picture_url

    async def run(
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        """
        Run the block to get LinkedIn profile pictures.

        Args:
            input_data: Input parameters for the block
            credentials: API key credentials for Enrichlayer
            **kwargs: Additional keyword arguments

        Yields:
            Tuples of (output_name, output_value)
        """
        try:
            profile_picture = await self._get_profile_picture(
                credentials=credentials,
                linkedin_profile_url=input_data.linkedin_profile_url,
            )
            yield "profile_picture_url", profile_picture
        except Exception as e:
            logger.error(f"Error getting profile picture: {str(e)}")
            yield "error", str(e)
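Each block's run method is an async generator yielding (output_name, output_value) tuples. A sketch of driving one of these blocks directly, mirroring the consumption pattern the tests later in this diff use (with the mock credentials, the live API call would fail and the block would yield on its error output instead):

import asyncio


async def demo():
    block = GetLinkedinProfileBlock()
    input_data = block.input_schema(
        linkedin_url="https://www.linkedin.com/in/williamhgates/",
        credentials=TEST_CREDENTIALS_INPUT,
    )
    # run() yields (output_name, output_value) pairs until the block finishes
    async for name, value in block.run(input_data, credentials=TEST_CREDENTIALS):
        print(name, value)


asyncio.run(demo())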
5
autogpt_platform/backend/backend/blocks/firecrawl/extract.py
Normal file → Executable file
@@ -29,8 +29,8 @@ class FirecrawlExtractBlock(Block):
        prompt: str | None = SchemaField(
            description="The prompt to use for the crawl", default=None, advanced=False
        )
        output_schema: str | None = SchemaField(
            description="A more rigid structure if you already know the JSON layout.",
        output_schema: dict | None = SchemaField(
            description="A Json Schema describing the output structure if more rigid structure is desired.",
            default=None,
        )
        enable_web_search: bool = SchemaField(
@@ -56,7 +56,6 @@ class FirecrawlExtractBlock(Block):

        app = FirecrawlApp(api_key=credentials.api_key.get_secret_value())

        # Sync call
        extract_result = app.extract(
            urls=input_data.urls,
            prompt=input_data.prompt,

File diff suppressed because it is too large
@@ -37,6 +37,7 @@ LLMProviderName = Literal[
    ProviderName.OPENAI,
    ProviderName.OPEN_ROUTER,
    ProviderName.LLAMA_API,
    ProviderName.V0,
]
AICredentials = CredentialsMetaInput[LLMProviderName, Literal["api_key"]]

@@ -155,6 +156,10 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
    LLAMA_API_LLAMA4_MAVERICK = "Llama-4-Maverick-17B-128E-Instruct-FP8"
    LLAMA_API_LLAMA3_3_8B = "Llama-3.3-8B-Instruct"
    LLAMA_API_LLAMA3_3_70B = "Llama-3.3-70B-Instruct"
    # v0 by Vercel models
    V0_1_5_MD = "v0-1.5-md"
    V0_1_5_LG = "v0-1.5-lg"
    V0_1_0_MD = "v0-1.0-md"

    @property
    def metadata(self) -> ModelMetadata:
@@ -280,6 +285,10 @@ MODEL_METADATA = {
    LlmModel.LLAMA_API_LLAMA4_MAVERICK: ModelMetadata("llama_api", 128000, 4028),
    LlmModel.LLAMA_API_LLAMA3_3_8B: ModelMetadata("llama_api", 128000, 4028),
    LlmModel.LLAMA_API_LLAMA3_3_70B: ModelMetadata("llama_api", 128000, 4028),
    # v0 by Vercel models
    LlmModel.V0_1_5_MD: ModelMetadata("v0", 128000, 64000),
    LlmModel.V0_1_5_LG: ModelMetadata("v0", 512000, 64000),
    LlmModel.V0_1_0_MD: ModelMetadata("v0", 128000, 64000),
}

for model in LlmModel:
@@ -676,7 +685,11 @@ async def llm_call(
        client = openai.OpenAI(
            base_url="https://api.aimlapi.com/v2",
            api_key=credentials.api_key.get_secret_value(),
            default_headers={"X-Project": "AutoGPT"},
            default_headers={
                "X-Project": "AutoGPT",
                "X-Title": "AutoGPT",
                "HTTP-Referer": "https://github.com/Significant-Gravitas/AutoGPT",
            },
        )

        completion = client.chat.completions.create(
@@ -696,6 +709,42 @@ async def llm_call(
            ),
            reasoning=None,
        )
    elif provider == "v0":
        tools_param = tools if tools else openai.NOT_GIVEN
        client = openai.AsyncOpenAI(
            base_url="https://api.v0.dev/v1",
            api_key=credentials.api_key.get_secret_value(),
        )

        response_format = None
        if json_format:
            response_format = {"type": "json_object"}

        parallel_tool_calls_param = get_parallel_tool_calls_param(
            llm_model, parallel_tool_calls
        )

        response = await client.chat.completions.create(
            model=llm_model.value,
            messages=prompt,  # type: ignore
            response_format=response_format,  # type: ignore
            max_tokens=max_tokens,
            tools=tools_param,  # type: ignore
            parallel_tool_calls=parallel_tool_calls_param,
        )

        tool_calls = extract_openai_tool_calls(response)
        reasoning = extract_openai_reasoning(response)

        return LLMResponse(
            raw_response=response.choices[0].message,
            prompt=prompt,
            response=response.choices[0].message.content or "",
            tool_calls=tool_calls,
            prompt_tokens=response.usage.prompt_tokens if response.usage else 0,
            completion_tokens=response.usage.completion_tokens if response.usage else 0,
            reasoning=reasoning,
        )
    else:
        raise ValueError(f"Unsupported LLM provider: {provider}")

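How a v0 model flows through the tables above, assuming the ModelMetadata positional fields are provider, context length, and max output tokens (the field names are not shown in this hunk; only .provider is confirmed elsewhere in the diff):

model = LlmModel.V0_1_5_MD
meta = model.metadata
assert meta.provider == "v0"  # routes llm_call() to the api.v0.dev branch above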
@@ -291,9 +291,32 @@ class SmartDecisionMakerBlock(Block):

            for link in links:
                sink_name = SmartDecisionMakerBlock.cleanup(link.sink_name)
                properties[sink_name] = sink_block_input_schema.get_field_schema(
                    link.sink_name
                )

                # Handle dynamic fields (e.g., values_#_*, items_$_*, etc.)
                # These are fields that get merged by the executor into their base field
                if (
                    "_#_" in link.sink_name
                    or "_$_" in link.sink_name
                    or "_@_" in link.sink_name
                ):
                    # For dynamic fields, provide a generic string schema
                    # The executor will handle merging these into the appropriate structure
                    properties[sink_name] = {
                        "type": "string",
                        "description": f"Dynamic value for {link.sink_name}",
                    }
                else:
                    # For regular fields, use the block's schema
                    try:
                        properties[sink_name] = sink_block_input_schema.get_field_schema(
                            link.sink_name
                        )
                    except (KeyError, AttributeError):
                        # If the field doesn't exist in the schema, provide a generic schema
                        properties[sink_name] = {
                            "type": "string",
                            "description": f"Value for {link.sink_name}",
                        }

            tool_function["parameters"] = {
                **block.input_schema.jsonschema(),
@@ -478,10 +501,6 @@ class SmartDecisionMakerBlock(Block):
                }
            )
            prompt.extend(tool_output)
        if input_data.multiple_tool_calls:
            input_data.sys_prompt += "\nYou can call a tool (different tools) multiple times in a single response."
        else:
            input_data.sys_prompt += "\nOnly provide EXACTLY one function call, multiple tool calls is strictly prohibited."

        values = input_data.prompt_values
        if values:
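The _#_ infix marks a dynamic dictionary entry: the executor later folds a link like values_#_name into the name key of the sink block's values input (with _$_ and _@_ playing the analogous role for list and object entries). For intuition only, a rough sketch of that merge; the real executor logic is more involved:

def merge_dynamic_fields(inputs: dict) -> dict:
    # Fold "values_#_name"-style keys into {"values": {"name": ...}}.
    merged: dict = {}
    for key, value in inputs.items():
        if "_#_" in key:
            base, _, entry = key.partition("_#_")
            merged.setdefault(base, {})[entry] = value
        else:
            merged[key] = value
    return merged


assert merge_dynamic_fields({"values_#_name": "Alice", "other": 1}) == {
    "values": {"name": "Alice"},
    "other": 1,
}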
@@ -1,9 +1,8 @@
import logging

import pytest
from prisma.models import User

from backend.data.model import ProviderName
from backend.data.model import ProviderName, User
from backend.server.model import CreateGraph
from backend.server.rest_api import AgentServer
from backend.usecases.sample import create_test_graph, create_test_user

@@ -0,0 +1,130 @@
from unittest.mock import Mock

import pytest

from backend.blocks.data_manipulation import AddToListBlock, CreateDictionaryBlock
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock


@pytest.mark.asyncio
async def test_smart_decision_maker_handles_dynamic_dict_fields():
    """Test Smart Decision Maker can handle dynamic dictionary fields (_#_) for any block"""

    # Create a mock node for CreateDictionaryBlock
    mock_node = Mock()
    mock_node.block = CreateDictionaryBlock()
    mock_node.block_id = CreateDictionaryBlock().id
    mock_node.input_default = {}

    # Create mock links with dynamic dictionary fields
    mock_links = [
        Mock(
            source_name="tools_^_create_dict_~_name",
            sink_name="values_#_name",  # Dynamic dict field
            sink_id="dict_node_id",
            source_id="smart_decision_node_id",
        ),
        Mock(
            source_name="tools_^_create_dict_~_age",
            sink_name="values_#_age",  # Dynamic dict field
            sink_id="dict_node_id",
            source_id="smart_decision_node_id",
        ),
        Mock(
            source_name="tools_^_create_dict_~_city",
            sink_name="values_#_city",  # Dynamic dict field
            sink_id="dict_node_id",
            source_id="smart_decision_node_id",
        ),
    ]

    # Generate function signature
    signature = await SmartDecisionMakerBlock._create_block_function_signature(
        mock_node, mock_links  # type: ignore
    )

    # Verify the signature was created successfully
    assert signature["type"] == "function"
    assert "parameters" in signature["function"]
    assert "properties" in signature["function"]["parameters"]

    # Check that dynamic fields are handled
    properties = signature["function"]["parameters"]["properties"]
    assert len(properties) == 3  # Should have all three fields

    # Each dynamic field should have proper schema
    for prop_value in properties.values():
        assert "type" in prop_value
        assert prop_value["type"] == "string"  # Dynamic fields get string type
        assert "description" in prop_value
        assert "Dynamic value for" in prop_value["description"]


@pytest.mark.asyncio
async def test_smart_decision_maker_handles_dynamic_list_fields():
    """Test Smart Decision Maker can handle dynamic list fields (_$_) for any block"""

    # Create a mock node for AddToListBlock
    mock_node = Mock()
    mock_node.block = AddToListBlock()
    mock_node.block_id = AddToListBlock().id
    mock_node.input_default = {}

    # Create mock links with dynamic list fields
    mock_links = [
        Mock(
            source_name="tools_^_add_to_list_~_0",
            sink_name="entries_$_0",  # Dynamic list field
            sink_id="list_node_id",
            source_id="smart_decision_node_id",
        ),
        Mock(
            source_name="tools_^_add_to_list_~_1",
            sink_name="entries_$_1",  # Dynamic list field
            sink_id="list_node_id",
            source_id="smart_decision_node_id",
        ),
    ]

    # Generate function signature
    signature = await SmartDecisionMakerBlock._create_block_function_signature(
        mock_node, mock_links  # type: ignore
    )

    # Verify dynamic list fields are handled properly
    assert signature["type"] == "function"
    properties = signature["function"]["parameters"]["properties"]
    assert len(properties) == 2  # Should have both list items

    # Each dynamic field should have proper schema
    for prop_value in properties.values():
        assert prop_value["type"] == "string"
        assert "Dynamic value for" in prop_value["description"]


@pytest.mark.asyncio
async def test_create_dict_block_with_dynamic_values():
    """Test CreateDictionaryBlock processes dynamic values correctly"""

    block = CreateDictionaryBlock()

    # Simulate what happens when executor merges dynamic fields
    # The executor merges values_#_* fields into the values dict
    input_data = block.input_schema(
        values={
            "existing": "value",
            "name": "Alice",  # This would come from values_#_name
            "age": 25,  # This would come from values_#_age
        }
    )

    # Run the block
    result = {}
    async for output_name, output_value in block.run(input_data):
        result[output_name] = output_value

    # Check the result
    assert "dictionary" in result
    assert result["dictionary"]["existing"] == "value"
    assert result["dictionary"]["name"] == "Alice"
    assert result["dictionary"]["age"] == 25
@@ -5,6 +5,12 @@ from backend.blocks.ai_shortform_video_block import AIShortformVideoCreatorBlock
from backend.blocks.apollo.organization import SearchOrganizationsBlock
from backend.blocks.apollo.people import SearchPeopleBlock
from backend.blocks.apollo.person import GetPersonDetailBlock
from backend.blocks.enrichlayer.linkedin import (
    GetLinkedinProfileBlock,
    GetLinkedinProfilePictureBlock,
    LinkedinPersonLookupBlock,
    LinkedinRoleLookupBlock,
)
from backend.blocks.flux_kontext import AIImageEditorBlock, FluxKontextModelName
from backend.blocks.ideogram import IdeogramModelBlock
from backend.blocks.jina.embeddings import JinaEmbeddingBlock
@@ -30,6 +36,7 @@ from backend.integrations.credentials_store import (
    anthropic_credentials,
    apollo_credentials,
    did_credentials,
    enrichlayer_credentials,
    groq_credentials,
    ideogram_credentials,
    jina_credentials,
@@ -39,6 +46,7 @@ from backend.integrations.credentials_store import (
    replicate_credentials,
    revid_credentials,
    unreal_credentials,
    v0_credentials,
)

# =============== Configure the cost for each LLM Model call =============== #
@@ -115,6 +123,10 @@ MODEL_COST: dict[LlmModel, int] = {
    LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: 1,
    LlmModel.GEMINI_2_0_FLASH_LITE: 1,
    LlmModel.DEEPSEEK_R1_0528: 1,
    # v0 by Vercel models
    LlmModel.V0_1_5_MD: 1,
    LlmModel.V0_1_5_LG: 2,
    LlmModel.V0_1_0_MD: 1,
}

for model in LlmModel:
@@ -204,6 +216,23 @@ LLM_COST = (
        for model, cost in MODEL_COST.items()
        if MODEL_METADATA[model].provider == "llama_api"
    ]
    # v0 by Vercel Models
    + [
        BlockCost(
            cost_type=BlockCostType.RUN,
            cost_filter={
                "model": model,
                "credentials": {
                    "id": v0_credentials.id,
                    "provider": v0_credentials.provider,
                    "type": v0_credentials.type,
                },
            },
            cost_amount=cost,
        )
        for model, cost in MODEL_COST.items()
        if MODEL_METADATA[model].provider == "v0"
    ]
    # AI/ML Api Models
    + [
        BlockCost(
@@ -376,6 +405,54 @@ BLOCK_COSTS: dict[Type[Block], list[BlockCost]] = {
            },
        )
    ],
    GetLinkedinProfileBlock: [
        BlockCost(
            cost_amount=1,
            cost_filter={
                "credentials": {
                    "id": enrichlayer_credentials.id,
                    "provider": enrichlayer_credentials.provider,
                    "type": enrichlayer_credentials.type,
                }
            },
        )
    ],
    LinkedinPersonLookupBlock: [
        BlockCost(
            cost_amount=2,
            cost_filter={
                "credentials": {
                    "id": enrichlayer_credentials.id,
                    "provider": enrichlayer_credentials.provider,
                    "type": enrichlayer_credentials.type,
                }
            },
        )
    ],
    LinkedinRoleLookupBlock: [
        BlockCost(
            cost_amount=3,
            cost_filter={
                "credentials": {
                    "id": enrichlayer_credentials.id,
                    "provider": enrichlayer_credentials.provider,
                    "type": enrichlayer_credentials.type,
                }
            },
        )
    ],
    GetLinkedinProfilePictureBlock: [
        BlockCost(
            cost_amount=3,
            cost_filter={
                "credentials": {
                    "id": enrichlayer_credentials.id,
                    "provider": enrichlayer_credentials.provider,
                    "type": enrichlayer_credentials.type,
                }
            },
        )
    ],
    SmartDecisionMakerBlock: LLM_COST,
    SearchOrganizationsBlock: [
        BlockCost(
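
For illustration only: cost entries like the ones above select a price by matching their cost_filter against a block's input. A minimal sketch of that matching idea, with simplified stand-in types (the platform's real BlockCost model and matching logic may differ, and the model name below is a placeholder):

from dataclasses import dataclass, field
from typing import Any


@dataclass
class SimpleBlockCost:
    # Stand-in for the platform's BlockCost; fields reduced for the sketch.
    cost_amount: int
    cost_filter: dict[str, Any] = field(default_factory=dict)


def filter_matches(cost_filter: dict[str, Any], input_data: dict[str, Any]) -> bool:
    # A filter matches when every (possibly nested) key it names is present
    # in the input with the same value.
    for key, expected in cost_filter.items():
        actual = input_data.get(key)
        if isinstance(expected, dict) and isinstance(actual, dict):
            if not filter_matches(expected, actual):
                return False
        elif actual != expected:
            return False
    return True


cost = SimpleBlockCost(cost_amount=1, cost_filter={"model": "v0-1.5-md"})
assert filter_matches(cost.cost_filter, {"model": "v0-1.5-md", "prompt": "hi"})
assert not filter_matches(cost.cost_filter, {"model": "gpt-4o", "prompt": "hi"})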
@@ -286,11 +286,17 @@ class UserCreditBase(ABC):
        transaction = await CreditTransaction.prisma().find_first_or_raise(
            where={"transactionKey": transaction_key, "userId": user_id}
        )

        if transaction.isActive:
            return

        async with db.locked_transaction(f"usr_trx_{user_id}"):
            transaction = await CreditTransaction.prisma().find_first_or_raise(
                where={"transactionKey": transaction_key, "userId": user_id}
            )
            if transaction.isActive:
                return

            user_balance, _ = await self._get_credits(user_id)
            await CreditTransaction.prisma().update(
                where={
@@ -998,8 +1004,8 @@ def get_block_costs() -> dict[str, list[BlockCost]]:
async def get_stripe_customer_id(user_id: str) -> str:
    user = await get_user_by_id(user_id)

    if user.stripeCustomerId:
        return user.stripeCustomerId
    if user.stripe_customer_id:
        return user.stripe_customer_id

    customer = stripe.Customer.create(
        name=user.name or "",
@@ -1022,10 +1028,10 @@ async def set_auto_top_up(user_id: str, config: AutoTopUpConfig):
async def get_auto_top_up(user_id: str) -> AutoTopUpConfig:
    user = await get_user_by_id(user_id)

    if not user.topUpConfig:
    if not user.top_up_config:
        return AutoTopUpConfig(threshold=0, amount=0)

    return AutoTopUpConfig.model_validate(user.topUpConfig)
    return AutoTopUpConfig.model_validate(user.top_up_config)


async def admin_get_user_history(
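
The first hunk above is double-checked locking: a cheap isActive pre-check avoids taking the per-user lock on the common path, and the check is repeated inside the lock because another worker may have activated the transaction in between. A minimal sketch of the same shape with stand-in names (not the platform's API):

import asyncio

_user_locks: dict[str, asyncio.Lock] = {}  # stand-in for db.locked_transaction


async def activate_once(user_id: str, fetch_is_active, activate) -> None:
    # Fast path: no lock needed if the transaction is already active.
    if await fetch_is_active(user_id):
        return
    lock = _user_locks.setdefault(user_id, asyncio.Lock())
    async with lock:
        # Re-check under the lock; a concurrent worker may have won the race.
        if await fetch_is_active(user_id):
            return
        await activate(user_id)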
autogpt_platform/backend/backend/data/generate_data.py (new file, 109 additions)
@@ -0,0 +1,109 @@
import logging
from collections import defaultdict
from datetime import datetime

from prisma.enums import AgentExecutionStatus

from backend.data.execution import get_graph_executions
from backend.data.graph import get_graph_metadata
from backend.data.model import UserExecutionSummaryStats
from backend.server.v2.store.exceptions import DatabaseError
from backend.util.logging import TruncatedLogger

logger = TruncatedLogger(logging.getLogger(__name__), prefix="[SummaryData]")


async def get_user_execution_summary_data(
    user_id: str, start_time: datetime, end_time: datetime
) -> UserExecutionSummaryStats:
    """Gather all summary data for a user in a time range.

    This function fetches graph executions once and aggregates all required
    statistics in a single pass for efficiency.
    """
    try:
        # Fetch graph executions once
        executions = await get_graph_executions(
            user_id=user_id,
            created_time_gte=start_time,
            created_time_lte=end_time,
        )

        # Initialize aggregation variables
        total_credits_used = 0.0
        total_executions = len(executions)
        successful_runs = 0
        failed_runs = 0
        terminated_runs = 0
        execution_times = []
        agent_usage = defaultdict(int)
        cost_by_graph_id = defaultdict(float)

        # Single pass through executions to aggregate all stats
        for execution in executions:
            # Count execution statuses (including TERMINATED as failed)
            if execution.status == AgentExecutionStatus.COMPLETED:
                successful_runs += 1
            elif execution.status == AgentExecutionStatus.FAILED:
                failed_runs += 1
            elif execution.status == AgentExecutionStatus.TERMINATED:
                terminated_runs += 1

            # Aggregate costs from stats
            if execution.stats and hasattr(execution.stats, "cost"):
                cost_in_dollars = execution.stats.cost / 100
                total_credits_used += cost_in_dollars
                cost_by_graph_id[execution.graph_id] += cost_in_dollars

            # Collect execution times
            if execution.stats and hasattr(execution.stats, "duration"):
                execution_times.append(execution.stats.duration)

            # Count agent usage
            agent_usage[execution.graph_id] += 1

        # Calculate derived stats
        total_execution_time = sum(execution_times)
        average_execution_time = (
            total_execution_time / len(execution_times) if execution_times else 0
        )

        # Find most used agent
        most_used_agent = "No agents used"
        if agent_usage:
            most_used_agent_id = max(agent_usage, key=lambda k: agent_usage[k])
            try:
                graph_meta = await get_graph_metadata(graph_id=most_used_agent_id)
                most_used_agent = (
                    graph_meta.name if graph_meta else f"Agent {most_used_agent_id[:8]}"
                )
            except Exception:
                logger.warning(f"Could not get metadata for graph {most_used_agent_id}")
                most_used_agent = f"Agent {most_used_agent_id[:8]}"

        # Convert graph_ids to agent names for cost breakdown
        cost_breakdown = {}
        for graph_id, cost in cost_by_graph_id.items():
            try:
                graph_meta = await get_graph_metadata(graph_id=graph_id)
                agent_name = graph_meta.name if graph_meta else f"Agent {graph_id[:8]}"
            except Exception:
                logger.warning(f"Could not get metadata for graph {graph_id}")
                agent_name = f"Agent {graph_id[:8]}"
            cost_breakdown[agent_name] = cost

        # Build the summary stats object (include terminated runs as failed)
        return UserExecutionSummaryStats(
            total_credits_used=total_credits_used,
            total_executions=total_executions,
            successful_runs=successful_runs,
            failed_runs=failed_runs + terminated_runs,
            most_used_agent=most_used_agent,
            total_execution_time=total_execution_time,
            average_execution_time=average_execution_time,
            cost_breakdown=cost_breakdown,
        )

    except Exception as e:
        logger.error(f"Failed to get user summary data: {e}")
        raise DatabaseError(f"Failed to get user summary data: {e}") from e
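
Assuming the new module is used as defined above, a caller computing a weekly summary might look like this (the user ID is a placeholder):

import asyncio
from datetime import datetime, timedelta, timezone

from backend.data.generate_data import get_user_execution_summary_data


async def weekly_summary(user_id: str) -> None:
    end_time = datetime.now(tz=timezone.utc)
    start_time = end_time - timedelta(days=7)
    stats = await get_user_execution_summary_data(user_id, start_time, end_time)
    # Every aggregate comes from the single pass over the fetched executions.
    print(stats.total_executions, stats.successful_runs, stats.cost_breakdown)


asyncio.run(weekly_summary("example-user-id"))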
@@ -5,6 +5,7 @@ import enum
import logging
from collections import defaultdict
from datetime import datetime, timezone
from json import JSONDecodeError
from typing import (
    TYPE_CHECKING,
    Annotated,
@@ -40,12 +41,120 @@ from pydantic_core import (
from typing_extensions import TypedDict

from backend.integrations.providers import ProviderName
from backend.util.json import loads as json_loads
from backend.util.settings import Secrets

# Type alias for any provider name (including custom ones)
AnyProviderName = str  # Will be validated as ProviderName at runtime


class User(BaseModel):
    """Application-layer User model with snake_case convention."""

    model_config = ConfigDict(
        extra="forbid",
        str_strip_whitespace=True,
    )

    id: str = Field(..., description="User ID")
    email: str = Field(..., description="User email address")
    email_verified: bool = Field(default=True, description="Whether email is verified")
    name: Optional[str] = Field(None, description="User display name")
    created_at: datetime = Field(..., description="When user was created")
    updated_at: datetime = Field(..., description="When user was last updated")
    metadata: dict[str, Any] = Field(
        default_factory=dict, description="User metadata as dict"
    )
    integrations: str = Field(default="", description="Encrypted integrations data")
    stripe_customer_id: Optional[str] = Field(None, description="Stripe customer ID")
    top_up_config: Optional["AutoTopUpConfig"] = Field(
        None, description="Top up configuration"
    )

    # Notification preferences
    max_emails_per_day: int = Field(default=3, description="Maximum emails per day")
    notify_on_agent_run: bool = Field(default=True, description="Notify on agent run")
    notify_on_zero_balance: bool = Field(
        default=True, description="Notify on zero balance"
    )
    notify_on_low_balance: bool = Field(
        default=True, description="Notify on low balance"
    )
    notify_on_block_execution_failed: bool = Field(
        default=True, description="Notify on block execution failure"
    )
    notify_on_continuous_agent_error: bool = Field(
        default=True, description="Notify on continuous agent error"
    )
    notify_on_daily_summary: bool = Field(
        default=True, description="Notify on daily summary"
    )
    notify_on_weekly_summary: bool = Field(
        default=True, description="Notify on weekly summary"
    )
    notify_on_monthly_summary: bool = Field(
        default=True, description="Notify on monthly summary"
    )

    @classmethod
    def from_db(cls, prisma_user: "PrismaUser") -> "User":
        """Convert a database User object to application User model."""
        # Handle metadata field - convert from JSON string or dict to dict
        metadata = {}
        if prisma_user.metadata:
            if isinstance(prisma_user.metadata, str):
                try:
                    metadata = json_loads(prisma_user.metadata)
                except (JSONDecodeError, TypeError):
                    metadata = {}
            elif isinstance(prisma_user.metadata, dict):
                metadata = prisma_user.metadata

        # Handle topUpConfig field
        top_up_config = None
        if prisma_user.topUpConfig:
            if isinstance(prisma_user.topUpConfig, str):
                try:
                    config_dict = json_loads(prisma_user.topUpConfig)
                    top_up_config = AutoTopUpConfig.model_validate(config_dict)
                except (JSONDecodeError, TypeError, ValueError):
                    top_up_config = None
            elif isinstance(prisma_user.topUpConfig, dict):
                try:
                    top_up_config = AutoTopUpConfig.model_validate(
                        prisma_user.topUpConfig
                    )
                except ValueError:
                    top_up_config = None

        return cls(
            id=prisma_user.id,
            email=prisma_user.email,
            email_verified=prisma_user.emailVerified or True,
            name=prisma_user.name,
            created_at=prisma_user.createdAt,
            updated_at=prisma_user.updatedAt,
            metadata=metadata,
            integrations=prisma_user.integrations or "",
            stripe_customer_id=prisma_user.stripeCustomerId,
            top_up_config=top_up_config,
            max_emails_per_day=prisma_user.maxEmailsPerDay or 3,
            notify_on_agent_run=prisma_user.notifyOnAgentRun or True,
            notify_on_zero_balance=prisma_user.notifyOnZeroBalance or True,
            notify_on_low_balance=prisma_user.notifyOnLowBalance or True,
            notify_on_block_execution_failed=prisma_user.notifyOnBlockExecutionFailed
            or True,
            notify_on_continuous_agent_error=prisma_user.notifyOnContinuousAgentError
            or True,
            notify_on_daily_summary=prisma_user.notifyOnDailySummary or True,
            notify_on_weekly_summary=prisma_user.notifyOnWeeklySummary or True,
            notify_on_monthly_summary=prisma_user.notifyOnMonthlySummary or True,
        )


if TYPE_CHECKING:
    from prisma.models import User as PrismaUser

    from backend.data.block import BlockSchema

T = TypeVar("T")
@@ -712,3 +821,21 @@ class GraphExecutionStats(BaseModel):
    activity_status: Optional[str] = Field(
        default=None, description="AI-generated summary of what the agent did"
    )


class UserExecutionSummaryStats(BaseModel):
    """Summary of user statistics for a specific user."""

    model_config = ConfigDict(
        extra="allow",
        arbitrary_types_allowed=True,
    )

    total_credits_used: float = Field(default=0)
    total_executions: int = Field(default=0)
    successful_runs: int = Field(default=0)
    failed_runs: int = Field(default=0)
    most_used_agent: str = Field(default="")
    total_execution_time: float = Field(default=0)
    average_execution_time: float = Field(default=0)
    cost_breakdown: dict[str, float] = Field(default_factory=dict)
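
The from_db classmethod above is a boundary converter from the camelCase Prisma row to the snake_case application model. A stripped-down sketch of the pattern with three fields (illustrative only; not the platform's actual model):

from datetime import datetime, timezone
from types import SimpleNamespace
from typing import Optional

from pydantic import BaseModel


class MiniUser(BaseModel):
    id: str
    stripe_customer_id: Optional[str] = None
    created_at: datetime

    @classmethod
    def from_db(cls, row) -> "MiniUser":
        # DB layer exposes camelCase attributes; the app layer uses snake_case.
        return cls(
            id=row.id,
            stripe_customer_id=row.stripeCustomerId,
            created_at=row.createdAt,
        )


row = SimpleNamespace(
    id="u1", stripeCustomerId=None, createdAt=datetime.now(tz=timezone.utc)
)
print(MiniUser.from_db(row).stripe_customer_id)  # -> None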
@@ -9,11 +9,11 @@ from urllib.parse import quote_plus
from autogpt_libs.auth.models import DEFAULT_USER_ID
from fastapi import HTTPException
from prisma.enums import NotificationType
from prisma.models import User
from prisma.models import User as PrismaUser
from prisma.types import JsonFilter, UserCreateInput, UserUpdateInput

from backend.data.db import prisma
from backend.data.model import UserIntegrations, UserMetadata
from backend.data.model import User, UserIntegrations, UserMetadata
from backend.data.notifications import NotificationPreference, NotificationPreferenceDTO
from backend.server.v2.store.exceptions import DatabaseError
from backend.util.encryption import JSONCryptor
@@ -44,7 +44,7 @@ async def get_or_create_user(user_data: dict) -> User:
            )
        )

        return User.model_validate(user)
        return User.from_db(user)
    except Exception as e:
        raise DatabaseError(f"Failed to get or create user {user_data}: {e}") from e

@@ -53,7 +53,7 @@ async def get_user_by_id(user_id: str) -> User:
    user = await prisma.user.find_unique(where={"id": user_id})
    if not user:
        raise ValueError(f"User not found with ID: {user_id}")
    return User.model_validate(user)
    return User.from_db(user)


async def get_user_email_by_id(user_id: str) -> Optional[str]:
@@ -67,7 +67,7 @@ async def get_user_email_by_id(user_id: str) -> Optional[str]:
async def get_user_by_email(email: str) -> Optional[User]:
    try:
        user = await prisma.user.find_unique(where={"email": email})
        return User.model_validate(user) if user else None
        return User.from_db(user) if user else None
    except Exception as e:
        raise DatabaseError(f"Failed to get user by email {email}: {e}") from e

@@ -91,11 +91,11 @@ async def create_default_user() -> Optional[User]:
            name="Default User",
        )
    )
    return User.model_validate(user)
    return User.from_db(user)


async def get_user_integrations(user_id: str) -> UserIntegrations:
    user = await User.prisma().find_unique_or_raise(
    user = await PrismaUser.prisma().find_unique_or_raise(
        where={"id": user_id},
    )

@@ -110,7 +110,7 @@ async def get_user_integrations(user_id: str) -> UserIntegrations:

async def update_user_integrations(user_id: str, data: UserIntegrations):
    encrypted_data = JSONCryptor().encrypt(data.model_dump(exclude_none=True))
    await User.prisma().update(
    await PrismaUser.prisma().update(
        where={"id": user_id},
        data={"integrations": encrypted_data},
    )
@@ -118,7 +118,7 @@ async def update_user_integrations(user_id: str, data: UserIntegrations):

async def migrate_and_encrypt_user_integrations():
    """Migrate integration credentials and OAuth states from metadata to integrations column."""
    users = await User.prisma().find_many(
    users = await PrismaUser.prisma().find_many(
        where={
            "metadata": cast(
                JsonFilter,
@@ -154,7 +154,7 @@ async def migrate_and_encrypt_user_integrations():
            raw_metadata.pop("integration_oauth_states", None)

            # Update metadata without integration data
            await User.prisma().update(
            await PrismaUser.prisma().update(
                where={"id": user.id},
                data={"metadata": SafeJson(raw_metadata)},
            )
@@ -162,7 +162,7 @@ async def migrate_and_encrypt_user_integrations():

async def get_active_user_ids_in_timerange(start_time: str, end_time: str) -> list[str]:
    try:
        users = await User.prisma().find_many(
        users = await PrismaUser.prisma().find_many(
            where={
                "AgentGraphExecutions": {
                    "some": {
@@ -192,7 +192,7 @@ async def get_active_users_ids() -> list[str]:

async def get_user_notification_preference(user_id: str) -> NotificationPreference:
    try:
        user = await User.prisma().find_unique_or_raise(
        user = await PrismaUser.prisma().find_unique_or_raise(
            where={"id": user_id},
        )

@@ -269,7 +269,7 @@ async def update_user_notification_preference(
    if data.daily_limit:
        update_data["maxEmailsPerDay"] = data.daily_limit

    user = await User.prisma().update(
    user = await PrismaUser.prisma().update(
        where={"id": user_id},
        data=update_data,
    )
@@ -307,7 +307,7 @@ async def update_user_notification_preference(
async def set_user_email_verification(user_id: str, verified: bool) -> None:
    """Set the email verification status for a user."""
    try:
        await User.prisma().update(
        await PrismaUser.prisma().update(
            where={"id": user_id},
            data={"emailVerified": verified},
        )
@@ -320,7 +320,7 @@ async def set_user_email_verification(user_id: str, verified: bool) -> None:
async def get_user_email_verification(user_id: str) -> bool:
    """Get the email verification status for a user."""
    try:
        user = await User.prisma().find_unique_or_raise(
        user = await PrismaUser.prisma().find_unique_or_raise(
            where={"id": user_id},
        )
        return user.emailVerified
@@ -6,20 +6,17 @@ import json
import logging
from typing import TYPE_CHECKING, Any, NotRequired, TypedDict

from autogpt_libs.feature_flag.client import is_feature_enabled
from pydantic import SecretStr

from backend.blocks.llm import LlmModel, llm_call
from backend.data.block import get_block
from backend.data.execution import ExecutionStatus, NodeExecutionResult
from backend.data.model import APIKeyCredentials, GraphExecutionStats
from backend.util.feature_flag import Flag, is_feature_enabled
from backend.util.retry import func_retry
from backend.util.settings import Settings
from backend.util.truncate import truncate

# LaunchDarkly feature flag key for AI activity status generation
AI_ACTIVITY_STATUS_FLAG_KEY = "ai-agent-execution-summary"

if TYPE_CHECKING:
    from backend.executor import DatabaseManagerAsyncClient

@@ -102,8 +99,8 @@ async def generate_activity_status_for_execution(
    Returns:
        AI-generated activity status string, or None if feature is disabled
    """
    # Check LaunchDarkly feature flag for AI activity status generation
    if not is_feature_enabled(AI_ACTIVITY_STATUS_FLAG_KEY, user_id, default=False):
    # Check LaunchDarkly feature flag for AI activity status generation with full context support
    if not await is_feature_enabled(Flag.AI_ACTIVITY_STATUS, user_id):
        logger.debug("AI activity status generation is disabled via LaunchDarkly")
        return None
@@ -20,6 +20,7 @@ from backend.data.execution import (
    upsert_execution_input,
    upsert_execution_output,
)
from backend.data.generate_data import get_user_execution_summary_data
from backend.data.graph import (
    get_connected_output_nodes,
    get_graph,
@@ -144,6 +145,9 @@ class DatabaseManager(AppService):
        get_user_notification_oldest_message_in_batch
    )

    # Summary data - async
    get_user_execution_summary_data = _(get_user_execution_summary_data)


class DatabaseManagerClient(AppServiceClient):
    d = DatabaseManager
@@ -169,6 +173,9 @@ class DatabaseManagerClient(AppServiceClient):
    spend_credits = _(d.spend_credits)
    get_credits = _(d.get_credits)

    # Summary data - async
    get_user_execution_summary_data = _(d.get_user_execution_summary_data)

    # Block error monitoring
    get_block_error_stats = _(d.get_block_error_stats)

@@ -215,3 +222,6 @@ class DatabaseManagerAsyncClient(AppServiceClient):
    get_user_notification_oldest_message_in_batch = (
        d.get_user_notification_oldest_message_in_batch
    )

    # Summary data
    get_user_execution_summary_data = d.get_user_execution_summary_data
@@ -1208,6 +1208,9 @@ class ExecutionManager(AppProcess):
            )
            return

        # Check if channel is closed and force reconnection if needed
        if not self.cancel_client.is_ready:
            self.cancel_client.disconnect()
            self.cancel_client.connect()
        cancel_channel = self.cancel_client.get_channel()
        cancel_channel.basic_consume(
@@ -1237,6 +1240,9 @@ class ExecutionManager(AppProcess):
            )
            return

        # Check if channel is closed and force reconnection if needed
        if not self.run_client.is_ready:
            self.run_client.disconnect()
            self.run_client.connect()
        run_channel = self.run_client.get_channel()
        run_channel.basic_qos(prefetch_count=self.pool_size)
@@ -3,7 +3,6 @@ import logging
import autogpt_libs.auth.models
import fastapi.responses
import pytest
from prisma.models import User

import backend.server.v2.library.model
import backend.server.v2.store.model
@@ -12,6 +11,7 @@ from backend.blocks.data_manipulation import FindInDictionaryBlock
from backend.blocks.io import AgentInputBlock
from backend.blocks.maths import CalculatorBlock, Operation
from backend.data import execution, graph
from backend.data.model import User
from backend.server.model import CreateGraph
from backend.server.rest_api import AgentServer
from backend.usecases.sample import create_test_graph, create_test_user
@@ -269,7 +269,9 @@ class Scheduler(AppService):

        self.scheduler = BackgroundScheduler(
            executors={
                "default": ThreadPoolExecutor(max_workers=10),  # Max 10 concurrent jobs
                "default": ThreadPoolExecutor(
                    max_workers=self.db_pool_size()
                ),  # Match DB pool size to prevent resource contention
            },
            job_defaults={
                "coalesce": True,  # Skip redundant missed jobs - just run the latest
@@ -305,9 +307,10 @@ class Scheduler(AppService):

        if self.register_system_tasks:
            # Notification PROCESS WEEKLY SUMMARY
            # Runs every Monday at 9 AM UTC
            self.scheduler.add_job(
                process_weekly_summary,
                CronTrigger.from_crontab("0 * * * *"),
                CronTrigger.from_crontab("0 9 * * 1"),
                id="process_weekly_summary",
                kwargs={},
                replace_existing=True,
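
For reference, both crontab expressions parse with APScheduler's CronTrigger; the fix swaps "top of every hour" for "Mondays at 09:00" (the effective timezone depends on how the scheduler is configured; the comment in the diff says UTC):

from apscheduler.triggers.cron import CronTrigger

hourly = CronTrigger.from_crontab("0 * * * *")  # old: minute 0 of every hour
weekly = CronTrigger.from_crontab("0 9 * * 1")  # new: 09:00 every Monday
print(hourly)
print(weekly)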
@@ -548,7 +548,7 @@ async def validate_graph_with_credentials(
    return node_input_errors


async def _construct_node_execution_input(
async def _construct_starting_node_execution_input(
    graph: GraphModel,
    user_id: str,
    graph_inputs: BlockInput,
@@ -622,7 +622,7 @@ async def validate_and_construct_node_execution_input(
    graph_version: Optional[int] = None,
    graph_credentials_inputs: Optional[dict[str, CredentialsMetaInput]] = None,
    nodes_input_masks: Optional[dict[str, dict[str, JsonValue]]] = None,
) -> tuple[GraphModel, list[tuple[str, BlockInput]]]:
) -> tuple[GraphModel, list[tuple[str, BlockInput]], dict[str, dict[str, JsonValue]]]:
    """
    Public wrapper that handles graph fetching, credential mapping, and validation+construction.
    This centralizes the logic used by both scheduler validation and actual execution.
@@ -666,14 +666,14 @@ async def validate_and_construct_node_execution_input(
        nodes_input_masks or {},
    )

    starting_nodes_input = await _construct_node_execution_input(
    starting_nodes_input = await _construct_starting_node_execution_input(
        graph=graph,
        user_id=user_id,
        graph_inputs=graph_inputs,
        nodes_input_masks=nodes_input_masks,
    )

    return graph, starting_nodes_input
    return graph, starting_nodes_input, nodes_input_masks


def _merge_nodes_input_masks(
@@ -856,13 +856,15 @@ async def add_graph_execution(
    else:
        edb = get_database_manager_async_client()

    graph, starting_nodes_input = await validate_and_construct_node_execution_input(
        graph_id=graph_id,
        user_id=user_id,
        graph_inputs=inputs or {},
        graph_version=graph_version,
        graph_credentials_inputs=graph_credentials_inputs,
        nodes_input_masks=nodes_input_masks,
    graph, starting_nodes_input, nodes_input_masks = (
        await validate_and_construct_node_execution_input(
            graph_id=graph_id,
            user_id=user_id,
            graph_inputs=inputs or {},
            graph_version=graph_version,
            graph_credentials_inputs=graph_credentials_inputs,
            nodes_input_masks=nodes_input_masks,
        )
    )
    graph_exec = None
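
Call sites now unpack three values, so the nodes_input_masks merged during validation flow back to the caller instead of being recomputed. A sketch of the new call shape (arguments are placeholders; this runs inside an async function):

async def start_execution(graph_id: str, user_id: str) -> None:
    # The wrapper now also returns the merged nodes_input_masks.
    graph, starting_nodes_input, nodes_input_masks = (
        await validate_and_construct_node_execution_input(
            graph_id=graph_id,
            user_id=user_id,
            graph_inputs={},
        )
    )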
@@ -182,6 +182,15 @@ zerobounce_credentials = APIKeyCredentials(
    expires_at=None,
)

enrichlayer_credentials = APIKeyCredentials(
    id="d9fce73a-6c1d-4e8b-ba2e-12a456789def",
    provider="enrichlayer",
    api_key=SecretStr(settings.secrets.enrichlayer_api_key),
    title="Use Credits for Enrichlayer",
    expires_at=None,
)


llama_api_credentials = APIKeyCredentials(
    id="d44045af-1c33-4833-9e19-752313214de2",
    provider="llama_api",
@@ -190,6 +199,14 @@ llama_api_credentials = APIKeyCredentials(
    expires_at=None,
)

v0_credentials = APIKeyCredentials(
    id="c4e6d1a0-3b5f-4789-a8e2-9b123456789f",
    provider="v0",
    api_key=SecretStr(settings.secrets.v0_api_key),
    title="Use Credits for v0 by Vercel",
    expires_at=None,
)

DEFAULT_CREDENTIALS = [
    ollama_credentials,
    revid_credentials,
@@ -203,6 +220,7 @@ DEFAULT_CREDENTIALS = [
    jina_credentials,
    unreal_credentials,
    open_router_credentials,
    enrichlayer_credentials,
    fal_credentials,
    exa_credentials,
    e2b_credentials,
@@ -213,6 +231,8 @@ DEFAULT_CREDENTIALS = [
    smartlead_credentials,
    zerobounce_credentials,
    google_maps_credentials,
    llama_api_credentials,
    v0_credentials,
]


@@ -279,6 +299,8 @@ class IntegrationCredentialsStore:
            all_credentials.append(unreal_credentials)
        if settings.secrets.open_router_api_key:
            all_credentials.append(open_router_credentials)
        if settings.secrets.enrichlayer_api_key:
            all_credentials.append(enrichlayer_credentials)
        if settings.secrets.fal_api_key:
            all_credentials.append(fal_credentials)
        if settings.secrets.exa_api_key:
@@ -25,6 +25,7 @@ class ProviderName(str, Enum):
    GROQ = "groq"
    HTTP = "http"
    HUBSPOT = "hubspot"
    ENRICHLAYER = "enrichlayer"
    IDEOGRAM = "ideogram"
    JINA = "jina"
    LLAMA_API = "llama_api"
@@ -47,6 +48,7 @@ class ProviderName(str, Enum):
    TWITTER = "twitter"
    TODOIST = "todoist"
    UNREAL_SPEECH = "unreal_speech"
    V0 = "v0"
    ZEROBOUNCE = "zerobounce"

    @classmethod
@@ -223,10 +223,14 @@ class NotificationManager(AppService):
        processed_count = 0
        current_time = datetime.now(tz=timezone.utc)
        start_time = current_time - timedelta(days=7)
        logger.info(
            f"Querying for active users between {start_time} and {current_time}"
        )
        users = await get_database_manager_async_client().get_active_user_ids_in_timerange(
            end_time=current_time.isoformat(),
            start_time=start_time.isoformat(),
        )
        logger.info(f"Found {len(users)} active users in the last 7 days")
        for user in users:
            await self._queue_scheduled_notification(
                SummaryParamsEventModel(
@@ -384,10 +388,13 @@ class NotificationManager(AppService):
    async def _queue_scheduled_notification(self, event: SummaryParamsEventModel):
        """Queue a scheduled notification - exposed method for other services to call"""
        try:
            logger.debug(f"Received Request to queue scheduled notification {event=}")
            logger.info(
                f"Queueing scheduled notification type={event.type} user_id={event.user_id}"
            )

            exchange = "notifications"
            routing_key = get_routing_key(event.type)
            logger.info(f"Using routing key: {routing_key}")

            # Publish to RabbitMQ
            await self.rabbit.publish_message(
@@ -395,6 +402,7 @@ class NotificationManager(AppService):
                message=event.model_dump_json(),
                exchange=next(ex for ex in EXCHANGES if ex.name == exchange),
            )
            logger.info(f"Successfully queued notification for user {event.user_id}")

        except Exception as e:
            logger.exception(f"Error queueing notification: {e}")
@@ -416,85 +424,99 @@ class NotificationManager(AppService):
        # only if both are true, should we email this person
        return validated_email and preference

    def _gather_summary_data(
    async def _gather_summary_data(
        self, user_id: str, event_type: NotificationType, params: BaseSummaryParams
    ) -> BaseSummaryData:
        """Gathers the data to build a summary notification"""

        logger.info(
            f"Gathering summary data for {user_id} and {event_type} wiht {params=}"
            f"Gathering summary data for {user_id} and {event_type} with {params=}"
        )

        # total_credits_used = self.run_and_wait(
        #     get_total_credits_used(user_id, start_time, end_time)
        # )

        # total_executions = self.run_and_wait(
        #     get_total_executions(user_id, start_time, end_time)
        # )

        # most_used_agent = self.run_and_wait(
        #     get_most_used_agent(user_id, start_time, end_time)
        # )

        # execution_times = self.run_and_wait(
        #     get_execution_time(user_id, start_time, end_time)
        # )

        # runs = self.run_and_wait(
        #     get_runs(user_id, start_time, end_time)
        # )
        total_credits_used = 3.0
        total_executions = 2
        most_used_agent = {"name": "Some"}
        execution_times = [1, 2, 3]
        runs = [{"status": "COMPLETED"}, {"status": "FAILED"}]

        successful_runs = len([run for run in runs if run["status"] == "COMPLETED"])
        failed_runs = len([run for run in runs if run["status"] != "COMPLETED"])
        average_execution_time = (
            sum(execution_times) / len(execution_times) if execution_times else 0
        )
        # cost_breakdown = self.run_and_wait(
        #     get_cost_breakdown(user_id, start_time, end_time)
        # )

        cost_breakdown = {
            "agent1": 1.0,
            "agent2": 2.0,
        }

        if event_type == NotificationType.DAILY_SUMMARY and isinstance(
            params, DailySummaryParams
        ):
            return DailySummaryData(
                total_credits_used=total_credits_used,
                total_executions=total_executions,
                most_used_agent=most_used_agent["name"],
                total_execution_time=sum(execution_times),
                successful_runs=successful_runs,
                failed_runs=failed_runs,
                average_execution_time=average_execution_time,
                cost_breakdown=cost_breakdown,
                date=params.date,
        try:
            # Get summary data from the database
            summary_data = await get_database_manager_async_client().get_user_execution_summary_data(
                user_id=user_id,
                start_time=params.start_date,
                end_time=params.end_date,
            )
        elif event_type == NotificationType.WEEKLY_SUMMARY and isinstance(
            params, WeeklySummaryParams
        ):
            return WeeklySummaryData(
                total_credits_used=total_credits_used,
                total_executions=total_executions,
                most_used_agent=most_used_agent["name"],
                total_execution_time=sum(execution_times),
                successful_runs=successful_runs,
                failed_runs=failed_runs,
                average_execution_time=average_execution_time,
                cost_breakdown=cost_breakdown,
                start_date=params.start_date,
                end_date=params.end_date,
            )
        else:
            raise ValueError("Invalid event type or params")

            # Extract data from summary
            total_credits_used = summary_data.total_credits_used
            total_executions = summary_data.total_executions
            most_used_agent = summary_data.most_used_agent
            successful_runs = summary_data.successful_runs
            failed_runs = summary_data.failed_runs
            total_execution_time = summary_data.total_execution_time
            average_execution_time = summary_data.average_execution_time
            cost_breakdown = summary_data.cost_breakdown

            if event_type == NotificationType.DAILY_SUMMARY and isinstance(
                params, DailySummaryParams
            ):
                return DailySummaryData(
                    total_credits_used=total_credits_used,
                    total_executions=total_executions,
                    most_used_agent=most_used_agent,
                    total_execution_time=total_execution_time,
                    successful_runs=successful_runs,
                    failed_runs=failed_runs,
                    average_execution_time=average_execution_time,
                    cost_breakdown=cost_breakdown,
                    date=params.date,
                )
            elif event_type == NotificationType.WEEKLY_SUMMARY and isinstance(
                params, WeeklySummaryParams
            ):
                return WeeklySummaryData(
                    total_credits_used=total_credits_used,
                    total_executions=total_executions,
                    most_used_agent=most_used_agent,
                    total_execution_time=total_execution_time,
                    successful_runs=successful_runs,
                    failed_runs=failed_runs,
                    average_execution_time=average_execution_time,
                    cost_breakdown=cost_breakdown,
                    start_date=params.start_date,
                    end_date=params.end_date,
                )
            else:
                raise ValueError("Invalid event type or params")

        except Exception as e:
            logger.error(f"Failed to gather summary data: {e}")
            # Return sensible defaults in case of error
            if event_type == NotificationType.DAILY_SUMMARY and isinstance(
                params, DailySummaryParams
            ):
                return DailySummaryData(
                    total_credits_used=0.0,
                    total_executions=0,
                    most_used_agent="No data available",
                    total_execution_time=0.0,
                    successful_runs=0,
                    failed_runs=0,
                    average_execution_time=0.0,
                    cost_breakdown={},
                    date=params.date,
                )
            elif event_type == NotificationType.WEEKLY_SUMMARY and isinstance(
                params, WeeklySummaryParams
            ):
                return WeeklySummaryData(
                    total_credits_used=0.0,
                    total_executions=0,
                    most_used_agent="No data available",
                    total_execution_time=0.0,
                    successful_runs=0,
                    failed_runs=0,
                    average_execution_time=0.0,
                    cost_breakdown={},
                    start_date=params.start_date,
                    end_date=params.end_date,
                )
            else:
                raise ValueError("Invalid event type or params") from e

    async def _should_batch(
        self, user_id: str, event_type: NotificationType, event: NotificationEventModel
@@ -764,7 +786,7 @@ class NotificationManager(AppService):
            )
            return True

        summary_data = self._gather_summary_data(
        summary_data = await self._gather_summary_data(
            event.user_id, event.type, model.data
        )
@@ -5,23 +5,64 @@ data.start_date: the start date of the summary
data.end_date: the end date of the summary
data.total_credits_used: the total credits used during the summary
data.total_executions: the total number of executions during the summary
data.most_used_agent: the most used agent's nameduring the summary
data.most_used_agent: the most used agent's name during the summary
data.total_execution_time: the total execution time during the summary
data.successful_runs: the total number of successful runs during the summary
data.failed_runs: the total number of failed runs during the summary
data.average_execution_time: the average execution time during the summary
data.cost_breakdown: the cost breakdown during the summary
data.cost_breakdown: the cost breakdown during the summary (dict mapping agent names to credit amounts)
#}

<h1>Weekly Summary</h1>
<h1 style="color: #5D23BB; font-size: 32px; font-weight: 600; margin-bottom: 25px; margin-top: 0;">
  Weekly Summary
</h1>

<p>Start Date: {{ data.start_date }}</p>
<p>End Date: {{ data.end_date }}</p>
<p>Total Credits Used: {{ data.total_credits_used }}</p>
<p>Total Executions: {{ data.total_executions }}</p>
<p>Most Used Agent: {{ data.most_used_agent }}</p>
<p>Total Execution Time: {{ data.total_execution_time }}</p>
<p>Successful Runs: {{ data.successful_runs }}</p>
<p>Failed Runs: {{ data.failed_runs }}</p>
<p>Average Execution Time: {{ data.average_execution_time }}</p>
<p>Cost Breakdown: {{ data.cost_breakdown }}</p>
<h2 style="color: #070629; font-size: 24px; font-weight: 500; margin-bottom: 20px;">
  Your Agent Activity: {{ data.start_date.strftime('%B %-d') }} – {{ data.end_date.strftime('%B %-d') }}
</h2>

<div style="background-color: #ffffff; border-radius: 8px; padding: 20px; margin-bottom: 25px;">
  <ul style="list-style-type: disc; padding-left: 20px; margin: 0;">
    <li style="font-size: 16px; line-height: 1.8; margin-bottom: 8px;">
      <strong>Total Executions:</strong> {{ data.total_executions }}
    </li>
    <li style="font-size: 16px; line-height: 1.8; margin-bottom: 8px;">
      <strong>Total Credits Used:</strong> {{ data.total_credits_used|format("%.2f") }}
    </li>
    <li style="font-size: 16px; line-height: 1.8; margin-bottom: 8px;">
      <strong>Total Execution Time:</strong> {{ data.total_execution_time|format("%.1f") }} seconds
    </li>
    <li style="font-size: 16px; line-height: 1.8; margin-bottom: 8px;">
      <strong>Successful Runs:</strong> {{ data.successful_runs }}
    </li>
    <li style="font-size: 16px; line-height: 1.8; margin-bottom: 8px;">
      <strong>Failed Runs:</strong> {{ data.failed_runs }}
    </li>
    <li style="font-size: 16px; line-height: 1.8; margin-bottom: 8px;">
      <strong>Average Execution Time:</strong> {{ data.average_execution_time|format("%.1f") }} seconds
    </li>
    <li style="font-size: 16px; line-height: 1.8; margin-bottom: 8px;">
      <strong>Most Used Agent:</strong> {{ data.most_used_agent }}
    </li>
    {% if data.cost_breakdown %}
    <li style="font-size: 16px; line-height: 1.8; margin-bottom: 8px;">
      <strong>Cost Breakdown:</strong>
      <ul style="list-style-type: disc; padding-left: 40px; margin-top: 8px;">
        {% for agent_name, credits in data.cost_breakdown.items() %}
        <li style="font-size: 16px; line-height: 1.8; margin-bottom: 4px;">
          {{ agent_name }}: {{ credits|format("%.2f") }} credits
        </li>
        {% endfor %}
      </ul>
    </li>
    {% endif %}
  </ul>
</div>

<p style="font-size: 16px; line-height: 165%; margin-top: 20px; margin-bottom: 10px;">
  Thank you for being a part of the AutoGPT community! 🎉
</p>

<p style="font-size: 16px; line-height: 165%; margin-bottom: 0;">
  Join the conversation on <a href="https://discord.gg/autogpt" style="color: #4285F4; text-decoration: underline;">Discord here</a>.
</p>
@@ -1,11 +0,0 @@
from supabase import Client, create_client

from backend.util.settings import Settings

settings = Settings()


def get_supabase() -> Client:
    return create_client(
        settings.secrets.supabase_url, settings.secrets.supabase_service_role_key
    )
@@ -9,11 +9,6 @@ import fastapi.responses
import pydantic
import starlette.middleware.cors
import uvicorn
from autogpt_libs.feature_flag.client import (
    initialize_launchdarkly,
    shutdown_launchdarkly,
)
from autogpt_libs.logging.utils import generate_uvicorn_config
from fastapi.exceptions import RequestValidationError
from fastapi.routing import APIRoute

@@ -41,6 +36,7 @@ from backend.server.external.api import external_app
from backend.server.middleware.security import SecurityHeadersMiddleware
from backend.util import json
from backend.util.cloud_storage import shutdown_cloud_storage_handler
from backend.util.feature_flag import initialize_launchdarkly, shutdown_launchdarkly
from backend.util.service import UnhealthyServiceError

settings = backend.util.settings.Settings()
@@ -250,7 +246,7 @@ class AgentServer(backend.util.service.AppProcess):
            server_app,
            host=backend.util.settings.Config().agent_api_host,
            port=backend.util.settings.Config().agent_api_port,
            log_config=generate_uvicorn_config(),
            log_config=None,
        )

    def cleanup(self):
@@ -8,7 +8,6 @@ from typing import Annotated, Any, Sequence
import pydantic
import stripe
from autogpt_libs.auth.middleware import auth_middleware
from autogpt_libs.feature_flag.client import feature_flag
from fastapi import (
    APIRouter,
    Body,
@@ -85,6 +84,7 @@ from backend.server.utils import get_user_id
from backend.util.clients import get_scheduler_client
from backend.util.cloud_storage import get_cloud_storage_handler
from backend.util.exceptions import GraphValidationError, NotFoundError
from backend.util.feature_flag import feature_flag
from backend.util.settings import Settings
from backend.util.virus_scanner import scan_content_safe

@@ -458,12 +458,16 @@ async def stripe_webhook(request: Request):
        event = stripe.Webhook.construct_event(
            payload, sig_header, settings.secrets.stripe_webhook_secret
        )
    except ValueError:
    except ValueError as e:
        # Invalid payload
        raise HTTPException(status_code=400)
    except stripe.SignatureVerificationError:
        raise HTTPException(
            status_code=400, detail=f"Invalid payload: {str(e) or type(e).__name__}"
        )
    except stripe.SignatureVerificationError as e:
        # Invalid signature
        raise HTTPException(status_code=400)
        raise HTTPException(
            status_code=400, detail=f"Invalid signature: {str(e) or type(e).__name__}"
        )

    if (
        event["type"] == "checkout.session.completed"
@@ -676,7 +680,15 @@ async def update_graph(
    # Handle deactivation of the previously active version
    await on_graph_deactivate(current_active_version, user_id=user_id)

    return new_graph_version
    # Fetch new graph version *with sub-graphs* (needed for credentials input schema)
    new_graph_version_with_subgraphs = await graph_db.get_graph(
        graph_id,
        new_graph_version.version,
        user_id=user_id,
        include_subgraphs=True,
    )
    assert new_graph_version_with_subgraphs  # make type checker happy
    return new_graph_version_with_subgraphs


@v1_router.put(
@@ -6,7 +6,6 @@ from typing import TYPE_CHECKING, Any, Literal
if TYPE_CHECKING:
    from backend.executor import DatabaseManagerAsyncClient

from autogpt_libs.feature_flag.client import is_feature_enabled
from pydantic import ValidationError

from backend.data.execution import ExecutionStatus
@@ -16,6 +15,7 @@ from backend.server.v2.AutoMod.models import (
    ModerationConfig,
)
from backend.util.exceptions import ModerationError
from backend.util.feature_flag import Flag, is_feature_enabled
from backend.util.request import Requests
from backend.util.settings import Settings

@@ -51,7 +51,7 @@ class AutoModManager:
            return None

        # Check if AutoMod feature is enabled for this user
        if not is_feature_enabled("AutoMod", graph_exec.user_id, default=False):
        if not await is_feature_enabled(Flag.AUTOMOD, graph_exec.user_id):
            logger.debug(f"AutoMod feature not enabled for user {graph_exec.user_id}")
            return None

@@ -141,7 +141,7 @@ class AutoModManager:
            return None

        # Check if AutoMod feature is enabled for this user
        if not is_feature_enabled("AutoMod", user_id, default=False):
        if not await is_feature_enabled(Flag.AUTOMOD, user_id):
            logger.debug(f"AutoMod feature not enabled for user {user_id}")
            return None

@@ -320,7 +320,7 @@ class AutoModManager:
        url = f"{self.config.api_url}/moderate"
        headers = {
            "Content-Type": "application/json",
            "X-API-Key": self.config.api_key,
            "X-API-Key": self.config.api_key.strip(),
        }

        # Create requests instance with timeout and retry configuration
@@ -241,7 +241,11 @@ async def get_library_agent_by_graph_id(
        )
        if not agent:
            return None
        return library_model.LibraryAgent.from_db(agent)

        assert agent.AgentGraph  # make type checker happy
        # Include sub-graphs so we can make a full credentials input schema
        sub_graphs = await graph_db.get_sub_graphs(agent.AgentGraph)
        return library_model.LibraryAgent.from_db(agent, sub_graphs=sub_graphs)
    except prisma.errors.PrismaError as e:
        logger.error(f"Database error fetching library agent by graph ID: {e}")
        raise store_exceptions.DatabaseError("Failed to fetch library agent") from e
@@ -6,7 +6,6 @@ from typing import Protocol
import pydantic
import uvicorn
from autogpt_libs.auth import parse_jwt_token
from autogpt_libs.logging.utils import generate_uvicorn_config
from fastapi import Depends, FastAPI, WebSocket, WebSocketDisconnect
from starlette.middleware.cors import CORSMiddleware

@@ -309,7 +308,7 @@ class WebsocketServer(AppProcess):
            server_app,
            host=Config().websocket_server_host,
            port=Config().websocket_server_port,
            log_config=generate_uvicorn_config(),
            log_config=None,
        )

    def cleanup(self):
@@ -1,13 +1,12 @@
from pathlib import Path

from prisma.models import User

from backend.blocks.basic import StoreValueBlock
from backend.blocks.block import BlockInstallationBlock
from backend.blocks.http import SendWebRequestBlock
from backend.blocks.llm import AITextGeneratorBlock
from backend.blocks.text import ExtractTextInformationBlock, FillTextTemplateBlock
from backend.data.graph import Graph, Link, Node, create_graph
from backend.data.model import User
from backend.data.user import get_or_create_user
from backend.util.test import SpinTestServer, wait_execution


@@ -1,9 +1,8 @@
from prisma.models import User

from backend.blocks.llm import AIStructuredResponseGeneratorBlock
from backend.blocks.reddit import GetRedditPostsBlock, PostRedditCommentBlock
from backend.blocks.text import FillTextTemplateBlock, MatchTextPatternBlock
from backend.data.graph import Graph, Link, Node, create_graph
from backend.data.model import User
from backend.data.user import get_or_create_user
from backend.util.test import SpinTestServer, wait_execution


@@ -1,10 +1,9 @@
from prisma.models import User

from backend.blocks.basic import StoreValueBlock
from backend.blocks.io import AgentInputBlock
from backend.blocks.text import FillTextTemplateBlock
from backend.data import graph
from backend.data.graph import create_graph
from backend.data.model import User
from backend.data.user import get_or_create_user
from backend.util.test import SpinTestServer, wait_execution
@@ -2,11 +2,18 @@
Centralized service client helpers with thread caching.
"""

from functools import cache
from typing import TYPE_CHECKING

from autogpt_libs.utils.cache import thread_cached
from autogpt_libs.utils.cache import async_cache, thread_cached

from backend.util.settings import Settings

settings = Settings()

if TYPE_CHECKING:
    from supabase import AClient, Client

    from backend.data.execution import (
        AsyncRedisExecutionEventBus,
        RedisExecutionEventBus,
@@ -109,6 +116,29 @@ def get_integration_credentials_store() -> "IntegrationCredentialsStore":
    return IntegrationCredentialsStore()


# ============ Supabase Clients ============ #


@cache
def get_supabase() -> "Client":
    """Get a process-cached synchronous Supabase client instance."""
    from supabase import create_client

    return create_client(
        settings.secrets.supabase_url, settings.secrets.supabase_service_role_key
    )


@async_cache
async def get_async_supabase() -> "AClient":
    """Get a process-cached asynchronous Supabase client instance."""
    from supabase import create_async_client

    return await create_async_client(
        settings.secrets.supabase_url, settings.secrets.supabase_service_role_key
    )


# ============ Notification Queue Helpers ============ #
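
One detail worth noting: functools.cache cannot memoize an async factory directly, because it would cache the coroutine object rather than its result, and a coroutine can only be awaited once; hence the separate async_cache helper for get_async_supabase. A self-contained sketch of the once-only async initialization such a helper has to provide (hypothetical stand-in, not the autogpt_libs implementation):

import asyncio
from typing import Optional

_client: Optional[object] = None
_lock = asyncio.Lock()


async def get_cached_client() -> object:
    # Build the client at most once per process, even under concurrency.
    global _client
    if _client is None:
        async with _lock:
            if _client is None:
                _client = object()  # stand-in for: await create_async_client(...)
    return _client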
@@ -71,4 +71,10 @@ class GraphValidationError(ValueError):
        self.node_errors = node_errors or {}

    def __str__(self):
        return self.message
        return self.message + "".join(
            [
                f"\n  {node_id}:"
                + "".join([f"\n    {k}: {e}" for k, e in errors.items()])
                for node_id, errors in self.node_errors.items()
            ]
        )
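
With the richer __str__, node-level errors render as an indented tree. A quick standalone illustration using a stand-in class that mirrors the hunk above:

from typing import Optional


class GraphValidationError(ValueError):  # stand-in mirroring the diff
    def __init__(self, message: str, node_errors: Optional[dict] = None):
        super().__init__(message)
        self.message = message
        self.node_errors = node_errors or {}

    def __str__(self):
        return self.message + "".join(
            [
                f"\n  {node_id}:"
                + "".join([f"\n    {k}: {e}" for k, e in errors.items()])
                for node_id, errors in self.node_errors.items()
            ]
        )


print(GraphValidationError("Graph validation failed", {"node-1": {"api_key": "missing"}}))
# Graph validation failed
#   node-1:
#     api_key: missing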
257
autogpt_platform/backend/backend/util/feature_flag.py
Normal file
257
autogpt_platform/backend/backend/util/feature_flag.py
Normal file
@@ -0,0 +1,257 @@
|
||||
import contextlib
|
||||
import logging
|
||||
from enum import Enum
|
||||
from functools import wraps
|
||||
from typing import Any, Awaitable, Callable, TypeVar
|
||||
|
||||
import ldclient
|
||||
from autogpt_libs.utils.cache import async_ttl_cache
|
||||
from fastapi import HTTPException
|
||||
from ldclient import Context, LDClient
|
||||
from ldclient.config import Config
|
||||
from typing_extensions import ParamSpec
|
||||
|
||||
from backend.util.settings import Settings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Load settings at module level
|
||||
settings = Settings()
|
||||
|
||||
P = ParamSpec("P")
|
||||
T = TypeVar("T")
|
||||
|
||||
_is_initialized = False
|
||||
|
||||
|
||||
class Flag(str, Enum):
|
||||
"""
|
||||
Centralized enum for all LaunchDarkly feature flags.
|
||||
|
||||
Add new flags here to ensure consistency across the codebase.
|
||||
"""
|
||||
|
||||
AUTOMOD = "AutoMod"
|
||||
AI_ACTIVITY_STATUS = "ai-agent-execution-summary"
|
||||
BETA_BLOCKS = "beta-blocks"
|
||||
AGENT_ACTIVITY = "agent-activity"
|
||||
|
||||
|
||||
def get_client() -> LDClient:
|
||||
"""Get the LaunchDarkly client singleton."""
|
    if not _is_initialized:
        initialize_launchdarkly()
    return ldclient.get()


def initialize_launchdarkly() -> None:
    sdk_key = settings.secrets.launch_darkly_sdk_key
    logger.debug(
        f"Initializing LaunchDarkly with SDK key: {'present' if sdk_key else 'missing'}"
    )

    if not sdk_key:
        logger.warning("LaunchDarkly SDK key not configured")
        return

    config = Config(sdk_key)
    ldclient.set_config(config)

    if ldclient.get().is_initialized():
        global _is_initialized
        _is_initialized = True
        logger.info("LaunchDarkly client initialized successfully")
    else:
        logger.error("LaunchDarkly client failed to initialize")


def shutdown_launchdarkly() -> None:
    """Shutdown the LaunchDarkly client."""
    if ldclient.get().is_initialized():
        ldclient.get().close()
        logger.info("LaunchDarkly client closed successfully")


@async_ttl_cache(maxsize=1000, ttl_seconds=86400)  # 1000 entries, 24 hours TTL
async def _fetch_user_context_data(user_id: str) -> Context:
    """
    Fetch user context for LaunchDarkly from Supabase.

    Args:
        user_id: The user ID to fetch data for

    Returns:
        LaunchDarkly Context object
    """
    builder = Context.builder(user_id).kind("user").anonymous(True)

    try:
        from backend.util.clients import get_supabase

        # If we have user data, update context
        response = get_supabase().auth.admin.get_user_by_id(user_id)
        if response and response.user:
            user = response.user
            builder.anonymous(False)
            if user.role:
                builder.set("role", user.role)
                # It's weird, I know, but it is what it is.
                builder.set("custom", {"role": user.role})
            if user.email:
                builder.set("email", user.email)
                builder.set("email_domain", user.email.split("@")[-1])

    except Exception as e:
        logger.warning(f"Failed to fetch user context for {user_id}: {e}")

    return builder.build()


async def get_feature_flag_value(
    flag_key: str,
    user_id: str,
    default: Any = None,
) -> Any:
    """
    Get the raw value of a feature flag for a user.

    This is the generic function that returns the actual flag value,
    which could be a boolean, string, number, or JSON object.

    Args:
        flag_key: The LaunchDarkly feature flag key
        user_id: The user ID to evaluate the flag for
        default: Default value if LaunchDarkly is unavailable or flag evaluation fails

    Returns:
        The flag value from LaunchDarkly
    """
    try:
        client = get_client()

        # Check if client is initialized
        if not client.is_initialized():
            logger.debug(
                f"LaunchDarkly not initialized, using default={default} for {flag_key}"
            )
            return default

        # Get user context from Supabase
        context = await _fetch_user_context_data(user_id)

        # Evaluate flag
        result = client.variation(flag_key, context, default)

        logger.debug(
            f"Feature flag {flag_key} for user {user_id}: {result} (type: {type(result).__name__})"
        )
        return result

    except Exception as e:
        logger.warning(
            f"LaunchDarkly flag evaluation failed for {flag_key}: {e}, using default={default}"
        )
        return default


async def is_feature_enabled(
    flag_key: Flag,
    user_id: str,
    default: bool = False,
) -> bool:
    """
    Check if a feature flag is enabled for a user.

    Args:
        flag_key: The Flag enum value
        user_id: The user ID to evaluate the flag for
        default: Default value if LaunchDarkly is unavailable or flag evaluation fails

    Returns:
        True if feature is enabled, False otherwise
    """
    result = await get_feature_flag_value(flag_key.value, user_id, default)

    # If the result is already a boolean, return it
    if isinstance(result, bool):
        return result

    # Log a warning if the flag is not returning a boolean
    logger.warning(
        f"Feature flag {flag_key} returned non-boolean value: {result} (type: {type(result).__name__}). "
        f"This flag should be configured as a boolean in LaunchDarkly. Using default={default}"
    )

    # Return the default if we get a non-boolean value
    # This prevents objects from being incorrectly treated as True
    return default


def feature_flag(
    flag_key: str,
    default: bool = False,
) -> Callable[[Callable[P, Awaitable[T]]], Callable[P, Awaitable[T]]]:
    """
    Decorator for async feature-flag-protected endpoints.

    Args:
        flag_key: The LaunchDarkly feature flag key
        default: Default value if flag evaluation fails

    Returns:
        Decorator that only works with async functions
    """

    def decorator(func: Callable[P, Awaitable[T]]) -> Callable[P, Awaitable[T]]:
        @wraps(func)
        async def async_wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
            try:
                user_id = kwargs.get("user_id")
                if not user_id:
                    raise ValueError("user_id is required")

                if not get_client().is_initialized():
                    logger.warning(
                        f"LaunchDarkly not initialized, using default={default}"
                    )
                    is_enabled = default
                else:
                    # Use the internal function directly since we have a raw string flag_key
                    flag_value = await get_feature_flag_value(
                        flag_key, str(user_id), default
                    )
                    # Ensure we treat the flag value as a boolean
                    if isinstance(flag_value, bool):
                        is_enabled = flag_value
                    else:
                        # Log a warning and use the default for non-boolean values
                        logger.warning(
                            f"Feature flag {flag_key} returned non-boolean value: {flag_value} (type: {type(flag_value).__name__}). "
                            f"Using default={default}"
                        )
                        is_enabled = default

                if not is_enabled:
                    raise HTTPException(status_code=404, detail="Feature not available")

                return await func(*args, **kwargs)
            except Exception as e:
                logger.error(f"Error evaluating feature flag {flag_key}: {e}")
                raise

        return async_wrapper

    return decorator


@contextlib.contextmanager
def mock_flag_variation(flag_key: str, return_value: Any):
    """Context manager for testing feature flags."""
    original_variation = get_client().variation
    get_client().variation = lambda key, context, default: (
        return_value if key == flag_key else original_variation(key, context, default)
    )
    try:
        yield
    finally:
        get_client().variation = original_variation
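For orientation, here is a minimal usage sketch of the decorator and helper above. The caller names are illustrative; the flag key "agent-activity" and the keyword-argument requirement for user_id come from the module and its tests.

from backend.util.feature_flag import Flag, feature_flag, is_feature_enabled


@feature_flag("agent-activity")  # raises HTTPException(404) when the flag is off
async def activity_endpoint(user_id: str) -> dict:
    return {"status": "enabled"}


async def demo(user_id: str) -> None:
    # The decorator requires user_id as a keyword argument.
    print(await activity_endpoint(user_id=user_id))
    # Typed variant: evaluates the enum's string value and coerces the result to bool.
    print(await is_feature_enabled(Flag.AGENT_ACTIVITY, user_id, default=False))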
113 autogpt_platform/backend/backend/util/feature_flag_test.py (new file)
@@ -0,0 +1,113 @@
import pytest
from fastapi import HTTPException
from ldclient import LDClient

from backend.util.feature_flag import (
    Flag,
    feature_flag,
    is_feature_enabled,
    mock_flag_variation,
)


@pytest.fixture
def ld_client(mocker):
    client = mocker.Mock(spec=LDClient)
    mocker.patch("ldclient.get", return_value=client)
    client.is_initialized.return_value = True
    return client


@pytest.mark.asyncio
async def test_feature_flag_enabled(ld_client):
    ld_client.variation.return_value = True

    @feature_flag("test-flag")
    async def test_function(user_id: str):
        return "success"

    result = await test_function(user_id="test-user")
    assert result == "success"
    ld_client.variation.assert_called_once()


@pytest.mark.asyncio
async def test_feature_flag_unauthorized_response(ld_client):
    ld_client.variation.return_value = False

    @feature_flag("test-flag")
    async def test_function(user_id: str):
        return "success"

    with pytest.raises(HTTPException) as exc_info:
        await test_function(user_id="test-user")
    assert exc_info.value.status_code == 404


def test_mock_flag_variation(ld_client):
    with mock_flag_variation("test-flag", True):
        assert ld_client.variation("test-flag", None, False) is True

    with mock_flag_variation("test-flag", False):
        assert ld_client.variation("test-flag", None, True) is False


@pytest.mark.asyncio
async def test_is_feature_enabled(ld_client):
    """Test the is_feature_enabled helper function."""
    ld_client.is_initialized.return_value = True
    ld_client.variation.return_value = True

    result = await is_feature_enabled(Flag.AUTOMOD, "user123", default=False)
    assert result is True

    ld_client.variation.assert_called_once()
    call_args = ld_client.variation.call_args
    assert call_args[0][0] == "AutoMod"  # flag_key
    assert call_args[0][2] is False  # default value


@pytest.mark.asyncio
async def test_is_feature_enabled_not_initialized(ld_client):
    """Test is_feature_enabled when LaunchDarkly is not initialized."""
    ld_client.is_initialized.return_value = False

    result = await is_feature_enabled(Flag.AGENT_ACTIVITY, "user123", default=True)
    assert result is True  # Should return default

    ld_client.variation.assert_not_called()


@pytest.mark.asyncio
async def test_is_feature_enabled_exception(mocker):
    """Test is_feature_enabled when get_client() raises an exception."""
    mocker.patch(
        "backend.util.feature_flag.get_client",
        side_effect=Exception("Client error"),
    )

    result = await is_feature_enabled(Flag.AGENT_ACTIVITY, "user123", default=True)
    assert result is True  # Should return default


def test_flag_enum_values():
    """Test that Flag enum has expected values."""
    assert Flag.AUTOMOD == "AutoMod"
    assert Flag.AI_ACTIVITY_STATUS == "ai-agent-execution-summary"
    assert Flag.BETA_BLOCKS == "beta-blocks"
    assert Flag.AGENT_ACTIVITY == "agent-activity"


@pytest.mark.asyncio
async def test_is_feature_enabled_with_flag_enum(mocker):
    """Test is_feature_enabled function with Flag enum."""
    mock_get_feature_flag_value = mocker.patch(
        "backend.util.feature_flag.get_feature_flag_value"
    )
    mock_get_feature_flag_value.return_value = True

    result = await is_feature_enabled(Flag.AUTOMOD, "user123")

    assert result is True
    # Should call with the flag's string value
    mock_get_feature_flag_value.assert_called_once()
@@ -24,7 +24,6 @@ from typing import (
 
 import httpx
 import uvicorn
-from autogpt_libs.logging.utils import generate_uvicorn_config
 from fastapi import FastAPI, Request, responses
 from pydantic import BaseModel, TypeAdapter, create_model
 
@@ -45,6 +44,34 @@ api_comm_retry = config.pyro_client_comm_retry
 api_comm_timeout = config.pyro_client_comm_timeout
 api_call_timeout = config.rpc_client_call_timeout
 
 
+def _validate_no_prisma_objects(obj: Any, path: str = "result") -> None:
+    """
+    Recursively validate that no Prisma objects are being returned from service methods.
+    This enforces proper separation of layers - only application models should cross service boundaries.
+    """
+    if obj is None:
+        return
+
+    # Check if it's a Prisma model object
+    if hasattr(obj, "__class__") and hasattr(obj.__class__, "__module__"):
+        module_name = obj.__class__.__module__
+        if module_name and "prisma.models" in module_name:
+            raise ValueError(
+                f"Prisma object {obj.__class__.__name__} found in {path}. "
+                "Service methods must return application models, not Prisma objects. "
+                f"Use {obj.__class__.__name__}.from_db() to convert to application model."
+            )
+
+    # Recursively check collections
+    if isinstance(obj, (list, tuple)):
+        for i, item in enumerate(obj):
+            _validate_no_prisma_objects(item, f"{path}[{i}]")
+    elif isinstance(obj, dict):
+        for key, value in obj.items():
+            _validate_no_prisma_objects(value, f"{path}['{key}']")
+
+
 P = ParamSpec("P")
 R = TypeVar("R")
 EXPOSED_FLAG = "__exposed__"
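To make the contract concrete, here is a toy sketch of the pattern the validator enforces. UserModel and its from_db() converter are illustrative stand-ins, not actual backend models:

from pydantic import BaseModel


class UserModel(BaseModel):
    """Application-layer model that is safe to return across service boundaries."""

    id: str
    email: str

    @classmethod
    def from_db(cls, db_user) -> "UserModel":
        # Convert the Prisma row into the application model before it leaves the service.
        return cls(id=db_user.id, email=db_user.email)


# A service method should return UserModel.from_db(prisma_user), never the
# prisma.models row itself; _validate_no_prisma_objects raises ValueError if a
# Prisma object slips through, even nested inside a list or dict.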
@@ -111,6 +138,22 @@ class UnhealthyServiceError(ValueError):
         return self.message
 
 
+class HTTPClientError(Exception):
+    """Exception for HTTP client errors (4xx status codes) that should not be retried."""
+
+    def __init__(self, status_code: int, message: str):
+        self.status_code = status_code
+        super().__init__(f"HTTP {status_code}: {message}")
+
+
+class HTTPServerError(Exception):
+    """Exception for HTTP server errors (5xx status codes) that can be retried."""
+
+    def __init__(self, status_code: int, message: str):
+        self.status_code = status_code
+        super().__init__(f"HTTP {status_code}: {message}")
+
+
 EXCEPTION_MAPPING = {
     e.__name__: e
     for e in [
@@ -119,6 +162,8 @@ EXCEPTION_MAPPING = {
         TimeoutError,
         ConnectionError,
         UnhealthyServiceError,
+        HTTPClientError,
+        HTTPServerError,
         *[
             ErrorType
             for _, ErrorType in inspect.getmembers(exceptions)
@@ -191,17 +236,21 @@ class AppService(BaseAppService, ABC):
         if asyncio.iscoroutinefunction(f):
 
             async def async_endpoint(body: RequestBodyModel):  # type: ignore #RequestBodyModel being variable
-                return await f(
+                result = await f(
                     **{name: getattr(body, name) for name in type(body).model_fields}
                 )
+                _validate_no_prisma_objects(result, f"{func.__name__} result")
+                return result
 
             return async_endpoint
         else:
 
             def sync_endpoint(body: RequestBodyModel):  # type: ignore #RequestBodyModel being variable
-                return f(
+                result = f(
                     **{name: getattr(body, name) for name in type(body).model_fields}
                 )
+                _validate_no_prisma_objects(result, f"{func.__name__} result")
+                return result
 
             return sync_endpoint
 
@@ -216,7 +265,7 @@ class AppService(BaseAppService, ABC):
                 self.fastapi_app,
                 host=api_host,
                 port=self.get_port(),
-                log_config=generate_uvicorn_config(),
+                log_config=None,  # Explicitly None to avoid uvicorn replacing the logger.
                 log_level=self.log_level,
             )
         )
@@ -313,6 +362,7 @@ def get_service_client(
                 AttributeError,  # Missing attributes
                 asyncio.CancelledError,  # Task was cancelled
                 concurrent.futures.CancelledError,  # Future was cancelled
+                HTTPClientError,  # HTTP 4xx client errors - don't retry
             ),
         )(fn)
 
@@ -390,11 +440,31 @@ def get_service_client(
                 self._connection_failure_count = 0
                 return response.json()
             except httpx.HTTPStatusError as e:
-                error = RemoteCallError.model_validate(e.response.json())
-                # DEBUG HELP: if you made a custom exception, make sure you override self.args to be how to make your exception
-                raise EXCEPTION_MAPPING.get(error.type, Exception)(
-                    *(error.args or [str(e)])
-                )
+                status_code = e.response.status_code
+
+                # Try to parse the error response as RemoteCallError for mapped exceptions
+                error_response = None
+                try:
+                    error_response = RemoteCallError.model_validate(e.response.json())
+                except Exception:
+                    pass
+
+                # If we successfully parsed a mapped exception type, re-raise it
+                if error_response and error_response.type in EXCEPTION_MAPPING:
+                    exception_class = EXCEPTION_MAPPING[error_response.type]
+                    args = error_response.args or [str(e)]
+                    raise exception_class(*args)
+
+                # Otherwise categorize by HTTP status code
+                if 400 <= status_code < 500:
+                    # Client errors (4xx) - wrap to prevent retries
+                    raise HTTPClientError(status_code, str(e))
+                elif 500 <= status_code < 600:
+                    # Server errors (5xx) - wrap but allow retries
+                    raise HTTPServerError(status_code, str(e))
+                else:
+                    # Other status codes (1xx, 2xx, 3xx) - re-raise original error
+                    raise e
 
     @_maybe_retry
     def _call_method_sync(self, method_name: str, **kwargs: Any) -> Any:
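A small self-contained sketch of how the two wrapper exceptions behave at a call site; the call_remote helper below is illustrative (in the real code the wrapping happens inside the client's response handler):

from backend.util.service import HTTPClientError, HTTPServerError


def call_remote(status_code: int) -> str:
    # Mirrors the categorization above: 4xx -> HTTPClientError (never retried),
    # 5xx -> HTTPServerError (retryable); anything else would pass through.
    if 400 <= status_code < 500:
        raise HTTPClientError(status_code, "client-side problem")
    if 500 <= status_code < 600:
        raise HTTPServerError(status_code, "server-side problem")
    return "ok"


try:
    call_remote(404)
except HTTPClientError as e:
    assert e.status_code == 404
    assert str(e) == "HTTP 404: client-side problem"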
@@ -8,6 +8,8 @@ import pytest
 from backend.util.service import (
     AppService,
     AppServiceClient,
+    HTTPClientError,
+    HTTPServerError,
     endpoint_to_async,
     expose,
     get_service_client,
@@ -366,3 +368,125 @@ def test_service_no_retry_when_disabled(server):
     # This should fail immediately without retry
     with pytest.raises(RuntimeError, match="Intended error for testing"):
         client.always_failing_add(5, 3)
+
+
+class TestHTTPErrorRetryBehavior:
+    """Test that HTTP client errors (4xx) are not retried but server errors (5xx) can be."""
+
+    # Note: These tests access private methods for testing internal behavior
+    # Type ignore comments are used to suppress warnings about accessing private methods
+
+    def test_http_client_error_not_retried(self):
+        """Test that 4xx errors are wrapped as HTTPClientError and not retried."""
+        # Create a mock response with 404 status
+        mock_response = Mock()
+        mock_response.status_code = 404
+        mock_response.json.return_value = {"message": "Not found"}
+        mock_response.raise_for_status.side_effect = httpx.HTTPStatusError(
+            "404 Not Found", request=Mock(), response=mock_response
+        )
+
+        # Create client
+        client = get_service_client(ServiceTestClient)
+        dynamic_client = client
+
+        # Test the _handle_call_method_response directly
+        with pytest.raises(HTTPClientError) as exc_info:
+            dynamic_client._handle_call_method_response(  # type: ignore[attr-defined]
+                response=mock_response, method_name="test_method"
+            )
+
+        assert exc_info.value.status_code == 404
+        assert "404" in str(exc_info.value)
+
+    def test_http_server_error_can_be_retried(self):
+        """Test that 5xx errors are wrapped as HTTPServerError and can be retried."""
+        # Create a mock response with 500 status
+        mock_response = Mock()
+        mock_response.status_code = 500
+        mock_response.json.return_value = {"message": "Internal server error"}
+        mock_response.raise_for_status.side_effect = httpx.HTTPStatusError(
+            "500 Internal Server Error", request=Mock(), response=mock_response
+        )
+
+        # Create client
+        client = get_service_client(ServiceTestClient)
+        dynamic_client = client
+
+        # Test the _handle_call_method_response directly
+        with pytest.raises(HTTPServerError) as exc_info:
+            dynamic_client._handle_call_method_response(  # type: ignore[attr-defined]
+                response=mock_response, method_name="test_method"
+            )
+
+        assert exc_info.value.status_code == 500
+        assert "500" in str(exc_info.value)
+
+    def test_mapped_exception_preserves_original_type(self):
+        """Test that mapped exceptions preserve their original type regardless of HTTP status."""
+        # Create a mock response with ValueError in the remote call error
+        mock_response = Mock()
+        mock_response.status_code = 400
+        mock_response.json.return_value = {
+            "type": "ValueError",
+            "args": ["Invalid parameter value"],
+        }
+        mock_response.raise_for_status.side_effect = httpx.HTTPStatusError(
+            "400 Bad Request", request=Mock(), response=mock_response
+        )
+
+        # Create client
+        client = get_service_client(ServiceTestClient)
+        dynamic_client = client
+
+        # Test the _handle_call_method_response directly
+        with pytest.raises(ValueError) as exc_info:
+            dynamic_client._handle_call_method_response(  # type: ignore[attr-defined]
+                response=mock_response, method_name="test_method"
+            )
+
+        assert "Invalid parameter value" in str(exc_info.value)
+
+    def test_client_error_status_codes_coverage(self):
+        """Test that various 4xx status codes are all wrapped as HTTPClientError."""
+        client_error_codes = [400, 401, 403, 404, 405, 409, 422, 429]
+
+        for status_code in client_error_codes:
+            mock_response = Mock()
+            mock_response.status_code = status_code
+            mock_response.json.return_value = {"message": f"Error {status_code}"}
+            mock_response.raise_for_status.side_effect = httpx.HTTPStatusError(
+                f"{status_code} Error", request=Mock(), response=mock_response
+            )
+
+            client = get_service_client(ServiceTestClient)
+            dynamic_client = client
+
+            with pytest.raises(HTTPClientError) as exc_info:
+                dynamic_client._handle_call_method_response(  # type: ignore
+                    response=mock_response, method_name="test_method"
+                )
+
+            assert exc_info.value.status_code == status_code
+
+    def test_server_error_status_codes_coverage(self):
+        """Test that various 5xx status codes are all wrapped as HTTPServerError."""
+        server_error_codes = [500, 501, 502, 503, 504, 505]
+
+        for status_code in server_error_codes:
+            mock_response = Mock()
+            mock_response.status_code = status_code
+            mock_response.json.return_value = {"message": f"Error {status_code}"}
+            mock_response.raise_for_status.side_effect = httpx.HTTPStatusError(
+                f"{status_code} Error", request=Mock(), response=mock_response
+            )
+
+            client = get_service_client(ServiceTestClient)
+            dynamic_client = client
+
+            with pytest.raises(HTTPServerError) as exc_info:
+                dynamic_client._handle_call_method_response(  # type: ignore
+                    response=mock_response, method_name="test_method"
+                )
+
+            assert exc_info.value.status_code == status_code
@@ -360,7 +360,7 @@ class Config(UpdateTrackingModel["Config"], BaseSettings):
         description="Maximum message size limit for communication with the message bus",
     )
 
-    backend_cors_allow_origins: List[str] = Field(default_factory=list)
+    backend_cors_allow_origins: List[str] = Field(default=["http://localhost:3000"])
 
     @field_validator("backend_cors_allow_origins")
    @classmethod
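Worth noting: pydantic-settings parses complex field types from JSON-encoded environment values, so overrides in the style the compose files previously used still work against the new default. A standalone sketch (not the real Config class):

import os

from pydantic import Field
from pydantic_settings import BaseSettings


class CorsSketch(BaseSettings):
    backend_cors_allow_origins: list[str] = Field(default=["http://localhost:3000"])


os.environ["BACKEND_CORS_ALLOW_ORIGINS"] = '["https://app.example.com"]'
print(CorsSketch().backend_cors_allow_origins)  # ['https://app.example.com']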
@@ -472,6 +472,7 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
     groq_api_key: str = Field(default="", description="Groq API key")
     open_router_api_key: str = Field(default="", description="Open Router API Key")
     llama_api_key: str = Field(default="", description="Llama API Key")
+    v0_api_key: str = Field(default="", description="v0 by Vercel API key")
 
     reddit_client_id: str = Field(default="", description="Reddit client ID")
     reddit_client_secret: str = Field(default="", description="Reddit client secret")
@@ -521,14 +522,20 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
     apollo_api_key: str = Field(default="", description="Apollo API Key")
     smartlead_api_key: str = Field(default="", description="SmartLead API Key")
     zerobounce_api_key: str = Field(default="", description="ZeroBounce API Key")
+    enrichlayer_api_key: str = Field(default="", description="Enrichlayer API Key")
 
     # AutoMod API credentials
     automod_api_key: str = Field(default="", description="AutoMod API key")
 
+    # LaunchDarkly feature flags
+    launch_darkly_sdk_key: str = Field(
+        default="",
+        description="The LaunchDarkly SDK key for feature flag management",
+    )
+
     ayrshare_api_key: str = Field(default="", description="Ayrshare API Key")
     ayrshare_jwt_key: str = Field(default="", description="Ayrshare private Key")
     # Add more secret fields as needed
 
     model_config = SettingsConfigDict(
         env_file=".env",
         env_file_encoding="utf-8",
|
||||
|
||||
# Import API functions from the backend
|
||||
from backend.data.user import get_or_create_user
|
||||
from backend.server.integrations.utils import get_supabase
|
||||
from backend.server.v2.library.db import create_library_agent, create_preset
|
||||
from backend.server.v2.library.model import LibraryAgentPresetCreatable
|
||||
from backend.server.v2.store.db import create_store_submission, review_store_submission
|
||||
from backend.util.clients import get_supabase
|
||||
|
||||
faker = Faker()
|
||||
|
||||
|
||||
@@ -1,123 +0,0 @@
-############
-# Secrets
-# YOU MUST CHANGE THESE BEFORE GOING INTO PRODUCTION
-############
-
-POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password
-JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long
-ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE
-SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
-DASHBOARD_USERNAME=supabase
-DASHBOARD_PASSWORD=this_password_is_insecure_and_should_be_updated
-SECRET_KEY_BASE=UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
-VAULT_ENC_KEY=your-encryption-key-32-chars-min
-
-
-############
-# Database - You can change these to any PostgreSQL database that has logical replication enabled.
-############
-
-POSTGRES_HOST=db
-POSTGRES_DB=postgres
-POSTGRES_PORT=5432
-# default user is postgres
-
-
-############
-# Supavisor -- Database pooler
-############
-POOLER_PROXY_PORT_TRANSACTION=6543
-POOLER_DEFAULT_POOL_SIZE=20
-POOLER_MAX_CLIENT_CONN=100
-POOLER_TENANT_ID=your-tenant-id
-
-
-############
-# API Proxy - Configuration for the Kong Reverse proxy.
-############
-
-KONG_HTTP_PORT=8000
-KONG_HTTPS_PORT=8443
-
-
-############
-# API - Configuration for PostgREST.
-############
-
-PGRST_DB_SCHEMAS=public,storage,graphql_public
-
-
-############
-# Auth - Configuration for the GoTrue authentication server.
-############
-
-## General
-SITE_URL=http://localhost:3000
-ADDITIONAL_REDIRECT_URLS=
-JWT_EXPIRY=3600
-DISABLE_SIGNUP=false
-API_EXTERNAL_URL=http://localhost:8000
-
-## Mailer Config
-MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify"
-MAILER_URLPATHS_INVITE="/auth/v1/verify"
-MAILER_URLPATHS_RECOVERY="/auth/v1/verify"
-MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify"
-
-## Email auth
-ENABLE_EMAIL_SIGNUP=true
-ENABLE_EMAIL_AUTOCONFIRM=false
-SMTP_ADMIN_EMAIL=admin@example.com
-SMTP_HOST=supabase-mail
-SMTP_PORT=2500
-SMTP_USER=fake_mail_user
-SMTP_PASS=fake_mail_password
-SMTP_SENDER_NAME=fake_sender
-ENABLE_ANONYMOUS_USERS=false
-
-## Phone auth
-ENABLE_PHONE_SIGNUP=true
-ENABLE_PHONE_AUTOCONFIRM=true
-
-
-############
-# Studio - Configuration for the Dashboard
-############
-
-STUDIO_DEFAULT_ORGANIZATION=Default Organization
-STUDIO_DEFAULT_PROJECT=Default Project
-
-STUDIO_PORT=3000
-# replace if you intend to use Studio outside of localhost
-SUPABASE_PUBLIC_URL=http://localhost:8000
-
-# Enable webp support
-IMGPROXY_ENABLE_WEBP_DETECTION=true
-
-# Add your OpenAI API key to enable SQL Editor Assistant
-OPENAI_API_KEY=
-
-
-############
-# Functions - Configuration for Functions
-############
-# NOTE: VERIFY_JWT applies to all functions. Per-function VERIFY_JWT is not supported yet.
-FUNCTIONS_VERIFY_JWT=false
-
-
-############
-# Logs - Configuration for Logflare
-# Please refer to https://supabase.com/docs/reference/self-hosting-analytics/introduction
-############
-
-LOGFLARE_LOGGER_BACKEND_API_KEY=your-super-secret-and-long-logflare-key
-
-# Change vector.toml sinks to reflect this change
-LOGFLARE_API_KEY=your-super-secret-and-long-logflare-key
-
-# Docker socket location - this value will differ depending on your OS
-DOCKER_SOCKET_LOCATION=/var/run/docker.sock
-
-# Google Cloud Project details
-GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID
-GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER
1 autogpt_platform/db/docker/.gitignore (vendored)
@@ -1,5 +1,4 @@
 volumes/db/data
 volumes/storage
-.env
 test.http
 docker-compose.override.yml
@@ -5,8 +5,101 @@
 # Destroy: docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml down -v --remove-orphans
 # Reset everything: ./reset.sh
 
+# Environment Variable Loading Order (first → last, later overrides earlier):
+# 1. ../../.env.default - Default values for all Supabase settings
+# 2. ../../.env - User's custom configuration (if exists)
+# 3. ./.env - Local overrides specific to db/docker (if exists)
+# 4. environment key - Service-specific overrides defined below
+# 5. Shell environment - Variables exported before running docker compose
+
 name: supabase
 
+# Common env_file configuration for all Supabase services
+x-supabase-env-files: &supabase-env-files
+  env_file:
+    - ../../.env.default # Base defaults from platform root
+    - path: ../../.env # User overrides from platform root (optional)
+      required: false
+    - path: ./.env # Local overrides for db/docker (optional)
+      required: false
+
+# Common Supabase environment - hardcoded defaults to avoid variable substitution
+x-supabase-env: &supabase-env
+  # Core PostgreSQL settings
+  POSTGRES_PASSWORD: your-super-secret-and-long-postgres-password
+  POSTGRES_HOST: db
+  POSTGRES_PORT: "5432"
+  POSTGRES_DB: postgres
+
+  # Authentication & Security
+  JWT_SECRET: your-super-secret-jwt-token-with-at-least-32-characters-long
+  ANON_KEY: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE
+  SERVICE_ROLE_KEY: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
+  DASHBOARD_USERNAME: supabase
+  DASHBOARD_PASSWORD: this_password_is_insecure_and_should_be_updated
+  SECRET_KEY_BASE: UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
+  VAULT_ENC_KEY: your-encryption-key-32-chars-min
+
+  # URLs and Endpoints
+  SITE_URL: http://localhost:3000
+  API_EXTERNAL_URL: http://localhost:8000
+  SUPABASE_PUBLIC_URL: http://localhost:8000
+  ADDITIONAL_REDIRECT_URLS: ""
+
+  # Feature Flags
+  DISABLE_SIGNUP: "false"
+  ENABLE_EMAIL_SIGNUP: "true"
+  ENABLE_EMAIL_AUTOCONFIRM: "false"
+  ENABLE_ANONYMOUS_USERS: "false"
+  ENABLE_PHONE_SIGNUP: "true"
+  ENABLE_PHONE_AUTOCONFIRM: "true"
+  FUNCTIONS_VERIFY_JWT: "false"
+  IMGPROXY_ENABLE_WEBP_DETECTION: "true"
+
+  # Email/SMTP Configuration
+  SMTP_ADMIN_EMAIL: admin@example.com
+  SMTP_HOST: supabase-mail
+  SMTP_PORT: "2500"
+  SMTP_USER: fake_mail_user
+  SMTP_PASS: fake_mail_password
+  SMTP_SENDER_NAME: fake_sender
+
+  # Mailer URLs
+  MAILER_URLPATHS_CONFIRMATION: /auth/v1/verify
+  MAILER_URLPATHS_INVITE: /auth/v1/verify
+  MAILER_URLPATHS_RECOVERY: /auth/v1/verify
+  MAILER_URLPATHS_EMAIL_CHANGE: /auth/v1/verify
+
+  # JWT Settings
+  JWT_EXPIRY: "3600"
+
+  # Database Schemas
+  PGRST_DB_SCHEMAS: public,storage,graphql_public
+
+  # Studio Settings
+  STUDIO_DEFAULT_ORGANIZATION: Default Organization
+  STUDIO_DEFAULT_PROJECT: Default Project
+
+  # Logging
+  LOGFLARE_API_KEY: your-super-secret-and-long-logflare-key
+
+  # Pooler Settings
+  POOLER_DEFAULT_POOL_SIZE: "20"
+  POOLER_MAX_CLIENT_CONN: "100"
+  POOLER_TENANT_ID: your-tenant-id
+  POOLER_PROXY_PORT_TRANSACTION: "6543"
+
+  # Kong Ports
+  KONG_HTTP_PORT: "8000"
+  KONG_HTTPS_PORT: "8443"
+
+  # Docker
+  DOCKER_SOCKET_LOCATION: /var/run/docker.sock
+
+  # Google Cloud (if needed)
+  GOOGLE_PROJECT_ID: GOOGLE_PROJECT_ID
+  GOOGLE_PROJECT_NUMBER: GOOGLE_PROJECT_NUMBER
+
 services:
 
   studio:
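The &anchor / <<: merge pattern used above is standard YAML rather than compose-specific variable substitution. A small PyYAML sketch with illustrative keys (not the real compose file) showing how a service-level environment inherits the shared map while local keys win:

import yaml  # PyYAML resolves YAML 1.1 merge keys ("<<") in safe_load

doc = """
x-env: &shared-env
  POSTGRES_HOST: db
  POSTGRES_PORT: "5432"
services:
  studio:
    environment:
      <<: *shared-env
      POSTGRES_PORT: "6543"  # service-local override beats the shared value
"""

cfg = yaml.safe_load(doc)
print(cfg["services"]["studio"]["environment"])
# -> {'POSTGRES_HOST': 'db', 'POSTGRES_PORT': '6543'}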
@@ -24,24 +117,24 @@ services:
       timeout: 10s
       interval: 5s
       retries: 3
     depends_on:
       analytics:
         condition: service_healthy
+    <<: *supabase-env-files
     environment:
+      <<: *supabase-env
+      # Keep any existing environment variables specific to that service
       STUDIO_PG_META_URL: http://meta:8080
-      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
+      POSTGRES_PASSWORD: your-super-secret-and-long-postgres-password
 
-      DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION}
-      DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT}
-      OPENAI_API_KEY: ${OPENAI_API_KEY:-}
+      DEFAULT_ORGANIZATION_NAME: Default Organization
+      DEFAULT_PROJECT_NAME: Default Project
+      OPENAI_API_KEY: ""
 
       SUPABASE_URL: http://kong:8000
-      SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL}
-      SUPABASE_ANON_KEY: ${ANON_KEY}
-      SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
-      AUTH_JWT_SECRET: ${JWT_SECRET}
+      SUPABASE_PUBLIC_URL: http://localhost:8000
+      SUPABASE_ANON_KEY: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE
+      SUPABASE_SERVICE_KEY: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
+      AUTH_JWT_SECRET: your-super-secret-jwt-token-with-at-least-32-characters-long
 
-      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
+      LOGFLARE_API_KEY: your-super-secret-and-long-logflare-key
       LOGFLARE_URL: http://analytics:4000
       NEXT_PUBLIC_ENABLE_LOGS: true
       # Comment to use Big Query backend for analytics
@@ -54,15 +147,15 @@ services:
     image: kong:2.8.1
     restart: unless-stopped
     ports:
-      - ${KONG_HTTP_PORT}:8000/tcp
-      - ${KONG_HTTPS_PORT}:8443/tcp
+      - 8000:8000/tcp
+      - 8443:8443/tcp
     volumes:
       # https://github.com/supabase/supabase/issues/12661
       - ./volumes/api/kong.yml:/home/kong/temp.yml:ro
     depends_on:
       analytics:
         condition: service_healthy
+    <<: *supabase-env-files
     environment:
+      <<: *supabase-env
+      # Keep any existing environment variables specific to that service
       KONG_DATABASE: "off"
       KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml
       # https://github.com/supabase/cli/issues/14
@@ -70,10 +163,10 @@ services:
       KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth
       KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k
       KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k
-      SUPABASE_ANON_KEY: ${ANON_KEY}
-      SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
-      DASHBOARD_USERNAME: ${DASHBOARD_USERNAME}
-      DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD}
+      SUPABASE_ANON_KEY: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE
+      SUPABASE_SERVICE_KEY: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
+      DASHBOARD_USERNAME: supabase
+      DASHBOARD_PASSWORD: this_password_is_insecure_and_should_be_updated
       # https://unix.stackexchange.com/a/294837
       entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'
@@ -98,48 +191,49 @@ services:
       db:
         # Disable this if you are using an external Postgres database
         condition: service_healthy
       analytics:
         condition: service_healthy
+    <<: *supabase-env-files
     environment:
+      <<: *supabase-env
+      # Keep any existing environment variables specific to that service
       GOTRUE_API_HOST: 0.0.0.0
       GOTRUE_API_PORT: 9999
-      API_EXTERNAL_URL: ${API_EXTERNAL_URL}
+      API_EXTERNAL_URL: http://localhost:8000
 
       GOTRUE_DB_DRIVER: postgres
-      GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
+      GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:your-super-secret-and-long-postgres-password@db:5432/postgres
 
-      GOTRUE_SITE_URL: ${SITE_URL}
-      GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS}
-      GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP}
+      GOTRUE_SITE_URL: http://localhost:3000
+      GOTRUE_URI_ALLOW_LIST: ""
+      GOTRUE_DISABLE_SIGNUP: false
 
       GOTRUE_JWT_ADMIN_ROLES: service_role
       GOTRUE_JWT_AUD: authenticated
       GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated
-      GOTRUE_JWT_EXP: ${JWT_EXPIRY}
-      GOTRUE_JWT_SECRET: ${JWT_SECRET}
+      GOTRUE_JWT_EXP: 3600
+      GOTRUE_JWT_SECRET: your-super-secret-jwt-token-with-at-least-32-characters-long
 
-      GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP}
-      GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED: ${ENABLE_ANONYMOUS_USERS}
-      GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM}
+      GOTRUE_EXTERNAL_EMAIL_ENABLED: true
+      GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED: false
+      GOTRUE_MAILER_AUTOCONFIRM: false
 
       # Uncomment to bypass nonce check in ID Token flow. Commonly set to true when using Google Sign In on mobile.
       # GOTRUE_EXTERNAL_SKIP_NONCE_CHECK: true
 
       # GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: true
       # GOTRUE_SMTP_MAX_FREQUENCY: 1s
-      GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL}
-      GOTRUE_SMTP_HOST: ${SMTP_HOST}
-      GOTRUE_SMTP_PORT: ${SMTP_PORT}
-      GOTRUE_SMTP_USER: ${SMTP_USER}
-      GOTRUE_SMTP_PASS: ${SMTP_PASS}
-      GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME}
-      GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE}
-      GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION}
-      GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY}
-      GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE}
+      GOTRUE_SMTP_ADMIN_EMAIL: admin@example.com
+      GOTRUE_SMTP_HOST: supabase-mail
+      GOTRUE_SMTP_PORT: 2500
+      GOTRUE_SMTP_USER: fake_mail_user
+      GOTRUE_SMTP_PASS: fake_mail_password
+      GOTRUE_SMTP_SENDER_NAME: fake_sender
+      GOTRUE_MAILER_URLPATHS_INVITE: /auth/v1/verify
+      GOTRUE_MAILER_URLPATHS_CONFIRMATION: /auth/v1/verify
+      GOTRUE_MAILER_URLPATHS_RECOVERY: /auth/v1/verify
+      GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: /auth/v1/verify
 
-      GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP}
-      GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM}
+      GOTRUE_EXTERNAL_PHONE_ENABLED: true
+      GOTRUE_SMS_AUTOCONFIRM: true
       # Uncomment to enable custom access token hook. Please see: https://supabase.com/docs/guides/auth/auth-hooks for full list of hooks and additional details about custom_access_token_hook
 
       # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_ENABLED: "true"
@@ -168,16 +262,17 @@ services:
       db:
         # Disable this if you are using an external Postgres database
         condition: service_healthy
       analytics:
         condition: service_healthy
+    <<: *supabase-env-files
     environment:
-      PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
-      PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS}
+      <<: *supabase-env
+      # Keep any existing environment variables specific to that service
+      PGRST_DB_URI: postgres://authenticator:your-super-secret-and-long-postgres-password@db:5432/postgres
+      PGRST_DB_SCHEMAS: public,storage,graphql_public
       PGRST_DB_ANON_ROLE: anon
-      PGRST_JWT_SECRET: ${JWT_SECRET}
+      PGRST_JWT_SECRET: your-super-secret-jwt-token-with-at-least-32-characters-long
       PGRST_DB_USE_LEGACY_GUCS: "false"
-      PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET}
-      PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY}
+      PGRST_APP_SETTINGS_JWT_SECRET: your-super-secret-jwt-token-with-at-least-32-characters-long
+      PGRST_APP_SETTINGS_JWT_EXP: 3600
     command:
       [
         "postgrest"
@@ -192,8 +287,6 @@ services:
       db:
         # Disable this if you are using an external Postgres database
         condition: service_healthy
-      analytics:
-        condition: service_healthy
     healthcheck:
       test:
         [
@@ -204,23 +297,26 @@ services:
           "-o",
          "/dev/null",
          "-H",
-          "Authorization: Bearer ${ANON_KEY}",
+          "Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE",
          "http://localhost:4000/api/tenants/realtime-dev/health"
        ]
       timeout: 5s
       interval: 5s
       retries: 3
+    <<: *supabase-env-files
     environment:
+      <<: *supabase-env
+      # Keep any existing environment variables specific to that service
       PORT: 4000
-      DB_HOST: ${POSTGRES_HOST}
-      DB_PORT: ${POSTGRES_PORT}
+      DB_HOST: db
+      DB_PORT: 5432
       DB_USER: supabase_admin
-      DB_PASSWORD: ${POSTGRES_PASSWORD}
-      DB_NAME: ${POSTGRES_DB}
+      DB_PASSWORD: your-super-secret-and-long-postgres-password
+      DB_NAME: postgres
       DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime'
       DB_ENC_KEY: supabaserealtime
-      API_JWT_SECRET: ${JWT_SECRET}
-      SECRET_KEY_BASE: ${SECRET_KEY_BASE}
+      API_JWT_SECRET: your-super-secret-jwt-token-with-at-least-32-characters-long
+      SECRET_KEY_BASE: UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
       ERL_AFLAGS: -proto_dist inet_tcp
       DNS_NODES: "''"
       RLIMIT_NOFILE: "10000"
@@ -256,12 +352,15 @@ services:
         condition: service_started
       imgproxy:
         condition: service_started
+    <<: *supabase-env-files
     environment:
-      ANON_KEY: ${ANON_KEY}
-      SERVICE_KEY: ${SERVICE_ROLE_KEY}
+      <<: *supabase-env
+      # Keep any existing environment variables specific to that service
+      ANON_KEY: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE
+      SERVICE_KEY: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
       POSTGREST_URL: http://rest:3000
-      PGRST_JWT_SECRET: ${JWT_SECRET}
-      DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
+      PGRST_JWT_SECRET: your-super-secret-jwt-token-with-at-least-32-characters-long
+      DATABASE_URL: postgres://supabase_storage_admin:your-super-secret-and-long-postgres-password@db:5432/postgres
       FILE_SIZE_LIMIT: 52428800
       STORAGE_BACKEND: file
       FILE_STORAGE_BACKEND_PATH: /var/lib/storage
@@ -288,11 +387,14 @@ services:
       timeout: 5s
       interval: 5s
       retries: 3
+    <<: *supabase-env-files
     environment:
+      <<: *supabase-env
+      # Keep any existing environment variables specific to that service
      IMGPROXY_BIND: ":5001"
      IMGPROXY_LOCAL_FILESYSTEM_ROOT: /
      IMGPROXY_USE_ETAG: "true"
-      IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION}
+      IMGPROXY_ENABLE_WEBP_DETECTION: true
 
   meta:
     container_name: supabase-meta
@@ -302,15 +404,16 @@ services:
       db:
         # Disable this if you are using an external Postgres database
         condition: service_healthy
       analytics:
         condition: service_healthy
+    <<: *supabase-env-files
     environment:
+      <<: *supabase-env
+      # Keep any existing environment variables specific to that service
       PG_META_PORT: 8080
-      PG_META_DB_HOST: ${POSTGRES_HOST}
-      PG_META_DB_PORT: ${POSTGRES_PORT}
-      PG_META_DB_NAME: ${POSTGRES_DB}
+      PG_META_DB_HOST: db
+      PG_META_DB_PORT: 5432
+      PG_META_DB_NAME: postgres
       PG_META_DB_USER: supabase_admin
-      PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD}
+      PG_META_DB_PASSWORD: your-super-secret-and-long-postgres-password
 
   functions:
     container_name: supabase-edge-functions
@@ -318,17 +421,17 @@ services:
     restart: unless-stopped
     volumes:
       - ./volumes/functions:/home/deno/functions:Z
     depends_on:
       analytics:
         condition: service_healthy
+    <<: *supabase-env-files
     environment:
-      JWT_SECRET: ${JWT_SECRET}
+      <<: *supabase-env
+      # Keep any existing environment variables specific to that service
+      JWT_SECRET: your-super-secret-jwt-token-with-at-least-32-characters-long
       SUPABASE_URL: http://kong:8000
-      SUPABASE_ANON_KEY: ${ANON_KEY}
-      SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
-      SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
+      SUPABASE_ANON_KEY: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE
+      SUPABASE_SERVICE_ROLE_KEY: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
+      SUPABASE_DB_URL: postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres
       # TODO: Allow configuring VERIFY_JWT per function. This PR might help: https://github.com/supabase/cli/pull/786
-      VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}"
+      VERIFY_JWT: "false"
     command:
       [
         "start",
@@ -362,26 +465,29 @@ services:
       db:
         # Disable this if you are using an external Postgres database
         condition: service_healthy
+    <<: *supabase-env-files
     environment:
+      <<: *supabase-env
+      # Keep any existing environment variables specific to that service
       LOGFLARE_NODE_HOST: 127.0.0.1
       DB_USERNAME: supabase_admin
       DB_DATABASE: _supabase
-      DB_HOSTNAME: ${POSTGRES_HOST}
-      DB_PORT: ${POSTGRES_PORT}
-      DB_PASSWORD: ${POSTGRES_PASSWORD}
+      DB_HOSTNAME: db
+      DB_PORT: 5432
+      DB_PASSWORD: your-super-secret-and-long-postgres-password
       DB_SCHEMA: _analytics
-      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
+      LOGFLARE_API_KEY: your-super-secret-and-long-logflare-key
       LOGFLARE_SINGLE_TENANT: true
       LOGFLARE_SUPABASE_MODE: true
       LOGFLARE_MIN_CLUSTER_SIZE: 1
 
       # Comment variables to use Big Query backend for analytics
-      POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase
+      POSTGRES_BACKEND_URL: postgresql://supabase_admin:your-super-secret-and-long-postgres-password@db:5432/_supabase
       POSTGRES_BACKEND_SCHEMA: _analytics
       LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true
       # Uncomment to use Big Query backend for analytics
-      # GOOGLE_PROJECT_ID: ${GOOGLE_PROJECT_ID}
-      # GOOGLE_PROJECT_NUMBER: ${GOOGLE_PROJECT_NUMBER}
+      # GOOGLE_PROJECT_ID: GOOGLE_PROJECT_ID
+      # GOOGLE_PROJECT_NUMBER: GOOGLE_PROJECT_NUMBER
 
   # Comment out everything below this point if you are using an external Postgres database
   db:
@@ -419,19 +525,19 @@ services:
       interval: 5s
       timeout: 5s
       retries: 10
     depends_on:
       vector:
         condition: service_healthy
+    <<: *supabase-env-files
     environment:
+      <<: *supabase-env
+      # Keep any existing environment variables specific to that service
       POSTGRES_HOST: /var/run/postgresql
-      PGPORT: ${POSTGRES_PORT}
-      POSTGRES_PORT: ${POSTGRES_PORT}
-      PGPASSWORD: ${POSTGRES_PASSWORD}
-      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
-      PGDATABASE: ${POSTGRES_DB}
-      POSTGRES_DB: ${POSTGRES_DB}
-      JWT_SECRET: ${JWT_SECRET}
-      JWT_EXP: ${JWT_EXPIRY}
+      PGPORT: 5432
+      POSTGRES_PORT: 5432
+      PGPASSWORD: your-super-secret-and-long-postgres-password
+      POSTGRES_PASSWORD: your-super-secret-and-long-postgres-password
+      PGDATABASE: postgres
+      POSTGRES_DB: postgres
+      JWT_SECRET: your-super-secret-jwt-token-with-at-least-32-characters-long
+      JWT_EXP: 3600
     command:
       [
         "postgres",
@@ -447,7 +553,7 @@ services:
     restart: unless-stopped
     volumes:
       - ./volumes/logs/vector.yml:/etc/vector/vector.yml:ro
-      - ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro
+      - /var/run/docker.sock:/var/run/docker.sock:ro
     healthcheck:
       test:
         [
@@ -461,8 +567,11 @@ services:
       timeout: 5s
       interval: 5s
       retries: 3
+    <<: *supabase-env-files
     environment:
-      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
+      <<: *supabase-env
+      # Keep any existing environment variables specific to that service
+      LOGFLARE_API_KEY: your-super-secret-and-long-logflare-key
     command:
       [
        "--config",
@@ -475,8 +584,8 @@ services:
     image: supabase/supavisor:2.4.12
     restart: unless-stopped
     ports:
-      - ${POSTGRES_PORT}:5432
-      - ${POOLER_PROXY_PORT_TRANSACTION}:6543
+      - 5432:5432
+      - 6543:6543
     volumes:
       - ./volumes/pooler/pooler.exs:/etc/pooler/pooler.exs:ro
     healthcheck:
@@ -498,22 +607,25 @@ services:
         condition: service_healthy
       analytics:
         condition: service_healthy
+    <<: *supabase-env-files
     environment:
+      <<: *supabase-env
+      # Keep any existing environment variables specific to that service
       PORT: 4000
-      POSTGRES_PORT: ${POSTGRES_PORT}
-      POSTGRES_DB: ${POSTGRES_DB}
-      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
-      DATABASE_URL: ecto://supabase_admin:${POSTGRES_PASSWORD}@db:${POSTGRES_PORT}/_supabase
+      POSTGRES_PORT: 5432
+      POSTGRES_DB: postgres
+      POSTGRES_PASSWORD: your-super-secret-and-long-postgres-password
+      DATABASE_URL: ecto://supabase_admin:your-super-secret-and-long-postgres-password@db:5432/_supabase
       CLUSTER_POSTGRES: true
-      SECRET_KEY_BASE: ${SECRET_KEY_BASE}
-      VAULT_ENC_KEY: ${VAULT_ENC_KEY}
-      API_JWT_SECRET: ${JWT_SECRET}
-      METRICS_JWT_SECRET: ${JWT_SECRET}
+      SECRET_KEY_BASE: UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
+      VAULT_ENC_KEY: your-encryption-key-32-chars-min
+      API_JWT_SECRET: your-super-secret-jwt-token-with-at-least-32-characters-long
+      METRICS_JWT_SECRET: your-super-secret-jwt-token-with-at-least-32-characters-long
       REGION: local
       ERL_AFLAGS: -proto_dist inet_tcp
-      POOLER_TENANT_ID: ${POOLER_TENANT_ID}
-      POOLER_DEFAULT_POOL_SIZE: ${POOLER_DEFAULT_POOL_SIZE}
-      POOLER_MAX_CLIENT_CONN: ${POOLER_MAX_CLIENT_CONN}
+      POOLER_TENANT_ID: your-tenant-id
+      POOLER_DEFAULT_POOL_SIZE: 20
+      POOLER_MAX_CLIENT_CONN: 100
       POOLER_POOL_MODE: transaction
     command:
       [
@@ -34,11 +34,11 @@ else
     echo "No .env file found. Skipping .env removal step..."
 fi
 
-if [ -f ".env.example" ]; then
-    echo "Copying .env.example to .env..."
-    cp .env.example .env
+if [ -f ".env.default" ]; then
+    echo "Copying .env.default to .env..."
+    cp .env.default .env
 else
-    echo ".env.example file not found. Skipping .env reset step..."
+    echo ".env.default file not found. Skipping .env reset step..."
 fi
 
 echo "Cleanup complete!"
@@ -1,9 +1,39 @@
+# Environment Variable Loading Order (first → last, later overrides earlier):
+# 1. backend/.env.default - Default values for all settings
+# 2. backend/.env - User's custom configuration (if exists)
+# 3. environment key - Docker-specific overrides defined below
+# 4. Shell environment - Variables exported before running docker compose
+# 5. CLI arguments - docker compose run -e VAR=value
+
+# Common backend environment - Docker service names
+x-backend-env:
+  &backend-env # Docker internal service hostnames (override localhost defaults)
+  PYRO_HOST: "0.0.0.0"
+  AGENTSERVER_HOST: rest_server
+  SCHEDULER_HOST: scheduler_server
+  DATABASEMANAGER_HOST: database_manager
+  EXECUTIONMANAGER_HOST: executor
+  NOTIFICATIONMANAGER_HOST: notification_server
+  CLAMAV_SERVICE_HOST: clamav
+  DB_HOST: db
+  REDIS_HOST: redis
+  RABBITMQ_HOST: rabbitmq
+  # Override Supabase URL for Docker network
+  SUPABASE_URL: http://kong:8000
+
+# Common env_file configuration for backend services
+x-backend-env-files: &backend-env-files
+  env_file:
+    - backend/.env.default # Base defaults (always exists)
+    - path: backend/.env # User overrides (optional)
+      required: false
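A toy Python sketch mirroring the precedence list above, with later dict merges standing in for later configuration sources (the concrete values are illustrative):

env_default = {"REDIS_HOST": "localhost", "DB_HOST": "localhost"}  # 1. backend/.env.default
user_env = {"DB_HOST": "mydb.internal"}                            # 2. backend/.env (optional)
compose_environment = {"REDIS_HOST": "redis", "DB_HOST": "db"}     # 3. environment: key below
shell_env = {}                                                     # 4. exported shell variables

resolved = {**env_default, **user_env, **compose_environment, **shell_env}
print(resolved)  # {'REDIS_HOST': 'redis', 'DB_HOST': 'db'} - Docker hostnames win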
|
||||
services:
|
||||
migrate:
|
||||
build:
|
||||
context: ../
|
||||
dockerfile: autogpt_platform/backend/Dockerfile
|
||||
target: server
|
||||
target: migrate
|
||||
command: ["sh", "-c", "poetry run prisma migrate deploy"]
|
||||
develop:
|
||||
watch:
|
||||
@@ -20,10 +50,11 @@ services:
|
||||
- app-network
|
||||
restart: on-failure
|
||||
healthcheck:
|
||||
test: ["CMD", "poetry", "run", "prisma", "migrate", "status"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
test: ["CMD-SHELL", "poetry run prisma migrate status | grep -q 'No pending migrations' || exit 1"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 5s
|
||||
|
||||
redis:
|
||||
image: redis:latest
|
||||
@@ -73,29 +104,12 @@ services:
|
||||
condition: service_completed_successfully
|
||||
rabbitmq:
|
||||
condition: service_healthy
|
||||
<<: *backend-env-files
|
||||
environment:
|
||||
- SUPABASE_URL=http://kong:8000
|
||||
- SUPABASE_JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long
|
||||
- SUPABASE_SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
|
||||
- DATABASE_URL=postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
|
||||
- DIRECT_URL=postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
|
||||
- REDIS_HOST=redis
|
||||
- REDIS_PORT=6379
|
||||
- RABBITMQ_HOST=rabbitmq
|
||||
- RABBITMQ_PORT=5672
|
||||
- RABBITMQ_DEFAULT_USER=rabbitmq_user_default
|
||||
- RABBITMQ_DEFAULT_PASS=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7
|
||||
- REDIS_PASSWORD=password
|
||||
- ENABLE_AUTH=true
|
||||
- PYRO_HOST=0.0.0.0
|
||||
- SCHEDULER_HOST=scheduler_server
|
||||
- EXECUTIONMANAGER_HOST=executor
|
||||
- NOTIFICATIONMANAGER_HOST=notification_server
|
||||
- CLAMAV_SERVICE_HOST=clamav
|
||||
- NEXT_PUBLIC_FRONTEND_BASE_URL=http://localhost:3000
|
||||
- BACKEND_CORS_ALLOW_ORIGINS=["http://localhost:3000"]
|
||||
- ENCRYPTION_KEY=dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw= # DO NOT USE IN PRODUCTION!!
|
||||
- UNSUBSCRIBE_SECRET_KEY=HlP8ivStJjmbf6NKi78m_3FnOogut0t5ckzjsIqeaio= # DO NOT USE IN PRODUCTION!!
|
||||
<<: *backend-env
|
||||
# Service-specific overrides
|
||||
DATABASE_URL: postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
|
||||
DIRECT_URL: postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
|
||||
ports:
|
||||
- "8006:8006"
|
||||
networks:
|
||||
@@ -123,26 +137,12 @@ services:
condition: service_completed_successfully
database_manager:
condition: service_started
<<: *backend-env-files
environment:
- DATABASEMANAGER_HOST=database_manager
- SUPABASE_URL=http://kong:8000
- SUPABASE_JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long
- SUPABASE_SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
- DATABASE_URL=postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
- DIRECT_URL=postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
- REDIS_HOST=redis
- REDIS_PORT=6379
- REDIS_PASSWORD=password
- RABBITMQ_HOST=rabbitmq
- RABBITMQ_PORT=5672
- RABBITMQ_DEFAULT_USER=rabbitmq_user_default
- RABBITMQ_DEFAULT_PASS=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7
- ENABLE_AUTH=true
- PYRO_HOST=0.0.0.0
- AGENTSERVER_HOST=rest_server
- NOTIFICATIONMANAGER_HOST=notification_server
- CLAMAV_SERVICE_HOST=clamav
- ENCRYPTION_KEY=dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw= # DO NOT USE IN PRODUCTION!!
<<: *backend-env
# Service-specific overrides
DATABASE_URL: postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
DIRECT_URL: postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
ports:
- "8002:8002"
networks:
@@ -168,22 +168,12 @@ services:
condition: service_completed_successfully
database_manager:
condition: service_started
<<: *backend-env-files
environment:
- DATABASEMANAGER_HOST=database_manager
- SUPABASE_JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long
- DATABASE_URL=postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
- DIRECT_URL=postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
- REDIS_HOST=redis
- REDIS_PORT=6379
- REDIS_PASSWORD=password
# - RABBITMQ_HOST=rabbitmq
# - RABBITMQ_PORT=5672
# - RABBITMQ_DEFAULT_USER=rabbitmq_user_default
# - RABBITMQ_DEFAULT_PASS=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7
- ENABLE_AUTH=true
- PYRO_HOST=0.0.0.0
- BACKEND_CORS_ALLOW_ORIGINS=["http://localhost:3000"]

<<: *backend-env
# Service-specific overrides
DATABASE_URL: postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
DIRECT_URL: postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
ports:
- "8001:8001"
networks:
@@ -205,11 +195,12 @@ services:
condition: service_healthy
migrate:
condition: service_completed_successfully
<<: *backend-env-files
environment:
- DATABASE_URL=postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
- DIRECT_URL=postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
- PYRO_HOST=0.0.0.0
- ENCRYPTION_KEY=dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw= # DO NOT USE IN PRODUCTION!!
<<: *backend-env
# Service-specific overrides
DATABASE_URL: postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
DIRECT_URL: postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
ports:
- "8005:8005"
networks:
@@ -250,23 +241,12 @@ services:
# interval: 10s
# timeout: 10s
# retries: 5
<<: *backend-env-files
environment:
- DATABASEMANAGER_HOST=database_manager
- NOTIFICATIONMANAGER_HOST=notification_server
- SUPABASE_JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long
- DATABASE_URL=postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
- DIRECT_URL=postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
- REDIS_HOST=redis
- REDIS_PORT=6379
- REDIS_PASSWORD=password
- RABBITMQ_HOST=rabbitmq
- RABBITMQ_PORT=5672
- RABBITMQ_DEFAULT_USER=rabbitmq_user_default
- RABBITMQ_DEFAULT_PASS=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7
- ENABLE_AUTH=true
- PYRO_HOST=0.0.0.0
- BACKEND_CORS_ALLOW_ORIGINS=["http://localhost:3000"]

<<: *backend-env
# Service-specific overrides
DATABASE_URL: postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
DIRECT_URL: postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
ports:
- "8003:8003"
networks:
@@ -292,52 +272,39 @@ services:
condition: service_completed_successfully
database_manager:
condition: service_started
<<: *backend-env-files
environment:
- DATABASEMANAGER_HOST=database_manager
- REDIS_HOST=redis
- REDIS_PORT=6379
- REDIS_PASSWORD=password
- RABBITMQ_HOST=rabbitmq
- RABBITMQ_PORT=5672
- RABBITMQ_DEFAULT_USER=rabbitmq_user_default
- RABBITMQ_DEFAULT_PASS=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7
- ENABLE_AUTH=true
- PYRO_HOST=0.0.0.0
- BACKEND_CORS_ALLOW_ORIGINS=["http://localhost:3000"]

<<: *backend-env
ports:
- "8007:8007"
networks:
- app-network

# frontend:
# build:
# context: ../
# dockerfile: autogpt_platform/frontend/Dockerfile
# target: dev
# depends_on:
# db:
# condition: service_healthy
# rest_server:
# condition: service_started
# websocket_server:
# condition: service_started
# migrate:
# condition: service_completed_successfully
# environment:
# - NEXT_PUBLIC_SUPABASE_URL=http://kong:8000
# - NEXT_PUBLIC_SUPABASE_ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE
# - DATABASE_URL=postgresql://agpt_user:pass123@postgres:5432/postgres?connect_timeout=60&schema=platform
# - DIRECT_URL=postgresql://agpt_user:pass123@postgres:5432/postgres?connect_timeout=60&schema=platform
# - NEXT_PUBLIC_AGPT_SERVER_URL=http://localhost:8006/api
# - NEXT_PUBLIC_AGPT_WS_SERVER_URL=ws://localhost:8001/ws
# - NEXT_PUBLIC_AGPT_MARKETPLACE_URL=http://localhost:8015/api/v1/market
# - NEXT_PUBLIC_BEHAVE_AS=LOCAL
# ports:
# - "3000:3000"
# networks:
# - app-network

frontend:
build:
context: ../
dockerfile: autogpt_platform/frontend/Dockerfile
target: prod
depends_on:
db:
condition: service_healthy
migrate:
condition: service_completed_successfully
ports:
- "3000:3000"
networks:
- app-network
# Load environment variables in order (later overrides earlier)
env_file:
- path: ./frontend/.env.default # Base defaults (always exists)
- path: ./frontend/.env # User overrides (optional)
required: false
environment:
# Server-side environment variables (Docker service names)
# These override the localhost URLs from env files when running in Docker
AUTH_CALLBACK_URL: http://rest_server:8006/auth/callback
SUPABASE_URL: http://kong:8000
AGPT_SERVER_URL: http://rest_server:8006/api
AGPT_WS_SERVER_URL: ws://websocket_server:8001/ws
networks:
app-network:
driver: bridge
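
The `env_file` layering above is the key change for the frontend service: `./frontend/.env.default` supplies base defaults, an optional `./frontend/.env` overrides it key by key, and the inline `environment:` block wins over both. As a rough sketch of that precedence (illustrative only, not part of the diff; assumes dotenv-style parsing where later sources win on duplicate keys):

```typescript
import * as fs from "fs";
import { parse } from "dotenv";

// Sketch: merge env files in order; later files override earlier keys,
// mirroring Compose's top-to-bottom env_file semantics.
function loadLayeredEnv(paths: string[]): Record<string, string> {
  return paths.reduce<Record<string, string>>((acc, p) => {
    if (!fs.existsSync(p)) return acc; // optional override files may be absent
    return { ...acc, ...parse(fs.readFileSync(p)) };
  }, {});
}

// Base defaults first, user overrides second.
const env = loadLayeredEnv(["./frontend/.env.default", "./frontend/.env"]);
console.log(env.NEXT_PUBLIC_FRONTEND_BASE_URL);
```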
@@ -20,6 +20,7 @@ x-supabase-services:
- app-network
- shared-network

services:
# AGPT services
migrate:
@@ -96,19 +97,13 @@ services:
timeout: 10s
retries: 3

# frontend:
# <<: *agpt-services
# extends:
# file: ./docker-compose.platform.yml
# service: frontend

# Supabase services
studio:
<<: *supabase-services
frontend:
<<: *agpt-services
extends:
file: ./db/docker/docker-compose.yml
service: studio
file: ./docker-compose.platform.yml
service: frontend

# Supabase services (minimal: auth + db + kong)
kong:
<<: *supabase-services
extends:
@@ -123,61 +118,35 @@ services:
environment:
GOTRUE_MAILER_AUTOCONFIRM: true

rest:
<<: *supabase-services
extends:
file: ./db/docker/docker-compose.yml
service: rest

realtime:
<<: *supabase-services
extends:
file: ./db/docker/docker-compose.yml
service: realtime

storage:
<<: *supabase-services
extends:
file: ./db/docker/docker-compose.yml
service: storage

imgproxy:
<<: *supabase-services
extends:
file: ./db/docker/docker-compose.yml
service: imgproxy

meta:
<<: *supabase-services
extends:
file: ./db/docker/docker-compose.yml
service: meta

functions:
<<: *supabase-services
extends:
file: ./db/docker/docker-compose.yml
service: functions

analytics:
<<: *supabase-services
extends:
file: ./db/docker/docker-compose.yml
service: analytics

db:
<<: *supabase-services
extends:
file: ./db/docker/docker-compose.yml
service: db
ports:
- ${POSTGRES_PORT}:5432 # We don't use Supavisor locally, so we expose the db directly.
- 5432:5432 # We don't use Supavisor locally, so we expose the db directly.

vector:
# Studio and its dependencies for local development only
meta:
<<: *supabase-services
profiles:
- local
extends:
file: ./db/docker/docker-compose.yml
service: vector
service: meta

studio:
<<: *supabase-services
profiles:
- local
extends:
file: ./db/docker/docker-compose.yml
service: studio
depends_on:
meta:
condition: service_healthy
# environment:
# NEXT_PUBLIC_ENABLE_LOGS: false # Disable analytics/logging features

deps:
<<: *supabase-services
@@ -186,13 +155,24 @@ services:
image: busybox
command: /bin/true
depends_on:
- studio
- kong
- auth
- meta
- analytics
- db
- vector
- studio
- redis
- rabbitmq
- clamav
- migrate

deps_backend:
<<: *agpt-services
profiles:
- local
image: busybox
command: /bin/true
depends_on:
- deps
- rest_server
- executor
- websocket_server
- database_manager

20 autogpt_platform/frontend/.env.default Normal file
@@ -0,0 +1,20 @@
NEXT_PUBLIC_SUPABASE_URL=http://localhost:8000
NEXT_PUBLIC_SUPABASE_ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE

NEXT_PUBLIC_AGPT_SERVER_URL=http://localhost:8006/api
NEXT_PUBLIC_AGPT_WS_SERVER_URL=ws://localhost:8001/ws
NEXT_PUBLIC_FRONTEND_BASE_URL=http://localhost:3000

NEXT_PUBLIC_APP_ENV=local
NEXT_PUBLIC_BEHAVE_AS=LOCAL

NEXT_PUBLIC_LAUNCHDARKLY_ENABLED=false
NEXT_PUBLIC_LAUNCHDARKLY_CLIENT_ID=687ab1372f497809b131e06e

NEXT_PUBLIC_SHOW_BILLING_PAGE=false
NEXT_PUBLIC_TURNSTILE=disabled
NEXT_PUBLIC_REACT_QUERY_DEVTOOL=true

NEXT_PUBLIC_GA_MEASUREMENT_ID=G-FH2XK2W4GN
NEXT_PUBLIC_PW_TEST=true

@@ -1,44 +0,0 @@
NEXT_PUBLIC_FRONTEND_BASE_URL=http://localhost:3000

NEXT_PUBLIC_AUTH_CALLBACK_URL=http://localhost:8006/auth/callback
NEXT_PUBLIC_AGPT_SERVER_URL=http://localhost:8006/api
NEXT_PUBLIC_AGPT_WS_SERVER_URL=ws://localhost:8001/ws
NEXT_PUBLIC_AGPT_MARKETPLACE_URL=http://localhost:8015/api/v1/market
NEXT_PUBLIC_LAUNCHDARKLY_ENABLED=false
NEXT_PUBLIC_LAUNCHDARKLY_CLIENT_ID=687ab1372f497809b131e06e # Local environment on Launch darkly
NEXT_PUBLIC_APP_ENV=local

NEXT_PUBLIC_AGPT_SERVER_BASE_URL=http://localhost:8006

## Locale settings

NEXT_PUBLIC_DEFAULT_LOCALE=en
NEXT_PUBLIC_LOCALES=en,es

## Supabase credentials

NEXT_PUBLIC_SUPABASE_URL=http://localhost:8000
NEXT_PUBLIC_SUPABASE_ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE

## OAuth Callback URL
## This should be {domain}/auth/callback
## Only used if you're using Supabase and OAuth
AUTH_CALLBACK_URL="${NEXT_PUBLIC_FRONTEND_BASE_URL}/auth/callback"
GA_MEASUREMENT_ID=G-FH2XK2W4GN

# When running locally, set NEXT_PUBLIC_BEHAVE_AS=CLOUD to use a locally hosted marketplace (as is typical in development and in the cloud deployment); otherwise set it to LOCAL to have the marketplace open in a new tab
NEXT_PUBLIC_BEHAVE_AS=LOCAL
NEXT_PUBLIC_SHOW_BILLING_PAGE=false

## Cloudflare Turnstile (CAPTCHA) Configuration
## Get these from the Cloudflare Turnstile dashboard: https://dash.cloudflare.com/?to=/:account/turnstile
## This is the frontend site key
NEXT_PUBLIC_CLOUDFLARE_TURNSTILE_SITE_KEY=
NEXT_PUBLIC_TURNSTILE=disabled

# Devtools
NEXT_PUBLIC_REACT_QUERY_DEVTOOL=true

# In case you are running Playwright locally
# NEXT_PUBLIC_PW_TEST=true

6 autogpt_platform/frontend/.gitignore vendored
@@ -31,6 +31,7 @@ yarn.lock
package-lock.json

# local env files
.env
.env*.local

# vercel
@@ -53,4 +54,7 @@ storybook-static
*.ignore.*
*.ign.*
!.npmrc
.cursorrules

# Generated API files
src/app/api/__generated__/
@@ -5,18 +5,16 @@ RUN corepack enable
COPY autogpt_platform/frontend/package.json autogpt_platform/frontend/pnpm-lock.yaml ./
RUN --mount=type=cache,target=/root/.local/share/pnpm pnpm install --frozen-lockfile

# Dev stage
FROM base AS dev
ENV NODE_ENV=development
ENV HOSTNAME=0.0.0.0
COPY autogpt_platform/frontend/ .
EXPOSE 3000
CMD ["pnpm", "run", "dev", "--hostname", "0.0.0.0"]

# Build stage for prod
FROM base AS build

COPY autogpt_platform/frontend/ .
ENV SKIP_STORYBOOK_TESTS=true
RUN if [ -f .env ]; then \
cat .env.default .env > .env.merged && mv .env.merged .env; \
else \
cp .env.default .env; \
fi
RUN pnpm run generate:api
RUN pnpm build

# Prod stage - based on NextJS reference Dockerfile https://github.com/vercel/next.js/blob/64271354533ed16da51be5dce85f0dbd15f17517/examples/with-docker/Dockerfile

@@ -18,31 +18,58 @@ Make sure you have Node.js 16.10+ installed. Corepack is included with Node.js b
>
> Then follow the setup steps below.

### Setup
## Setup

1. **Enable corepack** (run this once on your system):
### 1. **Enable corepack** (run this once on your system):

```bash
corepack enable
```

This enables corepack to automatically manage pnpm based on the `packageManager` field in `package.json`.

2. **Install dependencies**:
### 2. **Install dependencies**:

```bash
pnpm i
```

3. **Start the development server**:
```bash
pnpm dev
```
### 3. **Start the development server**:

Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
#### Running the Front-end & Back-end separately

We recommend this approach if you are doing active development on the project. First spin up the Back-end:

```bash
# on `autogpt_platform`
docker compose --profile local up deps_backend -d
# on `autogpt_platform/backend`
poetry run app
```

Then start the Front-end:

```bash
# on `autogpt_platform/frontend`
pnpm dev
```

Open [http://localhost:3000](http://localhost:3000) with your browser to see the result. If the server starts on `http://localhost:3001` instead, the Front-end is already running via Docker; stop that container first or run `docker compose down`.

You can start editing the page by modifying `app/page.tsx`. The page auto-updates as you edit the file.

#### Running both the Front-end and Back-end via Docker

If you run:

```bash
# on `autogpt_platform`
docker compose up -d
```

It will spin up the Back-end and Front-end via Docker. The Front-end will start on port `3000`. This might not be
what you want when actively contributing to the Front-end as you won't have direct/easy access to the Next.js dev server.

### Subsequent Runs

For subsequent development sessions, you only need to run:
@@ -60,12 +87,12 @@ Every time a new Front-end dependency is added by you or others, you will need t
- `pnpm start` - Start production server
- `pnpm lint` - Run ESLint and Prettier checks
- `pnpm format` - Format code with Prettier
- `pnpm type-check` - Run TypeScript type checking
- `pnpm types` - Run TypeScript type checking
- `pnpm test` - Run Playwright tests
- `pnpm test-ui` - Run Playwright tests with UI
- `pnpm fetch:openapi` - Fetch OpenAPI spec from backend
- `pnpm generate:api-client` - Generate API client from OpenAPI spec
- `pnpm generate:api-all` - Fetch OpenAPI spec and generate API client
- `pnpm generate:api` - Fetch OpenAPI spec and generate API client

This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font.

@@ -88,7 +115,7 @@ This project uses an auto-generated API client powered by [**Orval**](https://or

```bash
# Fetch OpenAPI spec from backend and generate client
pnpm generate:api-all
pnpm generate:api

# Only fetch the OpenAPI spec
pnpm fetch:openapi

@@ -3,13 +3,13 @@
"version": "0.3.4",
"private": true,
"scripts": {
"dev": "next dev --turbo",
"build": "cross-env pnpm run generate:api-client && SKIP_STORYBOOK_TESTS=true next build",
"dev": "pnpm run generate:api:force && next dev --turbo",
"build": "next build",
"start": "next start",
"start:standalone": "cd .next/standalone && node server.js",
"lint": "next lint && prettier --check .",
"format": "prettier --write .",
"type-check": "tsc --noEmit",
"format": "next lint --fix; prettier --write .",
"types": "tsc --noEmit",
"test": "next build --turbo && playwright test",
"test-ui": "next build --turbo && playwright test --ui",
"test:no-build": "playwright test",
@@ -18,44 +18,43 @@
"build-storybook": "storybook build",
"test-storybook": "test-storybook",
"test-storybook:ci": "concurrently -k -s first -n \"SB,TEST\" -c \"magenta,blue\" \"pnpm run build-storybook -- --quiet && npx http-server storybook-static --port 6006 --silent\" \"wait-on tcp:6006 && pnpm run test-storybook\"",
"fetch:openapi": "curl http://localhost:8006/openapi.json > ./src/app/api/openapi.json && prettier --write ./src/app/api/openapi.json",
"generate:api-client": "orval --config ./orval.config.ts",
"generate:api-all": "pnpm run fetch:openapi && pnpm run generate:api-client"
"generate:api": "npx --yes tsx ./scripts/generate-api-queries.ts && orval --config ./orval.config.ts",
"generate:api:force": "npx --yes tsx ./scripts/generate-api-queries.ts --force && orval --config ./orval.config.ts"
},
"browserslist": [
"defaults"
],
"dependencies": {
"@faker-js/faker": "9.9.0",
"@hookform/resolvers": "5.2.0",
"@next/third-parties": "15.4.4",
"@hookform/resolvers": "5.2.1",
"@next/third-parties": "15.4.6",
"@phosphor-icons/react": "2.1.10",
"@radix-ui/react-alert-dialog": "1.1.14",
"@radix-ui/react-alert-dialog": "1.1.15",
"@radix-ui/react-avatar": "1.1.10",
"@radix-ui/react-checkbox": "1.3.2",
"@radix-ui/react-collapsible": "1.1.11",
"@radix-ui/react-context-menu": "2.2.15",
"@radix-ui/react-dialog": "1.1.14",
"@radix-ui/react-dropdown-menu": "2.1.15",
"@radix-ui/react-checkbox": "1.3.3",
"@radix-ui/react-collapsible": "1.1.12",
"@radix-ui/react-context-menu": "2.2.16",
"@radix-ui/react-dialog": "1.1.15",
"@radix-ui/react-dropdown-menu": "2.1.16",
"@radix-ui/react-icons": "1.3.2",
"@radix-ui/react-label": "2.1.7",
"@radix-ui/react-popover": "1.1.14",
"@radix-ui/react-radio-group": "1.3.7",
"@radix-ui/react-scroll-area": "1.2.9",
"@radix-ui/react-select": "2.2.5",
"@radix-ui/react-popover": "1.1.15",
"@radix-ui/react-radio-group": "1.3.8",
"@radix-ui/react-scroll-area": "1.2.10",
"@radix-ui/react-select": "2.2.6",
"@radix-ui/react-separator": "1.1.7",
"@radix-ui/react-slot": "1.2.3",
"@radix-ui/react-switch": "1.2.5",
"@radix-ui/react-tabs": "1.1.12",
"@radix-ui/react-toast": "1.2.14",
"@radix-ui/react-tooltip": "1.2.7",
"@radix-ui/react-switch": "1.2.6",
"@radix-ui/react-tabs": "1.1.13",
"@radix-ui/react-toast": "1.2.15",
"@radix-ui/react-tooltip": "1.2.8",
"@sentry/nextjs": "9.42.0",
"@supabase/ssr": "0.6.1",
"@supabase/supabase-js": "2.52.1",
"@tanstack/react-query": "5.83.0",
"@supabase/supabase-js": "2.55.0",
"@tanstack/react-query": "5.85.3",
"@tanstack/react-table": "8.21.3",
"@types/jaro-winkler": "0.2.4",
"@xyflow/react": "12.8.2",
"@xyflow/react": "12.8.3",
"boring-avatars": "1.11.2",
"class-variance-authority": "0.7.1",
"clsx": "2.1.1",
@@ -65,22 +64,22 @@
"dotenv": "17.2.1",
"elliptic": "6.6.1",
"embla-carousel-react": "8.6.0",
"framer-motion": "12.23.9",
"framer-motion": "12.23.12",
"geist": "1.4.2",
"jaro-winkler": "0.2.8",
"launchdarkly-react-client-sdk": "3.8.1",
"lodash": "4.17.21",
"lucide-react": "0.525.0",
"lucide-react": "0.539.0",
"moment": "2.30.1",
"next": "15.4.4",
"next": "15.4.6",
"next-themes": "0.4.6",
"nuqs": "2.4.3",
"party-js": "2.2.0",
"react": "18.3.1",
"react-day-picker": "9.8.0",
"react-day-picker": "9.8.1",
"react-dom": "18.3.1",
"react-drag-drop-files": "2.4.0",
"react-hook-form": "7.61.1",
"react-hook-form": "7.62.0",
"react-icons": "5.5.0",
"react-markdown": "9.0.3",
"react-modal": "3.16.3",
@@ -88,7 +87,7 @@
"react-window": "1.8.11",
"recharts": "2.15.3",
"shepherd.js": "14.5.1",
"sonner": "2.0.6",
"sonner": "2.0.7",
"tailwind-merge": "2.6.0",
"tailwindcss-animate": "1.0.7",
"uuid": "11.1.0",
@@ -96,42 +95,42 @@
"zod": "3.25.76"
},
"devDependencies": {
"@chromatic-com/storybook": "4.0.1",
"@playwright/test": "1.54.1",
"@storybook/addon-a11y": "9.0.17",
"@storybook/addon-docs": "9.0.17",
"@storybook/addon-links": "9.0.17",
"@storybook/addon-onboarding": "9.0.17",
"@storybook/nextjs": "9.0.17",
"@tanstack/eslint-plugin-query": "5.81.2",
"@tanstack/react-query-devtools": "5.83.0",
"@chromatic-com/storybook": "4.1.0",
"@playwright/test": "1.54.2",
"@storybook/addon-a11y": "9.1.2",
"@storybook/addon-docs": "9.1.2",
"@storybook/addon-links": "9.1.2",
"@storybook/addon-onboarding": "9.1.2",
"@storybook/nextjs": "9.1.2",
"@tanstack/eslint-plugin-query": "5.83.1",
"@tanstack/react-query-devtools": "5.84.2",
"@types/canvas-confetti": "1.9.0",
"@types/lodash": "4.17.20",
"@types/negotiator": "0.6.4",
"@types/node": "24.0.15",
"@types/node": "24.2.1",
"@types/react": "18.3.17",
"@types/react-dom": "18.3.5",
"@types/react-modal": "3.16.3",
"@types/react-window": "1.8.8",
"axe-playwright": "2.1.0",
"chromatic": "13.1.2",
"chromatic": "13.1.3",
"concurrently": "9.2.0",
"cross-env": "7.0.3",
"eslint": "8.57.1",
"eslint-config-next": "15.4.2",
"eslint-plugin-storybook": "9.0.17",
"eslint-config-next": "15.4.6",
"eslint-plugin-storybook": "9.1.2",
"import-in-the-middle": "1.14.2",
"msw": "2.10.4",
"msw-storybook-addon": "2.0.5",
"orval": "7.10.0",
"orval": "7.11.2",
"pbkdf2": "3.1.3",
"postcss": "8.5.6",
"prettier": "3.6.2",
"prettier-plugin-tailwindcss": "0.6.14",
"require-in-the-middle": "7.5.2",
"storybook": "9.0.17",
"storybook": "9.1.2",
"tailwindcss": "3.4.17",
"typescript": "5.8.3"
"typescript": "5.9.2"
},
"msw": {
"workerDirectory": [

@@ -45,7 +45,7 @@ export default defineConfig({
webServer: {
command: "pnpm start",
url: "http://localhost:3000",
reuseExistingServer: !process.env.CI,
reuseExistingServer: true,
},

/* Configure projects for major browsers */

2651 autogpt_platform/frontend/pnpm-lock.yaml generated
File diff suppressed because it is too large
61 autogpt_platform/frontend/scripts/generate-api-queries.ts Normal file
@@ -0,0 +1,61 @@
#!/usr/bin/env node

import { getAgptServerBaseUrl } from "@/lib/env-config";
import { execSync } from "child_process";
import * as path from "path";
import * as fs from "fs";

function fetchOpenApiSpec(): void {
const args = process.argv.slice(2);
const forceFlag = args.includes("--force");

const baseUrl = getAgptServerBaseUrl();
const openApiUrl = `${baseUrl}/openapi.json`;
const outputPath = path.join(
__dirname,
"..",
"src",
"app",
"api",
"openapi.json",
);

console.log(`Output path: ${outputPath}`);
console.log(`Force flag: ${forceFlag}`);

// Check if local file exists
const localFileExists = fs.existsSync(outputPath);

if (!forceFlag && localFileExists) {
console.log("✅ Using existing local OpenAPI spec file");
console.log("💡 Use --force flag to fetch from server");
return;
}

if (!localFileExists) {
console.log("📄 No local OpenAPI spec found, fetching from server...");
} else {
console.log(
"🔄 Force flag detected, fetching fresh OpenAPI spec from server...",
);
}

console.log(`Fetching OpenAPI spec from: ${openApiUrl}`);

try {
// Fetch the OpenAPI spec
execSync(`curl "${openApiUrl}" > "${outputPath}"`, { stdio: "inherit" });

// Format with prettier
execSync(`prettier --write "${outputPath}"`, { stdio: "inherit" });

console.log("✅ OpenAPI spec fetched and formatted successfully");
} catch (error) {
console.error("❌ Failed to fetch OpenAPI spec:", error);
process.exit(1);
}
}

if (require.main === module) {
fetchOpenApiSpec();
}
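
The script above shells out to `curl` and `prettier` through `execSync`. On Node 18+ the download step could equally use the built-in `fetch`; a minimal alternative sketch (not what this PR ships, shown only for comparison):

```typescript
import { writeFileSync } from "fs";

// Sketch: fetch and pretty-print the OpenAPI spec without spawning curl
// (assumes Node 18+ with a global fetch).
async function fetchSpec(url: string, outputPath: string): Promise<void> {
  const res = await fetch(url);
  if (!res.ok) throw new Error(`HTTP ${res.status} while fetching ${url}`);
  writeFileSync(outputPath, JSON.stringify(await res.json(), null, 2) + "\n");
}
```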
@@ -14,12 +14,7 @@ export async function addDollars(formData: FormData) {
comments: formData.get("comments") as string,
};
const api = new BackendApi();
const resp = await api.addUserCredits(
data.user_id,
data.amount,
data.comments,
);
console.log(resp);
await api.addUserCredits(data.user_id, data.amount, data.comments);
revalidatePath("/admin/spending");
}

@@ -29,6 +29,7 @@ function SpendingDashboard({
</div>

<Suspense
key={`${page}-${status}-${search}`}
fallback={
<div className="py-10 text-center">Loading submissions...</div>
}

@@ -0,0 +1,63 @@
import { Button } from "@/components/ui/button";
import { cn } from "@/lib/utils";
import { Plus } from "lucide-react";
import React, { ButtonHTMLAttributes } from "react";

interface Props extends ButtonHTMLAttributes<HTMLButtonElement> {
title?: string;
description?: string;
ai_name?: string;
}

export const AiBlock: React.FC<Props> = ({
title,
description,
className,
ai_name,
...rest
}) => {
return (
<Button
className={cn(
"group flex h-[5.625rem] w-full min-w-[7.5rem] items-center justify-start space-x-3 whitespace-normal rounded-[0.75rem] bg-zinc-50 px-[0.875rem] py-[0.625rem] text-start shadow-none",
"hover:bg-zinc-100 focus:ring-0 active:bg-zinc-100 active:ring-1 active:ring-zinc-300 disabled:pointer-events-none",
className,
)}
{...rest}
>
<div className="flex flex-1 flex-col items-start gap-1.5">
<div className="space-y-0.5">
<span
className={cn(
"line-clamp-1 font-sans text-sm font-medium leading-[1.375rem] text-zinc-700 group-disabled:text-zinc-400",
)}
>
{title}
</span>
<span
className={cn(
"line-clamp-1 font-sans text-xs font-normal leading-5 text-zinc-500 group-disabled:text-zinc-400",
)}
>
{description}
</span>
</div>

<span
className={cn(
"rounded-[0.75rem] bg-zinc-200 px-[0.5rem] font-sans text-xs leading-[1.25rem] text-zinc-500",
)}
>
Supports {ai_name}
</span>
</div>
<div
className={cn(
"flex h-7 w-7 items-center justify-center rounded-[0.5rem] bg-zinc-700 group-disabled:bg-zinc-400",
)}
>
<Plus className="h-5 w-5 text-zinc-50" strokeWidth={2} />
</div>
</Button>
);
};
@@ -0,0 +1,77 @@
import { Button } from "@/components/ui/button";
import { Skeleton } from "@/components/ui/skeleton";
import { beautifyString, cn } from "@/lib/utils";
import { Plus } from "lucide-react";
import React, { ButtonHTMLAttributes } from "react";import { highlightText } from "./helpers";
|
||||
;
|
||||

interface Props extends ButtonHTMLAttributes<HTMLButtonElement> {
title?: string;
description?: string;
highlightedText?: string;
}

interface BlockComponent extends React.FC<Props> {
Skeleton: React.FC<{ className?: string }>;
}

export const Block: BlockComponent = ({
title,
description,
highlightedText,
className,
...rest
}) => {
return (
<Button
className={cn(
"group flex h-16 w-full min-w-[7.5rem] items-center justify-start space-x-3 whitespace-normal rounded-[0.75rem] bg-zinc-50 px-[0.875rem] py-[0.625rem] text-start shadow-none",
"hover:cursor-default hover:bg-zinc-100 focus:ring-0 active:bg-zinc-100 active:ring-1 active:ring-zinc-300 disabled:cursor-not-allowed",
className,
)}
{...rest}
>
<div className="flex flex-1 flex-col items-start gap-0.5">
{title && (
<span
className={cn(
"line-clamp-1 font-sans text-sm font-medium leading-[1.375rem] text-zinc-800 group-disabled:text-zinc-400",
)}
>
{highlightText(beautifyString(title), highlightedText)}
</span>
)}
{description && (
<span
className={cn(
"line-clamp-1 font-sans text-xs font-normal leading-5 text-zinc-500 group-disabled:text-zinc-400",
)}
>
{highlightText(description, highlightedText)}
</span>
)}
</div>
<div
className={cn(
"flex h-7 w-7 items-center justify-center rounded-[0.5rem] bg-zinc-700 group-disabled:bg-zinc-400",
)}
>
<Plus className="h-5 w-5 text-zinc-50" strokeWidth={2} />
</div>
</Button>
);
};

const BlockSkeleton = () => {
return (
<Skeleton className="flex h-16 w-full min-w-[7.5rem] animate-pulse items-center justify-start space-x-3 rounded-[0.75rem] bg-zinc-100 px-[0.875rem] py-[0.625rem]">
<div className="flex flex-1 flex-col items-start gap-0.5">
<Skeleton className="h-[1.375rem] w-24 rounded bg-zinc-200" />
<Skeleton className="h-5 w-32 rounded bg-zinc-200" />
</div>
<Skeleton className="h-7 w-7 rounded-[0.5rem] bg-zinc-200" />
</Skeleton>
);
};

Block.Skeleton = BlockSkeleton;
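
`Block` above and the integration components below render search matches through a `highlightText(text, highlightedText)` helper imported from `./helpers`, which this diff does not include. A hypothetical sketch of what such a helper could look like (the shipped implementation may differ):

```tsx
import React from "react";

// Hypothetical: wrap case-insensitive matches of `query` in a <mark> element.
export function highlightText(text: string, query?: string): React.ReactNode {
  if (!query) return text;
  const escaped = query.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"); // escape regex metacharacters
  const parts = text.split(new RegExp(`(${escaped})`, "i"));
  return parts.map((part, i) =>
    part.toLowerCase() === query.toLowerCase() ? (
      <mark key={i}>{part}</mark>
    ) : (
      part
    ),
  );
}
```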
@@ -0,0 +1,51 @@
import React from "react";
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "@/components/ui/popover";
import { ToyBrick } from "lucide-react";
import { BlockMenuContent } from "../BlockMenuContent/BlockMenuContent";
import { ControlPanelButton } from "../ControlPanelButton";
import { useBlockMenu } from "./useBlockMenu";

interface BlockMenuProps {
pinBlocksPopover: boolean;
blockMenuSelected: "save" | "block" | "";
setBlockMenuSelected: React.Dispatch<
React.SetStateAction<"" | "save" | "block">
>;
}

export const BlockMenu: React.FC<BlockMenuProps> = ({
pinBlocksPopover,
blockMenuSelected,
setBlockMenuSelected,
}) => {
const { open, onOpen } = useBlockMenu({ pinBlocksPopover, setBlockMenuSelected });
return (
<Popover open={pinBlocksPopover ? true : open} onOpenChange={onOpen}>
<PopoverTrigger className="hover:cursor-pointer">
<ControlPanelButton
data-id="blocks-control-popover-trigger"
data-testid="blocks-control-blocks-button"
selected={blockMenuSelected === "block"}
className="rounded-none"
>
{/* Need to find phosphor icon alternative for this lucide icon */}
<ToyBrick className="h-5 w-6" strokeWidth={2} />
</ControlPanelButton>
</PopoverTrigger>

<PopoverContent
side="right"
align="start"
sideOffset={16}
className="absolute h-[75vh] w-[46.625rem] overflow-hidden rounded-[1rem] border-none p-0 shadow-[0_2px_6px_0_rgba(0,0,0,0.05)]"
data-id="blocks-control-popover-content"
>
<BlockMenuContent />
</PopoverContent>
</Popover>
);
};
@@ -0,0 +1,23 @@
import { useState } from "react";

interface useBlockMenuProps {
pinBlocksPopover: boolean;
setBlockMenuSelected: React.Dispatch<
React.SetStateAction<"" | "save" | "block">
>;
}

export const useBlockMenu = ({ pinBlocksPopover, setBlockMenuSelected }: useBlockMenuProps) => {
const [open, setOpen] = useState(false);
const onOpen = (newOpen: boolean) => {
if (!pinBlocksPopover) {
setOpen(newOpen);
setBlockMenuSelected(newOpen ? "block" : "");
}
};

return {
open,
onOpen,
};
};
@@ -0,0 +1,10 @@
"use client";
import React from "react";

export const BlockMenuContent = () => {
return (
<div className="flex h-full w-full flex-col items-center justify-center">
This is the block menu content
</div>
);
};
@@ -0,0 +1,35 @@
// BLOCK MENU TODO: We need a disable state in this, currently it's not in design.

import { cn } from "@/lib/utils";
import React from "react";

interface Props extends React.HTMLAttributes<HTMLDivElement> {
selected?: boolean;
children?: React.ReactNode; // For icon purpose
disabled?: boolean;
}

export const ControlPanelButton: React.FC<Props> = ({
selected = false,
children,
disabled,
className,
...rest
}) => {
return (
// Using div instead of button, because it's only for design purposes. We are using this to give design to PopoverTrigger.
<div
role="button"
className={cn(
"flex h-[4.25rem] w-[4.25rem] items-center justify-center whitespace-normal bg-white p-[1.38rem] text-zinc-800 shadow-none hover:cursor-pointer hover:bg-zinc-100 hover:text-zinc-950 focus:ring-0",
selected &&
"bg-violet-50 text-violet-700 hover:cursor-default hover:bg-violet-50 hover:text-violet-700 active:bg-violet-50 active:text-violet-700",
disabled && "cursor-not-allowed",
className,
)}
{...rest}
>
{children}
</div>
);
};
@@ -0,0 +1,54 @@
import { Button } from "@/components/ui/button";
import { cn } from "@/lib/utils";
import { X } from "lucide-react";
import React, { ButtonHTMLAttributes } from "react";

interface Props extends ButtonHTMLAttributes<HTMLButtonElement> {
selected?: boolean;
number?: number;
name?: string;
}

export const FilterChip: React.FC<Props> = ({
selected = false,
number,
name,
className,
...rest
}) => {
return (
<Button
className={cn(
"group w-fit space-x-1 rounded-[1.5rem] border border-zinc-300 bg-transparent px-[0.625rem] py-[0.375rem] shadow-none transition-transform duration-300 ease-in-out",
"hover:border-violet-500 hover:bg-transparent focus:ring-0 disabled:cursor-not-allowed",
selected && "border-0 bg-violet-700 hover:border",
className,
)}
{...rest}
>
<span
className={cn(
"font-sans text-sm font-medium leading-[1.375rem] text-zinc-600 group-hover:text-zinc-600 group-disabled:text-zinc-400",
selected && "text-zinc-50",
)}
>
{name}
</span>
{selected && (
<>
<span className="flex h-4 w-4 items-center justify-center rounded-full bg-zinc-50 transition-all duration-300 ease-in-out group-hover:hidden">
<X
className="h-3 w-3 rounded-full text-violet-700"
strokeWidth={2}
/>
</span>
{number !== undefined && (
<span className="hidden h-[1.375rem] items-center rounded-[1.25rem] bg-violet-700 p-[0.375rem] text-zinc-50 transition-all duration-300 ease-in-out animate-in fade-in zoom-in group-hover:flex">
{number > 100 ? "100+" : number}
</span>
)}
</>
)}
</Button>
);
};
@@ -0,0 +1,88 @@
import { Button } from "@/components/ui/button";
import { Skeleton } from "@/components/ui/skeleton";
import { beautifyString, cn } from "@/lib/utils";
import Image from "next/image";
import React, { ButtonHTMLAttributes } from "react";

interface Props extends ButtonHTMLAttributes<HTMLButtonElement> {
title?: string;
description?: string;
icon_url?: string;
number_of_blocks?: number;
}

interface IntegrationComponent extends React.FC<Props> {
Skeleton: React.FC<{ className?: string }>;
}

export const Integration: IntegrationComponent = ({
title,
icon_url,
description,
className,
number_of_blocks,
...rest
}) => {
return (
<Button
className={cn(
"group flex h-16 w-full min-w-[7.5rem] items-center justify-start space-x-3 whitespace-normal rounded-[0.75rem] bg-zinc-50 px-[0.875rem] py-[0.625rem] text-start shadow-none",
"hover:cursor-default hover:bg-zinc-100 focus:ring-0 active:bg-zinc-50 active:ring-1 active:ring-zinc-300 disabled:pointer-events-none",
className,
)}
{...rest}
>
<div className="relative h-[2.625rem] w-[2.625rem] overflow-hidden rounded-[0.5rem] bg-white">
{icon_url && (
<Image
src={icon_url}
alt="integration-icon"
fill
sizes="2.25rem"
className="w-full rounded-[0.5rem] object-contain group-disabled:opacity-50"
/>
)}
</div>

<div className="w-full">
<div className="flex items-center justify-between gap-2">
{title && (
<p className="line-clamp-1 flex-1 font-sans text-sm font-medium leading-[1.375rem] text-zinc-700 group-disabled:text-zinc-400">
{beautifyString(title)}
</p>
)}
<span className="flex h-[1.375rem] w-[1.6875rem] items-center justify-center rounded-[1.25rem] bg-[#f0f0f0] p-1.5 font-sans text-sm leading-[1.375rem] text-zinc-500 group-disabled:text-zinc-400">
{number_of_blocks}
</span>
</div>
<span className="line-clamp-1 font-sans text-xs font-normal leading-5 text-zinc-500 group-disabled:text-zinc-400">
{description}
</span>
</div>
</Button>
);
};

const IntegrationSkeleton: React.FC<{ className?: string }> = ({
className,
}) => {
return (
<Skeleton
className={cn(
"flex h-16 w-full min-w-[7.5rem] animate-pulse items-center justify-start space-x-3 rounded-[0.75rem] bg-zinc-100 px-[0.875rem] py-[0.625rem]",
className,
)}
>
<Skeleton className="h-[2.625rem] w-[2.625rem] rounded-[0.5rem] bg-zinc-200" />
<div className="flex flex-1 flex-col items-start gap-0.5">
<div className="flex w-full items-center justify-between">
<Skeleton className="h-[1.375rem] w-24 rounded bg-zinc-200" />
<Skeleton className="h-[1.375rem] w-[1.6875rem] rounded-[1.25rem] bg-zinc-200" />
</div>
<Skeleton className="h-5 w-[80%] rounded bg-zinc-200" />
</div>
</Skeleton>
);
};

Integration.Skeleton = IntegrationSkeleton;
@@ -0,0 +1,60 @@
import { Button } from "@/components/ui/button";
import { Skeleton } from "@/components/ui/skeleton";
import { beautifyString, cn } from "@/lib/utils";
import Image from "next/image";
import React, { ButtonHTMLAttributes } from "react";

interface Props extends ButtonHTMLAttributes<HTMLButtonElement> {
name?: string;
icon_url?: string;
}

interface IntegrationChipComponent extends React.FC<Props> {
Skeleton: React.FC;
}

export const IntegrationChip: IntegrationChipComponent = ({
icon_url,
name,
className,
...rest
}) => {
return (
<Button
className={cn(
"flex h-[3.25rem] w-full min-w-[7.5rem] justify-start gap-2 whitespace-normal rounded-[0.5rem] bg-zinc-50 p-2 pr-3 shadow-none",
"hover:cursor-default hover:bg-zinc-100 focus:ring-0 active:bg-zinc-100 active:ring-1 active:ring-zinc-300",
className,
)}
{...rest}
>
<div className="relative h-9 w-9 rounded-[0.5rem] bg-transparent">
{icon_url && (
<Image
src={icon_url}
alt="integration-icon"
fill
sizes="2.25rem"
className="w-full object-contain"
/>
)}
</div>
{name && (
<span className="truncate font-sans text-sm font-normal leading-[1.375rem] text-zinc-800">
{beautifyString(name)}
</span>
)}
</Button>
);
};

const IntegrationChipSkeleton: React.FC = () => {
return (
<Skeleton className="flex h-[3.25rem] w-full min-w-[7.5rem] gap-2 rounded-[0.5rem] bg-zinc-100 p-2 pr-3">
<Skeleton className="h-9 w-12 rounded-[0.5rem] bg-zinc-200" />
<Skeleton className="h-5 w-24 self-center rounded-sm bg-zinc-200" />
</Skeleton>
);
};

IntegrationChip.Skeleton = IntegrationChipSkeleton;
@@ -0,0 +1,99 @@
import { Button } from "@/components/ui/button";
import { Skeleton } from "@/components/ui/skeleton";
import { beautifyString, cn } from "@/lib/utils";
import { Plus } from "lucide-react";
import Image from "next/image";
import React, { ButtonHTMLAttributes } from "react";
import { highlightText } from "./helpers";

interface Props extends ButtonHTMLAttributes<HTMLButtonElement> {
title?: string;
description?: string;
icon_url?: string;
highlightedText?: string;
}

interface IntegrationBlockComponent extends React.FC<Props> {
Skeleton: React.FC<{ className?: string }>;
}

export const IntegrationBlock: IntegrationBlockComponent = ({
title,
icon_url,
description,
className,
highlightedText,
...rest
}) => {
return (
<Button
className={cn(
"group flex h-16 w-full min-w-[7.5rem] items-center justify-start gap-3 whitespace-normal rounded-[0.75rem] bg-zinc-50 px-[0.875rem] py-[0.625rem] text-start shadow-none",
"hover:cursor-default hover:bg-zinc-100 focus:ring-0 active:bg-zinc-100 active:ring-1 active:ring-zinc-300 disabled:cursor-not-allowed",
className,
)}
{...rest}
>
<div className="relative h-[2.625rem] w-[2.625rem] rounded-[0.5rem] bg-white">
{icon_url && (
<Image
src={icon_url}
alt="integration-icon"
fill
sizes="2.25rem"
className="w-full object-contain group-disabled:opacity-50"
/>
)}
</div>
<div className="flex flex-1 flex-col items-start gap-0.5">
{title && (
<span
className={cn(
"line-clamp-1 font-sans text-sm font-medium leading-[1.375rem] text-zinc-800 group-disabled:text-zinc-400",
)}
>
{highlightText(beautifyString(title), highlightedText)}
</span>
)}
{description && (
<span
className={cn(
"line-clamp-1 font-sans text-xs font-normal leading-5 text-zinc-500 group-disabled:text-zinc-400",
)}
>
{highlightText(description, highlightedText)}
</span>
)}
</div>
<div
className={cn(
"flex h-7 w-7 items-center justify-center rounded-[0.5rem] bg-zinc-700 group-disabled:bg-zinc-400",
)}
>
<Plus className="h-5 w-5 text-zinc-50" strokeWidth={2} />
</div>
</Button>
);
};

const IntegrationBlockSkeleton = ({ className }: { className?: string }) => {
return (
<Skeleton
className={cn(
"flex h-16 w-full min-w-[7.5rem] animate-pulse items-center justify-start gap-3 rounded-[0.75rem] bg-zinc-100 px-[0.875rem] py-[0.625rem]",
className,
)}
>
<Skeleton className="h-[2.625rem] w-[2.625rem] rounded-[0.5rem] bg-zinc-200" />
<div className="flex flex-1 flex-col items-start gap-0.5">
<Skeleton className="h-[1.375rem] w-24 rounded bg-zinc-200" />
<Skeleton className="h-5 w-32 rounded bg-zinc-200" />
</div>
<Skeleton className="h-7 w-7 rounded-[0.5rem] bg-zinc-200" />
</Skeleton>
);
};

IntegrationBlock.Skeleton = IntegrationBlockSkeleton;
@@ -0,0 +1,135 @@
import { Button } from "@/components/ui/button";
import { Skeleton } from "@/components/ui/skeleton";
import { cn } from "@/lib/utils";
import { ExternalLink, Loader2, Plus } from "lucide-react";
import Image from "next/image";
import React, { ButtonHTMLAttributes } from "react";
import Link from "next/link";
import { highlightText } from "./helpers";

interface Props extends ButtonHTMLAttributes<HTMLButtonElement> {
title?: string;
creator_name?: string;
number_of_runs?: number;
image_url?: string;
highlightedText?: string;
slug: string;
loading: boolean;
}

interface MarketplaceAgentBlockComponent extends React.FC<Props> {
Skeleton: React.FC<{ className?: string }>;
}

export const MarketplaceAgentBlock: MarketplaceAgentBlockComponent = ({
title,
image_url,
creator_name,
number_of_runs,
className,
loading,
highlightedText,
slug,
...rest
}) => {
return (
<Button
className={cn(
"group flex h-[4.375rem] w-full min-w-[7.5rem] items-center justify-start gap-3 whitespace-normal rounded-[0.75rem] bg-zinc-50 p-[0.625rem] pr-[0.875rem] text-start shadow-none",
"hover:cursor-default hover:bg-zinc-100 focus:ring-0 active:bg-zinc-100 active:ring-1 active:ring-zinc-300 disabled:pointer-events-none",
className,
)}
{...rest}
>
<div className="relative h-[3.125rem] w-[5.625rem] overflow-hidden rounded-[0.375rem] bg-white">
{image_url && (
<Image
src={image_url}
alt="integration-icon"
fill
sizes="5.625rem"
className="w-full object-contain group-disabled:opacity-50"
/>
)}
</div>
<div className="flex flex-1 flex-col items-start gap-0.5">
{title && (
<span
className={cn(
"line-clamp-1 font-sans text-sm font-medium leading-[1.375rem] text-zinc-800 group-disabled:text-zinc-400",
)}
>
{highlightText(title, highlightedText)}
</span>
)}
<div className="flex items-center space-x-2.5">
<span
className={cn(
"truncate font-sans text-xs font-normal leading-5 text-zinc-500 group-disabled:text-zinc-400",
)}
>
By {creator_name}
</span>

<span className="font-sans text-zinc-400">•</span>

<span
className={cn(
"truncate font-sans text-xs font-normal leading-5 text-zinc-500 group-disabled:text-zinc-400",
)}
>
{number_of_runs} runs
</span>
<span className="font-sans text-zinc-400">•</span>
<Link
href={`/marketplace/agent/${creator_name}/${slug}`}
className="flex gap-0.5 truncate"
onClick={(e) => e.stopPropagation()}
>
<span className="font-sans text-xs leading-5 text-blue-700 underline">
Agent page
</span>
<ExternalLink className="h-4 w-4 text-blue-700" strokeWidth={1} />
</Link>
</div>
</div>
<div
className={cn(
"flex h-7 min-w-7 items-center justify-center rounded-[0.5rem] bg-zinc-700 group-disabled:bg-zinc-400",
)}
>
{!loading ? (
<Plus className="h-5 w-5 text-zinc-50" strokeWidth={2} />
) : (
<Loader2 className="h-5 w-5 animate-spin" />
)}
</div>
</Button>
);
};

const MarketplaceAgentBlockSkeleton: React.FC<{ className?: string }> = ({
className,
}) => {
return (
<Skeleton
className={cn(
"flex h-[4.375rem] w-full min-w-[7.5rem] animate-pulse items-center justify-start gap-3 rounded-[0.75rem] bg-zinc-100 p-[0.625rem] pr-[0.875rem]",
className,
)}
>
<Skeleton className="h-[3.125rem] w-[5.625rem] rounded-[0.375rem] bg-zinc-200" />
<div className="flex flex-1 flex-col items-start gap-0.5">
<Skeleton className="h-[1.375rem] w-24 rounded bg-zinc-200" />
<div className="flex items-center gap-1">
<Skeleton className="h-5 w-16 rounded bg-zinc-200" />

<Skeleton className="h-5 w-16 rounded bg-zinc-200" />
</div>
</div>
<Skeleton className="h-7 w-7 rounded-[0.5rem] bg-zinc-200" />
</Skeleton>
);
};

MarketplaceAgentBlock.Skeleton = MarketplaceAgentBlockSkeleton;
@@ -0,0 +1,40 @@
// BLOCK MENU TODO: We need to add a better hover state to it; currently it's not in the design either.

import { Button } from "@/components/ui/button";
import { cn } from "@/lib/utils";
import React, { ButtonHTMLAttributes } from "react";

interface Props extends ButtonHTMLAttributes<HTMLButtonElement> {
selected?: boolean;
number?: number;
name?: string;
}

export const MenuItem: React.FC<Props> = ({
selected = false,
number,
name,
className,
...rest
}) => {
return (
<Button
className={cn(
"flex h-[2.375rem] w-[12.875rem] justify-between whitespace-normal rounded-[0.5rem] bg-transparent p-2 pl-3 shadow-none",
"hover:cursor-default hover:bg-zinc-100 focus:ring-0",
selected && "bg-zinc-100",
className,
)}
{...rest}
>
<span className="truncate font-sans text-sm font-medium leading-[1.375rem] text-zinc-800">
{name}
</span>
{number && (
<span className="font-sans text-sm font-normal leading-[1.375rem] text-zinc-600">
{number > 100 ? "100+" : number}
</span>
)}
</Button>
);
};
@@ -0,0 +1,110 @@
import { Separator } from "@/components/ui/separator";
import { cn } from "@/lib/utils";
import React, { useMemo } from "react";
import { BlockMenu } from "../BlockMenu/BlockMenu";
import { useNewControlPanel } from "./useNewControlPanel";
import { NewSaveControl } from "../SaveControl/NewSaveControl";
import { GraphExecutionID } from "@/lib/autogpt-server-api";
import { history } from "@/components/history";
import { ControlPanelButton } from "../ControlPanelButton";
import { ArrowUUpLeftIcon, ArrowUUpRightIcon } from "@phosphor-icons/react";

export type Control = {
  icon: React.ReactNode;
  label: string;
  disabled?: boolean;
  onClick: () => void;
};

interface ControlPanelProps {
  className?: string;
  flowExecutionID: GraphExecutionID | undefined;
  visualizeBeads: "no" | "static" | "animate";
  pinSavePopover: boolean;
  pinBlocksPopover: boolean;
}

export const NewControlPanel = ({
  flowExecutionID,
  visualizeBeads,
  pinSavePopover,
  pinBlocksPopover,
  className,
}: ControlPanelProps) => {
  const {
    blockMenuSelected,
    setBlockMenuSelected,
    agentDescription,
    setAgentDescription,
    saveAgent,
    agentName,
    setAgentName,
    savedAgent,
    isSaving,
    isRunning,
    isStopping,
  } = useNewControlPanel({ flowExecutionID, visualizeBeads });

  const controls: Control[] = useMemo(
    () => [
      {
        label: "Undo",
        icon: <ArrowUUpLeftIcon size={20} weight="bold" />,
        onClick: history.undo,
        disabled: !history.canUndo(),
      },
      {
        label: "Redo",
        icon: <ArrowUUpRightIcon size={20} weight="bold" />,
        onClick: history.redo,
        disabled: !history.canRedo(),
      },
    ],
    [],
  );

  return (
    <section
      className={cn(
        "absolute left-4 top-24 z-10 w-[4.25rem] overflow-hidden rounded-[1rem] border-none bg-white p-0 shadow-[0_1px_5px_0_rgba(0,0,0,0.1)]",
        className,
      )}
    >
      <div className="flex flex-col items-center justify-center rounded-[1rem] p-0">
        <BlockMenu
          pinBlocksPopover={pinBlocksPopover}
          blockMenuSelected={blockMenuSelected}
          setBlockMenuSelected={setBlockMenuSelected}
        />
        <Separator className="text-[#E1E1E1]" />
        {controls.map((control, index) => (
          <ControlPanelButton
            key={index}
            onClick={() => control.onClick()}
            data-id={`control-button-${index}`}
            data-testid={`blocks-control-${control.label.toLowerCase()}-button`}
            disabled={control.disabled || false}
            className="rounded-none"
          >
            {control.icon}
          </ControlPanelButton>
        ))}
        <Separator className="text-[#E1E1E1]" />
        <NewSaveControl
          agentMeta={savedAgent}
          canSave={!isSaving && !isRunning && !isStopping}
          onSave={saveAgent}
          agentDescription={agentDescription}
          onDescriptionChange={setAgentDescription}
          agentName={agentName}
          onNameChange={setAgentName}
          pinSavePopover={pinSavePopover}
          blockMenuSelected={blockMenuSelected}
          setBlockMenuSelected={setBlockMenuSelected}
        />
      </div>
    </section>
  );
};

export default NewControlPanel;
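One detail worth flagging in NewControlPanel: the controls array is memoized with an empty dependency list, so history.canUndo() and history.canRedo() are read once on mount and the disabled flags never refresh as the undo stack changes. A minimal sketch of an alternative (not the committed code) is to drop the memo and derive the flags on every render, assuming the component re-renders whenever the graph changes:

// Hypothetical variant: rebuild the array each render so the disabled
// flags track the current undo/redo stack instead of the mount-time state.
const controls: Control[] = [
  {
    label: "Undo",
    icon: <ArrowUUpLeftIcon size={20} weight="bold" />,
    onClick: history.undo,
    disabled: !history.canUndo(),
  },
  {
    label: "Redo",
    icon: <ArrowUUpRightIcon size={20} weight="bold" />,
    onClick: history.redo,
    disabled: !history.canRedo(),
  },
];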
@@ -0,0 +1,35 @@
import useAgentGraph from "@/hooks/useAgentGraph";
import { GraphExecutionID, GraphID } from "@/lib/autogpt-server-api";
import { useSearchParams } from "next/navigation";
import { useState } from "react";

export interface NewControlPanelProps {
  flowExecutionID: GraphExecutionID | undefined;
  visualizeBeads: "no" | "static" | "animate";
}

export const useNewControlPanel = ({
  flowExecutionID,
  visualizeBeads,
}: NewControlPanelProps) => {
  const [blockMenuSelected, setBlockMenuSelected] = useState<
    "save" | "block" | ""
  >("");
  const query = useSearchParams();
  const _graphVersion = query.get("flowVersion");
  const graphVersion = _graphVersion ? parseInt(_graphVersion, 10) : undefined;

  const flowID = (query.get("flowID") as GraphID | null) ?? undefined;
  const {
    agentDescription,
    setAgentDescription,
    saveAgent,
    agentName,
    setAgentName,
    savedAgent,
    isSaving,
    isRunning,
    isStopping,
  } = useAgentGraph(flowID, graphVersion, flowExecutionID, visualizeBeads !== "no");

  return {
    blockMenuSelected,
    setBlockMenuSelected,
    agentDescription,
    setAgentDescription,
    saveAgent,
    agentName,
    setAgentName,
    savedAgent,
    isSaving,
    isRunning,
    isStopping,
  };
};
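For orientation, a minimal hypothetical call site for the hook above; the query-string keys (flowID, flowVersion) come from the hook itself, while the URL and the destructured subset are illustrative:

// Hypothetical usage: on a builder page at /build?flowID=<GraphID>&flowVersion=2,
// the hook resolves the graph from the query string and exposes save state.
const { agentName, saveAgent, isSaving } = useNewControlPanel({
  flowExecutionID: undefined, // no execution selected yet
  visualizeBeads: "no",
});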
@@ -0,0 +1,17 @@
import { SmileySadIcon } from "@phosphor-icons/react";

export const NoSearchResult = () => {
  return (
    <div className="flex h-full w-full flex-col items-center justify-center text-center">
      <SmileySadIcon size={64} className="mb-10 text-zinc-400" />
      <div className="space-y-1">
        <p className="font-sans text-sm font-medium leading-[1.375rem] text-zinc-800">
          No match found
        </p>
        <p className="font-sans text-sm font-normal leading-[1.375rem] text-zinc-600">
          Try adjusting your search terms
        </p>
      </div>
    </div>
  );
};
@@ -0,0 +1,158 @@
import React, { useCallback, useEffect } from "react";
import {
  Popover,
  PopoverContent,
  PopoverTrigger,
} from "@/components/ui/popover";
import { Card, CardContent, CardFooter } from "@/components/ui/card";
import { Input } from "@/components/ui/input";
import { Button } from "@/components/ui/button";
import { GraphMeta } from "@/lib/autogpt-server-api";
import { Label } from "@/components/ui/label";
import { IconSave } from "@/components/ui/icons";
import { useToast } from "@/components/molecules/Toast/use-toast";
import { ControlPanelButton } from "../ControlPanelButton";

interface SaveControlProps {
  agentMeta: GraphMeta | null;
  agentName: string;
  agentDescription: string;
  canSave: boolean;
  onSave: () => void;
  onNameChange: (name: string) => void;
  onDescriptionChange: (description: string) => void;
  pinSavePopover: boolean;

  blockMenuSelected: "save" | "block" | "";
  setBlockMenuSelected: React.Dispatch<
    React.SetStateAction<"" | "save" | "block">
  >;
}

export const NewSaveControl = ({
  agentMeta,
  canSave,
  onSave,
  agentName,
  onNameChange,
  agentDescription,
  onDescriptionChange,
  blockMenuSelected,
  setBlockMenuSelected,
  pinSavePopover,
}: SaveControlProps) => {
  const handleSave = useCallback(() => {
    onSave();
  }, [onSave]);

  const { toast } = useToast();

  useEffect(() => {
    const handleKeyDown = (event: KeyboardEvent) => {
      if ((event.ctrlKey || event.metaKey) && event.key === "s") {
        event.preventDefault();
        handleSave();
        toast({
          duration: 2000,
          title: "All changes saved successfully!",
        });
      }
    };

    window.addEventListener("keydown", handleKeyDown);

    return () => {
      window.removeEventListener("keydown", handleKeyDown);
    };
  }, [handleSave, toast]);

  return (
    <Popover
      open={pinSavePopover ? true : undefined}
      onOpenChange={(open) => open || setBlockMenuSelected("")}
    >
      <PopoverTrigger>
        <ControlPanelButton
          data-id="save-control-popover-trigger"
          data-testid="blocks-control-save-button"
          selected={blockMenuSelected === "save"}
          onClick={() => {
            setBlockMenuSelected("save");
          }}
          className="rounded-none"
        >
          {/* Need to find phosphor icon alternative for this lucide icon */}
          <IconSave className="h-5 w-5" strokeWidth={2} />
        </ControlPanelButton>
      </PopoverTrigger>

      <PopoverContent
        side="right"
        sideOffset={16}
        align="start"
        className="w-[17rem] rounded-xl border-none p-0 shadow-none md:w-[30rem]"
        data-id="save-control-popover-content"
      >
        <Card className="border-none shadow-none dark:bg-slate-900">
          <CardContent className="p-4">
            <div className="grid gap-3">
              <Label htmlFor="name" className="dark:text-gray-300">
                Name
              </Label>
              <Input
                id="name"
                placeholder="Enter your agent name"
                className="col-span-3"
                value={agentName}
                onChange={(e) => onNameChange(e.target.value)}
                data-id="save-control-name-input"
                data-testid="save-control-name-input"
                maxLength={100}
              />
              <Label htmlFor="description" className="dark:text-gray-300">
                Description
              </Label>
              <Input
                id="description"
                placeholder="Your agent description"
                className="col-span-3"
                value={agentDescription}
                onChange={(e) => onDescriptionChange(e.target.value)}
                data-id="save-control-description-input"
                data-testid="save-control-description-input"
                maxLength={500}
              />
              {agentMeta?.version && (
                <>
                  <Label htmlFor="version" className="dark:text-gray-300">
                    Version
                  </Label>
                  <Input
                    id="version"
                    placeholder="Version"
                    className="col-span-3"
                    value={agentMeta?.version || "-"}
                    disabled
                    data-testid="save-control-version-output"
                  />
                </>
              )}
            </div>
          </CardContent>
          <CardFooter className="flex flex-col items-stretch gap-2">
            <Button
              className="w-full dark:bg-slate-700 dark:text-slate-100 dark:hover:bg-slate-800"
              onClick={handleSave}
              data-id="save-control-save-agent"
              data-testid="save-control-save-agent-button"
              disabled={!canSave}
            >
              Save Agent
            </Button>
          </CardFooter>
        </Card>
      </PopoverContent>
    </Popover>
  );
};
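Note that the Ctrl/Cmd+S handler above shows the success toast unconditionally, whether or not the save actually succeeds. A hedged sketch of a stricter variant, assuming onSave were changed to return a Promise (it is () => void in this diff), would confirm only after the save resolves:

// Hypothetical variant, assuming onSave: () => Promise<void>.
const handleSave = useCallback(async () => {
  await onSave(); // let failures propagate instead of toasting blindly
  toast({ duration: 2000, title: "All changes saved successfully!" });
}, [onSave, toast]);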
@@ -0,0 +1,47 @@
import { Button } from "@/components/ui/button";
import { Skeleton } from "@/components/ui/skeleton";
import { cn } from "@/lib/utils";
import { ArrowUpRight } from "lucide-react";
import React, { ButtonHTMLAttributes } from "react";

interface Props extends ButtonHTMLAttributes<HTMLButtonElement> {
  content?: string;
}

interface SearchHistoryChipComponent extends React.FC<Props> {
  Skeleton: React.FC<{ className?: string }>;
}

export const SearchHistoryChip: SearchHistoryChipComponent = ({
  content,
  className,
  ...rest
}) => {
  return (
    <Button
      className={cn(
        "my-[1px] h-[2.25rem] space-x-1 rounded-[1.5rem] bg-zinc-50 p-[0.375rem] pr-[0.625rem] shadow-none",
        "hover:cursor-default hover:bg-zinc-100 focus:ring-0 active:bg-zinc-100 active:ring-1 active:ring-zinc-300",
        className,
      )}
      {...rest}
    >
      <ArrowUpRight className="h-6 w-6 text-zinc-500" strokeWidth={1.25} />
      <span className="font-sans text-sm font-normal leading-[1.375rem] text-zinc-800">
        {content}
      </span>
    </Button>
  );
};

const SearchHistoryChipSkeleton: React.FC<{ className?: string }> = ({
  className,
}) => {
  return (
    <Skeleton
      className={cn("h-[2.25rem] w-32 rounded-[1.5rem] bg-zinc-100", className)}
    />
  );
};

SearchHistoryChip.Skeleton = SearchHistoryChipSkeleton;
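The compound SearchHistoryChip.Skeleton pattern keeps the loading state symmetrical with the real chip. A minimal hypothetical call site follows; isLoading, recentSearches, and setQuery are illustrative names, not part of this diff:

// Hypothetical usage: show three placeholder chips while history loads.
{isLoading
  ? Array.from({ length: 3 }).map((_, i) => (
      <SearchHistoryChip.Skeleton key={i} />
    ))
  : recentSearches.map((term) => (
      <SearchHistoryChip
        key={term}
        content={term}
        onClick={() => setQuery(term)}
      />
    ))}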
@@ -0,0 +1,117 @@
import { Button } from "@/components/ui/button";
import { Skeleton } from "@/components/ui/skeleton";
import { cn } from "@/lib/utils";
import { Plus } from "lucide-react";
import Image from "next/image";
import React, { ButtonHTMLAttributes } from "react";
import { highlightText } from "./helpers";
import { formatTimeAgo } from "@/lib/utils/time";

interface Props extends ButtonHTMLAttributes<HTMLButtonElement> {
  title?: string;
  edited_time?: Date;
  version?: number;
  image_url?: string;
  highlightedText?: string;
}

interface UGCAgentBlockComponent extends React.FC<Props> {
  Skeleton: React.FC<{ className?: string }>;
}

export const UGCAgentBlock: UGCAgentBlockComponent = ({
  title,
  image_url,
  edited_time = new Date(),
  version,
  className,
  highlightedText,
  ...rest
}) => {
  return (
    <Button
      className={cn(
        "group flex h-[4.375rem] w-full min-w-[7.5rem] items-center justify-start gap-3 whitespace-normal rounded-[0.75rem] bg-zinc-50 p-[0.625rem] pr-[0.875rem] text-start shadow-none",
        "hover:cursor-default hover:bg-zinc-100 focus:ring-0 active:bg-zinc-100 active:ring-1 active:ring-zinc-300 disabled:cursor-not-allowed",
        className,
      )}
      {...rest}
    >
      {image_url && (
        <div className="relative h-[3.125rem] w-[5.625rem] overflow-hidden rounded-[0.375rem] bg-white">
          <Image
            src={image_url}
            alt="integration-icon"
            fill
            sizes="5.625rem"
            className="w-full object-contain group-disabled:opacity-50"
          />
        </div>
      )}
      <div className="flex flex-1 flex-col items-start gap-0.5">
        {title && (
          <span
            className={cn(
              "line-clamp-1 font-sans text-sm font-medium leading-[1.375rem] text-zinc-800 group-disabled:text-zinc-400",
            )}
          >
            {highlightText(title, highlightedText)}
          </span>
        )}
        <div className="flex items-center space-x-1.5">
          {edited_time && (
            <span
              className={cn(
                "line-clamp-1 font-sans text-xs font-normal leading-5 text-zinc-500 group-disabled:text-zinc-400",
              )}
            >
              Edited {formatTimeAgo(edited_time.toISOString())}
            </span>
          )}

          <span className="font-sans text-zinc-400">•</span>

          <span
            className={cn(
              "line-clamp-1 font-sans text-xs font-normal leading-5 text-zinc-500 group-disabled:text-zinc-400",
            )}
          >
            Version {version}
          </span>
        </div>
      </div>
      <div
        className={cn(
          "flex h-7 w-7 items-center justify-center rounded-[0.5rem] bg-zinc-700 group-disabled:bg-zinc-400",
        )}
      >
        <Plus className="h-5 w-5 text-zinc-50" strokeWidth={2} />
      </div>
    </Button>
  );
};

const UGCAgentBlockSkeleton: React.FC<{ className?: string }> = ({
  className,
}) => {
  return (
    <Skeleton
      className={cn(
        "flex h-[4.375rem] w-full min-w-[7.5rem] animate-pulse items-center justify-start gap-3 rounded-[0.75rem] bg-zinc-100 p-[0.625rem] pr-[0.875rem]",
        className,
      )}
    >
      <Skeleton className="h-[3.125rem] w-[5.625rem] rounded-[0.375rem] bg-zinc-200" />
      <div className="flex flex-1 flex-col items-start gap-0.5">
        <Skeleton className="h-[1.375rem] w-24 rounded bg-zinc-200" />
        <div className="flex items-center gap-1">
          <Skeleton className="h-5 w-16 rounded bg-zinc-200" />
          <Skeleton className="h-5 w-16 rounded bg-zinc-200" />
        </div>
      </div>
      <Skeleton className="h-7 w-7 rounded-[0.5rem] bg-zinc-200" />
    </Skeleton>
  );
};

UGCAgentBlock.Skeleton = UGCAgentBlockSkeleton;
@@ -0,0 +1,22 @@
export const highlightText = (
  text: string | undefined,
  highlight: string | undefined,
) => {
  if (!text || !highlight) return text;

  function escapeRegExp(s: string) {
    return s.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
  }

  const escaped = escapeRegExp(highlight);
  const parts = text.split(new RegExp(`(${escaped})`, "gi"));
  return parts.map((part, i) =>
    part.toLowerCase() === highlight?.toLowerCase() ? (
      <mark key={i} className="bg-transparent font-bold">
        {part}
      </mark>
    ) : (
      part
    ),
  );
};
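Because the search term is wrapped in a capture group, String.split keeps the matched substrings in the result array, and the "i" flag makes matching case-insensitive while escapeRegExp guards terms like "c++". A worked example of what the helper returns:

// highlightText("Weather Agent", "agent") splits "Weather Agent" on
// /(agent)/gi into ["Weather ", "Agent", ""], then wraps the match:
const parts = highlightText("Weather Agent", "agent");
// -> ["Weather ", <mark className="bg-transparent font-bold">Agent</mark>, ""]
// Non-matching parts come back as plain strings.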
@@ -0,0 +1,74 @@
"use client";

import { Breadcrumbs } from "@/components/molecules/Breadcrumbs/Breadcrumbs";
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { useAgentRunsView } from "./useAgentRunsView";
import { AgentRunsLoading } from "./components/AgentRunsLoading";
import { Button } from "@/components/atoms/Button/Button";
import { Plus } from "@phosphor-icons/react";

export function AgentRunsView() {
  const { response, ready, error, agentId } = useAgentRunsView();

  // Handle loading state
  if (!ready) {
    return <AgentRunsLoading />;
  }

  // Handle errors - check for the query error first, then response errors
  if (error || (response && response.status !== 200)) {
    return (
      <ErrorCard
        isSuccess={false}
        responseError={error || undefined}
        httpError={
          response?.status !== 200
            ? {
                status: response?.status,
                statusText: "Request failed",
              }
            : undefined
        }
        context="agent"
        onRetry={() => window.location.reload()}
      />
    );
  }

  // Handle missing data
  if (!response?.data) {
    return (
      <ErrorCard
        isSuccess={false}
        responseError={{ message: "No agent data found" }}
        context="agent"
        onRetry={() => window.location.reload()}
      />
    );
  }

  const agent = response.data;

  return (
    <div className="grid h-screen grid-cols-[25%_75%] gap-4 pt-8">
      {/* Left Sidebar - 25% */}
      <div className="bg-gray-50 p-4">
        <Button variant="primary" size="large" className="w-full">
          <Plus size={20} /> New Run
        </Button>
      </div>

      {/* Main Content - 75% */}
      <div className="p-4">
        <Breadcrumbs
          items={[
            { name: "My Library", link: "/library" },
            { name: agent.name, link: `/library/agents/${agentId}` },
          ]}
        />
        {/* Main content will go here */}
        <div className="mt-4 text-gray-600">Main content area</div>
      </div>
    </div>
  );
}
Some files were not shown because too many files have changed in this diff.