Mirror of https://github.com/Significant-Gravitas/AutoGPT.git
Synced 2026-01-12 00:28:31 -05:00

Compare commits: feat/launc...gmail-repl (74 commits)
| SHA1 |
|---|
| 516ae38329 |
| 8a68e03eb1 |
| da585a34e1 |
| bc43d05cac |
| 469b1fccbb |
| 1aa7e10cbd |
| 890bb3b8b4 |
| 2bb8e91040 |
| 76090f0ba2 |
| d20ac49211 |
| 5b12e02c4e |
| e0520f5e0a |
| a9530b7304 |
| 8a9c165faf |
| 476bfc6c84 |
| 61f17e5b97 |
| a54bed6d68 |
| 5502256bea |
| bd97727763 |
| aa256f21cd |
| 2848e62f8a |
| fa5ff9ca3c |
| 7c908c10b8 |
| 68cd1cb398 |
| f4538d6f5a |
| 4589b15450 |
| ccc4d0dc6c |
| 2610c4579f |
| 0c09b0c459 |
| 1105e6c0d2 |
| c6247f265e |
| 38610d1e7a |
| ebfbf31c73 |
| 4abe37396c |
| fa14bf461b |
| e2c33e3d2a |
| 650be0d1f7 |
| 35bd7f7f7a |
| 312cb0227f |
| a8feb3c8d0 |
| 5da5c2ecd6 |
| ba65fee862 |
| 908dcd7b4b |
| 542f951dd8 |
| 72938590f2 |
| 5d364e13f6 |
| 32513b26ab |
| bf92e7dbc8 |
| 6fce3a09ea |
| 9158d4b6a2 |
| 2403931c2e |
| af58b316a2 |
| 03e3e2ea9a |
| 6bb6a081a2 |
| df20b70f44 |
| 21faf1b677 |
| b53c373a59 |
| 4bfeddc03d |
| af7d56612d |
| 0dd30e275c |
| a135f09336 |
| 2d436caa84 |
| 34dd218a91 |
| 41f500790f |
| 793de77e76 |
| a2059c6023 |
| b9c3920227 |
| abba10b649 |
| 6c34790b42 |
| c168277b1d |
| 89eb5d1189 |
| e13e0d4376 |
| f4a732373b |
| 28d85ad61c |
**.dockerignore**

````diff
@@ -15,6 +15,7 @@
 !autogpt_platform/backend/pyproject.toml
 !autogpt_platform/backend/poetry.lock
 !autogpt_platform/backend/README.md
+!autogpt_platform/backend/.env

 # Platform - Market
 !autogpt_platform/market/market/
@@ -27,6 +28,7 @@
 # Platform - Frontend
 !autogpt_platform/frontend/src/
 !autogpt_platform/frontend/public/
 !autogpt_platform/frontend/scripts/
 !autogpt_platform/frontend/package.json
 !autogpt_platform/frontend/pnpm-lock.yaml
 !autogpt_platform/frontend/tsconfig.json
@@ -34,6 +36,7 @@
 ## config
 !autogpt_platform/frontend/*.config.*
 !autogpt_platform/frontend/.env.*
+!autogpt_platform/frontend/.env

 # Classic - AutoGPT
 !classic/original_autogpt/autogpt/
````
**.github/PULL_REQUEST_TEMPLATE.md** (vendored, 3 lines changed)
````diff
@@ -24,7 +24,8 @@
 </details>

 #### For configuration changes:

-- [ ] `.env.example` is updated or already compatible with my changes
+- [ ] `.env.default` is updated or already compatible with my changes
 - [ ] `docker-compose.yml` is updated or already compatible with my changes
 - [ ] I have included a list of my configuration changes in the PR description (under **Changes**)
````
**.github/copilot-instructions.md** (vendored, new file, 244 lines)
@@ -0,0 +1,244 @@

# GitHub Copilot Instructions for AutoGPT

This file provides comprehensive onboarding information for the GitHub Copilot coding agent to work efficiently with the AutoGPT repository.

## Repository Overview

**AutoGPT** is a powerful platform for creating, deploying, and managing continuous AI agents that automate complex workflows. This is a large monorepo (~150MB) containing multiple components:

- **AutoGPT Platform** (`autogpt_platform/`) - Main focus: modern AI agent platform (Polyform Shield License)
- **Classic AutoGPT** (`classic/`) - Legacy agent system (MIT License)
- **Documentation** (`docs/`) - MkDocs-based documentation site
- **Infrastructure** - Docker configurations, CI/CD, and development tools

**Primary Languages & Frameworks:**

- **Backend**: Python 3.10-3.13, FastAPI, Prisma ORM, PostgreSQL, RabbitMQ
- **Frontend**: TypeScript, Next.js 15, React, Tailwind CSS, Radix UI
- **Development**: Docker, Poetry, pnpm, Playwright, Storybook

## Build and Validation Instructions

### Essential Setup Commands

**Always run these commands in the correct directory and in this order:**

1. **Initial Setup** (required once):

   ```bash
   # Clone and enter repository
   git clone <repo> && cd AutoGPT

   # Start all services (database, redis, rabbitmq, clamav)
   cd autogpt_platform && docker compose --profile local up deps --build --detach
   ```

2. **Backend Setup** (always run before backend development):

   ```bash
   cd autogpt_platform/backend
   poetry install                  # Install dependencies
   poetry run prisma migrate dev   # Run database migrations
   poetry run prisma generate      # Generate Prisma client
   ```

3. **Frontend Setup** (always run before frontend development):

   ```bash
   cd autogpt_platform/frontend
   pnpm install  # Install dependencies
   ```

### Runtime Requirements

**Critical:** Always ensure Docker services are running before starting development:

```bash
cd autogpt_platform && docker compose --profile local up deps --build --detach
```

**Python Version:** Use Python 3.11 (required; managed by Poetry via pyproject.toml)
**Node.js Version:** Use Node.js 21+ with the pnpm package manager

### Development Commands

**Backend Development:**

```bash
cd autogpt_platform/backend
poetry run serve                   # Start development server (port 8000)
poetry run test                    # Run all tests (takes ~5 minutes)
poetry run pytest path/to/test.py  # Run a specific test
poetry run format                  # Format code (Black + isort) - always run first
poetry run lint                    # Lint code (ruff) - run after format
```

**Frontend Development:**

```bash
cd autogpt_platform/frontend
pnpm dev        # Start development server (port 3000) - use for active development
pnpm build      # Build for production (only needed for E2E tests or deployment)
pnpm test       # Run Playwright E2E tests (requires build first)
pnpm test-ui    # Run tests with UI
pnpm format     # Format and lint code
pnpm storybook  # Start component development server
```

### Testing Strategy

**Backend Tests:**

- **Block Tests**: `poetry run pytest backend/blocks/test/test_block.py -xvs` (validates all blocks)
- **Specific Block**: `poetry run pytest 'backend/blocks/test/test_block.py::test_available_blocks[BlockName]' -xvs`
- **Snapshot Tests**: Use `--snapshot-update` when output changes; always review with `git diff`

**Frontend Tests:**

- **E2E Tests**: Always run `pnpm dev` before `pnpm test` (Playwright requires a running instance)
- **Component Tests**: Use Storybook for isolated component development

### Critical Validation Steps

**Before committing changes:**

1. Run `poetry run format` (backend) and `pnpm format` (frontend)
2. Ensure all tests pass in modified areas
3. Verify Docker services are still running
4. Check that database migrations apply cleanly

**Common Issues & Workarounds:**

- **Prisma issues**: Run `poetry run prisma generate` after schema changes
- **Permission errors**: Ensure Docker has proper permissions
- **Port conflicts**: Check the `docker-compose.yml` file for the current list of exposed ports; `docker compose ps` lists all mapped ports
- **Test timeouts**: Backend tests can take 5+ minutes; use the `-x` flag to stop on the first failure

## Project Layout & Architecture

### Core Architecture

**AutoGPT Platform** (`autogpt_platform/`):

- `backend/` - FastAPI server with async support
  - `backend/backend/` - Core API logic
  - `backend/blocks/` - Agent execution blocks
  - `backend/data/` - Database models and schemas
  - `schema.prisma` - Database schema definition
- `frontend/` - Next.js application
  - `src/app/` - App Router pages and layouts
  - `src/components/` - Reusable React components
  - `src/lib/` - Utilities and configurations
- `autogpt_libs/` - Shared Python utilities
- `docker-compose.yml` - Development stack orchestration

**Key Configuration Files:**

- `pyproject.toml` - Python dependencies and tooling
- `package.json` - Node.js dependencies and scripts
- `schema.prisma` - Database schema and migrations
- `next.config.mjs` - Next.js configuration
- `tailwind.config.ts` - Styling configuration

### Security & Middleware

**Cache Protection**: The backend includes middleware that prevents sensitive data from being cached in browsers/proxies
**Authentication**: JWT-based with Supabase integration
**User ID Validation**: All data access requires user ID checks - verify this for any `data/*.py` changes

### Development Workflow

**GitHub Actions**: Multiple CI/CD workflows in `.github/workflows/`

- `platform-backend-ci.yml` - Backend testing and validation
- `platform-frontend-ci.yml` - Frontend testing and validation
- `platform-fullstack-ci.yml` - End-to-end integration tests

**Pre-commit Hooks**: Run linting and formatting checks
**Conventional Commits**: Use the format `type(scope): description` (e.g., `feat(backend): add API`)

### Key Source Files

**Backend Entry Points:**

- `backend/backend/server/server.py` - FastAPI application setup
- `backend/backend/data/` - Database models and user management
- `backend/blocks/` - Agent execution blocks and logic

**Frontend Entry Points:**

- `frontend/src/app/layout.tsx` - Root application layout
- `frontend/src/app/page.tsx` - Home page
- `frontend/src/lib/supabase/` - Authentication and database client

**Protected Routes**: Update `frontend/lib/supabase/middleware.ts` when adding protected routes

### Agent Block System

Agents are built using a visual block-based system where each block performs a single action. Blocks are defined in `backend/blocks/` and must include:

- A block definition with input/output schemas
- Execution logic with proper error handling
- Tests validating functionality

### Database & ORM

**Prisma ORM** with a PostgreSQL backend, including pgvector for embeddings:

- Schema in `schema.prisma`
- Migrations in `backend/migrations/`
- Always run `prisma migrate dev` and `prisma generate` after schema changes
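As a usage illustration (a minimal sketch: `prisma-client-py` exposes one accessor per model in `schema.prisma`; the `agentgraph` accessor assumed here corresponds to an `AgentGraph` model and is not confirmed against the actual schema):

```python
import asyncio

from prisma import Prisma  # client generated by `poetry run prisma generate`


async def main() -> None:
    db = Prisma()
    await db.connect()
    try:
        # Accessor names are lowercased model names from schema.prisma;
        # `agentgraph` (for an AgentGraph model) is an assumption here.
        count = await db.agentgraph.count()
        print(f"{count} agent graphs in the database")
    finally:
        await db.disconnect()


asyncio.run(main())
```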
## Environment Configuration

### Configuration Files Priority Order

1. **Backend**: `/backend/.env.default` → `/backend/.env` (user overrides)
2. **Frontend**: `/frontend/.env.default` → `/frontend/.env` (user overrides)
3. **Platform**: `/.env.default` (Supabase/shared) → `/.env` (user overrides)
4. Docker Compose `environment:` sections override file-based config
5. Shell environment variables have the highest precedence
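The same precedence can be illustrated with `python-dotenv` (a hedged sketch of the layering idea, not the platform's actual loader):

```python
import os

from dotenv import load_dotenv  # pip install python-dotenv

# With override=False, variables that are already set win. Loading the
# higher-precedence file first therefore reproduces the documented order:
# shell environment > .env (user overrides) > .env.default (defaults).
load_dotenv(".env", override=False)
load_dotenv(".env.default", override=False)

print(os.environ.get("DATABASE_URL", "<unset>"))
```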
### Docker Environment Setup

- All services use hardcoded defaults (no `${VARIABLE}` substitutions)
- The `env_file` directive loads variables INTO containers at runtime
- Backend/Frontend services use YAML anchors for consistent configuration
- Copy `.env.default` files to `.env` for local development customization

## Advanced Development Patterns

### Adding New Blocks

1. Create a file in `/backend/backend/blocks/`
2. Inherit from the `Block` base class with input/output schemas
3. Implement the `run` method with proper error handling
4. Generate the block UUID using `uuid.uuid4()`
5. Register in the block registry
6. Write tests alongside the block implementation
7. Consider how inputs/outputs connect with other blocks in the graph editor
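A minimal sketch of such a block (the `Block`/`BlockSchema`/`SchemaField` names follow the pattern of existing blocks, but the exact import paths and `run` signature should be checked against `backend/blocks/` before copying):

```python
import uuid

from backend.data.block import Block, BlockOutput, BlockSchema  # assumed import path
from backend.data.model import SchemaField  # assumed schema-field helper


class ReverseTextBlock(Block):
    """Illustrative block that reverses its input string."""

    class Input(BlockSchema):
        text: str = SchemaField(description="Text to reverse")

    class Output(BlockSchema):
        reversed_text: str = SchemaField(description="The input text, reversed")

    def __init__(self):
        super().__init__(
            id=str(uuid.uuid4()),  # in practice, generate once and hard-code the UUID
            description="Reverses the input text",
            input_schema=ReverseTextBlock.Input,
            output_schema=ReverseTextBlock.Output,
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        # Each yielded pair is (output_name, value), matching the Output schema
        yield "reversed_text", input_data.text[::-1]
```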
### API Development

1. Update routes in `/backend/backend/server/routers/`
2. Add/update Pydantic models in the same directory
3. Write tests alongside route files
4. For `data/*.py` changes, validate user ID checks
5. Run `poetry run test` to verify changes
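A route added this way would look roughly like the following self-contained sketch (the model, path, and auth dependency are illustrative, not actual platform code):

```python
from fastapi import APIRouter, Depends
from pydantic import BaseModel

router = APIRouter()


class GraphSummary(BaseModel):
    """Response model; Pydantic models live next to the routes."""

    id: str
    name: str


async def get_user_id() -> str:
    """Stand-in for the platform's real auth dependency (hypothetical)."""
    return "user-123"


@router.get("/graphs/{graph_id}", response_model=GraphSummary)
async def get_graph(
    graph_id: str, user_id: str = Depends(get_user_id)
) -> GraphSummary:
    # Data access must be scoped to the authenticated user (user ID check)
    return GraphSummary(id=graph_id, name=f"graph owned by {user_id}")
```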
### Frontend Development

1. Components go in `/frontend/src/components/`
2. Use existing UI components from `/frontend/src/components/ui/`
3. Add Storybook stories for component development
4. Test user-facing features with Playwright E2E tests
5. Update protected routes in the middleware when needed

### Security Guidelines

**Cache Protection Middleware** (`/backend/backend/server/middleware/security.py`):

- Default: disables caching for ALL endpoints with `Cache-Control: no-store, no-cache, must-revalidate, private`
- Uses an allow-list approach for cacheable paths (static assets, health checks, public pages)
- Prevents sensitive data from being cached in browsers/proxies
- Add new cacheable endpoints to `CACHEABLE_PATHS`
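The shape of such a middleware, sketched with Starlette primitives (only the `CACHEABLE_PATHS` name and the header value come from the description above; the rest is illustrative, not the repository's actual implementation):

```python
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request

CACHEABLE_PATHS = {"/health", "/static"}  # illustrative entries


class CacheProtectionMiddleware(BaseHTTPMiddleware):
    """Deny caching by default; allow it only for explicitly listed paths."""

    async def dispatch(self, request: Request, call_next):
        response = await call_next(request)
        if not any(request.url.path.startswith(p) for p in CACHEABLE_PATHS):
            response.headers["Cache-Control"] = (
                "no-store, no-cache, must-revalidate, private"
            )
        return response
```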
### CI/CD Alignment

The repository has comprehensive CI workflows that test:

- **Backend**: Python 3.11-3.13, services (Redis/RabbitMQ/ClamAV), Prisma migrations, Poetry lock validation
- **Frontend**: Node.js 21, pnpm, Playwright with the Docker Compose stack, API schema validation
- **Integration**: Full-stack type checking and E2E testing

Match these patterns when developing locally - the Copilot setup environment mirrors these CI configurations.

## Collaboration with Other AI Assistants

This repository is actively developed with assistance from Claude (via CLAUDE.md files). When working on this codebase:

- Check for existing CLAUDE.md files that provide additional context
- Follow established patterns and conventions already in the codebase
- Maintain consistency with existing code style and architecture
- Consider that changes may be reviewed and extended by both human developers and AI assistants

## Trust These Instructions

These instructions are comprehensive and tested. Only perform additional searches if:

1. The information here is incomplete for your specific task
2. You encounter errors not covered by the workarounds
3. You need to understand implementation details not covered above

For detailed platform development patterns, refer to `autogpt_platform/CLAUDE.md` and `AGENTS.md` in the repository root.
**.github/workflows/copilot-setup-steps.yml** (vendored, new file, 302 lines)
@@ -0,0 +1,302 @@

```yaml
name: "Copilot Setup Steps"

# Automatically run the setup steps when they are changed to allow for easy validation, and
# allow manual testing through the repository's "Actions" tab
on:
  workflow_dispatch:
  push:
    paths:
      - .github/workflows/copilot-setup-steps.yml
  pull_request:
    paths:
      - .github/workflows/copilot-setup-steps.yml

jobs:
  # The job MUST be called `copilot-setup-steps` or it will not be picked up by Copilot.
  copilot-setup-steps:
    runs-on: ubuntu-latest
    timeout-minutes: 45

    # Set the permissions to the lowest permissions possible needed for your steps.
    # Copilot will be given its own token for its operations.
    permissions:
      # If you want to clone the repository as part of your setup steps, for example to install dependencies, you'll need the `contents: read` permission. If you don't clone the repository in your setup steps, Copilot will do this for you automatically after the steps complete.
      contents: read

    # You can define any steps you want, and they will run before the agent starts.
    # If you do not check out your code, Copilot will do this for you.
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      # Backend Python/Poetry setup (mirrors platform-backend-ci.yml)
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.11" # Use standard version matching CI

      - name: Set up Python dependency cache
        uses: actions/cache@v4
        with:
          path: ~/.cache/pypoetry
          key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}

      - name: Install Poetry
        run: |
          # Extract Poetry version from backend/poetry.lock (matches CI)
          cd autogpt_platform/backend
          HEAD_POETRY_VERSION=$(python3 ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
          echo "Found Poetry version ${HEAD_POETRY_VERSION} in backend/poetry.lock"

          # Install Poetry
          curl -sSL https://install.python-poetry.org | POETRY_VERSION=$HEAD_POETRY_VERSION python3 -

          # Add Poetry to PATH
          echo "$HOME/.local/bin" >> $GITHUB_PATH

      - name: Check poetry.lock
        working-directory: autogpt_platform/backend
        run: |
          poetry lock
          if ! git diff --quiet --ignore-matching-lines="^# " poetry.lock; then
            echo "Warning: poetry.lock not up to date, but continuing for setup"
            git checkout poetry.lock # Reset for clean setup
          fi

      - name: Install Python dependencies
        working-directory: autogpt_platform/backend
        run: poetry install

      - name: Generate Prisma Client
        working-directory: autogpt_platform/backend
        run: poetry run prisma generate

      # Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "21"

      - name: Enable corepack
        run: corepack enable

      - name: Set pnpm store directory
        run: |
          pnpm config set store-dir ~/.pnpm-store
          echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV

      - name: Cache frontend dependencies
        uses: actions/cache@v4
        with:
          path: ~/.pnpm-store
          key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
          restore-keys: |
            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
            ${{ runner.os }}-pnpm-

      - name: Install JavaScript dependencies
        working-directory: autogpt_platform/frontend
        run: pnpm install --frozen-lockfile

      # Install Playwright browsers for frontend testing
      # NOTE: Disabled to save ~1 minute of setup time. Re-enable if Copilot needs browser automation (e.g., for MCP)
      # - name: Install Playwright browsers
      #   working-directory: autogpt_platform/frontend
      #   run: pnpm playwright install --with-deps chromium

      # Docker setup for development environment
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Copy default environment files
        working-directory: autogpt_platform
        run: |
          # Copy default environment files for development
          cp .env.default .env
          cp backend/.env.default backend/.env
          cp frontend/.env.default frontend/.env

      # Phase 1: Cache and load Docker images for faster setup
      - name: Set up Docker image cache
        id: docker-cache
        uses: actions/cache@v4
        with:
          path: ~/docker-cache
          # Use a versioned key for cache invalidation when image list changes
          key: docker-images-v2-${{ runner.os }}-${{ hashFiles('.github/workflows/copilot-setup-steps.yml') }}
          restore-keys: |
            docker-images-v2-${{ runner.os }}-
            docker-images-v1-${{ runner.os }}-

      - name: Load or pull Docker images
        working-directory: autogpt_platform
        run: |
          mkdir -p ~/docker-cache

          # Define image list for easy maintenance
          IMAGES=(
            "redis:latest"
            "rabbitmq:management"
            "clamav/clamav-debian:latest"
            "busybox:latest"
            "kong:2.8.1"
            "supabase/gotrue:v2.170.0"
            "supabase/postgres:15.8.1.049"
            "supabase/postgres-meta:v0.86.1"
            "supabase/studio:20250224-d10db0f"
          )

          # Check if any cached tar files exist (more reliable than cache-hit)
          if ls ~/docker-cache/*.tar 1> /dev/null 2>&1; then
            echo "Docker cache found, loading images in parallel..."
            for image in "${IMAGES[@]}"; do
              # Convert image name to filename (replace : and / with -)
              filename=$(echo "$image" | tr ':/' '--')
              if [ -f ~/docker-cache/${filename}.tar ]; then
                echo "Loading $image..."
                docker load -i ~/docker-cache/${filename}.tar || echo "Warning: Failed to load $image from cache" &
              fi
            done
            wait
            echo "All cached images loaded"
          else
            echo "No Docker cache found, pulling images in parallel..."
            # Pull all images in parallel
            for image in "${IMAGES[@]}"; do
              docker pull "$image" &
            done
            wait

            # Only save cache on main branches (not PRs) to avoid cache pollution
            if [[ "${{ github.ref }}" == "refs/heads/master" ]] || [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then
              echo "Saving Docker images to cache in parallel..."
              for image in "${IMAGES[@]}"; do
                # Convert image name to filename (replace : and / with -)
                filename=$(echo "$image" | tr ':/' '--')
                echo "Saving $image..."
                docker save -o ~/docker-cache/${filename}.tar "$image" || echo "Warning: Failed to save $image" &
              done
              wait
              echo "Docker image cache saved"
            else
              echo "Skipping cache save for PR/feature branch"
            fi
          fi

          echo "Docker images ready for use"

      # Phase 2: Build migrate service with GitHub Actions cache
      - name: Build migrate Docker image with cache
        working-directory: autogpt_platform
        run: |
          # Build the migrate image with buildx for GHA caching
          docker buildx build \
            --cache-from type=gha \
            --cache-to type=gha,mode=max \
            --target migrate \
            --tag autogpt_platform-migrate:latest \
            --load \
            -f backend/Dockerfile \
            ..

      # Start services using pre-built images
      - name: Start Docker services for development
        working-directory: autogpt_platform
        run: |
          # Start essential services (migrate image already built with correct tag)
          docker compose --profile local up deps --no-build --detach
          echo "Waiting for services to be ready..."

          # Wait for database to be ready
          echo "Checking database readiness..."
          timeout 30 sh -c 'until docker compose exec -T db pg_isready -U postgres 2>/dev/null; do
            echo "  Waiting for database..."
            sleep 2
          done' && echo "✅ Database is ready" || echo "⚠️ Database ready check timeout after 30s, continuing..."

          # Check migrate service status
          echo "Checking migration status..."
          docker compose ps migrate || echo "  Migrate service not visible in ps output"

          # Wait for migrate service to complete
          echo "Waiting for migrations to complete..."
          timeout 30 bash -c '
            ATTEMPTS=0
            while [ $ATTEMPTS -lt 15 ]; do
              ATTEMPTS=$((ATTEMPTS + 1))

              # Check using docker directly (more reliable than docker compose ps)
              CONTAINER_STATUS=$(docker ps -a --filter "label=com.docker.compose.service=migrate" --format "{{.Status}}" | head -1)

              if [ -z "$CONTAINER_STATUS" ]; then
                echo "  Attempt $ATTEMPTS: Migrate container not found yet..."
              elif echo "$CONTAINER_STATUS" | grep -q "Exited (0)"; then
                echo "✅ Migrations completed successfully"
                docker compose logs migrate --tail=5 2>/dev/null || true
                exit 0
              elif echo "$CONTAINER_STATUS" | grep -q "Exited ([1-9]"; then
                EXIT_CODE=$(echo "$CONTAINER_STATUS" | grep -oE "Exited \([0-9]+\)" | grep -oE "[0-9]+")
                echo "❌ Migrations failed with exit code: $EXIT_CODE"
                echo "Migration logs:"
                docker compose logs migrate --tail=20 2>/dev/null || true
                exit 1
              elif echo "$CONTAINER_STATUS" | grep -q "Up"; then
                echo "  Attempt $ATTEMPTS: Migrate container is running... ($CONTAINER_STATUS)"
              else
                echo "  Attempt $ATTEMPTS: Migrate container status: $CONTAINER_STATUS"
              fi

              sleep 2
            done

            echo "⚠️ Timeout: Could not determine migration status after 30 seconds"
            echo "Final container check:"
            docker ps -a --filter "label=com.docker.compose.service=migrate" || true
            echo "Migration logs (if available):"
            docker compose logs migrate --tail=10 2>/dev/null || echo "  No logs available"
          ' || echo "⚠️ Migration check completed with warnings, continuing..."

          # Brief wait for other services to stabilize
          echo "Waiting 5 seconds for other services to stabilize..."
          sleep 5

      # Verify installations and provide environment info
      - name: Verify setup and show environment info
        run: |
          echo "=== Python Setup ==="
          python --version
          poetry --version

          echo "=== Node.js Setup ==="
          node --version
          pnpm --version

          echo "=== Additional Tools ==="
          docker --version
          docker compose version
          gh --version || true

          echo "=== Services Status ==="
          cd autogpt_platform
          docker compose ps || true

          echo "=== Backend Dependencies ==="
          cd backend
          poetry show | head -10 || true

          echo "=== Frontend Dependencies ==="
          cd ../frontend
          pnpm list --depth=0 | head -10 || true

          echo "=== Environment Files ==="
          ls -la ../.env* || true
          ls -la .env* || true
          ls -la ../backend/.env* || true

          echo "✅ AutoGPT Platform development environment setup complete!"
          echo "🚀 Ready for development with Docker services running"
          echo "📝 Backend server: poetry run serve (port 8000)"
          echo "🌐 Frontend server: pnpm dev (port 3000)"
```
**.github/workflows/platform-backend-ci.yml** (vendored, 2 lines changed)
````diff
@@ -32,7 +32,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: ["3.11"]
+        python-version: ["3.11", "3.12", "3.13"]
     runs-on: ubuntu-latest

     services:
````
**.github/workflows/platform-frontend-ci.yml** (vendored, 46 lines changed)
````diff
@@ -82,37 +82,6 @@ jobs:
       - name: Run lint
        run: pnpm lint

-  type-check:
-    runs-on: ubuntu-latest
-    needs: setup
-
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-
-      - name: Set up Node.js
-        uses: actions/setup-node@v4
-        with:
-          node-version: "21"
-
-      - name: Enable corepack
-        run: corepack enable
-
-      - name: Restore dependencies cache
-        uses: actions/cache@v4
-        with:
-          path: ~/.pnpm-store
-          key: ${{ needs.setup.outputs.cache-key }}
-          restore-keys: |
-            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
-            ${{ runner.os }}-pnpm-
-
-      - name: Install dependencies
-        run: pnpm install --frozen-lockfile
-
-      - name: Run tsc check
-        run: pnpm type-check
-
   chromatic:
     runs-on: ubuntu-latest
     needs: setup
@@ -176,11 +145,7 @@ jobs:
       - name: Copy default supabase .env
         run: |
-          cp ../.env.example ../.env
-
-      - name: Copy backend .env
-        run: |
-          cp ../backend/.env.example ../backend/.env
+          cp ../.env.default ../.env

       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
@@ -252,15 +217,6 @@ jobs:
       - name: Install dependencies
         run: pnpm install --frozen-lockfile

-      - name: Setup .env
-        run: cp .env.example .env
-
-      - name: Build frontend
-        run: pnpm build --turbo
-        # uses Turbopack, much faster and safe enough for a test pipeline
-        env:
-          NEXT_PUBLIC_PW_TEST: true
-
       - name: Install Browser 'chromium'
         run: pnpm playwright install --with-deps chromium
````
**.github/workflows/platform-fullstack-ci.yml** (vendored, new file, 132 lines)
@@ -0,0 +1,132 @@

```yaml
name: AutoGPT Platform - Frontend CI

on:
  push:
    branches: [master, dev]
    paths:
      - ".github/workflows/platform-fullstack-ci.yml"
      - "autogpt_platform/**"
  pull_request:
    paths:
      - ".github/workflows/platform-fullstack-ci.yml"
      - "autogpt_platform/**"
  merge_group:

defaults:
  run:
    shell: bash
    working-directory: autogpt_platform/frontend

jobs:
  setup:
    runs-on: ubuntu-latest
    outputs:
      cache-key: ${{ steps.cache-key.outputs.key }}

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "21"

      - name: Enable corepack
        run: corepack enable

      - name: Generate cache key
        id: cache-key
        run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}" >> $GITHUB_OUTPUT

      - name: Cache dependencies
        uses: actions/cache@v4
        with:
          path: ~/.pnpm-store
          key: ${{ steps.cache-key.outputs.key }}
          restore-keys: |
            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
            ${{ runner.os }}-pnpm-

      - name: Install dependencies
        run: pnpm install --frozen-lockfile

  types:
    runs-on: ubuntu-latest
    needs: setup
    strategy:
      fail-fast: false

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          submodules: recursive

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "21"

      - name: Enable corepack
        run: corepack enable

      - name: Copy default supabase .env
        run: |
          cp ../.env.default ../.env

      - name: Copy backend .env
        run: |
          cp ../backend/.env.default ../backend/.env

      - name: Run docker compose
        run: |
          docker compose -f ../docker-compose.yml --profile local --profile deps_backend up -d

      - name: Restore dependencies cache
        uses: actions/cache@v4
        with:
          path: ~/.pnpm-store
          key: ${{ needs.setup.outputs.cache-key }}
          restore-keys: |
            ${{ runner.os }}-pnpm-

      - name: Install dependencies
        run: pnpm install --frozen-lockfile

      - name: Setup .env
        run: cp .env.default .env

      - name: Wait for services to be ready
        run: |
          echo "Waiting for rest_server to be ready..."
          timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
          echo "Waiting for database to be ready..."
          timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."

      - name: Generate API queries
        run: pnpm generate:api:force

      - name: Check for API schema changes
        run: |
          if ! git diff --exit-code src/app/api/openapi.json; then
            echo "❌ API schema changes detected in src/app/api/openapi.json"
            echo ""
            echo "The openapi.json file has been modified after running 'pnpm generate:api-all'."
            echo "This usually means changes have been made in the BE endpoints without updating the Frontend."
            echo "The API schema is now out of sync with the Front-end queries."
            echo ""
            echo "To fix this:"
            echo "1. Pull the backend 'docker compose pull && docker compose up -d --build --force-recreate'"
            echo "2. Run 'pnpm generate:api' locally"
            echo "3. Run 'pnpm types' locally"
            echo "4. Fix any TypeScript errors that may have been introduced"
            echo "5. Commit and push your changes"
            echo ""
            exit 1
          else
            echo "✅ No API schema changes detected"
          fi

      - name: Run Typescript checks
        run: pnpm types
```
**.gitignore** (vendored, 3 lines changed)
````diff
@@ -5,6 +5,8 @@ classic/original_autogpt/*.json
 auto_gpt_workspace/*
 *.mpeg
-.env
+# Root .env files
+/.env
 azure.yaml
 .vscode
 .idea/*
@@ -121,7 +123,6 @@ celerybeat.pid

 # Environments
 .direnv/
-.env
 .venv
 env/
 venv*/
````
**.pre-commit-config.yaml**

````diff
@@ -235,7 +235,7 @@ repos:
     hooks:
       - id: tsc
         name: Typecheck - AutoGPT Platform - Frontend
-        entry: bash -c 'cd autogpt_platform/frontend && pnpm type-check'
+        entry: bash -c 'cd autogpt_platform/frontend && pnpm types'
         files: ^autogpt_platform/frontend/
         types: [file]
         language: system
````
**README.md** (10 lines changed)

````diff
@@ -3,6 +3,16 @@
 [](https://discord.gg/autogpt)
 [](https://twitter.com/Auto_GPT)

+<!-- Keep these links. Translations will automatically update with the README. -->
+[Deutsch](https://zdoc.app/de/Significant-Gravitas/AutoGPT) |
+[Español](https://zdoc.app/es/Significant-Gravitas/AutoGPT) |
+[français](https://zdoc.app/fr/Significant-Gravitas/AutoGPT) |
+[日本語](https://zdoc.app/ja/Significant-Gravitas/AutoGPT) |
+[한국어](https://zdoc.app/ko/Significant-Gravitas/AutoGPT) |
+[Português](https://zdoc.app/pt/Significant-Gravitas/AutoGPT) |
+[Русский](https://zdoc.app/ru/Significant-Gravitas/AutoGPT) |
+[中文](https://zdoc.app/zh/Significant-Gravitas/AutoGPT)
+
 **AutoGPT** is a powerful platform that allows you to create, deploy, and manage continuous AI agents that automate complex workflows.

 ## Hosting Options
````
**CLAUDE.md**

````diff
@@ -1,9 +1,11 @@
 # CLAUDE.md

 This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.

 ## Repository Overview

 AutoGPT Platform is a monorepo containing:

 - **Backend** (`/backend`): Python FastAPI server with async support
 - **Frontend** (`/frontend`): Next.js React application
 - **Shared Libraries** (`/autogpt_libs`): Common Python utilities
@@ -11,6 +13,7 @@ AutoGPT Platform is a monorepo containing:
 ## Essential Commands

 ### Backend Development

 ```bash
 # Install dependencies
 cd backend && poetry install
@@ -41,6 +44,7 @@ poetry run pytest 'backend/blocks/test/test_block.py::test_available_blocks[GetC
 poetry run format  # Black + isort
 poetry run lint    # ruff
 ```

+More details can be found in TESTING.md

 #### Creating/Updating Snapshots
@@ -53,8 +57,8 @@ poetry run pytest path/to/test.py --snapshot-update

 ⚠️ **Important**: Always review snapshot changes before committing! Use `git diff` to verify the changes are expected.
````
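A snapshot test in this style looks roughly like the following (a sketch assuming a pytest snapshot fixture such as syrupy's; the endpoint and application import path are illustrative):

```python
import pytest
from fastapi.testclient import TestClient

from backend.server.server import app  # assumed application import path


@pytest.fixture
def client() -> TestClient:
    return TestClient(app)


def test_health_response_snapshot(client: TestClient, snapshot):
    # The first run records the response; later runs compare against it.
    # Refresh intentionally with: poetry run pytest path/to/test.py --snapshot-update
    response = client.get("/health")
    assert response.status_code == 200
    assert response.json() == snapshot
```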
````diff
 ### Frontend Development

 ```bash
 # Install dependencies
 cd frontend && npm install
@@ -72,12 +76,13 @@ npm run storybook
 npm run build

 # Type checking
-npm run type-check
+npm run types
 ```

 ## Architecture Overview

 ### Backend Architecture

 - **API Layer**: FastAPI with REST and WebSocket endpoints
 - **Database**: PostgreSQL with Prisma ORM, includes pgvector for embeddings
 - **Queue System**: RabbitMQ for async task processing
@@ -86,6 +91,7 @@ npm run type-check
 - **Security**: Cache protection middleware prevents sensitive data caching in browsers/proxies

 ### Frontend Architecture

 - **Framework**: Next.js App Router with React Server Components
 - **State Management**: React hooks + Supabase client for real-time updates
 - **Workflow Builder**: Visual graph editor using @xyflow/react
@@ -93,6 +99,7 @@
 - **Feature Flags**: LaunchDarkly integration

 ### Key Concepts

 1. **Agent Graphs**: Workflow definitions stored as JSON, executed by the backend
 2. **Blocks**: Reusable components in `/backend/blocks/` that perform specific tasks
 3. **Integrations**: OAuth and API connections stored per user
@@ -100,13 +107,16 @@
 5. **Virus Scanning**: ClamAV integration for file upload security

 ### Testing Approach

 - Backend uses pytest with snapshot testing for API responses
 - Test files are colocated with source files (`*_test.py`)
 - Frontend uses Playwright for E2E tests
 - Component testing via Storybook

 ### Database Schema

 Key models (defined in `/backend/schema.prisma`):

 - `User`: Authentication and profile data
 - `AgentGraph`: Workflow definitions with version control
 - `AgentGraphExecution`: Execution history and results
@@ -114,13 +124,31 @@
 - `StoreListing`: Marketplace listings for sharing agents

 ### Environment Configuration

 - Backend: `.env` file in `/backend`
 - Frontend: `.env.local` file in `/frontend`
 - Both require Supabase credentials and API keys for various services

 #### Configuration Files

 - **Backend**: `/backend/.env.default` (defaults) → `/backend/.env` (user overrides)
 - **Frontend**: `/frontend/.env.default` (defaults) → `/frontend/.env` (user overrides)
 - **Platform**: `/.env.default` (Supabase/shared defaults) → `/.env` (user overrides)

 #### Docker Environment Loading Order

 1. `.env.default` files provide base configuration (tracked in git)
 2. `.env` files provide user-specific overrides (gitignored)
 3. Docker Compose `environment:` sections provide service-specific overrides
 4. Shell environment variables have highest precedence

 #### Key Points

 - All services use hardcoded defaults in docker-compose files (no `${VARIABLE}` substitutions)
 - The `env_file` directive loads variables INTO containers at runtime
 - Backend/Frontend services use YAML anchors for consistent configuration
 - Supabase services (`db/docker/docker-compose.yml`) follow the same pattern

 ### Common Development Tasks

 **Adding a new block:**

 1. Create new file in `/backend/backend/blocks/`
 2. Inherit from `Block` base class
 3. Define input/output schemas
@@ -128,16 +156,18 @@
 5. Register in block registry
 6. Generate the block uuid using `uuid.uuid4()`

-Note: when making many new blocks analyze the interfaces for each of these blcoks and picture if they would go well together in a graph based editor or would they struggle to connect productively?
+Note: when making many new blocks analyze the interfaces for each of these blocks and picture if they would go well together in a graph based editor or would they struggle to connect productively?
 ex: do the inputs and outputs tie well together?

 **Modifying the API:**

 1. Update route in `/backend/backend/server/routers/`
 2. Add/update Pydantic models in same directory
 3. Write tests alongside the route file
 4. Run `poetry run test` to verify

 **Frontend feature development:**

 1. Components go in `/frontend/src/components/`
 2. Use existing UI components from `/frontend/src/components/ui/`
 3. Add Storybook stories for new components
@@ -146,6 +176,7 @@ ex: do the inputs and outputs tie well together?
 ### Security Implementation

 **Cache Protection Middleware:**

 - Located in `/backend/backend/server/middleware/security.py`
 - Default behavior: Disables caching for ALL endpoints with `Cache-Control: no-store, no-cache, must-revalidate, private`
 - Uses an allow list approach - only explicitly permitted paths can be cached
@@ -154,14 +185,20 @@
 - To allow caching for a new endpoint, add it to `CACHEABLE_PATHS` in the middleware
 - Applied to both main API server and external API applications

 ### Creating Pull Requests

 - Create the PR against the `dev` branch of the repository.
 - Ensure the branch name is descriptive (e.g., `feature/add-new-block`).
 - Use conventional commit messages (see below).
 - Fill out the .github/PULL_REQUEST_TEMPLATE.md template as the PR description.
 - Run the pre-commit hooks to ensure code quality.

 ### Reviewing/Revising Pull Requests

 - When the user runs /pr-comments or tries to fetch them, also run `gh api /repos/Significant-Gravitas/AutoGPT/pulls/[issuenum]/reviews` to get the reviews
 - Use `gh api /repos/Significant-Gravitas/AutoGPT/pulls/[issuenum]/reviews/[review_id]/comments` to get the review contents
 - Use `gh api /repos/Significant-Gravitas/AutoGPT/issues/9924/comments` to get the PR-specific comments

 ### Conventional Commits

 Use this format for commit messages and Pull Request titles:
````
````diff
@@ -8,7 +8,6 @@ Welcome to the AutoGPT Platform - a powerful system for creating and running AI
 - Docker
 - Docker Compose V2 (comes with Docker Desktop, or can be installed separately)
-- Node.js & NPM (for running the frontend application)

 ### Running the System
@@ -24,10 +23,10 @@ To run the AutoGPT Platform, follow these steps:
 2. Run the following command:

    ```
-   cp .env.example .env
+   cp .env.default .env
    ```

-   This command will copy the `.env.example` file to `.env`. You can modify the `.env` file to add your own environment variables.
+   This command will copy the `.env.default` file to `.env`. You can modify the `.env` file to add your own environment variables.

 3. Run the following command:
@@ -37,44 +36,7 @@ To run the AutoGPT Platform, follow these steps:

    This command will start all the necessary backend services defined in the `docker-compose.yml` file in detached mode.

-4. Navigate to `frontend` within the `autogpt_platform` directory:
-
-   ```
-   cd frontend
-   ```
-
-   You will need to run your frontend application separately on your local machine.
-
-5. Run the following command:
-
-   ```
-   cp .env.example .env.local
-   ```
-
-   This command will copy the `.env.example` file to `.env.local` in the `frontend` directory. You can modify the `.env.local` within this folder to add your own environment variables for the frontend application.
-
-6. Run the following command:
-
-   Enable corepack and install dependencies by running:
-
-   ```
-   corepack enable
-   pnpm i
-   ```
-
-   Generate the API client (this step is required before running the frontend):
-
-   ```
-   pnpm generate:api-client
-   ```
-
-   Then start the frontend application in development mode:
-
-   ```
-   pnpm dev
-   ```
-
-7. Open your browser and navigate to `http://localhost:3000` to access the AutoGPT Platform frontend.
+4. After all the services are in ready state, open your browser and navigate to `http://localhost:3000` to access the AutoGPT Platform frontend.

 ### Docker Compose Commands
@@ -177,20 +139,21 @@ The platform includes scripts for generating and managing the API client:

 - `pnpm fetch:openapi`: Fetches the OpenAPI specification from the backend service (requires backend to be running on port 8006)
 - `pnpm generate:api-client`: Generates the TypeScript API client from the OpenAPI specification using Orval
-- `pnpm generate:api-all`: Runs both fetch and generate commands in sequence
+- `pnpm generate:api`: Runs both fetch and generate commands in sequence

 #### Manual API Client Updates

 If you need to update the API client after making changes to the backend API:

 1. Ensure the backend services are running:

    ```
    docker compose up -d
    ```

 2. Generate the updated API client:

    ```
-   pnpm generate:api-all
+   pnpm generate:api
    ```

 This will fetch the latest OpenAPI specification and regenerate the TypeScript client code.
````
````diff
@@ -10,8 +10,8 @@ from starlette.status import HTTP_401_UNAUTHORIZED
 from .config import settings
 from .jwt_utils import parse_jwt_token

-security = HTTPBearer()
 logger = logging.getLogger(__name__)
+bearer_auth = HTTPBearer(auto_error=False)


 async def auth_middleware(request: Request):
@@ -20,11 +20,10 @@ async def auth_middleware(request: Request):
         logger.warning("Auth disabled")
         return {}

-    security = HTTPBearer()
-    credentials = await security(request)
+    credentials = await bearer_auth(request)

     if not credentials:
-        raise HTTPException(status_code=401, detail="Authorization header is missing")
+        raise HTTPException(status_code=401, detail="Not authenticated")

     try:
         payload = parse_jwt_token(credentials.credentials)
````
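The `auto_error=False` switch is the heart of this change: with the default `auto_error=True`, FastAPI's `HTTPBearer` rejects a request with a missing header by itself (as a 403) before the middleware can respond; with `auto_error=False` it returns `None`, so the middleware controls the status code and message. A standalone sketch of the pattern:

```python
from fastapi import FastAPI, HTTPException, Request
from fastapi.security import HTTPBearer

app = FastAPI()
bearer_auth = HTTPBearer(auto_error=False)  # return None instead of raising 403


@app.get("/me")
async def me(request: Request):
    credentials = await bearer_auth(request)
    if not credentials:
        # We choose the status code and message ourselves
        raise HTTPException(status_code=401, detail="Not authenticated")
    return {"token_prefix": credentials.credentials[:8]}
```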
@@ -1,324 +0,0 @@
|
||||
import contextlib
|
||||
import logging
|
||||
from functools import wraps
|
||||
from json import JSONDecodeError
|
||||
from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional, TypeVar
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from backend.data.model import User
|
||||
|
||||
import ldclient
|
||||
from backend.util.json import loads as json_loads
|
||||
from fastapi import HTTPException
|
||||
from ldclient import Context, LDClient
|
||||
from ldclient.config import Config
|
||||
from typing_extensions import ParamSpec
|
||||
|
||||
from .config import SETTINGS
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
P = ParamSpec("P")
|
||||
T = TypeVar("T")
|
||||
|
||||
_is_initialized = False
|
||||
|
||||
|
||||
def get_client() -> LDClient:
|
||||
"""Get the LaunchDarkly client singleton."""
|
||||
if not _is_initialized:
|
||||
initialize_launchdarkly()
|
||||
return ldclient.get()
|
||||
|
||||
|
||||
def initialize_launchdarkly() -> None:
|
||||
sdk_key = SETTINGS.launch_darkly_sdk_key
|
||||
logger.debug(
|
||||
f"Initializing LaunchDarkly with SDK key: {'present' if sdk_key else 'missing'}"
|
||||
)
|
||||
|
||||
if not sdk_key:
|
||||
logger.warning("LaunchDarkly SDK key not configured")
|
||||
return
|
||||
|
||||
config = Config(sdk_key)
|
||||
ldclient.set_config(config)
|
||||
|
||||
if ldclient.get().is_initialized():
|
||||
global _is_initialized
|
||||
_is_initialized = True
|
||||
logger.info("LaunchDarkly client initialized successfully")
|
||||
else:
|
||||
logger.error("LaunchDarkly client failed to initialize")
|
||||
|
||||
|
||||
def shutdown_launchdarkly() -> None:
|
||||
"""Shutdown the LaunchDarkly client."""
|
||||
if ldclient.get().is_initialized():
|
||||
ldclient.get().close()
|
||||
logger.info("LaunchDarkly client closed successfully")
|
||||
|
||||
|
||||
def create_context(
|
||||
user_id: str, additional_attributes: Optional[dict[str, Any]] = None
|
||||
) -> Context:
|
||||
"""Create LaunchDarkly context with optional additional attributes."""
|
||||
# Use the key from attributes if provided, otherwise use user_id
|
||||
context_key = user_id
|
||||
if additional_attributes and "key" in additional_attributes:
|
||||
context_key = additional_attributes["key"]
|
||||
|
||||
builder = Context.builder(str(context_key)).kind("user")
|
||||
|
||||
if additional_attributes:
|
||||
for key, value in additional_attributes.items():
|
||||
# Skip kind and key as they're already set
|
||||
if key in ["kind", "key"]:
|
||||
continue
|
||||
elif key == "custom" and isinstance(value, dict):
|
||||
# Handle custom attributes object - these go as individual attributes
|
||||
for custom_key, custom_value in value.items():
|
||||
builder.set(custom_key, custom_value)
|
||||
else:
|
||||
builder.set(key, value)
|
||||
return builder.build()
|
||||
|
||||
|
||||
async def _fetch_user_context_data(user_id: str) -> dict[str, Any]:
|
||||
"""
|
||||
Fetch user data and build LaunchDarkly context.
|
||||
|
||||
Args:
|
||||
user_id: The user ID to fetch data for
|
||||
|
||||
Returns:
|
||||
Dictionary with user context data including role
|
||||
"""
|
||||
# Use the unified database access approach
|
||||
from backend.util.clients import get_database_manager_async_client
|
||||
|
||||
db_client = get_database_manager_async_client()
|
||||
user = await db_client.get_user_by_id(user_id)
|
||||
|
||||
# Build LaunchDarkly context from user data
|
||||
return _build_launchdarkly_context(user)
|
||||
|
||||
|
||||
def _build_launchdarkly_context(user: "User") -> dict[str, Any]:
|
||||
"""
|
||||
Build LaunchDarkly context data matching frontend format.
|
||||
|
||||
Returns a context like:
|
||||
{
|
||||
"kind": "user",
|
||||
"key": "user-id",
|
||||
"email": "user@example.com", # Optional
|
||||
"anonymous": false,
|
||||
"custom": {
|
||||
"role": "admin" # Optional
|
||||
}
|
||||
}
|
||||
|
||||
Args:
|
||||
user: User object from database
|
||||
|
||||
Returns:
|
||||
Dictionary with user context data
|
||||
"""
|
||||
from autogpt_libs.auth.models import DEFAULT_USER_ID
|
||||
|
||||
# Build basic context - always include kind, key, and anonymous
|
||||
context_data: dict[str, Any] = {
|
||||
"kind": "user",
|
||||
"key": user.id,
|
||||
"anonymous": False,
|
||||
}
|
||||
|
||||
# Add email if present
|
||||
if user.email:
|
||||
context_data["email"] = user.email
|
||||
|
||||
# Initialize custom attributes
|
||||
custom: dict[str, Any] = {}
|
||||
|
||||
# Determine user role from metadata
|
||||
role = None
|
||||
|
||||
# Check if user is default/system user
|
||||
if user.id == DEFAULT_USER_ID:
|
||||
role = "admin" # Default user has admin privileges when auth is disabled
|
||||
elif user.metadata:
|
||||
# Check for role in metadata
|
||||
try:
|
||||
# Handle both string (direct DB) and dict (RPC) formats
|
||||
if isinstance(user.metadata, str):
|
||||
metadata = json_loads(user.metadata)
|
||||
elif isinstance(user.metadata, dict):
|
||||
metadata = user.metadata
|
||||
else:
|
||||
metadata = {}
|
||||
|
||||
# Extract role from metadata if present
|
||||
if metadata.get("role"):
|
||||
role = metadata["role"]
|
||||
|
||||
except (JSONDecodeError, TypeError) as e:
|
||||
logger.debug(f"Failed to parse user metadata for context: {e}")
|
||||
|
||||
# Add role to custom attributes if present
|
||||
if role:
|
||||
custom["role"] = role
|
||||
|
||||
# Only add custom object if it has content
|
||||
if custom:
|
||||
context_data["custom"] = custom
|
||||
|
||||
return context_data
|
||||
|
||||
|
||||
async def is_feature_enabled(
|
||||
flag_key: str,
|
||||
user_id: str,
|
||||
default: bool = False,
|
||||
use_user_id_only: bool = False,
|
||||
additional_attributes: Optional[dict[str, Any]] = None,
|
||||
user_role: Optional[str] = None,
|
||||
) -> bool:
|
||||
"""
|
||||
Check if a feature flag is enabled for a user with full LaunchDarkly context support.
|
||||
|
||||
Args:
|
||||
flag_key: The LaunchDarkly feature flag key
|
||||
user_id: The user ID to evaluate the flag for
|
||||
        default: Default value if LaunchDarkly is unavailable or flag evaluation fails
        use_user_id_only: If True, only use user_id without fetching database context
        additional_attributes: Additional attributes to include in the context
        user_role: Optional user role (e.g., "admin", "user") to add to segments

    Returns:
        True if feature is enabled, False otherwise
    """
    try:
        client = get_client()

        if use_user_id_only:
            # Simple context with just user ID (for backward compatibility)
            attrs = additional_attributes or {}
            if user_role:
                # Add role to custom attributes for consistency
                if "custom" not in attrs:
                    attrs["custom"] = {}
                if isinstance(attrs["custom"], dict):
                    attrs["custom"]["role"] = user_role
            context = create_context(str(user_id), attrs)
        else:
            # Full context with user segments and metadata from database
            try:
                user_data = await _fetch_user_context_data(user_id)
            except ImportError as e:
                # Database modules not available - fall back to simple context
                logger.debug(f"Database modules not available: {e}")
                user_data = {}
            except Exception as e:
                # Database error - log and fall back to simple context
                logger.warning(f"Failed to fetch user context for {user_id}: {e}")
                user_data = {}

            # Merge additional attributes and role
            attrs = additional_attributes or {}

            # If user_role is provided, add it to custom attributes
            if user_role:
                if "custom" not in user_data:
                    user_data["custom"] = {}
                user_data["custom"]["role"] = user_role

            # Merge additional attributes with user data;
            # handle custom attributes specially
            if "custom" in attrs and isinstance(attrs["custom"], dict):
                if "custom" not in user_data:
                    user_data["custom"] = {}
                user_data["custom"].update(attrs["custom"])
                # Remove custom from attrs to avoid duplication
                attrs = {k: v for k, v in attrs.items() if k != "custom"}

            # Merge remaining attributes
            final_attrs = {**user_data, **attrs}

            context = create_context(str(user_id), final_attrs)

        # Evaluate the flag
        result = client.variation(flag_key, context, default)

        logger.debug(
            f"Feature flag {flag_key} for user {user_id}: {result} "
            f"(use_user_id_only: {use_user_id_only})"
        )

        return result

    except Exception as e:
        logger.debug(
            f"LaunchDarkly flag evaluation failed for {flag_key}: {e}, using default={default}"
        )
        return default


def feature_flag(
    flag_key: str,
    default: bool = False,
) -> Callable[[Callable[P, Awaitable[T]]], Callable[P, Awaitable[T]]]:
    """
    Decorator for async feature flag protected endpoints.

    Args:
        flag_key: The LaunchDarkly feature flag key
        default: Default value if flag evaluation fails

    Returns:
        Decorator that only works with async functions
    """

    def decorator(func: Callable[P, Awaitable[T]]) -> Callable[P, Awaitable[T]]:
        @wraps(func)
        async def async_wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
            try:
                user_id = kwargs.get("user_id")
                if not user_id:
                    raise ValueError("user_id is required")

                if not get_client().is_initialized():
                    logger.warning(
                        f"LaunchDarkly not initialized, using default={default}"
                    )
                    is_enabled = default
                else:
                    # Use the unified function with full context support
                    is_enabled = await is_feature_enabled(
                        flag_key, str(user_id), default, use_user_id_only=False
                    )

                if not is_enabled:
                    raise HTTPException(status_code=404, detail="Feature not available")

                return await func(*args, **kwargs)
            except Exception as e:
                logger.error(f"Error evaluating feature flag {flag_key}: {e}")
                raise

        return async_wrapper

    return decorator


@contextlib.contextmanager
def mock_flag_variation(flag_key: str, return_value: Any):
    """Context manager for testing feature flags."""
    original_variation = get_client().variation
    get_client().variation = lambda key, context, default: (
        return_value if key == flag_key else original_variation(key, context, default)
    )
    try:
        yield
    finally:
        get_client().variation = original_variation
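For orientation, the decorator above gates an async route on a LaunchDarkly flag keyed by the route's user_id kwarg. A minimal usage sketch, assuming FastAPI; the route path and flag key are illustrative, not from this changeset:

from fastapi import FastAPI

app = FastAPI()


@app.get("/beta/agents")
@feature_flag("beta-agents-page", default=False)
async def list_beta_agents(user_id: str):
    # Reached only when the flag evaluates true for this user;
    # otherwise the decorator raises HTTPException(404).
    return {"agents": []}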
@@ -1,84 +0,0 @@
import pytest
from ldclient import LDClient

from autogpt_libs.feature_flag.client import (
    feature_flag,
    is_feature_enabled,
    mock_flag_variation,
)


@pytest.fixture
def ld_client(mocker):
    client = mocker.Mock(spec=LDClient)
    mocker.patch("ldclient.get", return_value=client)
    client.is_initialized.return_value = True
    return client


@pytest.mark.asyncio
async def test_feature_flag_enabled(ld_client):
    ld_client.variation.return_value = True

    @feature_flag("test-flag")
    async def test_function(user_id: str):
        return "success"

    result = test_function(user_id="test-user")
    assert result == "success"
    ld_client.variation.assert_called_once()


@pytest.mark.asyncio
async def test_feature_flag_unauthorized_response(ld_client):
    ld_client.variation.return_value = False

    @feature_flag("test-flag")
    async def test_function(user_id: str):
        return "success"

    result = test_function(user_id="test-user")
    assert result == {"error": "disabled"}


def test_mock_flag_variation(ld_client):
    with mock_flag_variation("test-flag", True):
        assert ld_client.variation("test-flag", None, False)

    with mock_flag_variation("test-flag", False):
        assert ld_client.variation("test-flag", None, False)


def test_is_feature_enabled(ld_client):
    """Test the is_feature_enabled helper function."""
    ld_client.is_initialized.return_value = True
    ld_client.variation.return_value = True

    result = is_feature_enabled("test-flag", "user123", default=False)
    assert result is True

    ld_client.variation.assert_called_once()
    call_args = ld_client.variation.call_args
    assert call_args[0][0] == "test-flag"  # flag_key
    assert call_args[0][2] is False  # default value


def test_is_feature_enabled_not_initialized(ld_client):
    """Test is_feature_enabled when LaunchDarkly is not initialized."""
    ld_client.is_initialized.return_value = False

    result = is_feature_enabled("test-flag", "user123", default=True)
    assert result is True  # Should return default

    ld_client.variation.assert_not_called()


def test_is_feature_enabled_exception(mocker):
    """Test is_feature_enabled when get_client() raises an exception."""
    mocker.patch(
        "autogpt_libs.feature_flag.client.get_client",
        side_effect=Exception("Client error"),
    )

    result = is_feature_enabled("test-flag", "user123", default=True)
    assert result is True  # Should return default
@@ -1,15 +0,0 @@
from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict


class Settings(BaseSettings):
    launch_darkly_sdk_key: str = Field(
        default="",
        description="The Launch Darkly SDK key",
        validation_alias="LAUNCH_DARKLY_SDK_KEY",
    )

    model_config = SettingsConfigDict(case_sensitive=True, extra="ignore")


SETTINGS = Settings()
@@ -1,6 +1,8 @@
"""Logging module for Auto-GPT."""

import logging
import os
import socket
import sys
from pathlib import Path

@@ -10,6 +12,15 @@ from pydantic_settings import BaseSettings, SettingsConfigDict
from .filters import BelowLevelFilter
from .formatters import AGPTFormatter

# Configure global socket timeout and gRPC keepalive to prevent deadlocks
# This must be done at import time before any gRPC connections are established
socket.setdefaulttimeout(30)  # 30-second socket timeout

# Enable gRPC keepalive to detect dead connections faster
os.environ.setdefault("GRPC_KEEPALIVE_TIME_MS", "30000")  # 30 seconds
os.environ.setdefault("GRPC_KEEPALIVE_TIMEOUT_MS", "5000")  # 5 seconds
os.environ.setdefault("GRPC_KEEPALIVE_PERMIT_WITHOUT_CALLS", "true")

LOG_DIR = Path(__file__).parent.parent.parent.parent / "logs"
LOG_FILE = "activity.log"
DEBUG_LOG_FILE = "debug.log"

@@ -79,7 +90,6 @@ def configure_logging(force_cloud_logging: bool = False) -> None:
    Note: This function is typically called at the start of the application
    to set up the logging infrastructure.
    """

    config = LoggingConfig()
    log_handlers: list[logging.Handler] = []

@@ -105,13 +115,17 @@ def configure_logging(force_cloud_logging: bool = False) -> None:
    if config.enable_cloud_logging or force_cloud_logging:
        import google.cloud.logging
        from google.cloud.logging.handlers import CloudLoggingHandler
        from google.cloud.logging_v2.handlers.transports.sync import SyncTransport
        from google.cloud.logging_v2.handlers.transports import (
            BackgroundThreadTransport,
        )

        client = google.cloud.logging.Client()
        # Use BackgroundThreadTransport to prevent blocking the main thread
        # and deadlocks when gRPC calls to Google Cloud Logging hang
        cloud_handler = CloudLoggingHandler(
            client,
            name="autogpt_logs",
            transport=SyncTransport,
            transport=BackgroundThreadTransport,
        )
        cloud_handler.setLevel(config.level)
        log_handlers.append(cloud_handler)
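For context, a minimal sketch of turning the reworked cloud handler on; the function name and kwarg are from the diff above, while the import path is an assumption:

from autogpt_libs.logging.config import configure_logging  # assumed import path

# With BackgroundThreadTransport, records are shipped to Cloud Logging off the
# main thread, so a hung gRPC call can no longer deadlock application startup.
configure_logging(force_cloud_logging=True)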
@@ -1,39 +1,5 @@
import logging
import re
from typing import Any

import uvicorn.config
from colorama import Fore


def remove_color_codes(s: str) -> str:
    return re.sub(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])", "", s)


def fmt_kwargs(kwargs: dict) -> str:
    return ", ".join(f"{n}={repr(v)}" for n, v in kwargs.items())


def print_attribute(
    title: str, value: Any, title_color: str = Fore.GREEN, value_color: str = ""
) -> None:
    logger = logging.getLogger()
    logger.info(
        str(value),
        extra={
            "title": f"{title.rstrip(':')}:",
            "title_color": title_color,
            "color": value_color,
        },
    )


def generate_uvicorn_config():
    """
    Generates a uvicorn logging config that silences uvicorn's default logging and tells it to use the native logging module.
    """
    log_config = dict(uvicorn.config.LOGGING_CONFIG)
    log_config["loggers"]["uvicorn"] = {"handlers": []}
    log_config["loggers"]["uvicorn.error"] = {"handlers": []}
    log_config["loggers"]["uvicorn.access"] = {"handlers": []}
    return log_config
@@ -4,7 +4,6 @@ import threading
import time
from functools import wraps
from typing import (
    Any,
    Awaitable,
    Callable,
    ParamSpec,

@@ -23,11 +22,13 @@ logger = logging.getLogger(__name__)


@overload
def thread_cached(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[R]]: ...
def thread_cached(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[R]]:
    pass


@overload
def thread_cached(func: Callable[P, R]) -> Callable[P, R]: ...
def thread_cached(func: Callable[P, R]) -> Callable[P, R]:
    pass


def thread_cached(

@@ -75,26 +76,32 @@ def clear_thread_cache(func: Callable) -> None:
        clear()


FuncT = TypeVar("FuncT")


R_co = TypeVar("R_co", covariant=True)


@runtime_checkable
class AsyncCachedFunction(Protocol):
class AsyncCachedFunction(Protocol[P, R_co]):
    """Protocol for async functions with cache management methods."""

    def cache_clear(self) -> None:
        """Clear all cached entries."""
        return None

    def cache_info(self) -> dict[str, Any]:
    def cache_info(self) -> dict[str, int | None]:
        """Get cache statistics."""
        return {}

    async def __call__(self, *args: Any, **kwargs: Any) -> Any:
    async def __call__(self, *args: P.args, **kwargs: P.kwargs) -> R_co:
        """Call the cached function."""
        return None
        return None  # type: ignore


def async_ttl_cache(
    maxsize: int = 128, ttl_seconds: int | None = None
) -> Callable[[Callable[..., Awaitable[Any]]], AsyncCachedFunction]:
) -> Callable[[Callable[P, Awaitable[R]]], AsyncCachedFunction[P, R]]:
    """
    TTL (Time To Live) cache decorator for async functions.

@@ -120,13 +127,13 @@ def async_ttl_cache(
    """

    def decorator(
        async_func: Callable[..., Awaitable[Any]],
    ) -> AsyncCachedFunction:
        async_func: Callable[P, Awaitable[R]],
    ) -> AsyncCachedFunction[P, R]:
        # Cache storage - use union type to handle both cases
        cache_storage: dict[Any, Any | Tuple[Any, float]] = {}
        cache_storage: dict[tuple, R | Tuple[R, float]] = {}

        @wraps(async_func)
        async def wrapper(*args, **kwargs):
        async def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
            # Create cache key from arguments
            key = (args, tuple(sorted(kwargs.items())))
            current_time = time.time()

@@ -138,7 +145,7 @@ def async_ttl_cache(
                    logger.debug(
                        f"Cache hit for {async_func.__name__} with key: {str(key)[:50]}"
                    )
                    return cache_storage[key]
                    return cast(R, cache_storage[key])
                else:
                    # With TTL - check expiration
                    cached_data = cache_storage[key]

@@ -148,7 +155,7 @@ def async_ttl_cache(
                        logger.debug(
                            f"Cache hit for {async_func.__name__} with key: {str(key)[:50]}"
                        )
                        return result
                        return cast(R, result)
                    else:
                        # Expired entry
                        del cache_storage[key]

@@ -185,7 +192,7 @@ def async_ttl_cache(
        def cache_clear() -> None:
            cache_storage.clear()

        def cache_info() -> dict[str, Any]:
        def cache_info() -> dict[str, int | None]:
            return {
                "size": len(cache_storage),
                "maxsize": maxsize,

@@ -196,14 +203,35 @@ def async_ttl_cache(
        setattr(wrapper, "cache_clear", cache_clear)
        setattr(wrapper, "cache_info", cache_info)

        return cast(AsyncCachedFunction, wrapper)
        return cast(AsyncCachedFunction[P, R], wrapper)

    return decorator


@overload
def async_cache(
    func: Callable[P, Awaitable[R]],
) -> AsyncCachedFunction[P, R]:
    pass


@overload
def async_cache(
    func: None = None,
    *,
    maxsize: int = 128,
) -> Callable[[Callable[..., Awaitable[Any]]], AsyncCachedFunction]:
) -> Callable[[Callable[P, Awaitable[R]]], AsyncCachedFunction[P, R]]:
    pass


def async_cache(
    func: Callable[P, Awaitable[R]] | None = None,
    *,
    maxsize: int = 128,
) -> (
    AsyncCachedFunction[P, R]
    | Callable[[Callable[P, Awaitable[R]]], AsyncCachedFunction[P, R]]
):
    """
    Process-level cache decorator for async functions (no TTL).

@@ -211,15 +239,28 @@ def async_cache(
    This is a convenience wrapper around async_ttl_cache with ttl_seconds=None.

    Args:
        func: The async function to cache (when used without parentheses)
        maxsize: Maximum number of cached entries

    Returns:
        Decorator function
        Decorated function or decorator

    Example:
        # Without parentheses (uses default maxsize=128)
        @async_cache
        async def get_data(param: str) -> dict:
            return {"result": param}

        # With parentheses and custom maxsize
        @async_cache(maxsize=1000)
        async def expensive_computation(param: str) -> dict:
            # Expensive computation here
            return {"result": param}
    """
    return async_ttl_cache(maxsize=maxsize, ttl_seconds=None)
    if func is None:
        # Called with parentheses @async_cache() or @async_cache(maxsize=...)
        return async_ttl_cache(maxsize=maxsize, ttl_seconds=None)
    else:
        # Called without parentheses @async_cache
        decorator = async_ttl_cache(maxsize=maxsize, ttl_seconds=None)
        return decorator(func)
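A quick sketch of the now-generic TTL decorator in use; the function body and TTL value are illustrative:

import asyncio


@async_ttl_cache(maxsize=64, ttl_seconds=300)
async def fetch_profile(user_id: str) -> dict:
    # Stand-in for an expensive lookup; results live for five minutes
    return {"id": user_id}


async def main() -> None:
    await fetch_profile("u1")          # miss: runs the function
    await fetch_profile("u1")          # hit: served from the cache, typed as dict
    print(fetch_profile.cache_info())  # e.g. {"size": 1, "maxsize": 64, ...}
    fetch_profile.cache_clear()


asyncio.run(main())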
@@ -461,7 +461,7 @@ class TestAsyncTTLCache:

        # Cache size should be reduced (cleanup removes oldest entries)
        info = size_limited_function.cache_info()
        assert info["size"] <= 3  # Should be cleaned up
        assert info["size"] is not None and info["size"] <= 3  # Should be cleaned up

    @pytest.mark.asyncio
    async def test_argument_variations(self):
41 autogpt_platform/autogpt_libs/poetry.lock generated
@@ -1253,30 +1253,31 @@ pyasn1 = ">=0.1.3"

[[package]]
name = "ruff"
version = "0.12.3"
version = "0.12.9"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
groups = ["dev"]
files = [
    {file = "ruff-0.12.3-py3-none-linux_armv6l.whl", hash = "sha256:47552138f7206454eaf0c4fe827e546e9ddac62c2a3d2585ca54d29a890137a2"},
    {file = "ruff-0.12.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:0a9153b000c6fe169bb307f5bd1b691221c4286c133407b8827c406a55282041"},
    {file = "ruff-0.12.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:fa6b24600cf3b750e48ddb6057e901dd5b9aa426e316addb2a1af185a7509882"},
    {file = "ruff-0.12.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2506961bf6ead54887ba3562604d69cb430f59b42133d36976421bc8bd45901"},
    {file = "ruff-0.12.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c4faaff1f90cea9d3033cbbcdf1acf5d7fb11d8180758feb31337391691f3df0"},
    {file = "ruff-0.12.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40dced4a79d7c264389de1c59467d5d5cefd79e7e06d1dfa2c75497b5269a5a6"},
    {file = "ruff-0.12.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:0262d50ba2767ed0fe212aa7e62112a1dcbfd46b858c5bf7bbd11f326998bafc"},
    {file = "ruff-0.12.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12371aec33e1a3758597c5c631bae9a5286f3c963bdfb4d17acdd2d395406687"},
    {file = "ruff-0.12.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:560f13b6baa49785665276c963edc363f8ad4b4fc910a883e2625bdb14a83a9e"},
    {file = "ruff-0.12.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:023040a3499f6f974ae9091bcdd0385dd9e9eb4942f231c23c57708147b06311"},
    {file = "ruff-0.12.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:883d844967bffff5ab28bba1a4d246c1a1b2933f48cb9840f3fdc5111c603b07"},
    {file = "ruff-0.12.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:2120d3aa855ff385e0e562fdee14d564c9675edbe41625c87eeab744a7830d12"},
    {file = "ruff-0.12.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6b16647cbb470eaf4750d27dddc6ebf7758b918887b56d39e9c22cce2049082b"},
    {file = "ruff-0.12.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:e1417051edb436230023575b149e8ff843a324557fe0a265863b7602df86722f"},
    {file = "ruff-0.12.3-py3-none-win32.whl", hash = "sha256:dfd45e6e926deb6409d0616078a666ebce93e55e07f0fb0228d4b2608b2c248d"},
    {file = "ruff-0.12.3-py3-none-win_amd64.whl", hash = "sha256:a946cf1e7ba3209bdef039eb97647f1c77f6f540e5845ec9c114d3af8df873e7"},
    {file = "ruff-0.12.3-py3-none-win_arm64.whl", hash = "sha256:5f9c7c9c8f84c2d7f27e93674d27136fbf489720251544c4da7fb3d742e011b1"},
    {file = "ruff-0.12.3.tar.gz", hash = "sha256:f1b5a4b6668fd7b7ea3697d8d98857390b40c1320a63a178eee6be0899ea2d77"},
    {file = "ruff-0.12.9-py3-none-linux_armv6l.whl", hash = "sha256:fcebc6c79fcae3f220d05585229463621f5dbf24d79fdc4936d9302e177cfa3e"},
    {file = "ruff-0.12.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:aed9d15f8c5755c0e74467731a007fcad41f19bcce41cd75f768bbd687f8535f"},
    {file = "ruff-0.12.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:5b15ea354c6ff0d7423814ba6d44be2807644d0c05e9ed60caca87e963e93f70"},
    {file = "ruff-0.12.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d596c2d0393c2502eaabfef723bd74ca35348a8dac4267d18a94910087807c53"},
    {file = "ruff-0.12.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1b15599931a1a7a03c388b9c5df1bfa62be7ede6eb7ef753b272381f39c3d0ff"},
    {file = "ruff-0.12.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3d02faa2977fb6f3f32ddb7828e212b7dd499c59eb896ae6c03ea5c303575756"},
    {file = "ruff-0.12.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:17d5b6b0b3a25259b69ebcba87908496e6830e03acfb929ef9fd4c58675fa2ea"},
    {file = "ruff-0.12.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:72db7521860e246adbb43f6ef464dd2a532ef2ef1f5dd0d470455b8d9f1773e0"},
    {file = "ruff-0.12.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a03242c1522b4e0885af63320ad754d53983c9599157ee33e77d748363c561ce"},
    {file = "ruff-0.12.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fc83e4e9751e6c13b5046d7162f205d0a7bac5840183c5beebf824b08a27340"},
    {file = "ruff-0.12.9-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:881465ed56ba4dd26a691954650de6ad389a2d1fdb130fe51ff18a25639fe4bb"},
    {file = "ruff-0.12.9-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:43f07a3ccfc62cdb4d3a3348bf0588358a66da756aa113e071b8ca8c3b9826af"},
    {file = "ruff-0.12.9-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:07adb221c54b6bba24387911e5734357f042e5669fa5718920ee728aba3cbadc"},
    {file = "ruff-0.12.9-py3-none-musllinux_1_2_i686.whl", hash = "sha256:f5cd34fabfdea3933ab85d72359f118035882a01bff15bd1d2b15261d85d5f66"},
    {file = "ruff-0.12.9-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:f6be1d2ca0686c54564da8e7ee9e25f93bdd6868263805f8c0b8fc6a449db6d7"},
    {file = "ruff-0.12.9-py3-none-win32.whl", hash = "sha256:cc7a37bd2509974379d0115cc5608a1a4a6c4bff1b452ea69db83c8855d53f93"},
    {file = "ruff-0.12.9-py3-none-win_amd64.whl", hash = "sha256:6fb15b1977309741d7d098c8a3cb7a30bc112760a00fb6efb7abc85f00ba5908"},
    {file = "ruff-0.12.9-py3-none-win_arm64.whl", hash = "sha256:63c8c819739d86b96d500cce885956a1a48ab056bbcbc61b747ad494b2485089"},
    {file = "ruff-0.12.9.tar.gz", hash = "sha256:fbd94b2e3c623f659962934e52c2bea6fc6da11f667a427a368adaf3af2c866a"},
]

[[package]]

@@ -1614,4 +1615,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<4.0"
content-hash = "f67db13e6f68b1d67a55eee908c1c560bfa44da8509f98f842889a7570a9830f"
content-hash = "4cc687aabe5865665fb8c4ccc0ea7e0af80b41e401ca37919f57efa6e0b5be00"

@@ -23,7 +23,7 @@ supabase = "^2.16.0"
uvicorn = "^0.35.0"

[tool.poetry.group.dev.dependencies]
ruff = "^0.12.3"
ruff = "^0.12.9"

[build-system]
requires = ["poetry-core"]
52 autogpt_platform/backend/.dockerignore Normal file
@@ -0,0 +1,52 @@
# Development and testing files
**/__pycache__
**/*.pyc
**/*.pyo
**/*.pyd
**/.Python
**/env/
**/venv/
**/.venv/
**/pip-log.txt
**/.pytest_cache/
**/test-results/
**/snapshots/
**/test/

# IDE and editor files
**/.vscode/
**/.idea/
**/*.swp
**/*.swo
*~

# OS files
.DS_Store
Thumbs.db

# Logs
**/*.log
**/logs/

# Git
.git/
.gitignore

# Documentation
**/*.md
!README.md

# Local development files
.env
.env.local
**/.env.test

# Build artifacts
**/dist/
**/build/
**/target/

# Docker files (avoid recursion)
Dockerfile*
docker-compose*
.dockerignore
@@ -1,3 +1,9 @@
# Backend Configuration
# This file contains environment variables that MUST be set for the AutoGPT platform
# Variables with working defaults in settings.py are not included here

## ===== REQUIRED DATABASE CONFIGURATION ===== ##
# PostgreSQL Database Connection
DB_USER=postgres
DB_PASS=your-super-secret-and-long-postgres-password
DB_NAME=postgres

@@ -10,72 +16,50 @@ DB_SCHEMA=platform
DATABASE_URL="postgresql://${DB_USER}:${DB_PASS}@${DB_HOST}:${DB_PORT}/${DB_NAME}?schema=${DB_SCHEMA}&connect_timeout=${DB_CONNECT_TIMEOUT}"
DIRECT_URL="postgresql://${DB_USER}:${DB_PASS}@${DB_HOST}:${DB_PORT}/${DB_NAME}?schema=${DB_SCHEMA}&connect_timeout=${DB_CONNECT_TIMEOUT}"
PRISMA_SCHEMA="postgres/schema.prisma"
ENABLE_AUTH=true

# EXECUTOR
NUM_GRAPH_WORKERS=10

BACKEND_CORS_ALLOW_ORIGINS=["http://localhost:3000"]

# generate using `from cryptography.fernet import Fernet;Fernet.generate_key().decode()`
ENCRYPTION_KEY='dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw='
UNSUBSCRIBE_SECRET_KEY = 'HlP8ivStJjmbf6NKi78m_3FnOogut0t5ckzjsIqeaio='

## ===== REQUIRED SERVICE CREDENTIALS ===== ##
# Redis Configuration
REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_PASSWORD=password

ENABLE_CREDIT=false
STRIPE_API_KEY=
STRIPE_WEBHOOK_SECRET=
# RabbitMQ Credentials
RABBITMQ_DEFAULT_USER=rabbitmq_user_default
RABBITMQ_DEFAULT_PASS=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7

# What environment things should be logged under: local dev or prod
APP_ENV=local
# What environment to behave as: "local" or "cloud"
BEHAVE_AS=local
PYRO_HOST=localhost
SENTRY_DSN=

# Email For Postmark so we can send emails
POSTMARK_SERVER_API_TOKEN=
POSTMARK_SENDER_EMAIL=invalid@invalid.com
POSTMARK_WEBHOOK_TOKEN=

## User auth with Supabase is required for any of the 3rd party integrations with auth to work.
ENABLE_AUTH=true
# Supabase Authentication
SUPABASE_URL=http://localhost:8000
SUPABASE_SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
SUPABASE_JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long

# RabbitMQ credentials -- Used for communication between services
RABBITMQ_HOST=localhost
RABBITMQ_PORT=5672
RABBITMQ_DEFAULT_USER=rabbitmq_user_default
RABBITMQ_DEFAULT_PASS=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7
## ===== REQUIRED SECURITY KEYS ===== ##
# Generate using: from cryptography.fernet import Fernet;Fernet.generate_key().decode()
ENCRYPTION_KEY=dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw=
UNSUBSCRIBE_SECRET_KEY=HlP8ivStJjmbf6NKi78m_3FnOogut0t5ckzjsIqeaio=

## GCS bucket is required for marketplace and library functionality
## ===== IMPORTANT OPTIONAL CONFIGURATION ===== ##
# Platform URLs (set these for webhooks and OAuth to work)
PLATFORM_BASE_URL=http://localhost:8000
FRONTEND_BASE_URL=http://localhost:3000

# Media Storage (required for marketplace and library functionality)
MEDIA_GCS_BUCKET_NAME=

## For local development, you may need to set FRONTEND_BASE_URL for the OAuth flow
## for integrations to work. Defaults to the value of PLATFORM_BASE_URL if not set.
# FRONTEND_BASE_URL=http://localhost:3000
## ===== API KEYS AND OAUTH CREDENTIALS ===== ##
# All API keys below are optional - only add what you need

## PLATFORM_BASE_URL must be set to a *publicly accessible* URL pointing to your backend
## to use the platform's webhook-related functionality.
## If you are developing locally, you can use something like ngrok to get a public URL
## and tunnel it to your locally running backend.
PLATFORM_BASE_URL=http://localhost:3000

## Cloudflare Turnstile (CAPTCHA) Configuration
## Get these from the Cloudflare Turnstile dashboard: https://dash.cloudflare.com/?to=/:account/turnstile
## This is the backend secret key
TURNSTILE_SECRET_KEY=
## This is the verify URL
TURNSTILE_VERIFY_URL=https://challenges.cloudflare.com/turnstile/v0/siteverify

## == INTEGRATION CREDENTIALS == ##
# Each set of server side credentials is required for the corresponding 3rd party
# integration to work.
# AI/LLM Services
OPENAI_API_KEY=
ANTHROPIC_API_KEY=
GROQ_API_KEY=
LLAMA_API_KEY=
AIML_API_KEY=
V0_API_KEY=
OPEN_ROUTER_API_KEY=
NVIDIA_API_KEY=

# OAuth Credentials
# For the OAuth callback URL, use <your_frontend_url>/auth/integrations/oauth_callback,
# e.g. http://localhost:3000/auth/integrations/oauth_callback

@@ -85,7 +69,6 @@ GITHUB_CLIENT_SECRET=

# Google OAuth App server credentials - https://console.cloud.google.com/apis/credentials, and enable gmail api and set scopes
# https://console.cloud.google.com/apis/credentials/consent ?project=<your_project_id>

# You'll need to add/enable the following scopes (minimum):
# https://console.developers.google.com/apis/api/gmail.googleapis.com/overview ?project=<your_project_id>
# https://console.cloud.google.com/apis/library/sheets.googleapis.com/ ?project=<your_project_id>

@@ -121,104 +104,75 @@ LINEAR_CLIENT_SECRET=
TODOIST_CLIENT_ID=
TODOIST_CLIENT_SECRET=

## ===== OPTIONAL API KEYS ===== ##
NOTION_CLIENT_ID=
NOTION_CLIENT_SECRET=

# LLM
OPENAI_API_KEY=
ANTHROPIC_API_KEY=
AIML_API_KEY=
GROQ_API_KEY=
OPEN_ROUTER_API_KEY=
LLAMA_API_KEY=
# Discord OAuth App credentials
# 1. Go to https://discord.com/developers/applications
# 2. Create a new application
# 3. Go to OAuth2 section and add redirect URI: http://localhost:3000/auth/integrations/oauth_callback
# 4. Copy Client ID and Client Secret below
DISCORD_CLIENT_ID=
DISCORD_CLIENT_SECRET=

# Reddit
# Go to https://www.reddit.com/prefs/apps and create a new app
# Choose "script" for the type
# Fill in the redirect uri as <your_frontend_url>/auth/integrations/oauth_callback, e.g. http://localhost:3000/auth/integrations/oauth_callback
REDDIT_CLIENT_ID=
REDDIT_CLIENT_SECRET=
REDDIT_USER_AGENT="AutoGPT:1.0 (by /u/autogpt)"

# Discord
DISCORD_BOT_TOKEN=
# Payment Processing
STRIPE_API_KEY=
STRIPE_WEBHOOK_SECRET=

# SMTP/Email
SMTP_SERVER=
SMTP_PORT=
SMTP_USERNAME=
SMTP_PASSWORD=
# Email Service (for sending notifications and confirmations)
POSTMARK_SERVER_API_TOKEN=
POSTMARK_SENDER_EMAIL=invalid@invalid.com
POSTMARK_WEBHOOK_TOKEN=

# D-ID
# Error Tracking
SENTRY_DSN=

# Cloudflare Turnstile (CAPTCHA) Configuration
# Get these from the Cloudflare Turnstile dashboard: https://dash.cloudflare.com/?to=/:account/turnstile
# This is the backend secret key
TURNSTILE_SECRET_KEY=
# This is the verify URL
TURNSTILE_VERIFY_URL=https://challenges.cloudflare.com/turnstile/v0/siteverify

# Feature Flags
LAUNCH_DARKLY_SDK_KEY=

# Content Generation & Media
DID_API_KEY=
FAL_API_KEY=
IDEOGRAM_API_KEY=
REPLICATE_API_KEY=
REVID_API_KEY=
SCREENSHOTONE_API_KEY=
UNREAL_SPEECH_API_KEY=

# Open Weather Map
# Data & Search Services
E2B_API_KEY=
EXA_API_KEY=
JINA_API_KEY=
MEM0_API_KEY=
OPENWEATHERMAP_API_KEY=

# SMTP
SMTP_SERVER=
SMTP_PORT=
SMTP_USERNAME=
SMTP_PASSWORD=

# Medium
MEDIUM_API_KEY=
MEDIUM_AUTHOR_ID=

# Google Maps
GOOGLE_MAPS_API_KEY=

# Replicate
REPLICATE_API_KEY=
# Communication Services
DISCORD_BOT_TOKEN=
MEDIUM_API_KEY=
MEDIUM_AUTHOR_ID=
SMTP_SERVER=
SMTP_PORT=
SMTP_USERNAME=
SMTP_PASSWORD=

# Ideogram
IDEOGRAM_API_KEY=

# Fal
FAL_API_KEY=

# Exa
EXA_API_KEY=

# E2B
E2B_API_KEY=

# Mem0
MEM0_API_KEY=

# Nvidia
NVIDIA_API_KEY=

# Apollo
# Business & Marketing Tools
APOLLO_API_KEY=

# SmartLead
SMARTLEAD_API_KEY=

# ZeroBounce
ZEROBOUNCE_API_KEY=

# Ayrshare
ENRICHLAYER_API_KEY=
AYRSHARE_API_KEY=
AYRSHARE_JWT_KEY=
SMARTLEAD_API_KEY=
ZEROBOUNCE_API_KEY=

## ===== OPTIONAL API KEYS END ===== ##

# Block Error Rate Monitoring
BLOCK_ERROR_RATE_THRESHOLD=0.5
BLOCK_ERROR_RATE_CHECK_INTERVAL_SECS=86400

# Logging Configuration
LOG_LEVEL=INFO
ENABLE_CLOUD_LOGGING=false
ENABLE_FILE_LOGGING=false
# Use to manually set the log directory
# LOG_DIR=./logs

# Example Blocks Configuration
# Set to true to enable example blocks in development
# These blocks are disabled by default in production
ENABLE_EXAMPLE_BLOCKS=false

# Cloud Storage Configuration
# Cleanup interval for expired files (hours between cleanup runs, 1-24 hours)
CLOUD_STORAGE_CLEANUP_INTERVAL_HOURS=6
# Other Services
AUTOMOD_API_KEY=
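The file above tells you to generate ENCRYPTION_KEY and UNSUBSCRIBE_SECRET_KEY with a Fernet one-liner; spelled out for readability:

from cryptography.fernet import Fernet

# Run once per key and paste each printed value into your .env
print(Fernet.generate_key().decode())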
1 autogpt_platform/backend/.gitignore vendored
@@ -1,3 +1,4 @@
.env
database.db
database.db-journal
dev.db
@@ -1,31 +1,34 @@
FROM python:3.11.10-slim-bookworm AS builder
FROM debian:13-slim AS builder

# Set environment variables
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1
ENV DEBIAN_FRONTEND=noninteractive

WORKDIR /app

RUN echo 'Acquire::http::Pipeline-Depth 0;\nAcquire::http::No-Cache true;\nAcquire::BrokenProxy true;\n' > /etc/apt/apt.conf.d/99fixbadproxy

RUN apt-get update --allow-releaseinfo-change --fix-missing

# Install build dependencies
RUN apt-get install -y build-essential
RUN apt-get install -y libpq5
RUN apt-get install -y libz-dev
RUN apt-get install -y libssl-dev
RUN apt-get install -y postgresql-client
# Update package list and install Python and build dependencies
RUN apt-get update --allow-releaseinfo-change --fix-missing \
    && apt-get install -y \
    python3.13 \
    python3.13-dev \
    python3.13-venv \
    python3-pip \
    build-essential \
    libpq5 \
    libz-dev \
    libssl-dev \
    postgresql-client

ENV POETRY_HOME=/opt/poetry
ENV POETRY_NO_INTERACTION=1
ENV POETRY_VIRTUALENVS_CREATE=false
ENV POETRY_VIRTUALENVS_CREATE=true
ENV POETRY_VIRTUALENVS_IN_PROJECT=true
ENV PATH=/opt/poetry/bin:$PATH

# Upgrade pip and setuptools to fix security vulnerabilities
RUN pip3 install --upgrade pip setuptools

RUN pip3 install poetry
RUN pip3 install poetry --break-system-packages

# Copy and install dependencies
COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs

@@ -37,27 +40,30 @@ RUN poetry install --no-ansi --no-root
COPY autogpt_platform/backend/schema.prisma ./
RUN poetry run prisma generate

FROM python:3.11.10-slim-bookworm AS server_dependencies
FROM debian:13-slim AS server_dependencies

WORKDIR /app

ENV POETRY_HOME=/opt/poetry \
    POETRY_NO_INTERACTION=1 \
    POETRY_VIRTUALENVS_CREATE=false
    POETRY_VIRTUALENVS_CREATE=true \
    POETRY_VIRTUALENVS_IN_PROJECT=true \
    DEBIAN_FRONTEND=noninteractive
ENV PATH=/opt/poetry/bin:$PATH

# Upgrade pip and setuptools to fix security vulnerabilities
RUN pip3 install --upgrade pip setuptools
# Install Python without upgrading system-managed packages
RUN apt-get update && apt-get install -y \
    python3.13 \
    python3-pip

# Copy only necessary files from builder
COPY --from=builder /app /app
COPY --from=builder /usr/local/lib/python3.11 /usr/local/lib/python3.11
COPY --from=builder /usr/local/bin /usr/local/bin
COPY --from=builder /usr/local/lib/python3* /usr/local/lib/python3*
COPY --from=builder /usr/local/bin/poetry /usr/local/bin/poetry
# Copy Prisma binaries
COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries


ENV PATH="/app/.venv/bin:$PATH"
ENV PATH="/app/autogpt_platform/backend/.venv/bin:$PATH"

RUN mkdir -p /app/autogpt_platform/autogpt_libs
RUN mkdir -p /app/autogpt_platform/backend

@@ -68,6 +74,12 @@ COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.tom

WORKDIR /app/autogpt_platform/backend

FROM server_dependencies AS migrate

# Migration stage only needs schema and migrations - much lighter than full backend
COPY autogpt_platform/backend/schema.prisma /app/autogpt_platform/backend/
COPY autogpt_platform/backend/migrations /app/autogpt_platform/backend/migrations

FROM server_dependencies AS server

COPY autogpt_platform/backend /app/autogpt_platform/backend
@@ -25,6 +25,9 @@ class AgentExecutorBlock(Block):
        user_id: str = SchemaField(description="User ID")
        graph_id: str = SchemaField(description="Graph ID")
        graph_version: int = SchemaField(description="Graph Version")
        agent_name: Optional[str] = SchemaField(
            default=None, description="Name to display in the Builder UI"
        )

        inputs: BlockInput = SchemaField(description="Input data for the graph")
        input_schema: dict = SchemaField(description="Input schema for the graph")
@@ -166,7 +166,7 @@ class AIMusicGeneratorBlock(Block):
            output_format=input_data.output_format,
            normalization_strategy=input_data.normalization_strategy,
        )
        if result and result != "No output received":
        if result and isinstance(result, str) and result.startswith("http"):
            yield "result", result
            return
        else:
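The tightened condition only forwards results that look like URLs; a minimal illustration of the behavioral difference (the sample values are made up):

for result in ("https://cdn.example.com/track.mp3", "error: quota exceeded", ""):
    old_ok = bool(result and result != "No output received")
    new_ok = bool(result and isinstance(result, str) and result.startswith("http"))
    print(repr(result), old_ok, new_ok)
# Only the first value passes the new check; the old check also let
# "error: quota exceeded" through as if it were a usable output URL.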
205 autogpt_platform/backend/backend/blocks/baas/_api.py Normal file
@@ -0,0 +1,205 @@
"""
Meeting BaaS API client module.
All API calls centralized for consistency and maintainability.
"""

from typing import Any, Dict, List, Optional

from backend.sdk import Requests


class MeetingBaasAPI:
    """Client for Meeting BaaS API endpoints."""

    BASE_URL = "https://api.meetingbaas.com"

    def __init__(self, api_key: str):
        """Initialize API client with authentication key."""
        self.api_key = api_key
        self.headers = {"x-meeting-baas-api-key": api_key}
        self.requests = Requests()

    # Bot Management Endpoints

    async def join_meeting(
        self,
        bot_name: str,
        meeting_url: str,
        reserved: bool = False,
        bot_image: Optional[str] = None,
        entry_message: Optional[str] = None,
        start_time: Optional[int] = None,
        speech_to_text: Optional[Dict[str, Any]] = None,
        webhook_url: Optional[str] = None,
        automatic_leave: Optional[Dict[str, Any]] = None,
        extra: Optional[Dict[str, Any]] = None,
        recording_mode: str = "speaker_view",
        streaming: Optional[Dict[str, Any]] = None,
        deduplication_key: Optional[str] = None,
        zoom_sdk_id: Optional[str] = None,
        zoom_sdk_pwd: Optional[str] = None,
    ) -> Dict[str, Any]:
        """
        Deploy a bot to join and record a meeting.

        POST /bots
        """
        body = {
            "bot_name": bot_name,
            "meeting_url": meeting_url,
            "reserved": reserved,
            "recording_mode": recording_mode,
        }

        # Add optional fields if provided
        if bot_image is not None:
            body["bot_image"] = bot_image
        if entry_message is not None:
            body["entry_message"] = entry_message
        if start_time is not None:
            body["start_time"] = start_time
        if speech_to_text is not None:
            body["speech_to_text"] = speech_to_text
        if webhook_url is not None:
            body["webhook_url"] = webhook_url
        if automatic_leave is not None:
            body["automatic_leave"] = automatic_leave
        if extra is not None:
            body["extra"] = extra
        if streaming is not None:
            body["streaming"] = streaming
        if deduplication_key is not None:
            body["deduplication_key"] = deduplication_key
        if zoom_sdk_id is not None:
            body["zoom_sdk_id"] = zoom_sdk_id
        if zoom_sdk_pwd is not None:
            body["zoom_sdk_pwd"] = zoom_sdk_pwd

        response = await self.requests.post(
            f"{self.BASE_URL}/bots",
            headers=self.headers,
            json=body,
        )
        return response.json()

    async def leave_meeting(self, bot_id: str) -> bool:
        """
        Remove a bot from an ongoing meeting.

        DELETE /bots/{uuid}
        """
        response = await self.requests.delete(
            f"{self.BASE_URL}/bots/{bot_id}",
            headers=self.headers,
        )
        return response.status in [200, 204]

    async def retranscribe(
        self,
        bot_uuid: str,
        speech_to_text: Optional[Dict[str, Any]] = None,
        webhook_url: Optional[str] = None,
    ) -> Dict[str, Any]:
        """
        Re-run transcription on a bot's audio.

        POST /bots/retranscribe
        """
        body: Dict[str, Any] = {"bot_uuid": bot_uuid}

        if speech_to_text is not None:
            body["speech_to_text"] = speech_to_text
        if webhook_url is not None:
            body["webhook_url"] = webhook_url

        response = await self.requests.post(
            f"{self.BASE_URL}/bots/retranscribe",
            headers=self.headers,
            json=body,
        )

        if response.status == 202:
            return {"accepted": True}
        return response.json()

    # Data Retrieval Endpoints

    async def get_meeting_data(
        self, bot_id: str, include_transcripts: bool = True
    ) -> Dict[str, Any]:
        """
        Retrieve meeting data including recording and transcripts.

        GET /bots/meeting_data
        """
        params = {
            "bot_id": bot_id,
            "include_transcripts": str(include_transcripts).lower(),
        }

        response = await self.requests.get(
            f"{self.BASE_URL}/bots/meeting_data",
            headers=self.headers,
            params=params,
        )
        return response.json()

    async def get_screenshots(self, bot_id: str) -> List[Dict[str, Any]]:
        """
        Retrieve screenshots captured during a meeting.

        GET /bots/{uuid}/screenshots
        """
        response = await self.requests.get(
            f"{self.BASE_URL}/bots/{bot_id}/screenshots",
            headers=self.headers,
        )
        result = response.json()
        # Ensure we return a list
        if isinstance(result, list):
            return result
        return []

    async def delete_data(self, bot_id: str) -> bool:
        """
        Delete a bot's recorded data.

        POST /bots/{uuid}/delete_data
        """
        response = await self.requests.post(
            f"{self.BASE_URL}/bots/{bot_id}/delete_data",
            headers=self.headers,
        )
        return response.status == 200

    async def list_bots_with_metadata(
        self,
        limit: Optional[int] = None,
        offset: Optional[int] = None,
        sort_by: Optional[str] = None,
        sort_order: Optional[str] = None,
        filter_by: Optional[Dict[str, Any]] = None,
    ) -> Dict[str, Any]:
        """
        List bots with metadata including IDs, names, and meeting details.

        GET /bots/bots_with_metadata
        """
        params = {}
        if limit is not None:
            params["limit"] = limit
        if offset is not None:
            params["offset"] = offset
        if sort_by is not None:
            params["sort_by"] = sort_by
        if sort_order is not None:
            params["sort_order"] = sort_order
        if filter_by is not None:
            params.update(filter_by)

        response = await self.requests.get(
            f"{self.BASE_URL}/bots/bots_with_metadata",
            headers=self.headers,
            params=params,
        )
        return response.json()

13 autogpt_platform/backend/backend/blocks/baas/_config.py Normal file
@@ -0,0 +1,13 @@
"""
Shared configuration for all Meeting BaaS blocks using the SDK pattern.
"""

from backend.sdk import BlockCostType, ProviderBuilder

# Configure the Meeting BaaS provider with API key authentication
baas = (
    ProviderBuilder("baas")
    .with_api_key("MEETING_BAAS_API_KEY", "Meeting BaaS API Key")
    .with_base_cost(5, BlockCostType.RUN)  # Higher cost for meeting recording service
    .build()
)
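A sketch of driving the MeetingBaasAPI client above end to end; the API key and meeting URL are placeholders, and inside the platform the blocks below do this wiring for you:

import asyncio


async def record_and_fetch() -> None:
    api = MeetingBaasAPI(api_key="mbk-...")  # placeholder key
    joined = await api.join_meeting(
        bot_name="Notetaker",
        meeting_url="https://meet.google.com/abc-defg-hij",  # placeholder URL
    )
    bot_id = joined.get("bot_id", "")
    # ... later, once the meeting has ended:
    data = await api.get_meeting_data(bot_id, include_transcripts=True)
    print(data.get("mp4", ""))  # time-limited download URL


asyncio.run(record_and_fetch())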
217 autogpt_platform/backend/backend/blocks/baas/bots.py Normal file
@@ -0,0 +1,217 @@
"""
Meeting BaaS bot (recording) blocks.
"""

from typing import Optional

from backend.sdk import (
    APIKeyCredentials,
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchema,
    CredentialsMetaInput,
    SchemaField,
)

from ._api import MeetingBaasAPI
from ._config import baas


class BaasBotJoinMeetingBlock(Block):
    """
    Deploy a bot immediately or at a scheduled start_time to join and record a meeting.
    """

    class Input(BlockSchema):
        credentials: CredentialsMetaInput = baas.credentials_field(
            description="Meeting BaaS API credentials"
        )
        meeting_url: str = SchemaField(
            description="The URL of the meeting the bot should join"
        )
        bot_name: str = SchemaField(
            description="Display name for the bot in the meeting"
        )
        bot_image: str = SchemaField(
            description="URL to an image for the bot's avatar (16:9 ratio recommended)",
            default="",
        )
        entry_message: str = SchemaField(
            description="Chat message the bot will post upon entry", default=""
        )
        reserved: bool = SchemaField(
            description="Use a reserved bot slot (joins 4 min before meeting)",
            default=False,
        )
        start_time: Optional[int] = SchemaField(
            description="Unix timestamp (ms) when bot should join", default=None
        )
        webhook_url: str | None = SchemaField(
            description="URL to receive webhook events for this bot", default=None
        )
        timeouts: dict = SchemaField(
            description="Automatic leave timeouts configuration", default={}
        )
        extra: dict = SchemaField(
            description="Custom metadata to attach to the bot", default={}
        )

    class Output(BlockSchema):
        bot_id: str = SchemaField(description="UUID of the deployed bot")
        join_response: dict = SchemaField(
            description="Full response from join operation"
        )

    def __init__(self):
        super().__init__(
            id="377d1a6a-a99b-46cf-9af3-1d1b12758e04",
            description="Deploy a bot to join and record a meeting",
            categories={BlockCategory.COMMUNICATION},
            input_schema=self.Input,
            output_schema=self.Output,
        )

    async def run(
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        api_key = credentials.api_key.get_secret_value()
        api = MeetingBaasAPI(api_key)

        # Call API with all parameters
        data = await api.join_meeting(
            bot_name=input_data.bot_name,
            meeting_url=input_data.meeting_url,
            reserved=input_data.reserved,
            bot_image=input_data.bot_image if input_data.bot_image else None,
            entry_message=(
                input_data.entry_message if input_data.entry_message else None
            ),
            start_time=input_data.start_time,
            speech_to_text={"provider": "Default"},
            webhook_url=input_data.webhook_url if input_data.webhook_url else None,
            automatic_leave=input_data.timeouts if input_data.timeouts else None,
            extra=input_data.extra if input_data.extra else None,
        )

        yield "bot_id", data.get("bot_id", "")
        yield "join_response", data


class BaasBotLeaveMeetingBlock(Block):
    """
    Force the bot to exit the call.
    """

    class Input(BlockSchema):
        credentials: CredentialsMetaInput = baas.credentials_field(
            description="Meeting BaaS API credentials"
        )
        bot_id: str = SchemaField(description="UUID of the bot to remove from meeting")

    class Output(BlockSchema):
        left: bool = SchemaField(description="Whether the bot successfully left")

    def __init__(self):
        super().__init__(
            id="bf77d128-8b25-4280-b5c7-2d553ba7e482",
            description="Remove a bot from an ongoing meeting",
            categories={BlockCategory.COMMUNICATION},
            input_schema=self.Input,
            output_schema=self.Output,
        )

    async def run(
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        api_key = credentials.api_key.get_secret_value()
        api = MeetingBaasAPI(api_key)

        # Leave meeting
        left = await api.leave_meeting(input_data.bot_id)

        yield "left", left


class BaasBotFetchMeetingDataBlock(Block):
    """
    Pull MP4 URL, transcript & metadata for a completed meeting.
    """

    class Input(BlockSchema):
        credentials: CredentialsMetaInput = baas.credentials_field(
            description="Meeting BaaS API credentials"
        )
        bot_id: str = SchemaField(description="UUID of the bot whose data to fetch")
        include_transcripts: bool = SchemaField(
            description="Include transcript data in response", default=True
        )

    class Output(BlockSchema):
        mp4_url: str = SchemaField(
            description="URL to download the meeting recording (time-limited)"
        )
        transcript: list = SchemaField(description="Meeting transcript data")
        metadata: dict = SchemaField(description="Meeting metadata and bot information")

    def __init__(self):
        super().__init__(
            id="ea7c1309-303c-4da1-893f-89c0e9d64e78",
            description="Retrieve recorded meeting data",
            categories={BlockCategory.DATA},
            input_schema=self.Input,
            output_schema=self.Output,
        )

    async def run(
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        api_key = credentials.api_key.get_secret_value()
        api = MeetingBaasAPI(api_key)

        # Fetch meeting data
        data = await api.get_meeting_data(
            bot_id=input_data.bot_id,
            include_transcripts=input_data.include_transcripts,
        )

        yield "mp4_url", data.get("mp4", "")
        yield "transcript", data.get("bot_data", {}).get("transcripts", [])
        yield "metadata", data.get("bot_data", {}).get("bot", {})


class BaasBotDeleteRecordingBlock(Block):
    """
    Purge MP4 + transcript data for privacy or storage management.
    """

    class Input(BlockSchema):
        credentials: CredentialsMetaInput = baas.credentials_field(
            description="Meeting BaaS API credentials"
        )
        bot_id: str = SchemaField(description="UUID of the bot whose data to delete")

    class Output(BlockSchema):
        deleted: bool = SchemaField(
            description="Whether the data was successfully deleted"
        )

    def __init__(self):
        super().__init__(
            id="bf8d1aa6-42d8-4944-b6bd-6bac554c0d3b",
            description="Permanently delete a meeting's recorded data",
            categories={BlockCategory.DATA},
            input_schema=self.Input,
            output_schema=self.Output,
        )

    async def run(
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        api_key = credentials.api_key.get_secret_value()
        api = MeetingBaasAPI(api_key)

        # Delete recording data
        deleted = await api.delete_data(input_data.bot_id)

        yield "deleted", deleted
178 autogpt_platform/backend/backend/blocks/dataforseo/_api.py Normal file
@@ -0,0 +1,178 @@
|
||||
"""
|
||||
DataForSEO API client with async support using the SDK patterns.
|
||||
"""
|
||||
|
||||
import base64
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from backend.sdk import Requests, UserPasswordCredentials
|
||||
|
||||
|
||||
class DataForSeoClient:
|
||||
"""Client for the DataForSEO API using async requests."""
|
||||
|
||||
API_URL = "https://api.dataforseo.com"
|
||||
|
||||
def __init__(self, credentials: UserPasswordCredentials):
|
||||
self.credentials = credentials
|
||||
self.requests = Requests(
|
||||
trusted_origins=["https://api.dataforseo.com"],
|
||||
raise_for_status=False,
|
||||
)
|
||||
|
||||
def _get_headers(self) -> Dict[str, str]:
|
||||
"""Generate the authorization header using Basic Auth."""
|
||||
username = self.credentials.username.get_secret_value()
|
||||
password = self.credentials.password.get_secret_value()
|
||||
credentials_str = f"{username}:{password}"
|
||||
encoded = base64.b64encode(credentials_str.encode("ascii")).decode("ascii")
|
||||
return {
|
||||
"Authorization": f"Basic {encoded}",
|
||||
"Content-Type": "application/json",
|
||||
}
|
||||
|
||||
async def keyword_suggestions(
|
||||
self,
|
||||
keyword: str,
|
||||
location_code: Optional[int] = None,
|
||||
language_code: Optional[str] = None,
|
||||
include_seed_keyword: bool = True,
|
||||
include_serp_info: bool = False,
|
||||
include_clickstream_data: bool = False,
|
||||
limit: int = 100,
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get keyword suggestions from DataForSEO Labs.
|
||||
|
||||
Args:
|
||||
keyword: Seed keyword
|
||||
            location_code: Location code for targeting
            language_code: Language code (e.g., "en")
            include_seed_keyword: Include seed keyword in results
            include_serp_info: Include SERP data
            include_clickstream_data: Include clickstream metrics
            limit: Maximum number of results (up to 3000)

        Returns:
            API response with keyword suggestions
        """
        endpoint = f"{self.API_URL}/v3/dataforseo_labs/google/keyword_suggestions/live"

        # Build payload only with non-None values to avoid sending null fields
        task_data: dict[str, Any] = {
            "keyword": keyword,
        }

        if location_code is not None:
            task_data["location_code"] = location_code
        if language_code is not None:
            task_data["language_code"] = language_code
        if include_seed_keyword is not None:
            task_data["include_seed_keyword"] = include_seed_keyword
        if include_serp_info is not None:
            task_data["include_serp_info"] = include_serp_info
        if include_clickstream_data is not None:
            task_data["include_clickstream_data"] = include_clickstream_data
        if limit is not None:
            task_data["limit"] = limit

        payload = [task_data]

        response = await self.requests.post(
            endpoint,
            headers=self._get_headers(),
            json=payload,
        )

        data = response.json()

        # Check for API errors
        if response.status != 200:
            error_message = data.get("status_message", "Unknown error")
            raise Exception(
                f"DataForSEO API error ({response.status}): {error_message}"
            )

        # Extract the results from the response
        if data.get("tasks") and len(data["tasks"]) > 0:
            task = data["tasks"][0]
            if task.get("status_code") == 20000:  # Success code
                return task.get("result", [])
            else:
                error_msg = task.get("status_message", "Task failed")
                raise Exception(f"DataForSEO task error: {error_msg}")

        return []

    async def related_keywords(
        self,
        keyword: str,
        location_code: Optional[int] = None,
        language_code: Optional[str] = None,
        include_seed_keyword: bool = True,
        include_serp_info: bool = False,
        include_clickstream_data: bool = False,
        limit: int = 100,
    ) -> List[Dict[str, Any]]:
        """
        Get related keywords from DataForSEO Labs.

        Args:
            keyword: Seed keyword
            location_code: Location code for targeting
            language_code: Language code (e.g., "en")
            include_seed_keyword: Include seed keyword in results
            include_serp_info: Include SERP data
            include_clickstream_data: Include clickstream metrics
            limit: Maximum number of results (up to 3000)

        Returns:
            API response with related keywords
        """
        endpoint = f"{self.API_URL}/v3/dataforseo_labs/google/related_keywords/live"

        # Build payload only with non-None values to avoid sending null fields
        task_data: dict[str, Any] = {
            "keyword": keyword,
        }

        if location_code is not None:
            task_data["location_code"] = location_code
        if language_code is not None:
            task_data["language_code"] = language_code
        if include_seed_keyword is not None:
            task_data["include_seed_keyword"] = include_seed_keyword
        if include_serp_info is not None:
            task_data["include_serp_info"] = include_serp_info
        if include_clickstream_data is not None:
            task_data["include_clickstream_data"] = include_clickstream_data
        if limit is not None:
            task_data["limit"] = limit

        payload = [task_data]

        response = await self.requests.post(
            endpoint,
            headers=self._get_headers(),
            json=payload,
        )

        data = response.json()

        # Check for API errors
        if response.status != 200:
            error_message = data.get("status_message", "Unknown error")
            raise Exception(
                f"DataForSEO API error ({response.status}): {error_message}"
            )

        # Extract the results from the response
        if data.get("tasks") and len(data["tasks"]) > 0:
            task = data["tasks"][0]
            if task.get("status_code") == 20000:  # Success code
                return task.get("result", [])
            else:
                error_msg = task.get("status_message", "Task failed")
                raise Exception(f"DataForSEO task error: {error_msg}")

        return []
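Both endpoint wrappers above use the same build-only-what-was-set payload idiom. A minimal standalone sketch of that idiom, in plain Python with illustrative field names (no DataForSEO call involved):

from typing import Any, Optional

def build_task_data(
    keyword: str,
    location_code: Optional[int] = None,
    limit: Optional[int] = None,
) -> dict[str, Any]:
    # Only fields the caller actually set make it into the payload,
    # so no null values are sent to the API.
    task_data: dict[str, Any] = {"keyword": keyword}
    if location_code is not None:
        task_data["location_code"] = location_code
    if limit is not None:
        task_data["limit"] = limit
    return task_data

assert build_task_data("seo") == {"keyword": "seo"}
assert build_task_data("seo", limit=10) == {"keyword": "seo", "limit": 10}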
@@ -0,0 +1,17 @@
"""
Configuration for all DataForSEO blocks using the new SDK pattern.
"""

from backend.sdk import BlockCostType, ProviderBuilder

# Build the DataForSEO provider with username/password authentication
dataforseo = (
    ProviderBuilder("dataforseo")
    .with_user_password(
        username_env_var="DATAFORSEO_USERNAME",
        password_env_var="DATAFORSEO_PASSWORD",
        title="DataForSEO Credentials",
    )
    .with_base_cost(1, BlockCostType.RUN)
    .build()
)
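The built dataforseo provider object is what the block files below consume; both calls in this sketch appear verbatim in those files:

# Grounded in the block definitions that follow: the provider supplies
# both the credentials input field and mock credentials for block tests.
credentials_field = dataforseo.credentials_field(
    description="DataForSEO credentials (username and password)"
)
test_credentials = dataforseo.get_test_credentials()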
@@ -0,0 +1,273 @@
"""
DataForSEO Google Keyword Suggestions block.
"""

from typing import Any, Dict, List, Optional

from backend.sdk import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchema,
    CredentialsMetaInput,
    SchemaField,
    UserPasswordCredentials,
)

from ._api import DataForSeoClient
from ._config import dataforseo


class KeywordSuggestion(BlockSchema):
    """Schema for a keyword suggestion result."""

    keyword: str = SchemaField(description="The keyword suggestion")
    search_volume: Optional[int] = SchemaField(
        description="Monthly search volume", default=None
    )
    competition: Optional[float] = SchemaField(
        description="Competition level (0-1)", default=None
    )
    cpc: Optional[float] = SchemaField(
        description="Cost per click in USD", default=None
    )
    keyword_difficulty: Optional[int] = SchemaField(
        description="Keyword difficulty score", default=None
    )
    serp_info: Optional[Dict[str, Any]] = SchemaField(
        description="data from SERP for each keyword", default=None
    )
    clickstream_data: Optional[Dict[str, Any]] = SchemaField(
        description="Clickstream data metrics", default=None
    )


class DataForSeoKeywordSuggestionsBlock(Block):
    """Block for getting keyword suggestions from DataForSEO Labs."""

    class Input(BlockSchema):
        credentials: CredentialsMetaInput = dataforseo.credentials_field(
            description="DataForSEO credentials (username and password)"
        )
        keyword: str = SchemaField(description="Seed keyword to get suggestions for")
        location_code: Optional[int] = SchemaField(
            description="Location code for targeting (e.g., 2840 for USA)",
            default=2840,  # USA
        )
        language_code: Optional[str] = SchemaField(
            description="Language code (e.g., 'en' for English)",
            default="en",
        )
        include_seed_keyword: bool = SchemaField(
            description="Include the seed keyword in results",
            default=True,
        )
        include_serp_info: bool = SchemaField(
            description="Include SERP information",
            default=False,
        )
        include_clickstream_data: bool = SchemaField(
            description="Include clickstream metrics",
            default=False,
        )
        limit: int = SchemaField(
            description="Maximum number of results (up to 3000)",
            default=100,
            ge=1,
            le=3000,
        )

    class Output(BlockSchema):
        suggestions: List[KeywordSuggestion] = SchemaField(
            description="List of keyword suggestions with metrics"
        )
        suggestion: KeywordSuggestion = SchemaField(
            description="A single keyword suggestion with metrics"
        )
        total_count: int = SchemaField(
            description="Total number of suggestions returned"
        )
        seed_keyword: str = SchemaField(
            description="The seed keyword used for the query"
        )

    def __init__(self):
        super().__init__(
            id="73c3e7c4-2b3f-4e9f-9e3e-8f7a5c3e2d45",
            description="Get keyword suggestions from DataForSEO Labs Google API",
            categories={BlockCategory.SEARCH, BlockCategory.DATA},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={
                "credentials": dataforseo.get_test_credentials().model_dump(),
                "keyword": "digital marketing",
                "location_code": 2840,
                "language_code": "en",
                "limit": 1,
            },
            test_credentials=dataforseo.get_test_credentials(),
            test_output=[
                (
                    "suggestion",
                    lambda x: hasattr(x, "keyword")
                    and x.keyword == "digital marketing strategy",
                ),
                ("suggestions", lambda x: isinstance(x, list) and len(x) == 1),
                ("total_count", 1),
                ("seed_keyword", "digital marketing"),
            ],
            test_mock={
                "_fetch_keyword_suggestions": lambda *args, **kwargs: [
                    {
                        "items": [
                            {
                                "keyword": "digital marketing strategy",
                                "keyword_info": {
                                    "search_volume": 10000,
                                    "competition": 0.5,
                                    "cpc": 2.5,
                                },
                                "keyword_properties": {
                                    "keyword_difficulty": 50,
                                },
                            }
                        ]
                    }
                ]
            },
        )

    async def _fetch_keyword_suggestions(
        self,
        client: DataForSeoClient,
        input_data: Input,
    ) -> Any:
        """Private method to fetch keyword suggestions - can be mocked for testing."""
        return await client.keyword_suggestions(
            keyword=input_data.keyword,
            location_code=input_data.location_code,
            language_code=input_data.language_code,
            include_seed_keyword=input_data.include_seed_keyword,
            include_serp_info=input_data.include_serp_info,
            include_clickstream_data=input_data.include_clickstream_data,
            limit=input_data.limit,
        )

    async def run(
        self,
        input_data: Input,
        *,
        credentials: UserPasswordCredentials,
        **kwargs,
    ) -> BlockOutput:
        """Execute the keyword suggestions query."""
        client = DataForSeoClient(credentials)

        results = await self._fetch_keyword_suggestions(client, input_data)

        # Process and format the results
        suggestions = []
        if results and len(results) > 0:
            # results is a list, get the first element
            first_result = results[0] if isinstance(results, list) else results
            items = (
                first_result.get("items", []) if isinstance(first_result, dict) else []
            )
            for item in items:
                # Create the KeywordSuggestion object
                suggestion = KeywordSuggestion(
                    keyword=item.get("keyword", ""),
                    search_volume=item.get("keyword_info", {}).get("search_volume"),
                    competition=item.get("keyword_info", {}).get("competition"),
                    cpc=item.get("keyword_info", {}).get("cpc"),
                    keyword_difficulty=item.get("keyword_properties", {}).get(
                        "keyword_difficulty"
                    ),
                    serp_info=(
                        item.get("serp_info") if input_data.include_serp_info else None
                    ),
                    clickstream_data=(
                        item.get("clickstream_keyword_info")
                        if input_data.include_clickstream_data
                        else None
                    ),
                )
                yield "suggestion", suggestion
                suggestions.append(suggestion)

        yield "suggestions", suggestions
        yield "total_count", len(suggestions)
        yield "seed_keyword", input_data.keyword


class KeywordSuggestionExtractorBlock(Block):
    """Extracts individual fields from a KeywordSuggestion object."""

    class Input(BlockSchema):
        suggestion: KeywordSuggestion = SchemaField(
            description="The keyword suggestion object to extract fields from"
        )

    class Output(BlockSchema):
        keyword: str = SchemaField(description="The keyword suggestion")
        search_volume: Optional[int] = SchemaField(
            description="Monthly search volume", default=None
        )
        competition: Optional[float] = SchemaField(
            description="Competition level (0-1)", default=None
        )
        cpc: Optional[float] = SchemaField(
            description="Cost per click in USD", default=None
        )
        keyword_difficulty: Optional[int] = SchemaField(
            description="Keyword difficulty score", default=None
        )
        serp_info: Optional[Dict[str, Any]] = SchemaField(
            description="data from SERP for each keyword", default=None
        )
        clickstream_data: Optional[Dict[str, Any]] = SchemaField(
            description="Clickstream data metrics", default=None
        )

    def __init__(self):
        super().__init__(
            id="4193cb94-677c-48b0-9eec-6ac72fffd0f2",
            description="Extract individual fields from a KeywordSuggestion object",
            categories={BlockCategory.DATA},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={
                "suggestion": KeywordSuggestion(
                    keyword="test keyword",
                    search_volume=1000,
                    competition=0.5,
                    cpc=2.5,
                    keyword_difficulty=60,
                ).model_dump()
            },
            test_output=[
                ("keyword", "test keyword"),
                ("search_volume", 1000),
                ("competition", 0.5),
                ("cpc", 2.5),
                ("keyword_difficulty", 60),
                ("serp_info", None),
                ("clickstream_data", None),
            ],
        )

    async def run(
        self,
        input_data: Input,
        **kwargs,
    ) -> BlockOutput:
        """Extract fields from the KeywordSuggestion object."""
        suggestion = input_data.suggestion

        yield "keyword", suggestion.keyword
        yield "search_volume", suggestion.search_volume
        yield "competition", suggestion.competition
        yield "cpc", suggestion.cpc
        yield "keyword_difficulty", suggestion.keyword_difficulty
        yield "serp_info", suggestion.serp_info
        yield "clickstream_data", suggestion.clickstream_data
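The nested .get() navigation in run() assumes the item shape used in the test_mock above; a quick standalone check of that mapping:

# Same shape as the test_mock payload above.
item = {
    "keyword": "digital marketing strategy",
    "keyword_info": {"search_volume": 10000, "competition": 0.5, "cpc": 2.5},
    "keyword_properties": {"keyword_difficulty": 50},
}
assert item.get("keyword_info", {}).get("search_volume") == 10000
assert item.get("keyword_properties", {}).get("keyword_difficulty") == 50
# Missing sections fall back to None instead of raising:
assert {}.get("keyword_info", {}).get("cpc") is None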
@@ -0,0 +1,283 @@
"""
DataForSEO Google Related Keywords block.
"""

from typing import Any, Dict, List, Optional

from backend.sdk import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchema,
    CredentialsMetaInput,
    SchemaField,
    UserPasswordCredentials,
)

from ._api import DataForSeoClient
from ._config import dataforseo


class RelatedKeyword(BlockSchema):
    """Schema for a related keyword result."""

    keyword: str = SchemaField(description="The related keyword")
    search_volume: Optional[int] = SchemaField(
        description="Monthly search volume", default=None
    )
    competition: Optional[float] = SchemaField(
        description="Competition level (0-1)", default=None
    )
    cpc: Optional[float] = SchemaField(
        description="Cost per click in USD", default=None
    )
    keyword_difficulty: Optional[int] = SchemaField(
        description="Keyword difficulty score", default=None
    )
    serp_info: Optional[Dict[str, Any]] = SchemaField(
        description="SERP data for the keyword", default=None
    )
    clickstream_data: Optional[Dict[str, Any]] = SchemaField(
        description="Clickstream data metrics", default=None
    )


class DataForSeoRelatedKeywordsBlock(Block):
    """Block for getting related keywords from DataForSEO Labs."""

    class Input(BlockSchema):
        credentials: CredentialsMetaInput = dataforseo.credentials_field(
            description="DataForSEO credentials (username and password)"
        )
        keyword: str = SchemaField(
            description="Seed keyword to find related keywords for"
        )
        location_code: Optional[int] = SchemaField(
            description="Location code for targeting (e.g., 2840 for USA)",
            default=2840,  # USA
        )
        language_code: Optional[str] = SchemaField(
            description="Language code (e.g., 'en' for English)",
            default="en",
        )
        include_seed_keyword: bool = SchemaField(
            description="Include the seed keyword in results",
            default=True,
        )
        include_serp_info: bool = SchemaField(
            description="Include SERP information",
            default=False,
        )
        include_clickstream_data: bool = SchemaField(
            description="Include clickstream metrics",
            default=False,
        )
        limit: int = SchemaField(
            description="Maximum number of results (up to 3000)",
            default=100,
            ge=1,
            le=3000,
        )

    class Output(BlockSchema):
        related_keywords: List[RelatedKeyword] = SchemaField(
            description="List of related keywords with metrics"
        )
        related_keyword: RelatedKeyword = SchemaField(
            description="A related keyword with metrics"
        )
        total_count: int = SchemaField(
            description="Total number of related keywords returned"
        )
        seed_keyword: str = SchemaField(
            description="The seed keyword used for the query"
        )

    def __init__(self):
        super().__init__(
            id="8f2e4d6a-1b3c-4a5e-9d7f-2c8e6a4b3f1d",
            description="Get related keywords from DataForSEO Labs Google API",
            categories={BlockCategory.SEARCH, BlockCategory.DATA},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={
                "credentials": dataforseo.get_test_credentials().model_dump(),
                "keyword": "content marketing",
                "location_code": 2840,
                "language_code": "en",
                "limit": 1,
            },
            test_credentials=dataforseo.get_test_credentials(),
            test_output=[
                (
                    "related_keyword",
                    lambda x: hasattr(x, "keyword") and x.keyword == "content strategy",
                ),
                ("related_keywords", lambda x: isinstance(x, list) and len(x) == 1),
                ("total_count", 1),
                ("seed_keyword", "content marketing"),
            ],
            test_mock={
                "_fetch_related_keywords": lambda *args, **kwargs: [
                    {
                        "items": [
                            {
                                "keyword_data": {
                                    "keyword": "content strategy",
                                    "keyword_info": {
                                        "search_volume": 8000,
                                        "competition": 0.4,
                                        "cpc": 3.0,
                                    },
                                    "keyword_properties": {
                                        "keyword_difficulty": 45,
                                    },
                                }
                            }
                        ]
                    }
                ]
            },
        )

    async def _fetch_related_keywords(
        self,
        client: DataForSeoClient,
        input_data: Input,
    ) -> Any:
        """Private method to fetch related keywords - can be mocked for testing."""
        return await client.related_keywords(
            keyword=input_data.keyword,
            location_code=input_data.location_code,
            language_code=input_data.language_code,
            include_seed_keyword=input_data.include_seed_keyword,
            include_serp_info=input_data.include_serp_info,
            include_clickstream_data=input_data.include_clickstream_data,
            limit=input_data.limit,
        )

    async def run(
        self,
        input_data: Input,
        *,
        credentials: UserPasswordCredentials,
        **kwargs,
    ) -> BlockOutput:
        """Execute the related keywords query."""
        client = DataForSeoClient(credentials)

        results = await self._fetch_related_keywords(client, input_data)

        # Process and format the results
        related_keywords = []
        if results and len(results) > 0:
            # results is a list, get the first element
            first_result = results[0] if isinstance(results, list) else results
            items = (
                first_result.get("items", []) if isinstance(first_result, dict) else []
            )
            for item in items:
                # Extract keyword_data from the item
                keyword_data = item.get("keyword_data", {})

                # Create the RelatedKeyword object
                keyword = RelatedKeyword(
                    keyword=keyword_data.get("keyword", ""),
                    search_volume=keyword_data.get("keyword_info", {}).get(
                        "search_volume"
                    ),
                    competition=keyword_data.get("keyword_info", {}).get("competition"),
                    cpc=keyword_data.get("keyword_info", {}).get("cpc"),
                    keyword_difficulty=keyword_data.get("keyword_properties", {}).get(
                        "keyword_difficulty"
                    ),
                    serp_info=(
                        keyword_data.get("serp_info")
                        if input_data.include_serp_info
                        else None
                    ),
                    clickstream_data=(
                        keyword_data.get("clickstream_keyword_info")
                        if input_data.include_clickstream_data
                        else None
                    ),
                )
                yield "related_keyword", keyword
                related_keywords.append(keyword)

        yield "related_keywords", related_keywords
        yield "total_count", len(related_keywords)
        yield "seed_keyword", input_data.keyword


class RelatedKeywordExtractorBlock(Block):
    """Extracts individual fields from a RelatedKeyword object."""

    class Input(BlockSchema):
        related_keyword: RelatedKeyword = SchemaField(
            description="The related keyword object to extract fields from"
        )

    class Output(BlockSchema):
        keyword: str = SchemaField(description="The related keyword")
        search_volume: Optional[int] = SchemaField(
            description="Monthly search volume", default=None
        )
        competition: Optional[float] = SchemaField(
            description="Competition level (0-1)", default=None
        )
        cpc: Optional[float] = SchemaField(
            description="Cost per click in USD", default=None
        )
        keyword_difficulty: Optional[int] = SchemaField(
            description="Keyword difficulty score", default=None
        )
        serp_info: Optional[Dict[str, Any]] = SchemaField(
            description="SERP data for the keyword", default=None
        )
        clickstream_data: Optional[Dict[str, Any]] = SchemaField(
            description="Clickstream data metrics", default=None
        )

    def __init__(self):
        super().__init__(
            id="98342061-09d2-4952-bf77-0761fc8cc9a8",
            description="Extract individual fields from a RelatedKeyword object",
            categories={BlockCategory.DATA},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={
                "related_keyword": RelatedKeyword(
                    keyword="test related keyword",
                    search_volume=800,
                    competition=0.4,
                    cpc=3.0,
                    keyword_difficulty=55,
                ).model_dump()
            },
            test_output=[
                ("keyword", "test related keyword"),
                ("search_volume", 800),
                ("competition", 0.4),
                ("cpc", 3.0),
                ("keyword_difficulty", 55),
                ("serp_info", None),
                ("clickstream_data", None),
            ],
        )

    async def run(
        self,
        input_data: Input,
        **kwargs,
    ) -> BlockOutput:
        """Extract fields from the RelatedKeyword object."""
        related_keyword = input_data.related_keyword

        yield "keyword", related_keyword.keyword
        yield "search_volume", related_keyword.search_volume
        yield "competition", related_keyword.competition
        yield "cpc", related_keyword.cpc
        yield "keyword_difficulty", related_keyword.keyword_difficulty
        yield "serp_info", related_keyword.serp_info
        yield "clickstream_data", related_keyword.clickstream_data
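Note the structural difference from the suggestions endpoint: each related-keyword item nests its fields under "keyword_data". A standalone check using the mock shape above:

item = {
    "keyword_data": {
        "keyword": "content strategy",
        "keyword_info": {"search_volume": 8000},
    }
}
keyword_data = item.get("keyword_data", {})
assert keyword_data.get("keyword") == "content strategy"
assert keyword_data.get("keyword_info", {}).get("search_volume") == 8000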
117
autogpt_platform/backend/backend/blocks/discord/_api.py
Normal file
@@ -0,0 +1,117 @@
"""
Discord API helper functions for making authenticated requests.
"""

import logging
from typing import Optional

from pydantic import BaseModel

from backend.data.model import OAuth2Credentials
from backend.util.request import Requests

logger = logging.getLogger(__name__)


class DiscordAPIException(Exception):
    """Exception raised for Discord API errors."""

    def __init__(self, message: str, status_code: int):
        super().__init__(message)
        self.status_code = status_code


class DiscordOAuthUser(BaseModel):
    """Model for Discord OAuth user response."""

    user_id: str
    username: str
    avatar_url: str
    banner: Optional[str] = None
    accent_color: Optional[int] = None


def get_api(credentials: OAuth2Credentials) -> Requests:
    """
    Create a Requests instance configured for Discord API calls with OAuth2 credentials.

    Args:
        credentials: The OAuth2 credentials containing the access token.

    Returns:
        A configured Requests instance for Discord API calls.
    """
    return Requests(
        trusted_origins=[],
        extra_headers={
            "Authorization": f"Bearer {credentials.access_token.get_secret_value()}",
            "Content-Type": "application/json",
        },
        raise_for_status=False,
    )


async def get_current_user(credentials: OAuth2Credentials) -> DiscordOAuthUser:
    """
    Fetch the current user's information using Discord OAuth2 API.

    Reference: https://discord.com/developers/docs/resources/user#get-current-user

    Args:
        credentials: The OAuth2 credentials.

    Returns:
        A model containing user data with avatar URL.

    Raises:
        DiscordAPIException: If the API request fails.
    """
    api = get_api(credentials)
    response = await api.get("https://discord.com/api/oauth2/@me")

    if not response.ok:
        error_text = response.text()
        raise DiscordAPIException(
            f"Failed to fetch user info: {response.status} - {error_text}",
            response.status,
        )

    data = response.json()
    logger.info(f"Discord OAuth2 API Response: {data}")

    # The /api/oauth2/@me endpoint returns a user object nested in the response
    user_info = data.get("user", {})
    logger.info(f"User info extracted: {user_info}")

    # Build avatar URL
    user_id = user_info.get("id")
    avatar_hash = user_info.get("avatar")
    if avatar_hash:
        # Custom avatar
        avatar_ext = "gif" if avatar_hash.startswith("a_") else "png"
        avatar_url = (
            f"https://cdn.discordapp.com/avatars/{user_id}/{avatar_hash}.{avatar_ext}"
        )
    else:
        # Default avatar based on discriminator or user ID
        discriminator = user_info.get("discriminator", "0")
        if discriminator == "0":
            # New username system - use user ID for default avatar
            default_avatar_index = (int(user_id) >> 22) % 6
        else:
            # Legacy discriminator system
            default_avatar_index = int(discriminator) % 5
        avatar_url = (
            f"https://cdn.discordapp.com/embed/avatars/{default_avatar_index}.png"
        )

    result = DiscordOAuthUser(
        user_id=user_id,
        username=user_info.get("username", ""),
        avatar_url=avatar_url,
        banner=user_info.get("banner"),
        accent_color=user_info.get("accent_color"),
    )

    logger.info(f"Returning user data: {result.model_dump()}")
    return result
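The default-avatar branch above follows Discord's documented scheme: snowflake IDs carry a timestamp in their high bits, so (id >> 22) % 6 picks one of six defaults under the new username system, while legacy accounts use discriminator % 5. A quick sanity check (the snowflake value is illustrative):

user_id = "123456789012345678"  # illustrative snowflake
print((int(user_id) >> 22) % 6)  # default avatar index in 0..5 (new system)
print(int("0001") % 5)           # legacy discriminator "0001" -> index 1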
74
autogpt_platform/backend/backend/blocks/discord/_auth.py
Normal file
@@ -0,0 +1,74 @@
from typing import Literal

from pydantic import SecretStr

from backend.data.model import (
    APIKeyCredentials,
    CredentialsField,
    CredentialsMetaInput,
    OAuth2Credentials,
)
from backend.integrations.providers import ProviderName
from backend.util.settings import Secrets

secrets = Secrets()
DISCORD_OAUTH_IS_CONFIGURED = bool(
    secrets.discord_client_id and secrets.discord_client_secret
)

# Bot token credentials (existing)
DiscordBotCredentials = APIKeyCredentials
DiscordBotCredentialsInput = CredentialsMetaInput[
    Literal[ProviderName.DISCORD], Literal["api_key"]
]

# OAuth2 credentials (new)
DiscordOAuthCredentials = OAuth2Credentials
DiscordOAuthCredentialsInput = CredentialsMetaInput[
    Literal[ProviderName.DISCORD], Literal["oauth2"]
]


def DiscordBotCredentialsField() -> DiscordBotCredentialsInput:
    """Creates a Discord bot token credentials field."""
    return CredentialsField(description="Discord bot token")


def DiscordOAuthCredentialsField(scopes: list[str]) -> DiscordOAuthCredentialsInput:
    """Creates a Discord OAuth2 credentials field."""
    return CredentialsField(
        description="Discord OAuth2 credentials",
        required_scopes=set(scopes) | {"identify"},  # Basic user info scope
    )


# Test credentials for bot tokens
TEST_BOT_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="discord",
    api_key=SecretStr("test_api_key"),
    title="Mock Discord API key",
    expires_at=None,
)
TEST_BOT_CREDENTIALS_INPUT = {
    "provider": TEST_BOT_CREDENTIALS.provider,
    "id": TEST_BOT_CREDENTIALS.id,
    "type": TEST_BOT_CREDENTIALS.type,
    "title": TEST_BOT_CREDENTIALS.type,
}

# Test credentials for OAuth2
TEST_OAUTH_CREDENTIALS = OAuth2Credentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="discord",
    access_token=SecretStr("test_access_token"),
    title="Mock Discord OAuth",
    scopes=["identify"],
    username="testuser",
)
TEST_OAUTH_CREDENTIALS_INPUT = {
    "provider": TEST_OAUTH_CREDENTIALS.provider,
    "id": TEST_OAUTH_CREDENTIALS.id,
    "type": TEST_OAUTH_CREDENTIALS.type,
    "title": TEST_OAUTH_CREDENTIALS.type,
}
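DiscordOAuthCredentialsField always folds the "identify" scope into whatever the caller requests; the set union behaves like this (the scope list is illustrative):

requested = ["guilds", "identify"]
required = set(requested) | {"identify"}
assert required == {"guilds", "identify"}  # deduplicated, "identify" guaranteed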
@@ -2,45 +2,29 @@ import base64
 import io
 import mimetypes
 from pathlib import Path
-from typing import Any, Literal
+from typing import Any

 import aiohttp
 import discord
-from pydantic import SecretStr

 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from backend.data.model import (
-    APIKeyCredentials,
-    CredentialsField,
-    CredentialsMetaInput,
-    SchemaField,
-)
-from backend.integrations.providers import ProviderName
+from backend.data.model import APIKeyCredentials, SchemaField
 from backend.util.file import store_media_file
 from backend.util.type import MediaFileType

-DiscordCredentials = CredentialsMetaInput[
-    Literal[ProviderName.DISCORD], Literal["api_key"]
-]
-
-
-def DiscordCredentialsField() -> DiscordCredentials:
-    return CredentialsField(description="Discord bot token")
-
-
-TEST_CREDENTIALS = APIKeyCredentials(
-    id="01234567-89ab-cdef-0123-456789abcdef",
-    provider="discord",
-    api_key=SecretStr("test_api_key"),
-    title="Mock Discord API key",
-    expires_at=None,
+from ._auth import (
+    TEST_BOT_CREDENTIALS,
+    TEST_BOT_CREDENTIALS_INPUT,
+    DiscordBotCredentialsField,
+    DiscordBotCredentialsInput,
 )
-TEST_CREDENTIALS_INPUT = {
-    "provider": TEST_CREDENTIALS.provider,
-    "id": TEST_CREDENTIALS.id,
-    "type": TEST_CREDENTIALS.type,
-    "title": TEST_CREDENTIALS.type,
-}
+
+# Keep backward compatibility alias
+DiscordCredentials = DiscordBotCredentialsInput
+DiscordCredentialsField = DiscordBotCredentialsField
+TEST_CREDENTIALS = TEST_BOT_CREDENTIALS
+TEST_CREDENTIALS_INPUT = TEST_BOT_CREDENTIALS_INPUT


 class ReadDiscordMessagesBlock(Block):
@@ -0,0 +1,99 @@
"""
Discord OAuth-based blocks.
"""

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import OAuth2Credentials, SchemaField

from ._api import DiscordOAuthUser, get_current_user
from ._auth import (
    DISCORD_OAUTH_IS_CONFIGURED,
    TEST_OAUTH_CREDENTIALS,
    TEST_OAUTH_CREDENTIALS_INPUT,
    DiscordOAuthCredentialsField,
    DiscordOAuthCredentialsInput,
)


class DiscordGetCurrentUserBlock(Block):
    """
    Gets information about the currently authenticated Discord user using OAuth2.
    This block requires Discord OAuth2 credentials (not bot tokens).
    """

    class Input(BlockSchema):
        credentials: DiscordOAuthCredentialsInput = DiscordOAuthCredentialsField(
            ["identify"]
        )

    class Output(BlockSchema):
        user_id: str = SchemaField(description="The authenticated user's Discord ID")
        username: str = SchemaField(description="The user's username")
        avatar_url: str = SchemaField(description="URL to the user's avatar image")
        banner_url: str = SchemaField(
            description="URL to the user's banner image (if set)", default=""
        )
        accent_color: int = SchemaField(
            description="The user's accent color as an integer", default=0
        )

    def __init__(self):
        super().__init__(
            id="8c7e39b8-4e9d-4f3a-b4e1-2a8c9d5f6e3b",
            input_schema=DiscordGetCurrentUserBlock.Input,
            output_schema=DiscordGetCurrentUserBlock.Output,
            description="Gets information about the currently authenticated Discord user using OAuth2 credentials.",
            categories={BlockCategory.SOCIAL},
            disabled=not DISCORD_OAUTH_IS_CONFIGURED,
            test_input={
                "credentials": TEST_OAUTH_CREDENTIALS_INPUT,
            },
            test_credentials=TEST_OAUTH_CREDENTIALS,
            test_output=[
                ("user_id", "123456789012345678"),
                ("username", "testuser"),
                (
                    "avatar_url",
                    "https://cdn.discordapp.com/avatars/123456789012345678/avatar.png",
                ),
                ("banner_url", ""),
                ("accent_color", 0),
            ],
            test_mock={
                "get_user": lambda _: DiscordOAuthUser(
                    user_id="123456789012345678",
                    username="testuser",
                    avatar_url="https://cdn.discordapp.com/avatars/123456789012345678/avatar.png",
                    banner=None,
                    accent_color=0,
                )
            },
        )

    @staticmethod
    async def get_user(credentials: OAuth2Credentials) -> DiscordOAuthUser:
        user_info = await get_current_user(credentials)
        return user_info

    async def run(
        self, input_data: Input, *, credentials: OAuth2Credentials, **kwargs
    ) -> BlockOutput:
        try:
            result = await self.get_user(credentials)

            # Yield each output field
            yield "user_id", result.user_id
            yield "username", result.username
            yield "avatar_url", result.avatar_url

            # Handle banner URL if banner hash exists
            if result.banner:
                banner_url = f"https://cdn.discordapp.com/banners/{result.user_id}/{result.banner}.png"
                yield "banner_url", banner_url
            else:
                yield "banner_url", ""

            yield "accent_color", result.accent_color or 0

        except Exception as e:
            raise ValueError(f"Failed to get Discord user info: {e}")
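The banner handling in run() above reduces to a one-line conditional; a standalone check with illustrative values:

user_id, banner = "123456789012345678", "abc123"
banner_url = (
    f"https://cdn.discordapp.com/banners/{user_id}/{banner}.png" if banner else ""
)
assert banner_url.endswith("abc123.png")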
408
autogpt_platform/backend/backend/blocks/enrichlayer/_api.py
Normal file
@@ -0,0 +1,408 @@
"""
API module for Enrichlayer integration.

This module provides a client for interacting with the Enrichlayer API,
which allows fetching LinkedIn profile data and related information.
"""

import datetime
import enum
import logging
from json import JSONDecodeError
from typing import Any, Optional, TypeVar

from pydantic import BaseModel, Field

from backend.data.model import APIKeyCredentials
from backend.util.request import Requests

logger = logging.getLogger(__name__)

T = TypeVar("T")


class EnrichlayerAPIException(Exception):
    """Exception raised for Enrichlayer API errors."""

    def __init__(self, message: str, status_code: int):
        super().__init__(message)
        self.status_code = status_code


class FallbackToCache(enum.Enum):
    ON_ERROR = "on-error"
    NEVER = "never"


class UseCache(enum.Enum):
    IF_PRESENT = "if-present"
    NEVER = "never"


class SocialMediaProfiles(BaseModel):
    """Social media profiles model."""

    twitter: Optional[str] = None
    facebook: Optional[str] = None
    github: Optional[str] = None


class Experience(BaseModel):
    """Experience model for LinkedIn profiles."""

    company: Optional[str] = None
    title: Optional[str] = None
    description: Optional[str] = None
    location: Optional[str] = None
    starts_at: Optional[dict[str, int]] = None
    ends_at: Optional[dict[str, int]] = None
    company_linkedin_profile_url: Optional[str] = None


class Education(BaseModel):
    """Education model for LinkedIn profiles."""

    school: Optional[str] = None
    degree_name: Optional[str] = None
    field_of_study: Optional[str] = None
    starts_at: Optional[dict[str, int]] = None
    ends_at: Optional[dict[str, int]] = None
    school_linkedin_profile_url: Optional[str] = None


class PersonProfileResponse(BaseModel):
    """Response model for LinkedIn person profile.

    This model represents the response from Enrichlayer's LinkedIn profile API.
    The API returns comprehensive profile data including work experience,
    education, skills, and contact information (when available).

    Example API Response:
        {
            "public_identifier": "johnsmith",
            "full_name": "John Smith",
            "occupation": "Software Engineer at Tech Corp",
            "experiences": [
                {
                    "company": "Tech Corp",
                    "title": "Software Engineer",
                    "starts_at": {"year": 2020, "month": 1}
                }
            ],
            "education": [...],
            "skills": ["Python", "JavaScript", ...]
        }
    """

    public_identifier: Optional[str] = None
    profile_pic_url: Optional[str] = None
    full_name: Optional[str] = None
    first_name: Optional[str] = None
    last_name: Optional[str] = None
    occupation: Optional[str] = None
    headline: Optional[str] = None
    summary: Optional[str] = None
    country: Optional[str] = None
    country_full_name: Optional[str] = None
    city: Optional[str] = None
    state: Optional[str] = None
    experiences: Optional[list[Experience]] = None
    education: Optional[list[Education]] = None
    languages: Optional[list[str]] = None
    skills: Optional[list[str]] = None
    inferred_salary: Optional[dict[str, Any]] = None
    personal_email: Optional[str] = None
    personal_contact_number: Optional[str] = None
    social_media_profiles: Optional[SocialMediaProfiles] = None
    extra: Optional[dict[str, Any]] = None


class SimilarProfile(BaseModel):
    """Similar profile model for LinkedIn person lookup."""

    similarity: float
    linkedin_profile_url: str


class PersonLookupResponse(BaseModel):
    """Response model for LinkedIn person lookup.

    This model represents the response from Enrichlayer's person lookup API.
    The API returns a LinkedIn profile URL and similarity scores when
    searching for a person by name and company.

    Example API Response:
        {
            "url": "https://www.linkedin.com/in/johnsmith/",
            "name_similarity_score": 0.95,
            "company_similarity_score": 0.88,
            "title_similarity_score": 0.75,
            "location_similarity_score": 0.60
        }
    """

    url: str | None = None
    name_similarity_score: float | None
    company_similarity_score: float | None
    title_similarity_score: float | None
    location_similarity_score: float | None
    last_updated: datetime.datetime | None = None
    profile: PersonProfileResponse | None = None


class RoleLookupResponse(BaseModel):
    """Response model for LinkedIn role lookup.

    This model represents the response from Enrichlayer's role lookup API.
    The API returns LinkedIn profile data for a specific role at a company.

    Example API Response:
        {
            "linkedin_profile_url": "https://www.linkedin.com/in/johnsmith/",
            "profile_data": {...}  // Full PersonProfileResponse data when enrich_profile=True
        }
    """

    linkedin_profile_url: Optional[str] = None
    profile_data: Optional[PersonProfileResponse] = None


class ProfilePictureResponse(BaseModel):
    """Response model for LinkedIn profile picture.

    This model represents the response from Enrichlayer's profile picture API.
    The API returns a URL to the person's LinkedIn profile picture.

    Example API Response:
        {
            "tmp_profile_pic_url": "https://media.licdn.com/dms/image/..."
        }
    """

    tmp_profile_pic_url: str = Field(
        ..., description="URL of the profile picture", alias="tmp_profile_pic_url"
    )

    @property
    def profile_picture_url(self) -> str:
        """Backward compatibility property for profile_picture_url."""
        return self.tmp_profile_pic_url


class EnrichlayerClient:
    """Client for interacting with the Enrichlayer API."""

    API_BASE_URL = "https://enrichlayer.com/api/v2"

    def __init__(
        self,
        credentials: Optional[APIKeyCredentials] = None,
        custom_requests: Optional[Requests] = None,
    ):
        """
        Initialize the Enrichlayer client.

        Args:
            credentials: The credentials to use for authentication.
            custom_requests: Custom Requests instance for testing.
        """
        if custom_requests:
            self._requests = custom_requests
        else:
            headers: dict[str, str] = {
                "Content-Type": "application/json",
            }
            if credentials:
                headers["Authorization"] = (
                    f"Bearer {credentials.api_key.get_secret_value()}"
                )

            self._requests = Requests(
                extra_headers=headers,
                raise_for_status=False,
            )

    async def _handle_response(self, response) -> Any:
        """
        Handle API response and check for errors.

        Args:
            response: The response object from the request.

        Returns:
            The response data.

        Raises:
            EnrichlayerAPIException: If the API request fails.
        """
        if not response.ok:
            try:
                error_data = response.json()
                error_message = error_data.get("message", "")
            except JSONDecodeError:
                error_message = response.text

            raise EnrichlayerAPIException(
                f"Enrichlayer API request failed ({response.status_code}): {error_message}",
                response.status_code,
            )

        return response.json()

    async def fetch_profile(
        self,
        linkedin_url: str,
        fallback_to_cache: FallbackToCache = FallbackToCache.ON_ERROR,
        use_cache: UseCache = UseCache.IF_PRESENT,
        include_skills: bool = False,
        include_inferred_salary: bool = False,
        include_personal_email: bool = False,
        include_personal_contact_number: bool = False,
        include_social_media: bool = False,
        include_extra: bool = False,
    ) -> PersonProfileResponse:
        """
        Fetch a LinkedIn profile with optional parameters.

        Args:
            linkedin_url: The LinkedIn profile URL to fetch.
            fallback_to_cache: Cache usage if live fetch fails ('on-error' or 'never').
            use_cache: Cache utilization ('if-present' or 'never').
            include_skills: Whether to include skills data.
            include_inferred_salary: Whether to include inferred salary data.
            include_personal_email: Whether to include personal email.
            include_personal_contact_number: Whether to include personal contact number.
            include_social_media: Whether to include social media profiles.
            include_extra: Whether to include additional data.

        Returns:
            The LinkedIn profile data.

        Raises:
            EnrichlayerAPIException: If the API request fails.
        """
        params = {
            "url": linkedin_url,
            "fallback_to_cache": fallback_to_cache.value.lower(),
            "use_cache": use_cache.value.lower(),
        }

        if include_skills:
            params["skills"] = "include"
        if include_inferred_salary:
            params["inferred_salary"] = "include"
        if include_personal_email:
            params["personal_email"] = "include"
        if include_personal_contact_number:
            params["personal_contact_number"] = "include"
        if include_social_media:
            params["twitter_profile_id"] = "include"
            params["facebook_profile_id"] = "include"
            params["github_profile_id"] = "include"
        if include_extra:
            params["extra"] = "include"

        response = await self._requests.get(
            f"{self.API_BASE_URL}/profile", params=params
        )
        return PersonProfileResponse(**await self._handle_response(response))

    async def lookup_person(
        self,
        first_name: str,
        company_domain: str,
        last_name: str | None = None,
        location: Optional[str] = None,
        title: Optional[str] = None,
        include_similarity_checks: bool = False,
        enrich_profile: bool = False,
    ) -> PersonLookupResponse:
        """
        Look up a LinkedIn profile by person's information.

        Args:
            first_name: The person's first name.
            last_name: The person's last name.
            company_domain: The domain of the company they work for.
            location: The person's location.
            title: The person's job title.
            include_similarity_checks: Whether to include similarity checks.
            enrich_profile: Whether to enrich the profile.

        Returns:
            The LinkedIn profile lookup result.

        Raises:
            EnrichlayerAPIException: If the API request fails.
        """
        params = {"first_name": first_name, "company_domain": company_domain}

        if last_name:
            params["last_name"] = last_name
        if location:
            params["location"] = location
        if title:
            params["title"] = title
        if include_similarity_checks:
            params["similarity_checks"] = "include"
        if enrich_profile:
            params["enrich_profile"] = "enrich"

        response = await self._requests.get(
            f"{self.API_BASE_URL}/profile/resolve", params=params
        )
        return PersonLookupResponse(**await self._handle_response(response))

    async def lookup_role(
        self, role: str, company_name: str, enrich_profile: bool = False
    ) -> RoleLookupResponse:
        """
        Look up a LinkedIn profile by role in a company.

        Args:
            role: The role title (e.g., CEO, CTO).
            company_name: The name of the company.
            enrich_profile: Whether to enrich the profile.

        Returns:
            The LinkedIn profile lookup result.

        Raises:
            EnrichlayerAPIException: If the API request fails.
        """
        params = {
            "role": role,
            "company_name": company_name,
        }

        if enrich_profile:
            params["enrich_profile"] = "enrich"

        response = await self._requests.get(
            f"{self.API_BASE_URL}/find/company/role", params=params
        )
        return RoleLookupResponse(**await self._handle_response(response))

    async def get_profile_picture(
        self, linkedin_profile_url: str
    ) -> ProfilePictureResponse:
        """
        Get a LinkedIn profile picture URL.

        Args:
            linkedin_profile_url: The LinkedIn profile URL.

        Returns:
            The profile picture URL.

        Raises:
            EnrichlayerAPIException: If the API request fails.
        """
        params = {
            "linkedin_person_profile_url": linkedin_profile_url,
        }

        response = await self._requests.get(
            f"{self.API_BASE_URL}/person/profile-picture", params=params
        )
        return ProfilePictureResponse(**await self._handle_response(response))
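The include_* flags in fetch_profile map one-to-one onto query parameters, except include_social_media, which fans out to three provider-specific parameters. A standalone check mirroring that mapping:

params = {"url": "https://www.linkedin.com/in/example/"}
include_skills, include_social_media = True, True
if include_skills:
    params["skills"] = "include"
if include_social_media:
    # One flag fans out to three provider-specific params:
    for key in ("twitter_profile_id", "facebook_profile_id", "github_profile_id"):
        params[key] = "include"
assert params["skills"] == "include" and len(params) == 5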
34
autogpt_platform/backend/backend/blocks/enrichlayer/_auth.py
Normal file
@@ -0,0 +1,34 @@
"""
Authentication module for Enrichlayer API integration.

This module provides credential types and test credentials for the Enrichlayer API.
"""

from typing import Literal

from pydantic import SecretStr

from backend.data.model import APIKeyCredentials, CredentialsMetaInput
from backend.integrations.providers import ProviderName

# Define the type of credentials input expected for Enrichlayer API
EnrichlayerCredentialsInput = CredentialsMetaInput[
    Literal[ProviderName.ENRICHLAYER], Literal["api_key"]
]

# Mock credentials for testing Enrichlayer API integration
TEST_CREDENTIALS = APIKeyCredentials(
    id="1234a567-89bc-4def-ab12-3456cdef7890",
    provider="enrichlayer",
    api_key=SecretStr("mock-enrichlayer-api-key"),
    title="Mock Enrichlayer API key",
    expires_at=None,
)

# Dictionary representation of test credentials for input fields
TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.title,
}
527
autogpt_platform/backend/backend/blocks/enrichlayer/linkedin.py
Normal file
@@ -0,0 +1,527 @@
"""
Block definitions for Enrichlayer API integration.

This module implements blocks for interacting with the Enrichlayer API,
which provides access to LinkedIn profile data and related information.
"""

import logging
from typing import Optional

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import APIKeyCredentials, CredentialsField, SchemaField
from backend.util.type import MediaFileType

from ._api import (
    EnrichlayerClient,
    Experience,
    FallbackToCache,
    PersonLookupResponse,
    PersonProfileResponse,
    RoleLookupResponse,
    UseCache,
)
from ._auth import TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, EnrichlayerCredentialsInput

logger = logging.getLogger(__name__)


class GetLinkedinProfileBlock(Block):
    """Block to fetch LinkedIn profile data using Enrichlayer API."""

    class Input(BlockSchema):
        """Input schema for GetLinkedinProfileBlock."""

        linkedin_url: str = SchemaField(
            description="LinkedIn profile URL to fetch data from",
            placeholder="https://www.linkedin.com/in/username/",
        )
        fallback_to_cache: FallbackToCache = SchemaField(
            description="Cache usage if live fetch fails",
            default=FallbackToCache.ON_ERROR,
            advanced=True,
        )
        use_cache: UseCache = SchemaField(
            description="Cache utilization strategy",
            default=UseCache.IF_PRESENT,
            advanced=True,
        )
        include_skills: bool = SchemaField(
            description="Include skills data",
            default=False,
            advanced=True,
        )
        include_inferred_salary: bool = SchemaField(
            description="Include inferred salary data",
            default=False,
            advanced=True,
        )
        include_personal_email: bool = SchemaField(
            description="Include personal email",
            default=False,
            advanced=True,
        )
        include_personal_contact_number: bool = SchemaField(
            description="Include personal contact number",
            default=False,
            advanced=True,
        )
        include_social_media: bool = SchemaField(
            description="Include social media profiles",
            default=False,
            advanced=True,
        )
        include_extra: bool = SchemaField(
            description="Include additional data",
            default=False,
            advanced=True,
        )
        credentials: EnrichlayerCredentialsInput = CredentialsField(
            description="Enrichlayer API credentials"
        )

    class Output(BlockSchema):
        """Output schema for GetLinkedinProfileBlock."""

        profile: PersonProfileResponse = SchemaField(
            description="LinkedIn profile data"
        )
        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        """Initialize GetLinkedinProfileBlock."""
        super().__init__(
            id="f6e0ac73-4f1d-4acb-b4b7-b67066c5984e",
            description="Fetch LinkedIn profile data using Enrichlayer",
            categories={BlockCategory.SOCIAL},
            input_schema=GetLinkedinProfileBlock.Input,
            output_schema=GetLinkedinProfileBlock.Output,
            test_input={
                "linkedin_url": "https://www.linkedin.com/in/williamhgates/",
                "include_skills": True,
                "include_social_media": True,
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_output=[
                (
                    "profile",
                    PersonProfileResponse(
                        public_identifier="williamhgates",
                        full_name="Bill Gates",
                        occupation="Co-chair at Bill & Melinda Gates Foundation",
                        experiences=[
                            Experience(
                                company="Bill & Melinda Gates Foundation",
                                title="Co-chair",
                                starts_at={"year": 2000},
                            )
                        ],
                    ),
                )
            ],
            test_credentials=TEST_CREDENTIALS,
            test_mock={
                "_fetch_profile": lambda *args, **kwargs: PersonProfileResponse(
                    public_identifier="williamhgates",
                    full_name="Bill Gates",
                    occupation="Co-chair at Bill & Melinda Gates Foundation",
                    experiences=[
                        Experience(
                            company="Bill & Melinda Gates Foundation",
                            title="Co-chair",
                            starts_at={"year": 2000},
                        )
                    ],
                ),
            },
        )

    @staticmethod
    async def _fetch_profile(
        credentials: APIKeyCredentials,
        linkedin_url: str,
        fallback_to_cache: FallbackToCache = FallbackToCache.ON_ERROR,
        use_cache: UseCache = UseCache.IF_PRESENT,
        include_skills: bool = False,
        include_inferred_salary: bool = False,
        include_personal_email: bool = False,
        include_personal_contact_number: bool = False,
        include_social_media: bool = False,
        include_extra: bool = False,
    ):
        client = EnrichlayerClient(credentials)
        profile = await client.fetch_profile(
            linkedin_url=linkedin_url,
            fallback_to_cache=fallback_to_cache,
            use_cache=use_cache,
            include_skills=include_skills,
            include_inferred_salary=include_inferred_salary,
            include_personal_email=include_personal_email,
            include_personal_contact_number=include_personal_contact_number,
            include_social_media=include_social_media,
            include_extra=include_extra,
        )
        return profile

    async def run(
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        """
        Run the block to fetch LinkedIn profile data.

        Args:
            input_data: Input parameters for the block
            credentials: API key credentials for Enrichlayer
            **kwargs: Additional keyword arguments

        Yields:
            Tuples of (output_name, output_value)
        """
        try:
            profile = await self._fetch_profile(
                credentials=credentials,
                linkedin_url=input_data.linkedin_url,
                fallback_to_cache=input_data.fallback_to_cache,
                use_cache=input_data.use_cache,
                include_skills=input_data.include_skills,
                include_inferred_salary=input_data.include_inferred_salary,
                include_personal_email=input_data.include_personal_email,
                include_personal_contact_number=input_data.include_personal_contact_number,
                include_social_media=input_data.include_social_media,
                include_extra=input_data.include_extra,
            )
            yield "profile", profile
        except Exception as e:
            logger.error(f"Error fetching LinkedIn profile: {str(e)}")
            yield "error", str(e)


class LinkedinPersonLookupBlock(Block):
    """Block to look up LinkedIn profiles by person's information using Enrichlayer API."""

    class Input(BlockSchema):
        """Input schema for LinkedinPersonLookupBlock."""

        first_name: str = SchemaField(
            description="Person's first name",
            placeholder="John",
            advanced=False,
        )
        last_name: str | None = SchemaField(
            description="Person's last name",
            placeholder="Doe",
            default=None,
            advanced=False,
        )
        company_domain: str = SchemaField(
            description="Domain of the company they work for (optional)",
            placeholder="example.com",
            advanced=False,
        )
        location: Optional[str] = SchemaField(
            description="Person's location (optional)",
            placeholder="San Francisco",
            default=None,
        )
        title: Optional[str] = SchemaField(
            description="Person's job title (optional)",
            placeholder="CEO",
            default=None,
        )
        include_similarity_checks: bool = SchemaField(
            description="Include similarity checks",
            default=False,
            advanced=True,
        )
        enrich_profile: bool = SchemaField(
            description="Enrich the profile with additional data",
            default=False,
            advanced=True,
        )
        credentials: EnrichlayerCredentialsInput = CredentialsField(
            description="Enrichlayer API credentials"
        )

    class Output(BlockSchema):
        """Output schema for LinkedinPersonLookupBlock."""

        lookup_result: PersonLookupResponse = SchemaField(
            description="LinkedIn profile lookup result"
        )
        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        """Initialize LinkedinPersonLookupBlock."""
        super().__init__(
            id="d237a98a-5c4b-4a1c-b9e3-e6f9a6c81df7",
            description="Look up LinkedIn profiles by person information using Enrichlayer",
            categories={BlockCategory.SOCIAL},
            input_schema=LinkedinPersonLookupBlock.Input,
            output_schema=LinkedinPersonLookupBlock.Output,
            test_input={
                "first_name": "Bill",
                "last_name": "Gates",
                "company_domain": "gatesfoundation.org",
                "include_similarity_checks": True,
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_output=[
                (
                    "lookup_result",
                    PersonLookupResponse(
                        url="https://www.linkedin.com/in/williamhgates/",
                        name_similarity_score=0.93,
                        company_similarity_score=0.83,
                        title_similarity_score=0.3,
                        location_similarity_score=0.20,
                    ),
                )
            ],
            test_credentials=TEST_CREDENTIALS,
            test_mock={
                "_lookup_person": lambda *args, **kwargs: PersonLookupResponse(
                    url="https://www.linkedin.com/in/williamhgates/",
                    name_similarity_score=0.93,
                    company_similarity_score=0.83,
                    title_similarity_score=0.3,
                    location_similarity_score=0.20,
                )
            },
        )

    @staticmethod
    async def _lookup_person(
        credentials: APIKeyCredentials,
        first_name: str,
        company_domain: str,
        last_name: str | None = None,
        location: Optional[str] = None,
        title: Optional[str] = None,
        include_similarity_checks: bool = False,
        enrich_profile: bool = False,
    ):
        client = EnrichlayerClient(credentials=credentials)
        lookup_result = await client.lookup_person(
            first_name=first_name,
            last_name=last_name,
            company_domain=company_domain,
            location=location,
            title=title,
            include_similarity_checks=include_similarity_checks,
            enrich_profile=enrich_profile,
        )
        return lookup_result

    async def run(
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        """
        Run the block to look up LinkedIn profiles.

        Args:
            input_data: Input parameters for the block
            credentials: API key credentials for Enrichlayer
            **kwargs: Additional keyword arguments

        Yields:
            Tuples of (output_name, output_value)
        """
        try:
            lookup_result = await self._lookup_person(
                credentials=credentials,
                first_name=input_data.first_name,
                last_name=input_data.last_name,
                company_domain=input_data.company_domain,
                location=input_data.location,
                title=input_data.title,
                include_similarity_checks=input_data.include_similarity_checks,
                enrich_profile=input_data.enrich_profile,
            )
            yield "lookup_result", lookup_result
        except Exception as e:
            logger.error(f"Error looking up LinkedIn profile: {str(e)}")
            yield "error", str(e)


class LinkedinRoleLookupBlock(Block):
    """Block to look up LinkedIn profiles by role in a company using Enrichlayer API."""

    class Input(BlockSchema):
        """Input schema for LinkedinRoleLookupBlock."""

        role: str = SchemaField(
            description="Role title (e.g., CEO, CTO)",
            placeholder="CEO",
        )
        company_name: str = SchemaField(
            description="Name of the company",
            placeholder="Microsoft",
        )
        enrich_profile: bool = SchemaField(
            description="Enrich the profile with additional data",
            default=False,
            advanced=True,
        )
        credentials: EnrichlayerCredentialsInput = CredentialsField(
            description="Enrichlayer API credentials"
        )

    class Output(BlockSchema):
        """Output schema for LinkedinRoleLookupBlock."""

        role_lookup_result: RoleLookupResponse = SchemaField(
            description="LinkedIn role lookup result"
        )
        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        """Initialize LinkedinRoleLookupBlock."""
        super().__init__(
            id="3b9fc742-06d4-49c7-b5ce-7e302dd7c8a7",
            description="Look up LinkedIn profiles by role in a company using Enrichlayer",
            categories={BlockCategory.SOCIAL},
            input_schema=LinkedinRoleLookupBlock.Input,
            output_schema=LinkedinRoleLookupBlock.Output,
|
||||
test_input={
|
||||
"role": "Co-chair",
|
||||
"company_name": "Gates Foundation",
|
||||
"enrich_profile": True,
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
},
|
||||
test_output=[
|
||||
(
|
||||
"role_lookup_result",
|
||||
RoleLookupResponse(
|
||||
linkedin_profile_url="https://www.linkedin.com/in/williamhgates/",
|
||||
),
|
||||
)
|
||||
],
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_mock={
|
||||
"_lookup_role": lambda *args, **kwargs: RoleLookupResponse(
|
||||
linkedin_profile_url="https://www.linkedin.com/in/williamhgates/",
|
||||
),
|
||||
},
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
async def _lookup_role(
|
||||
credentials: APIKeyCredentials,
|
||||
role: str,
|
||||
company_name: str,
|
||||
enrich_profile: bool = False,
|
||||
):
|
||||
client = EnrichlayerClient(credentials=credentials)
|
||||
role_lookup_result = await client.lookup_role(
|
||||
role=role,
|
||||
company_name=company_name,
|
||||
enrich_profile=enrich_profile,
|
||||
)
|
||||
return role_lookup_result
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
"""
|
||||
Run the block to look up LinkedIn profiles by role.
|
||||
|
||||
Args:
|
||||
input_data: Input parameters for the block
|
||||
credentials: API key credentials for Enrichlayer
|
||||
**kwargs: Additional keyword arguments
|
||||
|
||||
Yields:
|
||||
Tuples of (output_name, output_value)
|
||||
"""
|
||||
try:
|
||||
role_lookup_result = await self._lookup_role(
|
||||
credentials=credentials,
|
||||
role=input_data.role,
|
||||
company_name=input_data.company_name,
|
||||
enrich_profile=input_data.enrich_profile,
|
||||
)
|
||||
yield "role_lookup_result", role_lookup_result
|
||||
except Exception as e:
|
||||
logger.error(f"Error looking up role in company: {str(e)}")
|
||||
yield "error", str(e)
|
||||
|
||||
|
||||
class GetLinkedinProfilePictureBlock(Block):
|
||||
"""Block to get LinkedIn profile pictures using Enrichlayer API."""
|
||||
|
||||
class Input(BlockSchema):
|
||||
"""Input schema for GetLinkedinProfilePictureBlock."""
|
||||
|
||||
linkedin_profile_url: str = SchemaField(
|
||||
description="LinkedIn profile URL",
|
||||
placeholder="https://www.linkedin.com/in/username/",
|
||||
)
|
||||
credentials: EnrichlayerCredentialsInput = CredentialsField(
|
||||
description="Enrichlayer API credentials"
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
"""Output schema for GetLinkedinProfilePictureBlock."""
|
||||
|
||||
profile_picture_url: MediaFileType = SchemaField(
|
||||
description="LinkedIn profile picture URL"
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the request failed")
|
||||
|
||||
def __init__(self):
|
||||
"""Initialize GetLinkedinProfilePictureBlock."""
|
||||
super().__init__(
|
||||
id="68d5a942-9b3f-4e9a-b7c1-d96ea4321f0d",
|
||||
description="Get LinkedIn profile pictures using Enrichlayer",
|
||||
categories={BlockCategory.SOCIAL},
|
||||
input_schema=GetLinkedinProfilePictureBlock.Input,
|
||||
output_schema=GetLinkedinProfilePictureBlock.Output,
|
||||
test_input={
|
||||
"linkedin_profile_url": "https://www.linkedin.com/in/williamhgates/",
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
},
|
||||
test_output=[
|
||||
(
|
||||
"profile_picture_url",
|
||||
"https://media.licdn.com/dms/image/C4D03AQFj-xjuXrLFSQ/profile-displayphoto-shrink_800_800/0/1576881858598?e=1686787200&v=beta&t=zrQC76QwsfQQIWthfOnrKRBMZ5D-qIAvzLXLmWgYvTk",
|
||||
)
|
||||
],
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_mock={
|
||||
"_get_profile_picture": lambda *args, **kwargs: "https://media.licdn.com/dms/image/C4D03AQFj-xjuXrLFSQ/profile-displayphoto-shrink_800_800/0/1576881858598?e=1686787200&v=beta&t=zrQC76QwsfQQIWthfOnrKRBMZ5D-qIAvzLXLmWgYvTk",
|
||||
},
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
async def _get_profile_picture(
|
||||
credentials: APIKeyCredentials, linkedin_profile_url: str
|
||||
):
|
||||
client = EnrichlayerClient(credentials=credentials)
|
||||
profile_picture_response = await client.get_profile_picture(
|
||||
linkedin_profile_url=linkedin_profile_url,
|
||||
)
|
||||
return profile_picture_response.profile_picture_url
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
"""
|
||||
Run the block to get LinkedIn profile pictures.
|
||||
|
||||
Args:
|
||||
input_data: Input parameters for the block
|
||||
credentials: API key credentials for Enrichlayer
|
||||
**kwargs: Additional keyword arguments
|
||||
|
||||
Yields:
|
||||
Tuples of (output_name, output_value)
|
||||
"""
|
||||
try:
|
||||
profile_picture = await self._get_profile_picture(
|
||||
credentials=credentials,
|
||||
linkedin_profile_url=input_data.linkedin_profile_url,
|
||||
)
|
||||
yield "profile_picture_url", profile_picture
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting profile picture: {str(e)}")
|
||||
yield "error", str(e)
|
||||
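All four Enrichlayer blocks above share one pattern: a thin static helper wrapping EnrichlayerClient, plus a run() that yields either a result output or an "error" string. The following is a sketch, not part of this changeset, showing how one of them can be exercised in isolation with pytest-mock, mirroring the style of the block test module later in this diff; TEST_CREDENTIALS and TEST_CREDENTIALS_INPUT are the fixtures the blocks' own test_input already references, and the mocked URL is illustrative only.

import pytest


@pytest.mark.asyncio
async def test_profile_picture_block_sketch(mocker):
    block = GetLinkedinProfilePictureBlock()
    # Patch out the network helper so no real Enrichlayer call is made.
    mocker.patch.object(
        block,
        "_get_profile_picture",
        return_value="https://media.licdn.com/example.jpg",  # placeholder value
    )
    input_data = block.Input(
        linkedin_profile_url="https://www.linkedin.com/in/williamhgates/",
        credentials=TEST_CREDENTIALS_INPUT,
    )
    outputs = {}
    async for name, value in block.run(input_data, credentials=TEST_CREDENTIALS):
        outputs[name] = value
    assert outputs["profile_picture_url"].startswith("https://")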
autogpt_platform/backend/backend/blocks/firecrawl/extract.py (Normal file → Executable file)
@@ -29,8 +29,8 @@ class FirecrawlExtractBlock(Block):
     prompt: str | None = SchemaField(
         description="The prompt to use for the crawl", default=None, advanced=False
     )
-    output_schema: str | None = SchemaField(
-        description="A more rigid structure if you already know the JSON layout.",
+    output_schema: dict | None = SchemaField(
+        description="A Json Schema describing the output structure if more rigid structure is desired.",
         default=None,
     )
     enable_web_search: bool = SchemaField(
@@ -56,7 +56,6 @@ class FirecrawlExtractBlock(Block):
 
         app = FirecrawlApp(api_key=credentials.api_key.get_secret_value())
 
-        # Sync call
         extract_result = app.extract(
             urls=input_data.urls,
             prompt=input_data.prompt,
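The first hunk relaxes output_schema from a plain string to a JSON Schema dict. A hypothetical input payload under the new typing (the URL and field names are invented for illustration):

extract_input = {
    "urls": ["https://example.com/pricing"],
    "prompt": "Extract the plan names and monthly prices.",
    # Previously a string; now a structured JSON Schema dict.
    "output_schema": {
        "type": "object",
        "properties": {
            "plans": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "name": {"type": "string"},
                        "monthly_price_usd": {"type": "number"},
                    },
                },
            }
        },
    },
}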
File diff suppressed because it is too large.
@@ -37,6 +37,7 @@ LLMProviderName = Literal[
     ProviderName.OPENAI,
     ProviderName.OPEN_ROUTER,
     ProviderName.LLAMA_API,
+    ProviderName.V0,
 ]
 AICredentials = CredentialsMetaInput[LLMProviderName, Literal["api_key"]]
 
@@ -155,6 +156,10 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
     LLAMA_API_LLAMA4_MAVERICK = "Llama-4-Maverick-17B-128E-Instruct-FP8"
     LLAMA_API_LLAMA3_3_8B = "Llama-3.3-8B-Instruct"
     LLAMA_API_LLAMA3_3_70B = "Llama-3.3-70B-Instruct"
+    # v0 by Vercel models
+    V0_1_5_MD = "v0-1.5-md"
+    V0_1_5_LG = "v0-1.5-lg"
+    V0_1_0_MD = "v0-1.0-md"
 
     @property
     def metadata(self) -> ModelMetadata:
@@ -280,6 +285,10 @@ MODEL_METADATA = {
     LlmModel.LLAMA_API_LLAMA4_MAVERICK: ModelMetadata("llama_api", 128000, 4028),
     LlmModel.LLAMA_API_LLAMA3_3_8B: ModelMetadata("llama_api", 128000, 4028),
     LlmModel.LLAMA_API_LLAMA3_3_70B: ModelMetadata("llama_api", 128000, 4028),
+    # v0 by Vercel models
+    LlmModel.V0_1_5_MD: ModelMetadata("v0", 128000, 64000),
+    LlmModel.V0_1_5_LG: ModelMetadata("v0", 512000, 64000),
+    LlmModel.V0_1_0_MD: ModelMetadata("v0", 128000, 64000),
 }
 
 for model in LlmModel:
@@ -676,7 +685,11 @@ async def llm_call(
         client = openai.OpenAI(
             base_url="https://api.aimlapi.com/v2",
             api_key=credentials.api_key.get_secret_value(),
-            default_headers={"X-Project": "AutoGPT"},
+            default_headers={
+                "X-Project": "AutoGPT",
+                "X-Title": "AutoGPT",
+                "HTTP-Referer": "https://github.com/Significant-Gravitas/AutoGPT",
+            },
         )
 
         completion = client.chat.completions.create(
@@ -696,6 +709,42 @@ async def llm_call(
             ),
             reasoning=None,
         )
+    elif provider == "v0":
+        tools_param = tools if tools else openai.NOT_GIVEN
+        client = openai.AsyncOpenAI(
+            base_url="https://api.v0.dev/v1",
+            api_key=credentials.api_key.get_secret_value(),
+        )
+
+        response_format = None
+        if json_format:
+            response_format = {"type": "json_object"}
+
+        parallel_tool_calls_param = get_parallel_tool_calls_param(
+            llm_model, parallel_tool_calls
+        )
+
+        response = await client.chat.completions.create(
+            model=llm_model.value,
+            messages=prompt,  # type: ignore
+            response_format=response_format,  # type: ignore
+            max_tokens=max_tokens,
+            tools=tools_param,  # type: ignore
+            parallel_tool_calls=parallel_tool_calls_param,
+        )
+
+        tool_calls = extract_openai_tool_calls(response)
+        reasoning = extract_openai_reasoning(response)
+
+        return LLMResponse(
+            raw_response=response.choices[0].message,
+            prompt=prompt,
+            response=response.choices[0].message.content or "",
+            tool_calls=tool_calls,
+            prompt_tokens=response.usage.prompt_tokens if response.usage else 0,
+            completion_tokens=response.usage.completion_tokens if response.usage else 0,
+            reasoning=reasoning,
+        )
     else:
         raise ValueError(f"Unsupported LLM provider: {provider}")
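The new branch treats v0 as an OpenAI-compatible endpoint: the stock openai SDK is pointed at https://api.v0.dev/v1 and the rest is the standard chat-completions flow. A standalone sketch of that pattern, outside the llm_call() plumbing, with a placeholder API key:

import asyncio

import openai


async def main():
    # Same base_url and model name as the branch above; the key is a placeholder.
    client = openai.AsyncOpenAI(
        base_url="https://api.v0.dev/v1",
        api_key="v0_...",
    )
    response = await client.chat.completions.create(
        model="v0-1.5-md",
        messages=[{"role": "user", "content": "Say hello."}],
        max_tokens=64,
    )
    print(response.choices[0].message.content)


asyncio.run(main())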
@@ -291,9 +291,32 @@ class SmartDecisionMakerBlock(Block):
 
         for link in links:
             sink_name = SmartDecisionMakerBlock.cleanup(link.sink_name)
-            properties[sink_name] = sink_block_input_schema.get_field_schema(
-                link.sink_name
-            )
+
+            # Handle dynamic fields (e.g., values_#_*, items_$_*, etc.)
+            # These are fields that get merged by the executor into their base field
+            if (
+                "_#_" in link.sink_name
+                or "_$_" in link.sink_name
+                or "_@_" in link.sink_name
+            ):
+                # For dynamic fields, provide a generic string schema
+                # The executor will handle merging these into the appropriate structure
+                properties[sink_name] = {
+                    "type": "string",
+                    "description": f"Dynamic value for {link.sink_name}",
+                }
+            else:
+                # For regular fields, use the block's schema
+                try:
+                    properties[sink_name] = sink_block_input_schema.get_field_schema(
+                        link.sink_name
+                    )
+                except (KeyError, AttributeError):
+                    # If the field doesn't exist in the schema, provide a generic schema
+                    properties[sink_name] = {
+                        "type": "string",
+                        "description": f"Value for {link.sink_name}",
+                    }
 
         tool_function["parameters"] = {
             **block.input_schema.jsonschema(),
@@ -478,10 +501,6 @@ class SmartDecisionMakerBlock(Block):
                 }
             )
         prompt.extend(tool_output)
-        if input_data.multiple_tool_calls:
-            input_data.sys_prompt += "\nYou can call a tool (different tools) multiple times in a single response."
-        else:
-            input_data.sys_prompt += "\nOnly provide EXACTLY one function call, multiple tool calls is strictly prohibited."
 
         values = input_data.prompt_values
         if values:
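For concreteness, this is the shape the dynamic-field branch in the first hunk produces. Given a link whose sink_name is "values_#_name", the tool signature gains a generic string property instead of a schema looked up on the sink block; the sanitized key below is an assumption (cleanup() is defined elsewhere in the block), while the value schema matches the code verbatim:

sink_name = "values_#_name"
properties: dict[str, dict] = {}
properties["values_name"] = {  # assumed result of SmartDecisionMakerBlock.cleanup()
    "type": "string",
    "description": f"Dynamic value for {sink_name}",
}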
@@ -0,0 +1 @@

@@ -0,0 +1,283 @@
import logging
from typing import Any

from pydantic import BaseModel

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.clients import get_database_manager_async_client

logger = logging.getLogger(__name__)


# Duplicate pydantic models for store data so we don't accidentally change the
# data shape in the blocks unintentionally when editing the backend
class LibraryAgent(BaseModel):
    """Model representing an agent in the user's library."""

    library_agent_id: str = ""
    agent_id: str = ""
    agent_version: int = 0
    agent_name: str = ""
    description: str = ""
    creator: str = ""
    is_archived: bool = False
    categories: list[str] = []


class AddToLibraryFromStoreBlock(Block):
    """
    Block that adds an agent from the store to the user's library.
    This enables users to easily import agents from the marketplace into their personal collection.
    """

    class Input(BlockSchema):
        store_listing_version_id: str = SchemaField(
            description="The ID of the store listing version to add to library"
        )
        agent_name: str | None = SchemaField(
            description="Optional custom name for the agent in your library",
            default=None,
        )

    class Output(BlockSchema):
        success: bool = SchemaField(
            description="Whether the agent was successfully added to library"
        )
        library_agent_id: str = SchemaField(
            description="The ID of the library agent entry"
        )
        agent_id: str = SchemaField(description="The ID of the agent graph")
        agent_version: int = SchemaField(
            description="The version number of the agent graph"
        )
        agent_name: str = SchemaField(description="The name of the agent")
        message: str = SchemaField(description="Success or error message")

    def __init__(self):
        super().__init__(
            id="2602a7b1-3f4d-4e5f-9c8b-1a2b3c4d5e6f",
            description="Add an agent from the store to your personal library",
            categories={BlockCategory.BASIC},
            input_schema=AddToLibraryFromStoreBlock.Input,
            output_schema=AddToLibraryFromStoreBlock.Output,
            test_input={
                "store_listing_version_id": "test-listing-id",
                "agent_name": "My Custom Agent",
            },
            test_output=[
                ("success", True),
                ("library_agent_id", "test-library-id"),
                ("agent_id", "test-agent-id"),
                ("agent_version", 1),
                ("agent_name", "Test Agent"),
                ("message", "Agent successfully added to library"),
            ],
            test_mock={
                "_add_to_library": lambda *_, **__: LibraryAgent(
                    library_agent_id="test-library-id",
                    agent_id="test-agent-id",
                    agent_version=1,
                    agent_name="Test Agent",
                )
            },
        )

    async def run(
        self,
        input_data: Input,
        *,
        user_id: str,
        **kwargs,
    ) -> BlockOutput:
        library_agent = await self._add_to_library(
            user_id=user_id,
            store_listing_version_id=input_data.store_listing_version_id,
            custom_name=input_data.agent_name,
        )

        yield "success", True
        yield "library_agent_id", library_agent.library_agent_id
        yield "agent_id", library_agent.agent_id
        yield "agent_version", library_agent.agent_version
        yield "agent_name", library_agent.agent_name
        yield "message", "Agent successfully added to library"

    async def _add_to_library(
        self,
        user_id: str,
        store_listing_version_id: str,
        custom_name: str | None = None,
    ) -> LibraryAgent:
        """
        Add a store agent to the user's library using the existing library database function.
        """
        library_agent = (
            await get_database_manager_async_client().add_store_agent_to_library(
                store_listing_version_id=store_listing_version_id, user_id=user_id
            )
        )

        # If custom name is provided, we could update the library agent name here
        # For now, we'll just return the agent info
        agent_name = custom_name if custom_name else library_agent.name

        return LibraryAgent(
            library_agent_id=library_agent.id,
            agent_id=library_agent.graph_id,
            agent_version=library_agent.graph_version,
            agent_name=agent_name,
        )


class ListLibraryAgentsBlock(Block):
    """
    Block that lists all agents in the user's library.
    """

    class Input(BlockSchema):
        search_query: str | None = SchemaField(
            description="Optional search query to filter agents", default=None
        )
        limit: int = SchemaField(
            description="Maximum number of agents to return", default=50, ge=1, le=100
        )
        page: int = SchemaField(
            description="Page number for pagination", default=1, ge=1
        )

    class Output(BlockSchema):
        agents: list[LibraryAgent] = SchemaField(
            description="List of agents in the library",
            default_factory=list,
        )
        agent: LibraryAgent = SchemaField(
            description="Individual library agent (yielded for each agent)"
        )
        total_count: int = SchemaField(
            description="Total number of agents in library", default=0
        )
        page: int = SchemaField(description="Current page number", default=1)
        total_pages: int = SchemaField(description="Total number of pages", default=1)

    def __init__(self):
        super().__init__(
            id="082602d3-a74d-4600-9e9c-15b3af7eae98",
            description="List all agents in your personal library",
            categories={BlockCategory.BASIC, BlockCategory.DATA},
            input_schema=ListLibraryAgentsBlock.Input,
            output_schema=ListLibraryAgentsBlock.Output,
            test_input={
                "search_query": None,
                "limit": 10,
                "page": 1,
            },
            test_output=[
                (
                    "agents",
                    [
                        LibraryAgent(
                            library_agent_id="test-lib-id",
                            agent_id="test-agent-id",
                            agent_version=1,
                            agent_name="Test Library Agent",
                            description="A test agent in library",
                            creator="Test User",
                        ),
                    ],
                ),
                ("total_count", 1),
                ("page", 1),
                ("total_pages", 1),
                (
                    "agent",
                    LibraryAgent(
                        library_agent_id="test-lib-id",
                        agent_id="test-agent-id",
                        agent_version=1,
                        agent_name="Test Library Agent",
                        description="A test agent in library",
                        creator="Test User",
                    ),
                ),
            ],
            test_mock={
                "_list_library_agents": lambda *_, **__: {
                    "agents": [
                        LibraryAgent(
                            library_agent_id="test-lib-id",
                            agent_id="test-agent-id",
                            agent_version=1,
                            agent_name="Test Library Agent",
                            description="A test agent in library",
                            creator="Test User",
                        )
                    ],
                    "total": 1,
                    "page": 1,
                    "total_pages": 1,
                }
            },
        )

    async def run(
        self,
        input_data: Input,
        *,
        user_id: str,
        **kwargs,
    ) -> BlockOutput:
        result = await self._list_library_agents(
            user_id=user_id,
            search_query=input_data.search_query,
            limit=input_data.limit,
            page=input_data.page,
        )

        agents = result["agents"]

        yield "agents", agents
        yield "total_count", result["total"]
        yield "page", result["page"]
        yield "total_pages", result["total_pages"]

        # Yield each agent individually for better graph connectivity
        for agent in agents:
            yield "agent", agent

    async def _list_library_agents(
        self,
        user_id: str,
        search_query: str | None = None,
        limit: int = 50,
        page: int = 1,
    ) -> dict[str, Any]:
        """
        List agents in the user's library using the database client.
        """
        result = await get_database_manager_async_client().list_library_agents(
            user_id=user_id,
            search_term=search_query,
            page=page,
            page_size=limit,
        )

        agents = [
            LibraryAgent(
                library_agent_id=agent.id,
                agent_id=agent.graph_id,
                agent_version=agent.graph_version,
                agent_name=agent.name,
                description=getattr(agent, "description", ""),
                creator=getattr(agent, "creator", ""),
                is_archived=getattr(agent, "is_archived", False),
                categories=getattr(agent, "categories", []),
            )
            for agent in result.agents
        ]

        return {
            "agents": agents,
            "total": result.pagination.total_items,
            "page": result.pagination.current_page,
            "total_pages": result.pagination.total_pages,
        }
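Note that ListLibraryAgentsBlock emits the full page once on "agents" and then re-emits each item on "agent" for graph fan-out. A sketch of draining both outputs outside the executor (assumes a reachable database manager client behind the block; the user_id value is a placeholder):

async def collect_library_agents(block: ListLibraryAgentsBlock) -> list[LibraryAgent]:
    per_agent: list[LibraryAgent] = []
    async for name, value in block.run(
        block.Input(limit=10, page=1), user_id="user-123"
    ):
        if name == "agent":  # the per-item output; "agents" carries the whole page
            per_agent.append(value)
    return per_agent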
@@ -0,0 +1,311 @@
import logging
from typing import Literal

from pydantic import BaseModel

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.clients import get_database_manager_async_client

logger = logging.getLogger(__name__)


# Duplicate pydantic models for store data so we don't accidentally change the
# data shape in the blocks unintentionally when editing the backend
class StoreAgent(BaseModel):
    """Model representing a store agent."""

    slug: str = ""
    name: str = ""
    description: str = ""
    creator: str = ""
    rating: float = 0.0
    runs: int = 0
    categories: list[str] = []


class StoreAgentDict(BaseModel):
    """Dictionary representation of a store agent."""

    slug: str
    name: str
    description: str
    creator: str
    rating: float
    runs: int


class SearchAgentsResponse(BaseModel):
    """Response from searching store agents."""

    agents: list[StoreAgentDict]
    total_count: int


class StoreAgentDetails(BaseModel):
    """Detailed information about a store agent."""

    found: bool
    store_listing_version_id: str = ""
    agent_name: str = ""
    description: str = ""
    creator: str = ""
    categories: list[str] = []
    runs: int = 0
    rating: float = 0.0


class GetStoreAgentDetailsBlock(Block):
    """
    Block that retrieves detailed information about an agent from the store.
    """

    class Input(BlockSchema):
        creator: str = SchemaField(description="The username of the agent creator")
        slug: str = SchemaField(description="The name of the agent")

    class Output(BlockSchema):
        found: bool = SchemaField(
            description="Whether the agent was found in the store"
        )
        store_listing_version_id: str = SchemaField(
            description="The store listing version ID"
        )
        agent_name: str = SchemaField(description="Name of the agent")
        description: str = SchemaField(description="Description of the agent")
        creator: str = SchemaField(description="Creator of the agent")
        categories: list[str] = SchemaField(
            description="Categories the agent belongs to", default_factory=list
        )
        runs: int = SchemaField(
            description="Number of times the agent has been run", default=0
        )
        rating: float = SchemaField(
            description="Average rating of the agent", default=0.0
        )

    def __init__(self):
        super().__init__(
            id="b604f0ec-6e0d-40a7-bf55-9fd09997cced",
            description="Get detailed information about an agent from the store",
            categories={BlockCategory.BASIC, BlockCategory.DATA},
            input_schema=GetStoreAgentDetailsBlock.Input,
            output_schema=GetStoreAgentDetailsBlock.Output,
            test_input={"creator": "test-creator", "slug": "test-agent-slug"},
            test_output=[
                ("found", True),
                ("store_listing_version_id", "test-listing-id"),
                ("agent_name", "Test Agent"),
                ("description", "A test agent"),
                ("creator", "Test Creator"),
                ("categories", ["productivity", "automation"]),
                ("runs", 100),
                ("rating", 4.5),
            ],
            test_mock={
                "_get_agent_details": lambda *_, **__: StoreAgentDetails(
                    found=True,
                    store_listing_version_id="test-listing-id",
                    agent_name="Test Agent",
                    description="A test agent",
                    creator="Test Creator",
                    categories=["productivity", "automation"],
                    runs=100,
                    rating=4.5,
                )
            },
            static_output=True,
        )

    async def run(
        self,
        input_data: Input,
        **kwargs,
    ) -> BlockOutput:
        details = await self._get_agent_details(
            creator=input_data.creator, slug=input_data.slug
        )
        yield "found", details.found
        yield "store_listing_version_id", details.store_listing_version_id
        yield "agent_name", details.agent_name
        yield "description", details.description
        yield "creator", details.creator
        yield "categories", details.categories
        yield "runs", details.runs
        yield "rating", details.rating

    async def _get_agent_details(self, creator: str, slug: str) -> StoreAgentDetails:
        """
        Retrieve detailed information about a store agent.
        """
        # Get by specific version ID
        agent_details = (
            await get_database_manager_async_client().get_store_agent_details(
                username=creator, agent_name=slug
            )
        )

        return StoreAgentDetails(
            found=True,
            store_listing_version_id=agent_details.store_listing_version_id,
            agent_name=agent_details.agent_name,
            description=agent_details.description,
            creator=agent_details.creator,
            categories=(
                agent_details.categories if hasattr(agent_details, "categories") else []
            ),
            runs=agent_details.runs,
            rating=agent_details.rating,
        )


class SearchStoreAgentsBlock(Block):
    """
    Block that searches for agents in the store based on various criteria.
    """

    class Input(BlockSchema):
        query: str | None = SchemaField(
            description="Search query to find agents", default=None
        )
        category: str | None = SchemaField(
            description="Filter by category", default=None
        )
        sort_by: Literal["rating", "runs", "name", "recent"] = SchemaField(
            description="How to sort the results", default="rating"
        )
        limit: int = SchemaField(
            description="Maximum number of results to return", default=10, ge=1, le=100
        )

    class Output(BlockSchema):
        agents: list[StoreAgent] = SchemaField(
            description="List of agents matching the search criteria",
            default_factory=list,
        )
        agent: StoreAgent = SchemaField(description="Basic information of the agent")
        total_count: int = SchemaField(
            description="Total number of agents found", default=0
        )

    def __init__(self):
        super().__init__(
            id="39524701-026c-4328-87cc-1b88c8e2cb4c",
            description="Search for agents in the store",
            categories={BlockCategory.BASIC, BlockCategory.DATA},
            input_schema=SearchStoreAgentsBlock.Input,
            output_schema=SearchStoreAgentsBlock.Output,
            test_input={
                "query": "productivity",
                "category": None,
                "sort_by": "rating",
                "limit": 10,
            },
            test_output=[
                (
                    "agents",
                    [
                        {
                            "slug": "test-agent",
                            "name": "Test Agent",
                            "description": "A test agent",
                            "creator": "Test Creator",
                            "rating": 4.5,
                            "runs": 100,
                        }
                    ],
                ),
                ("total_count", 1),
                (
                    "agent",
                    {
                        "slug": "test-agent",
                        "name": "Test Agent",
                        "description": "A test agent",
                        "creator": "Test Creator",
                        "rating": 4.5,
                        "runs": 100,
                    },
                ),
            ],
            test_mock={
                "_search_agents": lambda *_, **__: SearchAgentsResponse(
                    agents=[
                        StoreAgentDict(
                            slug="test-agent",
                            name="Test Agent",
                            description="A test agent",
                            creator="Test Creator",
                            rating=4.5,
                            runs=100,
                        )
                    ],
                    total_count=1,
                )
            },
        )

    async def run(
        self,
        input_data: Input,
        **kwargs,
    ) -> BlockOutput:
        result = await self._search_agents(
            query=input_data.query,
            category=input_data.category,
            sort_by=input_data.sort_by,
            limit=input_data.limit,
        )

        agents = result.agents
        total_count = result.total_count

        # Convert to dict for output
        agents_as_dicts = [agent.model_dump() for agent in agents]

        yield "agents", agents_as_dicts
        yield "total_count", total_count

        for agent_dict in agents_as_dicts:
            yield "agent", agent_dict

    async def _search_agents(
        self,
        query: str | None = None,
        category: str | None = None,
        sort_by: str = "rating",
        limit: int = 10,
    ) -> SearchAgentsResponse:
        """
        Search for agents in the store using the existing store database function.
        """
        # Map our sort_by to the store's sorted_by parameter
        sorted_by_map = {
            "rating": "most_popular",
            "runs": "most_runs",
            "name": "alphabetical",
            "recent": "recently_updated",
        }

        result = await get_database_manager_async_client().get_store_agents(
            featured=False,
            creators=None,
            sorted_by=sorted_by_map.get(sort_by, "most_popular"),
            search_query=query,
            category=category,
            page=1,
            page_size=limit,
        )

        agents = [
            StoreAgentDict(
                slug=agent.slug,
                name=agent.agent_name,
                description=agent.description,
                creator=agent.creator,
                rating=agent.rating,
                runs=agent.runs,
            )
            for agent in result.agents
        ]

        return SearchAgentsResponse(agents=agents, total_count=len(agents))
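A natural composition of this file's blocks with the library block defined earlier in this changeset: resolve a (creator, slug) pair to a store_listing_version_id, then feed that ID to AddToLibraryFromStoreBlock. A hypothetical glue function, assuming both blocks can reach the database manager client; error handling is omitted for brevity:

async def import_store_agent(creator: str, slug: str, user_id: str) -> None:
    details_block = GetStoreAgentDetailsBlock()
    listing_id = None
    async for name, value in details_block.run(
        details_block.Input(creator=creator, slug=slug)
    ):
        if name == "store_listing_version_id":
            listing_id = value
    if not listing_id:
        raise ValueError(f"Agent {creator}/{slug} not found in store")

    add_block = AddToLibraryFromStoreBlock()
    async for name, value in add_block.run(
        add_block.Input(store_listing_version_id=listing_id), user_id=user_id
    ):
        print(name, value)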
@@ -0,0 +1,130 @@
from unittest.mock import Mock

import pytest

from backend.blocks.data_manipulation import AddToListBlock, CreateDictionaryBlock
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock


@pytest.mark.asyncio
async def test_smart_decision_maker_handles_dynamic_dict_fields():
    """Test Smart Decision Maker can handle dynamic dictionary fields (_#_) for any block"""

    # Create a mock node for CreateDictionaryBlock
    mock_node = Mock()
    mock_node.block = CreateDictionaryBlock()
    mock_node.block_id = CreateDictionaryBlock().id
    mock_node.input_default = {}

    # Create mock links with dynamic dictionary fields
    mock_links = [
        Mock(
            source_name="tools_^_create_dict_~_name",
            sink_name="values_#_name",  # Dynamic dict field
            sink_id="dict_node_id",
            source_id="smart_decision_node_id",
        ),
        Mock(
            source_name="tools_^_create_dict_~_age",
            sink_name="values_#_age",  # Dynamic dict field
            sink_id="dict_node_id",
            source_id="smart_decision_node_id",
        ),
        Mock(
            source_name="tools_^_create_dict_~_city",
            sink_name="values_#_city",  # Dynamic dict field
            sink_id="dict_node_id",
            source_id="smart_decision_node_id",
        ),
    ]

    # Generate function signature
    signature = await SmartDecisionMakerBlock._create_block_function_signature(
        mock_node, mock_links  # type: ignore
    )

    # Verify the signature was created successfully
    assert signature["type"] == "function"
    assert "parameters" in signature["function"]
    assert "properties" in signature["function"]["parameters"]

    # Check that dynamic fields are handled
    properties = signature["function"]["parameters"]["properties"]
    assert len(properties) == 3  # Should have all three fields

    # Each dynamic field should have proper schema
    for prop_value in properties.values():
        assert "type" in prop_value
        assert prop_value["type"] == "string"  # Dynamic fields get string type
        assert "description" in prop_value
        assert "Dynamic value for" in prop_value["description"]


@pytest.mark.asyncio
async def test_smart_decision_maker_handles_dynamic_list_fields():
    """Test Smart Decision Maker can handle dynamic list fields (_$_) for any block"""

    # Create a mock node for AddToListBlock
    mock_node = Mock()
    mock_node.block = AddToListBlock()
    mock_node.block_id = AddToListBlock().id
    mock_node.input_default = {}

    # Create mock links with dynamic list fields
    mock_links = [
        Mock(
            source_name="tools_^_add_to_list_~_0",
            sink_name="entries_$_0",  # Dynamic list field
            sink_id="list_node_id",
            source_id="smart_decision_node_id",
        ),
        Mock(
            source_name="tools_^_add_to_list_~_1",
            sink_name="entries_$_1",  # Dynamic list field
            sink_id="list_node_id",
            source_id="smart_decision_node_id",
        ),
    ]

    # Generate function signature
    signature = await SmartDecisionMakerBlock._create_block_function_signature(
        mock_node, mock_links  # type: ignore
    )

    # Verify dynamic list fields are handled properly
    assert signature["type"] == "function"
    properties = signature["function"]["parameters"]["properties"]
    assert len(properties) == 2  # Should have both list items

    # Each dynamic field should have proper schema
    for prop_value in properties.values():
        assert prop_value["type"] == "string"
        assert "Dynamic value for" in prop_value["description"]


@pytest.mark.asyncio
async def test_create_dict_block_with_dynamic_values():
    """Test CreateDictionaryBlock processes dynamic values correctly"""

    block = CreateDictionaryBlock()

    # Simulate what happens when the executor merges dynamic fields
    # The executor merges values_#_* fields into the values dict
    input_data = block.input_schema(
        values={
            "existing": "value",
            "name": "Alice",  # This would come from values_#_name
            "age": 25,  # This would come from values_#_age
        }
    )

    # Run the block
    result = {}
    async for output_name, output_value in block.run(input_data):
        result[output_name] = output_value

    # Check the result
    assert "dictionary" in result
    assert result["dictionary"]["existing"] == "value"
    assert result["dictionary"]["name"] == "Alice"
    assert result["dictionary"]["age"] == 25
@@ -0,0 +1,155 @@
from unittest.mock import MagicMock

import pytest

from backend.blocks.system.library_operations import (
    AddToLibraryFromStoreBlock,
    LibraryAgent,
)
from backend.blocks.system.store_operations import (
    GetStoreAgentDetailsBlock,
    SearchAgentsResponse,
    SearchStoreAgentsBlock,
    StoreAgentDetails,
    StoreAgentDict,
)


@pytest.mark.asyncio
async def test_add_to_library_from_store_block_success(mocker):
    """Test successful addition of agent from store to library."""
    block = AddToLibraryFromStoreBlock()

    # Mock the library agent response
    mock_library_agent = MagicMock()
    mock_library_agent.id = "lib-agent-123"
    mock_library_agent.graph_id = "graph-456"
    mock_library_agent.graph_version = 1
    mock_library_agent.name = "Test Agent"

    mocker.patch.object(
        block,
        "_add_to_library",
        return_value=LibraryAgent(
            library_agent_id="lib-agent-123",
            agent_id="graph-456",
            agent_version=1,
            agent_name="Test Agent",
        ),
    )

    input_data = block.Input(
        store_listing_version_id="store-listing-v1", agent_name="Custom Agent Name"
    )

    outputs = {}
    async for name, value in block.run(input_data, user_id="test-user"):
        outputs[name] = value

    assert outputs["success"] is True
    assert outputs["library_agent_id"] == "lib-agent-123"
    assert outputs["agent_id"] == "graph-456"
    assert outputs["agent_version"] == 1
    assert outputs["agent_name"] == "Test Agent"
    assert outputs["message"] == "Agent successfully added to library"


@pytest.mark.asyncio
async def test_get_store_agent_details_block_success(mocker):
    """Test successful retrieval of store agent details."""
    block = GetStoreAgentDetailsBlock()

    mocker.patch.object(
        block,
        "_get_agent_details",
        return_value=StoreAgentDetails(
            found=True,
            store_listing_version_id="version-123",
            agent_name="Test Agent",
            description="A test agent for testing",
            creator="Test Creator",
            categories=["productivity", "automation"],
            runs=100,
            rating=4.5,
        ),
    )

    input_data = block.Input(creator="Test Creator", slug="test-slug")
    outputs = {}
    async for name, value in block.run(input_data):
        outputs[name] = value

    assert outputs["found"] is True
    assert outputs["store_listing_version_id"] == "version-123"
    assert outputs["agent_name"] == "Test Agent"
    assert outputs["description"] == "A test agent for testing"
    assert outputs["creator"] == "Test Creator"
    assert outputs["categories"] == ["productivity", "automation"]
    assert outputs["runs"] == 100
    assert outputs["rating"] == 4.5


@pytest.mark.asyncio
async def test_search_store_agents_block(mocker):
    """Test searching for store agents."""
    block = SearchStoreAgentsBlock()

    mocker.patch.object(
        block,
        "_search_agents",
        return_value=SearchAgentsResponse(
            agents=[
                StoreAgentDict(
                    slug="creator1/agent1",
                    name="Agent One",
                    description="First test agent",
                    creator="Creator 1",
                    rating=4.8,
                    runs=500,
                ),
                StoreAgentDict(
                    slug="creator2/agent2",
                    name="Agent Two",
                    description="Second test agent",
                    creator="Creator 2",
                    rating=4.2,
                    runs=200,
                ),
            ],
            total_count=2,
        ),
    )

    input_data = block.Input(
        query="test", category="productivity", sort_by="rating", limit=10
    )

    outputs = {}
    async for name, value in block.run(input_data):
        outputs[name] = value

    assert len(outputs["agents"]) == 2
    assert outputs["total_count"] == 2
    assert outputs["agents"][0]["name"] == "Agent One"
    assert outputs["agents"][0]["rating"] == 4.8


@pytest.mark.asyncio
async def test_search_store_agents_block_empty_results(mocker):
    """Test searching with no results."""
    block = SearchStoreAgentsBlock()

    mocker.patch.object(
        block,
        "_search_agents",
        return_value=SearchAgentsResponse(agents=[], total_count=0),
    )

    input_data = block.Input(query="nonexistent", limit=10)

    outputs = {}
    async for name, value in block.run(input_data):
        outputs[name] = value

    assert outputs["agents"] == []
    assert outputs["total_count"] == 0
@@ -1,4 +1,5 @@
 import asyncio
+import logging
 import time
 from datetime import datetime, timedelta
 from typing import Any, Literal, Union
@@ -7,6 +8,7 @@ from zoneinfo import ZoneInfo
 from pydantic import BaseModel
 
 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.execution import UserContext
 from backend.data.model import SchemaField
 
 # Shared timezone literal type for all time/date blocks
@@ -51,16 +53,80 @@ TimezoneLiteral = Literal[
     "Etc/GMT+12",  # UTC-12:00
 ]
 
+logger = logging.getLogger(__name__)
+
+
+def _get_timezone(
+    format_type: Any,  # Any format type with timezone and use_user_timezone attributes
+    user_timezone: str | None,
+) -> ZoneInfo:
+    """
+    Determine which timezone to use based on format settings and user context.
+
+    Args:
+        format_type: The format configuration containing timezone settings
+        user_timezone: The user's timezone from context
+
+    Returns:
+        ZoneInfo object for the determined timezone
+    """
+    if format_type.use_user_timezone and user_timezone:
+        tz = ZoneInfo(user_timezone)
+        logger.debug(f"Using user timezone: {user_timezone}")
+    else:
+        tz = ZoneInfo(format_type.timezone)
+        logger.debug(f"Using specified timezone: {format_type.timezone}")
+    return tz
+
+
+def _format_datetime_iso8601(dt: datetime, include_microseconds: bool = False) -> str:
+    """
+    Format a datetime object to ISO8601 string.
+
+    Args:
+        dt: The datetime object to format
+        include_microseconds: Whether to include microseconds in the output
+
+    Returns:
+        ISO8601 formatted string
+    """
+    if include_microseconds:
+        return dt.isoformat()
+    else:
+        return dt.isoformat(timespec="seconds")
+
+
+# BACKWARDS COMPATIBILITY NOTE:
+# The timezone field is kept at the format level (not block level) for backwards compatibility.
+# Existing graphs have timezone saved within format_type; moving it would break them.
+#
+# The use_user_timezone flag was added to allow using the user's profile timezone.
+# Default is False to maintain backwards compatibility - existing graphs will continue
+# using their specified timezone.
+#
+# KNOWN ISSUE: If a user switches between format types (strftime <-> iso8601),
+# the timezone setting doesn't carry over. This is a UX issue but fixing it would
+# require either:
+# 1. Moving timezone to block level (breaking change, needs migration)
+# 2. Complex state management to sync timezone across format types
+#
+# Future migration path: When we do a major version bump, consider moving timezone
+# to the block Input level for better UX.
+
 
 class TimeStrftimeFormat(BaseModel):
     discriminator: Literal["strftime"]
     format: str = "%H:%M:%S"
     timezone: TimezoneLiteral = "UTC"
+    # When True, overrides timezone with user's profile timezone
+    use_user_timezone: bool = False
 
 
 class TimeISO8601Format(BaseModel):
     discriminator: Literal["iso8601"]
     timezone: TimezoneLiteral = "UTC"
+    # When True, overrides timezone with user's profile timezone
+    use_user_timezone: bool = False
     include_microseconds: bool = False
@@ -115,25 +181,27 @@ class GetCurrentTimeBlock(Block):
         ],
     )
 
-    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(
+        self, input_data: Input, *, user_context: UserContext, **kwargs
+    ) -> BlockOutput:
+        # Extract timezone from user_context (always present)
+        effective_timezone = user_context.timezone
+
+        # Get the appropriate timezone
+        tz = _get_timezone(input_data.format_type, effective_timezone)
+        dt = datetime.now(tz=tz)
+
         if isinstance(input_data.format_type, TimeISO8601Format):
             # ISO 8601 format for time only (extract time portion from full ISO datetime)
-            tz = ZoneInfo(input_data.format_type.timezone)
-            dt = datetime.now(tz=tz)
-
             # Get the full ISO format and extract just the time portion with timezone
-            if input_data.format_type.include_microseconds:
-                full_iso = dt.isoformat()
-            else:
-                full_iso = dt.isoformat(timespec="seconds")
-
+            full_iso = _format_datetime_iso8601(
                dt, input_data.format_type.include_microseconds
+            )
             # Extract time portion (everything after 'T')
             current_time = full_iso.split("T")[1] if "T" in full_iso else full_iso
             current_time = f"T{current_time}"  # Add T prefix for ISO 8601 time format
         else:  # TimeStrftimeFormat
-            tz = ZoneInfo(input_data.format_type.timezone)
-            dt = datetime.now(tz=tz)
             current_time = dt.strftime(input_data.format_type.format)
 
         yield "time", current_time
@@ -141,11 +209,15 @@ class DateStrftimeFormat(BaseModel):
     discriminator: Literal["strftime"]
     format: str = "%Y-%m-%d"
     timezone: TimezoneLiteral = "UTC"
+    # When True, overrides timezone with user's profile timezone
+    use_user_timezone: bool = False
 
 
 class DateISO8601Format(BaseModel):
     discriminator: Literal["iso8601"]
     timezone: TimezoneLiteral = "UTC"
+    # When True, overrides timezone with user's profile timezone
+    use_user_timezone: bool = False
 
 
 class GetCurrentDateBlock(Block):
@@ -217,20 +289,23 @@ class GetCurrentDateBlock(Block):
     )
 
     async def run(self, input_data: Input, **kwargs) -> BlockOutput:
+        # Extract timezone from user_context (required keyword argument)
+        user_context: UserContext = kwargs["user_context"]
+        effective_timezone = user_context.timezone
+
         try:
             offset = int(input_data.offset)
         except ValueError:
             offset = 0
 
+        # Get the appropriate timezone
+        tz = _get_timezone(input_data.format_type, effective_timezone)
+        current_date = datetime.now(tz=tz) - timedelta(days=offset)
+
         if isinstance(input_data.format_type, DateISO8601Format):
             # ISO 8601 format for date only (YYYY-MM-DD)
-            tz = ZoneInfo(input_data.format_type.timezone)
-            current_date = datetime.now(tz=tz) - timedelta(days=offset)
             # ISO 8601 date format is YYYY-MM-DD
             date_str = current_date.date().isoformat()
         else:  # DateStrftimeFormat
-            tz = ZoneInfo(input_data.format_type.timezone)
-            current_date = datetime.now(tz=tz) - timedelta(days=offset)
            date_str = current_date.strftime(input_data.format_type.format)
 
         yield "date", date_str
@@ -240,11 +315,15 @@ class StrftimeFormat(BaseModel):
     discriminator: Literal["strftime"]
     format: str = "%Y-%m-%d %H:%M:%S"
     timezone: TimezoneLiteral = "UTC"
+    # When True, overrides timezone with user's profile timezone
+    use_user_timezone: bool = False
 
 
 class ISO8601Format(BaseModel):
     discriminator: Literal["iso8601"]
     timezone: TimezoneLiteral = "UTC"
+    # When True, overrides timezone with user's profile timezone
+    use_user_timezone: bool = False
     include_microseconds: bool = False
@@ -316,20 +395,22 @@ class GetCurrentDateAndTimeBlock(Block):
     )
 
     async def run(self, input_data: Input, **kwargs) -> BlockOutput:
+        # Extract timezone from user_context (required keyword argument)
+        user_context: UserContext = kwargs["user_context"]
+        effective_timezone = user_context.timezone
+
+        # Get the appropriate timezone
+        tz = _get_timezone(input_data.format_type, effective_timezone)
+        dt = datetime.now(tz=tz)
+
         if isinstance(input_data.format_type, ISO8601Format):
             # ISO 8601 format with specified timezone (also RFC3339-compliant)
-            tz = ZoneInfo(input_data.format_type.timezone)
-            dt = datetime.now(tz=tz)
-
             # Format with or without microseconds
-            if input_data.format_type.include_microseconds:
-                current_date_time = dt.isoformat()
-            else:
-                current_date_time = dt.isoformat(timespec="seconds")
+            current_date_time = _format_datetime_iso8601(
                dt, input_data.format_type.include_microseconds
+            )
         else:  # StrftimeFormat
-            tz = ZoneInfo(input_data.format_type.timezone)
-            dt = datetime.now(tz=tz)
             current_date_time = dt.strftime(input_data.format_type.format)
 
         yield "date_time", current_date_time
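The timezone-resolution rule introduced above is worth pinning down: use_user_timezone only wins when a user timezone is actually available; otherwise the format's own timezone applies. A quick sketch of that behavior using the models from this diff (str(ZoneInfo(key)) returns the key):

fmt = TimeISO8601Format(
    discriminator="iso8601", timezone="UTC", use_user_timezone=True
)
assert str(_get_timezone(fmt, "Europe/Amsterdam")) == "Europe/Amsterdam"
assert str(_get_timezone(fmt, None)) == "UTC"  # no user timezone -> format's own

fmt.use_user_timezone = False
assert str(_get_timezone(fmt, "Europe/Amsterdam")) == "UTC"  # flag off -> ignored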
@@ -5,6 +5,12 @@ from backend.blocks.ai_shortform_video_block import AIShortformVideoCreatorBlock
 from backend.blocks.apollo.organization import SearchOrganizationsBlock
 from backend.blocks.apollo.people import SearchPeopleBlock
 from backend.blocks.apollo.person import GetPersonDetailBlock
+from backend.blocks.enrichlayer.linkedin import (
+    GetLinkedinProfileBlock,
+    GetLinkedinProfilePictureBlock,
+    LinkedinPersonLookupBlock,
+    LinkedinRoleLookupBlock,
+)
 from backend.blocks.flux_kontext import AIImageEditorBlock, FluxKontextModelName
 from backend.blocks.ideogram import IdeogramModelBlock
 from backend.blocks.jina.embeddings import JinaEmbeddingBlock
@@ -30,6 +36,7 @@ from backend.integrations.credentials_store import (
     anthropic_credentials,
     apollo_credentials,
     did_credentials,
+    enrichlayer_credentials,
     groq_credentials,
     ideogram_credentials,
     jina_credentials,
@@ -39,6 +46,7 @@ from backend.integrations.credentials_store import (
     replicate_credentials,
     revid_credentials,
     unreal_credentials,
+    v0_credentials,
 )
 
 # =============== Configure the cost for each LLM Model call =============== #
@@ -115,6 +123,10 @@ MODEL_COST: dict[LlmModel, int] = {
     LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: 1,
     LlmModel.GEMINI_2_0_FLASH_LITE: 1,
     LlmModel.DEEPSEEK_R1_0528: 1,
+    # v0 by Vercel models
+    LlmModel.V0_1_5_MD: 1,
+    LlmModel.V0_1_5_LG: 2,
+    LlmModel.V0_1_0_MD: 1,
 }
 
 for model in LlmModel:
@@ -204,6 +216,23 @@ LLM_COST = (
         for model, cost in MODEL_COST.items()
         if MODEL_METADATA[model].provider == "llama_api"
     ]
+    # v0 by Vercel Models
+    + [
+        BlockCost(
+            cost_type=BlockCostType.RUN,
+            cost_filter={
+                "model": model,
+                "credentials": {
+                    "id": v0_credentials.id,
+                    "provider": v0_credentials.provider,
+                    "type": v0_credentials.type,
+                },
+            },
+            cost_amount=cost,
+        )
+        for model, cost in MODEL_COST.items()
+        if MODEL_METADATA[model].provider == "v0"
+    ]
     # AI/ML Api Models
     + [
         BlockCost(
@@ -376,6 +405,54 @@ BLOCK_COSTS: dict[Type[Block], list[BlockCost]] = {
             },
         )
     ],
+    GetLinkedinProfileBlock: [
+        BlockCost(
+            cost_amount=1,
+            cost_filter={
+                "credentials": {
+                    "id": enrichlayer_credentials.id,
+                    "provider": enrichlayer_credentials.provider,
+                    "type": enrichlayer_credentials.type,
+                }
+            },
+        )
+    ],
+    LinkedinPersonLookupBlock: [
+        BlockCost(
+            cost_amount=2,
+            cost_filter={
+                "credentials": {
+                    "id": enrichlayer_credentials.id,
+                    "provider": enrichlayer_credentials.provider,
+                    "type": enrichlayer_credentials.type,
+                }
+            },
+        )
+    ],
+    LinkedinRoleLookupBlock: [
+        BlockCost(
+            cost_amount=3,
+            cost_filter={
+                "credentials": {
+                    "id": enrichlayer_credentials.id,
+                    "provider": enrichlayer_credentials.provider,
+                    "type": enrichlayer_credentials.type,
+                }
+            },
+        )
+    ],
+    GetLinkedinProfilePictureBlock: [
+        BlockCost(
+            cost_amount=3,
+            cost_filter={
+                "credentials": {
+                    "id": enrichlayer_credentials.id,
+                    "provider": enrichlayer_credentials.provider,
+                    "type": enrichlayer_credentials.type,
+                }
+            },
+        )
+    ],
     SmartDecisionMakerBlock: LLM_COST,
     SearchOrganizationsBlock: [
         BlockCost(
|
||||
from backend.data.notifications import NotificationEventModel, RefundRequestData
|
||||
from backend.data.user import get_user_by_id, get_user_email_by_id
|
||||
from backend.notifications.notifications import queue_notification_async
|
||||
from backend.server.model import Pagination
|
||||
from backend.server.v2.admin.model import UserHistoryResponse
|
||||
from backend.util.exceptions import InsufficientBalanceError
|
||||
from backend.util.json import SafeJson
|
||||
from backend.util.models import Pagination
|
||||
from backend.util.retry import func_retry
|
||||
from backend.util.settings import Settings
|
||||
|
||||
@@ -286,11 +286,17 @@ class UserCreditBase(ABC):
|
||||
transaction = await CreditTransaction.prisma().find_first_or_raise(
|
||||
where={"transactionKey": transaction_key, "userId": user_id}
|
||||
)
|
||||
|
||||
if transaction.isActive:
|
||||
return
|
||||
|
||||
async with db.locked_transaction(f"usr_trx_{user_id}"):
|
||||
|
||||
transaction = await CreditTransaction.prisma().find_first_or_raise(
|
||||
where={"transactionKey": transaction_key, "userId": user_id}
|
||||
)
|
||||
if transaction.isActive:
|
||||
return
|
||||
|
||||
user_balance, _ = await self._get_credits(user_id)
|
||||
await CreditTransaction.prisma().update(
|
||||
where={
|
||||
|
||||
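Editor's note: the hunk above moves the `isActive` check inside the per-user lock, which is the classic double-checked activation pattern. A minimal self-contained sketch of that pattern (the in-memory store and `Tx` type are illustrative stand-ins for the Prisma calls, not the real API):

import asyncio
from dataclasses import dataclass

@dataclass
class Tx:
    is_active: bool = False

_store: dict[str, Tx] = {"tx-1": Tx()}
_user_locks: dict[str, asyncio.Lock] = {}

async def enable_transaction(user_id: str, key: str) -> None:
    # The authoritative is_active check must happen *inside* the per-user
    # lock; checking only before acquiring it would let two concurrent
    # callers both observe is_active=False and apply the update twice.
    lock = _user_locks.setdefault(user_id, asyncio.Lock())
    async with lock:
        tx = _store[key]
        if tx.is_active:
            return
        tx.is_active = True  # stand-in for the balance update + activation

asyncio.run(enable_transaction("user-1", "tx-1"))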
@@ -7,7 +7,7 @@ from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.block import get_block
from backend.data.credit import BetaUserCredit, UsageTransactionMetadata
from backend.data.execution import NodeExecutionEntry
from backend.data.execution import NodeExecutionEntry, UserContext
from backend.data.user import DEFAULT_USER_ID
from backend.executor.utils import block_usage_cost
from backend.integrations.credentials_store import openai_credentials

@@ -75,6 +75,7 @@ async def test_block_credit_usage(server: SpinTestServer):
                    "type": openai_credentials.type,
                },
            },
            user_context=UserContext(timezone="UTC"),
        ),
    )
    assert spending_amount_1 > 0

@@ -88,6 +89,7 @@ async def test_block_credit_usage(server: SpinTestServer):
            node_exec_id="test_node_exec",
            block_id=AITextGeneratorBlock().id,
            inputs={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
            user_context=UserContext(timezone="UTC"),
        ),
    )
    assert spending_amount_2 == 0

@@ -33,12 +33,13 @@ from prisma.types (
    AgentNodeExecutionUpdateInput,
    AgentNodeExecutionWhereInput,
)
from pydantic import BaseModel, ConfigDict, JsonValue
from pydantic import BaseModel, ConfigDict, JsonValue, ValidationError
from pydantic.fields import Field

from backend.server.v2.store.exceptions import DatabaseError
from backend.util import type as type_utils
from backend.util.json import SafeJson
from backend.util.models import Pagination
from backend.util.retry import func_retry
from backend.util.settings import Config
from backend.util.truncate import truncate

@@ -59,7 +60,7 @@ from .includes (
    GRAPH_EXECUTION_INCLUDE_WITH_NODES,
    graph_execution_include,
)
from .model import GraphExecutionStats
from .model import GraphExecutionStats, NodeExecutionStats

T = TypeVar("T")

@@ -89,6 +90,7 @@ ExecutionStatus = AgentExecutionStatus


class GraphExecutionMeta(BaseDbModel):
    id: str  # type: ignore # Override base class to make this required
    user_id: str
    graph_id: str
    graph_version: int

@@ -290,13 +292,14 @@ class GraphExecutionWithNodes(GraphExecution):
            node_executions=node_executions,
        )

    def to_graph_execution_entry(self):
    def to_graph_execution_entry(self, user_context: "UserContext"):
        return GraphExecutionEntry(
            user_id=self.user_id,
            graph_id=self.graph_id,
            graph_version=self.graph_version or 0,
            graph_exec_id=self.id,
            nodes_input_masks={},  # FIXME: store credentials on AgentGraphExecution
            user_context=user_context,
        )


@@ -318,18 +321,30 @@ class NodeExecutionResult(BaseModel):

    @staticmethod
    def from_db(_node_exec: AgentNodeExecution, user_id: Optional[str] = None):
        if _node_exec.executionData:
            # Execution that has been queued for execution will persist its data.
        try:
            stats = NodeExecutionStats.model_validate(_node_exec.stats or {})
        except (ValueError, ValidationError):
            stats = NodeExecutionStats()

        if stats.cleared_inputs:
            input_data: BlockInput = defaultdict()
            for name, messages in stats.cleared_inputs.items():
                input_data[name] = messages[-1] if messages else ""
        elif _node_exec.executionData:
            input_data = type_utils.convert(_node_exec.executionData, dict[str, Any])
        else:
            # For incomplete execution, executionData will not be yet available.
            input_data: BlockInput = defaultdict()
            for data in _node_exec.Input or []:
                input_data[data.name] = type_utils.convert(data.data, type[Any])

        output_data: CompletedBlockOutput = defaultdict(list)
        for data in _node_exec.Output or []:
            output_data[data.name].append(type_utils.convert(data.data, type[Any]))

        if stats.cleared_outputs:
            for name, messages in stats.cleared_outputs.items():
                output_data[name].extend(messages)
        else:
            for data in _node_exec.Output or []:
                output_data[data.name].append(type_utils.convert(data.data, type[Any]))

        graph_execution: AgentGraphExecution | None = _node_exec.GraphExecution
        if graph_execution:

@@ -356,7 +371,9 @@ class NodeExecutionResult(BaseModel):
            end_time=_node_exec.endedTime,
        )

    def to_node_execution_entry(self) -> "NodeExecutionEntry":
    def to_node_execution_entry(
        self, user_context: "UserContext"
    ) -> "NodeExecutionEntry":
        return NodeExecutionEntry(
            user_id=self.user_id,
            graph_exec_id=self.graph_exec_id,

@@ -365,6 +382,7 @@ class NodeExecutionResult(BaseModel):
            node_id=self.node_id,
            block_id=self.block_id,
            inputs=self.input_data,
            user_context=user_context,
        )


@@ -372,13 +390,13 @@ class NodeExecutionResult(BaseModel):


async def get_graph_executions(
    graph_exec_id: str | None = None,
    graph_id: str | None = None,
    user_id: str | None = None,
    statuses: list[ExecutionStatus] | None = None,
    created_time_gte: datetime | None = None,
    created_time_lte: datetime | None = None,
    limit: int | None = None,
    graph_exec_id: Optional[str] = None,
    graph_id: Optional[str] = None,
    user_id: Optional[str] = None,
    statuses: Optional[list[ExecutionStatus]] = None,
    created_time_gte: Optional[datetime] = None,
    created_time_lte: Optional[datetime] = None,
    limit: Optional[int] = None,
) -> list[GraphExecutionMeta]:
    """⚠️ **Optional `user_id` check**: MUST USE check in user-facing endpoints."""
    where_filter: AgentGraphExecutionWhereInput = {

@@ -406,6 +424,60 @@ async def get_graph_executions(
    return [GraphExecutionMeta.from_db(execution) for execution in executions]


class GraphExecutionsPaginated(BaseModel):
    """Response schema for paginated graph executions."""

    executions: list[GraphExecutionMeta]
    pagination: Pagination


async def get_graph_executions_paginated(
    user_id: str,
    graph_id: Optional[str] = None,
    page: int = 1,
    page_size: int = 25,
    statuses: Optional[list[ExecutionStatus]] = None,
    created_time_gte: Optional[datetime] = None,
    created_time_lte: Optional[datetime] = None,
) -> GraphExecutionsPaginated:
    """Get paginated graph executions for a specific graph."""
    where_filter: AgentGraphExecutionWhereInput = {
        "isDeleted": False,
        "userId": user_id,
    }

    if graph_id:
        where_filter["agentGraphId"] = graph_id
    if created_time_gte or created_time_lte:
        where_filter["createdAt"] = {
            "gte": created_time_gte or datetime.min.replace(tzinfo=timezone.utc),
            "lte": created_time_lte or datetime.max.replace(tzinfo=timezone.utc),
        }
    if statuses:
        where_filter["OR"] = [{"executionStatus": status} for status in statuses]

    total_count = await AgentGraphExecution.prisma().count(where=where_filter)
    total_pages = (total_count + page_size - 1) // page_size

    offset = (page - 1) * page_size
    executions = await AgentGraphExecution.prisma().find_many(
        where=where_filter,
        order={"createdAt": "desc"},
        take=page_size,
        skip=offset,
    )

    return GraphExecutionsPaginated(
        executions=[GraphExecutionMeta.from_db(execution) for execution in executions],
        pagination=Pagination(
            total_items=total_count,
            total_pages=total_pages,
            current_page=page,
            page_size=page_size,
        ),
    )


async def get_graph_execution_meta(
    user_id: str, execution_id: str
) -> GraphExecutionMeta | None:

@@ -805,12 +877,19 @@ async def get_latest_node_execution(
# ----------------- Execution Infrastructure ----------------- #


class UserContext(BaseModel):
    """Generic user context for graph execution containing user-specific settings."""

    timezone: str


class GraphExecutionEntry(BaseModel):
    user_id: str
    graph_exec_id: str
    graph_id: str
    graph_version: int
    nodes_input_masks: Optional[dict[str, dict[str, JsonValue]]] = None
    user_context: UserContext


class NodeExecutionEntry(BaseModel):

@@ -821,6 +900,7 @@ class NodeExecutionEntry(BaseModel):
    node_id: str
    block_id: str
    inputs: BlockInput
    user_context: UserContext


class ExecutionQueue(Generic[T]):

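Editor's note: `get_graph_executions_paginated` above derives its page math with ceiling division over 1-indexed pages. The same arithmetic in isolation, as a runnable sketch:

def page_window(total_count: int, page: int, page_size: int) -> tuple[int, int, int]:
    """Return (total_pages, offset, take) for 1-indexed pages."""
    total_pages = (total_count + page_size - 1) // page_size  # ceiling division
    offset = (page - 1) * page_size
    return total_pages, offset, page_size

# 101 items at 25 per page -> 5 pages; page 5 starts at offset 100
assert page_window(101, 5, 25) == (5, 100, 25)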
autogpt_platform/backend/backend/data/generate_data.py (new file, 109 lines)
@@ -0,0 +1,109 @@
import logging
from collections import defaultdict
from datetime import datetime

from prisma.enums import AgentExecutionStatus

from backend.data.execution import get_graph_executions
from backend.data.graph import get_graph_metadata
from backend.data.model import UserExecutionSummaryStats
from backend.server.v2.store.exceptions import DatabaseError
from backend.util.logging import TruncatedLogger

logger = TruncatedLogger(logging.getLogger(__name__), prefix="[SummaryData]")


async def get_user_execution_summary_data(
    user_id: str, start_time: datetime, end_time: datetime
) -> UserExecutionSummaryStats:
    """Gather all summary data for a user in a time range.

    This function fetches graph executions once and aggregates all required
    statistics in a single pass for efficiency.
    """
    try:
        # Fetch graph executions once
        executions = await get_graph_executions(
            user_id=user_id,
            created_time_gte=start_time,
            created_time_lte=end_time,
        )

        # Initialize aggregation variables
        total_credits_used = 0.0
        total_executions = len(executions)
        successful_runs = 0
        failed_runs = 0
        terminated_runs = 0
        execution_times = []
        agent_usage = defaultdict(int)
        cost_by_graph_id = defaultdict(float)

        # Single pass through executions to aggregate all stats
        for execution in executions:
            # Count execution statuses (including TERMINATED as failed)
            if execution.status == AgentExecutionStatus.COMPLETED:
                successful_runs += 1
            elif execution.status == AgentExecutionStatus.FAILED:
                failed_runs += 1
            elif execution.status == AgentExecutionStatus.TERMINATED:
                terminated_runs += 1

            # Aggregate costs from stats
            if execution.stats and hasattr(execution.stats, "cost"):
                cost_in_dollars = execution.stats.cost / 100
                total_credits_used += cost_in_dollars
                cost_by_graph_id[execution.graph_id] += cost_in_dollars

            # Collect execution times
            if execution.stats and hasattr(execution.stats, "duration"):
                execution_times.append(execution.stats.duration)

            # Count agent usage
            agent_usage[execution.graph_id] += 1

        # Calculate derived stats
        total_execution_time = sum(execution_times)
        average_execution_time = (
            total_execution_time / len(execution_times) if execution_times else 0
        )

        # Find most used agent
        most_used_agent = "No agents used"
        if agent_usage:
            most_used_agent_id = max(agent_usage, key=lambda k: agent_usage[k])
            try:
                graph_meta = await get_graph_metadata(graph_id=most_used_agent_id)
                most_used_agent = (
                    graph_meta.name if graph_meta else f"Agent {most_used_agent_id[:8]}"
                )
            except Exception:
                logger.warning(f"Could not get metadata for graph {most_used_agent_id}")
                most_used_agent = f"Agent {most_used_agent_id[:8]}"

        # Convert graph_ids to agent names for cost breakdown
        cost_breakdown = {}
        for graph_id, cost in cost_by_graph_id.items():
            try:
                graph_meta = await get_graph_metadata(graph_id=graph_id)
                agent_name = graph_meta.name if graph_meta else f"Agent {graph_id[:8]}"
            except Exception:
                logger.warning(f"Could not get metadata for graph {graph_id}")
                agent_name = f"Agent {graph_id[:8]}"
            cost_breakdown[agent_name] = cost

        # Build the summary stats object (include terminated runs as failed)
        return UserExecutionSummaryStats(
            total_credits_used=total_credits_used,
            total_executions=total_executions,
            successful_runs=successful_runs,
            failed_runs=failed_runs + terminated_runs,
            most_used_agent=most_used_agent,
            total_execution_time=total_execution_time,
            average_execution_time=average_execution_time,
            cost_breakdown=cost_breakdown,
        )

    except Exception as e:
        logger.error(f"Failed to get user summary data: {e}")
        raise DatabaseError(f"Failed to get user summary data: {e}") from e

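Editor's note: the aggregator above converts credit costs at 100 credits = $1 and folds statuses, costs, and durations in one pass. A self-contained sketch of that aggregation over plain dicts (the field names here are illustrative, not the real `GraphExecutionMeta` schema):

from collections import defaultdict

executions = [
    {"graph_id": "g1", "status": "COMPLETED", "cost": 250, "duration": 12.0},
    {"graph_id": "g1", "status": "FAILED", "cost": 50, "duration": 3.0},
    {"graph_id": "g2", "status": "TERMINATED", "cost": 100, "duration": 5.0},
]

cost_by_graph: dict[str, float] = defaultdict(float)
durations: list[float] = []
successful = failed = terminated = 0

for ex in executions:
    if ex["status"] == "COMPLETED":
        successful += 1
    elif ex["status"] == "FAILED":
        failed += 1
    elif ex["status"] == "TERMINATED":
        terminated += 1
    cost_by_graph[ex["graph_id"]] += ex["cost"] / 100  # credits -> dollars
    durations.append(ex["duration"])

average = sum(durations) / len(durations) if durations else 0
assert cost_by_graph["g1"] == 3.0  # (250 + 50) / 100
assert failed + terminated == 2    # TERMINATED is reported as failed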
@@ -96,6 +96,12 @@ class User(BaseModel):
        default=True, description="Notify on monthly summary"
    )

    # User timezone for scheduling and time display
    timezone: str = Field(
        default="not-set",
        description="User timezone (IANA timezone identifier or 'not-set')",
    )

    @classmethod
    def from_db(cls, prisma_user: "PrismaUser") -> "User":
        """Convert a database User object to application User model."""

@@ -149,6 +155,7 @@ class User(BaseModel):
            notify_on_daily_summary=prisma_user.notifyOnDailySummary or True,
            notify_on_weekly_summary=prisma_user.notifyOnWeeklySummary or True,
            notify_on_monthly_summary=prisma_user.notifyOnMonthlySummary or True,
            timezone=prisma_user.timezone or "not-set",
        )


@@ -764,6 +771,9 @@ class NodeExecutionStats(BaseModel):
    output_token_count: int = 0
    extra_cost: int = 0
    extra_steps: int = 0
    # Moderation fields
    cleared_inputs: Optional[dict[str, list[str]]] = None
    cleared_outputs: Optional[dict[str, list[str]]] = None

    def __iadd__(self, other: "NodeExecutionStats") -> "NodeExecutionStats":
        """Mutate this instance by adding another NodeExecutionStats."""

@@ -818,3 +828,21 @@ class GraphExecutionStats(BaseModel):
    activity_status: Optional[str] = Field(
        default=None, description="AI-generated summary of what the agent did"
    )


class UserExecutionSummaryStats(BaseModel):
    """Summary of user statistics for a specific user."""

    model_config = ConfigDict(
        extra="allow",
        arbitrary_types_allowed=True,
    )

    total_credits_used: float = Field(default=0)
    total_executions: int = Field(default=0)
    successful_runs: int = Field(default=0)
    failed_runs: int = Field(default=0)
    most_used_agent: str = Field(default="")
    total_execution_time: float = Field(default=0)
    average_execution_time: float = Field(default=0)
    cost_breakdown: dict[str, float] = Field(default_factory=dict)

@@ -54,19 +54,6 @@ class AgentRunData(BaseNotificationData):


class ZeroBalanceData(BaseNotificationData):
    last_transaction: float
    last_transaction_time: datetime
    top_up_link: str

    @field_validator("last_transaction_time")
    @classmethod
    def validate_timezone(cls, value: datetime):
        if value.tzinfo is None:
            raise ValueError("datetime must have timezone information")
        return value


class LowBalanceData(BaseNotificationData):
    agent_name: str = Field(..., description="Name of the agent")
    current_balance: float = Field(
        ..., description="Current balance in credits (100 = $1)"

@@ -75,6 +62,13 @@ class LowBalanceData(BaseNotificationData):
    shortfall: float = Field(..., description="Amount of credits needed to continue")


class LowBalanceData(BaseNotificationData):
    current_balance: float = Field(
        ..., description="Current balance in credits (100 = $1)"
    )
    billing_page_link: str = Field(..., description="Link to billing page")


class BlockExecutionFailedData(BaseNotificationData):
    block_name: str
    block_id: str

@@ -181,6 +175,42 @@ class RefundRequestData(BaseNotificationData):
    balance: int


class AgentApprovalData(BaseNotificationData):
    agent_name: str
    agent_id: str
    agent_version: int
    reviewer_name: str
    reviewer_email: str
    comments: str
    reviewed_at: datetime
    store_url: str

    @field_validator("reviewed_at")
    @classmethod
    def validate_timezone(cls, value: datetime):
        if value.tzinfo is None:
            raise ValueError("datetime must have timezone information")
        return value


class AgentRejectionData(BaseNotificationData):
    agent_name: str
    agent_id: str
    agent_version: int
    reviewer_name: str
    reviewer_email: str
    comments: str
    reviewed_at: datetime
    resubmit_url: str

    @field_validator("reviewed_at")
    @classmethod
    def validate_timezone(cls, value: datetime):
        if value.tzinfo is None:
            raise ValueError("datetime must have timezone information")
        return value


NotificationData = Annotated[
    Union[
        AgentRunData,

@@ -240,6 +270,8 @@ def get_notif_data_type(
        NotificationType.MONTHLY_SUMMARY: MonthlySummaryData,
        NotificationType.REFUND_REQUEST: RefundRequestData,
        NotificationType.REFUND_PROCESSED: RefundRequestData,
        NotificationType.AGENT_APPROVED: AgentApprovalData,
        NotificationType.AGENT_REJECTED: AgentRejectionData,
    }[notification_type]


@@ -274,7 +306,7 @@ class NotificationTypeOverride:
            # These are batched by the notification service
            NotificationType.AGENT_RUN: QueueType.BATCH,
            # These are batched by the notification service, but with a backoff strategy
            NotificationType.ZERO_BALANCE: QueueType.BACKOFF,
            NotificationType.ZERO_BALANCE: QueueType.IMMEDIATE,
            NotificationType.LOW_BALANCE: QueueType.IMMEDIATE,
            NotificationType.BLOCK_EXECUTION_FAILED: QueueType.BACKOFF,
            NotificationType.CONTINUOUS_AGENT_ERROR: QueueType.BACKOFF,

@@ -283,6 +315,8 @@ class NotificationTypeOverride:
            NotificationType.MONTHLY_SUMMARY: QueueType.SUMMARY,
            NotificationType.REFUND_REQUEST: QueueType.ADMIN,
            NotificationType.REFUND_PROCESSED: QueueType.ADMIN,
            NotificationType.AGENT_APPROVED: QueueType.IMMEDIATE,
            NotificationType.AGENT_REJECTED: QueueType.IMMEDIATE,
        }
        return BATCHING_RULES.get(self.notification_type, QueueType.IMMEDIATE)

@@ -300,6 +334,8 @@ class NotificationTypeOverride:
            NotificationType.MONTHLY_SUMMARY: "monthly_summary.html",
            NotificationType.REFUND_REQUEST: "refund_request.html",
            NotificationType.REFUND_PROCESSED: "refund_processed.html",
            NotificationType.AGENT_APPROVED: "agent_approved.html",
            NotificationType.AGENT_REJECTED: "agent_rejected.html",
        }[self.notification_type]

    @property

@@ -315,6 +351,8 @@ class NotificationTypeOverride:
            NotificationType.MONTHLY_SUMMARY: "We did a lot this month!",
            NotificationType.REFUND_REQUEST: "[ACTION REQUIRED] You got a ${{data.amount / 100}} refund request from {{data.user_name}}",
            NotificationType.REFUND_PROCESSED: "Refund for ${{data.amount / 100}} to {{data.user_name}} has been processed",
            NotificationType.AGENT_APPROVED: "🎉 Your agent '{{data.agent_name}}' has been approved!",
            NotificationType.AGENT_REJECTED: "Your agent '{{data.agent_name}}' needs some updates",
        }[self.notification_type]


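Editor's note: both review-notification models above reuse the same `field_validator` to reject naive datetimes. A minimal standalone version of that validator, runnable on its own:

from datetime import datetime, timezone

from pydantic import BaseModel, field_validator

class ReviewedEvent(BaseModel):
    reviewed_at: datetime

    @field_validator("reviewed_at")
    @classmethod
    def validate_timezone(cls, value: datetime) -> datetime:
        # Naive datetimes are ambiguous across user timezones, so reject them.
        if value.tzinfo is None:
            raise ValueError("datetime must have timezone information")
        return value

ReviewedEvent(reviewed_at=datetime.now(timezone.utc))  # OK
# ReviewedEvent(reviewed_at=datetime.now())  # would raise ValidationError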
autogpt_platform/backend/backend/data/notifications_test.py (new file, 151 lines)
@@ -0,0 +1,151 @@
"""Tests for notification data models."""

from datetime import datetime, timezone

import pytest
from pydantic import ValidationError

from backend.data.notifications import AgentApprovalData, AgentRejectionData


class TestAgentApprovalData:
    """Test cases for AgentApprovalData model."""

    def test_valid_agent_approval_data(self):
        """Test creating valid AgentApprovalData."""
        data = AgentApprovalData(
            agent_name="Test Agent",
            agent_id="test-agent-123",
            agent_version=1,
            reviewer_name="John Doe",
            reviewer_email="john@example.com",
            comments="Great agent, approved!",
            reviewed_at=datetime.now(timezone.utc),
            store_url="https://app.autogpt.com/store/test-agent-123",
        )

        assert data.agent_name == "Test Agent"
        assert data.agent_id == "test-agent-123"
        assert data.agent_version == 1
        assert data.reviewer_name == "John Doe"
        assert data.reviewer_email == "john@example.com"
        assert data.comments == "Great agent, approved!"
        assert data.store_url == "https://app.autogpt.com/store/test-agent-123"
        assert data.reviewed_at.tzinfo is not None

    def test_agent_approval_data_without_timezone_raises_error(self):
        """Test that AgentApprovalData raises error without timezone."""
        with pytest.raises(
            ValidationError, match="datetime must have timezone information"
        ):
            AgentApprovalData(
                agent_name="Test Agent",
                agent_id="test-agent-123",
                agent_version=1,
                reviewer_name="John Doe",
                reviewer_email="john@example.com",
                comments="Great agent, approved!",
                reviewed_at=datetime.now(),  # No timezone
                store_url="https://app.autogpt.com/store/test-agent-123",
            )

    def test_agent_approval_data_with_empty_comments(self):
        """Test AgentApprovalData with empty comments."""
        data = AgentApprovalData(
            agent_name="Test Agent",
            agent_id="test-agent-123",
            agent_version=1,
            reviewer_name="John Doe",
            reviewer_email="john@example.com",
            comments="",  # Empty comments
            reviewed_at=datetime.now(timezone.utc),
            store_url="https://app.autogpt.com/store/test-agent-123",
        )

        assert data.comments == ""


class TestAgentRejectionData:
    """Test cases for AgentRejectionData model."""

    def test_valid_agent_rejection_data(self):
        """Test creating valid AgentRejectionData."""
        data = AgentRejectionData(
            agent_name="Test Agent",
            agent_id="test-agent-123",
            agent_version=1,
            reviewer_name="Jane Doe",
            reviewer_email="jane@example.com",
            comments="Please fix the security issues before resubmitting.",
            reviewed_at=datetime.now(timezone.utc),
            resubmit_url="https://app.autogpt.com/build/test-agent-123",
        )

        assert data.agent_name == "Test Agent"
        assert data.agent_id == "test-agent-123"
        assert data.agent_version == 1
        assert data.reviewer_name == "Jane Doe"
        assert data.reviewer_email == "jane@example.com"
        assert data.comments == "Please fix the security issues before resubmitting."
        assert data.resubmit_url == "https://app.autogpt.com/build/test-agent-123"
        assert data.reviewed_at.tzinfo is not None

    def test_agent_rejection_data_without_timezone_raises_error(self):
        """Test that AgentRejectionData raises error without timezone."""
        with pytest.raises(
            ValidationError, match="datetime must have timezone information"
        ):
            AgentRejectionData(
                agent_name="Test Agent",
                agent_id="test-agent-123",
                agent_version=1,
                reviewer_name="Jane Doe",
                reviewer_email="jane@example.com",
                comments="Please fix the security issues.",
                reviewed_at=datetime.now(),  # No timezone
                resubmit_url="https://app.autogpt.com/build/test-agent-123",
            )

    def test_agent_rejection_data_with_long_comments(self):
        """Test AgentRejectionData with long comments."""
        long_comment = "A" * 1000  # Very long comment
        data = AgentRejectionData(
            agent_name="Test Agent",
            agent_id="test-agent-123",
            agent_version=1,
            reviewer_name="Jane Doe",
            reviewer_email="jane@example.com",
            comments=long_comment,
            reviewed_at=datetime.now(timezone.utc),
            resubmit_url="https://app.autogpt.com/build/test-agent-123",
        )

        assert data.comments == long_comment

    def test_model_serialization(self):
        """Test that models can be serialized and deserialized."""
        original_data = AgentRejectionData(
            agent_name="Test Agent",
            agent_id="test-agent-123",
            agent_version=1,
            reviewer_name="Jane Doe",
            reviewer_email="jane@example.com",
            comments="Please fix the issues.",
            reviewed_at=datetime.now(timezone.utc),
            resubmit_url="https://app.autogpt.com/build/test-agent-123",
        )

        # Serialize to dict
        data_dict = original_data.model_dump()

        # Deserialize back
        restored_data = AgentRejectionData.model_validate(data_dict)

        assert restored_data.agent_name == original_data.agent_name
        assert restored_data.agent_id == original_data.agent_id
        assert restored_data.agent_version == original_data.agent_version
        assert restored_data.reviewer_name == original_data.reviewer_name
        assert restored_data.reviewer_email == original_data.reviewer_email
        assert restored_data.comments == original_data.comments
        assert restored_data.reviewed_at == original_data.reviewed_at
        assert restored_data.resubmit_url == original_data.resubmit_url
@@ -208,6 +208,8 @@ async def get_user_notification_preference(user_id: str) -> NotificationPreferen
            NotificationType.DAILY_SUMMARY: user.notifyOnDailySummary or False,
            NotificationType.WEEKLY_SUMMARY: user.notifyOnWeeklySummary or False,
            NotificationType.MONTHLY_SUMMARY: user.notifyOnMonthlySummary or False,
            NotificationType.AGENT_APPROVED: user.notifyOnAgentApproved or False,
            NotificationType.AGENT_REJECTED: user.notifyOnAgentRejected or False,
        }
        daily_limit = user.maxEmailsPerDay or 3
        notification_preference = NotificationPreference(

@@ -266,6 +268,14 @@ async def update_user_notification_preference(
            update_data["notifyOnMonthlySummary"] = data.preferences[
                NotificationType.MONTHLY_SUMMARY
            ]
        if NotificationType.AGENT_APPROVED in data.preferences:
            update_data["notifyOnAgentApproved"] = data.preferences[
                NotificationType.AGENT_APPROVED
            ]
        if NotificationType.AGENT_REJECTED in data.preferences:
            update_data["notifyOnAgentRejected"] = data.preferences[
                NotificationType.AGENT_REJECTED
            ]
        if data.daily_limit:
            update_data["maxEmailsPerDay"] = data.daily_limit

@@ -286,6 +296,8 @@ async def update_user_notification_preference(
            NotificationType.DAILY_SUMMARY: user.notifyOnDailySummary or True,
            NotificationType.WEEKLY_SUMMARY: user.notifyOnWeeklySummary or True,
            NotificationType.MONTHLY_SUMMARY: user.notifyOnMonthlySummary or True,
            NotificationType.AGENT_APPROVED: user.notifyOnAgentApproved or True,
            NotificationType.AGENT_REJECTED: user.notifyOnAgentRejected or True,
        }
        notification_preference = NotificationPreference(
            user_id=user.id,

@@ -384,3 +396,17 @@ async def unsubscribe_user_by_token(token: str) -> None:
        )
    except Exception as e:
        raise DatabaseError(f"Failed to unsubscribe user by token {token}: {e}") from e


async def update_user_timezone(user_id: str, timezone: str) -> User:
    """Update a user's timezone setting."""
    try:
        user = await PrismaUser.prisma().update(
            where={"id": user_id},
            data={"timezone": timezone},
        )
        if not user:
            raise ValueError(f"User not found with ID: {user_id}")
        return User.from_db(user)
    except Exception as e:
        raise DatabaseError(f"Failed to update timezone for user {user_id}: {e}") from e

@@ -6,20 +6,17 @@ import json
import logging
from typing import TYPE_CHECKING, Any, NotRequired, TypedDict

from autogpt_libs.feature_flag.client import is_feature_enabled
from pydantic import SecretStr

from backend.blocks.llm import LlmModel, llm_call
from backend.data.block import get_block
from backend.data.execution import ExecutionStatus, NodeExecutionResult
from backend.data.model import APIKeyCredentials, GraphExecutionStats
from backend.util.feature_flag import Flag, is_feature_enabled
from backend.util.retry import func_retry
from backend.util.settings import Settings
from backend.util.truncate import truncate

# LaunchDarkly feature flag key for AI activity status generation
AI_ACTIVITY_STATUS_FLAG_KEY = "ai-agent-execution-summary"

if TYPE_CHECKING:
    from backend.executor import DatabaseManagerAsyncClient

@@ -103,9 +100,7 @@ async def generate_activity_status_for_execution(
        AI-generated activity status string, or None if feature is disabled
    """
    # Check LaunchDarkly feature flag for AI activity status generation with full context support
    if not await is_feature_enabled(
        AI_ACTIVITY_STATUS_FLAG_KEY, user_id, default=False
    ):
    if not await is_feature_enabled(Flag.AI_ACTIVITY_STATUS, user_id):
        logger.debug("AI activity status generation is disabled via LaunchDarkly")
        return None

@@ -20,6 +20,7 @@ from backend.data.execution (
    upsert_execution_input,
    upsert_execution_output,
)
from backend.data.generate_data import get_user_execution_summary_data
from backend.data.graph import (
    get_connected_output_nodes,
    get_graph,

@@ -35,13 +36,14 @@ from backend.data.notifications (
)
from backend.data.user import (
    get_active_user_ids_in_timerange,
    get_user_by_id,
    get_user_email_by_id,
    get_user_email_verification,
    get_user_integrations,
    get_user_notification_preference,
    update_user_integrations,
)
from backend.server.v2.library.db import add_store_agent_to_library, list_library_agents
from backend.server.v2.store.db import get_store_agent_details, get_store_agents
from backend.util.service import (
    AppService,
    AppServiceClient,

@@ -130,7 +132,6 @@ class DatabaseManager(AppService):

    # User Comms - async
    get_active_user_ids_in_timerange = _(get_active_user_ids_in_timerange)
    get_user_by_id = _(get_user_by_id)
    get_user_email_by_id = _(get_user_email_by_id)
    get_user_email_verification = _(get_user_email_verification)
    get_user_notification_preference = _(get_user_notification_preference)

@@ -146,6 +147,17 @@ class DatabaseManager(AppService):
        get_user_notification_oldest_message_in_batch
    )

    # Library
    list_library_agents = _(list_library_agents)
    add_store_agent_to_library = _(add_store_agent_to_library)

    # Store
    get_store_agents = _(get_store_agents)
    get_store_agent_details = _(get_store_agent_details)

    # Summary data - async
    get_user_execution_summary_data = _(get_user_execution_summary_data)


class DatabaseManagerClient(AppServiceClient):
    d = DatabaseManager

@@ -174,6 +186,17 @@ class DatabaseManagerClient(AppServiceClient):
    # Block error monitoring
    get_block_error_stats = _(d.get_block_error_stats)

    # User Emails
    get_user_email_by_id = _(d.get_user_email_by_id)

    # Library
    list_library_agents = _(d.list_library_agents)
    add_store_agent_to_library = _(d.add_store_agent_to_library)

    # Store
    get_store_agents = _(d.get_store_agents)
    get_store_agent_details = _(d.get_store_agent_details)


class DatabaseManagerAsyncClient(AppServiceClient):
    d = DatabaseManager

@@ -203,7 +226,6 @@ class DatabaseManagerAsyncClient(AppServiceClient):

    # User Comms
    get_active_user_ids_in_timerange = d.get_active_user_ids_in_timerange
    get_user_by_id = d.get_user_by_id
    get_user_email_by_id = d.get_user_email_by_id
    get_user_email_verification = d.get_user_email_verification
    get_user_notification_preference = d.get_user_notification_preference

@@ -218,3 +240,14 @@ class DatabaseManagerAsyncClient(AppServiceClient):
    get_user_notification_oldest_message_in_batch = (
        d.get_user_notification_oldest_message_in_batch
    )

    # Library
    list_library_agents = d.list_library_agents
    add_store_agent_to_library = d.add_store_agent_to_library

    # Store
    get_store_agents = d.get_store_agents
    get_store_agent_details = d.get_store_agent_details

    # Summary data
    get_user_execution_summary_data = d.get_user_execution_summary_data

@@ -20,6 +20,7 @@ from backend.data.notifications (
    LowBalanceData,
    NotificationEventModel,
    NotificationType,
    ZeroBalanceData,
)
from backend.data.rabbitmq import SyncRabbitMQ
from backend.executor.activity_status_generator import (

@@ -27,7 +28,7 @@ from backend.executor.activity_status_generator (
)
from backend.executor.utils import LogMetadata
from backend.notifications.notifications import queue_notification
from backend.util.exceptions import InsufficientBalanceError
from backend.util.exceptions import InsufficientBalanceError, ModerationError

if TYPE_CHECKING:
    from backend.executor import DatabaseManagerClient, DatabaseManagerAsyncClient

@@ -51,6 +52,7 @@ from backend.data.execution (
    GraphExecutionEntry,
    NodeExecutionEntry,
    NodeExecutionResult,
    UserContext,
)
from backend.data.graph import Link, Node
from backend.executor.utils import (

@@ -67,12 +69,14 @@ from backend.executor.utils (
    validate_exec,
)
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.server.v2.AutoMod.manager import automod_manager
from backend.util import json
from backend.util.clients import (
    get_async_execution_event_bus,
    get_database_manager_async_client,
    get_database_manager_client,
    get_execution_event_bus,
    get_notification_manager_client,
)
from backend.util.decorator import (
    async_error_logged,

@@ -82,6 +86,7 @@ from backend.util.decorator (
)
from backend.util.file import clean_exec_files
from backend.util.logging import TruncatedLogger, configure_logging
from backend.util.metrics import DiscordChannel
from backend.util.process import AppProcess, set_service_name
from backend.util.retry import continuous_retry, func_retry
from backend.util.settings import Settings

@@ -189,6 +194,9 @@ async def execute_node(
        "user_id": user_id,
    }

    # Add user context from NodeExecutionEntry
    extra_exec_kwargs["user_context"] = data.user_context

    # Last-minute fetch credentials + acquire a system-wide read-write lock to prevent
    # changes during execution. ⚠️ This means a set of credentials can only be used by
    # one (running) block at a time; simultaneous execution of blocks using same

@@ -235,6 +243,7 @@ async def _enqueue_next_nodes(
    graph_id: str,
    log_metadata: LogMetadata,
    nodes_input_masks: Optional[dict[str, dict[str, JsonValue]]],
    user_context: UserContext,
) -> list[NodeExecutionEntry]:
    async def add_enqueued_execution(
        node_exec_id: str, node_id: str, block_id: str, data: BlockInput

@@ -253,6 +262,7 @@ async def _enqueue_next_nodes(
            node_id=node_id,
            block_id=block_id,
            inputs=data,
            user_context=user_context,
        )

    async def register_next_executions(node_link: Link) -> list[NodeExecutionEntry]:

@@ -677,19 +687,20 @@ class ExecutionProcessor:
        self,
        node_exec: NodeExecutionEntry,
        execution_count: int,
    ) -> int:
    ) -> tuple[int, int]:
        total_cost = 0
        remaining_balance = 0
        db_client = get_db_client()
        block = get_block(node_exec.block_id)
        if not block:
            logger.error(f"Block {node_exec.block_id} not found.")
            return total_cost
            return total_cost, 0

        cost, matching_filter = block_usage_cost(
            block=block, input_data=node_exec.inputs
        )
        if cost > 0:
            db_client.spend_credits(
            remaining_balance = db_client.spend_credits(
                user_id=node_exec.user_id,
                cost=cost,
                metadata=UsageTransactionMetadata(

@@ -707,7 +718,7 @@ class ExecutionProcessor:

        cost, usage_count = execution_usage_cost(execution_count)
        if cost > 0:
            db_client.spend_credits(
            remaining_balance = db_client.spend_credits(
                user_id=node_exec.user_id,
                cost=cost,
                metadata=UsageTransactionMetadata(

@@ -722,7 +733,7 @@ class ExecutionProcessor:
            )
            total_cost += cost

        return total_cost
        return total_cost, remaining_balance

    @time_measured
    def _on_graph_execution(

@@ -759,6 +770,22 @@ class ExecutionProcessor:
            amount=1,
        )

        # Input moderation
        try:
            if moderation_error := asyncio.run_coroutine_threadsafe(
                automod_manager.moderate_graph_execution_inputs(
                    db_client=get_db_async_client(),
                    graph_exec=graph_exec,
                ),
                self.node_evaluation_loop,
            ).result(timeout=30.0):
                raise moderation_error
        except asyncio.TimeoutError:
            log_metadata.warning(
                f"Input moderation timed out for graph execution {graph_exec.graph_exec_id}, bypassing moderation and continuing execution"
            )
            # Continue execution without moderation

        # ------------------------------------------------------------
        # Pre‑populate queue ---------------------------------------
        # ------------------------------------------------------------

@@ -770,7 +797,8 @@ class ExecutionProcessor:
                ExecutionStatus.TERMINATED,
            ],
        ):
            execution_queue.add(node_exec.to_node_execution_entry())
            node_entry = node_exec.to_node_execution_entry(graph_exec.user_context)
            execution_queue.add(node_entry)

        # ------------------------------------------------------------
        # Main dispatch / polling loop -----------------------------

@@ -788,12 +816,19 @@ class ExecutionProcessor:

            # Charge usage (may raise) ------------------------------
            try:
                cost = self._charge_usage(
                cost, remaining_balance = self._charge_usage(
                    node_exec=queued_node_exec,
                    execution_count=increment_execution_count(graph_exec.user_id),
                )
                with execution_stats_lock:
                    execution_stats.cost += cost
                # Check if we crossed the low balance threshold
                self._handle_low_balance(
                    db_client=db_client,
                    user_id=graph_exec.user_id,
                    current_balance=remaining_balance,
                    transaction_cost=cost,
                )
            except InsufficientBalanceError as balance_error:
                error = balance_error  # Set error to trigger FAILED status
                node_exec_id = queued_node_exec.node_exec_id

@@ -808,11 +843,10 @@ class ExecutionProcessor:
                    status=ExecutionStatus.FAILED,
                )

                self._handle_low_balance_notif(
                self._handle_insufficient_funds_notif(
                    db_client,
                    graph_exec.user_id,
                    graph_exec.graph_id,
                    execution_stats,
                    error,
                )
                # Gracefully stop the execution loop

@@ -897,6 +931,25 @@ class ExecutionProcessor:
                time.sleep(0.1)

        # loop done --------------------------------------------------

        # Output moderation
        try:
            if moderation_error := asyncio.run_coroutine_threadsafe(
                automod_manager.moderate_graph_execution_outputs(
                    db_client=get_db_async_client(),
                    graph_exec_id=graph_exec.graph_exec_id,
                    user_id=graph_exec.user_id,
                    graph_id=graph_exec.graph_id,
                ),
                self.node_evaluation_loop,
            ).result(timeout=30.0):
                raise moderation_error
        except asyncio.TimeoutError:
            log_metadata.warning(
                f"Output moderation timed out for graph execution {graph_exec.graph_exec_id}, bypassing moderation and continuing execution"
            )
            # Continue execution without moderation

        # Determine final execution status based on whether there was an error or termination
        if cancel.is_set():
            execution_status = ExecutionStatus.TERMINATED

@@ -917,11 +970,12 @@ class ExecutionProcessor:
            else Exception(f"{e.__class__.__name__}: {e}")
        )

        known_errors = (InsufficientBalanceError,)
        known_errors = (InsufficientBalanceError, ModerationError)
        if isinstance(error, known_errors):
            execution_stats.error = str(error)
            return ExecutionStatus.FAILED

        execution_status = ExecutionStatus.FAILED
        log_metadata.exception(
            f"Failed graph execution {graph_exec.graph_exec_id}: {error}"
        )

@@ -1015,6 +1069,7 @@ class ExecutionProcessor:
        db_client = get_db_async_client()

        log_metadata.debug(f"Enqueue nodes for {node_id}: {output}")

        for next_execution in await _enqueue_next_nodes(
            db_client=db_client,
            node=output.node,

@@ -1024,6 +1079,7 @@ class ExecutionProcessor:
            graph_id=graph_exec.graph_id,
            log_metadata=log_metadata,
            nodes_input_masks=nodes_input_masks,
            user_context=graph_exec.user_context,
        ):
            execution_queue.add(next_execution)

@@ -1064,25 +1120,25 @@ class ExecutionProcessor:
            )
        )

    def _handle_low_balance_notif(
    def _handle_insufficient_funds_notif(
        self,
        db_client: "DatabaseManagerClient",
        user_id: str,
        graph_id: str,
        exec_stats: GraphExecutionStats,
        e: InsufficientBalanceError,
    ):
        shortfall = e.balance - e.amount
        shortfall = abs(e.amount) - e.balance
        metadata = db_client.get_graph_metadata(graph_id)
        base_url = (
            settings.config.frontend_base_url or settings.config.platform_base_url
        )

        queue_notification(
            NotificationEventModel(
                user_id=user_id,
                type=NotificationType.LOW_BALANCE,
                data=LowBalanceData(
                    current_balance=exec_stats.cost,
                type=NotificationType.ZERO_BALANCE,
                data=ZeroBalanceData(
                    current_balance=e.balance,
                    billing_page_link=f"{base_url}/profile/credits",
                    shortfall=shortfall,
                    agent_name=metadata.name if metadata else "Unknown Agent",

@@ -1090,6 +1146,73 @@ class ExecutionProcessor:
            )
        )

        try:
            user_email = db_client.get_user_email_by_id(user_id)

            alert_message = (
                f"❌ **Insufficient Funds Alert**\n"
                f"User: {user_email or user_id}\n"
                f"Agent: {metadata.name if metadata else 'Unknown Agent'}\n"
                f"Current balance: ${e.balance/100:.2f}\n"
                f"Attempted cost: ${abs(e.amount)/100:.2f}\n"
                f"Shortfall: ${abs(shortfall)/100:.2f}\n"
                f"[View User Details]({base_url}/admin/spending?search={user_email})"
            )

            get_notification_manager_client().discord_system_alert(
                alert_message, DiscordChannel.PRODUCT
            )
        except Exception as alert_error:
            logger.error(
                f"Failed to send insufficient funds Discord alert: {alert_error}"
            )

    def _handle_low_balance(
        self,
        db_client: "DatabaseManagerClient",
        user_id: str,
        current_balance: int,
        transaction_cost: int,
    ):
        """Check and handle low balance scenarios after a transaction"""
        LOW_BALANCE_THRESHOLD = settings.config.low_balance_threshold

        balance_before = current_balance + transaction_cost

        if (
            current_balance < LOW_BALANCE_THRESHOLD
            and balance_before >= LOW_BALANCE_THRESHOLD
        ):
            base_url = (
                settings.config.frontend_base_url or settings.config.platform_base_url
            )
            queue_notification(
                NotificationEventModel(
                    user_id=user_id,
                    type=NotificationType.LOW_BALANCE,
                    data=LowBalanceData(
                        current_balance=current_balance,
                        billing_page_link=f"{base_url}/profile/credits",
                    ),
                )
            )

            try:
                user_email = db_client.get_user_email_by_id(user_id)
                alert_message = (
                    f"⚠️ **Low Balance Alert**\n"
                    f"User: {user_email or user_id}\n"
                    f"Balance dropped below ${LOW_BALANCE_THRESHOLD/100:.2f}\n"
                    f"Current balance: ${current_balance/100:.2f}\n"
                    f"Transaction cost: ${transaction_cost/100:.2f}\n"
                    f"[View User Details]({base_url}/admin/spending?search={user_email})"
                )
                get_notification_manager_client().discord_system_alert(
                    alert_message, DiscordChannel.PRODUCT
                )
            except Exception as e:
                logger.error(f"Failed to send low balance Discord alert: {e}")


class ExecutionManager(AppProcess):
    def __init__(self):

@@ -1171,6 +1294,9 @@ class ExecutionManager(AppProcess):
            )
            return

        # Check if channel is closed and force reconnection if needed
        if not self.cancel_client.is_ready:
            self.cancel_client.disconnect()
            self.cancel_client.connect()
        cancel_channel = self.cancel_client.get_channel()
        cancel_channel.basic_consume(

@@ -1200,6 +1326,9 @@ class ExecutionManager(AppProcess):
            )
            return

        # Check if channel is closed and force reconnection if needed
        if not self.run_client.is_ready:
            self.run_client.disconnect()
            self.run_client.connect()
        run_channel = self.run_client.get_channel()
        run_channel.basic_qos(prefetch_count=self.pool_size)

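Editor's note: `_handle_low_balance` above fires only on the transaction that crosses the threshold, by reconstructing the pre-transaction balance from the remaining balance plus the cost just charged. The edge condition in isolation, as a runnable sketch:

LOW_BALANCE_THRESHOLD = 500  # credits; 100 credits = $1

def crossed_low_balance(current_balance: int, transaction_cost: int) -> bool:
    # Notify only when this transaction moved the balance from >= threshold
    # to < threshold; repeat charges already below the threshold stay silent.
    balance_before = current_balance + transaction_cost
    return current_balance < LOW_BALANCE_THRESHOLD <= balance_before

assert crossed_low_balance(400, 600) is True   # $10 -> $4 crosses the $5 line
assert crossed_low_balance(600, 100) is False  # stays above the threshold
assert crossed_low_balance(300, 100) is False  # was already below it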
@@ -0,0 +1,149 @@
from unittest.mock import MagicMock, patch

import pytest
from prisma.enums import NotificationType

from backend.data.notifications import LowBalanceData
from backend.executor.manager import ExecutionProcessor
from backend.util.test import SpinTestServer


@pytest.mark.asyncio(loop_scope="session")
async def test_handle_low_balance_threshold_crossing(server: SpinTestServer):
    """Test that _handle_low_balance triggers notification when crossing threshold."""

    execution_processor = ExecutionProcessor()
    user_id = "test-user-123"
    current_balance = 400  # $4 - below $5 threshold
    transaction_cost = 600  # $6 transaction

    # Mock dependencies
    with patch(
        "backend.executor.manager.queue_notification"
    ) as mock_queue_notif, patch(
        "backend.executor.manager.get_notification_manager_client"
    ) as mock_get_client, patch(
        "backend.executor.manager.settings"
    ) as mock_settings:

        # Setup mocks
        mock_client = MagicMock()
        mock_get_client.return_value = mock_client
        mock_settings.config.low_balance_threshold = 500  # $5 threshold
        mock_settings.config.frontend_base_url = "https://test.com"

        # Create mock database client
        mock_db_client = MagicMock()
        mock_db_client.get_user_email_by_id.return_value = "test@example.com"

        # Test the low balance handler
        execution_processor._handle_low_balance(
            db_client=mock_db_client,
            user_id=user_id,
            current_balance=current_balance,
            transaction_cost=transaction_cost,
        )

        # Verify notification was queued
        mock_queue_notif.assert_called_once()
        notification_call = mock_queue_notif.call_args[0][0]

        # Verify notification details
        assert notification_call.type == NotificationType.LOW_BALANCE
        assert notification_call.user_id == user_id
        assert isinstance(notification_call.data, LowBalanceData)
        assert notification_call.data.current_balance == current_balance

        # Verify Discord alert was sent
        mock_client.discord_system_alert.assert_called_once()
        discord_message = mock_client.discord_system_alert.call_args[0][0]
        assert "Low Balance Alert" in discord_message
        assert "test@example.com" in discord_message
        assert "$4.00" in discord_message
        assert "$6.00" in discord_message


@pytest.mark.asyncio(loop_scope="session")
async def test_handle_low_balance_no_notification_when_not_crossing(
    server: SpinTestServer,
):
    """Test that no notification is sent when not crossing the threshold."""

    execution_processor = ExecutionProcessor()
    user_id = "test-user-123"
    current_balance = 600  # $6 - above $5 threshold
    transaction_cost = (
        100  # $1 transaction (balance before was $7, still above threshold)
    )

    # Mock dependencies
    with patch(
        "backend.executor.manager.queue_notification"
    ) as mock_queue_notif, patch(
        "backend.executor.manager.get_notification_manager_client"
    ) as mock_get_client, patch(
        "backend.executor.manager.settings"
    ) as mock_settings:

        # Setup mocks
        mock_client = MagicMock()
        mock_get_client.return_value = mock_client
        mock_settings.config.low_balance_threshold = 500  # $5 threshold

        # Create mock database client
        mock_db_client = MagicMock()

        # Test the low balance handler
        execution_processor._handle_low_balance(
            db_client=mock_db_client,
            user_id=user_id,
            current_balance=current_balance,
            transaction_cost=transaction_cost,
        )

        # Verify no notification was sent
        mock_queue_notif.assert_not_called()
        mock_client.discord_system_alert.assert_not_called()


@pytest.mark.asyncio(loop_scope="session")
async def test_handle_low_balance_no_duplicate_when_already_below(
    server: SpinTestServer,
):
    """Test that no notification is sent when already below threshold."""

    execution_processor = ExecutionProcessor()
    user_id = "test-user-123"
    current_balance = 300  # $3 - below $5 threshold
    transaction_cost = (
        100  # $1 transaction (balance before was $4, also below threshold)
    )

    # Mock dependencies
    with patch(
        "backend.executor.manager.queue_notification"
    ) as mock_queue_notif, patch(
        "backend.executor.manager.get_notification_manager_client"
    ) as mock_get_client, patch(
        "backend.executor.manager.settings"
    ) as mock_settings:

        # Setup mocks
        mock_client = MagicMock()
        mock_get_client.return_value = mock_client
        mock_settings.config.low_balance_threshold = 500  # $5 threshold

        # Create mock database client
        mock_db_client = MagicMock()

        # Test the low balance handler
        execution_processor._handle_low_balance(
            db_client=mock_db_client,
            user_id=user_id,
            current_balance=current_balance,
            transaction_cost=transaction_cost,
        )

        # Verify no notification was sent (user was already below threshold)
        mock_queue_notif.assert_not_called()
        mock_client.discord_system_alert.assert_not_called()

@@ -17,6 +17,7 @@ from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from apscheduler.util import ZoneInfo
from dotenv import load_dotenv
from pydantic import BaseModel, Field, ValidationError
from sqlalchemy import MetaData, create_engine

@@ -269,7 +270,9 @@ class Scheduler(AppService):

        self.scheduler = BackgroundScheduler(
            executors={
                "default": ThreadPoolExecutor(max_workers=10),  # Max 10 concurrent jobs
                "default": ThreadPoolExecutor(
                    max_workers=self.db_pool_size()
                ),  # Match DB pool size to prevent resource contention
            },
            job_defaults={
                "coalesce": True,  # Skip redundant missed jobs - just run the latest

@@ -301,13 +304,15 @@ class Scheduler(AppService):
                Jobstores.WEEKLY_NOTIFICATIONS.value: MemoryJobStore(),
            },
            logger=apscheduler_logger,
            timezone=ZoneInfo("UTC"),
        )

        if self.register_system_tasks:
            # Notification PROCESS WEEKLY SUMMARY
            # Runs every Monday at 9 AM UTC
            self.scheduler.add_job(
                process_weekly_summary,
                CronTrigger.from_crontab("0 * * * *"),
                CronTrigger.from_crontab("0 9 * * 1"),
                id="process_weekly_summary",
                kwargs={},
                replace_existing=True,

@@ -403,6 +408,8 @@ class Scheduler(AppService):
            )
        )

        logger.info(f"Scheduling job for user {user_id} in UTC (cron: {cron})")

        job_args = GraphExecutionJobArgs(
            user_id=user_id,
            graph_id=graph_id,

@@ -415,12 +422,12 @@ class Scheduler(AppService):
            execute_graph,
            kwargs=job_args.model_dump(),
            name=name,
            trigger=CronTrigger.from_crontab(cron),
            trigger=CronTrigger.from_crontab(cron, timezone="UTC"),
            jobstore=Jobstores.EXECUTION.value,
            replace_existing=True,
        )
        logger.info(
            f"Added job {job.id} with cron schedule '{cron}' input data: {input_data}"
            f"Added job {job.id} with cron schedule '{cron}' in UTC, input data: {input_data}"
        )
        return GraphExecutionJobInfo.from_db(job_args, job)


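Editor's note: with the scheduler pinned to UTC above, a crontab string now evaluates against UTC regardless of the host's locale. A sketch of the trigger construction (requires `apscheduler`):

from apscheduler.triggers.cron import CronTrigger

# "0 9 * * 1" = every Monday at 09:00. Passing timezone="UTC" makes the
# schedule independent of the host machine's local timezone setting.
weekly_summary = CronTrigger.from_crontab("0 9 * * 1", timezone="UTC")
print(weekly_summary)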
@@ -18,10 +18,12 @@ from backend.data.execution import (
     ExecutionStatus,
     GraphExecutionStats,
     GraphExecutionWithNodes,
+    UserContext,
 )
 from backend.data.graph import GraphModel, Node
 from backend.data.model import CredentialsMetaInput
 from backend.data.rabbitmq import Exchange, ExchangeType, Queue, RabbitMQConfig
+from backend.data.user import get_user_by_id
 from backend.util.clients import (
     get_async_execution_event_bus,
     get_async_execution_queue,
@@ -34,6 +36,27 @@ from backend.util.mock import MockObject
 from backend.util.settings import Config
 from backend.util.type import convert


+async def get_user_context(user_id: str) -> UserContext:
+    """
+    Get UserContext for a user, always returns a valid context with timezone.
+    Defaults to UTC if user has no timezone set.
+    """
+    user_context = UserContext(timezone="UTC")  # Default to UTC
+    try:
+        user = await get_user_by_id(user_id)
+        if user and user.timezone and user.timezone != "not-set":
+            user_context.timezone = user.timezone
+            logger.debug(f"Retrieved user context: timezone={user.timezone}")
+        else:
+            logger.debug("User has no timezone set, using UTC")
+    except Exception as e:
+        logger.warning(f"Could not fetch user timezone: {e}")
+        # Continue with UTC as default
+
+    return user_context
+
+
 config = Config()
 logger = TruncatedLogger(logging.getLogger(__name__), prefix="[GraphExecutorUtil]")

@@ -548,7 +571,7 @@ async def validate_graph_with_credentials(
     return node_input_errors


-async def _construct_node_execution_input(
+async def _construct_starting_node_execution_input(
     graph: GraphModel,
     user_id: str,
     graph_inputs: BlockInput,
@@ -622,7 +645,7 @@ async def validate_and_construct_node_execution_input(
     graph_version: Optional[int] = None,
     graph_credentials_inputs: Optional[dict[str, CredentialsMetaInput]] = None,
     nodes_input_masks: Optional[dict[str, dict[str, JsonValue]]] = None,
-) -> tuple[GraphModel, list[tuple[str, BlockInput]]]:
+) -> tuple[GraphModel, list[tuple[str, BlockInput]], dict[str, dict[str, JsonValue]]]:
     """
     Public wrapper that handles graph fetching, credential mapping, and validation+construction.
     This centralizes the logic used by both scheduler validation and actual execution.
@@ -666,14 +689,14 @@ async def validate_and_construct_node_execution_input(
         nodes_input_masks or {},
     )

-    starting_nodes_input = await _construct_node_execution_input(
+    starting_nodes_input = await _construct_starting_node_execution_input(
         graph=graph,
         user_id=user_id,
         graph_inputs=graph_inputs,
         nodes_input_masks=nodes_input_masks,
     )

-    return graph, starting_nodes_input
+    return graph, starting_nodes_input, nodes_input_masks


 def _merge_nodes_input_masks(
@@ -856,13 +879,15 @@ async def add_graph_execution(
     else:
         edb = get_database_manager_async_client()

-    graph, starting_nodes_input = await validate_and_construct_node_execution_input(
-        graph_id=graph_id,
-        user_id=user_id,
-        graph_inputs=inputs or {},
-        graph_version=graph_version,
-        graph_credentials_inputs=graph_credentials_inputs,
-        nodes_input_masks=nodes_input_masks,
+    graph, starting_nodes_input, nodes_input_masks = (
+        await validate_and_construct_node_execution_input(
+            graph_id=graph_id,
+            user_id=user_id,
+            graph_inputs=inputs or {},
+            graph_version=graph_version,
+            graph_credentials_inputs=graph_credentials_inputs,
+            nodes_input_masks=nodes_input_masks,
+        )
     )
     graph_exec = None

@@ -875,8 +900,11 @@ async def add_graph_execution(
         preset_id=preset_id,
     )

+    # Fetch user context for the graph execution
+    user_context = await get_user_context(user_id)
+
     queue = await get_async_execution_queue()
-    graph_exec_entry = graph_exec.to_graph_execution_entry()
+    graph_exec_entry = graph_exec.to_graph_execution_entry(user_context)
     if nodes_input_masks:
         graph_exec_entry.nodes_input_masks = nodes_input_masks
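Note: callers now unpack a 3-tuple; the third element matters because validation may merge credential inputs into the node input masks. A minimal caller sketch (the IDs are placeholders):

    graph, starting_nodes_input, nodes_input_masks = (
        await validate_and_construct_node_execution_input(
            graph_id="my-graph-id",  # placeholder
            user_id="my-user-id",    # placeholder
            graph_inputs={},
        )
    )
    # Use the *returned* masks from here on; the ones passed in may be stale.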
@@ -182,6 +182,15 @@ zerobounce_credentials = APIKeyCredentials(
     expires_at=None,
 )

+enrichlayer_credentials = APIKeyCredentials(
+    id="d9fce73a-6c1d-4e8b-ba2e-12a456789def",
+    provider="enrichlayer",
+    api_key=SecretStr(settings.secrets.enrichlayer_api_key),
+    title="Use Credits for Enrichlayer",
+    expires_at=None,
+)
+
 llama_api_credentials = APIKeyCredentials(
     id="d44045af-1c33-4833-9e19-752313214de2",
     provider="llama_api",
@@ -190,6 +199,14 @@ llama_api_credentials = APIKeyCredentials(
     expires_at=None,
 )

+v0_credentials = APIKeyCredentials(
+    id="c4e6d1a0-3b5f-4789-a8e2-9b123456789f",
+    provider="v0",
+    api_key=SecretStr(settings.secrets.v0_api_key),
+    title="Use Credits for v0 by Vercel",
+    expires_at=None,
+)
+
 DEFAULT_CREDENTIALS = [
     ollama_credentials,
     revid_credentials,
@@ -203,6 +220,7 @@ DEFAULT_CREDENTIALS = [
     jina_credentials,
     unreal_credentials,
     open_router_credentials,
+    enrichlayer_credentials,
     fal_credentials,
     exa_credentials,
     e2b_credentials,
@@ -213,6 +231,8 @@ DEFAULT_CREDENTIALS = [
     smartlead_credentials,
     zerobounce_credentials,
     google_maps_credentials,
     llama_api_credentials,
+    v0_credentials,
 ]


@@ -279,6 +299,8 @@ class IntegrationCredentialsStore:
             all_credentials.append(unreal_credentials)
         if settings.secrets.open_router_api_key:
             all_credentials.append(open_router_credentials)
+        if settings.secrets.enrichlayer_api_key:
+            all_credentials.append(enrichlayer_credentials)
         if settings.secrets.fal_api_key:
             all_credentials.append(fal_credentials)
         if settings.secrets.exa_api_key:
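Note: the gating pattern above mirrors DEFAULT_CREDENTIALS: a platform credential is only offered when its API key is configured. The same guard presumably exists for the new v0 credential further down in the file, outside this hunk; sketched (an assumption, not shown in the diff):

    if settings.secrets.v0_api_key:
        all_credentials.append(v0_credentials)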
@@ -4,6 +4,7 @@ from pydantic import BaseModel

 from backend.integrations.oauth.todoist import TodoistOAuthHandler

+from .discord import DiscordOAuthHandler
 from .github import GitHubOAuthHandler
 from .google import GoogleOAuthHandler
 from .notion import NotionOAuthHandler
@@ -15,6 +16,7 @@ if TYPE_CHECKING:
 # --8<-- [start:HANDLERS_BY_NAMEExample]
 # Build handlers dict with string keys for compatibility with SDK auto-registration
 _ORIGINAL_HANDLERS = [
+    DiscordOAuthHandler,
     GitHubOAuthHandler,
     GoogleOAuthHandler,
     NotionOAuthHandler,
autogpt_platform/backend/backend/integrations/oauth/discord.py (new file, 175 lines)
@@ -0,0 +1,175 @@
import time
from typing import Optional
from urllib.parse import urlencode

from backend.data.model import OAuth2Credentials
from backend.integrations.providers import ProviderName
from backend.util.request import Requests

from .base import BaseOAuthHandler


class DiscordOAuthHandler(BaseOAuthHandler):
    """
    Discord OAuth2 handler implementation.

    Based on the documentation at:
    - https://discord.com/developers/docs/topics/oauth2

    Discord OAuth2 tokens expire after 7 days by default and include refresh tokens.
    """

    PROVIDER_NAME = ProviderName.DISCORD
    DEFAULT_SCOPES = ["identify"]  # Basic user information

    def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
        self.client_id = client_id
        self.client_secret = client_secret
        self.redirect_uri = redirect_uri
        self.auth_base_url = "https://discord.com/oauth2/authorize"
        self.token_url = "https://discord.com/api/oauth2/token"
        self.revoke_url = "https://discord.com/api/oauth2/token/revoke"

    def get_login_url(
        self, scopes: list[str], state: str, code_challenge: Optional[str]
    ) -> str:
        # Handle default scopes
        scopes = self.handle_default_scopes(scopes)

        params = {
            "client_id": self.client_id,
            "redirect_uri": self.redirect_uri,
            "response_type": "code",
            "scope": " ".join(scopes),
            "state": state,
        }

        # Discord supports PKCE
        if code_challenge:
            params["code_challenge"] = code_challenge
            params["code_challenge_method"] = "S256"

        return f"{self.auth_base_url}?{urlencode(params)}"

    async def exchange_code_for_tokens(
        self, code: str, scopes: list[str], code_verifier: Optional[str]
    ) -> OAuth2Credentials:
        params = {
            "code": code,
            "redirect_uri": self.redirect_uri,
            "grant_type": "authorization_code",
        }

        # Include PKCE verifier if provided
        if code_verifier:
            params["code_verifier"] = code_verifier

        return await self._request_tokens(params)

    async def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
        if not credentials.access_token:
            raise ValueError("No access token to revoke")

        # Discord requires client authentication for token revocation
        data = {
            "token": credentials.access_token.get_secret_value(),
            "token_type_hint": "access_token",
        }

        headers = {
            "Content-Type": "application/x-www-form-urlencoded",
        }

        response = await Requests().post(
            url=self.revoke_url,
            data=data,
            headers=headers,
            auth=(self.client_id, self.client_secret),
        )

        # Discord returns 200 OK for successful revocation
        return response.status == 200

    async def _refresh_tokens(
        self, credentials: OAuth2Credentials
    ) -> OAuth2Credentials:
        if not credentials.refresh_token:
            return credentials

        return await self._request_tokens(
            {
                "refresh_token": credentials.refresh_token.get_secret_value(),
                "grant_type": "refresh_token",
            },
            current_credentials=credentials,
        )

    async def _request_tokens(
        self,
        params: dict[str, str],
        current_credentials: Optional[OAuth2Credentials] = None,
    ) -> OAuth2Credentials:
        request_body = {
            "client_id": self.client_id,
            "client_secret": self.client_secret,
            **params,
        }

        headers = {
            "Content-Type": "application/x-www-form-urlencoded",
        }

        response = await Requests().post(
            self.token_url, data=request_body, headers=headers
        )
        token_data: dict = response.json()

        # Get username if this is a new token request
        username = None
        if "access_token" in token_data:
            username = await self._request_username(token_data["access_token"])

        now = int(time.time())
        new_credentials = OAuth2Credentials(
            provider=self.PROVIDER_NAME,
            title=current_credentials.title if current_credentials else None,
            username=username,
            access_token=token_data["access_token"],
            scopes=token_data.get("scope", "").split()
            or (current_credentials.scopes if current_credentials else []),
            refresh_token=token_data.get("refresh_token"),
            # Discord tokens expire after expires_in seconds (typically 7 days)
            access_token_expires_at=(
                now + expires_in
                if (expires_in := token_data.get("expires_in", None))
                else None
            ),
            # Discord doesn't provide separate refresh token expiration
            refresh_token_expires_at=None,
        )

        if current_credentials:
            new_credentials.id = current_credentials.id

        return new_credentials

    async def _request_username(self, access_token: str) -> str | None:
        """
        Fetch the username using the Discord OAuth2 @me endpoint.
        """
        url = "https://discord.com/api/oauth2/@me"
        headers = {
            "Authorization": f"Bearer {access_token}",
        }

        response = await Requests().get(url, headers=headers)

        if not response.ok:
            return None

        # Get user info from the response
        data = response.json()
        user_info = data.get("user", {})

        # Return username (without discriminator)
        return user_info.get("username")
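Note: a rough usage sketch of the new handler (client ID/secret and redirect URI are placeholders; in the platform this wiring normally happens through the HANDLERS_BY_NAME registry shown above):

    handler = DiscordOAuthHandler(
        client_id="<client-id>",
        client_secret="<client-secret>",
        redirect_uri="https://example.com/oauth/callback",
    )
    # Empty scopes fall back to DEFAULT_SCOPES (["identify"]).
    login_url = handler.get_login_url(scopes=[], state="<csrf-state>", code_challenge=None)
    # After the user authorizes and Discord redirects back with ?code=...:
    # credentials = await handler.exchange_code_for_tokens(code, scopes=[], code_verifier=None)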
@@ -25,6 +25,7 @@ class ProviderName(str, Enum):
     GROQ = "groq"
     HTTP = "http"
     HUBSPOT = "hubspot"
+    ENRICHLAYER = "enrichlayer"
     IDEOGRAM = "ideogram"
     JINA = "jina"
     LLAMA_API = "llama_api"
@@ -47,6 +48,7 @@ class ProviderName(str, Enum):
     TWITTER = "twitter"
     TODOIST = "todoist"
     UNREAL_SPEECH = "unreal_speech"
+    V0 = "v0"
     ZEROBOUNCE = "zerobounce"

     @classmethod
@@ -29,7 +29,7 @@ from backend.data.user import generate_unsubscribe_link
 from backend.notifications.email import EmailSender
 from backend.util.clients import get_database_manager_async_client
 from backend.util.logging import TruncatedLogger
-from backend.util.metrics import discord_send_alert
+from backend.util.metrics import DiscordChannel, discord_send_alert
 from backend.util.retry import continuous_retry
 from backend.util.service import (
     AppService,
@@ -223,10 +223,14 @@ class NotificationManager(AppService):
         processed_count = 0
         current_time = datetime.now(tz=timezone.utc)
+        start_time = current_time - timedelta(days=7)
+        logger.info(
+            f"Querying for active users between {start_time} and {current_time}"
+        )
         users = await get_database_manager_async_client().get_active_user_ids_in_timerange(
             end_time=current_time.isoformat(),
             start_time=start_time.isoformat(),
         )
+        logger.info(f"Found {len(users)} active users in the last 7 days")
         for user in users:
             await self._queue_scheduled_notification(
                 SummaryParamsEventModel(
@@ -378,16 +382,21 @@ class NotificationManager(AppService):
         }

     @expose
-    async def discord_system_alert(self, content: str):
-        await discord_send_alert(content)
+    async def discord_system_alert(
+        self, content: str, channel: DiscordChannel = DiscordChannel.PLATFORM
+    ):
+        await discord_send_alert(content, channel)

     async def _queue_scheduled_notification(self, event: SummaryParamsEventModel):
         """Queue a scheduled notification - exposed method for other services to call"""
         try:
-            logger.debug(f"Received Request to queue scheduled notification {event=}")
+            logger.info(
+                f"Queueing scheduled notification type={event.type} user_id={event.user_id}"
+            )

             exchange = "notifications"
             routing_key = get_routing_key(event.type)
+            logger.info(f"Using routing key: {routing_key}")

             # Publish to RabbitMQ
             await self.rabbit.publish_message(
@@ -395,6 +404,7 @@ class NotificationManager(AppService):
                 message=event.model_dump_json(),
                 exchange=next(ex for ex in EXCHANGES if ex.name == exchange),
             )
+            logger.info(f"Successfully queued notification for user {event.user_id}")

         except Exception as e:
             logger.exception(f"Error queueing notification: {e}")
@@ -416,85 +426,99 @@ class NotificationManager(AppService):
         # only if both are true, should we email this person
         return validated_email and preference

-    def _gather_summary_data(
+    async def _gather_summary_data(
         self, user_id: str, event_type: NotificationType, params: BaseSummaryParams
     ) -> BaseSummaryData:
         """Gathers the data to build a summary notification"""

         logger.info(
-            f"Gathering summary data for {user_id} and {event_type} wiht {params=}"
+            f"Gathering summary data for {user_id} and {event_type} with {params=}"
         )

-        # total_credits_used = self.run_and_wait(
-        #     get_total_credits_used(user_id, start_time, end_time)
-        # )
-
-        # total_executions = self.run_and_wait(
-        #     get_total_executions(user_id, start_time, end_time)
-        # )
-
-        # most_used_agent = self.run_and_wait(
-        #     get_most_used_agent(user_id, start_time, end_time)
-        # )
-
-        # execution_times = self.run_and_wait(
-        #     get_execution_time(user_id, start_time, end_time)
-        # )
-
-        # runs = self.run_and_wait(
-        #     get_runs(user_id, start_time, end_time)
-        # )
-        total_credits_used = 3.0
-        total_executions = 2
-        most_used_agent = {"name": "Some"}
-        execution_times = [1, 2, 3]
-        runs = [{"status": "COMPLETED"}, {"status": "FAILED"}]
-
-        successful_runs = len([run for run in runs if run["status"] == "COMPLETED"])
-        failed_runs = len([run for run in runs if run["status"] != "COMPLETED"])
-        average_execution_time = (
-            sum(execution_times) / len(execution_times) if execution_times else 0
-        )
-        # cost_breakdown = self.run_and_wait(
-        #     get_cost_breakdown(user_id, start_time, end_time)
-        # )
-
-        cost_breakdown = {
-            "agent1": 1.0,
-            "agent2": 2.0,
-        }
-
-        if event_type == NotificationType.DAILY_SUMMARY and isinstance(
-            params, DailySummaryParams
-        ):
-            return DailySummaryData(
-                total_credits_used=total_credits_used,
-                total_executions=total_executions,
-                most_used_agent=most_used_agent["name"],
-                total_execution_time=sum(execution_times),
-                successful_runs=successful_runs,
-                failed_runs=failed_runs,
-                average_execution_time=average_execution_time,
-                cost_breakdown=cost_breakdown,
-                date=params.date,
-            )
-        elif event_type == NotificationType.WEEKLY_SUMMARY and isinstance(
-            params, WeeklySummaryParams
-        ):
-            return WeeklySummaryData(
-                total_credits_used=total_credits_used,
-                total_executions=total_executions,
-                most_used_agent=most_used_agent["name"],
-                total_execution_time=sum(execution_times),
-                successful_runs=successful_runs,
-                failed_runs=failed_runs,
-                average_execution_time=average_execution_time,
-                cost_breakdown=cost_breakdown,
-                start_date=params.start_date,
-                end_date=params.end_date,
-            )
-        else:
-            raise ValueError("Invalid event type or params")
+        try:
+            # Get summary data from the database
+            summary_data = await get_database_manager_async_client().get_user_execution_summary_data(
+                user_id=user_id,
+                start_time=params.start_date,
+                end_time=params.end_date,
+            )
+
+            # Extract data from summary
+            total_credits_used = summary_data.total_credits_used
+            total_executions = summary_data.total_executions
+            most_used_agent = summary_data.most_used_agent
+            successful_runs = summary_data.successful_runs
+            failed_runs = summary_data.failed_runs
+            total_execution_time = summary_data.total_execution_time
+            average_execution_time = summary_data.average_execution_time
+            cost_breakdown = summary_data.cost_breakdown
+
+            if event_type == NotificationType.DAILY_SUMMARY and isinstance(
+                params, DailySummaryParams
+            ):
+                return DailySummaryData(
+                    total_credits_used=total_credits_used,
+                    total_executions=total_executions,
+                    most_used_agent=most_used_agent,
+                    total_execution_time=total_execution_time,
+                    successful_runs=successful_runs,
+                    failed_runs=failed_runs,
+                    average_execution_time=average_execution_time,
+                    cost_breakdown=cost_breakdown,
+                    date=params.date,
+                )
+            elif event_type == NotificationType.WEEKLY_SUMMARY and isinstance(
+                params, WeeklySummaryParams
+            ):
+                return WeeklySummaryData(
+                    total_credits_used=total_credits_used,
+                    total_executions=total_executions,
+                    most_used_agent=most_used_agent,
+                    total_execution_time=total_execution_time,
+                    successful_runs=successful_runs,
+                    failed_runs=failed_runs,
+                    average_execution_time=average_execution_time,
+                    cost_breakdown=cost_breakdown,
+                    start_date=params.start_date,
+                    end_date=params.end_date,
+                )
+            else:
+                raise ValueError("Invalid event type or params")
+
+        except Exception as e:
+            logger.error(f"Failed to gather summary data: {e}")
+            # Return sensible defaults in case of error
+            if event_type == NotificationType.DAILY_SUMMARY and isinstance(
+                params, DailySummaryParams
+            ):
+                return DailySummaryData(
+                    total_credits_used=0.0,
+                    total_executions=0,
+                    most_used_agent="No data available",
+                    total_execution_time=0.0,
+                    successful_runs=0,
+                    failed_runs=0,
+                    average_execution_time=0.0,
+                    cost_breakdown={},
+                    date=params.date,
+                )
+            elif event_type == NotificationType.WEEKLY_SUMMARY and isinstance(
+                params, WeeklySummaryParams
+            ):
+                return WeeklySummaryData(
+                    total_credits_used=0.0,
+                    total_executions=0,
+                    most_used_agent="No data available",
+                    total_execution_time=0.0,
+                    successful_runs=0,
+                    failed_runs=0,
+                    average_execution_time=0.0,
+                    cost_breakdown={},
+                    start_date=params.start_date,
+                    end_date=params.end_date,
+                )
+            else:
+                raise ValueError("Invalid event type or params") from e

     async def _should_batch(
         self, user_id: str, event_type: NotificationType, event: NotificationEventModel
@@ -764,7 +788,7 @@ class NotificationManager(AppService):
             )
             return True

-        summary_data = self._gather_summary_data(
+        summary_data = await self._gather_summary_data(
             event.user_id, event.type, model.data
         )
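Note: get_user_execution_summary_data's return model is not part of this diff; judging from the attribute accesses above it plausibly looks like the following (an assumed reconstruction, not the actual model):

    from pydantic import BaseModel

    class UserExecutionSummaryData(BaseModel):  # hypothetical shape
        total_credits_used: float
        total_executions: int
        most_used_agent: str
        successful_runs: int
        failed_runs: int
        total_execution_time: float
        average_execution_time: float
        cost_breakdown: dict[str, float]  # agent name -> credits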
@@ -0,0 +1,73 @@
{# Agent Approved Notification Email Template #}
{#
Template variables:
data.agent_name: the name of the approved agent
data.agent_id: the ID of the agent
data.agent_version: the version of the agent
data.reviewer_name: the name of the reviewer who approved it
data.reviewer_email: the email of the reviewer
data.comments: comments from the reviewer
data.reviewed_at: when the agent was reviewed
data.store_url: URL to view the agent in the store

Subject: 🎉 Your agent '{{ data.agent_name }}' has been approved!
#}

{% block content %}
<h1 style="color: #28a745; font-size: 32px; font-weight: 700; margin: 0 0 24px 0; text-align: center;">
  🎉 Congratulations!
</h1>

<p style="color: #586069; font-size: 18px; text-align: center; margin: 0 0 24px 0;">
  Your agent <strong>'{{ data.agent_name }}'</strong> has been approved and is now live in the store!
</p>

<div style="height: 32px; background: transparent;"></div>

{% if data.comments %}
<div style="background: #d4edda; border: 1px solid #c3e6cb; border-radius: 8px; padding: 20px; margin: 0;">
  <h3 style="color: #155724; font-size: 16px; font-weight: 600; margin: 0 0 12px 0;">
    💬 Creator feedback area
  </h3>
  <p style="color: #155724; margin: 0; font-size: 16px; line-height: 1.5;">
    {{ data.comments }}
  </p>
</div>

<div style="height: 40px; background: transparent;"></div>
{% endif %}

<div style="background: #d1ecf1; border: 1px solid #bee5eb; border-radius: 8px; padding: 20px; margin: 0;">
  <h3 style="color: #0c5460; font-size: 16px; font-weight: 600; margin: 0 0 12px 0;">
    What's Next?
  </h3>
  <ul style="color: #0c5460; margin: 0; padding-left: 18px; font-size: 16px; line-height: 1.6;">
    <li>Your agent is now live and discoverable in the AutoGPT Store</li>
    <li>Users can find, install, and run your agent</li>
    <li>You can update your agent anytime by submitting a new version</li>
  </ul>
</div>

<div style="height: 32px; background: transparent;"></div>

<div style="text-align: center; margin: 24px 0;">
  <a href="{{ data.store_url }}" style="display: inline-block; background: linear-gradient(135deg, #7c3aed 0%, #5b21b6 100%); color: black; text-decoration: none; padding: 14px 28px; border-radius: 6px; font-weight: 600; font-size: 16px;">
    View Your Agent in Store
  </a>
</div>

<div style="height: 32px; background: transparent;"></div>

<div style="background: #fff3cd; border: 1px solid #ffeaa7; border-radius: 6px; padding: 16px; margin: 24px 0; text-align: center;">
  <p style="margin: 0; color: #856404; font-size: 14px;">
    <strong>💡 Pro Tip:</strong> Share your agent with the community! Post about it on social media, forums, or your blog to help more users discover and benefit from your creation.
  </p>
</div>

<div style="height: 32px; background: transparent;"></div>

<p style="color: #6a737d; font-size: 14px; text-align: center; margin: 24px 0;">
  Thank you for contributing to the AutoGPT ecosystem! 🚀
</p>

{% endblock %}
@@ -0,0 +1,77 @@
{# Agent Rejected Notification Email Template #}
{#
Template variables:
data.agent_name: the name of the rejected agent
data.agent_id: the ID of the agent
data.agent_version: the version of the agent
data.reviewer_name: the name of the reviewer who rejected it
data.reviewer_email: the email of the reviewer
data.comments: comments from the reviewer explaining the rejection
data.reviewed_at: when the agent was reviewed
data.resubmit_url: URL to resubmit the agent

Subject: Your agent '{{ data.agent_name }}' needs some updates
#}


{% block content %}
<h1 style="color: #d73a49; font-size: 32px; font-weight: 700; margin: 0 0 24px 0; text-align: center;">
  📝 Review Complete
</h1>

<p style="color: #586069; font-size: 18px; text-align: center; margin: 0 0 24px 0;">
  Your agent <strong>'{{ data.agent_name }}'</strong> needs some updates before approval.
</p>

<div style="height: 32px; background: transparent;"></div>

<div style="background: #f8d7da; border: 1px solid #f5c6cb; border-radius: 8px; padding: 20px; margin: 0 0 24px 0;">
  <h3 style="color: #721c24; font-size: 16px; font-weight: 600; margin: 0 0 12px 0;">
    💬 Creator feedback area
  </h3>
  <p style="color: #721c24; margin: 0; font-size: 16px; line-height: 1.5;">
    {{ data.comments }}
  </p>
</div>

<div style="height: 40px; background: transparent;"></div>

<div style="background: #d4edda; border: 1px solid #c3e6cb; border-radius: 8px; padding: 20px; margin: 0 0 24px 0;">
  <h3 style="color: #155724; font-size: 16px; font-weight: 600; margin: 0 0 12px 0;">
    ☑ Steps to Resubmit:
  </h3>
  <ul style="color: #155724; margin: 0; padding-left: 18px; font-size: 16px; line-height: 1.6;">
    <li>Review the feedback provided above carefully</li>
    <li>Make the necessary updates to your agent</li>
    <li>Test your agent thoroughly to ensure it works as expected</li>
    <li>Submit your updated agent for review</li>
  </ul>
</div>

<div style="height: 32px; background: transparent;"></div>

<div style="background: #fff3cd; border: 1px solid #ffeaa7; border-radius: 6px; padding: 12px; margin: 0 0 24px 0; text-align: center;">
  <p style="margin: 0; color: #856404; font-size: 14px;">
    <strong>💡 Tip:</strong> Address all the points mentioned in the feedback to increase your chances of approval in the next review.
  </p>
</div>

<div style="text-align: center; margin: 32px 0;">
  <a href="{{ data.resubmit_url }}" style="display: inline-block; background: linear-gradient(135deg, #7c3aed 0%, #5b21b6 100%); color: black; text-decoration: none; padding: 14px 28px; border-radius: 6px; font-weight: 600; font-size: 16px;">
    Update & Resubmit Agent
  </a>
</div>

<div style="background: #d1ecf1; border: 1px solid #bee5eb; border-radius: 6px; padding: 16px; margin: 24px 0;">
  <p style="margin: 0; color: #0c5460; font-size: 14px; text-align: center;">
    <strong>🌟 Don't Give Up!</strong> Many successful agents go through multiple iterations before approval. Our review team is here to help you succeed!
  </p>
</div>

<div style="height: 32px; background: transparent;"></div>

<p style="color: #6a737d; font-size: 14px; text-align: center; margin: 32px 0 24px 0;">
  We're excited to see your improved agent submission! 🚀
</p>

{% endblock %}
@@ -1,9 +1,7 @@
 {# Low Balance Notification Email Template #}
 {# Template variables:
-data.agent_name: the name of the agent
 data.current_balance: the current balance of the user
 data.billing_page_link: the link to the billing page
-data.shortfall: the shortfall amount
 #}

 <p style="
@@ -25,7 +23,7 @@ data.shortfall: the shortfall amount
   margin-top: 0;
   margin-bottom: 20px;
 ">
-  Your agent "<strong>{{ data.agent_name }}</strong>" has been stopped due to low balance.
+  Your account balance has dropped below the recommended threshold.
 </p>

 <div style="
@@ -44,15 +42,6 @@ data.shortfall: the shortfall amount
 ">
   <strong>Current Balance:</strong> ${{ "{:.2f}".format((data.current_balance|float)/100) }}
 </p>
-<p style="
-  font-family: 'Poppins', sans-serif;
-  color: #070629;
-  font-size: 16px;
-  margin-top: 0;
-  margin-bottom: 10px;
-">
-  <strong>Shortfall:</strong> ${{ "{:.2f}".format((data.shortfall|float)/100) }}
-</p>
 </div>


@@ -79,7 +68,7 @@ data.shortfall: the shortfall amount
   margin-top: 0;
   margin-bottom: 5px;
 ">
-  Your agent "<strong>{{ data.agent_name }}</strong>" requires additional credits to continue running. The current operation has been canceled until your balance is replenished.
+  Your account requires additional credits to continue running agents. Please add credits to your account to avoid service interruption.
 </p>
 </div>

@@ -110,5 +99,5 @@ data.shortfall: the shortfall amount
   margin-bottom: 10px;
   font-style: italic;
 ">
-  This is an automated notification. Your agent is stopped and will need manually restarted unless set to trigger automatically.
+  This is an automated low balance notification. Consider adding credits soon to avoid service interruption.
 </p>
@@ -5,23 +5,64 @@ data.start_date: the start date of the summary
 data.end_date: the end date of the summary
 data.total_credits_used: the total credits used during the summary
 data.total_executions: the total number of executions during the summary
-data.most_used_agent: the most used agent's nameduring the summary
+data.most_used_agent: the most used agent's name during the summary
 data.total_execution_time: the total execution time during the summary
 data.successful_runs: the total number of successful runs during the summary
 data.failed_runs: the total number of failed runs during the summary
 data.average_execution_time: the average execution time during the summary
-data.cost_breakdown: the cost breakdown during the summary
+data.cost_breakdown: the cost breakdown during the summary (dict mapping agent names to credit amounts)
 #}

-<h1>Weekly Summary</h1>
+<h1 style="color: #5D23BB; font-size: 32px; font-weight: 600; margin-bottom: 25px; margin-top: 0;">
+  Weekly Summary
+</h1>

-<p>Start Date: {{ data.start_date }}</p>
-<p>End Date: {{ data.end_date }}</p>
-<p>Total Credits Used: {{ data.total_credits_used }}</p>
-<p>Total Executions: {{ data.total_executions }}</p>
-<p>Most Used Agent: {{ data.most_used_agent }}</p>
-<p>Total Execution Time: {{ data.total_execution_time }}</p>
-<p>Successful Runs: {{ data.successful_runs }}</p>
-<p>Failed Runs: {{ data.failed_runs }}</p>
-<p>Average Execution Time: {{ data.average_execution_time }}</p>
-<p>Cost Breakdown: {{ data.cost_breakdown }}</p>
+<h2 style="color: #070629; font-size: 24px; font-weight: 500; margin-bottom: 20px;">
+  Your Agent Activity: {{ data.start_date.strftime('%B %-d') }} – {{ data.end_date.strftime('%B %-d') }}
+</h2>
+
+<div style="background-color: #ffffff; border-radius: 8px; padding: 20px; margin-bottom: 25px;">
+  <ul style="list-style-type: disc; padding-left: 20px; margin: 0;">
+    <li style="font-size: 16px; line-height: 1.8; margin-bottom: 8px;">
+      <strong>Total Executions:</strong> {{ data.total_executions }}
+    </li>
+    <li style="font-size: 16px; line-height: 1.8; margin-bottom: 8px;">
+      <strong>Total Credits Used:</strong> {{ data.total_credits_used|format("%.2f") }}
+    </li>
+    <li style="font-size: 16px; line-height: 1.8; margin-bottom: 8px;">
+      <strong>Total Execution Time:</strong> {{ data.total_execution_time|format("%.1f") }} seconds
+    </li>
+    <li style="font-size: 16px; line-height: 1.8; margin-bottom: 8px;">
+      <strong>Successful Runs:</strong> {{ data.successful_runs }}
+    </li>
+    <li style="font-size: 16px; line-height: 1.8; margin-bottom: 8px;">
+      <strong>Failed Runs:</strong> {{ data.failed_runs }}
+    </li>
+    <li style="font-size: 16px; line-height: 1.8; margin-bottom: 8px;">
+      <strong>Average Execution Time:</strong> {{ data.average_execution_time|format("%.1f") }} seconds
+    </li>
+    <li style="font-size: 16px; line-height: 1.8; margin-bottom: 8px;">
+      <strong>Most Used Agent:</strong> {{ data.most_used_agent }}
+    </li>
+    {% if data.cost_breakdown %}
+    <li style="font-size: 16px; line-height: 1.8; margin-bottom: 8px;">
+      <strong>Cost Breakdown:</strong>
+      <ul style="list-style-type: disc; padding-left: 40px; margin-top: 8px;">
+        {% for agent_name, credits in data.cost_breakdown.items() %}
+        <li style="font-size: 16px; line-height: 1.8; margin-bottom: 4px;">
+          {{ agent_name }}: {{ credits|format("%.2f") }} credits
+        </li>
+        {% endfor %}
+      </ul>
+    </li>
+    {% endif %}
+  </ul>
+</div>
+
+<p style="font-size: 16px; line-height: 165%; margin-top: 20px; margin-bottom: 10px;">
+  Thank you for being a part of the AutoGPT community! 🎉
+</p>
+
+<p style="font-size: 16px; line-height: 165%; margin-bottom: 0;">
+  Join the conversation on <a href="https://discord.gg/autogpt" style="color: #4285F4; text-decoration: underline;">Discord here</a>.
+</p>
@@ -0,0 +1,114 @@
{# Low Balance Notification Email Template #}
{# Template variables:
data.agent_name: the name of the agent
data.current_balance: the current balance of the user
data.billing_page_link: the link to the billing page
data.shortfall: the shortfall amount
#}

<p style="
  font-family: 'Poppins', sans-serif;
  color: #070629;
  font-size: 16px;
  line-height: 165%;
  margin-top: 0;
  margin-bottom: 10px;
">
  <strong>Zero Balance Warning</strong>
</p>

<p style="
  font-family: 'Poppins', sans-serif;
  color: #070629;
  font-size: 16px;
  line-height: 165%;
  margin-top: 0;
  margin-bottom: 20px;
">
  Your agent "<strong>{{ data.agent_name }}</strong>" has been stopped due to low balance.
</p>

<div style="
  margin-left: 15px;
  margin-bottom: 20px;
  padding: 15px;
  border-left: 4px solid #5D23BB;
  background-color: #f8f8ff;
">
  <p style="
    font-family: 'Poppins', sans-serif;
    color: #070629;
    font-size: 16px;
    margin-top: 0;
    margin-bottom: 10px;
  ">
    <strong>Current Balance:</strong> ${{ "{:.2f}".format((data.current_balance|float)/100) }}
  </p>
  <p style="
    font-family: 'Poppins', sans-serif;
    color: #070629;
    font-size: 16px;
    margin-top: 0;
    margin-bottom: 10px;
  ">
    <strong>Shortfall:</strong> ${{ "{:.2f}".format((data.shortfall|float)/100) }}
  </p>
</div>


<div style="
  margin-left: 15px;
  margin-bottom: 20px;
  padding: 15px;
  border-left: 4px solid #FF6B6B;
  background-color: #FFF0F0;
">
  <p style="
    font-family: 'Poppins', sans-serif;
    color: #070629;
    font-size: 16px;
    margin-top: 0;
    margin-bottom: 10px;
  ">
    <strong>Low Balance:</strong>
  </p>
  <p style="
    font-family: 'Poppins', sans-serif;
    color: #070629;
    font-size: 16px;
    margin-top: 0;
    margin-bottom: 5px;
  ">
    Your agent "<strong>{{ data.agent_name }}</strong>" requires additional credits to continue running. The current operation has been canceled until your balance is replenished.
  </p>
</div>

<div style="
  text-align: center;
  margin: 30px 0;
">
  <a href="{{ data.billing_page_link }}" style="
    font-family: 'Poppins', sans-serif;
    background-color: #5D23BB;
    color: white;
    padding: 12px 24px;
    text-decoration: none;
    border-radius: 4px;
    font-weight: 500;
    display: inline-block;
  ">
    Manage Billing
  </a>
</div>

<p style="
  font-family: 'Poppins', sans-serif;
  color: #070629;
  font-size: 16px;
  line-height: 150%;
  margin-top: 30px;
  margin-bottom: 10px;
  font-style: italic;
">
  This is an automated notification. Your agent is stopped and will need manually restarted unless set to trigger automatically.
</p>
@@ -14,6 +14,8 @@ from backend.data.model import (
     CredentialsField,
     CredentialsMetaInput,
     CredentialsType,
+    OAuth2Credentials,
+    UserPasswordCredentials,
 )
 from backend.integrations.oauth.base import BaseOAuthHandler
 from backend.integrations.webhooks._base import BaseWebhooksManager
@@ -104,14 +106,39 @@ class Provider:
     )

     def get_test_credentials(self) -> Credentials:
-        """Get test credentials for the provider."""
-        return APIKeyCredentials(
-            id=str(self.test_credentials_uuid),
-            provider=self.name,
-            api_key=SecretStr("mock-api-key"),
-            title=f"Mock {self.name.title()} API key",
-            expires_at=None,
-        )
+        """Get test credentials for the provider based on supported auth types."""
+        test_id = str(self.test_credentials_uuid)
+
+        # Return credentials based on the first supported auth type
+        if "user_password" in self.supported_auth_types:
+            return UserPasswordCredentials(
+                id=test_id,
+                provider=self.name,
+                username=SecretStr(f"mock-{self.name}-username"),
+                password=SecretStr(f"mock-{self.name}-password"),
+                title=f"Mock {self.name.title()} credentials",
+            )
+        elif "oauth2" in self.supported_auth_types:
+            return OAuth2Credentials(
+                id=test_id,
+                provider=self.name,
+                username=f"mock-{self.name}-username",
+                access_token=SecretStr(f"mock-{self.name}-access-token"),
+                access_token_expires_at=None,
+                refresh_token=SecretStr(f"mock-{self.name}-refresh-token"),
+                refresh_token_expires_at=None,
+                scopes=[f"mock-{self.name}-scope"],
+                title=f"Mock {self.name.title()} OAuth credentials",
+            )
+        else:
+            # Default to API key credentials
+            return APIKeyCredentials(
+                id=test_id,
+                provider=self.name,
+                api_key=SecretStr(f"mock-{self.name}-api-key"),
+                title=f"Mock {self.name.title()} API key",
+                expires_at=None,
+            )

     def get_api(self, credentials: Credentials) -> Any:
         """Get API client instance for the given credentials."""
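Note: with this change the mock credential type follows the provider's declared auth. A usage sketch (the Provider construction is abbreviated, and supported_auth_types is assumed to be a collection of strings, per the membership checks above):

    provider = Provider(..., supported_auth_types={"oauth2"})  # abbreviated, hypothetical
    creds = provider.get_test_credentials()
    # creds is now an OAuth2Credentials with mock access/refresh tokens,
    # rather than the APIKeyCredentials returned before this change.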
@@ -1,11 +0,0 @@
-from supabase import Client, create_client
-
-from backend.util.settings import Settings
-
-settings = Settings()
-
-
-def get_supabase() -> Client:
-    return create_client(
-        settings.secrets.supabase_url, settings.secrets.supabase_service_role_key
-    )
@@ -5,6 +5,7 @@ import pydantic

 from backend.data.api_key import APIKeyPermission, APIKeyWithoutHash
 from backend.data.graph import Graph
+from backend.util.timezone_name import TimeZoneName


 class WSMethod(enum.Enum):
@@ -60,21 +61,6 @@ class UpdatePermissionsRequest(pydantic.BaseModel):
     permissions: list[APIKeyPermission]


-class Pagination(pydantic.BaseModel):
-    total_items: int = pydantic.Field(
-        description="Total number of items.", examples=[42]
-    )
-    total_pages: int = pydantic.Field(
-        description="Total number of pages.", examples=[2]
-    )
-    current_page: int = pydantic.Field(
-        description="Current_page page number.", examples=[1]
-    )
-    page_size: int = pydantic.Field(
-        description="Number of items per page.", examples=[25]
-    )
-
-
 class RequestTopUp(pydantic.BaseModel):
     credit_amount: int

@@ -85,3 +71,12 @@ class UploadFileResponse(pydantic.BaseModel):
     size: int
     content_type: str
     expires_in_hours: int
+
+
+class TimezoneResponse(pydantic.BaseModel):
+    # Allow "not-set" as a special value, or any valid IANA timezone
+    timezone: TimeZoneName | str
+
+
+class UpdateTimezoneRequest(pydantic.BaseModel):
+    timezone: TimeZoneName
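Note: the "not-set" sentinel lives only on the response side. A sketch of the asymmetry, assuming TimeZoneName validates IANA identifiers:

    TimezoneResponse(timezone="not-set")               # accepted via the str branch
    UpdateTimezoneRequest(timezone="Europe/Berlin")    # accepted
    # UpdateTimezoneRequest(timezone="not-set")        # would raise a ValidationError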
@@ -9,11 +9,6 @@ import fastapi.responses
 import pydantic
 import starlette.middleware.cors
 import uvicorn
-from autogpt_libs.feature_flag.client import (
-    initialize_launchdarkly,
-    shutdown_launchdarkly,
-)
-from autogpt_libs.logging.utils import generate_uvicorn_config
 from fastapi.exceptions import RequestValidationError
 from fastapi.routing import APIRoute

@@ -25,6 +20,8 @@ import backend.server.routers.postmark.postmark
 import backend.server.routers.v1
 import backend.server.v2.admin.credit_admin_routes
 import backend.server.v2.admin.store_admin_routes
+import backend.server.v2.builder
+import backend.server.v2.builder.routes
 import backend.server.v2.library.db
 import backend.server.v2.library.model
 import backend.server.v2.library.routes
@@ -41,6 +38,7 @@ from backend.server.external.api import external_app
 from backend.server.middleware.security import SecurityHeadersMiddleware
 from backend.util import json
 from backend.util.cloud_storage import shutdown_cloud_storage_handler
+from backend.util.feature_flag import initialize_launchdarkly, shutdown_launchdarkly
 from backend.util.service import UnhealthyServiceError

 settings = backend.util.settings.Settings()
@@ -199,6 +197,9 @@ app.include_router(backend.server.routers.v1.v1_router, tags=["v1"], prefix="/ap
 app.include_router(
     backend.server.v2.store.routes.router, tags=["v2"], prefix="/api/store"
 )
+app.include_router(
+    backend.server.v2.builder.routes.router, tags=["v2"], prefix="/api/builder"
+)
 app.include_router(
     backend.server.v2.admin.store_admin_routes.router,
     tags=["v2", "admin"],
@@ -250,7 +251,7 @@ class AgentServer(backend.util.service.AppProcess):
             server_app,
             host=backend.util.settings.Config().agent_api_host,
             port=backend.util.settings.Config().agent_api_port,
-            log_config=generate_uvicorn_config(),
+            log_config=None,
         )

     def cleanup(self):
@@ -8,7 +8,6 @@ from typing import Annotated, Any, Sequence
 import pydantic
 import stripe
 from autogpt_libs.auth.middleware import auth_middleware
-from autogpt_libs.feature_flag.client import feature_flag
 from fastapi import (
     APIRouter,
     Body,
@@ -16,6 +15,7 @@ from fastapi import (
     File,
     HTTPException,
     Path,
+    Query,
     Request,
     Response,
     UploadFile,
@@ -61,9 +61,11 @@ from backend.data.onboarding import (
 )
 from backend.data.user import (
     get_or_create_user,
+    get_user_by_id,
     get_user_notification_preference,
     update_user_email,
     update_user_notification_preference,
+    update_user_timezone,
 )
 from backend.executor import scheduler
 from backend.executor import utils as execution_utils
@@ -78,7 +80,9 @@ from backend.server.model import (
     ExecuteGraphResponse,
     RequestTopUp,
     SetGraphActiveVersion,
+    TimezoneResponse,
     UpdatePermissionsRequest,
+    UpdateTimezoneRequest,
     UploadFileResponse,
 )
 from backend.server.utils import get_user_id
@@ -86,6 +90,11 @@ from backend.util.clients import get_scheduler_client
 from backend.util.cloud_storage import get_cloud_storage_handler
 from backend.util.exceptions import GraphValidationError, NotFoundError
 from backend.util.settings import Settings
+from backend.util.timezone_utils import (
+    convert_cron_to_utc,
+    convert_utc_time_to_user_timezone,
+    get_user_timezone_or_utc,
+)
 from backend.util.virus_scanner import scan_content_safe


@@ -149,6 +158,35 @@ async def update_user_email_route(
     return {"email": email}


+@v1_router.get(
+    "/auth/user/timezone",
+    summary="Get user timezone",
+    tags=["auth"],
+    dependencies=[Depends(auth_middleware)],
+)
+async def get_user_timezone_route(
+    user_data: dict = Depends(auth_middleware),
+) -> TimezoneResponse:
+    """Get user timezone setting."""
+    user = await get_or_create_user(user_data)
+    return TimezoneResponse(timezone=user.timezone)
+
+
+@v1_router.post(
+    "/auth/user/timezone",
+    summary="Update user timezone",
+    tags=["auth"],
+    dependencies=[Depends(auth_middleware)],
+    response_model=TimezoneResponse,
+)
+async def update_user_timezone_route(
+    user_id: Annotated[str, Depends(get_user_id)], request: UpdateTimezoneRequest
+) -> TimezoneResponse:
+    """Update user timezone. The timezone should be a valid IANA timezone identifier."""
+    user = await update_user_timezone(user_id, str(request.timezone))
+    return TimezoneResponse(timezone=user.timezone)

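Note: hypothetical client calls against the two routes above (base URL, router prefix, and token are placeholders, not confirmed by this diff):

    import requests

    base = "http://localhost:8006/api"  # assumed mount point of v1_router
    headers = {"Authorization": "Bearer <token>"}

    print(requests.get(f"{base}/auth/user/timezone", headers=headers).json())
    requests.post(
        f"{base}/auth/user/timezone",
        headers=headers,
        json={"timezone": "America/New_York"},
    )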
 @v1_router.get(
     "/auth/user/preferences",
     summary="Get notification preferences",
@@ -458,12 +496,16 @@ async def stripe_webhook(request: Request):
         event = stripe.Webhook.construct_event(
             payload, sig_header, settings.secrets.stripe_webhook_secret
         )
-    except ValueError:
+    except ValueError as e:
         # Invalid payload
-        raise HTTPException(status_code=400)
-    except stripe.SignatureVerificationError:
+        raise HTTPException(
+            status_code=400, detail=f"Invalid payload: {str(e) or type(e).__name__}"
+        )
+    except stripe.SignatureVerificationError as e:
         # Invalid signature
-        raise HTTPException(status_code=400)
+        raise HTTPException(
+            status_code=400, detail=f"Invalid signature: {str(e) or type(e).__name__}"
+        )

     if (
         event["type"] == "checkout.session.completed"
@@ -676,7 +718,15 @@ async def update_graph(
     # Handle deactivation of the previously active version
     await on_graph_deactivate(current_active_version, user_id=user_id)

-    return new_graph_version
+    # Fetch new graph version *with sub-graphs* (needed for credentials input schema)
+    new_graph_version_with_subgraphs = await graph_db.get_graph(
+        graph_id,
+        new_graph_version.version,
+        user_id=user_id,
+        include_subgraphs=True,
+    )
+    assert new_graph_version_with_subgraphs  # make type checker happy
+    return new_graph_version_with_subgraphs


 @v1_router.put(
@@ -808,11 +858,11 @@ async def _stop_graph_run(

 @v1_router.get(
     path="/executions",
-    summary="Get all executions",
+    summary="List all executions",
     tags=["graphs"],
     dependencies=[Depends(auth_middleware)],
 )
-async def get_graphs_executions(
+async def list_graphs_executions(
     user_id: Annotated[str, Depends(get_user_id)],
 ) -> list[execution_db.GraphExecutionMeta]:
     return await execution_db.get_graph_executions(user_id=user_id)
@@ -820,15 +870,24 @@ async def get_graphs_executions(

 @v1_router.get(
     path="/graphs/{graph_id}/executions",
-    summary="Get graph executions",
+    summary="List graph executions",
     tags=["graphs"],
     dependencies=[Depends(auth_middleware)],
 )
-async def get_graph_executions(
+async def list_graph_executions(
     graph_id: str,
     user_id: Annotated[str, Depends(get_user_id)],
-) -> list[execution_db.GraphExecutionMeta]:
-    return await execution_db.get_graph_executions(graph_id=graph_id, user_id=user_id)
+    page: int = Query(1, ge=1, description="Page number (1-indexed)"),
+    page_size: int = Query(
+        25, ge=1, le=100, description="Number of executions per page"
+    ),
+) -> execution_db.GraphExecutionsPaginated:
+    return await execution_db.get_graph_executions_paginated(
+        graph_id=graph_id,
+        user_id=user_id,
+        page=page,
+        page_size=page_size,
+    )
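Note: a hypothetical call shape for the now-paginated endpoint (placeholders as before; the response is a GraphExecutionsPaginated object rather than a bare list):

    import requests

    resp = requests.get(
        "http://localhost:8006/api/graphs/<graph-id>/executions",  # assumed prefix
        params={"page": 2, "page_size": 50},
        headers={"Authorization": "Bearer <token>"},
    )
    page = resp.json()  # executions for page 2 plus pagination metadata (assumed shape)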
 @v1_router.get(
@@ -912,16 +971,36 @@ async def create_graph_execution_schedule(
             detail=f"Graph #{graph_id} v{schedule_params.graph_version} not found.",
         )

-    return await get_scheduler_client().add_execution_schedule(
+    user = await get_user_by_id(user_id)
+    user_timezone = get_user_timezone_or_utc(user.timezone if user else None)
+
+    # Convert cron expression from user timezone to UTC
+    try:
+        utc_cron = convert_cron_to_utc(schedule_params.cron, user_timezone)
+    except ValueError as e:
+        raise HTTPException(
+            status_code=400,
+            detail=f"Invalid cron expression for timezone {user_timezone}: {e}",
+        )
+
+    result = await get_scheduler_client().add_execution_schedule(
         user_id=user_id,
         graph_id=graph_id,
         graph_version=graph.version,
         name=schedule_params.name,
-        cron=schedule_params.cron,
+        cron=utc_cron,  # Send UTC cron to scheduler
         input_data=schedule_params.inputs,
        input_credentials=schedule_params.credentials,
     )

+    # Convert the next_run_time back to user timezone for display
+    if result.next_run_time:
+        result.next_run_time = convert_utc_time_to_user_timezone(
+            result.next_run_time, user_timezone
+        )
+
+    return result
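Note: convert_cron_to_utc is imported above but not defined in this diff; its intended effect is to shift the time fields of a cron expression from the user's zone into UTC before the schedule reaches the scheduler. Illustrative only (the exact output depends on the helper and on its DST handling):

    # "09:00 every day in America/New_York" stored as a UTC cron:
    utc_cron = convert_cron_to_utc("0 9 * * *", "America/New_York")
    # -> "0 14 * * *" while UTC-5 applies (assumption)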
 @v1_router.get(
     path="/graphs/{graph_id}/schedules",
@@ -933,11 +1012,24 @@ async def list_graph_execution_schedules(
     user_id: Annotated[str, Depends(get_user_id)],
     graph_id: str = Path(),
 ) -> list[scheduler.GraphExecutionJobInfo]:
-    return await get_scheduler_client().get_execution_schedules(
+    schedules = await get_scheduler_client().get_execution_schedules(
         user_id=user_id,
         graph_id=graph_id,
     )
+
+    # Get user timezone for conversion
+    user = await get_user_by_id(user_id)
+    user_timezone = get_user_timezone_or_utc(user.timezone if user else None)
+
+    # Convert next_run_time to user timezone for display
+    for schedule in schedules:
+        if schedule.next_run_time:
+            schedule.next_run_time = convert_utc_time_to_user_timezone(
+                schedule.next_run_time, user_timezone
+            )
+
+    return schedules


 @v1_router.get(
     path="/schedules",
@@ -948,7 +1040,20 @@ async def list_graph_execution_schedules(
 async def list_all_graphs_execution_schedules(
     user_id: Annotated[str, Depends(get_user_id)],
 ) -> list[scheduler.GraphExecutionJobInfo]:
-    return await get_scheduler_client().get_execution_schedules(user_id=user_id)
+    schedules = await get_scheduler_client().get_execution_schedules(user_id=user_id)
+
+    # Get user timezone for conversion
+    user = await get_user_by_id(user_id)
+    user_timezone = get_user_timezone_or_utc(user.timezone if user else None)
+
+    # Convert UTC next_run_time to user timezone for display
+    for schedule in schedules:
+        if schedule.next_run_time:
+            schedule.next_run_time = convert_utc_time_to_user_timezone(
+                schedule.next_run_time, user_timezone
+            )
+
+    return schedules


 @v1_router.delete(
@@ -1059,7 +1164,6 @@ async def get_api_key(
     tags=["api-keys"],
     dependencies=[Depends(auth_middleware)],
 )
-@feature_flag("api-keys-enabled")
 async def delete_api_key(
     key_id: str, user_id: Annotated[str, Depends(get_user_id)]
 ) -> Optional[APIKeyWithoutHash]:
@@ -1088,7 +1192,6 @@ async def delete_api_key(
     tags=["api-keys"],
     dependencies=[Depends(auth_middleware)],
 )
-@feature_flag("api-keys-enabled")
 async def suspend_key(
     key_id: str, user_id: Annotated[str, Depends(get_user_id)]
 ) -> Optional[APIKeyWithoutHash]:
@@ -1114,7 +1217,6 @@ async def suspend_key(
     tags=["api-keys"],
     dependencies=[Depends(auth_middleware)],
 )
-@feature_flag("api-keys-enabled")
 async def update_permissions(
     key_id: str,
     request: UpdatePermissionsRequest,

@@ -0,0 +1 @@
# AutoMod integration for content moderation
autogpt_platform/backend/backend/server/v2/AutoMod/manager.py (new file, 364 lines)
@@ -0,0 +1,364 @@
import asyncio
import json
import logging
from typing import TYPE_CHECKING, Any, Literal

if TYPE_CHECKING:
    from backend.executor import DatabaseManagerAsyncClient

from pydantic import ValidationError

from backend.data.execution import ExecutionStatus
from backend.server.v2.AutoMod.models import (
    AutoModRequest,
    AutoModResponse,
    ModerationConfig,
)
from backend.util.exceptions import ModerationError
from backend.util.feature_flag import Flag, is_feature_enabled
from backend.util.request import Requests
from backend.util.settings import Settings

logger = logging.getLogger(__name__)


class AutoModManager:

    def __init__(self):
        self.config = self._load_config()

    def _load_config(self) -> ModerationConfig:
        """Load AutoMod configuration from settings"""
        settings = Settings()
        return ModerationConfig(
            enabled=settings.config.automod_enabled,
            api_url=settings.config.automod_api_url,
            api_key=settings.secrets.automod_api_key,
            timeout=settings.config.automod_timeout,
            retry_attempts=settings.config.automod_retry_attempts,
            retry_delay=settings.config.automod_retry_delay,
            fail_open=settings.config.automod_fail_open,
        )

    async def moderate_graph_execution_inputs(
        self, db_client: "DatabaseManagerAsyncClient", graph_exec, timeout: int = 10
    ) -> Exception | None:
        """
        Complete input moderation flow for graph execution
        Returns: error_if_failed (None means success)
        """
        if not self.config.enabled:
            return None

        # Check if AutoMod feature is enabled for this user
        if not await is_feature_enabled(Flag.AUTOMOD, graph_exec.user_id):
            logger.debug(f"AutoMod feature not enabled for user {graph_exec.user_id}")
            return None

        # Get graph model and collect all inputs
        graph_model = await db_client.get_graph(
            graph_exec.graph_id,
            user_id=graph_exec.user_id,
            version=graph_exec.graph_version,
        )

        if not graph_model or not graph_model.nodes:
            return None

        all_inputs = []
        for node in graph_model.nodes:
            if node.input_default:
                all_inputs.extend(str(v) for v in node.input_default.values() if v)
            if (masks := graph_exec.nodes_input_masks) and (mask := masks.get(node.id)):
                all_inputs.extend(str(v) for v in mask.values() if v)

        if not all_inputs:
            return None

        # Combine all content and moderate directly
        content = " ".join(all_inputs)

        # Run moderation
        logger.warning(
            f"Moderating inputs for graph execution {graph_exec.graph_exec_id}"
        )
        try:
            moderation_passed, content_id = await self._moderate_content(
                content,
                {
                    "user_id": graph_exec.user_id,
                    "graph_id": graph_exec.graph_id,
                    "graph_exec_id": graph_exec.graph_exec_id,
                    "moderation_type": "execution_input",
                },
            )

            if not moderation_passed:
                logger.warning(
                    f"Moderation failed for graph execution {graph_exec.graph_exec_id}"
                )
                # Update node statuses for frontend display before raising error
                await self._update_failed_nodes_for_moderation(
                    db_client, graph_exec.graph_exec_id, "input", content_id
                )

                return ModerationError(
                    message="Execution failed due to input content moderation",
                    user_id=graph_exec.user_id,
                    graph_exec_id=graph_exec.graph_exec_id,
                    moderation_type="input",
                    content_id=content_id,
                )

            return None

        except asyncio.TimeoutError:
            logger.warning(
                f"Input moderation timed out for graph execution {graph_exec.graph_exec_id}, bypassing moderation"
            )
            return None  # Bypass moderation on timeout
        except Exception as e:
            logger.warning(f"Input moderation execution failed: {e}")
            return ModerationError(
                message="Execution failed due to input content moderation error",
                user_id=graph_exec.user_id,
                graph_exec_id=graph_exec.graph_exec_id,
                moderation_type="input",
            )

    async def moderate_graph_execution_outputs(
        self,
        db_client: "DatabaseManagerAsyncClient",
        graph_exec_id: str,
        user_id: str,
        graph_id: str,
        timeout: int = 10,
    ) -> Exception | None:
        """
        Complete output moderation flow for graph execution
        Returns: error_if_failed (None means success)
        """
        if not self.config.enabled:
            return None

        # Check if AutoMod feature is enabled for this user
        if not await is_feature_enabled(Flag.AUTOMOD, user_id):
            logger.debug(f"AutoMod feature not enabled for user {user_id}")
            return None

        # Get completed executions and collect outputs
        completed_executions = await db_client.get_node_executions(
            graph_exec_id, statuses=[ExecutionStatus.COMPLETED], include_exec_data=True
        )

        if not completed_executions:
            return None

        all_outputs = []
        for exec_entry in completed_executions:
            if exec_entry.output_data:
                all_outputs.extend(str(v) for v in exec_entry.output_data.values() if v)

        if not all_outputs:
            return None

        # Combine all content and moderate directly
        content = " ".join(all_outputs)

        # Run moderation
        logger.warning(f"Moderating outputs for graph execution {graph_exec_id}")
        try:
            moderation_passed, content_id = await self._moderate_content(
                content,
                {
                    "user_id": user_id,
                    "graph_id": graph_id,
                    "graph_exec_id": graph_exec_id,
                    "moderation_type": "execution_output",
                },
            )

            if not moderation_passed:
                logger.warning(f"Moderation failed for graph execution {graph_exec_id}")
                # Update node statuses for frontend display before raising error
                await self._update_failed_nodes_for_moderation(
                    db_client, graph_exec_id, "output", content_id
                )

                return ModerationError(
                    message="Execution failed due to output content moderation",
                    user_id=user_id,
                    graph_exec_id=graph_exec_id,
                    moderation_type="output",
                    content_id=content_id,
                )

            return None

        except asyncio.TimeoutError:
            logger.warning(
                f"Output moderation timed out for graph execution {graph_exec_id}, bypassing moderation"
            )
            return None  # Bypass moderation on timeout
        except Exception as e:
            logger.warning(f"Output moderation execution failed: {e}")
            return ModerationError(
                message="Execution failed due to output content moderation error",
                user_id=user_id,
                graph_exec_id=graph_exec_id,
                moderation_type="output",
            )

    async def _update_failed_nodes_for_moderation(
        self,
        db_client: "DatabaseManagerAsyncClient",
        graph_exec_id: str,
        moderation_type: Literal["input", "output"],
        content_id: str | None = None,
    ):
        """Update node execution statuses for frontend display when moderation fails"""
        # Import here to avoid circular imports
        from backend.executor.manager import send_async_execution_update

        if moderation_type == "input":
            # For input moderation, mark queued/running/incomplete nodes as failed
            target_statuses = [
                ExecutionStatus.QUEUED,
                ExecutionStatus.RUNNING,
                ExecutionStatus.INCOMPLETE,
            ]
        else:
            # For output moderation, mark completed nodes as failed
            target_statuses = [ExecutionStatus.COMPLETED]

        # Get the executions that need to be updated
        executions_to_update = await db_client.get_node_executions(
            graph_exec_id, statuses=target_statuses, include_exec_data=True
        )

        if not executions_to_update:
            return

        # Create error message with content_id if available
        error_message = "Failed due to content moderation"
        if content_id:
            error_message += f" (Moderation ID: {content_id})"

        # Prepare database update tasks
        exec_updates = []
        for exec_entry in executions_to_update:
            # Collect all input and output names to clear
            cleared_inputs = {}
            cleared_outputs = {}

            if exec_entry.input_data:
                for name in exec_entry.input_data.keys():
                    cleared_inputs[name] = [error_message]

            if exec_entry.output_data:
                for name in exec_entry.output_data.keys():
                    cleared_outputs[name] = [error_message]

            # Add update task to list
            exec_updates.append(
                db_client.update_node_execution_status(
                    exec_entry.node_exec_id,
                    status=ExecutionStatus.FAILED,
                    stats={
                        "error": error_message,
                        "cleared_inputs": cleared_inputs,
                        "cleared_outputs": cleared_outputs,
                    },
                )
            )

        # Execute all database updates in parallel
        updated_execs = await asyncio.gather(*exec_updates)

        # Send all websocket updates in parallel
        await asyncio.gather(
            *[
                send_async_execution_update(updated_exec)
                for updated_exec in updated_execs
            ]
        )

    async def _moderate_content(
        self, content: str, metadata: dict[str, Any]
    ) -> tuple[bool, str | None]:
        """Moderate content using AutoMod API

        Returns:
            Tuple of (approval_status, content_id)
            - approval_status: True if approved or timeout occurred, False if rejected
            - content_id: Reference ID from moderation API, or None if not available

        Raises:
            asyncio.TimeoutError: When moderation times out (should be bypassed)
        """
        try:
            request_data = AutoModRequest(
                type="text",
                content=content,
                metadata=metadata,
            )

            response = await self._make_request(request_data)

            if response.success and response.status == "approved":
                logger.debug(
                    f"Content approved for {metadata.get('graph_exec_id', 'unknown')}"
                )
                return True, response.content_id
            else:
                reasons = [r.reason for r in response.moderation_results if r.reason]
                error_msg = f"Content rejected by AutoMod: {'; '.join(reasons)}"
                logger.warning(f"Content rejected: {error_msg}")
                return False, response.content_id

        except asyncio.TimeoutError:
            # Re-raise timeout to be handled by calling methods
            logger.warning(
                f"AutoMod API timeout for {metadata.get('graph_exec_id', 'unknown')}"
            )
            raise
        except Exception as e:
            logger.error(f"AutoMod moderation error: {e}")
            return self.config.fail_open, None

    async def _make_request(self, request_data: AutoModRequest) -> AutoModResponse:
        """Make HTTP request to AutoMod API using the standard request utility"""
        url = f"{self.config.api_url}/moderate"
        headers = {
            "Content-Type": "application/json",
            "X-API-Key": self.config.api_key.strip(),
        }

        # Create requests instance with timeout and retry configuration
        requests = Requests(
            extra_headers=headers,
            retry_max_wait=float(self.config.timeout),
        )

        try:
            response = await requests.post(
                url, json=request_data.model_dump(), timeout=self.config.timeout
            )

            response_data = response.json()
            return AutoModResponse.model_validate(response_data)

        except asyncio.TimeoutError:
            # Re-raise timeout error to be caught by _moderate_content
            raise
        except (json.JSONDecodeError, ValidationError) as e:
            raise Exception(f"Invalid response from AutoMod API: {e}")
        except Exception as e:
            # Check if this is an aiohttp timeout that we should convert
            if "timeout" in str(e).lower():
                raise asyncio.TimeoutError(f"AutoMod API request timed out: {e}")
            raise Exception(f"AutoMod API request failed: {e}")


# Global instance
automod_manager = AutoModManager()
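The manager above exposes two executor-facing entry points with a return-an-error (rather than raise) contract: `None` means the run may proceed. A minimal sketch of how a caller might wire them up, assuming executor-side `db_client` and `graph_exec` objects; this is illustrative, not part of the diff:

```python
# Sketch only: consuming the AutoModManager API above from an executor.
# `db_client` and `graph_exec` stand in for the real executor objects.
from backend.server.v2.AutoMod.manager import automod_manager

async def run_with_moderation(db_client, graph_exec):
    # Input moderation runs before any node executes; a returned
    # ModerationError signals the executor to fail the run.
    if error := await automod_manager.moderate_graph_execution_inputs(
        db_client, graph_exec
    ):
        raise error
    # ... execute the graph ...
    # Output moderation runs after all nodes complete.
    if error := await automod_manager.moderate_graph_execution_outputs(
        db_client,
        graph_exec_id=graph_exec.graph_exec_id,
        user_id=graph_exec.user_id,
        graph_id=graph_exec.graph_id,
    ):
        raise error
```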
60
autogpt_platform/backend/backend/server/v2/AutoMod/models.py
Normal file
@@ -0,0 +1,60 @@
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Field


class AutoModRequest(BaseModel):
    """Request model for AutoMod API"""

    type: str = Field(..., description="Content type - 'text', 'image', 'video'")
    content: str = Field(..., description="The content to moderate")
    metadata: Optional[Dict[str, Any]] = Field(
        default=None, description="Additional context about the content"
    )


class ModerationResult(BaseModel):
    """Individual moderation result"""

    decision: str = Field(
        ..., description="Moderation decision: 'approved', 'rejected', 'flagged'"
    )
    reason: Optional[str] = Field(default=None, description="Reason for the decision")


class AutoModResponse(BaseModel):
    """Response model for AutoMod API"""

    success: bool = Field(..., description="Whether the request was successful")
    content_id: str = Field(
        ..., description="Unique reference ID for this moderation request"
    )
    status: str = Field(
        ..., description="Overall status: 'approved', 'rejected', 'flagged', 'pending'"
    )
    moderation_results: List[ModerationResult] = Field(
        default_factory=list, description="List of moderation results"
    )


class ModerationConfig(BaseModel):
    """Configuration for AutoMod integration"""

    enabled: bool = Field(default=True, description="Whether moderation is enabled")
    api_url: str = Field(default="", description="AutoMod API base URL")
    api_key: str = Field(..., description="AutoMod API key")
    timeout: int = Field(default=30, description="Request timeout in seconds")
    retry_attempts: int = Field(default=3, description="Number of retry attempts")
    retry_delay: float = Field(
        default=1.0, description="Delay between retries in seconds"
    )
    fail_open: bool = Field(
        default=False,
        description="If True, allow execution to continue if moderation fails",
    )
    moderate_inputs: bool = Field(
        default=True, description="Whether to moderate block inputs"
    )
    moderate_outputs: bool = Field(
        default=True, description="Whether to moderate block outputs"
    )
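For reference, a raw AutoMod API payload maps onto these models as follows; the payload values are illustrative, not taken from a real response:

```python
# Illustrative only: validating a hypothetical AutoMod API payload against
# the AutoModResponse model defined above (pydantic v2 model_validate).
from backend.server.v2.AutoMod.models import AutoModResponse

payload = {
    "success": True,
    "content_id": "mod_123",  # hypothetical reference ID
    "status": "approved",
    "moderation_results": [{"decision": "approved", "reason": None}],
}
response = AutoModResponse.model_validate(payload)
assert response.status == "approved"
```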
@@ -14,7 +14,7 @@ import backend.server.v2.admin.credit_admin_routes as credit_admin_routes
import backend.server.v2.admin.model as admin_model
from backend.data.model import UserTransaction
from backend.server.conftest import ADMIN_USER_ID, TARGET_USER_ID
from backend.server.model import Pagination
from backend.util.models import Pagination

app = fastapi.FastAPI()
app.include_router(credit_admin_routes.router)

@@ -1,7 +1,7 @@
from pydantic import BaseModel

from backend.data.model import UserTransaction
from backend.server.model import Pagination
from backend.util.models import Pagination


class UserHistoryResponse(BaseModel):
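These hunks (and the matching ones further down) are part of a repo-wide move of `Pagination` from `backend.server.model` to `backend.util.models`; only the import path changes. A sketch of the result, assuming the field set used elsewhere in this diff:

```python
# Minimal sketch of the new import path; field names are those used by the
# builder code below (total_items, total_pages, current_page, page_size).
from backend.util.models import Pagination

page_info = Pagination(
    total_items=0, total_pages=0, current_page=1, page_size=50
)
```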
376
autogpt_platform/backend/backend/server/v2/builder/db.py
Normal file
@@ -0,0 +1,376 @@
import functools
import logging
from datetime import datetime, timedelta, timezone

import prisma

import backend.data.block
from backend.blocks import load_all_blocks
from backend.blocks.llm import LlmModel
from backend.data.block import Block, BlockCategory, BlockSchema
from backend.data.credit import get_block_costs
from backend.integrations.providers import ProviderName
from backend.server.v2.builder.model import (
    BlockCategoryResponse,
    BlockData,
    BlockResponse,
    BlockType,
    CountResponse,
    Provider,
    ProviderResponse,
    SearchBlocksResponse,
)
from backend.util.models import Pagination

logger = logging.getLogger(__name__)
llm_models = [name.name.lower().replace("_", " ") for name in LlmModel]
_static_counts_cache: dict | None = None
_suggested_blocks: list[BlockData] | None = None


def get_block_categories(category_blocks: int = 3) -> list[BlockCategoryResponse]:
    categories: dict[BlockCategory, BlockCategoryResponse] = {}

    for block_type in load_all_blocks().values():
        block: Block[BlockSchema, BlockSchema] = block_type()
        # Skip disabled blocks
        if block.disabled:
            continue
        # Skip blocks that don't have categories (all should have at least one)
        if not block.categories:
            continue

        # Add block to the categories
        for category in block.categories:
            if category not in categories:
                categories[category] = BlockCategoryResponse(
                    name=category.name.lower(),
                    total_blocks=0,
                    blocks=[],
                )

            categories[category].total_blocks += 1

            # Append if the category has less than the specified number of blocks
            if len(categories[category].blocks) < category_blocks:
                categories[category].blocks.append(block.to_dict())

    # Sort categories by name
    return sorted(categories.values(), key=lambda x: x.name)


def get_blocks(
    *,
    category: str | None = None,
    type: BlockType | None = None,
    provider: ProviderName | None = None,
    page: int = 1,
    page_size: int = 50,
) -> BlockResponse:
    """
    Get blocks based on either category, type or provider.
    Providing nothing fetches all block types.
    """
    # Only one of category, type, or provider can be specified
    if (category and type) or (category and provider) or (type and provider):
        raise ValueError("Only one of category, type, or provider can be specified")

    blocks: list[Block[BlockSchema, BlockSchema]] = []
    skip = (page - 1) * page_size
    take = page_size
    total = 0

    for block_type in load_all_blocks().values():
        block: Block[BlockSchema, BlockSchema] = block_type()
        # Skip disabled blocks
        if block.disabled:
            continue
        # Skip blocks that don't match the category
        if category and category not in {c.name.lower() for c in block.categories}:
            continue
        # Skip blocks that don't match the type
        if (
            (type == "input" and block.block_type.value != "Input")
            or (type == "output" and block.block_type.value != "Output")
            or (type == "action" and block.block_type.value in ("Input", "Output"))
        ):
            continue
        # Skip blocks that don't match the provider
        if provider:
            credentials_info = block.input_schema.get_credentials_fields_info().values()
            if not any(provider in info.provider for info in credentials_info):
                continue

        total += 1
        if skip > 0:
            skip -= 1
            continue
        if take > 0:
            take -= 1
            blocks.append(block)

    costs = get_block_costs()

    return BlockResponse(
        blocks=[{**b.to_dict(), "costs": costs.get(b.id, [])} for b in blocks],
        pagination=Pagination(
            total_items=total,
            total_pages=(total + page_size - 1) // page_size,
            current_page=page,
            page_size=page_size,
        ),
    )


def search_blocks(
    include_blocks: bool = True,
    include_integrations: bool = True,
    query: str = "",
    page: int = 1,
    page_size: int = 50,
) -> SearchBlocksResponse:
    """
    Get blocks based on the filter and query.
    `providers` only applies for `integrations` filter.
    """
    blocks: list[Block[BlockSchema, BlockSchema]] = []
    query = query.lower()

    total = 0
    skip = (page - 1) * page_size
    take = page_size
    block_count = 0
    integration_count = 0

    for block_type in load_all_blocks().values():
        block: Block[BlockSchema, BlockSchema] = block_type()
        # Skip disabled blocks
        if block.disabled:
            continue
        # Skip blocks that don't match the query
        if (
            query not in block.name.lower()
            and query not in block.description.lower()
            and not _matches_llm_model(block.input_schema, query)
        ):
            continue
        keep = False
        credentials = list(block.input_schema.get_credentials_fields().values())
        if include_integrations and len(credentials) > 0:
            keep = True
            integration_count += 1
        if include_blocks and len(credentials) == 0:
            keep = True
            block_count += 1

        if not keep:
            continue

        total += 1
        if skip > 0:
            skip -= 1
            continue
        if take > 0:
            take -= 1
            blocks.append(block)

    costs = get_block_costs()

    return SearchBlocksResponse(
        blocks=BlockResponse(
            blocks=[{**b.to_dict(), "costs": costs.get(b.id, [])} for b in blocks],
            pagination=Pagination(
                total_items=total,
                total_pages=(total + page_size - 1) // page_size,
                current_page=page,
                page_size=page_size,
            ),
        ),
        total_block_count=block_count,
        total_integration_count=integration_count,
    )


def get_providers(
    query: str = "",
    page: int = 1,
    page_size: int = 50,
) -> ProviderResponse:
    providers = []
    query = query.lower()

    skip = (page - 1) * page_size
    take = page_size

    all_providers = _get_all_providers()

    for provider in all_providers.values():
        if (
            query not in provider.name.value.lower()
            and query not in provider.description.lower()
        ):
            continue
        if skip > 0:
            skip -= 1
            continue
        if take > 0:
            take -= 1
            providers.append(provider)

    total = len(all_providers)

    return ProviderResponse(
        providers=providers,
        pagination=Pagination(
            total_items=total,
            total_pages=(total + page_size - 1) // page_size,
            current_page=page,
            page_size=page_size,
        ),
    )


async def get_counts(user_id: str) -> CountResponse:
    my_agents = await prisma.models.LibraryAgent.prisma().count(
        where={
            "userId": user_id,
            "isDeleted": False,
            "isArchived": False,
        }
    )
    counts = await _get_static_counts()
    return CountResponse(
        my_agents=my_agents,
        **counts,
    )


async def _get_static_counts():
    """
    Get counts of blocks, integrations, and marketplace agents.
    This is cached to avoid unnecessary database queries and calculations.
    Can't use functools.cache here because the function is async.
    """
    global _static_counts_cache
    if _static_counts_cache is not None:
        return _static_counts_cache

    all_blocks = 0
    input_blocks = 0
    action_blocks = 0
    output_blocks = 0
    integrations = 0

    for block_type in load_all_blocks().values():
        block: Block[BlockSchema, BlockSchema] = block_type()
        if block.disabled:
            continue

        all_blocks += 1

        if block.block_type.value == "Input":
            input_blocks += 1
        elif block.block_type.value == "Output":
            output_blocks += 1
        else:
            action_blocks += 1

        credentials = list(block.input_schema.get_credentials_fields().values())
        if len(credentials) > 0:
            integrations += 1

    marketplace_agents = await prisma.models.StoreAgent.prisma().count()

    _static_counts_cache = {
        "all_blocks": all_blocks,
        "input_blocks": input_blocks,
        "action_blocks": action_blocks,
        "output_blocks": output_blocks,
        "integrations": integrations,
        "marketplace_agents": marketplace_agents,
    }

    return _static_counts_cache


def _matches_llm_model(schema_cls: type[BlockSchema], query: str) -> bool:
    for field in schema_cls.model_fields.values():
        if field.annotation == LlmModel:
            # Check if query matches any value in llm_models
            if any(query in name for name in llm_models):
                return True
    return False


@functools.cache
def _get_all_providers() -> dict[ProviderName, Provider]:
    providers: dict[ProviderName, Provider] = {}

    for block_type in load_all_blocks().values():
        block: Block[BlockSchema, BlockSchema] = block_type()
        if block.disabled:
            continue

        credentials_info = block.input_schema.get_credentials_fields_info().values()
        for info in credentials_info:
            for provider in info.provider:  # provider is a ProviderName enum member
                if provider in providers:
                    providers[provider].integration_count += 1
                else:
                    providers[provider] = Provider(
                        name=provider, description="", integration_count=1
                    )
    return providers


async def get_suggested_blocks(count: int = 5) -> list[BlockData]:
    global _suggested_blocks

    if _suggested_blocks is not None and len(_suggested_blocks) >= count:
        return _suggested_blocks[:count]

    _suggested_blocks = []
    # Sum the number of executions for each block type
    # Prisma cannot group by nested relations, so we do a raw query
    # Calculate the cutoff timestamp
    timestamp_threshold = datetime.now(timezone.utc) - timedelta(days=30)

    results = await prisma.get_client().query_raw(
        """
        SELECT
            agent_node."agentBlockId" AS block_id,
            COUNT(execution.id) AS execution_count
        FROM "AgentNodeExecution" execution
        JOIN "AgentNode" agent_node ON execution."agentNodeId" = agent_node.id
        WHERE execution."endedTime" >= $1::timestamp
        GROUP BY agent_node."agentBlockId"
        ORDER BY execution_count DESC;
        """,
        timestamp_threshold,
    )

    # Get the top blocks based on execution count
    # But ignore Input and Output blocks
    blocks: list[tuple[BlockData, int]] = []

    for block_type in load_all_blocks().values():
        block: Block[BlockSchema, BlockSchema] = block_type()
        if block.disabled or block.block_type in (
            backend.data.block.BlockType.INPUT,
            backend.data.block.BlockType.OUTPUT,
            backend.data.block.BlockType.AGENT,
        ):
            continue
        # Find the execution count for this block
        execution_count = next(
            (row["execution_count"] for row in results if row["block_id"] == block.id),
            0,
        )
        blocks.append((block.to_dict(), execution_count))
    # Sort blocks by execution count
    blocks.sort(key=lambda x: x[1], reverse=True)

    _suggested_blocks = [block[0] for block in blocks]

    # Return the top blocks
    return _suggested_blocks[:count]
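The listing functions above (`get_blocks`, `search_blocks`, `get_providers`) all share the same in-memory skip/take pagination over the block registry: every match is counted toward the total, but only the current page is materialized. A standalone distillation of that pattern, for clarity only:

```python
# Standalone distillation of the skip/take pagination used above; not part
# of the diff. Counts every match for the total, keeps only one page.
def paginate(items, page: int = 1, page_size: int = 50):
    skip = (page - 1) * page_size
    take = page_size
    total = 0
    page_items = []
    for item in items:
        total += 1
        if skip > 0:
            skip -= 1
            continue
        if take > 0:
            take -= 1
            page_items.append(item)
    return page_items, total

items, total = paginate(range(10), page=2, page_size=3)
assert (items, total) == ([3, 4, 5], 10)
```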
87
autogpt_platform/backend/backend/server/v2/builder/model.py
Normal file
@@ -0,0 +1,87 @@
from typing import Any, Literal

from pydantic import BaseModel

import backend.server.v2.library.model as library_model
import backend.server.v2.store.model as store_model
from backend.integrations.providers import ProviderName
from backend.util.models import Pagination

FilterType = Literal[
    "blocks",
    "integrations",
    "marketplace_agents",
    "my_agents",
]

BlockType = Literal["all", "input", "action", "output"]

BlockData = dict[str, Any]


# Suggestions
class SuggestionsResponse(BaseModel):
    otto_suggestions: list[str]
    recent_searches: list[str]
    providers: list[ProviderName]
    top_blocks: list[BlockData]


# All blocks
class BlockCategoryResponse(BaseModel):
    name: str
    total_blocks: int
    blocks: list[BlockData]

    model_config = {"use_enum_values": False}  # <== use enum names like "AI"


# Input/Action/Output and see all for block categories
class BlockResponse(BaseModel):
    blocks: list[BlockData]
    pagination: Pagination


# Providers
class Provider(BaseModel):
    name: ProviderName
    description: str
    integration_count: int


class ProviderResponse(BaseModel):
    providers: list[Provider]
    pagination: Pagination


# Search
class SearchRequest(BaseModel):
    search_query: str | None = None
    filter: list[FilterType] | None = None
    by_creator: list[str] | None = None
    search_id: str | None = None
    page: int | None = None
    page_size: int | None = None


class SearchBlocksResponse(BaseModel):
    blocks: BlockResponse
    total_block_count: int
    total_integration_count: int


class SearchResponse(BaseModel):
    items: list[BlockData | library_model.LibraryAgent | store_model.StoreAgent]
    total_items: dict[FilterType, int]
    page: int
    more_pages: bool


class CountResponse(BaseModel):
    all_blocks: int
    input_blocks: int
    action_blocks: int
    output_blocks: int
    integrations: int
    marketplace_agents: int
    my_agents: int
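All `SearchRequest` fields are optional, so the search endpoint defined in the routes below accepts anything from an empty body to a fully filtered query. An illustrative payload validating against this model (values are made up):

```python
# Illustrative only: a search payload against the SearchRequest model above.
from backend.server.v2.builder.model import SearchRequest

request = SearchRequest.model_validate(
    {
        "search_query": "image generation",
        "filter": ["blocks", "integrations"],
        "page": 1,
        "page_size": 25,
    }
)
assert request.by_creator is None  # unset fields default to None
```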
239
autogpt_platform/backend/backend/server/v2/builder/routes.py
Normal file
@@ -0,0 +1,239 @@
import logging
from typing import Annotated, Sequence

import fastapi
from autogpt_libs.auth.depends import auth_middleware, get_user_id

import backend.server.v2.builder.db as builder_db
import backend.server.v2.builder.model as builder_model
import backend.server.v2.library.db as library_db
import backend.server.v2.library.model as library_model
import backend.server.v2.store.db as store_db
import backend.server.v2.store.model as store_model
from backend.integrations.providers import ProviderName
from backend.util.models import Pagination

logger = logging.getLogger(__name__)

router = fastapi.APIRouter()


# Taken from backend/server/v2/store/db.py
def sanitize_query(query: str | None) -> str | None:
    if query is None:
        return query
    query = query.strip()[:100]
    return (
        query.replace("\\", "\\\\")
        .replace("%", "\\%")
        .replace("_", "\\_")
        .replace("[", "\\[")
        .replace("]", "\\]")
        .replace("'", "\\'")
        .replace('"', '\\"')
        .replace(";", "\\;")
        .replace("--", "\\--")
        .replace("/*", "\\/*")
        .replace("*/", "\\*/")
    )


@router.get(
    "/suggestions",
    summary="Get Builder suggestions",
    dependencies=[fastapi.Depends(auth_middleware)],
    response_model=builder_model.SuggestionsResponse,
)
async def get_suggestions(
    user_id: Annotated[str, fastapi.Depends(get_user_id)],
) -> builder_model.SuggestionsResponse:
    """
    Get all suggestions for the Blocks Menu.
    """
    return builder_model.SuggestionsResponse(
        otto_suggestions=[
            "What blocks do I need to get started?",
            "Help me create a list",
            "Help me feed my data to Google Maps",
        ],
        recent_searches=[
            "image generation",
            "deepfake",
            "competitor analysis",
        ],
        providers=[
            ProviderName.TWITTER,
            ProviderName.GITHUB,
            ProviderName.NOTION,
            ProviderName.GOOGLE,
            ProviderName.DISCORD,
            ProviderName.GOOGLE_MAPS,
        ],
        top_blocks=await builder_db.get_suggested_blocks(),
    )


@router.get(
    "/categories",
    summary="Get Builder block categories",
    dependencies=[fastapi.Depends(auth_middleware)],
    response_model=Sequence[builder_model.BlockCategoryResponse],
)
async def get_block_categories(
    blocks_per_category: Annotated[int, fastapi.Query()] = 3,
) -> Sequence[builder_model.BlockCategoryResponse]:
    """
    Get all block categories with a specified number of blocks per category.
    """
    return builder_db.get_block_categories(blocks_per_category)


@router.get(
    "/blocks",
    summary="Get Builder blocks",
    dependencies=[fastapi.Depends(auth_middleware)],
    response_model=builder_model.BlockResponse,
)
async def get_blocks(
    category: Annotated[str | None, fastapi.Query()] = None,
    type: Annotated[builder_model.BlockType | None, fastapi.Query()] = None,
    provider: Annotated[ProviderName | None, fastapi.Query()] = None,
    page: Annotated[int, fastapi.Query()] = 1,
    page_size: Annotated[int, fastapi.Query()] = 50,
) -> builder_model.BlockResponse:
    """
    Get blocks based on either category, type, or provider.
    """
    return builder_db.get_blocks(
        category=category,
        type=type,
        provider=provider,
        page=page,
        page_size=page_size,
    )


@router.get(
    "/providers",
    summary="Get Builder integration providers",
    dependencies=[fastapi.Depends(auth_middleware)],
    response_model=builder_model.ProviderResponse,
)
async def get_providers(
    page: Annotated[int, fastapi.Query()] = 1,
    page_size: Annotated[int, fastapi.Query()] = 50,
) -> builder_model.ProviderResponse:
    """
    Get all integration providers with their block counts.
    """
    return builder_db.get_providers(
        page=page,
        page_size=page_size,
    )


@router.post(
    "/search",
    summary="Builder search",
    tags=["store", "private"],
    dependencies=[fastapi.Depends(auth_middleware)],
    response_model=builder_model.SearchResponse,
)
async def search(
    options: builder_model.SearchRequest,
    user_id: Annotated[str, fastapi.Depends(get_user_id)],
) -> builder_model.SearchResponse:
    """
    Search for blocks (including integrations), marketplace agents, and user library agents.
    """
    # If no filters are provided, then we will return all types
    if not options.filter:
        options.filter = [
            "blocks",
            "integrations",
            "marketplace_agents",
            "my_agents",
        ]
    options.search_query = sanitize_query(options.search_query)
    options.page = options.page or 1
    options.page_size = options.page_size or 50

    # Blocks & Integrations
    blocks = builder_model.SearchBlocksResponse(
        blocks=builder_model.BlockResponse(
            blocks=[],
            pagination=Pagination.empty(),
        ),
        total_block_count=0,
        total_integration_count=0,
    )
    if "blocks" in options.filter or "integrations" in options.filter:
        blocks = builder_db.search_blocks(
            include_blocks="blocks" in options.filter,
            include_integrations="integrations" in options.filter,
            query=options.search_query or "",
            page=options.page,
            page_size=options.page_size,
        )

    # Library Agents
    my_agents = library_model.LibraryAgentResponse(
        agents=[],
        pagination=Pagination.empty(),
    )
    if "my_agents" in options.filter:
        my_agents = await library_db.list_library_agents(
            user_id=user_id,
            search_term=options.search_query,
            page=options.page,
            page_size=options.page_size,
        )

    # Marketplace Agents
    marketplace_agents = store_model.StoreAgentsResponse(
        agents=[],
        pagination=Pagination.empty(),
    )
    if "marketplace_agents" in options.filter:
        marketplace_agents = await store_db.get_store_agents(
            creators=options.by_creator,
            search_query=options.search_query,
            page=options.page,
            page_size=options.page_size,
        )

    more_pages = False
    if (
        blocks.blocks.pagination.current_page < blocks.blocks.pagination.total_pages
        or my_agents.pagination.current_page < my_agents.pagination.total_pages
        or marketplace_agents.pagination.current_page
        < marketplace_agents.pagination.total_pages
    ):
        more_pages = True

    return builder_model.SearchResponse(
        items=blocks.blocks.blocks + my_agents.agents + marketplace_agents.agents,
        total_items={
            "blocks": blocks.total_block_count,
            "integrations": blocks.total_integration_count,
            "marketplace_agents": marketplace_agents.pagination.total_items,
            "my_agents": my_agents.pagination.total_items,
        },
        page=options.page,
        more_pages=more_pages,
    )


@router.get(
    "/counts",
    summary="Get Builder item counts",
    dependencies=[fastapi.Depends(auth_middleware)],
    response_model=builder_model.CountResponse,
)
async def get_counts(
    user_id: Annotated[str, fastapi.Depends(get_user_id)],
) -> builder_model.CountResponse:
    """
    Get item counts for the menu categories in the Blocks Menu.
    """
    return await builder_db.get_counts(user_id)
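A hypothetical client call against the `POST /search` route above; the mount prefix for this router is not shown in the diff, so `/api/builder` and the base URL are assumptions:

```python
# Hypothetical client sketch; route prefix and auth scheme are assumptions,
# since only the APIRouter (not its mounting) appears in this diff.
import httpx

async def search_builder(token: str) -> dict:
    async with httpx.AsyncClient(base_url="https://example.com/api/builder") as client:
        resp = await client.post(
            "/search",
            json={"search_query": "notion", "filter": ["integrations"]},
            headers={"Authorization": f"Bearer {token}"},
        )
        resp.raise_for_status()
        # SearchResponse shape: items, total_items, page, more_pages
        return resp.json()
```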
@@ -9,7 +9,6 @@ import prisma.models
|
||||
import prisma.types
|
||||
|
||||
import backend.data.graph as graph_db
|
||||
import backend.server.model
|
||||
import backend.server.v2.library.model as library_model
|
||||
import backend.server.v2.store.exceptions as store_exceptions
|
||||
import backend.server.v2.store.image_gen as store_image_gen
|
||||
@@ -23,6 +22,7 @@ from backend.integrations.creds_manager import IntegrationCredentialsManager
|
||||
from backend.integrations.webhooks.graph_lifecycle_hooks import on_graph_activate
|
||||
from backend.util.exceptions import NotFoundError
|
||||
from backend.util.json import SafeJson
|
||||
from backend.util.models import Pagination
|
||||
from backend.util.settings import Config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -131,7 +131,7 @@ async def list_library_agents(
|
||||
# Return the response with only valid agents
|
||||
return library_model.LibraryAgentResponse(
|
||||
agents=valid_library_agents,
|
||||
pagination=backend.server.model.Pagination(
|
||||
pagination=Pagination(
|
||||
total_items=agent_count,
|
||||
total_pages=(agent_count + page_size - 1) // page_size,
|
||||
current_page=page,
|
||||
@@ -241,7 +241,11 @@ async def get_library_agent_by_graph_id(
|
||||
)
|
||||
if not agent:
|
||||
return None
|
||||
return library_model.LibraryAgent.from_db(agent)
|
||||
|
||||
assert agent.AgentGraph # make type checker happy
|
||||
# Include sub-graphs so we can make a full credentials input schema
|
||||
sub_graphs = await graph_db.get_sub_graphs(agent.AgentGraph)
|
||||
return library_model.LibraryAgent.from_db(agent, sub_graphs=sub_graphs)
|
||||
except prisma.errors.PrismaError as e:
|
||||
logger.error(f"Database error fetching library agent by graph ID: {e}")
|
||||
raise store_exceptions.DatabaseError("Failed to fetch library agent") from e
|
||||
@@ -625,7 +629,7 @@ async def list_presets(
|
||||
|
||||
return library_model.LibraryAgentPresetResponse(
|
||||
presets=presets,
|
||||
pagination=backend.server.model.Pagination(
|
||||
pagination=Pagination(
|
||||
total_items=total_items,
|
||||
total_pages=total_pages,
|
||||
current_page=page,
|
||||
|
||||
@@ -8,9 +8,9 @@ import pydantic
|
||||
|
||||
import backend.data.block as block_model
|
||||
import backend.data.graph as graph_model
|
||||
import backend.server.model as server_model
|
||||
from backend.data.model import CredentialsMetaInput, is_credentials_field_name
|
||||
from backend.integrations.providers import ProviderName
|
||||
from backend.util.models import Pagination
|
||||
|
||||
|
||||
class LibraryAgentStatus(str, Enum):
|
||||
@@ -51,6 +51,7 @@ class LibraryAgent(pydantic.BaseModel):
|
||||
description: str
|
||||
|
||||
input_schema: dict[str, Any] # Should be BlockIOObjectSubSchema in frontend
|
||||
output_schema: dict[str, Any]
|
||||
credentials_input_schema: dict[str, Any] | None = pydantic.Field(
|
||||
description="Input schema for credentials required by the agent",
|
||||
)
|
||||
@@ -126,6 +127,7 @@ class LibraryAgent(pydantic.BaseModel):
|
||||
name=graph.name,
|
||||
description=graph.description,
|
||||
input_schema=graph.input_schema,
|
||||
output_schema=graph.output_schema,
|
||||
credentials_input_schema=(
|
||||
graph.credentials_input_schema if sub_graphs is not None else None
|
||||
),
|
||||
@@ -213,7 +215,7 @@ class LibraryAgentResponse(pydantic.BaseModel):
|
||||
"""Response schema for a list of library agents and pagination info."""
|
||||
|
||||
agents: list[LibraryAgent]
|
||||
pagination: server_model.Pagination
|
||||
pagination: Pagination
|
||||
|
||||
|
||||
class LibraryAgentPresetCreatable(pydantic.BaseModel):
|
||||
@@ -317,7 +319,7 @@ class LibraryAgentPresetResponse(pydantic.BaseModel):
|
||||
"""Response schema for a list of agent presets and pagination info."""
|
||||
|
||||
presets: list[LibraryAgentPreset]
|
||||
pagination: server_model.Pagination
|
||||
pagination: Pagination
|
||||
|
||||
|
||||
class LibraryAgentFilter(str, Enum):
|
||||
|
||||
@@ -7,9 +7,9 @@ import pytest
|
||||
import pytest_mock
|
||||
from pytest_snapshot.plugin import Snapshot
|
||||
|
||||
import backend.server.model as server_model
|
||||
import backend.server.v2.library.model as library_model
|
||||
from backend.server.v2.library.routes import router as library_router
|
||||
from backend.util.models import Pagination
|
||||
|
||||
app = fastapi.FastAPI()
|
||||
app.include_router(library_router)
|
||||
@@ -50,6 +50,7 @@ async def test_get_library_agents_success(
|
||||
creator_name="Test Creator",
|
||||
creator_image_url="",
|
||||
input_schema={"type": "object", "properties": {}},
|
||||
output_schema={"type": "object", "properties": {}},
|
||||
credentials_input_schema={"type": "object", "properties": {}},
|
||||
has_external_trigger=False,
|
||||
status=library_model.LibraryAgentStatus.COMPLETED,
|
||||
@@ -68,6 +69,7 @@ async def test_get_library_agents_success(
|
||||
creator_name="Test Creator",
|
||||
creator_image_url="",
|
||||
input_schema={"type": "object", "properties": {}},
|
||||
output_schema={"type": "object", "properties": {}},
|
||||
credentials_input_schema={"type": "object", "properties": {}},
|
||||
has_external_trigger=False,
|
||||
status=library_model.LibraryAgentStatus.COMPLETED,
|
||||
@@ -77,7 +79,7 @@ async def test_get_library_agents_success(
|
||||
updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
|
||||
),
|
||||
],
|
||||
pagination=server_model.Pagination(
|
||||
pagination=Pagination(
|
||||
total_items=2, total_pages=1, current_page=1, page_size=50
|
||||
),
|
||||
)
|
||||
@@ -132,6 +134,7 @@ def test_add_agent_to_library_success(mocker: pytest_mock.MockFixture):
|
||||
creator_name="Test Creator",
|
||||
creator_image_url="",
|
||||
input_schema={"type": "object", "properties": {}},
|
||||
output_schema={"type": "object", "properties": {}},
|
||||
credentials_input_schema={"type": "object", "properties": {}},
|
||||
has_external_trigger=False,
|
||||
status=library_model.LibraryAgentStatus.COMPLETED,
|
||||
|
||||
@@ -17,8 +17,21 @@ from backend.data.graph import (
|
||||
get_sub_graphs,
|
||||
)
|
||||
from backend.data.includes import AGENT_GRAPH_INCLUDE
|
||||
from backend.data.notifications import (
|
||||
AgentApprovalData,
|
||||
AgentRejectionData,
|
||||
NotificationEventModel,
|
||||
)
|
||||
from backend.notifications.notifications import queue_notification_async
|
||||
from backend.util.settings import Settings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
settings = Settings()
|
||||
|
||||
|
||||
# Constants for default admin values
|
||||
DEFAULT_ADMIN_NAME = "AutoGPT Admin"
|
||||
DEFAULT_ADMIN_EMAIL = "admin@autogpt.co"
|
||||
|
||||
|
||||
def sanitize_query(query: str | None) -> str | None:
|
||||
@@ -42,7 +55,7 @@ def sanitize_query(query: str | None) -> str | None:
|
||||
|
||||
async def get_store_agents(
|
||||
featured: bool = False,
|
||||
creator: str | None = None,
|
||||
creators: list[str] | None = None,
|
||||
sorted_by: str | None = None,
|
||||
search_query: str | None = None,
|
||||
category: str | None = None,
|
||||
@@ -53,15 +66,15 @@ async def get_store_agents(
|
||||
Get PUBLIC store agents from the StoreAgent view
|
||||
"""
|
||||
logger.debug(
|
||||
f"Getting store agents. featured={featured}, creator={creator}, sorted_by={sorted_by}, search={search_query}, category={category}, page={page}"
|
||||
f"Getting store agents. featured={featured}, creators={creators}, sorted_by={sorted_by}, search={search_query}, category={category}, page={page}"
|
||||
)
|
||||
sanitized_query = sanitize_query(search_query)
|
||||
|
||||
where_clause = {}
|
||||
if featured:
|
||||
where_clause["featured"] = featured
|
||||
if creator:
|
||||
where_clause["creator_username"] = creator
|
||||
if creators:
|
||||
where_clause["creator_username"] = {"in": creators}
|
||||
if category:
|
||||
where_clause["categories"] = {"has": category}
|
||||
|
||||
@@ -466,6 +479,8 @@ async def get_store_submissions(
|
||||
# internal_comments omitted for regular users
|
||||
reviewed_at=sub.reviewed_at,
|
||||
changes_summary=sub.changes_summary,
|
||||
video_url=sub.video_url,
|
||||
categories=sub.categories,
|
||||
)
|
||||
submission_models.append(submission_model)
|
||||
|
||||
@@ -546,7 +561,7 @@ async def create_store_submission(
|
||||
description: str = "",
|
||||
sub_heading: str = "",
|
||||
categories: list[str] = [],
|
||||
changes_summary: str = "Initial Submission",
|
||||
changes_summary: str | None = "Initial Submission",
|
||||
) -> backend.server.v2.store.model.StoreSubmission:
|
||||
"""
|
||||
Create the first (and only) store listing and thus submission as a normal user
|
||||
@@ -685,6 +700,160 @@ async def create_store_submission(
|
||||
) from e
|
||||
|
||||
|
||||
async def edit_store_submission(
|
||||
user_id: str,
|
||||
store_listing_version_id: str,
|
||||
name: str,
|
||||
video_url: str | None = None,
|
||||
image_urls: list[str] = [],
|
||||
description: str = "",
|
||||
sub_heading: str = "",
|
||||
categories: list[str] = [],
|
||||
changes_summary: str | None = "Update submission",
|
||||
) -> backend.server.v2.store.model.StoreSubmission:
|
||||
"""
|
||||
Edit an existing store listing submission.
|
||||
|
||||
Args:
|
||||
user_id: ID of the authenticated user editing the submission
|
||||
store_listing_version_id: ID of the store listing version to edit
|
||||
agent_id: ID of the agent being submitted
|
||||
agent_version: Version of the agent being submitted
|
||||
slug: URL slug for the listing (only changeable for PENDING submissions)
|
||||
name: Name of the agent
|
||||
video_url: Optional URL to video demo
|
||||
image_urls: List of image URLs for the listing
|
||||
description: Description of the agent
|
||||
sub_heading: Optional sub-heading for the agent
|
||||
categories: List of categories for the agent
|
||||
changes_summary: Summary of changes made in this submission
|
||||
|
||||
Returns:
|
||||
StoreSubmission: The updated store submission
|
||||
|
||||
Raises:
|
||||
SubmissionNotFoundError: If the submission is not found
|
||||
UnauthorizedError: If the user doesn't own the submission
|
||||
InvalidOperationError: If trying to edit a submission that can't be edited
|
||||
"""
|
||||
try:
|
||||
# Get the current version and verify ownership
|
||||
current_version = await prisma.models.StoreListingVersion.prisma().find_first(
|
||||
where=prisma.types.StoreListingVersionWhereInput(
|
||||
id=store_listing_version_id
|
||||
),
|
||||
include={
|
||||
"StoreListing": {
|
||||
"include": {
|
||||
"Versions": {"order_by": {"version": "desc"}, "take": 1}
|
||||
}
|
||||
}
|
||||
},
|
||||
)
|
||||
|
||||
if not current_version:
|
||||
raise backend.server.v2.store.exceptions.SubmissionNotFoundError(
|
||||
f"Store listing version not found: {store_listing_version_id}"
|
||||
)
|
||||
|
||||
# Verify the user owns this submission
|
||||
if (
|
||||
not current_version.StoreListing
|
||||
or current_version.StoreListing.owningUserId != user_id
|
||||
):
|
||||
raise backend.server.v2.store.exceptions.UnauthorizedError(
|
||||
f"User {user_id} does not own submission {store_listing_version_id}"
|
||||
)
|
||||
|
||||
# Currently we are not allowing user to update the agent associated with a submission
|
||||
# If we allow it in future, then we need a check here to verify the agent belongs to this user.
|
||||
|
||||
# Check if we can edit this submission
|
||||
if current_version.submissionStatus == prisma.enums.SubmissionStatus.REJECTED:
|
||||
raise backend.server.v2.store.exceptions.InvalidOperationError(
|
||||
"Cannot edit a rejected submission"
|
||||
)
|
||||
|
||||
# For APPROVED submissions, we need to create a new version
|
||||
if current_version.submissionStatus == prisma.enums.SubmissionStatus.APPROVED:
|
||||
# Create a new version for the existing listing
|
||||
return await create_store_version(
|
||||
user_id=user_id,
|
||||
agent_id=current_version.agentGraphId,
|
||||
agent_version=current_version.agentGraphVersion,
|
||||
store_listing_id=current_version.storeListingId,
|
||||
name=name,
|
||||
video_url=video_url,
|
||||
image_urls=image_urls,
|
||||
description=description,
|
||||
sub_heading=sub_heading,
|
||||
categories=categories,
|
||||
changes_summary=changes_summary,
|
||||
)
|
||||
|
||||
# For PENDING submissions, we can update the existing version
|
||||
elif current_version.submissionStatus == prisma.enums.SubmissionStatus.PENDING:
|
||||
# Update the existing version
|
||||
updated_version = await prisma.models.StoreListingVersion.prisma().update(
|
||||
where={"id": store_listing_version_id},
|
||||
data=prisma.types.StoreListingVersionUpdateInput(
|
||||
name=name,
|
||||
videoUrl=video_url,
|
||||
imageUrls=image_urls,
|
||||
description=description,
|
||||
categories=categories,
|
||||
subHeading=sub_heading,
|
||||
changesSummary=changes_summary,
|
||||
),
|
||||
)
|
||||
|
||||
logger.debug(
|
||||
f"Updated existing version {store_listing_version_id} for agent {current_version.agentGraphId}"
|
||||
)
|
||||
|
||||
if not updated_version:
|
||||
raise backend.server.v2.store.exceptions.DatabaseError(
|
||||
"Failed to update store listing version"
|
||||
)
|
||||
return backend.server.v2.store.model.StoreSubmission(
|
||||
agent_id=current_version.agentGraphId,
|
||||
agent_version=current_version.agentGraphVersion,
|
||||
name=name,
|
||||
sub_heading=sub_heading,
|
||||
slug=current_version.StoreListing.slug,
|
||||
description=description,
|
||||
image_urls=image_urls,
|
||||
date_submitted=updated_version.submittedAt or updated_version.createdAt,
|
||||
status=updated_version.submissionStatus,
|
||||
runs=0,
|
||||
rating=0.0,
|
||||
store_listing_version_id=updated_version.id,
|
||||
changes_summary=changes_summary,
|
||||
video_url=video_url,
|
||||
categories=categories,
|
||||
version=updated_version.version,
|
||||
)
|
||||
|
||||
else:
|
||||
raise backend.server.v2.store.exceptions.InvalidOperationError(
|
||||
f"Cannot edit submission with status: {current_version.submissionStatus}"
|
||||
)
|
||||
|
||||
except (
|
||||
backend.server.v2.store.exceptions.SubmissionNotFoundError,
|
||||
backend.server.v2.store.exceptions.UnauthorizedError,
|
||||
backend.server.v2.store.exceptions.AgentNotFoundError,
|
||||
backend.server.v2.store.exceptions.ListingExistsError,
|
||||
backend.server.v2.store.exceptions.InvalidOperationError,
|
||||
):
|
||||
raise
|
||||
except prisma.errors.PrismaError as e:
|
||||
logger.error(f"Database error editing store submission: {e}")
|
||||
raise backend.server.v2.store.exceptions.DatabaseError(
|
||||
"Failed to edit store submission"
|
||||
) from e
|
||||
|
||||
|
||||
async def create_store_version(
|
||||
user_id: str,
|
||||
agent_id: str,
|
||||
@@ -696,7 +865,7 @@ async def create_store_version(
|
||||
description: str = "",
|
||||
sub_heading: str = "",
|
||||
categories: list[str] = [],
|
||||
changes_summary: str = "Update Submission",
|
||||
changes_summary: str | None = "Initial submission",
|
||||
) -> backend.server.v2.store.model.StoreSubmission:
|
||||
"""
|
||||
Create a new version for an existing store listing
|
||||
@@ -1085,7 +1254,8 @@ async def review_store_submission(
|
||||
where={"id": store_listing_version_id},
|
||||
include={
|
||||
"StoreListing": True,
|
||||
"AgentGraph": {"include": AGENT_GRAPH_INCLUDE},
|
||||
"AgentGraph": {"include": {**AGENT_GRAPH_INCLUDE, "User": True}},
|
||||
"Reviewer": True,
|
||||
},
|
||||
)
|
||||
)
|
||||
@@ -1096,6 +1266,13 @@ async def review_store_submission(
|
||||
detail=f"Store listing version {store_listing_version_id} not found",
|
||||
)
|
||||
|
||||
# Check if we're rejecting an already approved agent
|
||||
is_rejecting_approved = (
|
||||
not is_approved
|
||||
and store_listing_version.submissionStatus
|
||||
== prisma.enums.SubmissionStatus.APPROVED
|
||||
)
|
||||
|
||||
# If approving, update the listing to indicate it has an approved version
|
||||
if is_approved and store_listing_version.AgentGraph:
|
||||
heading = f"Sub-graph of {store_listing_version.name}v{store_listing_version.agentGraphVersion}"
|
||||
@@ -1126,6 +1303,37 @@ async def review_store_submission(
|
||||
},
|
||||
)
|
||||
|
||||
# If rejecting an approved agent, update the StoreListing accordingly
|
||||
if is_rejecting_approved:
|
||||
# Check if there are other approved versions
|
||||
other_approved = (
|
||||
await prisma.models.StoreListingVersion.prisma().find_first(
|
||||
where={
|
||||
"storeListingId": store_listing_version.StoreListing.id,
|
||||
"id": {"not": store_listing_version_id},
|
||||
"submissionStatus": prisma.enums.SubmissionStatus.APPROVED,
|
||||
}
|
||||
)
|
||||
)
|
||||
|
||||
if not other_approved:
|
||||
# No other approved versions, update hasApprovedVersion to False
|
||||
await prisma.models.StoreListing.prisma().update(
|
||||
where={"id": store_listing_version.StoreListing.id},
|
||||
data={
|
||||
"hasApprovedVersion": False,
|
||||
"ActiveVersion": {"disconnect": True},
|
||||
},
|
||||
)
|
||||
else:
|
||||
# Set the most recent other approved version as active
|
||||
await prisma.models.StoreListing.prisma().update(
|
||||
where={"id": store_listing_version.StoreListing.id},
|
||||
data={
|
||||
"ActiveVersion": {"connect": {"id": other_approved.id}},
|
||||
},
|
||||
)
|
||||
|
||||
submission_status = (
|
||||
prisma.enums.SubmissionStatus.APPROVED
|
||||
if is_approved
|
||||
@@ -1154,6 +1362,89 @@ async def review_store_submission(
|
||||
f"Failed to update store listing version {store_listing_version_id}"
|
||||
)
|
||||
|
||||
# Send email notification to the agent creator
|
||||
if store_listing_version.AgentGraph and store_listing_version.AgentGraph.User:
|
||||
agent_creator = store_listing_version.AgentGraph.User
|
||||
reviewer = (
|
||||
store_listing_version.Reviewer
|
||||
if store_listing_version.Reviewer
|
||||
else None
|
||||
)
|
||||
|
||||
try:
|
||||
base_url = (
|
||||
settings.config.frontend_base_url
|
||||
or settings.config.platform_base_url
|
||||
)
|
||||
|
||||
if is_approved:
|
||||
store_agent = (
|
||||
await prisma.models.StoreAgent.prisma().find_first_or_raise(
|
||||
where={"storeListingVersionId": submission.id}
|
||||
)
|
||||
)
|
||||
|
||||
# Send approval notification
|
||||
notification_data = AgentApprovalData(
|
||||
agent_name=submission.name,
|
||||
agent_id=submission.agentGraphId,
|
||||
agent_version=submission.agentGraphVersion,
|
||||
reviewer_name=(
|
||||
reviewer.name
|
||||
if reviewer and reviewer.name
|
||||
else DEFAULT_ADMIN_NAME
|
||||
),
|
||||
reviewer_email=(
|
||||
reviewer.email if reviewer else DEFAULT_ADMIN_EMAIL
|
||||
),
|
||||
comments=external_comments,
|
||||
reviewed_at=submission.reviewedAt
|
||||
or datetime.now(tz=timezone.utc),
|
||||
store_url=f"{base_url}/marketplace/agent/{store_agent.creator_username}/{store_agent.slug}",
|
||||
)
|
||||
|
||||
notification_event = NotificationEventModel[AgentApprovalData](
|
||||
user_id=agent_creator.id,
|
||||
type=prisma.enums.NotificationType.AGENT_APPROVED,
|
||||
data=notification_data,
|
||||
)
|
||||
else:
|
||||
# Send rejection notification
|
||||
notification_data = AgentRejectionData(
|
||||
agent_name=submission.name,
|
||||
agent_id=submission.agentGraphId,
|
||||
agent_version=submission.agentGraphVersion,
|
||||
reviewer_name=(
|
||||
reviewer.name
|
||||
if reviewer and reviewer.name
|
||||
else DEFAULT_ADMIN_NAME
|
||||
),
|
||||
reviewer_email=(
|
||||
reviewer.email if reviewer else DEFAULT_ADMIN_EMAIL
|
||||
),
|
||||
comments=external_comments,
|
||||
reviewed_at=submission.reviewedAt
|
||||
or datetime.now(tz=timezone.utc),
|
||||
resubmit_url=f"{base_url}/build?flowID={submission.agentGraphId}",
|
||||
)
|
||||
|
||||
notification_event = NotificationEventModel[AgentRejectionData](
|
||||
user_id=agent_creator.id,
|
||||
type=prisma.enums.NotificationType.AGENT_REJECTED,
|
||||
data=notification_data,
|
||||
)
|
||||
|
||||
# Queue the notification for immediate sending
|
||||
await queue_notification_async(notification_event)
|
||||
logger.info(
|
||||
f"Queued {'approval' if is_approved else 'rejection'} notification for user {agent_creator.id} and agent {submission.name}"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to send email notification for agent review: {e}")
|
||||
# Don't fail the review process if email sending fails
|
||||
pass
|
||||
|
||||
# Convert to Pydantic model for consistency
|
||||
return backend.server.v2.store.model.StoreSubmission(
|
||||
agent_id=submission.agentGraphId,
|
||||
|
||||
@@ -94,3 +94,15 @@ class SubmissionNotFoundError(StoreError):
    """Raised when a submission is not found"""

    pass


class InvalidOperationError(StoreError):
    """Raised when an operation is not valid for the current state"""

    pass


class UnauthorizedError(StoreError):
    """Raised when a user is not authorized to perform an action"""

    pass
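These exception classes carry no behavior of their own; their value is in letting the API layer translate domain failures into HTTP responses. A hedged sketch of how such a mapping could be wired up in FastAPI (the handler registration and status-code choices are illustrative assumptions, not the repository's actual configuration):

import fastapi
from fastapi.responses import JSONResponse


def register_store_error_handlers(app: fastapi.FastAPI) -> None:
    # Hypothetical mapping from store domain errors to HTTP status codes
    @app.exception_handler(UnauthorizedError)
    async def on_unauthorized(request: fastapi.Request, exc: UnauthorizedError):
        return JSONResponse(status_code=403, content={"detail": str(exc)})

    @app.exception_handler(SubmissionNotFoundError)
    async def on_not_found(request: fastapi.Request, exc: SubmissionNotFoundError):
        return JSONResponse(status_code=404, content={"detail": str(exc)})

    @app.exception_handler(InvalidOperationError)
    async def on_invalid_op(request: fastapi.Request, exc: InvalidOperationError):
        return JSONResponse(status_code=400, content={"detail": str(exc)})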
@@ -4,7 +4,7 @@ from typing import List
import prisma.enums
import pydantic

from backend.server.model import Pagination
from backend.util.models import Pagination


class MyAgent(pydantic.BaseModel):
@@ -115,11 +115,9 @@ class StoreSubmission(pydantic.BaseModel):
    reviewed_at: datetime.datetime | None = None
    changes_summary: str | None = None

    reviewer_id: str | None = None
    review_comments: str | None = None  # External comments visible to creator
    internal_comments: str | None = None  # Private notes for admin use only
    reviewed_at: datetime.datetime | None = None
    changes_summary: str | None = None
    # Additional fields for editing
    video_url: str | None = None
    categories: list[str] = []


class StoreSubmissionsResponse(pydantic.BaseModel):
@@ -161,6 +159,16 @@ class StoreSubmissionRequest(pydantic.BaseModel):
    changes_summary: str | None = None


class StoreSubmissionEditRequest(pydantic.BaseModel):
    name: str
    sub_heading: str
    video_url: str | None = None
    image_urls: list[str] = []
    description: str = ""
    categories: list[str] = []
    changes_summary: str | None = None


class ProfileDetails(pydantic.BaseModel):
    name: str
    username: str
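StoreSubmissionEditRequest is the request body of the PUT /submissions/{store_listing_version_id} route shown further down. A hedged sketch of constructing a valid body, assuming pydantic v2 (the values are made up for illustration; only name and sub_heading lack defaults, so they are the only required fields):

import pydantic


class StoreSubmissionEditRequest(pydantic.BaseModel):
    # Re-declared here only to keep the sketch self-contained;
    # mirrors the model defined in the diff above.
    name: str
    sub_heading: str
    video_url: str | None = None
    image_urls: list[str] = []
    description: str = ""
    categories: list[str] = []
    changes_summary: str | None = None


edit_request = StoreSubmissionEditRequest(
    name="My Agent",
    sub_heading="Automates weekly reports",
    description="Collects metrics and emails a summary every Monday.",
    categories=["productivity"],
    changes_summary="Clarified the description",
)
print(edit_request.model_dump_json(indent=2))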
@@ -162,7 +162,7 @@ async def get_agents(
    try:
        agents = await backend.server.v2.store.db.get_store_agents(
            featured=featured,
            creator=creator,
            creators=[creator] if creator else None,
            sorted_by=sorted_by,
            search_query=search_query,
            category=category,
@@ -564,6 +564,47 @@ async def create_submission(
    )


@router.put(
    "/submissions/{store_listing_version_id}",
    summary="Edit store submission",
    tags=["store", "private"],
    dependencies=[fastapi.Depends(autogpt_libs.auth.middleware.auth_middleware)],
    response_model=backend.server.v2.store.model.StoreSubmission,
)
async def edit_submission(
    store_listing_version_id: str,
    submission_request: backend.server.v2.store.model.StoreSubmissionEditRequest,
    user_id: typing.Annotated[
        str, fastapi.Depends(autogpt_libs.auth.depends.get_user_id)
    ],
):
    """
    Edit an existing store listing submission.

    Args:
        store_listing_version_id (str): ID of the store listing version to edit
        submission_request (StoreSubmissionEditRequest): The updated submission details
        user_id (str): ID of the authenticated user editing the listing

    Returns:
        StoreSubmission: The updated store submission

    Raises:
        HTTPException: If there is an error editing the submission
    """
    return await backend.server.v2.store.db.edit_store_submission(
        user_id=user_id,
        store_listing_version_id=store_listing_version_id,
        name=submission_request.name,
        video_url=submission_request.video_url,
        image_urls=submission_request.image_urls,
        description=submission_request.description,
        sub_heading=submission_request.sub_heading,
        categories=submission_request.categories,
        changes_summary=submission_request.changes_summary,
    )

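A hedged sketch of calling the new route from a client, assuming the backend is reachable locally and that a bearer token satisfies the auth middleware; the base URL, route prefix, token, and version ID are all illustrative assumptions:

import httpx

BASE_URL = "http://localhost:8006/api/store"  # hypothetical deployment prefix
TOKEN = "<jwt-accepted-by-auth-middleware>"  # placeholder credential

body = {
    "name": "My Agent",
    "sub_heading": "Automates weekly reports",
    "description": "Collects metrics and emails a summary every Monday.",
    "categories": ["productivity"],
    "changes_summary": "Clarified the description",
}

response = httpx.put(
    f"{BASE_URL}/submissions/some-store-listing-version-id",
    json=body,
    headers={"Authorization": f"Bearer {TOKEN}"},
    timeout=30.0,
)
response.raise_for_status()
print(response.json())  # the updated StoreSubmission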

@router.post(
    "/submissions/media",
    summary="Upload submission media",
Some files were not shown because too many files have changed in this diff