Compare commits


42 Commits

Author                SHA1        Message  Date
SwiftyOS              6c436d6137  add default keyords  2024-09-20 13:55:52 +02:00
SwiftyOS              0146a01595  fix dockerfile  2024-09-20 13:39:47 +02:00
SwiftyOS              7e3b40cef3  Update docker file formatting  2024-09-20 13:37:42 +02:00
SwiftyOS              6afd670e9d  Update docker file formatting  2024-09-20 13:36:44 +02:00
Reinier van der Leer  6ae6c711b7  fix Dockerfile.autogpt  2024-09-20 13:32:49 +02:00
Reinier van der Leer  c8f55bc518  fix .pre-commit-config.yml  2024-09-20 13:27:50 +02:00
Reinier van der Leer  df2126c1a8  fix Dockerfile.autogpt  2024-09-20 13:23:15 +02:00
Reinier van der Leer  dfcfd003df  fix original_autogpt path in classic/cli.py  2024-09-20 13:21:36 +02:00
Reinier van der Leer  4e33399d31  fix code workspace file (vol. 2)  2024-09-20 13:11:05 +02:00
Reinier van der Leer  369b1d9023  fix code workspace file  2024-09-20 13:10:00 +02:00
Reinier van der Leer  241f21ab5f  fix classic-autogpts-ci.yml  2024-09-20 13:04:36 +02:00
Reinier van der Leer  7551782cd1  fix classic-autogpts-ci.yml  2024-09-20 13:00:57 +02:00
Reinier van der Leer  430835e539  fix classic-forge-ci.yml  2024-09-20 12:59:12 +02:00
Reinier van der Leer  f5040fa3ab  fix classic-benchmark-ci.yml  2024-09-20 12:58:59 +02:00
Reinier van der Leer  6ced85d203  fix classic docker CI workflows  2024-09-20 12:53:12 +02:00
Reinier van der Leer  5e1a3d5717  fix classic-autogpts-ci.yml  2024-09-20 12:50:05 +02:00
Reinier van der Leer  d35b91cde4  delete Classic AutoGPTs Nightly Benchmark  2024-09-20 12:44:27 +02:00
Reinier van der Leer  aeab5aac67  unbreak Classic AutoGPT Docker Release workflow  2024-09-20 12:42:20 +02:00
Reinier van der Leer  31cd6dc652  move back .pre-commit-config.yaml  2024-09-20 12:39:01 +02:00
Reinier van der Leer  13b82c86f5  unbreak Classic AutoGPT CI workflows  2024-09-20 12:37:56 +02:00
SwiftyOS              ff11d00f74  isort fixes  2024-09-20 12:35:53 +02:00
SwiftyOS              9d7dfb0a6d  fix type errors  2024-09-20 12:33:39 +02:00
SwiftyOS              f1bf7f269b  Merge branch 'repo-restructure' of github.com:Significant-Gravitas/AutoGPT into repo-restructure  2024-09-20 12:25:02 +02:00
SwiftyOS              46cc8ae3ea  more linting fixes  2024-09-20 12:24:55 +02:00
Reinier van der Leer  43bf6f2349  unbreak classic/.dockerignore  2024-09-20 12:23:49 +02:00
SwiftyOS              2582eb1ee8  Merge branch 'repo-restructure' of github.com:Significant-Gravitas/AutoGPT into repo-restructure  2024-09-20 12:18:04 +02:00
SwiftyOS              10cefc149f  linitng fixes  2024-09-20 12:17:56 +02:00
Reinier van der Leer  d62fe001b8  move back .pre-commit-config.yaml  2024-09-20 12:15:51 +02:00
SwiftyOS              f583a15fd0  formatting changes  2024-09-20 12:13:33 +02:00
SwiftyOS              2cad2093eb  updaing python checks ci  2024-09-20 12:04:11 +02:00
SwiftyOS              4e569f4562  add flake8 path to python checks ci  2024-09-20 11:58:52 +02:00
SwiftyOS              7f514c10cf  Merge branch 'repo-restructure' of github.com:Significant-Gravitas/AutoGPT into repo-restructure  2024-09-20 11:54:56 +02:00
SwiftyOS              d7aba4f6c0  updaate python checks ci  2024-09-20 11:54:17 +02:00
Swifty                ba30aa2fce  Merge branch 'master' into repo-restructure  2024-09-20 11:43:19 +02:00
SwiftyOS              efeba4400e  update symbolic links  2024-09-20 11:40:38 +02:00
SwiftyOS              ba206e3bec  docs update  2024-09-20 11:32:38 +02:00
SwiftyOS              be16fd90d4  fixing repo workflow checker CI  2024-09-20 11:20:03 +02:00
SwiftyOS              d10167ceab  renamed all CI's to make it clear which subproject they are for  2024-09-20 11:14:54 +02:00
SwiftyOS              d593f76437  updating CI's  2024-09-20 11:07:14 +02:00
SwiftyOS              bda938422e  fix frontend paths  2024-09-20 11:02:23 +02:00
SwiftyOS              8397b78ec2  update frontend ci  2024-09-20 10:56:10 +02:00
SwiftyOS              0d7342826b  Restructureing Repo  2024-09-20 10:48:08 +02:00
1958 changed files with 39691 additions and 258686 deletions

.deepsource.toml

@@ -1,18 +0,0 @@
version = 1
test_patterns = ["**/*.spec.ts","**/*_test.py","**/*_tests.py","**/test_*.py"]
exclude_patterns = ["classic/**"]
[[analyzers]]
name = "javascript"
[analyzers.meta]
plugins = ["react"]
environment = ["nodejs"]
[[analyzers]]
name = "python"
[analyzers.meta]
runtime_version = "3.x.x"

.dockerignore

@@ -1,65 +0,0 @@
# Ignore everything by default, selectively add things to context
*
# Platform - Libs
!autogpt_platform/autogpt_libs/autogpt_libs/
!autogpt_platform/autogpt_libs/pyproject.toml
!autogpt_platform/autogpt_libs/poetry.lock
!autogpt_platform/autogpt_libs/README.md
# Platform - Backend
!autogpt_platform/backend/backend/
!autogpt_platform/backend/test/e2e_test_data.py
!autogpt_platform/backend/migrations/
!autogpt_platform/backend/schema.prisma
!autogpt_platform/backend/pyproject.toml
!autogpt_platform/backend/poetry.lock
!autogpt_platform/backend/README.md
!autogpt_platform/backend/.env
# Platform - Market
!autogpt_platform/market/market/
!autogpt_platform/market/scripts.py
!autogpt_platform/market/schema.prisma
!autogpt_platform/market/pyproject.toml
!autogpt_platform/market/poetry.lock
!autogpt_platform/market/README.md
# Platform - Frontend
!autogpt_platform/frontend/src/
!autogpt_platform/frontend/public/
!autogpt_platform/frontend/scripts/
!autogpt_platform/frontend/package.json
!autogpt_platform/frontend/pnpm-lock.yaml
!autogpt_platform/frontend/tsconfig.json
!autogpt_platform/frontend/README.md
## config
!autogpt_platform/frontend/*.config.*
!autogpt_platform/frontend/.env.*
!autogpt_platform/frontend/.env
# Classic - AutoGPT
!classic/original_autogpt/autogpt/
!classic/original_autogpt/pyproject.toml
!classic/original_autogpt/poetry.lock
!classic/original_autogpt/README.md
!classic/original_autogpt/tests/
# Classic - Benchmark
!classic/benchmark/agbenchmark/
!classic/benchmark/pyproject.toml
!classic/benchmark/poetry.lock
!classic/benchmark/README.md
# Classic - Forge
!classic/forge/
!classic/forge/pyproject.toml
!classic/forge/poetry.lock
!classic/forge/README.md
# Classic - Frontend
!classic/frontend/build/web/
# Explicitly re-ignore some folders
.*
**/__pycache__

.github/PULL_REQUEST_TEMPLATE.md

@@ -1,39 +1,23 @@
### Background
<!-- Clearly explain the need for these changes: -->
### Changes 🏗️
<!-- Concisely describe all of the changes made in this pull request: -->
### Checklist 📋
#### For code changes:
- [ ] I have clearly listed my changes in the PR description
- [ ] I have made a test plan
- [ ] I have tested my changes according to the test plan:
<!-- Put your test plan here: -->
- [ ] ...
### Testing 🔍
> [!NOTE]
> Only for the new autogpt platform, currently in autogpt_platform/
<details>
<summary>Example test plan</summary>
- [ ] Create from scratch and execute an agent with at least 3 blocks
- [ ] Import an agent from file upload, and confirm it executes correctly
- [ ] Upload agent to marketplace
- [ ] Import an agent from marketplace and confirm it executes correctly
- [ ] Edit an agent from monitor, and confirm it executes correctly
</details>
<!--
Please make sure your changes have been tested and are in good working condition.
Here is a list of our critical paths, if you need some inspiration on what and how to test:
-->
#### For configuration changes:
- [ ] `.env.default` is updated or already compatible with my changes
- [ ] `docker-compose.yml` is updated or already compatible with my changes
- [ ] I have included a list of my configuration changes in the PR description (under **Changes**)
<details>
<summary>Examples of configuration changes</summary>
- Changing ports
- Adding new services that need to communicate with each other
- Secrets or environment variable changes
- New or infrastructure changes such as databases
</details>
- Create from scratch and execute an agent with at least 3 blocks
- Import an agent from file upload, and confirm it executes correctly
- Upload agent to marketplace
- Import an agent from marketplace and confirm it executes correctly
- Edit an agent from monitor, and confirm it executes correctly

.github/copilot-instructions.md

@@ -1,322 +0,0 @@
# GitHub Copilot Instructions for AutoGPT
This file provides comprehensive onboarding information for the GitHub Copilot coding agent to work efficiently with the AutoGPT repository.
## Repository Overview
**AutoGPT** is a powerful platform for creating, deploying, and managing continuous AI agents that automate complex workflows. This is a large monorepo (~150MB) containing multiple components:
- **AutoGPT Platform** (`autogpt_platform/`) - Main focus: Modern AI agent platform (Polyform Shield License)
- **Classic AutoGPT** (`classic/`) - Legacy agent system (MIT License)
- **Documentation** (`docs/`) - MkDocs-based documentation site
- **Infrastructure** - Docker configurations, CI/CD, and development tools
**Primary Languages & Frameworks:**
- **Backend**: Python 3.10-3.13, FastAPI, Prisma ORM, PostgreSQL, RabbitMQ
- **Frontend**: TypeScript, Next.js 15, React, Tailwind CSS, Radix UI
- **Development**: Docker, Poetry, pnpm, Playwright, Storybook
## Build and Validation Instructions
### Essential Setup Commands
**Always run these commands in the correct directory and in this order:**
1. **Initial Setup** (required once):
```bash
# Clone and enter repository
git clone <repo> && cd AutoGPT
# Start all services (database, redis, rabbitmq, clamav)
cd autogpt_platform && docker compose --profile local up deps --build --detach
```
2. **Backend Setup** (always run before backend development):
```bash
cd autogpt_platform/backend
poetry install # Install dependencies
poetry run prisma migrate dev # Run database migrations
poetry run prisma generate # Generate Prisma client
```
3. **Frontend Setup** (always run before frontend development):
```bash
cd autogpt_platform/frontend
pnpm install # Install dependencies
```
### Runtime Requirements
**Critical:** Always ensure Docker services are running before starting development:
```bash
cd autogpt_platform && docker compose --profile local up deps --build --detach
```
**Python Version:** Use Python 3.11 (required; managed by Poetry via pyproject.toml)
**Node.js Version:** Use Node.js 21+ with pnpm package manager
### Development Commands
**Backend Development:**
```bash
cd autogpt_platform/backend
poetry run serve # Start development server (port 8000)
poetry run test # Run all tests (requires ~5 minutes)
poetry run pytest path/to/test.py # Run specific test
poetry run format # Format code (Black + isort) - always run first
poetry run lint # Lint code (ruff) - run after format
```
**Frontend Development:**
```bash
cd autogpt_platform/frontend
pnpm dev # Start development server (port 3000) - use for active development
pnpm build # Build for production (only needed for E2E tests or deployment)
pnpm test # Run Playwright E2E tests (requires build first)
pnpm test-ui # Run tests with UI
pnpm format # Format and lint code
pnpm storybook # Start component development server
```
### Testing Strategy
**Backend Tests:**
- **Block Tests**: `poetry run pytest backend/blocks/test/test_block.py -xvs` (validates all blocks)
- **Specific Block**: `poetry run pytest 'backend/blocks/test/test_block.py::test_available_blocks[BlockName]' -xvs`
- **Snapshot Tests**: Use `--snapshot-update` when output changes, always review with `git diff`
**Frontend Tests:**
- **E2E Tests**: Always run `pnpm dev` before `pnpm test` (Playwright requires running instance)
- **Component Tests**: Use Storybook for isolated component development
### Critical Validation Steps
**Before committing changes:**
1. Run `poetry run format` (backend) and `pnpm format` (frontend)
2. Ensure all tests pass in modified areas
3. Verify Docker services are still running
4. Check that database migrations apply cleanly
**Common Issues & Workarounds:**
- **Prisma issues**: Run `poetry run prisma generate` after schema changes
- **Permission errors**: Ensure Docker has proper permissions
- **Port conflicts**: Check the `docker-compose.yml` file for the current list of exposed ports. You can list all mapped ports with `docker compose ps`
- **Test timeouts**: Backend tests can take 5+ minutes, use `-x` flag to stop on first failure
## Project Layout & Architecture
### Core Architecture
**AutoGPT Platform** (`autogpt_platform/`):
- `backend/` - FastAPI server with async support
- `backend/backend/` - Core API logic
- `backend/blocks/` - Agent execution blocks
- `backend/data/` - Database models and schemas
- `schema.prisma` - Database schema definition
- `frontend/` - Next.js application
- `src/app/` - App Router pages and layouts
- `src/components/` - Reusable React components
- `src/lib/` - Utilities and configurations
- `autogpt_libs/` - Shared Python utilities
- `docker-compose.yml` - Development stack orchestration
**Key Configuration Files:**
- `pyproject.toml` - Python dependencies and tooling
- `package.json` - Node.js dependencies and scripts
- `schema.prisma` - Database schema and migrations
- `next.config.mjs` - Next.js configuration
- `tailwind.config.ts` - Styling configuration
### Security & Middleware
**Cache Protection**: Backend includes middleware preventing sensitive data caching in browsers/proxies
**Authentication**: JWT-based with Supabase integration
**User ID Validation**: All data access requires user ID checks - verify this for any `data/*.py` changes
### Development Workflow
**GitHub Actions**: Multiple CI/CD workflows in `.github/workflows/`
- `platform-backend-ci.yml` - Backend testing and validation
- `platform-frontend-ci.yml` - Frontend testing and validation
- `platform-fullstack-ci.yml` - End-to-end integration tests
**Pre-commit Hooks**: Run linting and formatting checks
**Conventional Commits**: Use format `type(scope): description` (e.g., `feat(backend): add API`)
### Key Source Files
**Backend Entry Points:**
- `backend/backend/server/server.py` - FastAPI application setup
- `backend/backend/data/` - Database models and user management
- `backend/blocks/` - Agent execution blocks and logic
**Frontend Entry Points:**
- `frontend/src/app/layout.tsx` - Root application layout
- `frontend/src/app/page.tsx` - Home page
- `frontend/src/lib/supabase/` - Authentication and database client
**Protected Routes**: Update `frontend/src/lib/supabase/middleware.ts` when adding protected routes
### Agent Block System
Agents are built using a visual block-based system where each block performs a single action. Blocks are defined in `backend/blocks/` and must include:
- Block definition with input/output schemas
- Execution logic with proper error handling
- Tests validating functionality
### Database & ORM
**Prisma ORM** with PostgreSQL backend including pgvector for embeddings:
- Schema in `schema.prisma`
- Migrations in `backend/migrations/`
- Always run `prisma migrate dev` and `prisma generate` after schema changes
## Environment Configuration
### Configuration Files Priority Order
1. **Backend**: `/backend/.env.default` → `/backend/.env` (user overrides)
2. **Frontend**: `/frontend/.env.default` → `/frontend/.env` (user overrides)
3. **Platform**: `/.env.default` (Supabase/shared) → `/.env` (user overrides)
4. Docker Compose `environment:` sections override file-based config
5. Shell environment variables have highest precedence
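As a minimal sketch of that precedence in plain Python (assuming the `python-dotenv` package; this is an illustration, not the platform's actual config loader), later-merged sources win, mirroring the order above:
```python
import os

from dotenv import dotenv_values  # assumes python-dotenv is installed

# Later entries win: checked-in defaults < user overrides < shell environment.
config = {
    **dotenv_values("backend/.env.default"),  # 1. checked-in defaults
    **dotenv_values("backend/.env"),          # 2. local user overrides
    **os.environ,                             # 3. shell variables take highest precedence
}

print(config.get("DATABASE_URL"))
```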
### Docker Environment Setup
- All services use hardcoded defaults (no `${VARIABLE}` substitutions)
- The `env_file` directive loads variables INTO containers at runtime
- Backend/Frontend services use YAML anchors for consistent configuration
- Copy `.env.default` files to `.env` for local development customization
## Advanced Development Patterns
### Adding New Blocks
1. Create file in `/backend/backend/blocks/`
2. Inherit from `Block` base class with input/output schemas
3. Implement `run` method with proper error handling
4. Generate block UUID using `uuid.uuid4()`
5. Register in block registry
6. Write tests alongside block implementation
7. Consider how inputs/outputs connect with other blocks in graph editor
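A rough sketch of what steps 2-6 might look like for a trivial block. The import paths, the `SchemaField` helper, and the yield-based `run` signature are assumptions drawn from the conventions described here, not a verbatim copy of the current `Block` interface:
```python
import uuid

from backend.data.block import Block, BlockOutput, BlockSchema  # assumed import path
from backend.data.model import SchemaField  # assumed field-metadata helper


class WordCountBlock(Block):
    """Illustrative block that counts the words in a piece of text."""

    class Input(BlockSchema):
        text: str = SchemaField(description="Text to count words in")

    class Output(BlockSchema):
        word_count: int = SchemaField(description="Number of words found")

    def __init__(self):
        super().__init__(
            id=str(uuid.uuid4()),  # step 4: generate the block's UUID once
            input_schema=WordCountBlock.Input,
            output_schema=WordCountBlock.Output,
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        try:
            yield "word_count", len(input_data.text.split())
        except Exception as e:  # step 3: surface failures as an error output
            yield "error", str(e)
```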
### API Development
1. Update routes in `/backend/backend/server/routers/`
2. Add/update Pydantic models in same directory
3. Write tests alongside route files
4. For `data/*.py` changes, validate user ID checks
5. Run `poetry run test` to verify changes
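A hedged sketch of the route-plus-model pattern described above, using generic FastAPI idioms; the route path, model fields, and in-memory store are illustrative stand-ins, not code from the actual routers:
```python
from fastapi import APIRouter, Depends, HTTPException
from pydantic import BaseModel

router = APIRouter(prefix="/api/example", tags=["example"])


class AgentSummary(BaseModel):
    id: str
    name: str


async def get_user_id() -> str:
    """Hypothetical auth dependency; the real one validates the JWT."""
    return "user-123"


# Stand-in for the data layer, keyed by (agent_id, user_id).
FAKE_DB = {("agent-1", "user-123"): AgentSummary(id="agent-1", name="Demo agent")}


@router.get("/agents/{agent_id}", response_model=AgentSummary)
async def get_agent(agent_id: str, user_id: str = Depends(get_user_id)):
    # Step 4 above: every data access is scoped to the requesting user.
    agent = FAKE_DB.get((agent_id, user_id))
    if agent is None:
        raise HTTPException(status_code=404, detail="Agent not found")
    return agent
```
The router would then be registered on the FastAPI app (e.g. `app.include_router(router)`), matching how routes under `/backend/backend/server/routers/` are wired up.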
### Frontend Development
**📖 Complete Frontend Guide**: See `autogpt_platform/frontend/CONTRIBUTING.md` and `autogpt_platform/frontend/.cursorrules` for comprehensive patterns and conventions.
**Quick Reference:**
**Component Structure:**
- Separate render logic from data/behavior
- Structure: `ComponentName/ComponentName.tsx` + `useComponentName.ts` + `helpers.ts`
- Exception: Small components (3-4 lines of logic) can be inline
- Render-only components can be direct files without folders
**Data Fetching:**
- Use generated API hooks from `@/app/api/__generated__/endpoints/`
- Generated via Orval from backend OpenAPI spec
- Pattern: `use{Method}{Version}{OperationName}`
- Example: `useGetV2ListLibraryAgents`
- Regenerate with: `pnpm generate:api`
- **Never** use deprecated `BackendAPI` or `src/lib/autogpt-server-api/*`
**Code Conventions:**
- Use function declarations for components and handlers (not arrow functions)
- Only arrow functions for small inline lambdas (map, filter, etc.)
- Components: `PascalCase`, Hooks: `camelCase` with `use` prefix
- No barrel files or `index.ts` re-exports
- Minimal comments (code should be self-documenting)
**Styling:**
- Use Tailwind CSS utilities only
- Use design system components from `src/components/` (atoms, molecules, organisms)
- Never use `src/components/__legacy__/*`
- Only use Phosphor Icons (`@phosphor-icons/react`)
- Prefer design tokens over hardcoded values
**Error Handling:**
- Render errors: Use `<ErrorCard />` component
- Mutation errors: Display with toast notifications
- Manual exceptions: Use `Sentry.captureException()`
- Global error boundaries already configured
**Testing:**
- Add/update Storybook stories for UI components (`pnpm storybook`)
- Run Playwright E2E tests with `pnpm test`
- Verify in Chromatic after PR
**Architecture:**
- Default to client components ("use client")
- Server components only for SEO or extreme TTFB needs
- Use React Query for server state (via generated hooks)
- Co-locate UI state in components/hooks
### Security Guidelines
**Cache Protection Middleware** (`/backend/backend/server/middleware/security.py`):
- Default: Disables caching for ALL endpoints with `Cache-Control: no-store, no-cache, must-revalidate, private`
- Uses allow list approach for cacheable paths (static assets, health checks, public pages)
- Prevents sensitive data caching in browsers/proxies
- Add new cacheable endpoints to `CACHEABLE_PATHS`
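A minimal sketch of how such an allow-list middleware can be written with Starlette primitives; the real implementation lives at the path above and may differ in detail:
```python
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request

CACHEABLE_PATHS = {"/health", "/static"}  # illustrative allow list


class CacheProtectionMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        response = await call_next(request)
        # Only explicitly allow-listed paths may be cached; everything else is not.
        if not any(request.url.path.startswith(p) for p in CACHEABLE_PATHS):
            response.headers["Cache-Control"] = (
                "no-store, no-cache, must-revalidate, private"
            )
        return response
```
It would be registered with `app.add_middleware(CacheProtectionMiddleware)` on the FastAPI app.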
### CI/CD Alignment
The repository has comprehensive CI workflows that test:
- **Backend**: Python 3.11-3.13, services (Redis/RabbitMQ/ClamAV), Prisma migrations, Poetry lock validation
- **Frontend**: Node.js 21, pnpm, Playwright with Docker Compose stack, API schema validation
- **Integration**: Full-stack type checking and E2E testing
Match these patterns when developing locally - the copilot setup environment mirrors these CI configurations.
## Collaboration with Other AI Assistants
This repository is actively developed with assistance from Claude (via CLAUDE.md files). When working on this codebase:
- Check for existing CLAUDE.md files that provide additional context
- Follow established patterns and conventions already in the codebase
- Maintain consistency with existing code style and architecture
- Consider that changes may be reviewed and extended by both human developers and AI assistants
## Trust These Instructions
These instructions are comprehensive and tested. Only perform additional searches if:
1. Information here is incomplete for your specific task
2. You encounter errors not covered by the workarounds
3. You need to understand implementation details not covered above
For detailed platform development patterns, refer to `autogpt_platform/CLAUDE.md` and `AGENTS.md` in the repository root.

.github/dependabot.yml

@@ -1,153 +0,0 @@
version: 2
updates:
# autogpt_libs (Poetry project)
- package-ecosystem: "pip"
directory: "autogpt_platform/autogpt_libs"
schedule:
interval: "weekly"
open-pull-requests-limit: 10
target-branch: "dev"
commit-message:
prefix: "chore(libs/deps)"
prefix-development: "chore(libs/deps-dev)"
ignore:
- dependency-name: "poetry"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
# backend (Poetry project)
- package-ecosystem: "pip"
directory: "autogpt_platform/backend"
schedule:
interval: "weekly"
open-pull-requests-limit: 10
target-branch: "dev"
commit-message:
prefix: "chore(backend/deps)"
prefix-development: "chore(backend/deps-dev)"
ignore:
- dependency-name: "poetry"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
# frontend (Next.js project)
- package-ecosystem: "npm"
directory: "autogpt_platform/frontend"
schedule:
interval: "weekly"
open-pull-requests-limit: 10
target-branch: "dev"
commit-message:
prefix: "chore(frontend/deps)"
prefix-development: "chore(frontend/deps-dev)"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
# infra (Terraform)
- package-ecosystem: "terraform"
directory: "autogpt_platform/infra"
schedule:
interval: "weekly"
open-pull-requests-limit: 5
target-branch: "dev"
commit-message:
prefix: "chore(infra/deps)"
prefix-development: "chore(infra/deps-dev)"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
# GitHub Actions
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
open-pull-requests-limit: 5
target-branch: "dev"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
# Docker
- package-ecosystem: "docker"
directory: "autogpt_platform/"
schedule:
interval: "weekly"
open-pull-requests-limit: 5
target-branch: "dev"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
# Docs
- package-ecosystem: "pip"
directory: "docs/"
schedule:
interval: "weekly"
open-pull-requests-limit: 1
target-branch: "dev"
commit-message:
prefix: "chore(docs/deps)"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"

.github/labeler.yml

@@ -1,33 +1,27 @@
Classic AutoGPT Agent:
AutoGPT Agent:
- changed-files:
- any-glob-to-any-file: classic/original_autogpt/**
Classic Benchmark:
- changed-files:
- any-glob-to-any-file: classic/benchmark/**
Classic Frontend:
- changed-files:
- any-glob-to-any-file: classic/frontend/**
Forge:
- changed-files:
- any-glob-to-any-file: classic/forge/**
Benchmark:
- changed-files:
- any-glob-to-any-file: classic/benchmark/**
Frontend:
- changed-files:
- any-glob-to-any-file: classic/frontend/**
documentation:
- changed-files:
- any-glob-to-any-file: docs/**
platform/frontend:
Builder:
- changed-files:
- any-glob-to-any-file: autogpt_platform/frontend/**
- any-glob-to-any-file: autogpt_platform/autogpt_builder/**
platform/backend:
Server:
- changed-files:
- all-globs-to-any-file:
- autogpt_platform/backend/**
- '!autogpt_platform/backend/backend/blocks/**'
platform/blocks:
- changed-files:
- any-glob-to-any-file: autogpt_platform/backend/backend/blocks/**
- any-glob-to-any-file: autogpt_platform/autogpt_server/**

.github/workflows/classic-autogpt-ci.yml

@@ -2,12 +2,12 @@ name: Classic - AutoGPT CI
on:
push:
branches: [ master, dev, ci-test* ]
branches: [ master, development, ci-test* ]
paths:
- '.github/workflows/classic-autogpt-ci.yml'
- 'classic/original_autogpt/**'
pull_request:
branches: [ master, dev, release-* ]
branches: [ master, development, release-* ]
paths:
- '.github/workflows/classic-autogpt-ci.yml'
- 'classic/original_autogpt/**'
@@ -115,7 +115,6 @@ jobs:
poetry run pytest -vv \
--cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
--numprocesses=logical --durations=10 \
--junitxml=junit.xml -o junit_family=legacy \
tests/unit tests/integration
env:
CI: true
@@ -125,14 +124,8 @@ jobs:
AWS_ACCESS_KEY_ID: minioadmin
AWS_SECRET_ACCESS_KEY: minioadmin
- name: Upload test results to Codecov
if: ${{ !cancelled() }} # Run even if tests fail
uses: codecov/test-results-action@v1
with:
token: ${{ secrets.CODECOV_TOKEN }}
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v5
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: autogpt-agent,${{ runner.os }}

.github/workflows/classic-autogpt-docker-cache-clean.yml

@@ -5,7 +5,7 @@ on:
- cron: 20 4 * * 1,4
env:
BASE_BRANCH: dev
BASE_BRANCH: development
IMAGE_NAME: auto-gpt
jobs:
@@ -15,46 +15,46 @@ jobs:
matrix:
build-type: [release, dev]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- id: build
name: Build image
uses: docker/build-push-action@v6
with:
context: classic/
file: classic/Dockerfile.autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
load: true # save to docker images
# use GHA cache as read-only
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
- id: build
name: Build image
uses: docker/build-push-action@v5
with:
context: classic/
file: classic/Dockerfile.autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
load: true # save to docker images
# use GHA cache as read-only
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.schedule }}
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.schedule }}
build_type: ${{ matrix.build-type }}
build_type: ${{ matrix.build-type }}
prod_branch: master
dev_branch: dev
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}
prod_branch: master
dev_branch: development
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.sha }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }}
push_forced_label:
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.sha }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }}
push_forced_label:
new_commits_json: ${{ null }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
new_commits_json: ${{ null }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true

.github/workflows/classic-autogpt-docker-ci.yml

@@ -2,13 +2,13 @@ name: Classic - AutoGPT Docker CI
on:
push:
branches: [master, dev]
branches: [ master, development ]
paths:
- '.github/workflows/classic-autogpt-docker-ci.yml'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
pull_request:
branches: [ master, dev, release-* ]
branches: [ master, development, release-* ]
paths:
- '.github/workflows/classic-autogpt-docker-ci.yml'
- 'classic/original_autogpt/**'
@@ -34,58 +34,58 @@ jobs:
matrix:
build-type: [release, dev]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- if: runner.debug
run: |
ls -al
du -hs *
- if: runner.debug
run: |
ls -al
du -hs *
- id: build
name: Build image
uses: docker/build-push-action@v6
with:
context: classic/
file: classic/Dockerfile.autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
tags: ${{ env.IMAGE_NAME }}
labels: GIT_REVISION=${{ github.sha }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }}
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
- id: build
name: Build image
uses: docker/build-push-action@v5
with:
context: classic/
file: classic/Dockerfile.autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
tags: ${{ env.IMAGE_NAME }}
labels: GIT_REVISION=${{ github.sha }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }}
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref}}
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref}}
build_type: ${{ matrix.build-type }}
build_type: ${{ matrix.build-type }}
prod_branch: master
dev_branch: dev
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}
prod_branch: master
dev_branch: development
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.event.after }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.event.after }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}
new_commits_json: ${{ toJSON(github.event.commits) }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
new_commits_json: ${{ toJSON(github.event.commits) }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true
test:
runs-on: ubuntu-latest
@@ -117,16 +117,16 @@ jobs:
- id: build
name: Build image
uses: docker/build-push-action@v6
uses: docker/build-push-action@v5
with:
context: classic/
file: classic/Dockerfile.autogpt
build-args: BUILD_TYPE=dev # include pytest
build-args: BUILD_TYPE=dev # include pytest
tags: >
${{ env.IMAGE_NAME }},
${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}
labels: GIT_REVISION=${{ github.sha }}
load: true # save to docker images
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=autogpt-docker-dev
cache-to: type=gha,scope=autogpt-docker-dev,mode=max

.github/workflows/classic-autogpt-docker-release.yml

@@ -2,7 +2,7 @@ name: Classic - AutoGPT Docker Release
on:
release:
types: [published, edited]
types: [ published, edited ]
workflow_dispatch:
inputs:
@@ -19,69 +19,69 @@ jobs:
if: startsWith(github.ref, 'refs/tags/autogpt-')
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Checkout repository
uses: actions/checkout@v4
- name: Log in to Docker hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Log in to Docker hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
# slashes are not allowed in image tags, but can appear in git branch or tag names
- id: sanitize_tag
name: Sanitize image tag
run: |
tag=${raw_tag//\//-}
echo tag=${tag#autogpt-} >> $GITHUB_OUTPUT
env:
raw_tag: ${{ github.ref_name }}
# slashes are not allowed in image tags, but can appear in git branch or tag names
- id: sanitize_tag
name: Sanitize image tag
run: |
tag=${raw_tag//\//-}
echo tag=${tag#autogpt-} >> $GITHUB_OUTPUT
env:
raw_tag: ${{ github.ref_name }}
- id: build
name: Build image
uses: docker/build-push-action@v6
with:
context: classic/
file: Dockerfile.autogpt
build-args: BUILD_TYPE=release
load: true # save to docker images
# push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
tags: >
${{ env.IMAGE_NAME }},
${{ env.DEPLOY_IMAGE_NAME }}:latest,
${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
labels: GIT_REVISION=${{ github.sha }}
- id: build
name: Build image
uses: docker/build-push-action@v5
with:
context: classic/
file: Dockerfile.autogpt
build-args: BUILD_TYPE=release
load: true # save to docker images
# push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
tags: >
${{ env.IMAGE_NAME }},
${{ env.DEPLOY_IMAGE_NAME }}:latest,
${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
labels: GIT_REVISION=${{ github.sha }}
# cache layers in GitHub Actions cache to speed up builds
cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release
cache-to: type=gha,scope=autogpt-docker-release,mode=max
# cache layers in GitHub Actions cache to speed up builds
cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release
cache-to: type=gha,scope=autogpt-docker-release,mode=max
- name: Push image to Docker Hub
run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}
- name: Push image to Docker Hub
run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref}}
inputs_no_cache: ${{ inputs.no_cache }}
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref}}
inputs_no_cache: ${{ inputs.no_cache }}
prod_branch: master
dev_branch: dev
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}
prod_branch: master
dev_branch: development
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
ref_type: ${{ github.ref_type }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.sha }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
ref_type: ${{ github.ref_type }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.sha }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true
run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true

.github/workflows/classic-autogpts-ci.yml

@@ -5,7 +5,7 @@ on:
schedule:
- cron: '0 8 * * *'
push:
branches: [ master, dev, ci-test* ]
branches: [ master, development, ci-test* ]
paths:
- '.github/workflows/classic-autogpts-ci.yml'
- 'classic/original_autogpt/**'
@@ -16,7 +16,7 @@ on:
- 'classic/setup.py'
- '!**/*.md'
pull_request:
branches: [ master, dev, release-* ]
branches: [ master, development, release-* ]
paths:
- '.github/workflows/classic-autogpts-ci.yml'
- 'classic/original_autogpt/**'

.github/workflows/classic-benchmark-ci.yml

@@ -2,13 +2,13 @@ name: Classic - AGBenchmark CI
on:
push:
branches: [ master, dev, ci-test* ]
branches: [ master, development, ci-test* ]
paths:
- 'classic/benchmark/**'
- '!classic/benchmark/reports/**'
- .github/workflows/classic-benchmark-ci.yml
pull_request:
branches: [ master, dev, release-* ]
branches: [ master, development, release-* ]
paths:
- 'classic/benchmark/**'
- '!classic/benchmark/reports/**'
@@ -87,20 +87,13 @@ jobs:
poetry run pytest -vv \
--cov=agbenchmark --cov-branch --cov-report term-missing --cov-report xml \
--durations=10 \
--junitxml=junit.xml -o junit_family=legacy \
tests
env:
CI: true
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
- name: Upload test results to Codecov
if: ${{ !cancelled() }} # Run even if tests fail
uses: codecov/test-results-action@v1
with:
token: ${{ secrets.CODECOV_TOKEN }}
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v5
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: agbenchmark,${{ runner.os }}
@@ -109,7 +102,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
agent-name: [forge]
agent-name: [ forge ]
fail-fast: false
timeout-minutes: 20
steps:
@@ -153,23 +146,23 @@ jobs:
echo "Running the following command: poetry run agbenchmark --mock --category=coding"
poetry run agbenchmark --mock --category=coding
# echo "Running the following command: poetry run agbenchmark --test=WriteFile"
# poetry run agbenchmark --test=WriteFile
echo "Running the following command: poetry run agbenchmark --test=WriteFile"
poetry run agbenchmark --test=WriteFile
cd ../benchmark
poetry install
echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed"
export BUILD_SKILL_TREE=true
# poetry run agbenchmark --mock
poetry run agbenchmark --mock
# CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
# if [ ! -z "$CHANGED" ]; then
# echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
# echo "$CHANGED"
# exit 1
# else
# echo "No unstaged changes."
# fi
CHANGED=$(git diff --name-only | grep -E '(agclassic/benchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
if [ ! -z "$CHANGED" ]; then
echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
echo "$CHANGED"
exit 1
else
echo "No unstaged changes."
fi
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci

.github/workflows/classic-forge-ci.yml

@@ -2,13 +2,13 @@ name: Classic - Forge CI
on:
push:
branches: [ master, dev, ci-test* ]
branches: [ master, development, ci-test* ]
paths:
- '.github/workflows/classic-forge-ci.yml'
- 'classic/forge/**'
- '!classic/forge/tests/vcr_cassettes'
pull_request:
branches: [ master, dev, release-* ]
branches: [ master, development, release-* ]
paths:
- '.github/workflows/classic-forge-ci.yml'
- 'classic/forge/**'
@@ -139,7 +139,6 @@ jobs:
poetry run pytest -vv \
--cov=forge --cov-branch --cov-report term-missing --cov-report xml \
--durations=10 \
--junitxml=junit.xml -o junit_family=legacy \
forge
env:
CI: true
@@ -149,14 +148,8 @@ jobs:
AWS_ACCESS_KEY_ID: minioadmin
AWS_SECRET_ACCESS_KEY: minioadmin
- name: Upload test results to Codecov
if: ${{ !cancelled() }} # Run even if tests fail
uses: codecov/test-results-action@v1
with:
token: ${{ secrets.CODECOV_TOKEN }}
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v5
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: forge,${{ runner.os }}

.github/workflows/classic-frontend-ci.yml

@@ -4,15 +4,15 @@ on:
push:
branches:
- master
- dev
- development
- 'ci-test*' # This will match any branch that starts with "ci-test"
paths:
- 'classic/frontend/**'
- '.github/workflows/classic-frontend-ci.yml'
- '.github/workflows/frontend-ci.yml'
pull_request:
paths:
- 'classic/frontend/**'
- '.github/workflows/classic-frontend-ci.yml'
- '.github/workflows/frontend-ci.yml'
jobs:
build:
@@ -21,40 +21,40 @@ jobs:
pull-requests: write
runs-on: ubuntu-latest
env:
BUILD_BRANCH: ${{ format('classic-frontend-build/{0}', github.ref_name) }}
BUILD_BRANCH: ${{ format('frontend-build/{0}', github.ref_name) }}
steps:
- name: Checkout Repo
uses: actions/checkout@v4
- name: Checkout Repo
uses: actions/checkout@v4
- name: Setup Flutter
uses: subosito/flutter-action@v2
with:
flutter-version: '3.13.2'
- name: Setup Flutter
uses: subosito/flutter-action@v2
with:
flutter-version: '3.13.2'
- name: Build Flutter to Web
run: |
cd classic/frontend
flutter build web --base-href /app/
- name: Build Flutter to Web
run: |
cd classic/frontend
flutter build web --base-href /app/
# - name: Commit and Push to ${{ env.BUILD_BRANCH }}
# if: github.event_name == 'push'
# run: |
# git config --local user.email "action@github.com"
# git config --local user.name "GitHub Action"
# git add classic/frontend/build/web
# git checkout -B ${{ env.BUILD_BRANCH }}
# git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
# git push -f origin ${{ env.BUILD_BRANCH }}
# - name: Commit and Push to ${{ env.BUILD_BRANCH }}
# if: github.event_name == 'push'
# run: |
# git config --local user.email "action@github.com"
# git config --local user.name "GitHub Action"
# git add classic/frontend/build/web
# git checkout -B ${{ env.BUILD_BRANCH }}
# git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
# git push -f origin ${{ env.BUILD_BRANCH }}
- name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
if: github.event_name == 'push'
uses: peter-evans/create-pull-request@v7
with:
add-paths: classic/frontend/build/web
base: ${{ github.ref_name }}
branch: ${{ env.BUILD_BRANCH }}
delete-branch: true
title: "Update frontend build in `${{ github.ref_name }}`"
body: "This PR updates the frontend build based on commit ${{ github.sha }}."
commit-message: "Update frontend build based on commit ${{ github.sha }}"
- name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
if: github.event_name == 'push'
uses: peter-evans/create-pull-request@v6
with:
add-paths: classic/frontend/build/web
base: ${{ github.ref_name }}
branch: ${{ env.BUILD_BRANCH }}
delete-branch: true
title: "Update frontend build in `${{ github.ref_name }}`"
body: "This PR updates the frontend build based on commit ${{ github.sha }}."
commit-message: "Update frontend build based on commit ${{ github.sha }}"

.github/workflows/classic-python-checks-ci.yml

@@ -2,18 +2,18 @@ name: Classic - Python checks
on:
push:
branches: [ master, dev, ci-test* ]
branches: [ master, development, ci-test* ]
paths:
- '.github/workflows/classic-python-checks-ci.yml'
- '.github/workflows/lint-ci.yml'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
- 'classic/benchmark/**'
- '**.py'
- '!classic/forge/tests/vcr_cassettes'
pull_request:
branches: [ master, dev, release-* ]
branches: [ master, development, release-* ]
paths:
- '.github/workflows/classic-python-checks-ci.yml'
- '.github/workflows/lint-ci.yml'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
- 'classic/benchmark/**'
@@ -21,7 +21,7 @@ on:
- '!classic/forge/tests/vcr_cassettes'
concurrency:
group: ${{ format('classic-python-checks-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
group: ${{ format('lint-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
defaults:

View File

@@ -1,97 +0,0 @@
name: Auto Fix CI Failures
on:
workflow_run:
workflows: ["CI"]
types:
- completed
permissions:
contents: write
pull-requests: write
actions: read
issues: write
id-token: write # Required for OIDC token exchange
jobs:
auto-fix:
if: |
github.event.workflow_run.conclusion == 'failure' &&
github.event.workflow_run.pull_requests[0] &&
!startsWith(github.event.workflow_run.head_branch, 'claude-auto-fix-ci-')
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
ref: ${{ github.event.workflow_run.head_branch }}
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Setup git identity
run: |
git config --global user.email "claude[bot]@users.noreply.github.com"
git config --global user.name "claude[bot]"
- name: Create fix branch
id: branch
run: |
BRANCH_NAME="claude-auto-fix-ci-${{ github.event.workflow_run.head_branch }}-${{ github.run_id }}"
git checkout -b "$BRANCH_NAME"
echo "branch_name=$BRANCH_NAME" >> $GITHUB_OUTPUT
- name: Get CI failure details
id: failure_details
uses: actions/github-script@v7
with:
script: |
const run = await github.rest.actions.getWorkflowRun({
owner: context.repo.owner,
repo: context.repo.repo,
run_id: ${{ github.event.workflow_run.id }}
});
const jobs = await github.rest.actions.listJobsForWorkflowRun({
owner: context.repo.owner,
repo: context.repo.repo,
run_id: ${{ github.event.workflow_run.id }}
});
const failedJobs = jobs.data.jobs.filter(job => job.conclusion === 'failure');
let errorLogs = [];
for (const job of failedJobs) {
const logs = await github.rest.actions.downloadJobLogsForWorkflowRun({
owner: context.repo.owner,
repo: context.repo.repo,
job_id: job.id
});
errorLogs.push({
jobName: job.name,
logs: logs.data
});
}
return {
runUrl: run.data.html_url,
failedJobs: failedJobs.map(j => j.name),
errorLogs: errorLogs
};
- name: Fix CI failures with Claude
id: claude
uses: anthropics/claude-code-action@v1
with:
prompt: |
/fix-ci
Failed CI Run: ${{ fromJSON(steps.failure_details.outputs.result).runUrl }}
Failed Jobs: ${{ join(fromJSON(steps.failure_details.outputs.result).failedJobs, ', ') }}
PR Number: ${{ github.event.workflow_run.pull_requests[0].number }}
Branch Name: ${{ steps.branch.outputs.branch_name }}
Base Branch: ${{ github.event.workflow_run.head_branch }}
Repository: ${{ github.repository }}
Error logs:
${{ toJSON(fromJSON(steps.failure_details.outputs.result).errorLogs) }}
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
claude_args: "--allowedTools 'Edit,MultiEdit,Write,Read,Glob,Grep,LS,Bash(git:*),Bash(bun:*),Bash(npm:*),Bash(npx:*),Bash(gh:*)'"

View File

@@ -1,379 +0,0 @@
# Claude Dependabot PR Review Workflow
#
# This workflow automatically runs Claude analysis on Dependabot PRs to:
# - Identify dependency changes and their versions
# - Look up changelogs for updated packages
# - Assess breaking changes and security impacts
# - Provide actionable recommendations for the development team
#
# Triggered on: Dependabot PRs (opened, synchronize)
# Requirements: ANTHROPIC_API_KEY secret must be configured
name: Claude Dependabot PR Review
on:
pull_request:
types: [opened, synchronize]
jobs:
dependabot-review:
# Only run on Dependabot PRs
if: github.actor == 'dependabot[bot]'
runs-on: ubuntu-latest
timeout-minutes: 30
permissions:
contents: write
pull-requests: read
issues: read
id-token: write
actions: read # Required for CI access
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 1
# Backend Python/Poetry setup (mirrors platform-backend-ci.yml)
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.11" # Use standard version matching CI
- name: Set up Python dependency cache
uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
- name: Install Poetry
run: |
# Extract Poetry version from backend/poetry.lock (matches CI)
cd autogpt_platform/backend
HEAD_POETRY_VERSION=$(python3 ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
echo "Found Poetry version ${HEAD_POETRY_VERSION} in backend/poetry.lock"
# Install Poetry
curl -sSL https://install.python-poetry.org | POETRY_VERSION=$HEAD_POETRY_VERSION python3 -
# Add Poetry to PATH
echo "$HOME/.local/bin" >> $GITHUB_PATH
- name: Check poetry.lock
working-directory: autogpt_platform/backend
run: |
poetry lock
if ! git diff --quiet --ignore-matching-lines="^# " poetry.lock; then
echo "Warning: poetry.lock not up to date, but continuing for setup"
git checkout poetry.lock # Reset for clean setup
fi
- name: Install Python dependencies
working-directory: autogpt_platform/backend
run: poetry install
- name: Generate Prisma Client
working-directory: autogpt_platform/backend
run: poetry run prisma generate
# Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "21"
- name: Enable corepack
run: corepack enable
- name: Set pnpm store directory
run: |
pnpm config set store-dir ~/.pnpm-store
echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV
- name: Cache frontend dependencies
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-
- name: Install JavaScript dependencies
working-directory: autogpt_platform/frontend
run: pnpm install --frozen-lockfile
# Install Playwright browsers for frontend testing
# NOTE: Disabled to save ~1 minute of setup time. Re-enable if Copilot needs browser automation (e.g., for MCP)
# - name: Install Playwright browsers
# working-directory: autogpt_platform/frontend
# run: pnpm playwright install --with-deps chromium
# Docker setup for development environment
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Copy default environment files
working-directory: autogpt_platform
run: |
# Copy default environment files for development
cp .env.default .env
cp backend/.env.default backend/.env
cp frontend/.env.default frontend/.env
# Phase 1: Cache and load Docker images for faster setup
- name: Set up Docker image cache
id: docker-cache
uses: actions/cache@v4
with:
path: ~/docker-cache
# Use a versioned key for cache invalidation when image list changes
key: docker-images-v2-${{ runner.os }}-${{ hashFiles('.github/workflows/copilot-setup-steps.yml') }}
restore-keys: |
docker-images-v2-${{ runner.os }}-
docker-images-v1-${{ runner.os }}-
- name: Load or pull Docker images
working-directory: autogpt_platform
run: |
mkdir -p ~/docker-cache
# Define image list for easy maintenance
IMAGES=(
"redis:latest"
"rabbitmq:management"
"clamav/clamav-debian:latest"
"busybox:latest"
"kong:2.8.1"
"supabase/gotrue:v2.170.0"
"supabase/postgres:15.8.1.049"
"supabase/postgres-meta:v0.86.1"
"supabase/studio:20250224-d10db0f"
)
# Check if any cached tar files exist (more reliable than cache-hit)
if ls ~/docker-cache/*.tar 1> /dev/null 2>&1; then
echo "Docker cache found, loading images in parallel..."
for image in "${IMAGES[@]}"; do
# Convert image name to filename (replace : and / with -)
filename=$(echo "$image" | tr ':/' '--')
if [ -f ~/docker-cache/${filename}.tar ]; then
echo "Loading $image..."
docker load -i ~/docker-cache/${filename}.tar || echo "Warning: Failed to load $image from cache" &
fi
done
wait
echo "All cached images loaded"
else
echo "No Docker cache found, pulling images in parallel..."
# Pull all images in parallel
for image in "${IMAGES[@]}"; do
docker pull "$image" &
done
wait
# Only save cache on main branches (not PRs) to avoid cache pollution
if [[ "${{ github.ref }}" == "refs/heads/master" ]] || [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then
echo "Saving Docker images to cache in parallel..."
for image in "${IMAGES[@]}"; do
# Convert image name to filename (replace : and / with -)
filename=$(echo "$image" | tr ':/' '--')
echo "Saving $image..."
docker save -o ~/docker-cache/${filename}.tar "$image" || echo "Warning: Failed to save $image" &
done
wait
echo "Docker image cache saved"
else
echo "Skipping cache save for PR/feature branch"
fi
fi
echo "Docker images ready for use"
# Phase 2: Build migrate service with GitHub Actions cache
- name: Build migrate Docker image with cache
working-directory: autogpt_platform
run: |
# Build the migrate image with buildx for GHA caching
docker buildx build \
--cache-from type=gha \
--cache-to type=gha,mode=max \
--target migrate \
--tag autogpt_platform-migrate:latest \
--load \
-f backend/Dockerfile \
..
# Start services using pre-built images
- name: Start Docker services for development
working-directory: autogpt_platform
run: |
# Start essential services (migrate image already built with correct tag)
docker compose --profile local up deps --no-build --detach
echo "Waiting for services to be ready..."
# Wait for database to be ready
echo "Checking database readiness..."
timeout 30 sh -c 'until docker compose exec -T db pg_isready -U postgres 2>/dev/null; do
echo " Waiting for database..."
sleep 2
done' && echo "✅ Database is ready" || echo "⚠️ Database ready check timeout after 30s, continuing..."
# Check migrate service status
echo "Checking migration status..."
docker compose ps migrate || echo " Migrate service not visible in ps output"
# Wait for migrate service to complete
echo "Waiting for migrations to complete..."
timeout 30 bash -c '
ATTEMPTS=0
while [ $ATTEMPTS -lt 15 ]; do
ATTEMPTS=$((ATTEMPTS + 1))
# Check using docker directly (more reliable than docker compose ps)
CONTAINER_STATUS=$(docker ps -a --filter "label=com.docker.compose.service=migrate" --format "{{.Status}}" | head -1)
if [ -z "$CONTAINER_STATUS" ]; then
echo " Attempt $ATTEMPTS: Migrate container not found yet..."
elif echo "$CONTAINER_STATUS" | grep -q "Exited (0)"; then
echo "✅ Migrations completed successfully"
docker compose logs migrate --tail=5 2>/dev/null || true
exit 0
elif echo "$CONTAINER_STATUS" | grep -q "Exited ([1-9]"; then
EXIT_CODE=$(echo "$CONTAINER_STATUS" | grep -oE "Exited \([0-9]+\)" | grep -oE "[0-9]+")
echo "❌ Migrations failed with exit code: $EXIT_CODE"
echo "Migration logs:"
docker compose logs migrate --tail=20 2>/dev/null || true
exit 1
elif echo "$CONTAINER_STATUS" | grep -q "Up"; then
echo " Attempt $ATTEMPTS: Migrate container is running... ($CONTAINER_STATUS)"
else
echo " Attempt $ATTEMPTS: Migrate container status: $CONTAINER_STATUS"
fi
sleep 2
done
echo "⚠️ Timeout: Could not determine migration status after 30 seconds"
echo "Final container check:"
docker ps -a --filter "label=com.docker.compose.service=migrate" || true
echo "Migration logs (if available):"
docker compose logs migrate --tail=10 2>/dev/null || echo " No logs available"
' || echo "⚠️ Migration check completed with warnings, continuing..."
# Brief wait for other services to stabilize
echo "Waiting 5 seconds for other services to stabilize..."
sleep 5
# Verify installations and provide environment info
- name: Verify setup and show environment info
run: |
echo "=== Python Setup ==="
python --version
poetry --version
echo "=== Node.js Setup ==="
node --version
pnpm --version
echo "=== Additional Tools ==="
docker --version
docker compose version
gh --version || true
echo "=== Services Status ==="
cd autogpt_platform
docker compose ps || true
echo "=== Backend Dependencies ==="
cd backend
poetry show | head -10 || true
echo "=== Frontend Dependencies ==="
cd ../frontend
pnpm list --depth=0 | head -10 || true
echo "=== Environment Files ==="
ls -la ../.env* || true
ls -la .env* || true
ls -la ../backend/.env* || true
echo "✅ AutoGPT Platform development environment setup complete!"
echo "🚀 Ready for development with Docker services running"
echo "📝 Backend server: poetry run serve (port 8000)"
echo "🌐 Frontend server: pnpm dev (port 3000)"
- name: Run Claude Dependabot Analysis
id: claude_review
uses: anthropics/claude-code-action@v1
with:
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
claude_args: |
--allowedTools "Bash(npm:*),Bash(pnpm:*),Bash(poetry:*),Bash(git:*),Edit,Replace,NotebookEditCell,mcp__github_inline_comment__create_inline_comment,Bash(gh pr comment:*), Bash(gh pr diff:*), Bash(gh pr view:*)"
prompt: |
You are Claude, an AI assistant specialized in reviewing Dependabot dependency update PRs.
Your primary tasks are:
1. **Analyze the dependency changes** in this Dependabot PR
2. **Look up changelogs** for all updated dependencies to understand what changed
3. **Identify breaking changes** and assess potential impact on the AutoGPT codebase
4. **Provide actionable recommendations** for the development team
## Analysis Process:
1. **Identify Changed Dependencies**:
- Use git diff to see what dependencies were updated
- Parse package.json, poetry.lock, requirements files, etc.
- List all package versions: old → new
2. **Changelog Research**:
- For each updated dependency, look up its changelog/release notes
- Use WebFetch to access GitHub releases, NPM package pages, and PyPI project pages; the PR description should also contain useful details
- Focus on versions between the old and new versions
- Identify: breaking changes, deprecations, security fixes, new features
3. **Breaking Change Assessment**:
- Categorize changes: BREAKING, MAJOR, MINOR, PATCH, SECURITY
- Assess impact on AutoGPT's usage patterns
- Check if AutoGPT uses affected APIs/features
- Look for migration guides or upgrade instructions
4. **Codebase Impact Analysis**:
- Search the AutoGPT codebase for usage of changed APIs
- Identify files that might be affected by breaking changes
- Check test files for deprecated usage patterns
- Look for configuration changes needed
## Output Format:
Provide a comprehensive review comment with:
### 🔍 Dependency Analysis Summary
- List of updated packages with version changes
- Overall risk assessment (LOW/MEDIUM/HIGH)
### 📋 Detailed Changelog Review
For each updated dependency:
- **Package**: name (old_version → new_version)
- **Changes**: Summary of key changes
- **Breaking Changes**: List any breaking changes
- **Security Fixes**: Note security improvements
- **Migration Notes**: Any upgrade steps needed
### ⚠️ Impact Assessment
- **Breaking Changes Found**: Yes/No with details
- **Affected Files**: List AutoGPT files that may need updates
- **Test Impact**: Any tests that may need updating
- **Configuration Changes**: Required config updates
### 🛠️ Recommendations
- **Action Required**: What the team should do
- **Testing Focus**: Areas to test thoroughly
- **Follow-up Tasks**: Any additional work needed
- **Merge Recommendation**: APPROVE/REVIEW_NEEDED/HOLD
### 📚 Useful Links
- Links to relevant changelogs, migration guides, documentation
Be thorough but concise. Focus on actionable insights that help the development team make informed decisions about the dependency updates.

View File
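
A minimal sketch of step 1 of the review prompt above (listing old → new dependency versions), assuming two poetry.lock revisions have been checked out to local paths; the file paths and helper names are illustrative, not part of the workflow:

# Sketch: diff two poetry.lock revisions and report version changes.
# Requires Python 3.11+ for tomllib (matching the CI's Python version).
import sys
import tomllib


def lockfile_versions(path: str) -> dict[str, str]:
    """Map package name -> version from a poetry.lock file."""
    with open(path, "rb") as f:
        data = tomllib.load(f)
    return {p["name"].lower(): p["version"] for p in data.get("package", [])}


def diff_lockfiles(old_path: str, new_path: str) -> None:
    old, new = lockfile_versions(old_path), lockfile_versions(new_path)
    for name in sorted(old.keys() | new.keys()):
        before, after = old.get(name), new.get(name)
        if before != after:
            print(f"{name}: {before or '(added)'} -> {after or '(removed)'}")


if __name__ == "__main__":
    # e.g. git show origin/master:backend/poetry.lock > old.lock
    diff_lockfiles(sys.argv[1], sys.argv[2])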

@@ -1,325 +0,0 @@
name: Claude Code
on:
issue_comment:
types: [created]
pull_request_review_comment:
types: [created]
issues:
types: [opened, assigned]
pull_request_review:
types: [submitted]
jobs:
claude:
if: |
(
(github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
(github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
(github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
(github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
) && (
github.event.comment.author_association == 'OWNER' ||
github.event.comment.author_association == 'MEMBER' ||
github.event.comment.author_association == 'COLLABORATOR' ||
github.event.review.author_association == 'OWNER' ||
github.event.review.author_association == 'MEMBER' ||
github.event.review.author_association == 'COLLABORATOR' ||
github.event.issue.author_association == 'OWNER' ||
github.event.issue.author_association == 'MEMBER' ||
github.event.issue.author_association == 'COLLABORATOR'
)
runs-on: ubuntu-latest
timeout-minutes: 45
permissions:
contents: write
pull-requests: read
issues: read
id-token: write
actions: read # Required for CI access
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 1
# Backend Python/Poetry setup (mirrors platform-backend-ci.yml)
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.11" # Use standard version matching CI
- name: Set up Python dependency cache
uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
- name: Install Poetry
run: |
# Extract Poetry version from backend/poetry.lock (matches CI)
cd autogpt_platform/backend
HEAD_POETRY_VERSION=$(python3 ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
echo "Found Poetry version ${HEAD_POETRY_VERSION} in backend/poetry.lock"
# Install Poetry
curl -sSL https://install.python-poetry.org | POETRY_VERSION=$HEAD_POETRY_VERSION python3 -
# Add Poetry to PATH
echo "$HOME/.local/bin" >> $GITHUB_PATH
- name: Check poetry.lock
working-directory: autogpt_platform/backend
run: |
poetry lock
if ! git diff --quiet --ignore-matching-lines="^# " poetry.lock; then
echo "Warning: poetry.lock not up to date, but continuing for setup"
git checkout poetry.lock # Reset for clean setup
fi
- name: Install Python dependencies
working-directory: autogpt_platform/backend
run: poetry install
- name: Generate Prisma Client
working-directory: autogpt_platform/backend
run: poetry run prisma generate
# Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "21"
- name: Enable corepack
run: corepack enable
- name: Set pnpm store directory
run: |
pnpm config set store-dir ~/.pnpm-store
echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV
- name: Cache frontend dependencies
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-
- name: Install JavaScript dependencies
working-directory: autogpt_platform/frontend
run: pnpm install --frozen-lockfile
# Install Playwright browsers for frontend testing
# NOTE: Disabled to save ~1 minute of setup time. Re-enable if Copilot needs browser automation (e.g., for MCP)
# - name: Install Playwright browsers
# working-directory: autogpt_platform/frontend
# run: pnpm playwright install --with-deps chromium
# Docker setup for development environment
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Copy default environment files
working-directory: autogpt_platform
run: |
# Copy default environment files for development
cp .env.default .env
cp backend/.env.default backend/.env
cp frontend/.env.default frontend/.env
# Phase 1: Cache and load Docker images for faster setup
- name: Set up Docker image cache
id: docker-cache
uses: actions/cache@v4
with:
path: ~/docker-cache
# Use a versioned key for cache invalidation when image list changes
key: docker-images-v2-${{ runner.os }}-${{ hashFiles('.github/workflows/copilot-setup-steps.yml') }}
restore-keys: |
docker-images-v2-${{ runner.os }}-
docker-images-v1-${{ runner.os }}-
- name: Load or pull Docker images
working-directory: autogpt_platform
run: |
mkdir -p ~/docker-cache
# Define image list for easy maintenance
IMAGES=(
"redis:latest"
"rabbitmq:management"
"clamav/clamav-debian:latest"
"busybox:latest"
"kong:2.8.1"
"supabase/gotrue:v2.170.0"
"supabase/postgres:15.8.1.049"
"supabase/postgres-meta:v0.86.1"
"supabase/studio:20250224-d10db0f"
)
# Check if any cached tar files exist (more reliable than cache-hit)
if ls ~/docker-cache/*.tar 1> /dev/null 2>&1; then
echo "Docker cache found, loading images in parallel..."
for image in "${IMAGES[@]}"; do
# Convert image name to filename (replace : and / with -)
filename=$(echo "$image" | tr ':/' '--')
if [ -f ~/docker-cache/${filename}.tar ]; then
echo "Loading $image..."
docker load -i ~/docker-cache/${filename}.tar || echo "Warning: Failed to load $image from cache" &
fi
done
wait
echo "All cached images loaded"
else
echo "No Docker cache found, pulling images in parallel..."
# Pull all images in parallel
for image in "${IMAGES[@]}"; do
docker pull "$image" &
done
wait
# Only save cache on main branches (not PRs) to avoid cache pollution
if [[ "${{ github.ref }}" == "refs/heads/master" ]] || [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then
echo "Saving Docker images to cache in parallel..."
for image in "${IMAGES[@]}"; do
# Convert image name to filename (replace : and / with -)
filename=$(echo "$image" | tr ':/' '--')
echo "Saving $image..."
docker save -o ~/docker-cache/${filename}.tar "$image" || echo "Warning: Failed to save $image" &
done
wait
echo "Docker image cache saved"
else
echo "Skipping cache save for PR/feature branch"
fi
fi
echo "Docker images ready for use"
# Phase 2: Build migrate service with GitHub Actions cache
- name: Build migrate Docker image with cache
working-directory: autogpt_platform
run: |
# Build the migrate image with buildx for GHA caching
docker buildx build \
--cache-from type=gha \
--cache-to type=gha,mode=max \
--target migrate \
--tag autogpt_platform-migrate:latest \
--load \
-f backend/Dockerfile \
..
# Start services using pre-built images
- name: Start Docker services for development
working-directory: autogpt_platform
run: |
# Start essential services (migrate image already built with correct tag)
docker compose --profile local up deps --no-build --detach
echo "Waiting for services to be ready..."
# Wait for database to be ready
echo "Checking database readiness..."
timeout 30 sh -c 'until docker compose exec -T db pg_isready -U postgres 2>/dev/null; do
echo " Waiting for database..."
sleep 2
done' && echo "✅ Database is ready" || echo "⚠️ Database ready check timed out after 30s, continuing..."
# Check migrate service status
echo "Checking migration status..."
docker compose ps migrate || echo " Migrate service not visible in ps output"
# Wait for migrate service to complete
echo "Waiting for migrations to complete..."
timeout 30 bash -c '
ATTEMPTS=0
while [ $ATTEMPTS -lt 15 ]; do
ATTEMPTS=$((ATTEMPTS + 1))
# Check using docker directly (more reliable than docker compose ps)
CONTAINER_STATUS=$(docker ps -a --filter "label=com.docker.compose.service=migrate" --format "{{.Status}}" | head -1)
if [ -z "$CONTAINER_STATUS" ]; then
echo " Attempt $ATTEMPTS: Migrate container not found yet..."
elif echo "$CONTAINER_STATUS" | grep -q "Exited (0)"; then
echo "✅ Migrations completed successfully"
docker compose logs migrate --tail=5 2>/dev/null || true
exit 0
elif echo "$CONTAINER_STATUS" | grep -q "Exited ([1-9]"; then
EXIT_CODE=$(echo "$CONTAINER_STATUS" | grep -oE "Exited \([0-9]+\)" | grep -oE "[0-9]+")
echo "❌ Migrations failed with exit code: $EXIT_CODE"
echo "Migration logs:"
docker compose logs migrate --tail=20 2>/dev/null || true
exit 1
elif echo "$CONTAINER_STATUS" | grep -q "Up"; then
echo " Attempt $ATTEMPTS: Migrate container is running... ($CONTAINER_STATUS)"
else
echo " Attempt $ATTEMPTS: Migrate container status: $CONTAINER_STATUS"
fi
sleep 2
done
echo "⚠️ Timeout: Could not determine migration status after 30 seconds"
echo "Final container check:"
docker ps -a --filter "label=com.docker.compose.service=migrate" || true
echo "Migration logs (if available):"
docker compose logs migrate --tail=10 2>/dev/null || echo " No logs available"
' || echo "⚠️ Migration check completed with warnings, continuing..."
# Brief wait for other services to stabilize
echo "Waiting 5 seconds for other services to stabilize..."
sleep 5
# Verify installations and provide environment info
- name: Verify setup and show environment info
run: |
echo "=== Python Setup ==="
python --version
poetry --version
echo "=== Node.js Setup ==="
node --version
pnpm --version
echo "=== Additional Tools ==="
docker --version
docker compose version
gh --version || true
echo "=== Services Status ==="
cd autogpt_platform
docker compose ps || true
echo "=== Backend Dependencies ==="
cd backend
poetry show | head -10 || true
echo "=== Frontend Dependencies ==="
cd ../frontend
pnpm list --depth=0 | head -10 || true
echo "=== Environment Files ==="
ls -la ../.env* || true
ls -la .env* || true
ls -la ../backend/.env* || true
echo "✅ AutoGPT Platform development environment setup complete!"
echo "🚀 Ready for development with Docker services running"
echo "📝 Backend server: poetry run serve (port 8000)"
echo "🌐 Frontend server: pnpm dev (port 3000)"
- name: Run Claude Code
id: claude
uses: anthropics/claude-code-action@v1
with:
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
claude_args: |
--allowedTools "Bash(npm:*),Bash(pnpm:*),Bash(poetry:*),Bash(git:*),Edit,Replace,NotebookEditCell,mcp__github_inline_comment__create_inline_comment,Bash(gh pr comment:*),Bash(gh pr diff:*),Bash(gh pr view:*),Bash(gh pr edit:*)"
--model opus
additional_permissions: |
actions: read

View File
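
The `if:` condition in the deleted Claude Code workflow above gates the job on two things: an "@claude" mention in the triggering text, and a trusted author association. A rough sketch of the same predicate, with event shapes simplified for illustration:

# Sketch: the trigger gating logic of the Claude Code workflow.
TRUSTED = {"OWNER", "MEMBER", "COLLABORATOR"}


def should_run(event_name: str, event: dict) -> bool:
    if event_name in ("issue_comment", "pull_request_review_comment"):
        body = event["comment"]["body"]
        association = event["comment"]["author_association"]
    elif event_name == "pull_request_review":
        body = event["review"]["body"] or ""
        association = event["review"]["author_association"]
    elif event_name == "issues":
        issue = event["issue"]
        body = (issue["body"] or "") + " " + issue["title"]
        association = issue["author_association"]
    else:
        return False
    return "@claude" in body and association in TRUSTED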

@@ -1,98 +0,0 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ "master", "release-*", "dev" ]
pull_request:
branches: [ "master", "release-*", "dev" ]
merge_group:
schedule:
- cron: '15 4 * * 0'
jobs:
analyze:
name: Analyze (${{ matrix.language }})
# Runner size impacts CodeQL analysis time. To learn more, please see:
# - https://gh.io/recommended-hardware-resources-for-running-codeql
# - https://gh.io/supported-runners-and-hardware-resources
# - https://gh.io/using-larger-runners (GitHub.com only)
# Consider using larger runners or machines with greater resources for possible analysis time improvements.
runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
permissions:
# required for all workflows
security-events: write
# required to fetch internal or private CodeQL packs
packages: read
# only required for workflows in private repositories
actions: read
contents: read
strategy:
fail-fast: false
matrix:
include:
- language: typescript
build-mode: none
- language: python
build-mode: none
# CodeQL supports the following values for 'language': 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift'
# Use `c-cpp` to analyze code written in C, C++ or both
# Use 'java-kotlin' to analyze code written in Java, Kotlin or both
# Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
# To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
# see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
# If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
# your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
steps:
- name: Checkout repository
uses: actions/checkout@v4
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
build-mode: ${{ matrix.build-mode }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
config: |
paths-ignore:
- classic/frontend/build/**
# For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality
# If the analyze step fails for one of the languages you are analyzing with
# "We were unable to automatically build your code", modify the matrix above
# to set the build mode to "manual" for that language. Then modify this step
# to build your code.
# Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
- if: matrix.build-mode == 'manual'
shell: bash
run: |
echo 'If you are using a "manual" build mode for one or more of the' \
'languages you are analyzing, replace this with the commands to build' \
'your code, for example:'
echo ' make bootstrap'
echo ' make release'
exit 1
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
with:
category: "/language:${{matrix.language}}"

View File

@@ -1,302 +0,0 @@
name: "Copilot Setup Steps"
# Automatically run the setup steps when they are changed to allow for easy validation, and
# allow manual testing through the repository's "Actions" tab
on:
workflow_dispatch:
push:
paths:
- .github/workflows/copilot-setup-steps.yml
pull_request:
paths:
- .github/workflows/copilot-setup-steps.yml
jobs:
# The job MUST be called `copilot-setup-steps` or it will not be picked up by Copilot.
copilot-setup-steps:
runs-on: ubuntu-latest
timeout-minutes: 45
# Set the permissions to the lowest permissions possible needed for your steps.
# Copilot will be given its own token for its operations.
permissions:
# If you want to clone the repository as part of your setup steps, for example to install dependencies, you'll need the `contents: read` permission. If you don't clone the repository in your setup steps, Copilot will do this for you automatically after the steps complete.
contents: read
# You can define any steps you want, and they will run before the agent starts.
# If you do not check out your code, Copilot will do this for you.
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
# Backend Python/Poetry setup (mirrors platform-backend-ci.yml)
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.11" # Use standard version matching CI
- name: Set up Python dependency cache
uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
- name: Install Poetry
run: |
# Extract Poetry version from backend/poetry.lock (matches CI)
cd autogpt_platform/backend
HEAD_POETRY_VERSION=$(python3 ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
echo "Found Poetry version ${HEAD_POETRY_VERSION} in backend/poetry.lock"
# Install Poetry
curl -sSL https://install.python-poetry.org | POETRY_VERSION=$HEAD_POETRY_VERSION python3 -
# Add Poetry to PATH
echo "$HOME/.local/bin" >> $GITHUB_PATH
- name: Check poetry.lock
working-directory: autogpt_platform/backend
run: |
poetry lock
if ! git diff --quiet --ignore-matching-lines="^# " poetry.lock; then
echo "Warning: poetry.lock not up to date, but continuing for setup"
git checkout poetry.lock # Reset for clean setup
fi
- name: Install Python dependencies
working-directory: autogpt_platform/backend
run: poetry install
- name: Generate Prisma Client
working-directory: autogpt_platform/backend
run: poetry run prisma generate
# Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "21"
- name: Enable corepack
run: corepack enable
- name: Set pnpm store directory
run: |
pnpm config set store-dir ~/.pnpm-store
echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV
- name: Cache frontend dependencies
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-
- name: Install JavaScript dependencies
working-directory: autogpt_platform/frontend
run: pnpm install --frozen-lockfile
# Install Playwright browsers for frontend testing
# NOTE: Disabled to save ~1 minute of setup time. Re-enable if Copilot needs browser automation (e.g., for MCP)
# - name: Install Playwright browsers
# working-directory: autogpt_platform/frontend
# run: pnpm playwright install --with-deps chromium
# Docker setup for development environment
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Copy default environment files
working-directory: autogpt_platform
run: |
# Copy default environment files for development
cp .env.default .env
cp backend/.env.default backend/.env
cp frontend/.env.default frontend/.env
# Phase 1: Cache and load Docker images for faster setup
- name: Set up Docker image cache
id: docker-cache
uses: actions/cache@v4
with:
path: ~/docker-cache
# Use a versioned key for cache invalidation when image list changes
key: docker-images-v2-${{ runner.os }}-${{ hashFiles('.github/workflows/copilot-setup-steps.yml') }}
restore-keys: |
docker-images-v2-${{ runner.os }}-
docker-images-v1-${{ runner.os }}-
- name: Load or pull Docker images
working-directory: autogpt_platform
run: |
mkdir -p ~/docker-cache
# Define image list for easy maintenance
IMAGES=(
"redis:latest"
"rabbitmq:management"
"clamav/clamav-debian:latest"
"busybox:latest"
"kong:2.8.1"
"supabase/gotrue:v2.170.0"
"supabase/postgres:15.8.1.049"
"supabase/postgres-meta:v0.86.1"
"supabase/studio:20250224-d10db0f"
)
# Check if any cached tar files exist (more reliable than cache-hit)
if ls ~/docker-cache/*.tar 1> /dev/null 2>&1; then
echo "Docker cache found, loading images in parallel..."
for image in "${IMAGES[@]}"; do
# Convert image name to filename (replace : and / with -)
filename=$(echo "$image" | tr ':/' '--')
if [ -f ~/docker-cache/${filename}.tar ]; then
echo "Loading $image..."
docker load -i ~/docker-cache/${filename}.tar || echo "Warning: Failed to load $image from cache" &
fi
done
wait
echo "All cached images loaded"
else
echo "No Docker cache found, pulling images in parallel..."
# Pull all images in parallel
for image in "${IMAGES[@]}"; do
docker pull "$image" &
done
wait
# Only save cache on main branches (not PRs) to avoid cache pollution
if [[ "${{ github.ref }}" == "refs/heads/master" ]] || [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then
echo "Saving Docker images to cache in parallel..."
for image in "${IMAGES[@]}"; do
# Convert image name to filename (replace : and / with -)
filename=$(echo "$image" | tr ':/' '--')
echo "Saving $image..."
docker save -o ~/docker-cache/${filename}.tar "$image" || echo "Warning: Failed to save $image" &
done
wait
echo "Docker image cache saved"
else
echo "Skipping cache save for PR/feature branch"
fi
fi
echo "Docker images ready for use"
# Phase 2: Build migrate service with GitHub Actions cache
- name: Build migrate Docker image with cache
working-directory: autogpt_platform
run: |
# Build the migrate image with buildx for GHA caching
docker buildx build \
--cache-from type=gha \
--cache-to type=gha,mode=max \
--target migrate \
--tag autogpt_platform-migrate:latest \
--load \
-f backend/Dockerfile \
..
# Start services using pre-built images
- name: Start Docker services for development
working-directory: autogpt_platform
run: |
# Start essential services (migrate image already built with correct tag)
docker compose --profile local up deps --no-build --detach
echo "Waiting for services to be ready..."
# Wait for database to be ready
echo "Checking database readiness..."
timeout 30 sh -c 'until docker compose exec -T db pg_isready -U postgres 2>/dev/null; do
echo " Waiting for database..."
sleep 2
done' && echo "✅ Database is ready" || echo "⚠️ Database ready check timed out after 30s, continuing..."
# Check migrate service status
echo "Checking migration status..."
docker compose ps migrate || echo " Migrate service not visible in ps output"
# Wait for migrate service to complete
echo "Waiting for migrations to complete..."
timeout 30 bash -c '
ATTEMPTS=0
while [ $ATTEMPTS -lt 15 ]; do
ATTEMPTS=$((ATTEMPTS + 1))
# Check using docker directly (more reliable than docker compose ps)
CONTAINER_STATUS=$(docker ps -a --filter "label=com.docker.compose.service=migrate" --format "{{.Status}}" | head -1)
if [ -z "$CONTAINER_STATUS" ]; then
echo " Attempt $ATTEMPTS: Migrate container not found yet..."
elif echo "$CONTAINER_STATUS" | grep -q "Exited (0)"; then
echo "✅ Migrations completed successfully"
docker compose logs migrate --tail=5 2>/dev/null || true
exit 0
elif echo "$CONTAINER_STATUS" | grep -q "Exited ([1-9]"; then
EXIT_CODE=$(echo "$CONTAINER_STATUS" | grep -oE "Exited \([0-9]+\)" | grep -oE "[0-9]+")
echo "❌ Migrations failed with exit code: $EXIT_CODE"
echo "Migration logs:"
docker compose logs migrate --tail=20 2>/dev/null || true
exit 1
elif echo "$CONTAINER_STATUS" | grep -q "Up"; then
echo " Attempt $ATTEMPTS: Migrate container is running... ($CONTAINER_STATUS)"
else
echo " Attempt $ATTEMPTS: Migrate container status: $CONTAINER_STATUS"
fi
sleep 2
done
echo "⚠️ Timeout: Could not determine migration status after 30 seconds"
echo "Final container check:"
docker ps -a --filter "label=com.docker.compose.service=migrate" || true
echo "Migration logs (if available):"
docker compose logs migrate --tail=10 2>/dev/null || echo " No logs available"
' || echo "⚠️ Migration check completed with warnings, continuing..."
# Brief wait for other services to stabilize
echo "Waiting 5 seconds for other services to stabilize..."
sleep 5
# Verify installations and provide environment info
- name: Verify setup and show environment info
run: |
echo "=== Python Setup ==="
python --version
poetry --version
echo "=== Node.js Setup ==="
node --version
pnpm --version
echo "=== Additional Tools ==="
docker --version
docker compose version
gh --version || true
echo "=== Services Status ==="
cd autogpt_platform
docker compose ps || true
echo "=== Backend Dependencies ==="
cd backend
poetry show | head -10 || true
echo "=== Frontend Dependencies ==="
cd ../frontend
pnpm list --depth=0 | head -10 || true
echo "=== Environment Files ==="
ls -la ../.env* || true
ls -la .env* || true
ls -la ../backend/.env* || true
echo "✅ AutoGPT Platform development environment setup complete!"
echo "🚀 Ready for development with Docker services running"
echo "📝 Backend server: poetry run serve (port 8000)"
echo "🌐 Frontend server: pnpm dev (port 3000)"

View File
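
The image-cache scheme in the setup steps above mangles each image tag into a tar filename with `tr ':/' '--'`, replacing ':' and '/' with '-'. A small sketch of the same naming plus the load-or-pull decision, assuming the same ~/docker-cache layout (the image list here is abbreviated):

# Sketch: docker image cache naming and load-or-pull, mirroring the shell.
import os
import subprocess

CACHE_DIR = os.path.expanduser("~/docker-cache")
IMAGES = ["redis:latest", "supabase/postgres:15.8.1.049"]


def cache_path(image: str) -> str:
    # "supabase/postgres:15.8.1.049" -> "supabase-postgres-15.8.1.049.tar"
    return os.path.join(CACHE_DIR, image.replace(":", "-").replace("/", "-") + ".tar")


def load_or_pull(image: str) -> None:
    tar = cache_path(image)
    if os.path.exists(tar):
        subprocess.run(["docker", "load", "-i", tar], check=True)
    else:
        subprocess.run(["docker", "pull", image], check=True)
        subprocess.run(["docker", "save", "-o", tar, image], check=True)


if __name__ == "__main__":
    os.makedirs(CACHE_DIR, exist_ok=True)
    for img in IMAGES:
        load_or_pull(img)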

@@ -0,0 +1,41 @@
name: Platform - AutoGPT Builder CI
on:
push:
branches: [ master ]
paths:
- '.github/workflows/autogpt-builder-ci.yml'
- 'autogpt_platform/autogpt_builder/**'
pull_request:
paths:
- '.github/workflows/autogpt-builder-ci.yml'
- 'autogpt_platform/autogpt_builder/**'
defaults:
run:
shell: bash
working-directory: autogpt_platform/autogpt_builder
jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '21'
- name: Install dependencies
run: |
npm install
- name: Check formatting with Prettier
run: |
npx prettier --check .
- name: Run lint
run: |
npm run lint

View File

@@ -1,60 +0,0 @@
name: AutoGPT Platform - Deploy Dev Environment
on:
push:
branches: [ dev ]
paths:
- 'autogpt_platform/**'
workflow_dispatch:
inputs:
git_ref:
description: 'Git ref (branch/tag) of AutoGPT to deploy'
required: true
default: 'master'
type: string
permissions:
contents: 'read'
id-token: 'write'
jobs:
migrate:
environment: develop
name: Run migrations for AutoGPT Platform
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
ref: ${{ github.event.inputs.git_ref || github.ref_name }}
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Install Python dependencies
run: |
python -m pip install --upgrade pip
pip install prisma
- name: Run Backend Migrations
working-directory: ./autogpt_platform/backend
run: |
python -m prisma migrate deploy
env:
DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}
DIRECT_URL: ${{ secrets.BACKEND_DATABASE_URL }}
trigger:
needs: migrate
runs-on: ubuntu-latest
steps:
- name: Trigger deploy workflow
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DEPLOY_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: build_deploy_dev
client-payload: '{"ref": "${{ github.event.inputs.git_ref || github.ref }}", "repository": "${{ github.repository }}"}'

View File
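
The deploy workflow above hands off to the infrastructure repo via peter-evans/repository-dispatch. Roughly, that action performs a POST to the target repo's /dispatches endpoint with an event type and client payload; a sketch with placeholder token and values:

# Sketch: the repository_dispatch API call behind the action above.
import json
import urllib.request


def dispatch(token: str, repo: str, event_type: str, payload: dict) -> None:
    req = urllib.request.Request(
        f"https://api.github.com/repos/{repo}/dispatches",
        data=json.dumps({"event_type": event_type, "client_payload": payload}).encode(),
        headers={
            "Authorization": f"Bearer {token}",
            "Accept": "application/vnd.github+json",
        },
        method="POST",
    )
    urllib.request.urlopen(req)  # returns 204 No Content on success

# dispatch(DEPLOY_TOKEN, "Significant-Gravitas/AutoGPT_cloud_infrastructure",
#          "build_deploy_dev", {"ref": "dev", "repository": "Significant-Gravitas/AutoGPT"})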

@@ -1,54 +0,0 @@
name: AutoGPT Platform - Deploy Prod Environment
on:
release:
types: [published]
workflow_dispatch:
permissions:
contents: 'read'
id-token: 'write'
jobs:
migrate:
environment: production
name: Run migrations for AutoGPT Platform
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
ref: ${{ github.ref_name || 'master' }}
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Install Python dependencies
run: |
python -m pip install --upgrade pip
pip install prisma
- name: Run Backend Migrations
working-directory: ./autogpt_platform/backend
run: |
python -m prisma migrate deploy
env:
DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}
DIRECT_URL: ${{ secrets.BACKEND_DATABASE_URL }}
trigger:
needs: migrate
runs-on: ubuntu-latest
steps:
- name: Trigger deploy workflow
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DEPLOY_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: build_deploy_prod
client-payload: |
{"ref": "${{ github.ref_name || 'master' }}", "repository": "${{ github.repository }}"}

View File

@@ -0,0 +1,56 @@
name: Platform - AutoGPT Builder Infra
on:
push:
branches: [ master ]
paths:
- '.github/workflows/autogpt-infra-ci.yml'
- 'autogpt_platform/infra/**'
pull_request:
paths:
- '.github/workflows/autogpt-infra-ci.yml'
- 'autogpt_platform/infra/**'
defaults:
run:
shell: bash
working-directory: autogpt_platform/infra
jobs:
lint:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: TFLint
uses: pauloconnor/tflint-action@v0.0.2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tflint_path: terraform/
tflint_recurse: true
tflint_changed_only: false
- name: Set up Helm
uses: azure/setup-helm@v4.2.0
with:
version: v3.14.4
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.0
- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --target-branch ${{ github.event.repository.default_branch }})
if [[ -n "$changed" ]]; then
echo "changed=true" >> "$GITHUB_OUTPUT"
fi
- name: Run chart-testing (lint)
if: steps.list-changed.outputs.changed == 'true'
run: ct lint --target-branch ${{ github.event.repository.default_branch }}

View File
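
The list-changed step above communicates with the conditional lint step by appending a key=value line to the file named by $GITHUB_OUTPUT, which later steps read as steps.<id>.outputs.<key>. A tiny sketch of that mechanism (names illustrative):

# Sketch: how "changed=true" is published as a step output.
import os


def set_output(key: str, value: str) -> None:
    # Equivalent of: echo "changed=true" >> "$GITHUB_OUTPUT"
    with open(os.environ["GITHUB_OUTPUT"], "a") as f:
        f.write(f"{key}={value}\n")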

@@ -0,0 +1,155 @@
name: Platform - AutoGPT Server CI
on:
push:
branches: [master, development, ci-test*]
paths:
- ".github/workflows/autogpt-server-ci.yml"
- "autogpt_platform/autogpt_server/**"
pull_request:
branches: [master, development, release-*]
paths:
- ".github/workflows/autogpt-server-ci.yml"
- "autogpt_platform/autogpt_server/**"
concurrency:
group: ${{ format('autogpt-server-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
defaults:
run:
shell: bash
working-directory: autogpt_platform/autogpt_server
jobs:
test:
permissions:
contents: read
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
platform-os: [ubuntu, macos, macos-arm64, windows]
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
steps:
- name: Setup PostgreSQL
uses: ikalnytskyi/action-setup-postgres@v6
with:
username: ${{ secrets.DB_USER || 'postgres' }}
password: ${{ secrets.DB_PASS || 'postgres' }}
database: postgres
port: 5432
id: postgres
# Quite slow on macOS (2-4 minutes to set up Docker)
# - name: Set up Docker (macOS)
# if: runner.os == 'macOS'
# uses: crazy-max/ghaction-setup-docker@v3
- name: Start MinIO service (Linux)
if: runner.os == 'Linux'
working-directory: "."
run: |
docker pull minio/minio:edge-cicd
docker run -d -p 9000:9000 minio/minio:edge-cicd
- name: Start MinIO service (macOS)
if: runner.os == 'macOS'
working-directory: ${{ runner.temp }}
run: |
brew install minio/stable/minio
mkdir data
minio server ./data &
# No MinIO on Windows:
# - Windows doesn't support running Linux Docker containers
# - It doesn't seem possible to start background processes on Windows. They are
# killed after the step returns.
# See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Set up Python dependency cache
# On Windows, unpacking cached dependencies takes longer than just installing them
if: runner.os != 'Windows'
uses: actions/cache@v4
with:
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/autogpt_server/poetry.lock') }}
- name: Install Poetry (Unix)
if: runner.os != 'Windows'
run: |
curl -sSL https://install.python-poetry.org | python3 -
if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi
- name: Install Poetry (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: |
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
$env:PATH += ";$env:APPDATA\Python\Scripts"
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
- name: Install Python dependencies
run: poetry install
- name: Generate Prisma Client
run: poetry run prisma generate
- name: Run Database Migrations
run: poetry run prisma migrate dev --name updates
env:
CONNECTION_STR: ${{ steps.postgres.outputs.connection-uri }}
- id: lint
name: Run Linter
run: poetry run lint
- name: Run pytest with coverage
run: |
if [[ "${{ runner.debug }}" == "1" ]]; then
poetry run pytest -vv -o log_cli=true -o log_cli_level=DEBUG test
else
poetry run pytest -vv test
fi
if: success() || (failure() && steps.lint.outcome == 'failure')
env:
LOG_LEVEL: ${{ runner.debug && 'DEBUG' || 'INFO' }}
env:
CI: true
PLAIN_OUTPUT: True
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
DB_USER: ${{ secrets.DB_USER || 'postgres' }}
DB_PASS: ${{ secrets.DB_PASS || 'postgres' }}
DB_NAME: postgres
DB_PORT: 5432
RUN_ENV: local
PORT: 8080
DATABASE_URL: postgresql://${{ secrets.DB_USER || 'postgres' }}:${{ secrets.DB_PASS || 'postgres' }}@localhost:5432/${{ secrets.DB_NAME || 'postgres' }}
# - name: Upload coverage reports to Codecov
# uses: codecov/codecov-action@v4
# with:
# token: ${{ secrets.CODECOV_TOKEN }}
# flags: autogpt-server,${{ runner.os }}

View File
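
The `runs-on` expression in the matrix above uses GitHub Actions' `cond && a || b` idiom, since workflow expressions have no ternary operator. The equivalent logic, for clarity:

# Sketch: the runner-selection ternary from the matrix above.
def runner_for(platform_os: str) -> str:
    # macos-arm64 has no "-latest" alias, so it is pinned to macos-14
    return f"{platform_os}-latest" if platform_os != "macos-arm64" else "macos-14"

assert runner_for("ubuntu") == "ubuntu-latest"
assert runner_for("macos-arm64") == "macos-14"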

@@ -1,225 +0,0 @@
name: AutoGPT Platform - Backend CI
on:
push:
branches: [master, dev, ci-test*]
paths:
- ".github/workflows/platform-backend-ci.yml"
- "autogpt_platform/backend/**"
- "autogpt_platform/autogpt_libs/**"
pull_request:
branches: [master, dev, release-*]
paths:
- ".github/workflows/platform-backend-ci.yml"
- "autogpt_platform/backend/**"
- "autogpt_platform/autogpt_libs/**"
merge_group:
concurrency:
group: ${{ format('backend-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
defaults:
run:
shell: bash
working-directory: autogpt_platform/backend
jobs:
test:
permissions:
contents: read
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
python-version: ["3.11", "3.12", "3.13"]
runs-on: ubuntu-latest
services:
redis:
image: redis:latest
ports:
- 6379:6379
rabbitmq:
image: rabbitmq:3.12-management
ports:
- 5672:5672
- 15672:15672
env:
RABBITMQ_DEFAULT_USER: ${{ env.RABBITMQ_DEFAULT_USER }}
RABBITMQ_DEFAULT_PASS: ${{ env.RABBITMQ_DEFAULT_PASS }}
clamav:
image: clamav/clamav-debian:latest
ports:
- 3310:3310
env:
CLAMAV_NO_FRESHCLAMD: false
CLAMD_CONF_StreamMaxLength: 50M
CLAMD_CONF_MaxFileSize: 100M
CLAMD_CONF_MaxScanSize: 100M
CLAMD_CONF_MaxThreads: 4
CLAMD_CONF_ReadTimeout: 300
options: >-
--health-cmd "clamdscan --version || exit 1"
--health-interval 30s
--health-timeout 10s
--health-retries 5
--health-start-period 180s
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Setup Supabase
uses: supabase/setup-cli@v1
with:
version: 1.178.1
- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Set up Python dependency cache
uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
- name: Install Poetry (Unix)
run: |
# Extract Poetry version from backend/poetry.lock
HEAD_POETRY_VERSION=$(python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
echo "Found Poetry version ${HEAD_POETRY_VERSION} in backend/poetry.lock"
if [ -n "$BASE_REF" ]; then
BASE_BRANCH=${BASE_REF/refs\/heads\//}
BASE_POETRY_VERSION=$((git show "origin/$BASE_BRANCH":./poetry.lock; true) | python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry -)
echo "Found Poetry version ${BASE_POETRY_VERSION} in backend/poetry.lock on ${BASE_REF}"
POETRY_VERSION=$(printf '%s\n' "$HEAD_POETRY_VERSION" "$BASE_POETRY_VERSION" | sort -V | tail -n1)
else
POETRY_VERSION=$HEAD_POETRY_VERSION
fi
echo "Using Poetry version ${POETRY_VERSION}"
# Install Poetry
curl -sSL https://install.python-poetry.org | POETRY_VERSION=$POETRY_VERSION python3 -
if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi
env:
BASE_REF: ${{ github.base_ref || github.event.merge_group.base_ref }}
- name: Check poetry.lock
run: |
poetry lock
if ! git diff --quiet --ignore-matching-lines="^# " poetry.lock; then
echo "Error: poetry.lock not up to date."
echo
git diff poetry.lock
exit 1
fi
- name: Install Python dependencies
run: poetry install
- name: Generate Prisma Client
run: poetry run prisma generate
- id: supabase
name: Start Supabase
working-directory: .
run: |
supabase init
supabase start --exclude postgres-meta,realtime,storage-api,imgproxy,inbucket,studio,edge-runtime,logflare,vector,supavisor
supabase status -o env | sed 's/="/=/; s/"$//' >> $GITHUB_OUTPUT
# outputs:
# DB_URL, API_URL, GRAPHQL_URL, ANON_KEY, SERVICE_ROLE_KEY, JWT_SECRET
- name: Wait for ClamAV to be ready
run: |
echo "Waiting for ClamAV daemon to start..."
max_attempts=60
attempt=0
until nc -z localhost 3310 || [ $attempt -eq $max_attempts ]; do
echo "ClamAV is unavailable - sleeping (attempt $((attempt+1))/$max_attempts)"
sleep 5
attempt=$((attempt+1))
done
if [ $attempt -eq $max_attempts ]; then
echo "ClamAV failed to start after $((max_attempts*5)) seconds"
echo "Checking ClamAV service logs..."
docker logs $(docker ps -q --filter "ancestor=clamav/clamav-debian:latest") 2>&1 | tail -50 || echo "No ClamAV container found"
exit 1
fi
echo "ClamAV is ready!"
# Verify ClamAV is responsive
echo "Testing ClamAV connection..."
timeout 10 bash -c 'echo "PING" | nc localhost 3310' || {
echo "ClamAV is not responding to PING"
docker logs $(docker ps -q --filter "ancestor=clamav/clamav-debian:latest") 2>&1 | tail -50 || echo "No ClamAV container found"
exit 1
}
- name: Run Database Migrations
run: poetry run prisma migrate dev --name updates
env:
DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
DIRECT_URL: ${{ steps.supabase.outputs.DB_URL }}
- id: lint
name: Run Linter
run: poetry run lint
- name: Run pytest with coverage
run: |
if [[ "${{ runner.debug }}" == "1" ]]; then
poetry run pytest -s -vv -o log_cli=true -o log_cli_level=DEBUG
else
poetry run pytest -s -vv
fi
if: success() || (failure() && steps.lint.outcome == 'failure')
env:
LOG_LEVEL: ${{ runner.debug && 'DEBUG' || 'INFO' }}
DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
DIRECT_URL: ${{ steps.supabase.outputs.DB_URL }}
SUPABASE_URL: ${{ steps.supabase.outputs.API_URL }}
SUPABASE_SERVICE_ROLE_KEY: ${{ steps.supabase.outputs.SERVICE_ROLE_KEY }}
JWT_VERIFY_KEY: ${{ steps.supabase.outputs.JWT_SECRET }}
REDIS_HOST: "localhost"
REDIS_PORT: "6379"
ENCRYPTION_KEY: "dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw=" # DO NOT USE IN PRODUCTION!!
env:
CI: true
PLAIN_OUTPUT: True
RUN_ENV: local
PORT: 8080
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
# We know these are here, don't report this as a security vulnerability
# This is used as the default credential for the entire system's RabbitMQ instance
# If you want to replace this, you can do so by making our entire system generate
# new credentials for each local user and update the environment variables in
# the backend service, docker composes, and examples
RABBITMQ_DEFAULT_USER: "rabbitmq_user_default"
RABBITMQ_DEFAULT_PASS: "k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7"
# - name: Upload coverage reports to Codecov
# uses: codecov/codecov-action@v4
# with:
# token: ${{ secrets.CODECOV_TOKEN }}
# flags: backend,${{ runner.os }}

View File
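
The "Install Poetry (Unix)" step in the Backend CI above picks the newer of the head and base lockfile Poetry versions with `sort -V | tail -n1`, so the installed Poetry can read both lockfiles. A sketch of that pick, assuming purely numeric dotted versions (real `sort -V` also handles suffixes):

# Sketch: choose the newer of two Poetry versions, like `sort -V | tail -n1`.
def newest_version(a: str, b: str) -> str:
    def key(v: str) -> tuple[int, ...]:
        return tuple(int(part) for part in v.split("."))
    return max(a, b, key=key)

assert newest_version("1.8.3", "1.7.1") == "1.8.3"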

@@ -1,198 +0,0 @@
name: AutoGPT Platform - Dev Deploy PR Event Dispatcher
on:
pull_request:
types: [closed]
issue_comment:
types: [created]
permissions:
issues: write
pull-requests: write
jobs:
dispatch:
runs-on: ubuntu-latest
steps:
- name: Check comment permissions and deployment status
id: check_status
if: github.event_name == 'issue_comment' && github.event.issue.pull_request
uses: actions/github-script@v7
with:
script: |
const commentBody = context.payload.comment.body.trim();
const commentUser = context.payload.comment.user.login;
const prAuthor = context.payload.issue.user.login;
const authorAssociation = context.payload.comment.author_association;
// Check permissions
const hasPermission = (
authorAssociation === 'OWNER' ||
authorAssociation === 'MEMBER' ||
authorAssociation === 'COLLABORATOR'
);
core.setOutput('comment_body', commentBody);
core.setOutput('has_permission', hasPermission);
if (!hasPermission && (commentBody === '!deploy' || commentBody === '!undeploy')) {
core.setOutput('permission_denied', 'true');
return;
}
if (commentBody !== '!deploy' && commentBody !== '!undeploy') {
return;
}
// Process deploy command
if (commentBody === '!deploy') {
core.setOutput('should_deploy', 'true');
}
// Process undeploy command
else if (commentBody === '!undeploy') {
core.setOutput('should_undeploy', 'true');
}
- name: Post permission denied comment
if: steps.check_status.outputs.permission_denied == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `❌ **Permission denied**: Only the repository owners, members, or collaborators can use deployment commands.`
});
- name: Get PR details for deployment
id: pr_details
if: steps.check_status.outputs.should_deploy == 'true' || steps.check_status.outputs.should_undeploy == 'true'
uses: actions/github-script@v7
with:
script: |
const pr = await github.rest.pulls.get({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: context.issue.number
});
core.setOutput('pr_number', pr.data.number);
core.setOutput('pr_title', pr.data.title);
core.setOutput('pr_state', pr.data.state);
- name: Dispatch Deploy Event
if: steps.check_status.outputs.should_deploy == 'true'
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: pr-event
client-payload: |
{
"action": "deploy",
"pr_number": "${{ steps.pr_details.outputs.pr_number }}",
"pr_title": "${{ steps.pr_details.outputs.pr_title }}",
"pr_state": "${{ steps.pr_details.outputs.pr_state }}",
"repo": "${{ github.repository }}"
}
- name: Post deploy success comment
if: steps.check_status.outputs.should_deploy == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `🚀 **Deploying PR #${{ steps.pr_details.outputs.pr_number }}** to development environment...`
});
- name: Dispatch Undeploy Event (from comment)
if: steps.check_status.outputs.should_undeploy == 'true'
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: pr-event
client-payload: |
{
"action": "undeploy",
"pr_number": "${{ steps.pr_details.outputs.pr_number }}",
"pr_title": "${{ steps.pr_details.outputs.pr_title }}",
"pr_state": "${{ steps.pr_details.outputs.pr_state }}",
"repo": "${{ github.repository }}"
}
- name: Post undeploy success comment
if: steps.check_status.outputs.should_undeploy == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `🗑️ **Undeploying PR #${{ steps.pr_details.outputs.pr_number }}** from development environment...`
});
- name: Check deployment status on PR close
id: check_pr_close
if: github.event_name == 'pull_request' && github.event.action == 'closed'
uses: actions/github-script@v7
with:
script: |
const comments = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number
});
let lastDeployIndex = -1;
let lastUndeployIndex = -1;
comments.data.forEach((comment, index) => {
if (comment.body.trim() === '!deploy') {
lastDeployIndex = index;
} else if (comment.body.trim() === '!undeploy') {
lastUndeployIndex = index;
}
});
// Should undeploy if there's a !deploy without a subsequent !undeploy
const shouldUndeploy = lastDeployIndex !== -1 && lastDeployIndex > lastUndeployIndex;
core.setOutput('should_undeploy', shouldUndeploy);
- name: Dispatch Undeploy Event (PR closed with active deployment)
if: >-
github.event_name == 'pull_request' &&
github.event.action == 'closed' &&
steps.check_pr_close.outputs.should_undeploy == 'true'
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: pr-event
client-payload: |
{
"action": "undeploy",
"pr_number": "${{ github.event.pull_request.number }}",
"pr_title": "${{ github.event.pull_request.title }}",
"pr_state": "${{ github.event.pull_request.state }}",
"repo": "${{ github.repository }}"
}
- name: Post PR close undeploy comment
if: >-
github.event_name == 'pull_request' &&
github.event.action == 'closed' &&
steps.check_pr_close.outputs.should_undeploy == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `🧹 **Auto-undeploying**: PR closed with active deployment. Cleaning up development environment for PR #${{ github.event.pull_request.number }}.`
});

View File
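
The PR-close cleanup above decides whether a deployment is still active by comparing the positions of the last "!deploy" and "!undeploy" comments. The same rule as a sketch:

# Sketch: a PR still has an active deployment if the most recent "!deploy"
# comment comes after the most recent "!undeploy" comment.
def should_undeploy(comment_bodies: list[str]) -> bool:
    last_deploy = last_undeploy = -1
    for i, body in enumerate(comment_bodies):
        if body.strip() == "!deploy":
            last_deploy = i
        elif body.strip() == "!undeploy":
            last_undeploy = i
    return last_deploy != -1 and last_deploy > last_undeploy

assert should_undeploy(["!deploy"]) is True
assert should_undeploy(["!deploy", "!undeploy"]) is False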

@@ -1,235 +0,0 @@
name: AutoGPT Platform - Frontend CI
on:
push:
branches: [master, dev]
paths:
- ".github/workflows/platform-frontend-ci.yml"
- "autogpt_platform/frontend/**"
pull_request:
paths:
- ".github/workflows/platform-frontend-ci.yml"
- "autogpt_platform/frontend/**"
merge_group:
defaults:
run:
shell: bash
working-directory: autogpt_platform/frontend
jobs:
setup:
runs-on: ubuntu-latest
outputs:
cache-key: ${{ steps.cache-key.outputs.key }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "21"
- name: Enable corepack
run: corepack enable
- name: Generate cache key
id: cache-key
run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}" >> $GITHUB_OUTPUT
- name: Cache dependencies
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ steps.cache-key.outputs.key }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-
- name: Install dependencies
run: pnpm install --frozen-lockfile
lint:
runs-on: ubuntu-latest
needs: setup
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "21"
- name: Enable corepack
run: corepack enable
- name: Restore dependencies cache
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Run lint
run: pnpm lint
chromatic:
runs-on: ubuntu-latest
needs: setup
# Only run on dev branch pushes or PRs targeting dev
if: github.ref == 'refs/heads/dev' || github.base_ref == 'dev'
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "21"
- name: Enable corepack
run: corepack enable
- name: Restore dependencies cache
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Run Chromatic
uses: chromaui/action@latest
with:
projectToken: chpt_9e7c1a76478c9c8
onlyChanged: true
workingDir: autogpt_platform/frontend
token: ${{ secrets.GITHUB_TOKEN }}
exitOnceUploaded: true
test:
runs-on: big-boi
needs: setup
strategy:
fail-fast: false
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
submodules: recursive
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "21"
- name: Enable corepack
run: corepack enable
- name: Copy default supabase .env
run: |
cp ../.env.default ../.env
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Cache Docker layers
uses: actions/cache@v4
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-frontend-test-${{ hashFiles('autogpt_platform/docker-compose.yml', 'autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/pyproject.toml', 'autogpt_platform/backend/poetry.lock') }}
restore-keys: |
${{ runner.os }}-buildx-frontend-test-
- name: Run docker compose
run: |
NEXT_PUBLIC_PW_TEST=true docker compose -f ../docker-compose.yml up -d
env:
DOCKER_BUILDKIT: 1
BUILDX_CACHE_FROM: type=local,src=/tmp/.buildx-cache
BUILDX_CACHE_TO: type=local,dest=/tmp/.buildx-cache-new,mode=max
- name: Move cache
run: |
rm -rf /tmp/.buildx-cache
if [ -d "/tmp/.buildx-cache-new" ]; then
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
fi
- name: Wait for services to be ready
run: |
echo "Waiting for rest_server to be ready..."
timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
echo "Waiting for database to be ready..."
timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."
- name: Create E2E test data
run: |
echo "Creating E2E test data..."
# First try to run the script from inside the container
if docker compose -f ../docker-compose.yml exec -T rest_server test -f /app/autogpt_platform/backend/test/e2e_test_data.py; then
echo "✅ Found e2e_test_data.py in container, running it..."
docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python backend/test/e2e_test_data.py" || {
echo "❌ E2E test data creation failed!"
docker compose -f ../docker-compose.yml logs --tail=50 rest_server
exit 1
}
else
echo "⚠️ e2e_test_data.py not found in container, copying and running..."
# Copy the script into the container and run it
docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.yml ps -q rest_server):/tmp/e2e_test_data.py || {
echo "❌ Failed to copy script to container"
exit 1
}
docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
echo "❌ E2E test data creation failed!"
docker compose -f ../docker-compose.yml logs --tail=50 rest_server
exit 1
}
fi
- name: Restore dependencies cache
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Install Browser 'chromium'
run: pnpm playwright install --with-deps chromium
- name: Run Playwright tests
run: pnpm test:no-build
- name: Upload Playwright artifacts
if: failure()
uses: actions/upload-artifact@v4
with:
name: playwright-report
path: playwright-report
- name: Print Final Docker Compose logs
if: always()
run: docker compose -f ../docker-compose.yml logs

View File
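
The Frontend CI above leans on actions/cache restore-keys: the exact key is tried first, then each restore key acts as a prefix fallback. A simplified sketch of that resolution (the real action also orders matches by creation time):

# Sketch: actions/cache key resolution with restore-keys fallback.
def resolve_cache(exact_key: str, restore_keys: list[str], available: list[str]) -> str | None:
    if exact_key in available:
        return exact_key
    for prefix in restore_keys:
        matches = [k for k in available if k.startswith(prefix)]
        if matches:
            return matches[-1]  # real action picks the most recent matching cache
    return None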

@@ -1,132 +0,0 @@
name: AutoGPT Platform - Frontend CI
on:
push:
branches: [master, dev]
paths:
- ".github/workflows/platform-fullstack-ci.yml"
- "autogpt_platform/**"
pull_request:
paths:
- ".github/workflows/platform-fullstack-ci.yml"
- "autogpt_platform/**"
merge_group:
defaults:
run:
shell: bash
working-directory: autogpt_platform/frontend
jobs:
setup:
runs-on: ubuntu-latest
outputs:
cache-key: ${{ steps.cache-key.outputs.key }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "21"
- name: Enable corepack
run: corepack enable
- name: Generate cache key
id: cache-key
run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}" >> $GITHUB_OUTPUT
- name: Cache dependencies
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ steps.cache-key.outputs.key }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-
- name: Install dependencies
run: pnpm install --frozen-lockfile
types:
runs-on: ubuntu-latest
needs: setup
strategy:
fail-fast: false
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
submodules: recursive
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "21"
- name: Enable corepack
run: corepack enable
- name: Copy default supabase .env
run: |
cp ../.env.default ../.env
- name: Copy backend .env
run: |
cp ../backend/.env.default ../backend/.env
- name: Run docker compose
run: |
docker compose -f ../docker-compose.yml --profile local --profile deps_backend up -d
- name: Restore dependencies cache
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
restore-keys: |
${{ runner.os }}-pnpm-
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Setup .env
run: cp .env.default .env
- name: Wait for services to be ready
run: |
echo "Waiting for rest_server to be ready..."
timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
echo "Waiting for database to be ready..."
timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."
- name: Generate API queries
run: pnpm generate:api:force
- name: Check for API schema changes
run: |
if ! git diff --exit-code src/app/api/openapi.json; then
echo "❌ API schema changes detected in src/app/api/openapi.json"
echo ""
echo "The openapi.json file has been modified after running 'pnpm generate:api-all'."
echo "This usually means changes have been made in the BE endpoints without updating the Frontend."
echo "The API schema is now out of sync with the Front-end queries."
echo ""
echo "To fix this:"
echo "1. Pull the backend 'docker compose pull && docker compose up -d --build --force-recreate'"
echo "2. Run 'pnpm generate:api' locally"
echo "3. Run 'pnpm types' locally"
echo "4. Fix any TypeScript errors that may have been introduced"
echo "5. Commit and push your changes"
echo ""
exit 1
else
echo "✅ No API schema changes detected"
fi
- name: Run Typescript checks
run: pnpm types

View File
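
The schema-drift gate in the fullstack CI above regenerates the API client, then fails if the committed openapi.json no longer matches. A minimal sketch of the same check (paths mirror the workflow but are illustrative, and pnpm/git must be on PATH):

# Sketch: fail the build when the generated API schema drifts from the
# committed one, like the `git diff --exit-code` step above.
import subprocess
import sys

subprocess.run(["pnpm", "generate:api:force"], check=True)
drift = subprocess.run(
    ["git", "diff", "--exit-code", "src/app/api/openapi.json"]
).returncode
if drift:
    sys.exit("API schema out of sync: regenerate the client and commit the result")
print("No API schema changes detected")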

@@ -16,7 +16,7 @@ jobs:
# operations-per-run: 5000
stale-issue-message: >
This issue has automatically been marked as _stale_ because it has not had
any activity in the last 170 days. You can _unstale_ it by commenting or
any activity in the last 50 days. You can _unstale_ it by commenting or
removing the label. Otherwise, this issue will be closed in 10 days.
stale-pr-message: >
This pull request has automatically been marked as _stale_ because it has
@@ -25,7 +25,7 @@ jobs:
close-issue-message: >
This issue was closed automatically because it has been stale for 10 days
with no activity.
days-before-stale: 170
days-before-stale: 50
days-before-close: 10
# Do not touch meta issues:
exempt-issue-labels: meta,fridge,project management

View File

@@ -1,21 +0,0 @@
name: Repo - Enforce dev as base branch
on:
pull_request_target:
branches: [ master ]
types: [ opened ]
jobs:
check_pr_target:
runs-on: ubuntu-latest
permissions:
pull-requests: write
steps:
- name: Check if PR is from dev or hotfix
if: ${{ !(startsWith(github.event.pull_request.head.ref, 'hotfix/') || github.event.pull_request.head.ref == 'dev') }}
run: |
gh pr comment ${{ github.event.number }} --repo "$REPO" \
--body $'This PR targets the `master` branch but does not come from `dev` or a `hotfix/*` branch.\n\nAutomatically setting the base branch to `dev`.'
gh pr edit ${{ github.event.number }} --base dev --repo "$REPO"
env:
GITHUB_TOKEN: ${{ github.token }}
REPO: ${{ github.repository }}

View File

@@ -3,7 +3,7 @@ name: Repo - Pull Request auto-label
on:
# So that PRs touching the same files as the push are updated
push:
branches: [ master, dev, release-* ]
branches: [ master, development, release-* ]
paths-ignore:
- 'classic/forge/tests/vcr_cassettes'
- 'classic/benchmark/reports/**'

View File

@@ -2,7 +2,6 @@ name: Repo - PR Status Checker
on:
pull_request:
types: [opened, synchronize, reopened]
merge_group:
jobs:
status-check:

View File

@@ -5,8 +5,6 @@ import sys
import time
from typing import Dict, List, Tuple
CHECK_INTERVAL = 30
def get_environment_variables() -> Tuple[str, str, str, str, str]:
"""Retrieve and return necessary environment variables."""
@@ -14,11 +12,7 @@ def get_environment_variables() -> Tuple[str, str, str, str, str]:
with open(os.environ["GITHUB_EVENT_PATH"]) as f:
event = json.load(f)
# Handle both PR and merge group events
if "pull_request" in event:
sha = event["pull_request"]["head"]["sha"]
else:
sha = os.environ["GITHUB_SHA"]
sha = event["pull_request"]["head"]["sha"]
return (
os.environ["GITHUB_API_URL"],
@@ -99,10 +93,9 @@ def main():
break
print(
"Some check runs are still in progress. "
f"Waiting {CHECK_INTERVAL} seconds before checking again..."
"Some check runs are still in progress. Waiting 3 minutes before checking again..."
)
time.sleep(CHECK_INTERVAL)
time.sleep(180)
if all_others_passed:
print("All other completed check runs have passed. This check passes.")

View File

@@ -1,60 +0,0 @@
#!/usr/bin/env python3
import sys
if sys.version_info < (3, 11):
print("Python version 3.11 or higher required")
sys.exit(1)
import tomllib
def get_package_version(package_name: str, lockfile_path: str) -> str | None:
"""Extract package version from poetry.lock file."""
try:
if lockfile_path == "-":
data = tomllib.load(sys.stdin.buffer)
else:
with open(lockfile_path, "rb") as f:
data = tomllib.load(f)
except FileNotFoundError:
print(f"Error: File '{lockfile_path}' not found", file=sys.stderr)
sys.exit(1)
except tomllib.TOMLDecodeError as e:
print(f"Error parsing TOML file: {e}", file=sys.stderr)
sys.exit(1)
except Exception as e:
print(f"Error reading file: {e}", file=sys.stderr)
sys.exit(1)
# Look for the package in the packages list
packages = data.get("package", [])
for package in packages:
if package.get("name", "").lower() == package_name.lower():
return package.get("version")
return None
def main():
if len(sys.argv) not in (2, 3):
print(
"Usages: python get_package_version_from_lockfile.py <package name> [poetry.lock path]\n"
" cat poetry.lock | python get_package_version_from_lockfile.py <package name> -",
file=sys.stderr,
)
sys.exit(1)
package_name = sys.argv[1]
lockfile_path = sys.argv[2] if len(sys.argv) == 3 else "poetry.lock"
version = get_package_version(package_name, lockfile_path)
if version:
print(version)
else:
print(f"Package '{package_name}' not found in {lockfile_path}", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()

15
.gitignore vendored
View File

@@ -5,8 +5,6 @@ classic/original_autogpt/*.json
auto_gpt_workspace/*
*.mpeg
.env
# Root .env files
/.env
azure.yaml
.vscode
.idea/*
@@ -123,6 +121,7 @@ celerybeat.pid
# Environments
.direnv/
.env
.venv
env/
venv*/
@@ -158,7 +157,7 @@ openai/
CURRENT_BULLETIN.md
# AgBenchmark
classic/benchmark/agbenchmark/reports/
agclassic/benchmark/reports/
# Nodejs
package-lock.json
@@ -166,15 +165,9 @@ package-lock.json
# Allow for locally private items
# private
pri*
pri*
# ignore
ig*
.github_access_token
LICENSE.rtf
autogpt_platform/backend/settings.py
/.auth
/autogpt_platform/frontend/.auth
*.ign.*
.test-contents
.claude/settings.local.json
autogpt_platform/autogpt_server/settings.py

3
.gitmodules vendored
View File

@@ -1,3 +1,6 @@
[submodule "classic/forge/tests/vcr_cassettes"]
path = classic/forge/tests/vcr_cassettes
url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
[submodule "autogpt_platform/supabase"]
path = autogpt_platform/supabase
url = https://github.com/supabase/supabase.git

View File

@@ -1,3 +1,6 @@
[pr_reviewer]
num_code_suggestions=0
[pr_code_suggestions]
commitable_code_suggestions=false
num_code_suggestions=0

View File

@@ -10,142 +10,39 @@ repos:
- id: check-symlinks
- id: debug-statements
- repo: https://github.com/Yelp/detect-secrets
rev: v1.5.0
hooks:
- id: detect-secrets
name: Detect secrets
description: Detects high entropy strings that are likely to be passwords.
files: ^autogpt_platform/
stages: [pre-push]
- repo: local
# For proper type checking, all dependencies need to be up-to-date.
# It's also a good idea to check that poetry.lock is consistent with pyproject.toml.
hooks:
- id: poetry-install
name: Check & Install dependencies - AutoGPT Platform - Backend
alias: poetry-install-platform-backend
entry: poetry -C autogpt_platform/backend install
# include autogpt_libs source (since it's a path dependency)
files: ^autogpt_platform/(backend|autogpt_libs)/poetry\.lock$
types: [file]
language: system
pass_filenames: false
- id: poetry-install
name: Check & Install dependencies - AutoGPT Platform - Libs
alias: poetry-install-platform-libs
entry: poetry -C autogpt_platform/autogpt_libs install
files: ^autogpt_platform/autogpt_libs/poetry\.lock$
types: [file]
language: system
pass_filenames: false
- id: poetry-install
name: Check & Install dependencies - Classic - AutoGPT
alias: poetry-install-classic-autogpt
entry: poetry -C classic/original_autogpt install
# include forge source (since it's a path dependency)
files: ^classic/(original_autogpt|forge)/poetry\.lock$
types: [file]
language: system
pass_filenames: false
- id: poetry-install
name: Check & Install dependencies - Classic - Forge
alias: poetry-install-classic-forge
entry: poetry -C classic/forge install
files: ^classic/forge/poetry\.lock$
types: [file]
language: system
pass_filenames: false
- id: poetry-install
name: Check & Install dependencies - Classic - Benchmark
alias: poetry-install-classic-benchmark
entry: poetry -C classic/benchmark install
files: ^classic/benchmark/poetry\.lock$
types: [file]
language: system
pass_filenames: false
- repo: local
# For proper type checking, Prisma client must be up-to-date.
hooks:
- id: prisma-generate
name: Prisma Generate - AutoGPT Platform - Backend
alias: prisma-generate-platform-backend
entry: bash -c 'cd autogpt_platform/backend && poetry run prisma generate'
# include everything that triggers poetry install + the prisma schema
files: ^autogpt_platform/((backend|autogpt_libs)/poetry\.lock|backend/schema.prisma)$
types: [file]
language: system
pass_filenames: false
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.7.2
hooks:
- id: ruff
name: Lint (Ruff) - AutoGPT Platform - Backend
alias: ruff-lint-platform-backend
files: ^autogpt_platform/backend/
args: [--fix]
- id: ruff
name: Lint (Ruff) - AutoGPT Platform - Libs
alias: ruff-lint-platform-libs
files: ^autogpt_platform/autogpt_libs/
args: [--fix]
- id: ruff-format
name: Format (Ruff) - AutoGPT Platform - Libs
alias: ruff-lint-platform-libs
files: ^autogpt_platform/autogpt_libs/
- repo: local
# isort needs the context of which packages are installed to function, so we
# can't use a vendored isort pre-commit hook (which runs in its own isolated venv).
hooks:
- id: isort
name: Lint (isort) - AutoGPT Platform - Backend
alias: isort-platform-backend
entry: poetry -P autogpt_platform/backend run isort -p backend
files: ^autogpt_platform/backend/
types: [file, python]
language: system
- id: isort
name: Lint (isort) - Classic - AutoGPT
alias: isort-classic-autogpt
entry: poetry -P classic/original_autogpt run isort -p autogpt
- id: isort-autogpt
name: Lint (isort) - AutoGPT
entry: poetry -C classic/original_autogpt run isort
files: ^classic/original_autogpt/
types: [file, python]
language: system
- id: isort
name: Lint (isort) - Classic - Forge
alias: isort-classic-forge
entry: poetry -P classic/forge run isort -p forge
- id: isort-forge
name: Lint (isort) - Forge
entry: poetry -C classic/forge run isort
files: ^classic/forge/
types: [file, python]
language: system
- id: isort
name: Lint (isort) - Classic - Benchmark
alias: isort-classic-benchmark
entry: poetry -P classic/benchmark run isort -p agbenchmark
- id: isort-benchmark
name: Lint (isort) - Benchmark
entry: poetry -C classic/benchmark run isort
files: ^classic/benchmark/
types: [file, python]
language: system
- repo: https://github.com/psf/black
rev: 24.10.0
rev: 23.12.1
# Black has sensible defaults, doesn't need package context, and ignores
# everything in .gitignore, so it works fine without any config or arguments.
hooks:
- id: black
name: Format (Black)
name: Lint (Black)
language_version: python3.10
- repo: https://github.com/PyCQA/flake8
rev: 7.0.0
@@ -153,126 +50,78 @@ repos:
# them separately.
hooks:
- id: flake8
name: Lint (Flake8) - Classic - AutoGPT
alias: flake8-classic-autogpt
name: Lint (Flake8) - AutoGPT
alias: flake8-autogpt
files: ^classic/original_autogpt/(autogpt|scripts|tests)/
args: [--config=classic/original_autogpt/.flake8]
- id: flake8
name: Lint (Flake8) - Classic - Forge
alias: flake8-classic-forge
name: Lint (Flake8) - Forge
alias: flake8-forge
files: ^classic/forge/(forge|tests)/
args: [--config=classic/forge/.flake8]
- id: flake8
name: Lint (Flake8) - Classic - Benchmark
alias: flake8-classic-benchmark
name: Lint (Flake8) - Benchmark
alias: flake8-benchmark
files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
args: [--config=classic/benchmark/.flake8]
- repo: local
hooks:
- id: prettier
name: Format (Prettier) - AutoGPT Platform - Frontend
alias: format-platform-frontend
entry: bash -c 'cd autogpt_platform/frontend && npx prettier --write $(echo "$@" | sed "s|autogpt_platform/frontend/||g")' --
files: ^autogpt_platform/frontend/
types: [file]
language: system
- repo: local
# To have watertight type checking, we check *all* the files in an affected
# project. To trigger on poetry.lock we also reset the file `types` filter.
hooks:
- id: pyright
name: Typecheck - AutoGPT Platform - Backend
alias: pyright-platform-backend
entry: poetry -C autogpt_platform/backend run pyright
# include forge source (since it's a path dependency) but exclude *_test.py files:
files: ^autogpt_platform/(backend/((backend|test)/|(\w+\.py|poetry\.lock)$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
types: [file]
language: system
pass_filenames: false
- id: pyright
name: Typecheck - AutoGPT Platform - Libs
alias: pyright-platform-libs
entry: poetry -C autogpt_platform/autogpt_libs run pyright
files: ^autogpt_platform/autogpt_libs/(autogpt_libs/|poetry\.lock$)
types: [file]
language: system
pass_filenames: false
- id: pyright
name: Typecheck - Classic - AutoGPT
alias: pyright-classic-autogpt
name: Typecheck - AutoGPT
alias: pyright-autogpt
entry: poetry -C classic/original_autogpt run pyright
args: [-p, autogpt, autogpt]
# include forge source (since it's a path dependency) but exclude *_test.py files:
files: ^(classic/original_autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
files: ^(classic/original_autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|classic/forge/(classic/forge/.*(?<!_test)\.py|poetry\.lock)$)
types: [file]
language: system
pass_filenames: false
- id: pyright
name: Typecheck - Classic - Forge
alias: pyright-classic-forge
name: Typecheck - Forge
alias: pyright-forge
entry: poetry -C classic/forge run pyright
files: ^classic/forge/(forge/|poetry\.lock$)
args: [-p, forge, forge]
files: ^classic/forge/(classic/forge/|poetry\.lock$)
types: [file]
language: system
pass_filenames: false
- id: pyright
name: Typecheck - Classic - Benchmark
alias: pyright-classic-benchmark
name: Typecheck - Benchmark
alias: pyright-benchmark
entry: poetry -C classic/benchmark run pyright
files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
args: [-p, benchmark, benchmark]
files: ^classic/benchmark/(agclassic/benchmark/|tests/|poetry\.lock$)
types: [file]
language: system
pass_filenames: false
- repo: local
hooks:
- id: tsc
name: Typecheck - AutoGPT Platform - Frontend
entry: bash -c 'cd autogpt_platform/frontend && pnpm types'
files: ^autogpt_platform/frontend/
types: [file]
- id: pytest-autogpt
name: Run tests - AutoGPT (excl. slow tests)
entry: bash -c 'cd classic/original_autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
# include forge source (since it's a path dependency) but exclude *_test.py files:
files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(classic/forge/.*(?<!_test)\.py|poetry\.lock)$)
language: system
pass_filenames: false
# - repo: local
# hooks:
# - id: pytest
# name: Run tests - AutoGPT Platform - Backend
# alias: pytest-platform-backend
# entry: bash -c 'cd autogpt_platform/backend && poetry run pytest'
# # include autogpt_libs source (since it's a path dependency) but exclude *_test.py files:
# files: ^autogpt_platform/(backend/((backend|test)/|poetry\.lock$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
# language: system
# pass_filenames: false
- id: pytest-forge
name: Run tests - Forge (excl. slow tests)
entry: bash -c 'cd classic/forge && poetry run pytest --cov=forge -m "not slow"'
files: ^classic/forge/(classic/forge/|tests/|poetry\.lock$)
language: system
pass_filenames: false
# - id: pytest
# name: Run tests - Classic - AutoGPT (excl. slow tests)
# alias: pytest-classic-autogpt
# entry: bash -c 'cd classic/original_autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
# # include forge source (since it's a path dependency) but exclude *_test.py files:
# files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
# language: system
# pass_filenames: false
# - id: pytest
# name: Run tests - Classic - Forge (excl. slow tests)
# alias: pytest-classic-forge
# entry: bash -c 'cd classic/forge && poetry run pytest --cov=forge -m "not slow"'
# files: ^classic/forge/(forge/|tests/|poetry\.lock$)
# language: system
# pass_filenames: false
# - id: pytest
# name: Run tests - Classic - Benchmark
# alias: pytest-classic-benchmark
# entry: bash -c 'cd classic/benchmark && poetry run pytest --cov=benchmark'
# files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
# language: system
# pass_filenames: false
- id: pytest-benchmark
name: Run tests - Benchmark
entry: bash -c 'cd classic/benchmark && poetry run pytest --cov=benchmark'
files: ^classic/benchmark/(agclassic/benchmark/|tests/|poetry\.lock$)
language: system
pass_filenames: false

View File

@@ -1,12 +1,12 @@
{
"folders": [
{
"name": "frontend",
"path": "../autogpt_platform/frontend"
"name": "autogpt_server",
"path": "../autogpt_platform/autogpt_server"
},
{
"name": "backend",
"path": "../autogpt_platform/backend"
"name": "autogpt_builder",
"path": "../autogpt_platform/autogpt_builder"
},
{
"name": "market",
@@ -24,7 +24,10 @@
"name": "docs",
"path": "../docs"
},
{
"name": "[root]",
"path": ".."
},
{
"name": "classic - autogpt",
"path": "../classic/original_autogpt"
@@ -41,10 +44,6 @@
"name": "classic - frontend",
"path": "../classic/frontend"
},
{
"name": "[root]",
"path": ".."
}
],
"settings": {
"python.analysis.typeCheckingMode": "basic"

67
.vscode/launch.json vendored
View File

@@ -1,67 +0,0 @@
{
"version": "0.2.0",
"configurations": [
{
"name": "Frontend: Server Side",
"type": "node-terminal",
"request": "launch",
"cwd": "${workspaceFolder}/autogpt_platform/frontend",
"command": "pnpm dev"
},
{
"name": "Frontend: Client Side",
"type": "msedge",
"request": "launch",
"url": "http://localhost:3000"
},
{
"name": "Frontend: Full Stack",
"type": "node-terminal",
"request": "launch",
"command": "pnpm dev",
"cwd": "${workspaceFolder}/autogpt_platform/frontend",
"serverReadyAction": {
"pattern": "- Local:.+(https?://.+)",
"uriFormat": "%s",
"action": "debugWithChrome"
}
},
{
"name": "Backend",
"type": "debugpy",
"request": "launch",
"module": "backend.app",
"env": {
"OBJC_DISABLE_INITIALIZE_FORK_SAFETY": "YES"
},
"envFile": "${workspaceFolder}/backend/.env",
"justMyCode": false,
"cwd": "${workspaceFolder}/autogpt_platform/backend"
},
{
"name": "Marketplace",
"type": "debugpy",
"request": "launch",
"module": "autogpt_platform.market.main",
"env": {
"ENV": "dev"
},
"envFile": "${workspaceFolder}/market/.env",
"justMyCode": false,
"cwd": "${workspaceFolder}/market"
}
],
"compounds": [
{
"name": "Everything",
"configurations": ["Backend", "Frontend: Full Stack"],
// "preLaunchTask": "${defaultBuildTask}",
"stopAll": true,
"presentation": {
"hidden": false,
"order": 0
}
}
]
}

View File

@@ -1,53 +0,0 @@
# AutoGPT Platform Contribution Guide
This guide provides context for Codex when updating the **autogpt_platform** folder.
## Directory overview
- `autogpt_platform/backend` FastAPI based backend service.
- `autogpt_platform/autogpt_libs` Shared Python libraries.
- `autogpt_platform/frontend` Next.js + Typescript frontend.
- `autogpt_platform/docker-compose.yml` development stack.
See `docs/content/platform/getting-started.md` for setup instructions.
## Code style
- Format Python code with `poetry run format`.
- Format frontend code using `pnpm format`.
## Testing
- Backend: `poetry run test` (runs pytest with a docker based postgres + prisma).
- Frontend: `pnpm test` or `pnpm test-ui` for Playwright tests. See `docs/content/platform/contributing/tests.md` for tips.
Always run the relevant linters and tests before committing.
Use conventional commit messages for all commits (e.g. `feat(backend): add API`).
Types:
- feat
- fix
- refactor
- ci
- dx (developer experience)
Scopes:
- platform
- platform/library
- platform/marketplace
- backend
- backend/executor
- frontend
- frontend/library
- frontend/marketplace
- blocks
## Pull requests
- Use the template in `.github/PULL_REQUEST_TEMPLATE.md`.
- Rely on the pre-commit checks for linting and formatting
- Fill out the **Changes** section and the checklist.
- Use conventional commit titles with a scope (e.g. `feat(frontend): add feature`).
- Keep out-of-scope changes under 20% of the PR.
- Ensure PR descriptions are complete.
- For changes touching `data/*.py`, validate user ID checks or explain why not needed.
- If adding protected frontend routes, update `frontend/lib/supabase/middleware.ts`.
- Use the Linear ticket branch structure if one is given (e.g. `codex/open-1668-resume-dropped-runs`).

View File

@@ -2,14 +2,14 @@
If you are reading this, you are probably looking for the full **[contribution guide]**,
which is part of our [wiki].
Also check out our [🚀 Roadmap][roadmap] for information about our priorities and associated tasks.
<!-- You can find our immediate priorities and their progress on our public [kanban board]. -->
[contribution guide]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing
[wiki]: https://github.com/Significant-Gravitas/AutoGPT/wiki
[roadmap]: https://github.com/Significant-Gravitas/AutoGPT/discussions/6971
[kanban board]: https://github.com/orgs/Significant-Gravitas/projects/1
## Contributing to the AutoGPT Platform Folder
All contributions to [the autogpt_platform folder](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform) will be under our [Contribution License Agreement](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform/Contributor%20License%20Agreement%20(CLA).md). By making a pull request contributing to this folder, you agree to the terms of our CLA for your contribution. All contributions to other folders will be under the MIT license.
## In short
1. Avoid duplicate work, issues, PRs etc.
2. We encourage you to collaborate with fellow community members on some of our bigger

199
LICENSE
View File

@@ -1,204 +1,7 @@
All portions of this repository are under one of two licenses.
- Everything inside the autogpt_platform folder is under the Polyform Shield License.
- Everything outside the autogpt_platform folder is under the MIT License.
More info:
**Polyform Shield License:**
Code and content within the `autogpt_platform` folder is licensed under the Polyform Shield License. This new project is our in-development platform for building, deploying and managing agents.
Read more about this effort here: https://agpt.co/blog/introducing-the-autogpt-platform
**MIT License:**
All other portions of the AutoGPT repository (i.e., everything outside the `autogpt_platform` folder) are licensed under the MIT License. This includes:
- The Original, stand-alone AutoGPT Agent
- Forge: https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/forge
- AG Benchmark: https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/benchmark
- AutoGPT Classic GUI: https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/frontend.
We also publish additional work under the MIT Licence in other repositories, such as GravitasML (https://github.com/Significant-Gravitas/gravitasml) which is developed for and used in the AutoGPT Platform, and our [Code Ability](https://github.com/Significant-Gravitas/AutoGPT-Code-Ability) project.
Both licences are available to read below:
=====================================================
-----------------------------------------------------
=====================================================
# PolyForm Shield License 1.0.0
<https://polyformproject.org/licenses/shield/1.0.0>
## Acceptance
In order to get any license under these terms, you must agree
to them as both strict obligations and conditions to all
your licenses.
## Copyright License
The licensor grants you a copyright license for the
software to do everything you might do with the software
that would otherwise infringe the licensor's copyright
in it for any permitted purpose. However, you may
only distribute the software according to [Distribution
License](#distribution-license) and make changes or new works
based on the software according to [Changes and New Works
License](#changes-and-new-works-license).
## Distribution License
The licensor grants you an additional copyright license
to distribute copies of the software. Your license
to distribute covers distributing the software with
changes and new works permitted by [Changes and New Works
License](#changes-and-new-works-license).
## Notices
You must ensure that anyone who gets a copy of any part of
the software from you also gets a copy of these terms or the
URL for them above, as well as copies of any plain-text lines
beginning with `Required Notice:` that the licensor provided
with the software. For example:
> Required Notice: Copyright Yoyodyne, Inc. (http://example.com)
## Changes and New Works License
The licensor grants you an additional copyright license to
make changes and new works based on the software for any
permitted purpose.
## Patent License
The licensor grants you a patent license for the software that
covers patent claims the licensor can license, or becomes able
to license, that you would infringe by using the software.
## Noncompete
Any purpose is a permitted purpose, except for providing any
product that competes with the software or any product the
licensor or any of its affiliates provides using the software.
## Competition
Goods and services compete even when they provide functionality
through different kinds of interfaces or for different technical
platforms. Applications can compete with services, libraries
with plugins, frameworks with development tools, and so on,
even if they're written in different programming languages
or for different computer architectures. Goods and services
compete even when provided free of charge. If you market a
product as a practical substitute for the software or another
product, it definitely competes.
## New Products
If you are using the software to provide a product that does
not compete, but the licensor or any of its affiliates brings
your product into competition by providing a new version of
the software or another product using the software, you may
continue using versions of the software available under these
terms beforehand to provide your competing product, but not
any later versions.
## Discontinued Products
You may begin using the software to compete with a product
or service that the licensor or any of its affiliates has
stopped providing, unless the licensor includes a plain-text
line beginning with `Licensor Line of Business:` with the
software that mentions that line of business. For example:
> Licensor Line of Business: YoyodyneCMS Content Management
System (http://example.com/cms)
## Sales of Business
If the licensor or any of its affiliates sells a line of
business developing the software or using the software
to provide a product, the buyer can also enforce
[Noncompete](#noncompete) for that product.
## Fair Use
You may have "fair use" rights for the software under the
law. These terms do not limit them.
## No Other Rights
These terms do not allow you to sublicense or transfer any of
your licenses to anyone else, or prevent the licensor from
granting licenses to anyone else. These terms do not imply
any other licenses.
## Patent Defense
If you make any written claim that the software infringes or
contributes to infringement of any patent, your patent license
for the software granted under these terms ends immediately. If
your company makes such a claim, your patent license ends
immediately for work on behalf of your company.
## Violations
The first time you are notified in writing that you have
violated any of these terms, or done anything with the software
not covered by your licenses, your licenses can nonetheless
continue if you come into full compliance with these terms,
and take practical steps to correct past violations, within
32 days of receiving notice. Otherwise, all your licenses
end immediately.
## No Liability
***As far as the law allows, the software comes as is, without
any warranty or condition, and the licensor will not be liable
to you for any damages arising out of these terms or the use
or nature of the software, under any kind of legal claim.***
## Definitions
The **licensor** is the individual or entity offering these
terms, and the **software** is the software the licensor makes
available under these terms.
A **product** can be a good or service, or a combination
of them.
**You** refers to the individual or entity agreeing to these
terms.
**Your company** is any legal entity, sole proprietorship,
or other kind of organization that you work for, plus all
its affiliates.
**Affiliates** means the other organizations than an
organization has control over, is under the control of, or is
under common control with.
**Control** means ownership of substantially all the assets of
an entity, or the power to direct its management and policies
by vote, contract, or otherwise. Control can be direct or
indirect.
**Your licenses** are all the licenses granted to you for the
software under these terms.
**Use** means anything you do with the software requiring one
of your licenses.
=====================================================
-----------------------------------------------------
=====================================================
MIT License
Copyright (c) 2023 Toran Bruce Richards
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
@@ -206,11 +9,9 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE

157
README.md
View File

@@ -1,139 +1,43 @@
# AutoGPT: Build, Deploy, and Run AI Agents
# AutoGPT: Build & Use AI Agents
[![Discord Follow](https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fdiscord.com%2Fapi%2Finvites%2Fautogpt%3Fwith_counts%3Dtrue&query=%24.approximate_member_count&label=total%20members&logo=discord&logoColor=white&color=7289da)](https://discord.gg/autogpt) &ensp;
[![Discord Follow](https://dcbadge.vercel.app/api/server/autogpt?style=flat)](https://discord.gg/autogpt) &ensp;
[![Twitter Follow](https://img.shields.io/twitter/follow/Auto_GPT?style=social)](https://twitter.com/Auto_GPT) &ensp;
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
<!-- Keep these links. Translations will automatically update with the README. -->
[Deutsch](https://zdoc.app/de/Significant-Gravitas/AutoGPT) |
[Español](https://zdoc.app/es/Significant-Gravitas/AutoGPT) |
[français](https://zdoc.app/fr/Significant-Gravitas/AutoGPT) |
[日本語](https://zdoc.app/ja/Significant-Gravitas/AutoGPT) |
[한국어](https://zdoc.app/ko/Significant-Gravitas/AutoGPT) |
[Português](https://zdoc.app/pt/Significant-Gravitas/AutoGPT) |
[Русский](https://zdoc.app/ru/Significant-Gravitas/AutoGPT) |
[中文](https://zdoc.app/zh/Significant-Gravitas/AutoGPT)
**AutoGPT** is a powerful tool that lets you create and run intelligent agents. These agents can perform various tasks automatically, making your life easier.
**AutoGPT** is a powerful platform that allows you to create, deploy, and manage continuous AI agents that automate complex workflows.
## How to Get Started
## Hosting Options
- Download to self-host (Free!)
- [Join the Waitlist](https://bit.ly/3ZDijAI) for the cloud-hosted beta (Closed Beta - Public release Coming Soon!)
https://github.com/user-attachments/assets/8508f4dc-b362-4cab-900f-644964a96cdf
## How to Self-Host the AutoGPT Platform
> [!NOTE]
> Setting up and hosting the AutoGPT Platform yourself is a technical process.
> If you'd rather something that just works, we recommend [joining the waitlist](https://bit.ly/3ZDijAI) for the cloud-hosted beta.
### 🧱 AutoGPT Builder
### System Requirements
The AutoGPT Builder is the frontend. It allows you to design agents using an easy flowchart style. You build your agent by connecting blocks, where each block performs a single action. It's simple and intuitive!
Before proceeding with the installation, ensure your system meets the following requirements:
#### Hardware Requirements
- CPU: 4+ cores recommended
- RAM: Minimum 8GB, 16GB recommended
- Storage: At least 10GB of free space
#### Software Requirements
- Operating Systems:
- Linux (Ubuntu 20.04 or newer recommended)
- macOS (10.15 or newer)
- Windows 10/11 with WSL2
- Required Software (with minimum versions):
- Docker Engine (20.10.0 or newer)
- Docker Compose (2.0.0 or newer)
- Git (2.30 or newer)
- Node.js (16.x or newer)
- npm (8.x or newer)
- VSCode (1.60 or newer) or any modern code editor
#### Network Requirements
- Stable internet connection
- Access to required ports (will be configured in Docker)
- Ability to make outbound HTTPS connections
### Updated Setup Instructions:
We've moved to a fully maintained and regularly updated documentation site.
👉 [Follow the official self-hosting guide here](https://docs.agpt.co/platform/getting-started/)
This tutorial assumes you have Docker, VSCode, git and npm installed.
---
#### ⚡ Quick Setup with One-Line Script (Recommended for Local Hosting)
Skip the manual steps and get started in minutes using our automatic setup script.
For macOS/Linux:
```
curl -fsSL https://setup.agpt.co/install.sh -o install.sh && bash install.sh
```
For Windows (PowerShell):
```
powershell -c "iwr https://setup.agpt.co/install.bat -o install.bat; ./install.bat"
```
This will install dependencies, configure Docker, and launch your local instance — all in one go.
### 🧱 AutoGPT Frontend
The AutoGPT frontend is where users interact with our powerful AI automation platform. It offers multiple ways to engage with and leverage our AI agents. This is the interface where you'll bring your AI automation ideas to life:
**Agent Builder:** For those who want to customize, our intuitive, low-code interface allows you to design and configure your own AI agents.
**Workflow Management:** Build, modify, and optimize your automation workflows with ease. You build your agent by connecting blocks, where each block performs a single action.
**Deployment Controls:** Manage the lifecycle of your agents, from testing to production.
**Ready-to-Use Agents:** Don't want to build? Simply select from our library of pre-configured agents and put them to work immediately.
**Agent Interaction:** Whether you've built your own or are using pre-configured agents, easily run and interact with them through our user-friendly interface.
**Monitoring and Analytics:** Keep track of your agents' performance and gain insights to continually improve your automation processes.
[Read this guide](https://docs.agpt.co/platform/new_blocks/) to learn how to build your own custom blocks.
[Read this guide](https://docs.agpt.co/server/new_blocks/) to learn how to build your own custom blocks.
### 💽 AutoGPT Server
The AutoGPT Server is the powerhouse of our platform. This is where your agents run. Once deployed, agents can be triggered by external sources and can operate continuously. It contains all the essential components that make AutoGPT run smoothly.
**Source Code:** The core logic that drives our agents and automation processes.
**Infrastructure:** Robust systems that ensure reliable and scalable performance.
**Marketplace:** A comprehensive marketplace where you can find and deploy a wide range of pre-built agents.
The AutoGPT Server is the backend. This is where your agents run. Once deployed, agents can be triggered by external sources and can operate continuously.
### 🐙 Example Agents
Here are two examples of what you can do with AutoGPT:
1. **Generate Viral Videos from Trending Topics**
- This agent reads topics on Reddit.
- It identifies trending topics.
- It then automatically creates a short-form video based on the content.
1. **Reddit Marketing Agent**
- This agent reads comments on Reddit.
- It looks for people asking about your product.
- It then automatically responds to them.
2. **Identify Top Quotes from Videos for Social Media**
2. **YouTube Content Repurposing Agent**
- This agent subscribes to your YouTube channel.
- When you post a new video, it transcribes it.
- It uses AI to identify the most impactful quotes to generate a summary.
- Then, it writes a post to automatically publish to your social media.
- It uses AI to write a search engine optimized blog post.
- Then, it publishes this blog post to your Medium account.
These examples show just a glimpse of what you can achieve with AutoGPT! You can create customized workflows to build agents for any use case.
These examples show just a glimpse of what you can achieve with AutoGPT!
---
### **License Overview:**
🛡️ **Polyform Shield License:**
All code and content within the `autogpt_platform` folder is licensed under the Polyform Shield License. This new project is our in-development platform for building, deploying and managing agents.</br>_[Read more about this effort](https://agpt.co/blog/introducing-the-autogpt-platform)_
🦉 **MIT License:**
All other portions of the AutoGPT repository (i.e., everything outside the `autogpt_platform` folder) are licensed under the MIT License. This includes the original stand-alone AutoGPT Agent, along with projects such as [Forge](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/forge), [agbenchmark](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/benchmark) and the [AutoGPT Classic GUI](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/frontend).</br>We also publish additional work under the MIT Licence in other repositories, such as [GravitasML](https://github.com/Significant-Gravitas/gravitasml) which is developed for and used in the AutoGPT Platform. See also our MIT Licenced [Code Ability](https://github.com/Significant-Gravitas/AutoGPT-Code-Ability) project.
---
### Mission
Our mission is to provide the tools, so that you can focus on what matters:
- 🏗️ **Building** - Lay the foundation for something amazing.
@@ -146,20 +50,20 @@ Be part of the revolution! **AutoGPT** is here to stay, at the forefront of AI i
&ensp;|&ensp;
**🚀 [Contributing](CONTRIBUTING.md)**
---
## 🤖 AutoGPT Classic
> Below is information about the classic version of AutoGPT.
**🛠️ [Build your own Agent - Quickstart](classic/FORGE-QUICKSTART.md)**
**🛠️ [Build your own Agent - Quickstart](FORGE-QUICKSTART.md)**
### 🏗️ Forge
**Forge your own agent!** &ndash; Forge is a ready-to-go toolkit to build your own agent application. It handles most of the boilerplate code, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from [`forge`](/classic/forge/) can also be used individually to speed up development and reduce boilerplate in your agent project.
**Forge your own agent!** &ndash; Forge is a ready-to-go template for your agent application. All the boilerplate code is already handled, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from the [`forge.sdk`](/forge/sdk) can also be used individually to speed up development and reduce boilerplate in your agent project.
🚀 [**Getting Started with Forge**](https://github.com/Significant-Gravitas/AutoGPT/blob/master/classic/forge/tutorials/001_getting_started.md) &ndash;
This guide will walk you through the process of creating your own agent and using the benchmark and user interface.
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/forge) about Forge
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/forge) about Forge
### 🎯 Benchmark
@@ -167,9 +71,9 @@ This guide will walk you through the process of creating your own agent and usin
<!-- TODO: insert visual demonstrating the benchmark -->
📦 [`agbenchmark`](https://pypi.org/project/agbenchmark/) on Pypi
📦 [`agbenchmark`](https://pypi.org/project/agclassic/benchmark/) on Pypi
&ensp;|&ensp;
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/benchmark) about the Benchmark
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/blob/master/benchmark) about the Benchmark
### 💻 UI
@@ -179,7 +83,7 @@ This guide will walk you through the process of creating your own agent and usin
The frontend works out-of-the-box with all agents in the repo. Just use the [CLI] to run your agent of choice!
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/frontend) about the Frontend
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/frontend) about the Frontend
### ⌨️ CLI
@@ -208,7 +112,7 @@ Just clone the repo, install dependencies with `./run setup`, and you should be
[![Join us on Discord](https://invidget.switchblade.xyz/autogpt)](https://discord.gg/autogpt)
To report a bug or request a feature, create a [GitHub Issue](https://github.com/Significant-Gravitas/AutoGPT/issues/new/choose). Please ensure someone else hasn't created an issue for the same topic.
To report a bug or request a feature, create a [GitHub Issue](https://github.com/Significant-Gravitas/AutoGPT/issues/new/choose). Please ensure someone else hasnt created an issue for the same topic.
## 🤝 Sister projects
@@ -218,8 +122,6 @@ To maintain a uniform standard and ensure seamless compatibility with many curre
---
## Stars stats
<p align="center">
<a href="https://star-history.com/#Significant-Gravitas/AutoGPT">
<picture>
@@ -229,10 +131,3 @@ To maintain a uniform standard and ensure seamless compatibility with many curre
</picture>
</a>
</p>
## ⚡ Contributors
<a href="https://github.com/Significant-Gravitas/AutoGPT/graphs/contributors" alt="View Contributors">
<img src="https://contrib.rocks/image?repo=Significant-Gravitas/AutoGPT&max=1000&columns=10" alt="Contributors" />
</a>

View File

@@ -1,48 +0,0 @@
# Security Policy
## Reporting Security Issues
We take the security of our project seriously. If you believe you have found a security vulnerability, please report it to us privately. **Please do not report security vulnerabilities through public GitHub issues, discussions, or pull requests.**
> **Important Note**: Any code within the `classic/` folder is considered legacy, unsupported, and out of scope for security reports. We will not address security vulnerabilities in this deprecated code.
Instead, please report them via:
- [GitHub Security Advisory](https://github.com/Significant-Gravitas/AutoGPT/security/advisories/new)
<!--- [Huntr.dev](https://huntr.com/repos/significant-gravitas/autogpt) - where you may be eligible for a bounty-->
### Reporting Process
1. **Submit Report**: Use one of the above channels to submit your report
2. **Response Time**: Our team will acknowledge receipt of your report within 14 business days.
3. **Collaboration**: We will collaborate with you to understand and validate the issue
4. **Resolution**: We will work on a fix and coordinate the release process
### Disclosure Policy
- Please provide detailed reports with reproducible steps
- Include the version/commit hash where you discovered the vulnerability
- Allow us a 90-day security fix window before any public disclosure
- After patch is released, allow 30 days for users to update before public disclosure (for a total of 120 days max between update time and fix time)
- Share any potential mitigations or workarounds if known
## Supported Versions
Only the following versions are eligible for security updates:
| Version | Supported |
|---------|-----------|
| Latest release on master branch | ✅ |
| Development commits (pre-master) | ✅ |
| Classic folder (deprecated) | ❌ |
| All other versions | ❌ |
## Security Best Practices
When using this project:
1. Always use the latest stable version
2. Review security advisories before updating
3. Follow our security documentation and guidelines
4. Keep your dependencies up to date
5. Do not use code from the `classic/` folder as it is deprecated and unsupported
## Past Security Advisories
For a list of past security advisories, please visit our [Security Advisory Page](https://github.com/Significant-Gravitas/AutoGPT/security/advisories) and [Huntr Disclosures Page](https://huntr.com/repos/significant-gravitas/autogpt).
---
Last updated: November 2024

View File

@@ -1,123 +0,0 @@
############
# Secrets
# YOU MUST CHANGE THESE BEFORE GOING INTO PRODUCTION
############
POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password
JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long
ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE
SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
DASHBOARD_USERNAME=supabase
DASHBOARD_PASSWORD=this_password_is_insecure_and_should_be_updated
SECRET_KEY_BASE=UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
VAULT_ENC_KEY=your-encryption-key-32-chars-min
############
# Database - You can change these to any PostgreSQL database that has logical replication enabled.
############
POSTGRES_HOST=db
POSTGRES_DB=postgres
POSTGRES_PORT=5432
# default user is postgres
############
# Supavisor -- Database pooler
############
POOLER_PROXY_PORT_TRANSACTION=6543
POOLER_DEFAULT_POOL_SIZE=20
POOLER_MAX_CLIENT_CONN=100
POOLER_TENANT_ID=your-tenant-id
############
# API Proxy - Configuration for the Kong Reverse proxy.
############
KONG_HTTP_PORT=8000
KONG_HTTPS_PORT=8443
############
# API - Configuration for PostgREST.
############
PGRST_DB_SCHEMAS=public,storage,graphql_public
############
# Auth - Configuration for the GoTrue authentication server.
############
## General
SITE_URL=http://localhost:3000
ADDITIONAL_REDIRECT_URLS=
JWT_EXPIRY=3600
DISABLE_SIGNUP=false
API_EXTERNAL_URL=http://localhost:8000
## Mailer Config
MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify"
MAILER_URLPATHS_INVITE="/auth/v1/verify"
MAILER_URLPATHS_RECOVERY="/auth/v1/verify"
MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify"
## Email auth
ENABLE_EMAIL_SIGNUP=true
ENABLE_EMAIL_AUTOCONFIRM=false
SMTP_ADMIN_EMAIL=admin@example.com
SMTP_HOST=supabase-mail
SMTP_PORT=2500
SMTP_USER=fake_mail_user
SMTP_PASS=fake_mail_password
SMTP_SENDER_NAME=fake_sender
ENABLE_ANONYMOUS_USERS=false
## Phone auth
ENABLE_PHONE_SIGNUP=true
ENABLE_PHONE_AUTOCONFIRM=true
############
# Studio - Configuration for the Dashboard
############
STUDIO_DEFAULT_ORGANIZATION=Default Organization
STUDIO_DEFAULT_PROJECT=Default Project
STUDIO_PORT=3000
# replace if you intend to use Studio outside of localhost
SUPABASE_PUBLIC_URL=http://localhost:8000
# Enable webp support
IMGPROXY_ENABLE_WEBP_DETECTION=true
# Add your OpenAI API key to enable SQL Editor Assistant
OPENAI_API_KEY=
############
# Functions - Configuration for Functions
############
# NOTE: VERIFY_JWT applies to all functions. Per-function VERIFY_JWT is not supported yet.
FUNCTIONS_VERIFY_JWT=false
############
# Logs - Configuration for Logflare
# Please refer to https://supabase.com/docs/reference/self-hosting-analytics/introduction
############
LOGFLARE_LOGGER_BACKEND_API_KEY=your-super-secret-and-long-logflare-key
# Change vector.toml sinks to reflect this change
LOGFLARE_API_KEY=your-super-secret-and-long-logflare-key
# Docker socket location - this value will differ depending on your OS
DOCKER_SOCKET_LOCATION=/var/run/docker.sock
# Google Cloud Project details
GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID
GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER

View File

@@ -1,2 +0,0 @@
*.ignore.*
*.ign.*

View File

@@ -1,273 +0,0 @@
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
## Repository Overview
AutoGPT Platform is a monorepo containing:
- **Backend** (`/backend`): Python FastAPI server with async support
- **Frontend** (`/frontend`): Next.js React application
- **Shared Libraries** (`/autogpt_libs`): Common Python utilities
## Essential Commands
### Backend Development
```bash
# Install dependencies
cd backend && poetry install
# Run database migrations
poetry run prisma migrate dev
# Start all services (database, redis, rabbitmq, clamav)
docker compose up -d
# Run the backend server
poetry run serve
# Run tests
poetry run test
# Run specific test
poetry run pytest path/to/test_file.py::test_function_name
# Run block tests (tests that validate all blocks work correctly)
poetry run pytest backend/blocks/test/test_block.py -xvs
# Run tests for a specific block (e.g., GetCurrentTimeBlock)
poetry run pytest 'backend/blocks/test/test_block.py::test_available_blocks[GetCurrentTimeBlock]' -xvs
# Lint and format
# prefer format if you want to just "fix" it and only get the errors that can't be autofixed
poetry run format # Black + isort
poetry run lint # ruff
```
More details can be found in TESTING.md
#### Creating/Updating Snapshots
When you first write a test or when the expected output changes:
```bash
poetry run pytest path/to/test.py --snapshot-update
```
⚠️ **Important**: Always review snapshot changes before committing! Use `git diff` to verify the changes are expected.
### Frontend Development
```bash
# Install dependencies
cd frontend && pnpm i
# Generate API client from OpenAPI spec
pnpm generate:api
# Start development server
pnpm dev
# Run E2E tests
pnpm test
# Run Storybook for component development
pnpm storybook
# Build production
pnpm build
# Format and lint
pnpm format
# Type checking
pnpm types
```
**📖 Complete Guide**: See `/frontend/CONTRIBUTING.md` and `/frontend/.cursorrules` for comprehensive frontend patterns.
**Key Frontend Conventions:**
- Separate render logic from data/behavior in components
- Use generated API hooks from `@/app/api/__generated__/endpoints/`
- Use function declarations (not arrow functions) for components/handlers
- Use design system components from `src/components/` (atoms, molecules, organisms)
- Only use Phosphor Icons
- Never use `src/components/__legacy__/*` or deprecated `BackendAPI`
## Architecture Overview
### Backend Architecture
- **API Layer**: FastAPI with REST and WebSocket endpoints
- **Database**: PostgreSQL with Prisma ORM, includes pgvector for embeddings
- **Queue System**: RabbitMQ for async task processing
- **Execution Engine**: Separate executor service processes agent workflows
- **Authentication**: JWT-based with Supabase integration
- **Security**: Cache protection middleware prevents sensitive data caching in browsers/proxies
### Frontend Architecture
- **Framework**: Next.js 15 App Router (client-first approach)
- **Data Fetching**: Type-safe generated API hooks via Orval + React Query
- **State Management**: React Query for server state, co-located UI state in components/hooks
- **Component Structure**: Separate render logic (`.tsx`) from business logic (`use*.ts` hooks)
- **Workflow Builder**: Visual graph editor using @xyflow/react
- **UI Components**: shadcn/ui (Radix UI primitives) with Tailwind CSS styling
- **Icons**: Phosphor Icons only
- **Feature Flags**: LaunchDarkly integration
- **Error Handling**: ErrorCard for render errors, toast for mutations, Sentry for exceptions
- **Testing**: Playwright for E2E, Storybook for component development
### Key Concepts
1. **Agent Graphs**: Workflow definitions stored as JSON, executed by the backend
2. **Blocks**: Reusable components in `/backend/blocks/` that perform specific tasks
3. **Integrations**: OAuth and API connections stored per user
4. **Store**: Marketplace for sharing agent templates
5. **Virus Scanning**: ClamAV integration for file upload security
### Testing Approach
- Backend uses pytest with snapshot testing for API responses
- Test files are colocated with source files (`*_test.py`)
- Frontend uses Playwright for E2E tests
- Component testing via Storybook
### Database Schema
Key models (defined in `/backend/schema.prisma`):
- `User`: Authentication and profile data
- `AgentGraph`: Workflow definitions with version control
- `AgentGraphExecution`: Execution history and results
- `AgentNode`: Individual nodes in a workflow
- `StoreListing`: Marketplace listings for sharing agents
### Environment Configuration
#### Configuration Files
- **Backend**: `/backend/.env.default` (defaults) → `/backend/.env` (user overrides)
- **Frontend**: `/frontend/.env.default` (defaults) → `/frontend/.env` (user overrides)
- **Platform**: `/.env.default` (Supabase/shared defaults) → `/.env` (user overrides)
#### Docker Environment Loading Order
1. `.env.default` files provide base configuration (tracked in git)
2. `.env` files provide user-specific overrides (gitignored)
3. Docker Compose `environment:` sections provide service-specific overrides
4. Shell environment variables have highest precedence
#### Key Points
- All services use hardcoded defaults in docker-compose files (no `${VARIABLE}` substitutions)
- The `env_file` directive loads variables INTO containers at runtime
- Backend/Frontend services use YAML anchors for consistent configuration
- Supabase services (`db/docker/docker-compose.yml`) follow the same pattern
### Common Development Tasks
**Adding a new block:**
Follow the comprehensive [Block SDK Guide](../../../docs/content/platform/block-sdk-guide.md) which covers:
- Provider configuration with `ProviderBuilder`
- Block schema definition
- Authentication (API keys, OAuth, webhooks)
- Testing and validation
- File organization
Quick steps:
1. Create new file in `/backend/backend/blocks/`
2. Configure provider using `ProviderBuilder` in `_config.py`
3. Inherit from `Block` base class
4. Define input/output schemas using `BlockSchema`
5. Implement async `run` method
6. Generate unique block ID using `uuid.uuid4()`
7. Test with `poetry run pytest backend/blocks/test/test_block.py`
Note: when adding many new blocks, analyze the interface of each block and consider whether they would work well together in a graph-based editor or struggle to connect productively (e.g. do the inputs and outputs tie together well?).
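To make the quick steps concrete, here is a minimal sketch of what such a block might look like. The import path, base-class signatures, and `ReverseTextBlock` itself are assumptions modeled on the steps above, not an actual block in the repo; check existing blocks in `/backend/backend/blocks/` for the real interfaces before copying this.
```python
# Hypothetical example block, following the quick steps above.
# Import path and base-class signatures are assumptions.
from backend.data.block import Block, BlockOutput, BlockSchema


class ReverseTextBlock(Block):
    class Input(BlockSchema):
        text: str  # the string to reverse

    class Output(BlockSchema):
        reversed_text: str  # the input text, reversed

    def __init__(self):
        super().__init__(
            # Step 6: a unique, fixed ID — generate once with uuid.uuid4().
            id="00000000-0000-4000-8000-000000000000",
            description="Reverses the input text.",
            input_schema=ReverseTextBlock.Input,
            output_schema=ReverseTextBlock.Output,
        )

    # Step 5: async run method; blocks yield (output_name, value) pairs.
    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        yield "reversed_text", input_data.text[::-1]
```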
**Modifying the API:**
1. Update route in `/backend/backend/server/routers/`
2. Add/update Pydantic models in same directory
3. Write tests alongside the route file
4. Run `poetry run test` to verify
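As an illustration of steps 1–2, a new route and its Pydantic model might look like the sketch below. The file, endpoint, and model names are hypothetical, not actual routes in the repo:
```python
# Hypothetical route module for /backend/backend/server/routers/.
from fastapi import APIRouter
from pydantic import BaseModel

router = APIRouter()


class PingResponse(BaseModel):
    """Response model kept in the same directory as the route (step 2)."""

    message: str


@router.get("/ping", response_model=PingResponse)
async def ping() -> PingResponse:
    # Step 3: a colocated ping_test.py would assert on this response.
    return PingResponse(message="pong")
```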
**Frontend feature development:**
See `/frontend/CONTRIBUTING.md` for complete patterns. Quick reference:
1. **Pages**: Create in `src/app/(platform)/feature-name/page.tsx`
- Add `usePageName.ts` hook for logic
- Put sub-components in local `components/` folder
2. **Components**: Structure as `ComponentName/ComponentName.tsx` + `useComponentName.ts` + `helpers.ts`
- Use design system components from `src/components/` (atoms, molecules, organisms)
- Never use `src/components/__legacy__/*`
3. **Data fetching**: Use generated API hooks from `@/app/api/__generated__/endpoints/`
- Regenerate with `pnpm generate:api`
- Pattern: `use{Method}{Version}{OperationName}`
4. **Styling**: Tailwind CSS only, use design tokens, Phosphor Icons only
5. **Testing**: Add Storybook stories for new components, Playwright for E2E
6. **Code conventions**: Function declarations (not arrow functions) for components/handlers
### Security Implementation
**Cache Protection Middleware:**
- Located in `/backend/backend/server/middleware/security.py`
- Default behavior: Disables caching for ALL endpoints with `Cache-Control: no-store, no-cache, must-revalidate, private`
- Uses an allow list approach - only explicitly permitted paths can be cached
- Cacheable paths include: static assets (`/static/*`, `/_next/static/*`), health checks, public store pages, documentation
- Prevents sensitive data (auth tokens, API keys, user data) from being cached by browsers/proxies
- To allow caching for a new endpoint, add it to `CACHEABLE_PATHS` in the middleware
- Applied to both main API server and external API applications
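A minimal sketch of the allow-list idea described above (class name, paths, and structure are illustrative; the real implementation lives in `/backend/backend/server/middleware/security.py`):
```python
# Illustrative allow-list cache middleware — not the actual code in security.py.
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request

# Only explicitly permitted path prefixes may be cached (assumed values).
CACHEABLE_PATHS = ("/static/", "/_next/static/", "/health")


class CacheControlMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        response = await call_next(request)
        if not any(request.url.path.startswith(p) for p in CACHEABLE_PATHS):
            # Default: forbid caching of everything not on the allow list.
            response.headers["Cache-Control"] = (
                "no-store, no-cache, must-revalidate, private"
            )
        return response
```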
### Creating Pull Requests
- Create the PR against the `dev` branch of the repository.
- Ensure the branch name is descriptive (e.g., `feature/add-new-block`).
- Use conventional commit messages (see below).
- Fill out the .github/PULL_REQUEST_TEMPLATE.md template as the PR description.
- Run the GitHub pre-commit hooks to ensure code quality.
### Reviewing/Revising Pull Requests
- When the user runs /pr-comments or tries to fetch them, also run gh api /repos/Significant-Gravitas/AutoGPT/pulls/[issuenum]/reviews to get the reviews
- Use gh api /repos/Significant-Gravitas/AutoGPT/pulls/[issuenum]/reviews/[review_id]/comments to get the review contents
- Use gh api /repos/Significant-Gravitas/AutoGPT/issues/9924/comments to get the PR-specific comments
### Conventional Commits
Use this format for commit messages and Pull Request titles:
**Conventional Commit Types:**
- `feat`: Introduces a new feature to the codebase
- `fix`: Patches a bug in the codebase
- `refactor`: Code change that neither fixes a bug nor adds a feature; also applies to removing features
- `ci`: Changes to CI configuration
- `docs`: Documentation-only changes
- `dx`: Improvements to the developer experience
**Recommended Base Scopes:**
- `platform`: Changes affecting both frontend and backend
- `frontend`
- `backend`
- `infra`
- `blocks`: Modifications/additions of individual blocks
**Subscope Examples:**
- `backend/executor`
- `backend/db`
- `frontend/builder` (includes changes to the block UI component)
- `infra/prod`
Use these scopes and subscopes for clarity and consistency in commit messages.
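Putting the types and scopes together, messages follow the `type(scope): summary` shape. The summaries below are invented examples, not real commits:

```
feat(backend/executor): add retry policy for failed node runs
fix(frontend/builder): prevent crash when a node has no outputs
docs(platform): document the cache-control allow list
ci(infra): run the repo workflow checker on pull requests
```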

View File

@@ -1,21 +0,0 @@
**Determinist Ltd**
**Contributor License Agreement (“Agreement”)**
Thank you for your interest in the AutoGPT open source project at [https://github.com/Significant-Gravitas/AutoGPT](https://github.com/Significant-Gravitas/AutoGPT) stewarded by Determinist Ltd (“**Determinist**”), with offices at 3rd Floor 1 Ashley Road, Altrincham, Cheshire, WA14 2DT, United Kingdom. The form of license below is a document that clarifies the terms under which You, the person listed below, may contribute software code described below (the “**Contribution**”) to the project. We appreciate your participation in our project, and your help in improving our products, so we want you to understand what will be done with the Contributions. This license is for your protection as well as the protection of Determinist and its licensees; it does not change your rights to use your own Contributions for any other purpose.
By submitting a Pull Request which modifies the content of the “autogpt\_platform” folder at [https://github.com/Significant-Gravitas/AutoGPT/tree/master/autogpt\_platform](https://github.com/Significant-Gravitas/AutoGPT/tree/master/autogpt_platform), You hereby agree:
1\. **You grant us the ability to use the Contributions in any way**. You hereby grant to Determinist a non-exclusive, irrevocable, worldwide, royalty-free, sublicenseable, transferable license under all of Your relevant intellectual property rights (including copyright, patent, and any other rights), to use, copy, prepare derivative works of, distribute and publicly perform and display the Contributions on any licensing terms, including without limitation: (a) open source licenses like the GNU General Public License (GPL), the GNU Lesser General Public License (LGPL), the Common Public License, or the Berkeley Science Division license (BSD); and (b) binary, proprietary, or commercial licenses.
2\. **Grant of Patent License**. You hereby grant to Determinist a worldwide, non-exclusive, royalty-free, irrevocable, license, under any rights you may have, now or in the future, in any patents or patent applications, to make, have made, use, offer to sell, sell, and import products containing the Contribution or portions of the Contribution. This license extends to patent claims that are infringed by the Contribution alone or by combination of the Contribution with other inventions.
4\. **Limitations on Licenses**. The licenses granted in this Agreement will continue for the duration of the applicable patent or intellectual property right under which such license is granted. The licenses granted in this Agreement will include the right to grant and authorize sublicenses, so long as the sublicenses are within the scope of the licenses granted in this Agreement. Except for the licenses granted herein, You reserve all right, title, and interest in and to the Contribution.
5\. **You are able to grant us these rights**. You represent that You are legally entitled to grant the above license. If Your employer has rights to intellectual property that You create, You represent that You are authorized to make the Contributions on behalf of that employer, or that Your employer has waived such rights for the Contributions.
3\. **The Contributions are your original work**. You represent that the Contributions are Your original works of authorship, and to Your knowledge, no other person claims, or has the right to claim, any right in any invention or patent related to the Contributions. You also represent that You are not legally obligated, whether by entering into an agreement or otherwise, in any way that conflicts with the terms of this license. For example, if you have signed an agreement requiring you to assign the intellectual property rights in the Contributions to an employer or customer, that would conflict with the terms of this license.
6\. **We determine the code that is in our products**. You understand that the decision to include the Contribution in any product or source repository is entirely that of Determinist, and this agreement does not guarantee that the Contributions will be included in any product.
7\. **No Implied Warranties.** Determinist acknowledges that, except as explicitly described in this Agreement, the Contribution is provided on an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.

View File

@@ -1,164 +0,0 @@
# PolyForm Shield License 1.0.0
<https://polyformproject.org/licenses/shield/1.0.0>
## Acceptance
In order to get any license under these terms, you must agree
to them as both strict obligations and conditions to all
your licenses.
## Copyright License
The licensor grants you a copyright license for the
software to do everything you might do with the software
that would otherwise infringe the licensor's copyright
in it for any permitted purpose. However, you may
only distribute the software according to [Distribution
License](#distribution-license) and make changes or new works
based on the software according to [Changes and New Works
License](#changes-and-new-works-license).
## Distribution License
The licensor grants you an additional copyright license
to distribute copies of the software. Your license
to distribute covers distributing the software with
changes and new works permitted by [Changes and New Works
License](#changes-and-new-works-license).
## Notices
You must ensure that anyone who gets a copy of any part of
the software from you also gets a copy of these terms or the
URL for them above, as well as copies of any plain-text lines
beginning with `Required Notice:` that the licensor provided
with the software. For example:
> Required Notice: Copyright Yoyodyne, Inc. (http://example.com)
## Changes and New Works License
The licensor grants you an additional copyright license to
make changes and new works based on the software for any
permitted purpose.
## Patent License
The licensor grants you a patent license for the software that
covers patent claims the licensor can license, or becomes able
to license, that you would infringe by using the software.
## Noncompete
Any purpose is a permitted purpose, except for providing any
product that competes with the software or any product the
licensor or any of its affiliates provides using the software.
## Competition
Goods and services compete even when they provide functionality
through different kinds of interfaces or for different technical
platforms. Applications can compete with services, libraries
with plugins, frameworks with development tools, and so on,
even if they're written in different programming languages
or for different computer architectures. Goods and services
compete even when provided free of charge. If you market a
product as a practical substitute for the software or another
product, it definitely competes.
## New Products
If you are using the software to provide a product that does
not compete, but the licensor or any of its affiliates brings
your product into competition by providing a new version of
the software or another product using the software, you may
continue using versions of the software available under these
terms beforehand to provide your competing product, but not
any later versions.
## Discontinued Products
You may begin using the software to compete with a product
or service that the licensor or any of its affiliates has
stopped providing, unless the licensor includes a plain-text
line beginning with `Licensor Line of Business:` with the
software that mentions that line of business. For example:
> Licensor Line of Business: YoyodyneCMS Content Management
System (http://example.com/cms)
## Sales of Business
If the licensor or any of its affiliates sells a line of
business developing the software or using the software
to provide a product, the buyer can also enforce
[Noncompete](#noncompete) for that product.
## Fair Use
You may have "fair use" rights for the software under the
law. These terms do not limit them.
## No Other Rights
These terms do not allow you to sublicense or transfer any of
your licenses to anyone else, or prevent the licensor from
granting licenses to anyone else. These terms do not imply
any other licenses.
## Patent Defense
If you make any written claim that the software infringes or
contributes to infringement of any patent, your patent license
for the software granted under these terms ends immediately. If
your company makes such a claim, your patent license ends
immediately for work on behalf of your company.
## Violations
The first time you are notified in writing that you have
violated any of these terms, or done anything with the software
not covered by your licenses, your licenses can nonetheless
continue if you come into full compliance with these terms,
and take practical steps to correct past violations, within
32 days of receiving notice. Otherwise, all your licenses
end immediately.
## No Liability
***As far as the law allows, the software comes as is, without
any warranty or condition, and the licensor will not be liable
to you for any damages arising out of these terms or the use
or nature of the software, under any kind of legal claim.***
## Definitions
The **licensor** is the individual or entity offering these
terms, and the **software** is the software the licensor makes
available under these terms.
A **product** can be a good or service, or a combination
of them.
**You** refers to the individual or entity agreeing to these
terms.
**Your company** is any legal entity, sole proprietorship,
or other kind of organization that you work for, plus all
its affiliates.
**Affiliates** means the other organizations than an
organization has control over, is under the control of, or is
under common control with.
**Control** means ownership of substantially all the assets of
an entity, or the power to direct its management and policies
by vote, contract, or otherwise. Control can be direct or
indirect.
**Your licenses** are all the licenses granted to you for the
software under these terms.
**Use** means anything you do with the software requiring one
of your licenses.

View File

@@ -1,57 +0,0 @@
.PHONY: start-core stop-core logs-core format lint migrate run-backend run-frontend
# Run just Supabase + Redis + RabbitMQ
start-core:
docker compose up -d deps
# Stop core services
stop-core:
docker compose stop deps
reset-db:
rm -rf db/docker/volumes/db/data
cd backend && poetry run prisma migrate deploy
cd backend && poetry run prisma generate
# View logs for core services
logs-core:
docker compose logs -f deps
# Run formatting and linting for backend and frontend
format:
cd backend && poetry run format
cd frontend && pnpm format
cd frontend && pnpm lint
init-env:
cp -n .env.default .env || true
cd backend && cp -n .env.default .env || true
cd frontend && cp -n .env.default .env || true
# Run migrations for backend
migrate:
cd backend && poetry run prisma migrate deploy
cd backend && poetry run prisma generate
run-backend:
cd backend && poetry run app
run-frontend:
cd frontend && pnpm dev
test-data:
cd backend && poetry run python test/test_data_creator.py
help:
@echo "Usage: make <target>"
@echo "Targets:"
@echo " start-core - Start just the core services (Supabase, Redis, RabbitMQ) in background"
@echo " stop-core - Stop the core services"
@echo " reset-db - Reset the database by deleting the volume"
@echo " logs-core - Tail the logs for core services"
@echo " format - Format & lint backend (Python) and frontend (TypeScript) code"
@echo " migrate - Run backend database migrations"
@echo " run-backend - Run the backend FastAPI server"
@echo " run-frontend - Run the frontend Next.js development server"
@echo " test-data - Run the test data creator"

View File

@@ -13,119 +13,83 @@ Welcome to the AutoGPT Platform - a powerful system for creating and running AI
To run the AutoGPT Platform, follow these steps:
1. Clone this repository to your local machine and navigate to the `autogpt_platform` directory within the repository:
```
git clone <https://github.com/Significant-Gravitas/AutoGPT.git | git@github.com:Significant-Gravitas/AutoGPT.git>
cd AutoGPT/autogpt_platform
```
2. Run the following command:
```
cp .env.default .env
```
This command will copy the `.env.default` file to `.env`. You can modify the `.env` file to add your own environment variables.
1. Clone this repository to your local machine.
2. Navigate to autogpt_platform/supabase
3. Run the following command:
```
git submodule update --init --recursive
```
4. Navigate back to rnd (cd ..)
5. Run the following command:
```
cp supabase/docker/.env.example .env
```
6. Run the following command:
```
docker compose up -d
docker compose -f docker-compose.combined.yml up -d
```
This command will start all the necessary backend services defined in the `docker-compose.yml` file in detached mode.
4. After all the services are in ready state, open your browser and navigate to `http://localhost:3000` to access the AutoGPT Platform frontend.
### Running Just Core services
You can now run the following to enable just the core services.
```
# For help
make help
# Run just Supabase + Redis + RabbitMQ
make start-core
# Stop core services
make stop-core
# View logs from core services
make logs-core
# Run formatting and linting for backend and frontend
make format
# Run migrations for backend database
make migrate
# Run backend server
make run-backend
# Run frontend development server
make run-frontend
```
This command will start all the necessary backend services defined in the `docker-compose.combined.yml` file in detached mode.
7. Navigate to autogpt_platform/autogpt_builder.
8. Run the following command:
```
cp .env.example .env.local
```
9. Run the following command:
```
yarn dev
```
### Docker Compose Commands
Here are some useful Docker Compose commands for managing your AutoGPT Platform:
- `docker compose up -d`: Start the services in detached mode.
- `docker compose stop`: Stop the running services without removing them.
- `docker compose -f docker-compose.combined.yml up -d`: Start the services in detached mode.
- `docker compose -f docker-compose.combined.yml stop`: Stop the running services without removing them.
- `docker compose rm`: Remove stopped service containers.
- `docker compose build`: Build or rebuild services.
- `docker compose down`: Stop and remove containers, networks, and volumes.
- `docker compose watch`: Watch for changes in your services and automatically update them.
### Sample Scenarios
Here are some common scenarios where you might use multiple Docker Compose commands:
1. Updating and restarting a specific service:
```
docker compose build api_srv
docker compose up -d --no-deps api_srv
```
This rebuilds the `api_srv` service and restarts it without affecting other services.
2. Viewing logs for troubleshooting:
```
docker compose logs -f api_srv ws_srv
```
This shows and follows the logs for both `api_srv` and `ws_srv` services.
3. Scaling a service for increased load:
```
docker compose up -d --scale executor=3
```
This scales the `executor` service to 3 instances to handle increased load.
4. Stopping the entire system for maintenance:
```
docker compose stop
docker compose rm -f
docker compose pull
docker compose up -d
```
This stops all services, removes containers, pulls the latest images, and restarts the system.
5. Developing with live updates:
```
docker compose watch
```
This watches for changes in your code and automatically updates the relevant services.
6. Checking the status of services:
@@ -136,6 +100,7 @@ Here are some common scenarios where you might use multiple Docker Compose comma
These scenarios demonstrate how to use Docker Compose commands in combination to manage your AutoGPT Platform effectively.
### Persisting Data
To persist data for PostgreSQL and Redis, you can modify the `docker-compose.yml` file to add volumes. Here's how:
@@ -164,27 +129,5 @@ To persist data for PostgreSQL and Redis, you can modify the `docker-compose.yml
This configuration will create named volumes for PostgreSQL and Redis, ensuring that your data persists across container restarts.
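The volume configuration itself is elided from this diff; a generic docker-compose shape (service and volume names are illustrative, not taken from the file) looks like:

```yaml
services:
  db:
    image: postgres:15
    volumes:
      - postgres-data:/var/lib/postgresql/data
  redis:
    image: redis:7
    volumes:
      - redis-data:/data

# Named volumes survive container removal and recreation.
volumes:
  postgres-data:
  redis-data:
```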
### API Client Generation
The platform includes scripts for generating and managing the API client:
- `pnpm fetch:openapi`: Fetches the OpenAPI specification from the backend service (requires backend to be running on port 8006)
- `pnpm generate:api-client`: Generates the TypeScript API client from the OpenAPI specification using Orval
- `pnpm generate:api`: Runs both fetch and generate commands in sequence
#### Manual API Client Updates
If you need to update the API client after making changes to the backend API:
1. Ensure the backend services are running:
```
docker compose up -d
```
2. Generate the updated API client:
```
pnpm generate:api
```
This will fetch the latest OpenAPI specification and regenerate the TypeScript client code.

View File

@@ -0,0 +1,15 @@
NEXT_PUBLIC_AUTH_CALLBACK_URL=http://localhost:8006/auth/callback
NEXT_PUBLIC_AGPT_SERVER_URL=http://localhost:8006/api
NEXT_PUBLIC_AGPT_WS_SERVER_URL=ws://localhost:8001/ws
NEXT_PUBLIC_AGPT_MARKETPLACE_URL=http://localhost:8015/api/v1/market
## Supabase credentials
NEXT_PUBLIC_SUPABASE_URL=http://localhost:8000
NEXT_PUBLIC_SUPABASE_ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE
## OAuth Callback URL
## This should be {domain}/auth/callback
## Only used if you're using Supabase and OAuth
AUTH_CALLBACK_URL=http://localhost:3000/auth/callback
GA_MEASUREMENT_ID=G-FH2XK2W4GN

View File

@@ -0,0 +1,3 @@
{
"extends": "next/core-web-vitals"
}

View File

@@ -22,16 +22,10 @@
# debug
npm-debug.log*
pnpm-debug.log*
yarn-debug.log*
yarn-error.log*
# lock files (from yarn1 or npm)
yarn.lock
package-lock.json
# local env files
.env
.env*.local
# vercel
@@ -43,18 +37,3 @@ next-env.d.ts
# Sentry Config File
.env.sentry-build-plugin
node_modules/
/test-results/
/playwright-report/
/blob-report/
/playwright/.cache/
*storybook.log
storybook-static
*.ignore.*
*.ign.*
!.npmrc
.cursorrules
# Generated API files
src/app/api/__generated__/

View File

@@ -0,0 +1,4 @@
node_modules
.next
build
public

View File

@@ -0,0 +1,32 @@
# Base stage for both dev and prod
FROM node:21-alpine AS base
WORKDIR /app
COPY autogpt_platform/autogpt_builder/package.json autogpt_platform/autogpt_builder/yarn.lock ./
RUN yarn install --frozen-lockfile
# Dev stage
FROM base AS dev
ENV NODE_ENV=development
COPY autogpt_platform/autogpt_builder/ .
EXPOSE 3000
CMD ["yarn", "run", "dev"]
# Build stage for prod
FROM base AS build
COPY autogpt_platform/autogpt_builder/ .
RUN npm run build
# Prod stage
FROM node:21-alpine AS prod
ENV NODE_ENV=production
WORKDIR /app
COPY --from=build /app/package.json /app/yarn.lock ./
RUN yarn install --frozen-lockfile
COPY --from=build /app/.next ./.next
COPY --from=build /app/public ./public
COPY --from=build /app/next.config.mjs ./next.config.mjs
EXPOSE 3000
CMD ["npm", "start"]

View File

@@ -0,0 +1,41 @@
This is the frontend for AutoGPT's next generation
## Getting Started
Run the following installation once.
```bash
npm install
# or
yarn install
# or
pnpm install
# or
bun install
```
Next, run the development server:
```bash
npm run dev
# or
yarn dev
# or
pnpm dev
# or
bun dev
```
Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
You can start editing the page by modifying `app/page.tsx`. The page auto-updates as you edit the file.
For subsequent runs, you do not have to `npm install` again. Simply do `npm run dev`.
If the project is updated via git, you will need to `npm install` after each update.
This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font.
## Deploy
TODO

View File

@@ -0,0 +1,84 @@
import { withSentryConfig } from "@sentry/nextjs";
import dotenv from "dotenv";
// Load environment variables
dotenv.config();
/** @type {import('next').NextConfig} */
const nextConfig = {
env: {
NEXT_PUBLIC_AGPT_SERVER_URL: process.env.NEXT_PUBLIC_AGPT_SERVER_URL,
NEXT_PUBLIC_AGPT_MARKETPLACE_URL:
process.env.NEXT_PUBLIC_AGPT_MARKETPLACE_URL,
},
images: {
domains: ["images.unsplash.com"],
},
async redirects() {
return [
{
source: "/monitor", // FIXME: Remove after 2024-09-01
destination: "/",
permanent: false,
},
];
},
// TODO: Re-enable TypeScript checks once current issues are resolved
typescript: {
ignoreBuildErrors: true,
},
};
export default withSentryConfig(nextConfig, {
// For all available options, see:
// https://github.com/getsentry/sentry-webpack-plugin#options
org: "significant-gravitas",
project: "builder",
// Only print logs for uploading source maps in CI
silent: !process.env.CI,
// For all available options, see:
// https://docs.sentry.io/platforms/javascript/guides/nextjs/manual-setup/
// Upload a larger set of source maps for prettier stack traces (increases build time)
widenClientFileUpload: true,
// Automatically annotate React components to show their full name in breadcrumbs and session replay
reactComponentAnnotation: {
enabled: true,
},
// Route browser requests to Sentry through a Next.js rewrite to circumvent ad-blockers.
// This can increase your server load as well as your hosting bill.
// Note: Check that the configured route will not match with your Next.js middleware, otherwise reporting of client-
// side errors will fail.
tunnelRoute: "/monitoring",
// Hides source maps from generated client bundles
hideSourceMaps: true,
// Automatically tree-shake Sentry logger statements to reduce bundle size
disableLogger: true,
// Enables automatic instrumentation of Vercel Cron Monitors. (Does not yet work with App Router route handlers.)
// See the following for more information:
// https://docs.sentry.io/product/crons/
// https://vercel.com/docs/cron-jobs
automaticVercelMonitors: true,
async headers() {
return [
{
source: "/:path*",
headers: [
{
key: "Document-Policy",
value: "js-profiling",
},
],
},
];
},
});

View File

@@ -0,0 +1,72 @@
{
"name": "autogpt_builder",
"version": "0.1.0",
"private": true,
"scripts": {
"dev": "next dev",
"build": "next build",
"start": "next start",
"lint": "next lint",
"format": "prettier --write ."
},
"dependencies": {
"@hookform/resolvers": "^3.9.0",
"@next/third-parties": "^14.2.5",
"@radix-ui/react-avatar": "^1.1.0",
"@radix-ui/react-checkbox": "^1.1.1",
"@radix-ui/react-collapsible": "^1.1.0",
"@radix-ui/react-dialog": "^1.1.1",
"@radix-ui/react-dropdown-menu": "^2.1.1",
"@radix-ui/react-icons": "^1.3.0",
"@radix-ui/react-label": "^2.1.0",
"@radix-ui/react-popover": "^1.1.1",
"@radix-ui/react-scroll-area": "^1.1.0",
"@radix-ui/react-select": "^2.1.1",
"@radix-ui/react-separator": "^1.1.0",
"@radix-ui/react-slot": "^1.1.0",
"@radix-ui/react-switch": "^1.1.0",
"@radix-ui/react-toast": "^1.2.1",
"@radix-ui/react-tooltip": "^1.1.2",
"@sentry/nextjs": "^8",
"@supabase/ssr": "^0.4.0",
"@supabase/supabase-js": "^2.45.0",
"@tanstack/react-table": "^8.20.5",
"@xyflow/react": "^12.1.0",
"ajv": "^8.17.1",
"class-variance-authority": "^0.7.0",
"clsx": "^2.1.1",
"cmdk": "1.0.0",
"date-fns": "^3.6.0",
"dotenv": "^16.4.5",
"lucide-react": "^0.407.0",
"moment": "^2.30.1",
"next": "14.2.4",
"next-themes": "^0.3.0",
"react": "^18",
"react-day-picker": "^8.10.1",
"react-dom": "^18",
"react-hook-form": "^7.52.1",
"react-icons": "^5.2.1",
"react-markdown": "^9.0.1",
"react-modal": "^3.16.1",
"react-shepherd": "^6.1.1",
"recharts": "^2.12.7",
"tailwind-merge": "^2.3.0",
"tailwindcss-animate": "^1.0.7",
"uuid": "^10.0.0",
"zod": "^3.23.8"
},
"devDependencies": {
"@types/node": "^20",
"@types/react": "^18",
"@types/react-dom": "^18",
"@types/react-modal": "^3.16.3",
"eslint": "^8",
"eslint-config-next": "14.2.4",
"postcss": "^8",
"prettier": "^3.3.3",
"prettier-plugin-tailwindcss": "^0.6.6",
"tailwindcss": "^3.4.1",
"typescript": "^5"
}
}

Binary file not shown (new image, 29 KiB).

Binary file not shown (new image, 28 KiB).

View File

Binary image replaced (15 KiB before, 15 KiB after).

View File

@@ -2,34 +2,18 @@
// The config you add here will be used whenever a user loads a page in their browser.
// https://docs.sentry.io/platforms/javascript/guides/nextjs/
import { environment } from "@/services/environment";
import * as Sentry from "@sentry/nextjs";
const isProdOrDev = environment.isProd() || environment.isDev();
const isCloud = environment.isCloud();
const isDisabled = process.env.DISABLE_SENTRY === "true";
const shouldEnable = !isDisabled && isProdOrDev && isCloud;
Sentry.init({
dsn: "https://fe4e4aa4a283391808a5da396da20159@o4505260022104064.ingest.us.sentry.io/4507946746380288",
environment: environment.getEnvironmentStr(),
enabled: shouldEnable,
// Add optional integrations for additional features
integrations: [
Sentry.captureConsoleIntegration(),
Sentry.extraErrorDataIntegration(),
Sentry.browserProfilingIntegration(),
Sentry.replayIntegration(),
Sentry.httpClientIntegration(),
Sentry.launchDarklyIntegration(),
Sentry.replayIntegration({
unmask: [".sentry-unmask, [data-sentry-unmask]"],
}),
Sentry.replayCanvasIntegration(),
Sentry.reportingObserverIntegration(),
Sentry.browserProfilingIntegration(),
// Sentry.feedbackIntegration({
// // Additional SDK configuration goes in here, for example:
// colorScheme: "system",
@@ -42,11 +26,17 @@ Sentry.init({
// Set `tracePropagationTargets` to control for which URLs trace propagation should be enabled
tracePropagationTargets: [
"localhost",
"localhost:8006",
/^https:\/\/dev\-builder\.agpt\.co\/api/,
/^https:\/\/.*\.agpt\.co\/api/,
],
beforeSend(event, hint) {
// Check if it is an exception, and if so, show the report dialog
if (event.exception && event.event_id) {
Sentry.showReportDialog({ eventId: event.event_id });
}
return event;
},
// Define how likely Replay events are sampled.
// This sets the sample rate to be 10%. You may want this to be 100% while
// in development and sample at a lower rate in production
@@ -64,7 +54,4 @@ Sentry.init({
// For example, a tracesSampleRate of 0.5 and profilesSampleRate of 0.5 would
// result in 25% of transactions being profiled (0.5*0.5=0.25)
profilesSampleRate: 1.0,
enableLogs: true,
});
export const onRouterTransitionStart = Sentry.captureRouterTransitionStart;

View File

@@ -3,37 +3,14 @@
// Note that this config is unrelated to the Vercel Edge Runtime and is also required when running locally.
// https://docs.sentry.io/platforms/javascript/guides/nextjs/
import { environment } from "@/services/environment";
import * as Sentry from "@sentry/nextjs";
const isProdOrDev = environment.isProd() || environment.isDev();
const isCloud = environment.isCloud();
const isDisabled = process.env.DISABLE_SENTRY === "true";
const shouldEnable = !isDisabled && isProdOrDev && isCloud;
Sentry.init({
dsn: "https://fe4e4aa4a283391808a5da396da20159@o4505260022104064.ingest.us.sentry.io/4507946746380288",
environment: environment.getEnvironmentStr(),
enabled: shouldEnable,
// Define how likely traces are sampled. Adjust this value in production, or use tracesSampler for greater control.
tracesSampleRate: 1,
tracePropagationTargets: [
"localhost",
"localhost:8006",
/^https:\/\/dev\-builder\.agpt\.co\/api/,
/^https:\/\/.*\.agpt\.co\/api/,
],
// Setting this option to true will print useful information to the console while you're setting up Sentry.
debug: false,
enableLogs: true,
integrations: [
Sentry.captureConsoleIntegration({ levels: ["fatal", "error", "warn"] }),
Sentry.extraErrorDataIntegration(),
],
});

View File

@@ -2,31 +2,14 @@
// The config you add here will be used whenever the server handles a request.
// https://docs.sentry.io/platforms/javascript/guides/nextjs/
import { environment } from "@/services/environment";
import * as Sentry from "@sentry/nextjs";
// import { NodeProfilingIntegration } from "@sentry/profiling-node";
const isProdOrDev = environment.isProd() || environment.isDev();
const isCloud = environment.isCloud();
const isDisabled = process.env.DISABLE_SENTRY === "true";
const shouldEnable = !isDisabled && isProdOrDev && isCloud;
Sentry.init({
dsn: "https://fe4e4aa4a283391808a5da396da20159@o4505260022104064.ingest.us.sentry.io/4507946746380288",
environment: environment.getEnvironmentStr(),
enabled: shouldEnable,
// Define how likely traces are sampled. Adjust this value in production, or use tracesSampler for greater control.
tracesSampleRate: 1,
tracePropagationTargets: [
"localhost",
"localhost:8006",
/^https:\/\/dev\-builder\.agpt\.co\/api/,
/^https:\/\/.*\.agpt\.co\/api/,
],
// Setting this option to true will print useful information to the console while you're setting up Sentry.
debug: false,
@@ -35,9 +18,6 @@ Sentry.init({
integrations: [
Sentry.anrIntegration(),
// NodeProfilingIntegration,
Sentry.extraErrorDataIntegration(),
// Sentry.fsIntegration(),
],
enableLogs: true,
});

View File

@@ -0,0 +1,100 @@
"use client";
import { useState } from "react";
import Link from "next/link";
import { BinaryIcon, XIcon } from "lucide-react";
import { usePathname } from "next/navigation"; // Add this import
const tabs = [
{ name: "Dashboard", href: "/admin/dashboard" },
{ name: "Marketplace", href: "/admin/marketplace" },
{ name: "Users", href: "/admin/users" },
{ name: "Settings", href: "/admin/settings" },
];
export default function AdminLayout({
children,
}: {
children: React.ReactNode;
}) {
const pathname = usePathname(); // Get the current pathname
const [activeTab, setActiveTab] = useState(() => {
// Set active tab based on the current route
return tabs.find((tab) => tab.href === pathname)?.name || tabs[0].name;
});
const [mobileMenuOpen, setMobileMenuOpen] = useState(false);
return (
<div className="min-h-screen bg-gray-100">
<nav className="bg-white shadow-sm">
<div className="max-w-10xl mx-auto px-4 sm:px-6 lg:px-8">
<div className="flex h-16 items-center justify-between">
<div className="flex items-center">
<div className="flex-shrink-0">
<h1 className="text-xl font-bold">Admin Panel</h1>
</div>
<div className="hidden sm:ml-6 sm:flex sm:space-x-8">
{tabs.map((tab) => (
<Link
key={tab.name}
href={tab.href}
className={`${
activeTab === tab.name
? "border-indigo-500 text-indigo-600"
: "border-transparent text-gray-500 hover:border-gray-300 hover:text-gray-700"
} inline-flex items-center border-b-2 px-1 pt-1 text-sm font-medium`}
onClick={() => setActiveTab(tab.name)}
>
{tab.name}
</Link>
))}
</div>
</div>
<div className="sm:hidden">
<button
type="button"
className="inline-flex items-center justify-center rounded-md p-2 text-gray-400 hover:bg-gray-100 hover:text-gray-500 focus:outline-none focus:ring-2 focus:ring-inset focus:ring-indigo-500"
onClick={() => setMobileMenuOpen(!mobileMenuOpen)}
>
<span className="sr-only">Open main menu</span>
{mobileMenuOpen ? (
<XIcon className="block h-6 w-6" aria-hidden="true" />
) : (
<BinaryIcon className="block h-6 w-6" aria-hidden="true" />
)}
</button>
</div>
</div>
</div>
{mobileMenuOpen && (
<div className="sm:hidden">
<div className="space-y-1 pb-3 pt-2">
{tabs.map((tab) => (
<Link
key={tab.name}
href={tab.href}
className={`${
activeTab === tab.name
? "border-indigo-500 bg-indigo-50 text-indigo-700"
: "border-transparent text-gray-600 hover:border-gray-300 hover:bg-gray-50 hover:text-gray-800"
} block border-l-4 py-2 pl-3 pr-4 text-base font-medium`}
onClick={() => {
setActiveTab(tab.name);
setMobileMenuOpen(false);
}}
>
{tab.name}
</Link>
))}
</div>
</div>
)}
</nav>
<main className="py-10">
<div className="mx-auto max-w-7xl px-4 sm:px-6 lg:px-8">{children}</div>
</main>
</div>
);
}

View File

@@ -0,0 +1,25 @@
import { withRoleAccess } from "@/lib/withRoleAccess";
import React from "react";
import { getReviewableAgents } from "@/components/admin/marketplace/actions";
import AdminMarketplaceAgentList from "@/components/admin/marketplace/AdminMarketplaceAgentList";
import AdminFeaturedAgentsControl from "@/components/admin/marketplace/AdminFeaturedAgentsControl";
import { Separator } from "@/components/ui/separator";
async function AdminMarketplace() {
const reviewableAgents = await getReviewableAgents();
return (
<>
<AdminMarketplaceAgentList agents={reviewableAgents.agents} />
<Separator className="my-4" />
<AdminFeaturedAgentsControl className="mt-4" />
</>
);
}
export default async function AdminDashboardPage() {
"use server";
const withAdminAccess = await withRoleAccess(["admin"]);
const ProtectedAdminMarketplace = await withAdminAccess(AdminMarketplace);
return <ProtectedAdminMarketplace />;
}

View File

@@ -0,0 +1,36 @@
"use client";
import { useEffect, useState } from "react";
export default function AuthErrorPage() {
const [errorType, setErrorType] = useState<string | null>(null);
const [errorCode, setErrorCode] = useState<string | null>(null);
const [errorDescription, setErrorDescription] = useState<string | null>(null);
useEffect(() => {
// This code only runs on the client side
if (typeof window !== "undefined") {
const hash = window.location.hash.substring(1); // Remove the leading '#'
const params = new URLSearchParams(hash);
setErrorType(params.get("error"));
setErrorCode(params.get("error_code"));
setErrorDescription(
params.get("error_description")?.replace(/\+/g, " ") ?? null,
); // Replace '+' with space
}
}, []);
if (!errorType && !errorCode && !errorDescription) {
return <div>Loading...</div>;
}
return (
<div>
<h1>Authentication Error</h1>
{errorType && <p>Error Type: {errorType}</p>}
{errorCode && <p>Error Code: {errorCode}</p>}
{errorDescription && <p>Error Description: {errorDescription}</p>}
</div>
);
}

View File

@@ -0,0 +1,36 @@
import { NextResponse } from "next/server";
import { createServerClient } from "@/lib/supabase/server";
// Handle the callback to complete the user session login
export async function GET(request: Request) {
const { searchParams, origin } = new URL(request.url);
const code = searchParams.get("code");
// if "next" is in param, use it as the redirect URL
const next = searchParams.get("next") ?? "/profile";
if (code) {
const supabase = createServerClient();
if (!supabase) {
return NextResponse.redirect(`${origin}/error`);
}
const { data, error } = await supabase.auth.exchangeCodeForSession(code);
// data.session?.refresh_token is available if you need to store it for later use
if (!error) {
const forwardedHost = request.headers.get("x-forwarded-host"); // original origin before load balancer
const isLocalEnv = process.env.NODE_ENV === "development";
if (isLocalEnv) {
// we can be sure that there is no load balancer in between, so no need to watch for X-Forwarded-Host
return NextResponse.redirect(`${origin}${next}`);
} else if (forwardedHost) {
return NextResponse.redirect(`https://${forwardedHost}${next}`);
} else {
return NextResponse.redirect(`${origin}${next}`);
}
}
}
// return the user to an error page with instructions
return NextResponse.redirect(`${origin}/auth/auth-code-error`);
}

View File

@@ -2,7 +2,7 @@ import { type EmailOtpType } from "@supabase/supabase-js";
import { type NextRequest } from "next/server";
import { redirect } from "next/navigation";
import { getServerSupabase } from "@/lib/supabase/server/getServerSupabase";
import { createServerClient } from "@/lib/supabase/server";
// Email confirmation route
export async function GET(request: NextRequest) {
@@ -12,7 +12,7 @@ export async function GET(request: NextRequest) {
const next = searchParams.get("next") ?? "/";
if (token_hash && type) {
const supabase = await getServerSupabase();
const supabase = createServerClient();
if (!supabase) {
redirect("/error");

View File

@@ -0,0 +1,16 @@
"use client";
import { useSearchParams } from "next/navigation";
import FlowEditor from '@/components/Flow';
export default function Home() {
const query = useSearchParams();
return (
<FlowEditor
className="flow-container w-full min-h-[86vh] border border-gray-300 dark:border-gray-700 rounded-lg"
flowID={query.get("flowID") ?? query.get("templateID") ?? undefined}
template={!!query.get("templateID")}
/>
);
}

View File

@@ -1,8 +1,8 @@
"use client";
import { useEffect } from "react";
import { IconCircleAlert } from "@/components/__legacy__/ui/icons";
import { Button } from "@/components/__legacy__/ui/button";
import { IconCircleAlert } from "@/components/ui/icons";
import { Button } from "@/components/ui/button";
import Link from "next/link";
export default function Error({

View File

@@ -0,0 +1,27 @@
"use client";
import * as Sentry from "@sentry/nextjs";
import NextError from "next/error";
import { useEffect } from "react";
export default function GlobalError({
error,
}: {
error: Error & { digest?: string };
}) {
useEffect(() => {
Sentry.captureException(error);
}, [error]);
return (
<html>
<body>
{/* `NextError` is the default Next.js error page component. Its type
definition requires a `statusCode` prop. However, since the App Router
does not expose status codes for errors, we simply pass 0 to render a
generic error message. */}
<NextError statusCode={0} />
</body>
</html>
);
}

View File

@@ -0,0 +1,75 @@
@tailwind base;
@tailwind components;
@tailwind utilities;
@layer utilities {
.text-balance {
text-wrap: balance;
}
}
@layer base {
:root {
--background: 0 0% 100%;
--foreground: 240 10% 3.9%;
--card: 0 0% 100%;
--card-foreground: 240 10% 3.9%;
--popover: 0 0% 100%;
--popover-foreground: 240 10% 3.9%;
--primary: 240 5.9% 10%;
--primary-foreground: 0 0% 98%;
--secondary: 240 4.8% 95.9%;
--secondary-foreground: 240 5.9% 10%;
--muted: 240 4.8% 95.9%;
--muted-foreground: 240 3.8% 46.1%;
--accent: 240 4.8% 95.9%;
--accent-foreground: 240 5.9% 10%;
--destructive: 0 84.2% 60.2%;
--destructive-foreground: 0 0% 98%;
--border: 240 5.9% 90%;
--input: 240 5.9% 90%;
--ring: 240 5.9% 10%;
--radius: 0.5rem;
--chart-1: 12 76% 61%;
--chart-2: 173 58% 39%;
--chart-3: 197 37% 24%;
--chart-4: 43 74% 66%;
--chart-5: 27 87% 67%;
}
.dark {
--background: 240 10% 3.9%;
--foreground: 0 0% 98%;
--card: 240 10% 3.9%;
--card-foreground: 0 0% 98%;
--popover: 240 10% 3.9%;
--popover-foreground: 0 0% 98%;
--primary: 0 0% 98%;
--primary-foreground: 240 5.9% 10%;
--secondary: 240 3.7% 15.9%;
--secondary-foreground: 0 0% 98%;
--muted: 240 3.7% 15.9%;
--muted-foreground: 240 5% 64.9%;
--accent: 240 3.7% 15.9%;
--accent-foreground: 0 0% 98%;
--destructive: 0 62.8% 30.6%;
--destructive-foreground: 0 0% 98%;
--border: 240 3.7% 15.9%;
--input: 240 3.7% 15.9%;
--ring: 240 4.9% 83.9%;
--chart-1: 220 70% 50%;
--chart-2: 160 60% 45%;
--chart-3: 30 80% 55%;
--chart-4: 280 65% 60%;
--chart-5: 340 75% 55%;
}
}
@layer base {
* {
@apply border-border;
}
body {
@apply bg-background text-foreground;
}
}

View File

@@ -0,0 +1,49 @@
import React from "react";
import type { Metadata } from "next";
import { Inter } from "next/font/google";
import { Providers } from "@/app/providers";
import { NavBar } from "@/components/NavBar";
import { cn } from "@/lib/utils";
import "./globals.css";
import TallyPopupSimple from "@/components/TallyPopup";
import { GoogleAnalytics } from "@next/third-parties/google";
import { Toaster } from "@/components/ui/toaster";
const inter = Inter({ subsets: ["latin"] });
export const metadata: Metadata = {
title: "NextGen AutoGPT",
description: "Your one stop shop to creating AI Agents",
};
export default function RootLayout({
children,
}: Readonly<{
children: React.ReactNode;
}>) {
return (
<html lang="en">
<body className={cn("antialiased transition-colors", inter.className)}>
<Providers
attribute="class"
defaultTheme="light"
// Feel free to remove this line if you want to use the system theme by default
// enableSystem
disableTransitionOnChange
>
<div className="flex min-h-screen flex-col">
<NavBar />
<main className="flex-1 overflow-hidden p-4">{children}</main>
<TallyPopupSimple />
</div>
<Toaster />
</Providers>
</body>
<GoogleAnalytics
gaId={process.env.GA_MEASUREMENT_ID || "G-FH2XK2W4GN"} // This is the measurement Id for the Google Analytics dev project
/>
</html>
);
}

View File

@@ -1,7 +1,7 @@
import AgentFlowListSkeleton from "@/app/(platform)/monitoring/components/skeletons/AgentFlowListSkeleton";
import AgentFlowListSkeleton from "@/components/monitor/skeletons/AgentFlowListSkeleton";
import React from "react";
import FlowRunsListSkeleton from "@/app/(platform)/monitoring/components/skeletons/FlowRunsListSkeleton";
import FlowRunsStatusSkeleton from "@/app/(platform)/monitoring/components/skeletons/FlowRunsStatusSkeleton";
import FlowRunsListSkeleton from "@/components/monitor/skeletons/FlowRunsListSkeleton";
import FlowRunsStatusSkeleton from "@/components/monitor/skeletons/FlowRunsStatusSkeleton";
export default function MonitorLoadingSkeleton() {
return (

View File

@@ -0,0 +1,64 @@
"use server";
import { revalidatePath } from "next/cache";
import { redirect } from "next/navigation";
import { createServerClient } from "@/lib/supabase/server";
import { z } from "zod";
import * as Sentry from "@sentry/nextjs";
const loginFormSchema = z.object({
email: z.string().email().min(2).max(64),
password: z.string().min(6).max(64),
});
export async function login(values: z.infer<typeof loginFormSchema>) {
return await Sentry.withServerActionInstrumentation("login", {}, async () => {
const supabase = createServerClient();
if (!supabase) {
redirect("/error");
}
// We are sure that the values are of the correct type because zod validates the form
const { data, error } = await supabase.auth.signInWithPassword(values);
if (error) {
return error.message;
}
if (data.session) {
await supabase.auth.setSession(data.session);
}
revalidatePath("/", "layout");
redirect("/profile");
});
}
export async function signup(values: z.infer<typeof loginFormSchema>) {
"use server";
return await Sentry.withServerActionInstrumentation(
"signup",
{},
async () => {
const supabase = createServerClient();
if (!supabase) {
redirect("/error");
}
// We are sure that the values are of the correct type because zod validates the form
const { data, error } = await supabase.auth.signUp(values);
if (error) {
return error.message;
}
if (data.session) {
await supabase.auth.setSession(data.session);
}
revalidatePath("/", "layout");
redirect("/profile");
},
);
}

View File

@@ -0,0 +1,234 @@
"use client";
import useUser from "@/hooks/useUser";
import { login, signup } from "./actions";
import { Button } from "@/components/ui/button";
import {
Form,
FormControl,
FormDescription,
FormField,
FormItem,
FormLabel,
FormMessage,
} from "@/components/ui/form";
import { useForm } from "react-hook-form";
import { Input } from "@/components/ui/input";
import { z } from "zod";
import { zodResolver } from "@hookform/resolvers/zod";
import { PasswordInput } from "@/components/PasswordInput";
import { FaGoogle, FaGithub, FaDiscord, FaSpinner } from "react-icons/fa";
import { useState } from "react";
import { useSupabase } from "@/components/SupabaseProvider";
import { useRouter } from "next/navigation";
import Link from "next/link";
import { Checkbox } from "@/components/ui/checkbox";
const loginFormSchema = z.object({
email: z.string().email().min(2).max(64),
password: z.string().min(6).max(64),
agreeToTerms: z.boolean().refine((value) => value === true, {
message: "You must agree to the Terms of Service and Privacy Policy",
}),
});
export default function LoginPage() {
const { supabase, isLoading: isSupabaseLoading } = useSupabase();
const { user, isLoading: isUserLoading } = useUser();
const [feedback, setFeedback] = useState<string | null>(null);
const router = useRouter();
const [isLoading, setIsLoading] = useState(false);
const form = useForm<z.infer<typeof loginFormSchema>>({
resolver: zodResolver(loginFormSchema),
defaultValues: {
email: "",
password: "",
agreeToTerms: false,
},
});
if (user) {
console.log("User exists, redirecting to profile");
router.push("/profile");
}
if (isUserLoading || isSupabaseLoading || user) {
return (
<div className="flex h-[80vh] items-center justify-center">
<FaSpinner className="mr-2 h-16 w-16 animate-spin" />
</div>
);
}
if (!supabase) {
return (
<div>
User accounts are disabled because Supabase client is unavailable
</div>
);
}
async function handleSignInWithProvider(
provider: "google" | "github" | "discord",
) {
const { data, error } = await supabase!.auth.signInWithOAuth({
provider: provider,
options: {
redirectTo:
process.env.AUTH_CALLBACK_URL ??
`http://localhost:3000/auth/callback`,
},
});
if (!error) {
setFeedback(null);
return;
}
setFeedback(error.message);
}
const onLogin = async (data: z.infer<typeof loginFormSchema>) => {
setIsLoading(true);
const error = await login(data);
setIsLoading(false);
if (error) {
setFeedback(error);
return;
}
setFeedback(null);
};
const onSignup = async (data: z.infer<typeof loginFormSchema>) => {
if (await form.trigger()) {
setIsLoading(true);
const error = await signup(data);
setIsLoading(false);
if (error) {
setFeedback(error);
return;
}
setFeedback(null);
}
};
return (
<div className="flex h-[80vh] items-center justify-center">
<div className="w-full max-w-md space-y-6 rounded-lg p-8 shadow-md">
<div className="mb-6 space-y-2">
<Button
className="w-full"
onClick={() => handleSignInWithProvider("google")}
variant="outline"
type="button"
disabled={isLoading}
>
<FaGoogle className="mr-2 h-4 w-4" />
Sign in with Google
</Button>
<Button
className="w-full"
onClick={() => handleSignInWithProvider("github")}
variant="outline"
type="button"
disabled={isLoading}
>
<FaGithub className="mr-2 h-4 w-4" />
Sign in with GitHub
</Button>
<Button
className="w-full"
onClick={() => handleSignInWithProvider("discord")}
variant="outline"
type="button"
disabled={isLoading}
>
<FaDiscord className="mr-2 h-4 w-4" />
Sign in with Discord
</Button>
</div>
<Form {...form}>
<form onSubmit={form.handleSubmit(onLogin)}>
<FormField
control={form.control}
name="email"
render={({ field }) => (
<FormItem className="mb-4">
<FormLabel>Email</FormLabel>
<FormControl>
<Input placeholder="user@email.com" {...field} />
</FormControl>
<FormMessage />
</FormItem>
)}
/>
<FormField
control={form.control}
name="password"
render={({ field }) => (
<FormItem>
<FormLabel>Password</FormLabel>
<FormControl>
<PasswordInput placeholder="password" {...field} />
</FormControl>
<FormDescription>
Password needs to be at least 6 characters long
</FormDescription>
<FormMessage />
</FormItem>
)}
/>
<FormField
control={form.control}
name="agreeToTerms"
render={({ field }) => (
<FormItem className="mt-4 flex flex-row items-start space-x-3 space-y-0">
<FormControl>
<Checkbox
checked={field.value}
onCheckedChange={field.onChange}
/>
</FormControl>
<div className="space-y-1 leading-none">
<FormLabel>
I agree to the{" "}
<Link href="/terms-of-service" className="underline">
Terms of Service
</Link>{" "}
and{" "}
<Link
href="https://www.notion.so/auto-gpt/Privacy-Policy-ab11c9c20dbd4de1a15dcffe84d77984"
className="underline"
>
Privacy Policy
</Link>
</FormLabel>
<FormMessage />
</div>
</FormItem>
)}
/>
<div className="mb-6 mt-6 flex w-full space-x-4">
<Button
className="flex w-1/2 justify-center"
type="submit"
disabled={isLoading}
>
Log in
</Button>
<Button
className="flex w-1/2 justify-center"
variant="outline"
type="button"
onClick={form.handleSubmit(onSignup)}
disabled={isLoading}
>
Sign up
</Button>
</div>
</form>
<p className="text-sm text-red-500">{feedback}</p>
</Form>
</div>
</div>
);
}

View File

@@ -0,0 +1,41 @@
import { Suspense } from "react";
import { notFound } from "next/navigation";
import MarketplaceAPI from "@/lib/marketplace-api";
import { AgentDetailResponse } from "@/lib/marketplace-api";
import AgentDetailContent from "@/components/marketplace/AgentDetailContent";
async function getAgentDetails(id: string): Promise<AgentDetailResponse> {
const apiUrl =
process.env.NEXT_PUBLIC_AGPT_MARKETPLACE_URL ||
"http://localhost:8015/api/v1/market";
const api = new MarketplaceAPI(apiUrl);
try {
console.log(`Fetching agent details for id: ${id}`);
const agent = await api.getAgentDetails(id);
console.log(`Agent details fetched:`, agent);
return agent;
} catch (error) {
console.error(`Error fetching agent details:`, error);
throw error;
}
}
export default async function AgentDetailPage({
params,
}: {
params: { id: string };
}) {
let agent: AgentDetailResponse;
try {
agent = await getAgentDetails(params.id);
} catch (error) {
return notFound();
}
return (
<Suspense fallback={<div>Loading...</div>}>
<AgentDetailContent agent={agent} />
</Suspense>
);
}

View File

@@ -0,0 +1,317 @@
"use client";
import React, { useEffect, useMemo, useState, useCallback } from "react";
import { useRouter } from "next/navigation";
import Image from "next/image";
import { Input } from "@/components/ui/input";
import { Button } from "@/components/ui/button";
import MarketplaceAPI, {
AgentResponse,
AgentListResponse,
AgentWithRank,
} from "@/lib/marketplace-api";
import {
ChevronLeft,
ChevronRight,
PlusCircle,
Search,
Star,
} from "lucide-react";
// Utility Functions
function debounce<T extends (...args: any[]) => any>(
func: T,
wait: number,
): (...args: Parameters<T>) => void {
let timeout: NodeJS.Timeout | null = null;
return (...args: Parameters<T>) => {
if (timeout) clearTimeout(timeout);
timeout = setTimeout(() => func(...args), wait);
};
}
// Types
type Agent = AgentResponse | AgentWithRank;
// Components
const HeroSection: React.FC = () => {
const router = useRouter();
return (
<div className="relative bg-indigo-600 py-6">
<div className="absolute inset-0 z-0">
<Image
src="https://images.unsplash.com/photo-1562408590-e32931084e23?auto=format&fit=crop&w=2070&q=80"
alt="Marketplace background"
layout="fill"
objectFit="cover"
quality={75}
priority
className="opacity-20"
/>
<div
className="absolute inset-0 bg-indigo-600 mix-blend-multiply"
aria-hidden="true"
></div>
</div>
<div className="relative mx-auto flex max-w-7xl items-center justify-between px-4 py-4 sm:px-6 lg:px-8">
<div>
<h1 className="text-2xl font-extrabold tracking-tight text-white sm:text-3xl lg:text-4xl">
AutoGPT Marketplace
</h1>
<p className="mt-2 max-w-3xl text-sm text-indigo-100 sm:text-base">
Discover and share proven AI Agents to supercharge your business.
</p>
</div>
<Button
onClick={() => router.push("/marketplace/submit")}
className="flex items-center bg-white text-indigo-600 hover:bg-indigo-50"
>
<PlusCircle className="mr-2 h-4 w-4" />
Submit Agent
</Button>
</div>
</div>
);
};
const SearchInput: React.FC<{
value: string;
onChange: (e: React.ChangeEvent<HTMLInputElement>) => void;
}> = ({ value, onChange }) => (
<div className="relative mb-8">
<Input
placeholder="Search agents..."
type="text"
className="w-full rounded-full border-gray-300 py-2 pl-10 pr-4 focus:border-indigo-500 focus:ring-indigo-500"
value={value}
onChange={onChange}
/>
<Search
className="absolute left-3 top-1/2 -translate-y-1/2 transform text-gray-400"
size={20}
/>
</div>
);
const AgentCard: React.FC<{ agent: Agent; featured?: boolean }> = ({
agent,
featured = false,
}) => {
const router = useRouter();
const handleClick = () => {
router.push(`/marketplace/${agent.id}`);
};
return (
<div
className={`flex cursor-pointer flex-col justify-between rounded-lg border p-6 transition-colors duration-200 hover:bg-gray-50 ${featured ? "border-indigo-500 shadow-md" : "border-gray-200"}`}
onClick={handleClick}
>
<div>
<div className="mb-2 flex items-center justify-between">
<h3 className="truncate text-lg font-semibold text-gray-900">
{agent.name}
</h3>
{featured && <Star className="text-indigo-500" size={20} />}
</div>
<p className="mb-4 line-clamp-2 text-sm text-gray-500">
{agent.description}
</p>
<div className="mb-2 text-xs text-gray-400">
Categories: {agent.categories.join(", ")}
</div>
</div>
<div className="flex items-end justify-between">
<div className="text-xs text-gray-400">
Updated {new Date(agent.updatedAt).toLocaleDateString()}
</div>
<div className="text-xs text-gray-400">Downloads {agent.downloads}</div>
{"rank" in agent && (
<div className="text-xs text-indigo-600">
Rank: {agent.rank.toFixed(2)}
</div>
)}
</div>
</div>
);
};
const AgentGrid: React.FC<{
agents: Agent[];
title: string;
featured?: boolean;
}> = ({ agents, title, featured = false }) => (
<div className="mb-12">
<h2 className="mb-4 text-2xl font-bold text-gray-900">{title}</h2>
<div className="grid grid-cols-1 gap-6 md:grid-cols-2 lg:grid-cols-3">
{agents.map((agent) => (
<AgentCard agent={agent} key={agent.id} featured={featured} />
))}
</div>
</div>
);
const Pagination: React.FC<{
page: number;
totalPages: number;
onPrevPage: () => void;
onNextPage: () => void;
}> = ({ page, totalPages, onPrevPage, onNextPage }) => (
<div className="mt-8 flex items-center justify-between">
<Button
onClick={onPrevPage}
disabled={page === 1}
className="flex items-center space-x-2 rounded-md border border-gray-300 bg-white px-4 py-2 text-sm font-medium text-gray-700 shadow-sm hover:bg-gray-50"
>
<ChevronLeft size={16} />
<span>Previous</span>
</Button>
<span className="text-sm text-gray-700">
Page {page} of {totalPages}
</span>
<Button
onClick={onNextPage}
disabled={page === totalPages}
className="flex items-center space-x-2 rounded-md border border-gray-300 bg-white px-4 py-2 text-sm font-medium text-gray-700 shadow-sm hover:bg-gray-50"
>
<span>Next</span>
<ChevronRight size={16} />
</Button>
</div>
);
// Main Component
const Marketplace: React.FC = () => {
const apiUrl =
process.env.NEXT_PUBLIC_AGPT_MARKETPLACE_URL ||
"http://localhost:8015/api/v1/market";
const api = useMemo(() => new MarketplaceAPI(apiUrl), [apiUrl]);
const [searchValue, setSearchValue] = useState("");
const [searchResults, setSearchResults] = useState<Agent[]>([]);
const [featuredAgents, setFeaturedAgents] = useState<Agent[]>([]);
const [topAgents, setTopAgents] = useState<Agent[]>([]);
const [page, setPage] = useState(1);
const [totalPages, setTotalPages] = useState(1);
const [isLoading, setIsLoading] = useState(false);
const fetchTopAgents = useCallback(
async (currentPage: number) => {
setIsLoading(true);
try {
const response = await api.getTopDownloadedAgents(currentPage, 9);
setTopAgents(response.agents);
setTotalPages(response.total_pages);
} catch (error) {
console.error("Error fetching top agents:", error);
} finally {
setIsLoading(false);
}
},
[api],
);
const fetchFeaturedAgents = useCallback(async () => {
try {
const featured = await api.getFeaturedAgents();
setFeaturedAgents(featured.agents);
} catch (error) {
console.error("Error fetching featured agents:", error);
}
}, [api]);
const searchAgents = useCallback(
async (searchTerm: string) => {
setIsLoading(true);
try {
const response = await api.searchAgents(searchTerm, 1, 30);
const filteredAgents = response.filter((agent) => agent.rank > 0);
setSearchResults(filteredAgents);
} catch (error) {
console.error("Error searching agents:", error);
} finally {
setIsLoading(false);
}
},
[api],
);
const debouncedSearch = useMemo(
() => debounce(searchAgents, 300),
[searchAgents],
);
useEffect(() => {
if (searchValue) {
debouncedSearch(searchValue);
} else {
fetchTopAgents(page);
}
}, [searchValue, page, debouncedSearch, fetchTopAgents]);
useEffect(() => {
fetchFeaturedAgents();
}, [fetchFeaturedAgents]);
const handleInputChange = (e: React.ChangeEvent<HTMLInputElement>) => {
setSearchValue(e.target.value);
setPage(1);
};
const handleNextPage = () => {
if (page < totalPages) {
setPage(page + 1);
}
};
const handlePrevPage = () => {
if (page > 1) {
setPage(page - 1);
}
};
return (
<div className="min-h-screen bg-gray-50">
<HeroSection />
<div className="mx-auto max-w-7xl px-4 py-12 sm:px-6 lg:px-8">
<SearchInput value={searchValue} onChange={handleInputChange} />
{isLoading ? (
<div className="py-12 text-center">
<div className="inline-block h-8 w-8 animate-spin rounded-full border-b-2 border-gray-900"></div>
<p className="mt-2 text-gray-600">Loading agents...</p>
</div>
) : searchValue ? (
searchResults.length > 0 ? (
<AgentGrid agents={searchResults} title="Search Results" />
) : (
<div className="py-12 text-center">
<p className="text-gray-600">
No agents found matching your search criteria.
</p>
</div>
)
) : (
<>
{featuredAgents.length > 0 && (
<AgentGrid
agents={featuredAgents}
title="Featured Agents"
featured={true}
/>
)}
<AgentGrid agents={topAgents} title="Top Downloaded Agents" />
<Pagination
page={page}
totalPages={totalPages}
onPrevPage={handlePrevPage}
onNextPage={handleNextPage}
/>
</>
)}
</div>
</div>
);
};
export default Marketplace;

View File

@@ -0,0 +1,449 @@
"use client";
import React, { useState, useEffect, useMemo } from "react";
import { useRouter } from "next/navigation";
import { useForm, Controller } from "react-hook-form";
import MarketplaceAPI from "@/lib/marketplace-api";
import AutoGPTServerAPI from "@/lib/autogpt-server-api";
import { Card } from "@/components/ui/card";
import { Input } from "@/components/ui/input";
import { Button } from "@/components/ui/button";
import { Textarea } from "@/components/ui/textarea";
import { Alert, AlertTitle, AlertDescription } from "@/components/ui/alert";
import { Checkbox } from "@/components/ui/checkbox";
import {
MultiSelector,
MultiSelectorContent,
MultiSelectorInput,
MultiSelectorItem,
MultiSelectorList,
MultiSelectorTrigger,
} from "@/components/ui/multiselect";
import {
Select,
SelectContent,
SelectItem,
SelectTrigger,
SelectValue,
} from "@/components/ui/select";
type FormData = {
name: string;
description: string;
author: string;
keywords: string[];
categories: string[];
agreeToTerms: boolean;
selectedAgentId: string;
};
const keywords = [
"Automation",
"AI Workflows",
"Integration",
"Task Automation",
"Data Processing",
"Workflow Management",
"Real-time Analytics",
"Custom Triggers",
"Event-driven",
"API Integration",
"Data Transformation",
"Multi-step Workflows",
"Collaboration Tools",
"Business Process Automation",
"No-code Solutions",
"AI-Powered",
"Smart Notifications",
"Data Syncing",
"User Engagement",
"Reporting Automation",
"Lead Generation",
"Customer Support Automation",
"E-commerce Automation",
"Social Media Management",
"Email Marketing Automation",
"Document Management",
"Data Enrichment",
"Performance Tracking",
"Predictive Analytics",
"Resource Allocation",
"Chatbot",
"Virtual Assistant",
"Workflow Automation",
"Social Media Manager",
"Email Optimizer",
"Content Generator",
"Data Analyzer",
"Task Scheduler",
"Customer Service Bot",
"Personalization Engine",
];
const SubmitPage: React.FC = () => {
const router = useRouter();
const {
control,
handleSubmit,
watch,
setValue,
formState: { errors },
} = useForm<FormData>({
defaultValues: {
selectedAgentId: "", // Initialize with an empty string
name: "",
description: "",
author: "",
keywords: [],
categories: [],
agreeToTerms: false,
},
});
const [isSubmitting, setIsSubmitting] = useState(false);
const [submitError, setSubmitError] = useState<string | null>(null);
const [userAgents, setUserAgents] = useState<
Array<{ id: string; name: string; version: number }>
>([]);
const [selectedAgentGraph, setSelectedAgentGraph] = useState<any>(null);
const selectedAgentId = watch("selectedAgentId");
useEffect(() => {
const fetchUserAgents = async () => {
const api = new AutoGPTServerAPI();
const agents = await api.listGraphs();
console.log(agents);
setUserAgents(
agents.map((agent) => ({
id: agent.id,
name: agent.name || `Agent (${agent.id})`,
version: agent.version,
})),
);
};
fetchUserAgents();
}, []);
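// When the selected agent changes, load its full graph and prefill the
// form's name and description from it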
useEffect(() => {
const fetchAgentGraph = async () => {
if (selectedAgentId) {
const api = new AutoGPTServerAPI();
const graph = await api.getGraph(selectedAgentId);
setSelectedAgentGraph(graph);
setValue("name", graph.name);
setValue("description", graph.description);
}
};
fetchAgentGraph();
}, [selectedAgentId, setValue]);
const onSubmit = async (data: FormData) => {
setIsSubmitting(true);
setSubmitError(null);
try {
// Validate inside the try block so these errors are caught and surfaced
// via submitError instead of escaping as unhandled promise rejections
if (!data.agreeToTerms) {
throw new Error("You must agree to the terms of service");
}
if (!selectedAgentGraph) {
throw new Error("Please select an agent");
}
const api = new MarketplaceAPI();
await api.submitAgent(
{
...selectedAgentGraph,
name: data.name,
description: data.description,
},
data.author,
data.keywords,
data.categories,
);
router.push("/marketplace?submission=success");
} catch (error) {
console.error("Submission error:", error);
setSubmitError(
error instanceof Error ? error.message : "An unknown error occurred",
);
} finally {
setIsSubmitting(false);
}
};
return (
<div className="container mx-auto px-4 py-8">
<h1 className="mb-6 text-3xl font-bold">Submit Your Agent</h1>
<Card className="p-6">
<form onSubmit={handleSubmit(onSubmit)}>
<div className="space-y-4">
<Controller
name="selectedAgentId"
control={control}
rules={{ required: "Please select an agent" }}
render={({ field }) => (
<div>
<label
htmlFor={field.name}
className="block text-sm font-medium text-gray-700"
>
Select Agent
</label>
<Select
onValueChange={field.onChange}
value={field.value || ""}
>
<SelectTrigger className="w-full">
<SelectValue placeholder="Select an agent" />
</SelectTrigger>
<SelectContent>
{userAgents.map((agent) => (
<SelectItem key={agent.id} value={agent.id}>
{agent.name} (v{agent.version})
</SelectItem>
))}
</SelectContent>
</Select>
{errors.selectedAgentId && (
<p className="mt-1 text-sm text-red-600">
{errors.selectedAgentId.message}
</p>
)}
</div>
)}
/>
{/* {selectedAgentGraph && (
<div className="mt-4" style={{ height: "600px" }}>
<ReactFlow
nodes={nodes}
edges={edges}
fitView
attributionPosition="bottom-left"
nodesConnectable={false}
nodesDraggable={false}
zoomOnScroll={false}
panOnScroll={false}
elementsSelectable={false}
>
<Controls showInteractive={false} />
<Background />
</ReactFlow>
</div>
)} */}
<Controller
name="name"
control={control}
rules={{ required: "Name is required" }}
render={({ field }) => (
<div>
<label
htmlFor={field.name}
className="block text-sm font-medium text-gray-700"
>
Agent Name
</label>
<Input
id={field.name}
placeholder="Enter your agent's name"
{...field}
/>
{errors.name && (
<p className="mt-1 text-sm text-red-600">
{errors.name.message}
</p>
)}
</div>
)}
/>
<Controller
name="description"
control={control}
rules={{ required: "Description is required" }}
render={({ field }) => (
<div>
<label
htmlFor={field.name}
className="block text-sm font-medium text-gray-700"
>
Description
</label>
<Textarea
id={field.name}
placeholder="Describe your agent"
{...field}
/>
{errors.description && (
<p className="mt-1 text-sm text-red-600">
{errors.description.message}
</p>
)}
</div>
)}
/>
<Controller
name="author"
control={control}
rules={{ required: "Author is required" }}
render={({ field }) => (
<div>
<label
htmlFor={field.name}
className="block text-sm font-medium text-gray-700"
>
Author
</label>
<Input
id={field.name}
placeholder="Your name or username"
{...field}
/>
{errors.author && (
<p className="mt-1 text-sm text-red-600">
{errors.author.message}
</p>
)}
</div>
)}
/>
<Controller
name="keywords"
control={control}
rules={{ required: "At least one keyword is required" }}
render={({ field }) => (
<div>
<label
htmlFor={field.name}
className="block text-sm font-medium text-gray-700"
>
Keywords
</label>
<MultiSelector
values={field.value || []}
onValuesChange={field.onChange}
>
<MultiSelectorTrigger>
<MultiSelectorInput placeholder="Add keywords" />
</MultiSelectorTrigger>
<MultiSelectorContent>
<MultiSelectorList>
{keywords.map((keyword) => (
<MultiSelectorItem key={keyword} value={keyword}>
{keyword}
</MultiSelectorItem>
))}
</MultiSelectorList>
</MultiSelectorContent>
</MultiSelector>
{errors.keywords && (
<p className="mt-1 text-sm text-red-600">
{errors.keywords.message}
</p>
)}
</div>
)}
/>
<Controller
name="categories"
control={control}
rules={{ required: "At least one category is required" }}
render={({ field }) => (
<div>
<label
htmlFor={field.name}
className="block text-sm font-medium text-gray-700"
>
Categories
</label>
<MultiSelector
values={field.value || []}
onValuesChange={field.onChange}
>
<MultiSelectorTrigger>
<MultiSelectorInput placeholder="Select categories" />
</MultiSelectorTrigger>
<MultiSelectorContent>
<MultiSelectorList>
<MultiSelectorItem value="productivity">
Productivity
</MultiSelectorItem>
<MultiSelectorItem value="entertainment">
Entertainment
</MultiSelectorItem>
<MultiSelectorItem value="education">
Education
</MultiSelectorItem>
<MultiSelectorItem value="business">
Business
</MultiSelectorItem>
<MultiSelectorItem value="other">
Other
</MultiSelectorItem>
</MultiSelectorList>
</MultiSelectorContent>
</MultiSelector>
{errors.categories && (
<p className="mt-1 text-sm text-red-600">
{errors.categories.message}
</p>
)}
</div>
)}
/>
<Controller
name="agreeToTerms"
control={control}
rules={{ required: "You must agree to the terms of service" }}
render={({ field }) => (
<div className="flex items-center space-x-2">
<Checkbox
id="agreeToTerms"
checked={field.value}
onCheckedChange={field.onChange}
/>
<label
htmlFor="agreeToTerms"
className="text-sm font-medium leading-none peer-disabled:cursor-not-allowed peer-disabled:opacity-70"
>
I agree to the{" "}
<a href="/terms" className="text-blue-500 hover:underline">
terms of service
</a>
</label>
</div>
)}
/>
{errors.agreeToTerms && (
<p className="mt-1 text-sm text-red-600">
{errors.agreeToTerms.message}
</p>
)}
{submitError && (
<Alert variant="destructive">
<AlertTitle>Submission Failed</AlertTitle>
<AlertDescription>{submitError}</AlertDescription>
</Alert>
)}
<Button type="submit" className="w-full" disabled={isSubmitting}>
{isSubmitting ? "Submitting..." : "Submit Agent"}
</Button>
</div>
</form>
</Card>
</div>
);
};
export default SubmitPage;

@@ -0,0 +1,178 @@
"use client";
import React, { useCallback, useEffect, useMemo, useState } from "react";
import AutoGPTServerAPI, {
GraphMeta,
NodeExecutionResult,
} from "@/lib/autogpt-server-api";
import { Card } from "@/components/ui/card";
import { FlowRun } from "@/lib/types";
import {
AgentFlowList,
FlowInfo,
FlowRunInfo,
FlowRunsList,
FlowRunsStats,
} from "@/components/monitor";
const Monitor = () => {
const [flows, setFlows] = useState<GraphMeta[]>([]);
const [flowRuns, setFlowRuns] = useState<FlowRun[]>([]);
const [selectedFlow, setSelectedFlow] = useState<GraphMeta | null>(null);
const [selectedRun, setSelectedRun] = useState<FlowRun | null>(null);
const api = useMemo(() => new AutoGPTServerAPI(), []);
const refreshFlowRuns = useCallback(
(flowID: string) => {
// Fetch flow run IDs
api.listGraphRunIDs(flowID).then((runIDs) =>
runIDs.map((runID) => {
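// Skip runs already cached in a terminal state; only fetch new or still-active runs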
let run;
if (
(run = flowRuns.find((fr) => fr.id == runID)) &&
!["waiting", "running"].includes(run.status)
) {
return;
}
// Fetch flow run
api.getGraphExecutionInfo(flowID, runID).then((execInfo) =>
setFlowRuns((flowRuns) => {
if (execInfo.length == 0) return flowRuns;
const flowRunIndex = flowRuns.findIndex((fr) => fr.id == runID);
const flowRun = flowRunFromNodeExecutionResults(execInfo);
if (flowRunIndex > -1) {
flowRuns.splice(flowRunIndex, 1, flowRun);
} else {
flowRuns.push(flowRun);
}
return [...flowRuns];
}),
);
}),
);
},
[api, flowRuns],
);
const fetchFlowsAndRuns = useCallback(() => {
api.listGraphs().then((flows) => {
setFlows(flows);
flows.map((flow) => refreshFlowRuns(flow.id));
});
}, [api, refreshFlowRuns]);
useEffect(() => fetchFlowsAndRuns(), [fetchFlowsAndRuns]);
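// Poll all known flows for fresh run data every 5 seconds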
useEffect(() => {
const intervalId = setInterval(
() => flows.map((f) => refreshFlowRuns(f.id)),
5000,
);
return () => clearInterval(intervalId);
}, [flows, refreshFlowRuns]);
const column1 = "md:col-span-2 xl:col-span-3 xxl:col-span-2";
const column2 = "md:col-span-3 lg:col-span-2 xl:col-span-3 space-y-4";
const column3 = "col-span-full xl:col-span-4 xxl:col-span-5";
return (
<div className="grid grid-cols-1 gap-4 md:grid-cols-5 lg:grid-cols-4 xl:grid-cols-10">
<AgentFlowList
className={column1}
flows={flows}
flowRuns={flowRuns}
selectedFlow={selectedFlow}
onSelectFlow={(f) => {
setSelectedRun(null);
setSelectedFlow(f.id == selectedFlow?.id ? null : f);
}}
/>
<FlowRunsList
className={column2}
flows={flows}
runs={[
...(selectedFlow
? flowRuns.filter((v) => v.graphID == selectedFlow.id)
: flowRuns),
].sort((a, b) => Number(a.startTime) - Number(b.startTime))}
selectedRun={selectedRun}
onSelectRun={(r) => setSelectedRun(r.id == selectedRun?.id ? null : r)}
/>
{(selectedRun && (
<FlowRunInfo
flow={selectedFlow || flows.find((f) => f.id == selectedRun.graphID)!}
flowRun={selectedRun}
className={column3}
/>
)) ||
(selectedFlow && (
<FlowInfo
flow={selectedFlow}
flowRuns={flowRuns.filter((r) => r.graphID == selectedFlow.id)}
className={column3}
/>
)) || (
<Card className={`p-6 ${column3}`}>
<FlowRunsStats flows={flows} flowRuns={flowRuns} />
</Card>
)}
</div>
);
};
function flowRunFromNodeExecutionResults(
nodeExecutionResults: NodeExecutionResult[],
): FlowRun {
// Determine overall status
let status: "running" | "waiting" | "success" | "failed" = "success";
for (const execution of nodeExecutionResults) {
if (execution.status === "FAILED") {
status = "failed";
break;
} else if (["QUEUED", "RUNNING"].includes(execution.status)) {
status = "running";
break;
} else if (execution.status === "INCOMPLETE") {
status = "waiting";
}
}
// Determine aggregate startTime, endTime, and totalRunTime
const now = Date.now();
const startTime = Math.min(
...nodeExecutionResults.map((ner) => ner.add_time.getTime()),
now,
);
const endTime = ["success", "failed"].includes(status)
? Math.max(
...nodeExecutionResults.map((ner) => ner.end_time?.getTime() || 0),
startTime,
)
: now;
const duration = (endTime - startTime) / 1000; // Convert to seconds
const totalRunTime =
nodeExecutionResults.reduce(
(cum, node) =>
cum +
((node.end_time?.getTime() ?? now) -
(node.start_time?.getTime() ?? now)),
0,
) / 1000;
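// Note: duration is wall-clock time for the whole run, while totalRunTime sums
// per-node execution times, so it can exceed duration if node executions overlap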
return {
id: nodeExecutionResults[0].graph_exec_id,
graphID: nodeExecutionResults[0].graph_id,
graphVersion: nodeExecutionResults[0].graph_version,
status,
startTime,
endTime,
duration,
totalRunTime,
nodeExecutionResults: nodeExecutionResults,
};
}
export default Monitor;

@@ -0,0 +1,33 @@
"use client";
import { useSupabase } from "@/components/SupabaseProvider";
import { Button } from "@/components/ui/button";
import useUser from "@/hooks/useUser";
import { useRouter } from "next/navigation";
import { FaSpinner } from "react-icons/fa";
export default function PrivatePage() {
const { user, isLoading, error } = useUser();
const { supabase } = useSupabase();
const router = useRouter();
if (isLoading) {
return (
<div className="flex h-[80vh] items-center justify-center">
<FaSpinner className="mr-2 h-16 w-16 animate-spin" />
</div>
);
}
if (error || !user || !supabase) {
router.push("/login");
return null;
}
return (
<div>
<p>Hello {user.email}</p>
<Button onClick={() => supabase.auth.signOut()}>Log out</Button>
</div>
);
}

@@ -0,0 +1,17 @@
"use client";
import * as React from "react";
import { ThemeProvider as NextThemesProvider } from "next-themes";
import { ThemeProviderProps } from "next-themes/dist/types";
import { TooltipProvider } from "@/components/ui/tooltip";
import SupabaseProvider from "@/components/SupabaseProvider";
export function Providers({ children, ...props }: ThemeProviderProps) {
return (
<NextThemesProvider {...props}>
<SupabaseProvider>
<TooltipProvider>{children}</TooltipProvider>
</SupabaseProvider>
</NextThemesProvider>
);
}

@@ -1,12 +1,11 @@
 import {
   BaseEdge,
   ConnectionLineComponentProps,
-  Node,
   getBezierPath,
   Position,
 } from "@xyflow/react";
 
-export default function ConnectionLine<NodeType extends Node>({
+const ConnectionLine: React.FC<ConnectionLineComponentProps> = ({
   fromPosition,
   fromHandle,
   fromX,
@@ -14,11 +13,11 @@ export default function ConnectionLine<NodeType extends Node>({
   toPosition,
   toX,
   toY,
-}: ConnectionLineComponentProps<NodeType>) {
+}) => {
   const sourceX =
     fromPosition === Position.Right
-      ? fromX + ((fromHandle?.width ?? 0) / 2 - 5)
-      : fromX - ((fromHandle?.width ?? 0) / 2 - 5);
+      ? fromX + (fromHandle?.width! / 2 - 5)
+      : fromX - (fromHandle?.width! / 2 - 5);
 
   const [path] = getBezierPath({
     sourceX: sourceX,
@@ -30,4 +29,6 @@ export default function ConnectionLine<NodeType extends Node>({
   });
 
   return <BaseEdge path={path} style={{ strokeWidth: 2, stroke: "#555" }} />;
-}
+};
+
+export default ConnectionLine;

@@ -0,0 +1,32 @@
"use client";
import { useState, useEffect, useMemo } from "react";
import { Button } from "@/components/ui/button";
import { IconRefresh } from "@/components/ui/icons";
import AutoGPTServerAPI from "@/lib/autogpt-server-api";
export default function CreditButton() {
const [credit, setCredit] = useState<number | null>(null);
// Memoize the API client: a new instance on every render would change the
// effect dependency below and retrigger the fetch in a loop
const api = useMemo(() => new AutoGPTServerAPI(), []);
const fetchCredit = async () => {
const response = await api.getUserCredit();
setCredit(response.credits);
};
useEffect(() => {
fetchCredit();
}, [api]);
return (
credit !== null && (
<Button
onClick={fetchCredit}
variant="outline"
className="flex items-center space-x-2 text-muted-foreground"
>
<span>Credits: {credit}</span>
<IconRefresh />
</Button>
)
);
}

@@ -0,0 +1,235 @@
import React, { useCallback, useContext, useEffect, useState } from "react";
import {
BaseEdge,
EdgeLabelRenderer,
EdgeProps,
useReactFlow,
XYPosition,
Edge,
Node,
} from "@xyflow/react";
import "./customedge.css";
import { X } from "lucide-react";
import { useBezierPath } from "@/hooks/useBezierPath";
import { FlowContext } from "./Flow";
export type CustomEdgeData = {
edgeColor: string;
sourcePos?: XYPosition;
isStatic?: boolean;
beadUp?: number;
beadDown?: number;
beadData?: any[];
};
type Bead = {
t: number;
targetT: number;
startTime: number;
};
export type CustomEdge = Edge<CustomEdgeData, "custom">;
export function CustomEdge({
id,
data,
selected,
sourceX,
sourceY,
targetX,
targetY,
markerEnd,
}: EdgeProps<CustomEdge>) {
const [isHovered, setIsHovered] = useState(false);
const [beads, setBeads] = useState<{
beads: Bead[];
created: number;
destroyed: number;
}>({ beads: [], created: 0, destroyed: 0 });
const { svgPath, length, getPointForT, getTForDistance } = useBezierPath(
sourceX - 5,
sourceY,
targetX + 3,
targetY,
);
const { deleteElements } = useReactFlow<Node, CustomEdge>();
const { visualizeBeads } = useContext(FlowContext) ?? {
visualizeBeads: "no",
};
const onEdgeRemoveClick = () => {
deleteElements({ edges: [{ id }] });
};
const animationDuration = 500; // Duration in milliseconds for bead to travel the curve
const beadDiameter = 12;
const deltaTime = 16;
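// Interval tick in ms (~60 FPS) used to advance the bead animation below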
const setTargetPositions = useCallback(
(beads: Bead[]) => {
const distanceBetween = Math.min(
(length - beadDiameter) / (beads.length + 1),
beadDiameter,
);
return beads.map((bead, index) => {
const distanceFromEnd = beadDiameter * 1.35;
const targetPosition = distanceBetween * index + distanceFromEnd;
const t = getTForDistance(-targetPosition);
return {
...bead,
t: visualizeBeads === "animate" ? bead.t : t,
targetT: t,
} as Bead;
});
},
[getTForDistance, length, visualizeBeads],
);
useEffect(() => {
if (data?.beadUp === 0 && data?.beadDown === 0) {
setBeads({ beads: [], created: 0, destroyed: 0 });
return;
}
const beadUp = data?.beadUp!;
// Add beads
setBeads(({ beads, created, destroyed }) => {
const newBeads = [];
for (let i = 0; i < beadUp - created; i++) {
newBeads.push({ t: 0, targetT: 0, startTime: Date.now() });
}
const b = setTargetPositions([...beads, ...newBeads]);
return { beads: b, created: beadUp, destroyed };
});
// Remove beads if not animating
if (visualizeBeads !== "animate") {
setBeads(({ beads, created, destroyed }) => {
let destroyedCount = 0;
const newBeads = beads
.map((bead) => ({ ...bead }))
.filter((bead, index) => {
const beadDown = data?.beadDown!;
// Always remove one bead less on a static edge, so the last bead stays at the connection point
const removeCount = beadDown - destroyed - (data?.isStatic ? 1 : 0);
if (bead.t >= bead.targetT && index < removeCount) {
destroyedCount++;
return false;
}
return true;
});
return {
beads: setTargetPositions(newBeads),
created,
destroyed: destroyed + destroyedCount,
};
});
return;
}
// Animate and remove beads
const interval = setInterval(() => {
setBeads(({ beads, created, destroyed }) => {
let destroyedCount = 0;
const newBeads = beads
.map((bead) => {
const progressIncrement = deltaTime / animationDuration;
const t = Math.min(
bead.t + bead.targetT * progressIncrement,
bead.targetT,
);
return {
...bead,
t,
};
})
.filter((bead, index) => {
const beadDown = data?.beadDown!;
// Always remove one bead less on a static edge, so the last bead stays at the connection point
const removeCount = beadDown - destroyed - (data?.isStatic ? 1 : 0);
if (bead.t >= bead.targetT && index < removeCount) {
destroyedCount++;
return false;
}
return true;
});
return {
beads: setTargetPositions(newBeads),
created,
destroyed: destroyed + destroyedCount,
};
});
}, deltaTime);
return () => clearInterval(interval);
}, [data, setTargetPositions, visualizeBeads]);
const middle = getPointForT(0.5);
return (
<>
<BaseEdge
path={svgPath}
markerEnd={markerEnd}
style={{
strokeWidth: (isHovered ? 3 : 2) + (data?.isStatic ? 0.5 : 0),
stroke:
(data?.edgeColor ?? "#555555") +
(selected || isHovered ? "" : "80"),
strokeDasharray: data?.isStatic ? "5 3" : "0",
}}
/>
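{/* Invisible, wider path to enlarge the edge's hover and click target */}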
<path
d={svgPath}
fill="none"
strokeOpacity={0}
strokeWidth={20}
className="react-flow__edge-interaction"
onMouseEnter={() => setIsHovered(true)}
onMouseLeave={() => setIsHovered(false)}
/>
<EdgeLabelRenderer>
<div
style={{
position: "absolute",
transform: `translate(-50%, -50%) translate(${middle.x}px,${middle.y}px)`,
pointerEvents: "all",
}}
className="edge-label-renderer"
>
<button
onMouseEnter={() => setIsHovered(true)}
onMouseLeave={() => setIsHovered(false)}
className={`edge-label-button ${isHovered ? "visible" : ""}`}
onClick={onEdgeRemoveClick}
>
<X className="size-4" />
</button>
</div>
</EdgeLabelRenderer>
{beads.beads.map((bead, index) => {
const pos = getPointForT(bead.t);
return (
<circle
key={index}
cx={pos.x}
cy={pos.y}
r={beadDiameter / 2} // Bead radius
fill={data?.edgeColor ?? "#555555"}
/>
);
})}
</>
);
}

@@ -0,0 +1,660 @@
import React, {
useState,
useEffect,
useCallback,
useRef,
useContext,
} from "react";
import { NodeProps, useReactFlow, Node, Edge } from "@xyflow/react";
import "@xyflow/react/dist/style.css";
import "./customnode.css";
import InputModalComponent from "./InputModalComponent";
import OutputModalComponent from "./OutputModalComponent";
import {
BlockIORootSchema,
BlockIOStringSubSchema,
Category,
NodeExecutionResult,
BlockUIType,
BlockCost,
} from "@/lib/autogpt-server-api/types";
import { beautifyString, cn, setNestedProperty } from "@/lib/utils";
import { Button } from "@/components/ui/button";
import { Switch } from "@/components/ui/switch";
import { Copy, Trash2 } from "lucide-react";
import { history } from "./history";
import NodeHandle from "./NodeHandle";
import {
NodeGenericInputField,
NodeTextBoxInput,
} from "./node-input-components";
import SchemaTooltip from "./SchemaTooltip";
import { getPrimaryCategoryColor } from "@/lib/utils";
import { FlowContext } from "./Flow";
import { Badge } from "./ui/badge";
import DataTable from "./DataTable";
type ParsedKey = { key: string; index?: number };
export type ConnectionData = Array<{
edge_id: string;
source: string;
sourceHandle: string;
target: string;
targetHandle: string;
}>;
export type CustomNodeData = {
blockType: string;
blockCosts: BlockCost[];
title: string;
description: string;
categories: Category[];
inputSchema: BlockIORootSchema;
outputSchema: BlockIORootSchema;
hardcodedValues: { [key: string]: any };
connections: ConnectionData;
isOutputOpen: boolean;
status?: NodeExecutionResult["status"];
/** executionResults contains outputs across multiple executions
* with the last element being the most recent output */
executionResults?: {
execId: string;
data: NodeExecutionResult["output_data"];
}[];
block_id: string;
backend_id?: string;
errors?: { [key: string]: string };
isOutputStatic?: boolean;
uiType: BlockUIType;
};
export type CustomNode = Node<CustomNodeData, "custom">;
export function CustomNode({ data, id, width, height }: NodeProps<CustomNode>) {
const [isOutputOpen, setIsOutputOpen] = useState(data.isOutputOpen || false);
const [isAdvancedOpen, setIsAdvancedOpen] = useState(false);
const [isModalOpen, setIsModalOpen] = useState(false);
const [activeKey, setActiveKey] = useState<string | null>(null);
const [inputModalValue, setInputModalValue] = useState<string>("");
const [isOutputModalOpen, setIsOutputModalOpen] = useState(false);
const [isHovered, setIsHovered] = useState(false);
const { updateNodeData, deleteElements, addNodes, getNode } = useReactFlow<
CustomNode,
Edge
>();
const isInitialSetup = useRef(true);
const flowContext = useContext(FlowContext);
if (!flowContext) {
throw new Error("FlowContext consumer must be inside FlowEditor component");
}
const { setIsAnyModalOpen, getNextNodeId } = flowContext;
useEffect(() => {
if (data.executionResults || data.status) {
setIsOutputOpen(true);
}
}, [data.executionResults, data.status]);
useEffect(() => {
setIsOutputOpen(data.isOutputOpen);
}, [data.isOutputOpen]);
useEffect(() => {
setIsAnyModalOpen?.(isModalOpen || isOutputModalOpen);
}, [isModalOpen, isOutputModalOpen, data, setIsAnyModalOpen]);
useEffect(() => {
isInitialSetup.current = false;
}, []);
const setHardcodedValues = (values: any) => {
updateNodeData(id, { hardcodedValues: values });
};
const setErrors = (errors: { [key: string]: string }) => {
updateNodeData(id, { errors });
};
const toggleOutput = (checked: boolean) => {
setIsOutputOpen(checked);
};
const toggleAdvancedSettings = (checked: boolean) => {
setIsAdvancedOpen(checked);
};
const generateOutputHandles = (
schema: BlockIORootSchema,
nodeType: BlockUIType,
) => {
if (
!schema?.properties ||
nodeType === BlockUIType.OUTPUT ||
nodeType === BlockUIType.NOTE
)
return null;
const keys = Object.keys(schema.properties);
return keys.map((key) => (
<div key={key}>
<NodeHandle
keyName={key}
isConnected={isHandleConnected(key)}
schema={schema.properties[key]}
side="right"
/>
</div>
));
};
const generateInputHandles = (
schema: BlockIORootSchema,
nodeType: BlockUIType,
) => {
if (!schema?.properties) return null;
let keys = Object.entries(schema.properties);
switch (nodeType) {
case BlockUIType.INPUT:
// For INPUT blocks, don't include connection handles
return keys.map(([propKey, propSchema]) => {
const isRequired = data.inputSchema.required?.includes(propKey);
const isConnected = isHandleConnected(propKey);
const isAdvanced = propSchema.advanced;
return (
(isRequired || isAdvancedOpen || !isAdvanced) && (
<div key={propKey}>
<span className="text-m green -mb-1 text-gray-900">
{propSchema.title || beautifyString(propKey)}
</span>
<div key={propKey} onMouseOver={() => {}}>
{!isConnected && (
<NodeGenericInputField
className="mb-2 mt-1"
propKey={propKey}
propSchema={propSchema}
currentValue={getValue(propKey)}
connections={data.connections}
handleInputChange={handleInputChange}
handleInputClick={handleInputClick}
errors={data.errors ?? {}}
displayName={propSchema.title || beautifyString(propKey)}
/>
)}
</div>
</div>
)
);
});
case BlockUIType.NOTE:
// For NOTE blocks, don't render any input handles
const [noteKey, noteSchema] = keys[0];
return (
<div key={noteKey}>
<NodeTextBoxInput
className=""
selfKey={noteKey}
schema={noteSchema as BlockIOStringSubSchema}
value={getValue(noteKey)}
handleInputChange={handleInputChange}
handleInputClick={handleInputClick}
error={data.errors?.[noteKey] ?? ""}
displayName={noteSchema.title || beautifyString(noteKey)}
/>
</div>
);
case BlockUIType.OUTPUT:
// For OUTPUT blocks, only show the 'value' property
return keys.map(([propKey, propSchema]) => {
const isRequired = data.inputSchema.required?.includes(propKey);
const isConnected = isHandleConnected(propKey);
const isAdvanced = propSchema.advanced;
return (
(isRequired || isAdvancedOpen || !isAdvanced) && (
<div key={propKey} onMouseOver={() => {}}>
{propKey !== "value" ? (
<span className="text-m green -mb-1 text-gray-900">
{propSchema.title || beautifyString(propKey)}
</span>
) : (
<NodeHandle
keyName={propKey}
isConnected={isConnected}
isRequired={isRequired}
schema={propSchema}
side="left"
/>
)}
{!isConnected && (
<NodeGenericInputField
className="mb-2 mt-1"
propKey={propKey}
propSchema={propSchema}
currentValue={getValue(propKey)}
connections={data.connections}
handleInputChange={handleInputChange}
handleInputClick={handleInputClick}
errors={data.errors ?? {}}
displayName={propSchema.title || beautifyString(propKey)}
/>
)}
</div>
)
);
});
default:
return keys.map(([propKey, propSchema]) => {
const isRequired = data.inputSchema.required?.includes(propKey);
const isConnected = isHandleConnected(propKey);
const isAdvanced = propSchema.advanced;
return (
(isRequired || isAdvancedOpen || isConnected || !isAdvanced) && (
<div key={propKey} onMouseOver={() => {}}>
<NodeHandle
keyName={propKey}
isConnected={isConnected}
isRequired={isRequired}
schema={propSchema}
side="left"
/>
{!isConnected && (
<NodeGenericInputField
className="mb-2 mt-1"
propKey={propKey}
propSchema={propSchema}
currentValue={getValue(propKey)}
connections={data.connections}
handleInputChange={handleInputChange}
handleInputClick={handleInputClick}
errors={data.errors ?? {}}
displayName={propSchema.title || beautifyString(propKey)}
/>
)}
</div>
)
);
});
}
};
const handleInputChange = (path: string, value: any) => {
const keys = parseKeys(path);
const newValues = JSON.parse(JSON.stringify(data.hardcodedValues));
let current = newValues;
for (let i = 0; i < keys.length - 1; i++) {
const { key: currentKey, index } = keys[i];
if (index !== undefined) {
if (!current[currentKey]) current[currentKey] = [];
if (!current[currentKey][index]) current[currentKey][index] = {};
current = current[currentKey][index];
} else {
if (!current[currentKey]) current[currentKey] = {};
current = current[currentKey];
}
}
const lastKey = keys[keys.length - 1];
if (lastKey.index !== undefined) {
if (!current[lastKey.key]) current[lastKey.key] = [];
current[lastKey.key][lastKey.index] = value;
} else {
current[lastKey.key] = value;
}
// console.log(`Updating hardcoded values for node ${id}:`, newValues);
if (!isInitialSetup.current) {
history.push({
type: "UPDATE_INPUT",
payload: { nodeId: id, oldValues: data.hardcodedValues, newValues },
undo: () => setHardcodedValues(data.hardcodedValues),
redo: () => setHardcodedValues(newValues),
});
}
setHardcodedValues(newValues);
const errors = data.errors || {};
// Remove error with the same key
setNestedProperty(errors, path, null);
setErrors({ ...errors });
};
// Helper function to parse keys with array indices
//TODO move to utils
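// e.g. parseKeys("values_#_0_#_name") -> [{ key: "values", index: 0 }, { key: "name" }]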
const parseKeys = (key: string): ParsedKey[] => {
const splits = key.split(/_@_|_#_|_\$_|\./);
const keys: ParsedKey[] = [];
let currentKey: string | null = null;
splits.forEach((split) => {
const isInteger = /^\d+$/.test(split);
if (!isInteger) {
if (currentKey !== null) {
keys.push({ key: currentKey });
}
currentKey = split;
} else {
if (currentKey !== null) {
keys.push({ key: currentKey, index: parseInt(split, 10) });
currentKey = null;
} else {
throw new Error("Invalid key format: array index without a key");
}
}
});
if (currentKey !== null) {
keys.push({ key: currentKey });
}
return keys;
};
const getValue = (key: string) => {
const keys = parseKeys(key);
return keys.reduce((acc, k) => {
if (acc === undefined) return undefined;
if (k.index !== undefined) {
return Array.isArray(acc[k.key]) ? acc[k.key][k.index] : undefined;
}
return acc[k.key];
}, data.hardcodedValues as any);
};
const isHandleConnected = (key: string) => {
return (
data.connections &&
data.connections.some((conn: any) => {
if (typeof conn === "string") {
const [source, target] = conn.split(" -> ");
return (
(target.includes(key) && target.includes(data.title)) ||
(source.includes(key) && source.includes(data.title))
);
}
return (
(conn.target === id && conn.targetHandle === key) ||
(conn.source === id && conn.sourceHandle === key)
);
})
);
};
const handleInputClick = (key: string) => {
console.log(`Opening modal for key: ${key}`);
setActiveKey(key);
const value = getValue(key);
setInputModalValue(
typeof value === "object" ? JSON.stringify(value, null, 2) : value,
);
setIsModalOpen(true);
};
const handleModalSave = (value: string) => {
if (activeKey) {
try {
const parsedValue = JSON.parse(value);
handleInputChange(activeKey, parsedValue);
} catch (error) {
handleInputChange(activeKey, value);
}
}
setIsModalOpen(false);
setActiveKey(null);
};
const handleOutputClick = () => {
setIsOutputModalOpen(true);
};
const handleHovered = () => {
setIsHovered(true);
};
const handleMouseLeave = () => {
setIsHovered(false);
};
const deleteNode = useCallback(() => {
console.log("Deleting node:", id);
// Remove the node
deleteElements({ nodes: [{ id }] });
}, [id, deleteElements]);
const copyNode = useCallback(() => {
const newId = getNextNodeId();
const currentNode = getNode(id);
if (!currentNode) {
console.error("Cannot copy node: current node not found");
return;
}
const verticalOffset = height ?? 100;
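// Place the copy just above the original node (offset by node height + 20px)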
const newNode: CustomNode = {
id: newId,
type: currentNode.type,
position: {
x: currentNode.position.x,
y: currentNode.position.y - verticalOffset - 20,
},
data: {
...data,
title: `${data.title} (Copy)`,
block_id: data.block_id,
connections: [],
isOutputOpen: false,
},
};
addNodes(newNode);
history.push({
type: "ADD_NODE",
payload: { node: newNode },
undo: () => deleteElements({ nodes: [{ id: newId }] }),
redo: () => addNodes(newNode),
});
}, [id, data, height, addNodes, deleteElements, getNode, getNextNodeId]);
const hasConfigErrors =
data.errors &&
Object.entries(data.errors).some(([_, value]) => value !== null);
const outputData = data.executionResults?.at(-1)?.data;
const hasOutputError =
typeof outputData === "object" &&
outputData !== null &&
"error" in outputData;
useEffect(() => {
if (hasConfigErrors) {
const filteredErrors = Object.fromEntries(
Object.entries(data.errors || {}).filter(
([_, value]) => value !== null,
),
);
console.error(
"Block configuration errors for",
data.title,
":",
filteredErrors,
);
}
if (hasOutputError) {
console.error(
"Block output contains error for",
data.title,
":",
outputData.error,
);
}
}, [hasConfigErrors, hasOutputError, data.errors, outputData, data.title]);
const blockClasses = [
"custom-node",
"dark-theme",
"rounded-xl",
"border",
"bg-white/[.9]",
"shadow-md",
]
.filter(Boolean)
.join(" ");
const errorClass =
hasConfigErrors || hasOutputError ? "border-red-500 border-2" : "";
const statusClass =
hasConfigErrors || hasOutputError
? "failed"
: (data.status?.toLowerCase() ?? "");
const hasAdvancedFields =
data.inputSchema &&
Object.entries(data.inputSchema.properties).some(([key, value]) => {
return (
value.advanced === true && !data.inputSchema.required?.includes(key)
);
});
const inputValues = data.hardcodedValues;
const blockCost =
data.blockCosts &&
data.blockCosts.find((cost) =>
Object.entries(cost.cost_filter).every(
// Undefined, null, or empty values are considered equal
([key, value]) =>
value === inputValues[key] || (!value && !inputValues[key]),
),
);
// Pass objects as separate arguments; interpolating them into a template
// string would just print "[object Object]"
console.debug("Block cost", inputValues, data.blockCosts, "=>", blockCost);
return (
<div
className={`${data.uiType === BlockUIType.NOTE ? "w-[300px]" : "w-[500px]"} ${blockClasses} ${errorClass} ${statusClass} ${data.uiType === BlockUIType.NOTE ? "bg-yellow-100" : "bg-white"}`}
onMouseEnter={handleHovered}
onMouseLeave={handleMouseLeave}
data-id={`custom-node-${id}`}
>
<div
className={`mb-2 p-3 ${data.uiType === BlockUIType.NOTE ? "bg-yellow-100" : getPrimaryCategoryColor(data.categories)} rounded-t-xl`}
>
<div className="flex items-center justify-between">
<div className="font-roboto p-3 text-lg font-semibold">
{beautifyString(
data.blockType?.replace(/Block$/, "") || data.title,
)}
</div>
<SchemaTooltip description={data.description} />
</div>
<div className="flex gap-[5px]">
{isHovered && (
<>
<Button
variant="outline"
size="icon"
onClick={copyNode}
title="Copy node"
>
<Copy size={18} />
</Button>
<Button
variant="outline"
size="icon"
onClick={deleteNode}
title="Delete node"
>
<Trash2 size={18} />
</Button>
</>
)}
</div>
</div>
{blockCost && (
<div className="p-3 text-right font-semibold">
Cost: {blockCost.cost_amount} / {blockCost.cost_type}
</div>
)}
{data.uiType !== BlockUIType.NOTE ? (
<div className="flex items-start justify-between p-3">
<div>
{data.inputSchema &&
generateInputHandles(data.inputSchema, data.uiType)}
</div>
<div className="flex-none">
{data.outputSchema &&
generateOutputHandles(data.outputSchema, data.uiType)}
</div>
</div>
) : (
<div>
{data.inputSchema &&
generateInputHandles(data.inputSchema, data.uiType)}
</div>
)}
{isOutputOpen && data.uiType !== BlockUIType.NOTE && (
<div
data-id="latest-output"
className="nodrag m-3 break-words rounded-md border-[1.5px] p-2"
>
{(data.executionResults?.length ?? 0) > 0 ? (
<>
<DataTable
title="Latest Output"
truncateLongData
data={data.executionResults!.at(-1)?.data || {}}
/>
<div className="flex justify-end">
<Button variant="ghost" onClick={handleOutputClick}>
View More
</Button>
</div>
</>
) : (
<span>No outputs yet</span>
)}
</div>
)}
{data.uiType !== BlockUIType.NOTE && (
<div className="mt-2.5 flex items-center pb-4 pl-4">
<Switch checked={isOutputOpen} onCheckedChange={toggleOutput} />
<span className="m-1 mr-4">Output</span>
{hasAdvancedFields && (
<>
<Switch onCheckedChange={toggleAdvancedSettings} />
<span className="m-1">Advanced</span>
</>
)}
{data.status && (
<Badge
variant="outline"
data-id={`badge-${id}-${data.status}`}
className={cn(data.status.toLowerCase(), "ml-auto mr-5")}
>
{data.status}
</Badge>
)}
</div>
)}
<InputModalComponent
title={activeKey ? `Enter ${beautifyString(activeKey)}` : undefined}
isOpen={isModalOpen}
onClose={() => setIsModalOpen(false)}
onSave={handleModalSave}
defaultValue={inputModalValue}
key={activeKey}
/>
<OutputModalComponent
isOpen={isOutputModalOpen}
onClose={() => setIsOutputModalOpen(false)}
executionResults={data.executionResults?.toReversed() || []}
/>
</div>
);
}

@@ -0,0 +1,92 @@
import { beautifyString } from "@/lib/utils";
import { Button } from "./ui/button";
import {
Table,
TableBody,
TableCell,
TableHead,
TableHeader,
TableRow,
} from "./ui/table";
import { Clipboard } from "lucide-react";
import { useToast } from "./ui/use-toast";
type DataTableProps = {
title?: string;
truncateLongData?: boolean;
data: { [key: string]: Array<any> };
};
export default function DataTable({
title,
truncateLongData,
data,
}: DataTableProps) {
const { toast } = useToast();
const maxChars = 100;
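// Maximum characters shown per cell when truncateLongData is enabled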
const copyData = (pin: string, data: string) => {
navigator.clipboard.writeText(data).then(() => {
toast({
title: `"${pin}" data copied to clipboard!`,
duration: 2000,
});
});
};
return (
<>
{title && <strong className="mt-2 flex justify-center">{title}</strong>}
<Table className="cursor-default select-text">
<TableHeader>
<TableRow>
<TableHead>Pin</TableHead>
<TableHead>Data</TableHead>
</TableRow>
</TableHeader>
<TableBody>
{Object.entries(data).map(([key, value]) => (
<TableRow className="group" key={key}>
<TableCell className="cursor-text">
{beautifyString(key)}
</TableCell>
<TableCell className="cursor-text">
<div className="flex min-h-9 items-center">
<Button
className="absolute right-1 top-auto m-1 hidden p-2 group-hover:block"
variant="outline"
size="icon"
onClick={() =>
copyData(
beautifyString(key),
value
.map((i) =>
typeof i === "object"
? JSON.stringify(i)
: String(i),
)
.join(", "),
)
}
title="Copy Data"
>
<Clipboard size={18} />
</Button>
{value
.map((i) => {
const text =
typeof i === "object" ? JSON.stringify(i) : String(i);
return truncateLongData && text.length > maxChars
? text.slice(0, maxChars) + "..."
: text;
})
.join(", ")}
</div>
</TableCell>
</TableRow>
))}
</TableBody>
</Table>
</>
);
}

@@ -0,0 +1,636 @@
"use client";
import React, {
useState,
useCallback,
useEffect,
useRef,
MouseEvent,
createContext,
} from "react";
import {
ReactFlow,
ReactFlowProvider,
Controls,
Background,
Node,
OnConnect,
Connection,
MarkerType,
NodeChange,
EdgeChange,
useReactFlow,
applyEdgeChanges,
applyNodeChanges,
useViewport,
} from "@xyflow/react";
import "@xyflow/react/dist/style.css";
import { CustomNode } from "./CustomNode";
import "./flow.css";
import { Link } from "@/lib/autogpt-server-api";
import { getTypeColor, filterBlocksByType } from "@/lib/utils";
import { history } from "./history";
import { CustomEdge } from "./CustomEdge";
import ConnectionLine from "./ConnectionLine";
import { Control, ControlPanel } from "@/components/edit/control/ControlPanel";
import { SaveControl } from "@/components/edit/control/SaveControl";
import { BlocksControl } from "@/components/edit/control/BlocksControl";
import {
IconPlay,
IconUndo2,
IconRedo2,
IconSquare,
IconOutput,
} from "@/components/ui/icons";
import { startTutorial } from "./tutorial";
import useAgentGraph from "@/hooks/useAgentGraph";
import { v4 as uuidv4 } from "uuid";
import { useRouter, usePathname, useSearchParams } from "next/navigation";
import { LogOut } from "lucide-react";
import RunnerUIWrapper, {
RunnerUIWrapperRef,
} from "@/components/RunnerUIWrapper";
// Minimum distance (in px) a node must be dragged before the move is recorded in history.
// This prevents spamming the history with tiny movements, e.g. when clicking an input inside a block.
const MINIMUM_MOVE_BEFORE_LOG = 50;
type FlowContextType = {
visualizeBeads: "no" | "static" | "animate";
setIsAnyModalOpen: (isOpen: boolean) => void;
getNextNodeId: () => string;
};
export const FlowContext = createContext<FlowContextType | null>(null);
const FlowEditor: React.FC<{
flowID?: string;
template?: boolean;
className?: string;
}> = ({ flowID, template, className }) => {
const { addNodes, addEdges, getNode, deleteElements, updateNode } =
useReactFlow<CustomNode, CustomEdge>();
const [nodeId, setNodeId] = useState<number>(1);
const [copiedNodes, setCopiedNodes] = useState<CustomNode[]>([]);
const [copiedEdges, setCopiedEdges] = useState<CustomEdge[]>([]);
const [isAnyModalOpen, setIsAnyModalOpen] = useState(false);
const [visualizeBeads, setVisualizeBeads] = useState<
"no" | "static" | "animate"
>("animate");
const {
agentName,
setAgentName,
agentDescription,
setAgentDescription,
savedAgent,
availableNodes,
getOutputType,
requestSave,
requestSaveAndRun,
requestStopRun,
isRunning,
nodes,
setNodes,
edges,
setEdges,
} = useAgentGraph(flowID, template, visualizeBeads !== "no");
const router = useRouter();
const pathname = usePathname();
const initialPositionRef = useRef<{
[key: string]: { x: number; y: number };
}>({});
const isDragging = useRef(false);
// State to control if tutorial has started
const [tutorialStarted, setTutorialStarted] = useState(false);
// State to control if blocks menu should be pinned open
const [pinBlocksPopover, setPinBlocksPopover] = useState(false);
const runnerUIRef = useRef<RunnerUIWrapperRef>(null);
useEffect(() => {
const params = new URLSearchParams(window.location.search);
// If resetting tutorial
if (params.get("resetTutorial") === "true") {
localStorage.removeItem("shepherd-tour"); // Clear tutorial flag
router.push(pathname);
} else {
// Otherwise, start tutorial if conditions are met
const shouldStartTutorial = !localStorage.getItem("shepherd-tour");
if (
shouldStartTutorial &&
availableNodes.length > 0 &&
!tutorialStarted
) {
startTutorial(setPinBlocksPopover);
setTutorialStarted(true);
localStorage.setItem("shepherd-tour", "yes");
}
}
}, [availableNodes, tutorialStarted, router, pathname]);
useEffect(() => {
const handleKeyDown = (event: KeyboardEvent) => {
const isMac = navigator.platform.toUpperCase().indexOf("MAC") >= 0;
const isUndo =
(isMac ? event.metaKey : event.ctrlKey) && event.key === "z";
const isRedo =
(isMac ? event.metaKey : event.ctrlKey) &&
(event.key === "y" || (event.shiftKey && event.key === "Z"));
if (isUndo) {
event.preventDefault();
handleUndo();
}
if (isRedo) {
event.preventDefault();
handleRedo();
}
};
window.addEventListener("keydown", handleKeyDown);
return () => {
window.removeEventListener("keydown", handleKeyDown);
};
}, []);
const onNodeDragStart = (_: MouseEvent, node: Node) => {
initialPositionRef.current[node.id] = { ...node.position };
isDragging.current = true;
};
const onNodeDragEnd = (_: MouseEvent, node: Node | null) => {
if (!node) return;
isDragging.current = false;
const oldPosition = initialPositionRef.current[node.id];
const newPosition = node.position;
// Calculate the movement distance
if (!oldPosition || !newPosition) return;
const distanceMoved = Math.sqrt(
Math.pow(newPosition.x - oldPosition.x, 2) +
Math.pow(newPosition.y - oldPosition.y, 2),
);
if (distanceMoved > MINIMUM_MOVE_BEFORE_LOG) {
// Minimum movement threshold
history.push({
type: "UPDATE_NODE_POSITION",
payload: { nodeId: node.id, oldPosition, newPosition },
undo: () => updateNode(node.id, { position: oldPosition }),
redo: () => updateNode(node.id, { position: newPosition }),
});
}
delete initialPositionRef.current[node.id];
};
// Function to clear status, output, and close the output info dropdown of all nodes
// and reset data beads on edges
const clearNodesStatusAndOutput = useCallback(() => {
setNodes((nds) => {
const newNodes = nds.map((node) => ({
...node,
data: {
...node.data,
status: undefined,
isOutputOpen: false,
},
}));
return newNodes;
});
}, [setNodes]);
const onNodesChange = useCallback(
(nodeChanges: NodeChange<CustomNode>[]) => {
// Persist the changes
setNodes((prev) => applyNodeChanges(nodeChanges, prev));
// Remove all edges that were connected to deleted nodes
nodeChanges
.filter((change) => change.type === "remove")
.forEach((deletedNode) => {
const nodeID = deletedNode.id;
const deletedNodeData = nodes.find((node) => node.id === nodeID);
if (deletedNodeData) {
history.push({
type: "DELETE_NODE",
payload: { node: deletedNodeData },
undo: () => addNodes(deletedNodeData),
redo: () => deleteElements({ nodes: [{ id: nodeID }] }),
});
}
const connectedEdges = edges.filter((edge) =>
[edge.source, edge.target].includes(nodeID),
);
deleteElements({
edges: connectedEdges.map((edge) => ({ id: edge.id })),
});
});
},
[deleteElements, setNodes, nodes, edges, addNodes],
);
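// Derive a stable edge ID from either a backend Link (source_id/sink_id)
// or a ReactFlow Connection (source/sourceHandle/target/targetHandle)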
const formatEdgeID = useCallback((conn: Link | Connection): string => {
if ("sink_id" in conn) {
return `${conn.source_id}_${conn.source_name}_${conn.sink_id}_${conn.sink_name}`;
} else {
return `${conn.source}_${conn.sourceHandle}_${conn.target}_${conn.targetHandle}`;
}
}, []);
const onConnect: OnConnect = useCallback(
(connection: Connection) => {
// Check if this exact connection already exists
const existingConnection = edges.find(
(edge) =>
edge.source === connection.source &&
edge.target === connection.target &&
edge.sourceHandle === connection.sourceHandle &&
edge.targetHandle === connection.targetHandle,
);
if (existingConnection) {
console.warn("This exact connection already exists.");
return;
}
const edgeColor = getTypeColor(
getOutputType(nodes, connection.source!, connection.sourceHandle!),
);
const sourceNode = getNode(connection.source!);
const newEdge: CustomEdge = {
id: formatEdgeID(connection),
type: "custom",
markerEnd: {
type: MarkerType.ArrowClosed,
strokeWidth: 2,
color: edgeColor,
},
data: {
edgeColor,
sourcePos: sourceNode!.position,
isStatic: sourceNode!.data.isOutputStatic,
},
...connection,
source: connection.source!,
target: connection.target!,
};
addEdges(newEdge);
history.push({
type: "ADD_EDGE",
payload: { edge: newEdge },
undo: () => {
deleteElements({ edges: [{ id: newEdge.id }] });
},
redo: () => {
addEdges(newEdge);
},
});
clearNodesStatusAndOutput(); // Clear status and output on connection change
},
[
getNode,
addEdges,
deleteElements,
clearNodesStatusAndOutput,
nodes,
edges,
formatEdgeID,
getOutputType,
],
);
const onEdgesChange = useCallback(
(edgeChanges: EdgeChange<CustomEdge>[]) => {
// Persist the changes
setEdges((prev) => applyEdgeChanges(edgeChanges, prev));
// Propagate edge changes to node data
const addedEdges = edgeChanges.filter((change) => change.type === "add"),
replaceEdges = edgeChanges.filter(
(change) => change.type === "replace",
),
removedEdges = edgeChanges.filter((change) => change.type === "remove"),
selectedEdges = edgeChanges.filter(
(change) => change.type === "select",
);
if (addedEdges.length > 0 || removedEdges.length > 0) {
setNodes((nds) => {
const newNodes = nds.map((node) => ({
...node,
data: {
...node.data,
connections: [
// Remove node connections for deleted edges
...node.data.connections.filter(
(conn) =>
!removedEdges.some(
(removedEdge) => removedEdge.id === conn.edge_id,
),
),
// Add node connections for added edges
...addedEdges.map((addedEdge) => ({
edge_id: addedEdge.item.id,
source: addedEdge.item.source,
target: addedEdge.item.target,
sourceHandle: addedEdge.item.sourceHandle!,
targetHandle: addedEdge.item.targetHandle!,
})),
],
},
}));
return newNodes;
});
if (removedEdges.length > 0) {
clearNodesStatusAndOutput(); // Clear status and output on edge deletion
}
}
if (replaceEdges.length > 0) {
// Reset node connections for all edges
console.warn(
"useReactFlow().setRootEdges was used to overwrite all edges. " +
"Use addEdges, deleteElements, or reconnectEdge for incremental changes.",
replaceEdges,
);
setNodes((nds) =>
nds.map((node) => ({
...node,
data: {
...node.data,
connections: [
...replaceEdges.map((replaceEdge) => ({
edge_id: replaceEdge.item.id,
source: replaceEdge.item.source,
target: replaceEdge.item.target,
sourceHandle: replaceEdge.item.sourceHandle!,
targetHandle: replaceEdge.item.targetHandle!,
})),
],
},
})),
);
clearNodesStatusAndOutput();
}
},
[setNodes, clearNodesStatusAndOutput, setEdges],
);
const getNextNodeId = useCallback(() => {
return uuidv4();
}, []);
const { x, y, zoom } = useViewport();
const addNode = useCallback(
(blockId: string, nodeType: string) => {
const nodeSchema = availableNodes.find((node) => node.id === blockId);
if (!nodeSchema) {
console.error(`Schema not found for block ID: ${blockId}`);
return;
}
// Calculate the center of the viewport considering zoom
const viewportCenter = {
x: (window.innerWidth / 2 - x) / zoom,
y: (window.innerHeight / 2 - y) / zoom,
};
const newNode: CustomNode = {
id: nodeId.toString(),
type: "custom",
position: viewportCenter, // Set the position to the calculated viewport center
data: {
blockType: nodeType,
blockCosts: nodeSchema.costs,
title: `${nodeType} ${nodeId}`,
description: nodeSchema.description,
categories: nodeSchema.categories,
inputSchema: nodeSchema.inputSchema,
outputSchema: nodeSchema.outputSchema,
hardcodedValues: {},
connections: [],
isOutputOpen: false,
block_id: blockId,
isOutputStatic: nodeSchema.staticOutput,
uiType: nodeSchema.uiType,
},
};
addNodes(newNode);
setNodeId((prevId) => prevId + 1);
clearNodesStatusAndOutput(); // Clear status and output when a new node is added
history.push({
type: "ADD_NODE",
payload: { node: newNode.data },
undo: () => deleteElements({ nodes: [{ id: newNode.id }] }),
redo: () => addNodes(newNode),
});
},
[
nodeId,
availableNodes,
addNodes,
deleteElements,
clearNodesStatusAndOutput,
x,
y,
zoom,
],
);
const handleUndo = () => {
history.undo();
};
const handleRedo = () => {
history.redo();
};
const handleKeyDown = useCallback(
(event: KeyboardEvent) => {
// Prevent copy/paste if any modal is open or if the focus is on an input element
const activeElement = document.activeElement;
const isInputField =
activeElement?.tagName === "INPUT" ||
activeElement?.tagName === "TEXTAREA" ||
activeElement?.getAttribute("contenteditable") === "true";
if (isAnyModalOpen || isInputField) return;
if (event.ctrlKey || event.metaKey) {
if (event.key === "c" || event.key === "C") {
// Copy selected nodes
const selectedNodes = nodes.filter((node) => node.selected);
const selectedEdges = edges.filter((edge) => edge.selected);
setCopiedNodes(selectedNodes);
setCopiedEdges(selectedEdges);
}
if (event.key === "v" || event.key === "V") {
// Paste copied nodes
if (copiedNodes.length > 0) {
const oldToNewNodeIDMap: Record<string, string> = {};
const pastedNodes = copiedNodes.map((node, index) => {
const newNodeId = (nodeId + index).toString();
oldToNewNodeIDMap[node.id] = newNodeId;
return {
...node,
id: newNodeId,
position: {
x: node.position.x + 20, // Offset pasted nodes
y: node.position.y + 20,
},
data: {
...node.data,
status: undefined, // Reset status
executionResults: undefined, // Clear output data
},
};
});
setNodes((existingNodes) =>
// Deselect copied nodes
existingNodes.map((node) => ({ ...node, selected: false })),
);
addNodes(pastedNodes);
setNodeId((prevId) => prevId + copiedNodes.length);
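// Remap pasted edges to the new node IDs; endpoints outside the copied
// selection keep their original IDs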
const pastedEdges = copiedEdges.map((edge) => {
const newSourceId = oldToNewNodeIDMap[edge.source] ?? edge.source;
const newTargetId = oldToNewNodeIDMap[edge.target] ?? edge.target;
return {
...edge,
id: `${newSourceId}_${edge.sourceHandle}_${newTargetId}_${edge.targetHandle}_${Date.now()}`,
source: newSourceId,
target: newTargetId,
};
});
addEdges(pastedEdges);
}
}
}
},
[
isAnyModalOpen,
nodes,
edges,
copiedNodes,
setNodes,
addNodes,
copiedEdges,
addEdges,
nodeId,
],
);
useEffect(() => {
window.addEventListener("keydown", handleKeyDown);
return () => {
window.removeEventListener("keydown", handleKeyDown);
};
}, [handleKeyDown]);
const onNodesDelete = useCallback(() => {
clearNodesStatusAndOutput();
}, [clearNodesStatusAndOutput]);
const editorControls: Control[] = [
{
label: "Undo",
icon: <IconUndo2 />,
onClick: handleUndo,
},
{
label: "Redo",
icon: <IconRedo2 />,
onClick: handleRedo,
},
{
label: !savedAgent
? "Please save the agent to run"
: !isRunning
? "Run"
: "Stop",
icon: !isRunning ? <IconPlay /> : <IconSquare />,
onClick: !isRunning
? () => runnerUIRef.current?.runOrOpenInput()
: requestStopRun,
disabled: !savedAgent,
},
{
label: "Runner Output",
icon: <LogOut size={18} strokeWidth={1.8} />,
onClick: () => runnerUIRef.current?.openRunnerOutput(),
},
];
return (
<FlowContext.Provider
value={{ visualizeBeads, setIsAnyModalOpen, getNextNodeId }}
>
<div className={className}>
<ReactFlow
nodes={nodes}
edges={edges}
nodeTypes={{ custom: CustomNode }}
edgeTypes={{ custom: CustomEdge }}
connectionLineComponent={ConnectionLine}
onConnect={onConnect}
onNodesChange={onNodesChange}
onNodesDelete={onNodesDelete}
onEdgesChange={onEdgesChange}
onNodeDragStop={onNodeDragEnd}
onNodeDragStart={onNodeDragStart}
deleteKeyCode={["Backspace", "Delete"]}
minZoom={0.2}
maxZoom={2}
>
<Controls />
<Background />
<ControlPanel className="absolute z-10" controls={editorControls}>
<BlocksControl
pinBlocksPopover={pinBlocksPopover} // Pass the state to BlocksControl
blocks={availableNodes}
addBlock={addNode}
/>
<SaveControl
agentMeta={savedAgent}
onSave={(isTemplate) => requestSave(isTemplate ?? false)}
agentDescription={agentDescription}
onDescriptionChange={setAgentDescription}
agentName={agentName}
onNameChange={setAgentName}
/>
</ControlPanel>
</ReactFlow>
</div>
<RunnerUIWrapper
ref={runnerUIRef}
nodes={nodes}
setNodes={setNodes}
isRunning={isRunning}
requestSaveAndRun={requestSaveAndRun}
/>
</FlowContext.Provider>
);
};
const WrappedFlowEditor: typeof FlowEditor = (props) => (
<ReactFlowProvider>
<FlowEditor {...props} />
</ReactFlowProvider>
);
export default WrappedFlowEditor;

@@ -1,9 +1,9 @@
 import React, { FC, useEffect, useState } from "react";
-import { Button } from "../../../../../components/__legacy__/ui/button";
-import { Textarea } from "../../../../../components/__legacy__/ui/textarea";
+import { Button } from "./ui/button";
+import { Textarea } from "./ui/textarea";
 import { Maximize2, Minimize2, Clipboard } from "lucide-react";
 import { createPortal } from "react-dom";
-import { toast } from "../../../../../components/molecules/Toast/use-toast";
+import { toast } from "./ui/use-toast";
 
 interface ModalProps {
   isOpen: boolean;
Some files were not shown because too many files have changed in this diff.