Compare commits


1 Commit

Author    SHA1        Message               Date
SwiftyOS  94f0cfd38a  removed git from cli  2024-05-09 09:36:24 +02:00
5,125 changed files with 168,606 additions and 593,478 deletions


@@ -1,18 +0,0 @@
version = 1
test_patterns = ["**/*.spec.ts","**/*_test.py","**/*_tests.py","**/test_*.py"]
exclude_patterns = ["classic/**"]
[[analyzers]]
name = "javascript"
[analyzers.meta]
plugins = ["react"]
environment = ["nodejs"]
[[analyzers]]
name = "python"
[analyzers.meta]
runtime_version = "3.x.x"


@@ -1,65 +0,0 @@
# Ignore everything by default, selectively add things to context
*
# Platform - Libs
!autogpt_platform/autogpt_libs/autogpt_libs/
!autogpt_platform/autogpt_libs/pyproject.toml
!autogpt_platform/autogpt_libs/poetry.lock
!autogpt_platform/autogpt_libs/README.md
# Platform - Backend
!autogpt_platform/backend/backend/
!autogpt_platform/backend/test/e2e_test_data.py
!autogpt_platform/backend/migrations/
!autogpt_platform/backend/schema.prisma
!autogpt_platform/backend/pyproject.toml
!autogpt_platform/backend/poetry.lock
!autogpt_platform/backend/README.md
!autogpt_platform/backend/.env
# Platform - Market
!autogpt_platform/market/market/
!autogpt_platform/market/scripts.py
!autogpt_platform/market/schema.prisma
!autogpt_platform/market/pyproject.toml
!autogpt_platform/market/poetry.lock
!autogpt_platform/market/README.md
# Platform - Frontend
!autogpt_platform/frontend/src/
!autogpt_platform/frontend/public/
!autogpt_platform/frontend/scripts/
!autogpt_platform/frontend/package.json
!autogpt_platform/frontend/pnpm-lock.yaml
!autogpt_platform/frontend/tsconfig.json
!autogpt_platform/frontend/README.md
## config
!autogpt_platform/frontend/*.config.*
!autogpt_platform/frontend/.env.*
!autogpt_platform/frontend/.env
# Classic - AutoGPT
!classic/original_autogpt/autogpt/
!classic/original_autogpt/pyproject.toml
!classic/original_autogpt/poetry.lock
!classic/original_autogpt/README.md
!classic/original_autogpt/tests/
# Classic - Benchmark
!classic/benchmark/agbenchmark/
!classic/benchmark/pyproject.toml
!classic/benchmark/poetry.lock
!classic/benchmark/README.md
# Classic - Forge
!classic/forge/
!classic/forge/pyproject.toml
!classic/forge/poetry.lock
!classic/forge/README.md
# Classic - Frontend
!classic/frontend/build/web/
# Explicitly re-ignore some folders
.*
**/__pycache__

.gitattributes

@@ -1,10 +1,3 @@
classic/frontend/build/** linguist-generated
frontend/build/* linguist-generated
**/poetry.lock linguist-generated
docs/_javascript/** linguist-vendored
# Exclude VCR cassettes from stats
classic/forge/tests/vcr_cassettes/**/**.y*ml linguist-generated
* text=auto

.github/CODEOWNERS

@@ -1,7 +1,5 @@
* @Significant-Gravitas/maintainers
.github/workflows/ @Significant-Gravitas/devops
classic/forge/ @Significant-Gravitas/forge-maintainers
classic/benchmark/ @Significant-Gravitas/benchmark-maintainers
classic/frontend/ @Significant-Gravitas/frontend-maintainers
autogpt_platform/infra @Significant-Gravitas/devops
.github/CODEOWNERS @Significant-Gravitas/admins
.github/workflows/ @Significant-Gravitas/devops
autogpts/autogpt/ @Significant-Gravitas/maintainers
autogpts/forge/ @Significant-Gravitas/forge-maintainers
benchmark/ @Significant-Gravitas/benchmark-maintainers
frontend/ @Significant-Gravitas/frontend-maintainers


@@ -16,7 +16,7 @@ body:
[discussions]: https://github.com/Significant-Gravitas/AutoGPT/discussions
[#tech-support]: https://discord.com/channels/1092243196446249134/1092275629602394184
[existing issues]: https://github.com/Significant-Gravitas/AutoGPT/issues?q=is%3Aissue
[wiki page on Contributing]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing
[wiki page on Contributing]: https://github.com/Significant-Gravitas/Nexus/wiki/Contributing
- type: checkboxes
attributes:
@@ -88,16 +88,14 @@ body:
- type: dropdown
attributes:
label: What LLM Provider do you use?
label: Do you use OpenAI GPT-3 or GPT-4?
description: >
If you are using AutoGPT with `SMART_LLM=gpt-3.5-turbo`, your problems may be caused by
If you are using AutoGPT with `--gpt3only`, your problems may be caused by
the [limitations](https://github.com/Significant-Gravitas/AutoGPT/issues?q=is%3Aissue+label%3A%22AI+model+limitation%22) of GPT-3.5.
options:
- Azure
- Groq
- Anthropic
- Llamafile
- Other (detail in issue)
- GPT-3.5
- GPT-4
- GPT-4(32k)
validations:
required: true
@@ -128,13 +126,6 @@ body:
label: Specify the area
description: Please specify the area you think is best related to the issue.
- type: input
attributes:
label: What commit or version are you using?
description: It is helpful for us to reproduce to know what version of the software you were using when this happened. Please run `git log -n 1 --pretty=format:"%H"` to output the full commit hash.
validations:
required: true
- type: textarea
attributes:
label: Describe your issue.


@@ -5,7 +5,7 @@ body:
- type: markdown
attributes:
value: |
First, check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing)
First, check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/Nexus/wiki/Contributing)
Please provide a searchable summary of the issue in the title above ⬆️.
- type: checkboxes
attributes:


@@ -1,39 +1,31 @@
### Background
<!-- Clearly explain the need for these changes: -->
### Changes 🏗️
<!-- Concisely describe all of the changes made in this pull request: -->
### Checklist 📋
### PR Quality Scorecard ✨
#### For code changes:
- [ ] I have clearly listed my changes in the PR description
- [ ] I have made a test plan
- [ ] I have tested my changes according to the test plan:
<!-- Put your test plan here: -->
- [ ] ...
<!--
Check out our contribution guide:
https://github.com/Significant-Gravitas/Nexus/wiki/Contributing
<details>
<summary>Example test plan</summary>
- [ ] Create from scratch and execute an agent with at least 3 blocks
- [ ] Import an agent from file upload, and confirm it executes correctly
- [ ] Upload agent to marketplace
- [ ] Import an agent from marketplace and confirm it executes correctly
- [ ] Edit an agent from monitor, and confirm it executes correctly
</details>
1. Avoid duplicate work, issues, PRs etc.
2. Also consider contributing something other than code; see the [contribution guide]
for options.
3. Clearly explain your changes.
4. Avoid making unnecessary changes, especially if they're purely based on personal
preferences. Doing so is the maintainers' job. ;-)
-->
#### For configuration changes:
- [ ] `.env.default` is updated or already compatible with my changes
- [ ] `docker-compose.yml` is updated or already compatible with my changes
- [ ] I have included a list of my configuration changes in the PR description (under **Changes**)
<details>
<summary>Examples of configuration changes</summary>
- Changing ports
- Adding new services that need to communicate with each other
- Secrets or environment variable changes
- New or infrastructure changes such as databases
</details>
- [x] Have you used the PR description template? &ensp; `+2 pts`
- [ ] Is your pull request atomic, focusing on a single change? &ensp; `+5 pts`
- [ ] Have you linked the GitHub issue(s) that this PR addresses? &ensp; `+5 pts`
- [ ] Have you documented your changes clearly and comprehensively? &ensp; `+5 pts`
- [ ] Have you changed or added a feature? &ensp; `-4 pts`
- [ ] Have you added/updated corresponding documentation? &ensp; `+4 pts`
- [ ] Have you added/updated corresponding integration tests? &ensp; `+5 pts`
- [ ] Have you changed the behavior of AutoGPT? &ensp; `-5 pts`
- [ ] Have you also run `agbenchmark` to verify that these changes do not regress performance? &ensp; `+10 pts`


@@ -1,322 +0,0 @@
# GitHub Copilot Instructions for AutoGPT
This file provides comprehensive onboarding information for GitHub Copilot coding agent to work efficiently with the AutoGPT repository.
## Repository Overview
**AutoGPT** is a powerful platform for creating, deploying, and managing continuous AI agents that automate complex workflows. This is a large monorepo (~150MB) containing multiple components:
- **AutoGPT Platform** (`autogpt_platform/`) - Main focus: Modern AI agent platform (Polyform Shield License)
- **Classic AutoGPT** (`classic/`) - Legacy agent system (MIT License)
- **Documentation** (`docs/`) - MkDocs-based documentation site
- **Infrastructure** - Docker configurations, CI/CD, and development tools
**Primary Languages & Frameworks:**
- **Backend**: Python 3.10-3.13, FastAPI, Prisma ORM, PostgreSQL, RabbitMQ
- **Frontend**: TypeScript, Next.js 15, React, Tailwind CSS, Radix UI
- **Development**: Docker, Poetry, pnpm, Playwright, Storybook
## Build and Validation Instructions
### Essential Setup Commands
**Always run these commands in the correct directory and in this order:**
1. **Initial Setup** (required once):
```bash
# Clone and enter repository
git clone <repo> && cd AutoGPT
# Start all services (database, redis, rabbitmq, clamav)
cd autogpt_platform && docker compose --profile local up deps --build --detach
```
2. **Backend Setup** (always run before backend development):
```bash
cd autogpt_platform/backend
poetry install # Install dependencies
poetry run prisma migrate dev # Run database migrations
poetry run prisma generate # Generate Prisma client
```
3. **Frontend Setup** (always run before frontend development):
```bash
cd autogpt_platform/frontend
pnpm install # Install dependencies
```
### Runtime Requirements
**Critical:** Always ensure Docker services are running before starting development:
```bash
cd autogpt_platform && docker compose --profile local up deps --build --detach
```
**Python Version:** Use Python 3.11 (required; managed by Poetry via pyproject.toml)
**Node.js Version:** Use Node.js 21+ with pnpm package manager
### Development Commands
**Backend Development:**
```bash
cd autogpt_platform/backend
poetry run serve # Start development server (port 8000)
poetry run test # Run all tests (requires ~5 minutes)
poetry run pytest path/to/test.py # Run specific test
poetry run format # Format code (Black + isort) - always run first
poetry run lint # Lint code (ruff) - run after format
```
**Frontend Development:**
```bash
cd autogpt_platform/frontend
pnpm dev # Start development server (port 3000) - use for active development
pnpm build # Build for production (only needed for E2E tests or deployment)
pnpm test # Run Playwright E2E tests (requires build first)
pnpm test-ui # Run tests with UI
pnpm format # Format and lint code
pnpm storybook # Start component development server
```
### Testing Strategy
**Backend Tests:**
- **Block Tests**: `poetry run pytest backend/blocks/test/test_block.py -xvs` (validates all blocks)
- **Specific Block**: `poetry run pytest 'backend/blocks/test/test_block.py::test_available_blocks[BlockName]' -xvs`
- **Snapshot Tests**: Use `--snapshot-update` when output changes; always review the result with `git diff`
**Frontend Tests:**
- **E2E Tests**: Always run `pnpm dev` before `pnpm test` (Playwright requires a running instance)
- **Component Tests**: Use Storybook for isolated component development
### Critical Validation Steps
**Before committing changes:**
1. Run `poetry run format` (backend) and `pnpm format` (frontend)
2. Ensure all tests pass in modified areas
3. Verify Docker services are still running
4. Check that database migrations apply cleanly
**Common Issues & Workarounds:**
- **Prisma issues**: Run `poetry run prisma generate` after schema changes
- **Permission errors**: Ensure Docker has proper permissions
- **Port conflicts**: Check the `docker-compose.yml` file for the current list of exposed ports. You can list all mapped ports with `docker compose ps`.
- **Test timeouts**: Backend tests can take 5+ minutes, use `-x` flag to stop on first failure
## Project Layout & Architecture
### Core Architecture
**AutoGPT Platform** (`autogpt_platform/`):
- `backend/` - FastAPI server with async support
- `backend/backend/` - Core API logic
- `backend/blocks/` - Agent execution blocks
- `backend/data/` - Database models and schemas
- `schema.prisma` - Database schema definition
- `frontend/` - Next.js application
- `src/app/` - App Router pages and layouts
- `src/components/` - Reusable React components
- `src/lib/` - Utilities and configurations
- `autogpt_libs/` - Shared Python utilities
- `docker-compose.yml` - Development stack orchestration
**Key Configuration Files:**
- `pyproject.toml` - Python dependencies and tooling
- `package.json` - Node.js dependencies and scripts
- `schema.prisma` - Database schema and migrations
- `next.config.mjs` - Next.js configuration
- `tailwind.config.ts` - Styling configuration
### Security & Middleware
**Cache Protection**: Backend includes middleware preventing sensitive data caching in browsers/proxies
**Authentication**: JWT-based with Supabase integration
**User ID Validation**: All data access requires user ID checks; verify this for any `data/*.py` changes
### Development Workflow
**GitHub Actions**: Multiple CI/CD workflows in `.github/workflows/`
- `platform-backend-ci.yml` - Backend testing and validation
- `platform-frontend-ci.yml` - Frontend testing and validation
- `platform-fullstack-ci.yml` - End-to-end integration tests
**Pre-commit Hooks**: Run linting and formatting checks
**Conventional Commits**: Use format `type(scope): description` (e.g., `feat(backend): add API`)
### Key Source Files
**Backend Entry Points:**
- `backend/backend/server/server.py` - FastAPI application setup
- `backend/backend/data/` - Database models and user management
- `backend/blocks/` - Agent execution blocks and logic
**Frontend Entry Points:**
- `frontend/src/app/layout.tsx` - Root application layout
- `frontend/src/app/page.tsx` - Home page
- `frontend/src/lib/supabase/` - Authentication and database client
**Protected Routes**: Update `frontend/lib/supabase/middleware.ts` when adding protected routes
### Agent Block System
Agents are built using a visual block-based system where each block performs a single action. Blocks are defined in `backend/blocks/` and must include:
- Block definition with input/output schemas
- Execution logic with proper error handling
- Tests validating functionality
### Database & ORM
**Prisma ORM** with PostgreSQL backend including pgvector for embeddings:
- Schema in `schema.prisma`
- Migrations in `backend/migrations/`
- Always run `prisma migrate dev` and `prisma generate` after schema changes
## Environment Configuration
### Configuration Files Priority Order
1. **Backend**: `/backend/.env.default` → `/backend/.env` (user overrides)
2. **Frontend**: `/frontend/.env.default` → `/frontend/.env` (user overrides)
3. **Platform**: `/.env.default` (Supabase/shared) → `/.env` (user overrides)
4. Docker Compose `environment:` sections override file-based config
5. Shell environment variables have highest precedence
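As a rough illustration of that precedence (a sketch, not project code), here is how the merge order could be expressed with `python-dotenv`, where each later `update` overrides earlier values:

```python
import os

from dotenv import dotenv_values  # python-dotenv; returns {} for missing files


def effective_config() -> dict[str, str]:
    """Merge config sources from lowest to highest precedence."""
    config: dict[str, str] = {}
    # Defaults first, then user overrides (.env.default -> .env)
    for env_file in (".env.default", ".env"):
        config.update(
            {k: v for k, v in dotenv_values(env_file).items() if v is not None}
        )
    # Docker Compose `environment:` entries and shell variables both arrive
    # through the process environment, which wins over file-based config.
    config.update(os.environ)
    return config
```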
### Docker Environment Setup
- All services use hardcoded defaults (no `${VARIABLE}` substitutions)
- The `env_file` directive loads variables INTO containers at runtime
- Backend/Frontend services use YAML anchors for consistent configuration
- Copy `.env.default` files to `.env` for local development customization
## Advanced Development Patterns
### Adding New Blocks
1. Create file in `/backend/backend/blocks/`
2. Inherit from `Block` base class with input/output schemas
3. Implement `run` method with proper error handling
4. Generate block UUID using `uuid.uuid4()`
5. Register in block registry
6. Write tests alongside block implementation
7. Consider how inputs/outputs connect with other blocks in graph editor
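A minimal sketch of steps 2-4, assuming `Block`, `BlockSchema`, and `BlockOutput` are importable from `backend.data.block` and that `run` yields `(output_name, value)` pairs; exact signatures may differ, so treat the names as illustrative:

```python
import uuid

from backend.data.block import Block, BlockOutput, BlockSchema  # assumed import path


class WordCountBlock(Block):
    """Illustrative block that counts the words in a piece of text."""

    class Input(BlockSchema):
        text: str

    class Output(BlockSchema):
        word_count: int

    def __init__(self):
        super().__init__(
            id=str(uuid.uuid4()),  # step 4: generate once, then keep stable
            input_schema=WordCountBlock.Input,
            output_schema=WordCountBlock.Output,
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        # Step 3: execution logic with basic error handling
        if not input_data.text.strip():
            raise ValueError("text must not be empty")
        yield "word_count", len(input_data.text.split())
```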
### API Development
1. Update routes in `/backend/backend/server/routers/`
2. Add/update Pydantic models in same directory
3. Write tests alongside route files
4. For `data/*.py` changes, validate user ID checks
5. Run `poetry run test` to verify changes
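A hypothetical route sketch tying steps 1, 2, and 4 together; `get_user_id` and `fetch_agent` are illustrative stand-ins for the real auth dependency and data-layer helpers, not actual project functions:

```python
from fastapi import APIRouter, Depends, HTTPException
from pydantic import BaseModel

router = APIRouter()


class AgentSummary(BaseModel):
    id: str
    name: str


async def get_user_id() -> str:
    """Stand-in for the real JWT/Supabase auth dependency (assumption)."""
    raise NotImplementedError


async def fetch_agent(agent_id: str, user_id: str) -> AgentSummary | None:
    """Stand-in for a user-scoped query in backend/data/ (assumption)."""
    raise NotImplementedError


@router.get("/agents/{agent_id}", response_model=AgentSummary)
async def get_agent(agent_id: str, user_id: str = Depends(get_user_id)):
    # Step 4: every data access is scoped to the authenticated user.
    agent = await fetch_agent(agent_id=agent_id, user_id=user_id)
    if agent is None:
        raise HTTPException(status_code=404, detail="Agent not found")
    return agent
```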
### Frontend Development
**📖 Complete Frontend Guide**: See `autogpt_platform/frontend/CONTRIBUTING.md` and `autogpt_platform/frontend/.cursorrules` for comprehensive patterns and conventions.
**Quick Reference:**
**Component Structure:**
- Separate render logic from data/behavior
- Structure: `ComponentName/ComponentName.tsx` + `useComponentName.ts` + `helpers.ts`
- Exception: Small components (3-4 lines of logic) can be inline
- Render-only components can be direct files without folders
**Data Fetching:**
- Use generated API hooks from `@/app/api/__generated__/endpoints/`
- Generated via Orval from backend OpenAPI spec
- Pattern: `use{Method}{Version}{OperationName}`
- Example: `useGetV2ListLibraryAgents`
- Regenerate with: `pnpm generate:api`
- **Never** use deprecated `BackendAPI` or `src/lib/autogpt-server-api/*`
**Code Conventions:**
- Use function declarations for components and handlers (not arrow functions)
- Only arrow functions for small inline lambdas (map, filter, etc.)
- Components: `PascalCase`, Hooks: `camelCase` with `use` prefix
- No barrel files or `index.ts` re-exports
- Minimal comments (code should be self-documenting)
**Styling:**
- Use Tailwind CSS utilities only
- Use design system components from `src/components/` (atoms, molecules, organisms)
- Never use `src/components/__legacy__/*`
- Only use Phosphor Icons (`@phosphor-icons/react`)
- Prefer design tokens over hardcoded values
**Error Handling:**
- Render errors: Use `<ErrorCard />` component
- Mutation errors: Display with toast notifications
- Manual exceptions: Use `Sentry.captureException()`
- Global error boundaries already configured
**Testing:**
- Add/update Storybook stories for UI components (`pnpm storybook`)
- Run Playwright E2E tests with `pnpm test`
- Verify in Chromatic after PR
**Architecture:**
- Default to client components ("use client")
- Server components only for SEO or extreme TTFB needs
- Use React Query for server state (via generated hooks)
- Co-locate UI state in components/hooks
### Security Guidelines
**Cache Protection Middleware** (`/backend/backend/server/middleware/security.py`):
- Default: Disables caching for ALL endpoints with `Cache-Control: no-store, no-cache, must-revalidate, private`
- Uses allow list approach for cacheable paths (static assets, health checks, public pages)
- Prevents sensitive data caching in browsers/proxies
- Add new cacheable endpoints to `CACHEABLE_PATHS`
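A stripped-down sketch of this pattern using Starlette's `BaseHTTPMiddleware` (which FastAPI builds on); the `CACHEABLE_PATHS` entries below are placeholders, not the project's real allow list:

```python
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request

CACHEABLE_PATHS = ("/static/", "/health")  # placeholder entries


class CacheControlMiddleware(BaseHTTPMiddleware):
    """Disable caching by default; allow it only for listed path prefixes."""

    async def dispatch(self, request: Request, call_next):
        response = await call_next(request)
        if not request.url.path.startswith(CACHEABLE_PATHS):
            response.headers["Cache-Control"] = (
                "no-store, no-cache, must-revalidate, private"
            )
        return response
```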
### CI/CD Alignment
The repository has comprehensive CI workflows that test:
- **Backend**: Python 3.11-3.13, services (Redis/RabbitMQ/ClamAV), Prisma migrations, Poetry lock validation
- **Frontend**: Node.js 21, pnpm, Playwright with Docker Compose stack, API schema validation
- **Integration**: Full-stack type checking and E2E testing
Match these patterns when developing locally; the copilot setup environment mirrors these CI configurations.
## Collaboration with Other AI Assistants
This repository is actively developed with assistance from Claude (via CLAUDE.md files). When working on this codebase:
- Check for existing CLAUDE.md files that provide additional context
- Follow established patterns and conventions already in the codebase
- Maintain consistency with existing code style and architecture
- Consider that changes may be reviewed and extended by both human developers and AI assistants
## Trust These Instructions
These instructions are comprehensive and tested. Only perform additional searches if:
1. Information here is incomplete for your specific task
2. You encounter errors not covered by the workarounds
3. You need to understand implementation details not covered above
For detailed platform development patterns, refer to `autogpt_platform/CLAUDE.md` and `AGENTS.md` in the repository root.

.github/dependabot.yml

@@ -1,153 +0,0 @@
version: 2
updates:
# autogpt_libs (Poetry project)
- package-ecosystem: "pip"
directory: "autogpt_platform/autogpt_libs"
schedule:
interval: "weekly"
open-pull-requests-limit: 10
target-branch: "dev"
commit-message:
prefix: "chore(libs/deps)"
prefix-development: "chore(libs/deps-dev)"
ignore:
- dependency-name: "poetry"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
# backend (Poetry project)
- package-ecosystem: "pip"
directory: "autogpt_platform/backend"
schedule:
interval: "weekly"
open-pull-requests-limit: 10
target-branch: "dev"
commit-message:
prefix: "chore(backend/deps)"
prefix-development: "chore(backend/deps-dev)"
ignore:
- dependency-name: "poetry"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
# frontend (Next.js project)
- package-ecosystem: "npm"
directory: "autogpt_platform/frontend"
schedule:
interval: "weekly"
open-pull-requests-limit: 10
target-branch: "dev"
commit-message:
prefix: "chore(frontend/deps)"
prefix-development: "chore(frontend/deps-dev)"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
# infra (Terraform)
- package-ecosystem: "terraform"
directory: "autogpt_platform/infra"
schedule:
interval: "weekly"
open-pull-requests-limit: 5
target-branch: "dev"
commit-message:
prefix: "chore(infra/deps)"
prefix-development: "chore(infra/deps-dev)"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
# GitHub Actions
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
open-pull-requests-limit: 5
target-branch: "dev"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
# Docker
- package-ecosystem: "docker"
directory: "autogpt_platform/"
schedule:
interval: "weekly"
open-pull-requests-limit: 5
target-branch: "dev"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
# Docs
- package-ecosystem: "pip"
directory: "docs/"
schedule:
interval: "weekly"
open-pull-requests-limit: 1
target-branch: "dev"
commit-message:
prefix: "chore(docs/deps)"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"

.github/labeler.yml

@@ -1,33 +1,23 @@
Classic AutoGPT Agent:
AutoGPT Agent:
- changed-files:
- any-glob-to-any-file: classic/original_autogpt/**
Classic Benchmark:
- changed-files:
- any-glob-to-any-file: classic/benchmark/**
Classic Frontend:
- changed-files:
- any-glob-to-any-file: classic/frontend/**
- any-glob-to-any-file: autogpts/autogpt/**
Forge:
- changed-files:
- any-glob-to-any-file: classic/forge/**
- any-glob-to-any-file: autogpts/forge/**
Benchmark:
- changed-files:
- any-glob-to-any-file: benchmark/**
Frontend:
- changed-files:
- any-glob-to-any-file: frontend/**
Arena:
- changed-files:
- any-glob-to-any-file: arena/**
documentation:
- changed-files:
- any-glob-to-any-file: docs/**
platform/frontend:
- changed-files:
- any-glob-to-any-file: autogpt_platform/frontend/**
platform/backend:
- changed-files:
- all-globs-to-any-file:
- autogpt_platform/backend/**
- '!autogpt_platform/backend/backend/blocks/**'
platform/blocks:
- changed-files:
- any-glob-to-any-file: autogpt_platform/backend/backend/blocks/**

.github/workflows/arena-intake.yml

@@ -0,0 +1,169 @@
name: Arena intake
on:
# We recommend `pull_request_target` so that github secrets are available.
# In `pull_request` we wouldn't be able to change labels of fork PRs
pull_request_target:
types: [ opened, synchronize ]
paths:
- 'arena/**'
jobs:
check:
permissions:
pull-requests: write
runs-on: ubuntu-latest
steps:
- name: Checkout PR
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Check Arena entry
uses: actions/github-script@v7
with:
script: |
console.log('⚙️ Setting up...');
const fs = require('fs');
const path = require('path');
const pr = context.payload.pull_request;
const isFork = pr.head.repo.fork;
console.log('🔄️ Fetching PR diff metadata...');
const prFilesChanged = (await github.rest.pulls.listFiles({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: pr.number,
})).data;
console.debug(prFilesChanged);
const arenaFilesChanged = prFilesChanged.filter(
({ filename: file }) => file.startsWith('arena/') && file.endsWith('.json')
);
const hasChangesInAutogptsFolder = prFilesChanged.some(
({ filename }) => filename.startsWith('autogpts/')
);
console.log(`🗒️ ${arenaFilesChanged.length} arena entries affected`);
console.debug(arenaFilesChanged);
if (arenaFilesChanged.length === 0) {
// If no files in `arena/` are changed, this job does not need to run.
return;
}
let close = false;
let flagForManualCheck = false;
let issues = [];
if (isFork) {
if (arenaFilesChanged.length > 1) {
// Impacting multiple entries in `arena/` is not allowed
issues.push('This pull request impacts multiple arena entries');
}
if (hasChangesInAutogptsFolder) {
// PRs that include the custom agent are generally not allowed
issues.push(
'This pull request includes changes in `autogpts/`.\n'
+ 'Please make sure to only submit your arena entry (`arena/*.json`), '
+ 'and not to accidentally include your custom agent itself.'
);
}
}
if (arenaFilesChanged.length === 1) {
const newArenaFile = arenaFilesChanged[0]
const newArenaFileName = path.basename(newArenaFile.filename)
console.log(`🗒️ Arena entry in PR: ${newArenaFile}`);
if (newArenaFile.status != 'added') {
flagForManualCheck = true;
}
if (pr.mergeable != false) {
const newArenaEntry = JSON.parse(fs.readFileSync(newArenaFile.filename));
const allArenaFiles = await (await glob.create('arena/*.json')).glob();
console.debug(newArenaEntry);
console.log(`➡️ Checking ${newArenaFileName} against existing entries...`);
for (const file of allArenaFiles) {
const existingEntryName = path.basename(file);
if (existingEntryName === newArenaFileName) {
continue;
}
console.debug(`Checking against ${existingEntryName}...`);
const arenaEntry = JSON.parse(fs.readFileSync(file));
if (arenaEntry.github_repo_url === newArenaEntry.github_repo_url) {
console.log(`⚠️ Duplicate detected: ${existingEntryName}`);
issues.push(
`The \`github_repo_url\` specified in __${newArenaFileName}__ `
+ `already exists in __${existingEntryName}__. `
+ `This PR will be closed as duplicate.`
)
close = true;
}
}
} else {
console.log('⚠️ PR has conflicts');
issues.push(
`__${newArenaFileName}__ conflicts with existing entry with the same name`
)
close = true;
}
} // end if (arenaFilesChanged.length === 1)
console.log('🏁 Finished checking against existing entries');
if (issues.length == 0) {
console.log('✅ No issues detected');
if (flagForManualCheck) {
console.log('🤔 Requesting review from maintainers...');
await github.rest.pulls.requestReviewers({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: pr.number,
reviewers: ['Pwuts'],
// team_reviewers: ['maintainers'], // doesn't work: https://stackoverflow.com/a/64977184/4751645
});
} else {
console.log('➡️ Approving PR...');
await github.rest.pulls.createReview({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: pr.number,
event: 'APPROVE',
});
}
} else {
console.log(`⚠️ ${issues.length} issues detected`);
console.log('➡️ Posting comment indicating issues...');
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: pr.number,
body: `Our automation found one or more issues with this submission:\n`
+ issues.map(i => `- ${i.replace('\n', '\n ')}`).join('\n'),
});
console.log("➡️ Applying label 'invalid'...");
await github.rest.issues.addLabels({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: pr.number,
labels: ['invalid'],
});
if (close) {
console.log('➡️ Auto-closing PR...');
await github.rest.pulls.update({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: pr.number,
state: 'closed',
});
}
}

.github/workflows/autogpt-ci.yml

@@ -0,0 +1,296 @@
name: AutoGPT Python CI
on:
push:
branches: [ master, development, ci-test* ]
paths:
- '.github/workflows/autogpt-ci.yml'
- 'autogpts/autogpt/**'
- '!autogpts/autogpt/tests/vcr_cassettes'
pull_request:
branches: [ master, development, release-* ]
paths:
- '.github/workflows/autogpt-ci.yml'
- 'autogpts/autogpt/**'
- '!autogpts/autogpt/tests/vcr_cassettes'
concurrency:
group: ${{ format('autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
defaults:
run:
shell: bash
working-directory: autogpts/autogpt
jobs:
lint:
runs-on: ubuntu-latest
env:
min-python-version: "3.10"
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}
- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Set up Python dependency cache
uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: ${{ runner.os }}-poetry-${{ hashFiles('autogpts/autogpt/pyproject.toml') }}-${{ steps.get_date.outputs.date }}
- name: Install Python dependencies
run: |
curl -sSL https://install.python-poetry.org | python3 -
poetry install
- name: Lint with flake8
run: poetry run flake8
- name: Check black formatting
run: poetry run black . --check
if: success() || failure()
- name: Check isort formatting
run: poetry run isort . --check
if: success() || failure()
# - name: Check mypy formatting
# run: poetry run mypy
# if: success() || failure()
# - name: Check for unused imports and pass statements
# run: |
# cmd="autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests"
# poetry run $cmd --check || (echo "You have unused imports or pass statements, please run '${cmd} --in-place'" && exit 1)
test:
permissions:
contents: read
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
platform-os: [ubuntu, macos, macos-arm64, windows]
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
steps:
# Quite slow on macOS (2~4 minutes to set up Docker)
# - name: Set up Docker (macOS)
# if: runner.os == 'macOS'
# uses: crazy-max/ghaction-setup-docker@v3
- name: Start MinIO service (Linux)
if: runner.os == 'Linux'
working-directory: '.'
run: |
docker pull minio/minio:edge-cicd
docker run -d -p 9000:9000 minio/minio:edge-cicd
- name: Start MinIO service (macOS)
if: runner.os == 'macOS'
working-directory: ${{ runner.temp }}
run: |
brew install minio/stable/minio
mkdir data
minio server ./data &
# No MinIO on Windows:
# - Windows doesn't support running Linux Docker containers
# - It doesn't seem possible to start background processes on Windows. They are
# killed after the step returns.
# See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Configure git user Auto-GPT-Bot
run: |
git config --global user.name "Auto-GPT-Bot"
git config --global user.email "github-bot@agpt.co"
- name: Checkout cassettes
if: ${{ startsWith(github.event_name, 'pull_request') }}
env:
PR_BASE: ${{ github.event.pull_request.base.ref }}
PR_BRANCH: ${{ github.event.pull_request.head.ref }}
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
run: |
cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
cassette_base_branch="${PR_BASE}"
cd tests/vcr_cassettes
if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then
cassette_base_branch="master"
fi
if git ls-remote --exit-code --heads origin $cassette_branch ; then
git fetch origin $cassette_branch
git fetch origin $cassette_base_branch
git checkout $cassette_branch
# Pick non-conflicting cassette updates from the base branch
git merge --no-commit --strategy-option=ours origin/$cassette_base_branch
echo "Using cassettes from mirror branch '$cassette_branch'," \
"synced to upstream branch '$cassette_base_branch'."
else
git checkout -b $cassette_branch
echo "Branch '$cassette_branch' does not exist in cassette submodule." \
"Using cassettes from '$cassette_base_branch'."
fi
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Set up Python dependency cache
# On Windows, unpacking cached dependencies takes longer than just installing them
if: runner.os != 'Windows'
uses: actions/cache@v4
with:
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('autogpts/autogpt/poetry.lock') }}
- name: Install Poetry (Unix)
if: runner.os != 'Windows'
run: |
curl -sSL https://install.python-poetry.org | python3 -
if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi
- name: Install Poetry (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: |
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
$env:PATH += ";$env:APPDATA\Python\Scripts"
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
- name: Install Python dependencies
run: poetry install
- name: Run pytest with coverage
run: |
poetry run pytest -vv \
--cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
--numprocesses=logical --durations=10 \
tests/unit tests/integration
env:
CI: true
PLAIN_OUTPUT: True
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
AWS_ACCESS_KEY_ID: minioadmin
AWS_SECRET_ACCESS_KEY: minioadmin
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: autogpt-agent,${{ runner.os }}
- id: setup_git_auth
name: Set up git token authentication
# Cassettes may be pushed even when tests fail
if: success() || failure()
run: |
config_key="http.${{ github.server_url }}/.extraheader"
if [ "${{ runner.os }}" = 'macOS' ]; then
base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64)
else
base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)
fi
git config "$config_key" \
"Authorization: Basic $base64_pat"
cd tests/vcr_cassettes
git config "$config_key" \
"Authorization: Basic $base64_pat"
echo "config_key=$config_key" >> $GITHUB_OUTPUT
- id: push_cassettes
name: Push updated cassettes
# For pull requests, push updated cassettes even when tests fail
if: github.event_name == 'push' || (! github.event.pull_request.head.repo.fork && (success() || failure()))
env:
PR_BRANCH: ${{ github.event.pull_request.head.ref }}
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
run: |
if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then
is_pull_request=true
cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
else
cassette_branch="${{ github.ref_name }}"
fi
cd tests/vcr_cassettes
# Commit & push changes to cassettes if any
if ! git diff --quiet; then
git add .
git commit -m "Auto-update cassettes"
git push origin HEAD:$cassette_branch
if [ ! $is_pull_request ]; then
cd ../..
git add tests/vcr_cassettes
git commit -m "Update cassette submodule"
git push origin HEAD:$cassette_branch
fi
echo "updated=true" >> $GITHUB_OUTPUT
else
echo "updated=false" >> $GITHUB_OUTPUT
echo "No cassette changes to commit"
fi
- name: Post Set up git token auth
if: steps.setup_git_auth.outcome == 'success'
run: |
git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
- name: Apply "behaviour change" label and comment on PR
if: ${{ startsWith(github.event_name, 'pull_request') }}
run: |
PR_NUMBER="${{ github.event.pull_request.number }}"
TOKEN="${{ secrets.PAT_REVIEW }}"
REPO="${{ github.repository }}"
if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then
echo "Adding label and comment..."
echo $TOKEN | gh auth login --with-token
gh issue edit $PR_NUMBER --add-label "behaviour change"
gh issue comment $PR_NUMBER --body "You changed AutoGPT's behaviour on ${{ runner.os }}. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
fi
- name: Upload logs to artifact
if: always()
uses: actions/upload-artifact@v4
with:
name: test-logs
path: autogpts/autogpt/logs/


@@ -0,0 +1,59 @@
name: Purge Auto-GPT Docker CI cache
on:
schedule:
- cron: 20 4 * * 1,4
env:
BASE_BRANCH: development
IMAGE_NAME: auto-gpt
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
build-type: [release, dev]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- id: build
name: Build image
uses: docker/build-push-action@v5
with:
context: autogpts/autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
load: true # save to docker images
# use GHA cache as read-only
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.schedule }}
build_type: ${{ matrix.build-type }}
prod_branch: master
dev_branch: development
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.sha }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }}
push_forced_label:
new_commits_json: ${{ null }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true

.github/workflows/autogpt-docker-ci.yml

@@ -0,0 +1,165 @@
name: AutoGPT Docker CI
on:
push:
branches: [ master, development ]
paths:
- '.github/workflows/autogpt-docker-ci.yml'
- 'autogpts/autogpt/**'
- '!autogpts/autogpt/tests/vcr_cassettes'
pull_request:
branches: [ master, development, release-* ]
paths:
- '.github/workflows/autogpt-docker-ci.yml'
- 'autogpts/autogpt/**'
- '!autogpts/autogpt/tests/vcr_cassettes'
concurrency:
group: ${{ format('autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
defaults:
run:
working-directory: autogpts/autogpt
env:
IMAGE_NAME: auto-gpt
DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER && format('{0}/', secrets.DOCKER_USER) || '' }}auto-gpt
DEV_IMAGE_TAG: latest-dev
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
build-type: [release, dev]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- if: runner.debug
run: |
ls -al
du -hs *
- id: build
name: Build image
uses: docker/build-push-action@v5
with:
context: autogpts/autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
tags: ${{ env.IMAGE_NAME }}
labels: GIT_REVISION=${{ github.sha }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }}
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref }}
build_type: ${{ matrix.build-type }}
prod_branch: master
dev_branch: development
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.event.after }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}
new_commits_json: ${{ toJSON(github.event.commits) }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
working-directory: ./
continue-on-error: true
test:
runs-on: ubuntu-latest
timeout-minutes: 10
services:
minio:
image: minio/minio:edge-cicd
options: >
--name=minio
--health-interval=10s --health-timeout=5s --health-retries=3
--health-cmd="curl -f http://localhost:9000/minio/health/live"
steps:
- name: Check out repository
uses: actions/checkout@v4
with:
submodules: true
- if: github.event_name == 'push'
name: Log in to Docker hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- id: build
name: Build image
uses: docker/build-push-action@v5
with:
context: autogpts/autogpt
build-args: BUILD_TYPE=dev # include pytest
tags: >
${{ env.IMAGE_NAME }},
${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}
labels: GIT_REVISION=${{ github.sha }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=autogpt-docker-dev
cache-to: type=gha,scope=autogpt-docker-dev,mode=max
- id: test
name: Run tests
env:
CI: true
PLAIN_OUTPUT: True
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
S3_ENDPOINT_URL: http://minio:9000
AWS_ACCESS_KEY_ID: minioadmin
AWS_SECRET_ACCESS_KEY: minioadmin
run: |
set +e
docker run --env CI --env OPENAI_API_KEY \
--network container:minio \
--env S3_ENDPOINT_URL --env AWS_ACCESS_KEY_ID --env AWS_SECRET_ACCESS_KEY \
--entrypoint poetry ${{ env.IMAGE_NAME }} run \
pytest -v --cov=autogpt --cov-branch --cov-report term-missing \
--numprocesses=4 --durations=10 \
tests/unit tests/integration 2>&1 | tee test_output.txt
test_failure=${PIPESTATUS[0]}
cat << $EOF >> $GITHUB_STEP_SUMMARY
# Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌')
\`\`\`
$(cat test_output.txt)
\`\`\`
$EOF
exit $test_failure
- if: github.event_name == 'push' && github.ref_name == 'master'
name: Push image to Docker Hub
run: docker push ${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}


@@ -0,0 +1,91 @@
name: AutoGPT Docker Release
on:
release:
types: [ published, edited ]
workflow_dispatch:
inputs:
no_cache:
type: boolean
description: 'Build from scratch, without using cached layers'
defaults:
run:
working-directory: autogpts/autogpt
env:
IMAGE_NAME: auto-gpt
DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER }}/auto-gpt
jobs:
build:
if: startsWith(github.ref, 'refs/tags/autogpt-')
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Log in to Docker hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
# slashes are not allowed in image tags, but can appear in git branch or tag names
- id: sanitize_tag
name: Sanitize image tag
run: |
tag=${raw_tag//\//-}
echo tag=${tag#autogpt-} >> $GITHUB_OUTPUT
env:
raw_tag: ${{ github.ref_name }}
- id: build
name: Build image
uses: docker/build-push-action@v5
with:
context: autogpts/autogpt
build-args: BUILD_TYPE=release
load: true # save to docker images
# push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
tags: >
${{ env.IMAGE_NAME }},
${{ env.DEPLOY_IMAGE_NAME }}:latest,
${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
labels: GIT_REVISION=${{ github.sha }}
# cache layers in GitHub Actions cache to speed up builds
cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release
cache-to: type=gha,scope=autogpt-docker-release,mode=max
- name: Push image to Docker Hub
run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref }}
inputs_no_cache: ${{ inputs.no_cache }}
prod_branch: master
dev_branch: development
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
ref_type: ${{ github.ref_type }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.sha }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY
working-directory: ./
continue-on-error: true


@@ -0,0 +1,97 @@
name: AutoGPTs Nightly Benchmark
on:
workflow_dispatch:
schedule:
- cron: '0 2 * * *'
jobs:
benchmark:
permissions:
contents: write
runs-on: ubuntu-latest
strategy:
matrix:
agent-name: [ autogpt ]
fail-fast: false
timeout-minutes: 120
env:
min-python-version: '3.10'
REPORTS_BRANCH: data/benchmark-reports
REPORTS_FOLDER: ${{ format('benchmark/reports/{0}', matrix.agent-name) }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}
- name: Install Poetry
run: curl -sSL https://install.python-poetry.org | python -
- name: Prepare reports folder
run: mkdir -p ${{ env.REPORTS_FOLDER }}
- run: poetry -C benchmark install
- name: Benchmark ${{ matrix.agent-name }}
run: |
./run agent start ${{ matrix.agent-name }}
cd autogpts/${{ matrix.agent-name }}
set +e # Do not quit on non-zero exit codes
poetry run agbenchmark run -N 3 \
--test=ReadFile \
--test=BasicRetrieval --test=RevenueRetrieval2 \
--test=CombineCsv --test=LabelCsv --test=AnswerQuestionCombineCsv \
--test=UrlShortener --test=TicTacToe --test=Battleship \
--test=WebArenaTask_0 --test=WebArenaTask_21 --test=WebArenaTask_124 \
--test=WebArenaTask_134 --test=WebArenaTask_163
# Convert exit code 1 (some challenges failed) to exit code 0
if [ $? -eq 0 ] || [ $? -eq 1 ]; then
exit 0
else
exit $?
fi
env:
AGENT_NAME: ${{ matrix.agent-name }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
REPORTS_FOLDER: ${{ format('../../{0}', env.REPORTS_FOLDER) }} # account for changed workdir
TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
- name: Push reports to data branch
run: |
# BODGE: Remove success_rate.json and regression_tests.json to avoid conflicts on checkout
rm ${{ env.REPORTS_FOLDER }}/*.json
# Find folder with newest (untracked) report in it
report_subfolder=$(find ${{ env.REPORTS_FOLDER }} -type f -name 'report.json' \
| xargs -I {} dirname {} \
| xargs -I {} git ls-files --others --exclude-standard {} \
| xargs -I {} dirname {} \
| sort -u)
json_report_file="$report_subfolder/report.json"
# Convert JSON report to Markdown
markdown_report_file="$report_subfolder/report.md"
poetry -C benchmark run benchmark/reports/format.py "$json_report_file" > "$markdown_report_file"
cat "$markdown_report_file" >> $GITHUB_STEP_SUMMARY
git config --global user.name 'GitHub Actions'
git config --global user.email 'github-actions@agpt.co'
git fetch origin ${{ env.REPORTS_BRANCH }}:${{ env.REPORTS_BRANCH }} \
&& git checkout ${{ env.REPORTS_BRANCH }} \
|| git checkout --orphan ${{ env.REPORTS_BRANCH }}
git reset --hard
git add ${{ env.REPORTS_FOLDER }}
git commit -m "Benchmark report for ${{ matrix.agent-name }} @ $(date +'%Y-%m-%d')" \
&& git push origin ${{ env.REPORTS_BRANCH }}

.github/workflows/autogpts-ci.yml

@@ -0,0 +1,69 @@
name: AutoGPTs smoke test CI
on:
workflow_dispatch:
schedule:
- cron: '0 8 * * *'
push:
branches: [ master, development, ci-test* ]
paths:
- '.github/workflows/autogpts-ci.yml'
- 'autogpts/**'
- 'benchmark/**'
- 'run'
- 'cli.py'
- 'setup.py'
- '!**/*.md'
pull_request:
branches: [ master, development, release-* ]
paths:
- '.github/workflows/autogpts-ci.yml'
- 'autogpts/**'
- 'benchmark/**'
- 'run'
- 'cli.py'
- 'setup.py'
- '!**/*.md'
jobs:
run-tests:
runs-on: ubuntu-latest
strategy:
matrix:
agent-name: [ autogpt, forge ]
fail-fast: false
timeout-minutes: 20
env:
min-python-version: '3.10'
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}
- name: Install Poetry
working-directory: ./autogpts/${{ matrix.agent-name }}/
run: |
curl -sSL https://install.python-poetry.org | python -
- name: Run regression tests
run: |
./run agent start ${{ matrix.agent-name }}
cd autogpts/${{ matrix.agent-name }}
poetry run agbenchmark --mock --test=BasicRetrieval --test=Battleship --test=WebArenaTask_0
poetry run agbenchmark --test=WriteFile
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
AGENT_NAME: ${{ matrix.agent-name }}
REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
HELICONE_CACHE_ENABLED: false
HELICONE_PROPERTY_AGENT: ${{ matrix.agent-name }}
REPORTS_FOLDER: ${{ format('../../reports/{0}', matrix.agent-name) }}
TELEMETRY_ENVIRONMENT: autogpt-ci
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}

.github/workflows/benchmark-ci.yml

@@ -0,0 +1,141 @@
name: Benchmark CI
on:
push:
branches: [ master, development, ci-test* ]
paths:
- 'benchmark/**'
- .github/workflows/benchmark-ci.yml
- '!benchmark/reports/**'
pull_request:
branches: [ master, development, release-* ]
paths:
- 'benchmark/**'
- '!benchmark/reports/**'
- .github/workflows/benchmark-ci.yml
env:
min-python-version: '3.10'
jobs:
lint:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}
- id: get_date
name: Get date
working-directory: ./benchmark/
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Install Poetry
working-directory: ./benchmark/
run: |
curl -sSL https://install.python-poetry.org | python -
- name: Install dependencies
working-directory: ./benchmark/
run: |
export POETRY_VIRTUALENVS_IN_PROJECT=true
poetry install -vvv
- name: Lint with flake8
working-directory: ./benchmark/
run: poetry run flake8
- name: Check black formatting
working-directory: ./benchmark/
run: poetry run black . --exclude test.py --check
if: success() || failure()
- name: Check isort formatting
working-directory: ./benchmark/
run: poetry run isort . --check
if: success() || failure()
- name: Check for unused imports and pass statements
working-directory: ./benchmark/
run: |
cmd="poetry run autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring agbenchmark"
$cmd --check || (echo "You have unused imports or pass statements, please run '${cmd} --in-place'" && exit 1)
if: success() || failure()
tests-agbenchmark:
runs-on: ubuntu-latest
strategy:
matrix:
agent-name: [ forge ]
fail-fast: false
timeout-minutes: 20
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}
- name: Install Poetry
working-directory: ./autogpts/${{ matrix.agent-name }}/
run: |
curl -sSL https://install.python-poetry.org | python -
- name: Run regression tests
run: |
./run agent start ${{ matrix.agent-name }}
cd autogpts/${{ matrix.agent-name }}
set +e # Ignore non-zero exit codes and continue execution
echo "Running the following command: poetry run agbenchmark --maintain --mock"
poetry run agbenchmark --maintain --mock
EXIT_CODE=$?
set -e # Stop ignoring non-zero exit codes
# Check if the exit code was 5, and if so, exit with 0 instead
if [ $EXIT_CODE -eq 5 ]; then
echo "regression_tests.json is empty."
fi
echo "Running the following command: poetry run agbenchmark --mock"
poetry run agbenchmark --mock
echo "Running the following command: poetry run agbenchmark --mock --category=data"
poetry run agbenchmark --mock --category=data
echo "Running the following command: poetry run agbenchmark --mock --category=coding"
poetry run agbenchmark --mock --category=coding
echo "Running the following command: poetry run agbenchmark --test=WriteFile"
poetry run agbenchmark --test=WriteFile
cd ../../benchmark
poetry install
echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed"
export BUILD_SKILL_TREE=true
poetry run agbenchmark --mock
poetry run pytest -vv -s tests
CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../frontend/assets)') || echo "No diffs"
if [ ! -z "$CHANGED" ]; then
echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
echo "$CHANGED"
exit 1
else
echo "No unstaged changes."
fi
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}


@@ -0,0 +1,55 @@
name: Publish to PyPI
on:
workflow_dispatch:
jobs:
deploy:
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
submodules: true
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: 3.8
- name: Install Poetry
working-directory: ./benchmark/
run: |
curl -sSL https://install.python-poetry.org | python3 -
echo "$HOME/.poetry/bin" >> $GITHUB_PATH
- name: Build project for distribution
working-directory: ./benchmark/
run: poetry build
- name: Install dependencies
working-directory: ./benchmark/
run: poetry install
- name: Check Version
working-directory: ./benchmark/
id: check-version
run: |
echo version=$(poetry version --short) >> $GITHUB_OUTPUT
- name: Create Release
uses: ncipollo/release-action@v1
with:
artifacts: "benchmark/dist/*"
token: ${{ secrets.GITHUB_TOKEN }}
draft: false
generateReleaseNotes: false
tag: agbenchmark-v${{ steps.check-version.outputs.version }}
commit: master
- name: Build and publish
working-directory: ./benchmark/
run: poetry publish -u __token__ -p ${{ secrets.PYPI_API_TOKEN }}


@@ -1,145 +0,0 @@
name: Classic - AutoGPT CI
on:
push:
branches: [ master, dev, ci-test* ]
paths:
- '.github/workflows/classic-autogpt-ci.yml'
- 'classic/original_autogpt/**'
pull_request:
branches: [ master, dev, release-* ]
paths:
- '.github/workflows/classic-autogpt-ci.yml'
- 'classic/original_autogpt/**'
concurrency:
group: ${{ format('classic-autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
defaults:
run:
shell: bash
working-directory: classic/original_autogpt
jobs:
test:
permissions:
contents: read
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
platform-os: [ubuntu, macos, macos-arm64, windows]
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
steps:
# Quite slow on macOS (2~4 minutes to set up Docker)
# - name: Set up Docker (macOS)
# if: runner.os == 'macOS'
# uses: crazy-max/ghaction-setup-docker@v3
- name: Start MinIO service (Linux)
if: runner.os == 'Linux'
working-directory: '.'
run: |
docker pull minio/minio:edge-cicd
docker run -d -p 9000:9000 minio/minio:edge-cicd
- name: Start MinIO service (macOS)
if: runner.os == 'macOS'
working-directory: ${{ runner.temp }}
run: |
brew install minio/stable/minio
mkdir data
minio server ./data &
# No MinIO on Windows:
# - Windows doesn't support running Linux Docker containers
# - It doesn't seem possible to start background processes on Windows. They are
# killed after the step returns.
# See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Configure git user Auto-GPT-Bot
run: |
git config --global user.name "Auto-GPT-Bot"
git config --global user.email "github-bot@agpt.co"
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Set up Python dependency cache
# On Windows, unpacking cached dependencies takes longer than just installing them
if: runner.os != 'Windows'
uses: actions/cache@v4
with:
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('classic/original_autogpt/poetry.lock') }}
- name: Install Poetry (Unix)
if: runner.os != 'Windows'
run: |
curl -sSL https://install.python-poetry.org | python3 -
if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi
- name: Install Poetry (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: |
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
$env:PATH += ";$env:APPDATA\Python\Scripts"
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
- name: Install Python dependencies
run: poetry install
- name: Run pytest with coverage
run: |
poetry run pytest -vv \
--cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
--numprocesses=logical --durations=10 \
--junitxml=junit.xml -o junit_family=legacy \
tests/unit tests/integration
env:
CI: true
PLAIN_OUTPUT: True
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
AWS_ACCESS_KEY_ID: minioadmin
AWS_SECRET_ACCESS_KEY: minioadmin
- name: Upload test results to Codecov
if: ${{ !cancelled() }} # Run even if tests fail
uses: codecov/test-results-action@v1
with:
token: ${{ secrets.CODECOV_TOKEN }}
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: autogpt-agent,${{ runner.os }}
- name: Upload logs to artifact
if: always()
uses: actions/upload-artifact@v4
with:
name: test-logs
path: classic/original_autogpt/logs/

View File

@@ -1,60 +0,0 @@
name: Classic - Purge Auto-GPT Docker CI cache
on:
schedule:
- cron: 20 4 * * 1,4
env:
BASE_BRANCH: dev
IMAGE_NAME: auto-gpt
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
build-type: [release, dev]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- id: build
name: Build image
uses: docker/build-push-action@v6
with:
context: classic/
file: classic/Dockerfile.autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
load: true # save to docker images
# use GHA cache as read-only
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.schedule }}
build_type: ${{ matrix.build-type }}
prod_branch: master
dev_branch: dev
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.sha }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }}
push_forced_label:
new_commits_json: ${{ null }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true

View File

@@ -1,166 +0,0 @@
name: Classic - AutoGPT Docker CI
on:
push:
branches: [master, dev]
paths:
- '.github/workflows/classic-autogpt-docker-ci.yml'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
pull_request:
branches: [ master, dev, release-* ]
paths:
- '.github/workflows/classic-autogpt-docker-ci.yml'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
concurrency:
group: ${{ format('classic-autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
defaults:
run:
working-directory: classic/original_autogpt
env:
IMAGE_NAME: auto-gpt
DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER && format('{0}/', secrets.DOCKER_USER) || '' }}auto-gpt
DEV_IMAGE_TAG: latest-dev
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
build-type: [release, dev]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- if: runner.debug
run: |
ls -al
du -hs *
- id: build
name: Build image
uses: docker/build-push-action@v6
with:
context: classic/
file: classic/Dockerfile.autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
tags: ${{ env.IMAGE_NAME }}
labels: GIT_REVISION=${{ github.sha }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }}
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref_type }}
build_type: ${{ matrix.build-type }}
prod_branch: master
dev_branch: dev
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.event.after }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}
new_commits_json: ${{ toJSON(github.event.commits) }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true
test:
runs-on: ubuntu-latest
timeout-minutes: 10
services:
minio:
image: minio/minio:edge-cicd
options: >
--name=minio
--health-interval=10s --health-timeout=5s --health-retries=3
--health-cmd="curl -f http://localhost:9000/minio/health/live"
steps:
- name: Check out repository
uses: actions/checkout@v4
with:
submodules: true
- if: github.event_name == 'push'
name: Log in to Docker hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- id: build
name: Build image
uses: docker/build-push-action@v6
with:
context: classic/
file: classic/Dockerfile.autogpt
build-args: BUILD_TYPE=dev # include pytest
tags: >
${{ env.IMAGE_NAME }},
${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}
labels: GIT_REVISION=${{ github.sha }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=autogpt-docker-dev
cache-to: type=gha,scope=autogpt-docker-dev,mode=max
- id: test
name: Run tests
env:
CI: true
PLAIN_OUTPUT: True
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
S3_ENDPOINT_URL: http://minio:9000
AWS_ACCESS_KEY_ID: minioadmin
AWS_SECRET_ACCESS_KEY: minioadmin
run: |
set +e
docker run --env CI --env OPENAI_API_KEY \
--network container:minio \
--env S3_ENDPOINT_URL --env AWS_ACCESS_KEY_ID --env AWS_SECRET_ACCESS_KEY \
--entrypoint poetry ${{ env.IMAGE_NAME }} run \
pytest -v --cov=autogpt --cov-branch --cov-report term-missing \
--numprocesses=4 --durations=10 \
tests/unit tests/integration 2>&1 | tee test_output.txt
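# ${PIPESTATUS[0]} holds pytest's exit code; piping through tee would otherwise mask a failure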
test_failure=${PIPESTATUS[0]}
cat << $EOF >> $GITHUB_STEP_SUMMARY
# Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌')
\`\`\`
$(cat test_output.txt)
\`\`\`
$EOF
exit $test_failure
- if: github.event_name == 'push' && github.ref_name == 'master'
name: Push image to Docker Hub
run: docker push ${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}

View File

@@ -1,87 +0,0 @@
name: Classic - AutoGPT Docker Release
on:
release:
types: [published, edited]
workflow_dispatch:
inputs:
no_cache:
type: boolean
description: 'Build from scratch, without using cached layers'
env:
IMAGE_NAME: auto-gpt
DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER }}/auto-gpt
jobs:
build:
if: startsWith(github.ref, 'refs/tags/autogpt-')
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Log in to Docker hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
# slashes are not allowed in image tags, but can appear in git branch or tag names
- id: sanitize_tag
name: Sanitize image tag
run: |
tag=${raw_tag//\//-}
echo tag=${tag#autogpt-} >> $GITHUB_OUTPUT
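# e.g. ref 'autogpt-v0.4.7' becomes tag 'v0.4.7'; ref 'release/autogpt-beta' becomes 'release-autogpt-beta'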
env:
raw_tag: ${{ github.ref_name }}
- id: build
name: Build image
uses: docker/build-push-action@v6
with:
context: classic/
file: classic/Dockerfile.autogpt
build-args: BUILD_TYPE=release
load: true # save to docker images
# push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
tags: >
${{ env.IMAGE_NAME }},
${{ env.DEPLOY_IMAGE_NAME }}:latest,
${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
labels: GIT_REVISION=${{ github.sha }}
# cache layers in GitHub Actions cache to speed up builds
cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release
cache-to: type=gha,scope=autogpt-docker-release,mode=max
- name: Push image to Docker Hub
run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref_type }}
inputs_no_cache: ${{ inputs.no_cache }}
prod_branch: master
dev_branch: dev
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}
ref_type: ${{ github.ref_type }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.sha }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true

View File

@@ -1,76 +0,0 @@
name: Classic - Agent smoke tests
on:
workflow_dispatch:
schedule:
- cron: '0 8 * * *'
push:
branches: [ master, dev, ci-test* ]
paths:
- '.github/workflows/classic-autogpts-ci.yml'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
- 'classic/benchmark/**'
- 'classic/run'
- 'classic/cli.py'
- 'classic/setup.py'
- '!**/*.md'
pull_request:
branches: [ master, dev, release-* ]
paths:
- '.github/workflows/classic-autogpts-ci.yml'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
- 'classic/benchmark/**'
- 'classic/run'
- 'classic/cli.py'
- 'classic/setup.py'
- '!**/*.md'
defaults:
run:
shell: bash
working-directory: classic
jobs:
serve-agent-protocol:
runs-on: ubuntu-latest
strategy:
matrix:
agent-name: [ original_autogpt ]
fail-fast: false
timeout-minutes: 20
env:
min-python-version: '3.10'
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}
- name: Install Poetry
working-directory: ./classic/${{ matrix.agent-name }}/
run: |
curl -sSL https://install.python-poetry.org | python -
- name: Run regression tests
run: |
./run agent start ${{ matrix.agent-name }}
cd ${{ matrix.agent-name }}
poetry run agbenchmark --mock --test=BasicRetrieval --test=Battleship --test=WebArenaTask_0
poetry run agbenchmark --test=WriteFile
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
AGENT_NAME: ${{ matrix.agent-name }}
REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
HELICONE_CACHE_ENABLED: false
HELICONE_PROPERTY_AGENT: ${{ matrix.agent-name }}
REPORTS_FOLDER: ${{ format('../../reports/{0}', matrix.agent-name) }}
TELEMETRY_ENVIRONMENT: autogpt-ci
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}

View File

@@ -1,176 +0,0 @@
name: Classic - AGBenchmark CI
on:
push:
branches: [ master, dev, ci-test* ]
paths:
- 'classic/benchmark/**'
- '!classic/benchmark/reports/**'
- .github/workflows/classic-benchmark-ci.yml
pull_request:
branches: [ master, dev, release-* ]
paths:
- 'classic/benchmark/**'
- '!classic/benchmark/reports/**'
- .github/workflows/classic-benchmark-ci.yml
concurrency:
group: ${{ format('benchmark-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
defaults:
run:
shell: bash
env:
min-python-version: '3.10'
jobs:
test:
permissions:
contents: read
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
platform-os: [ubuntu, macos, macos-arm64, windows]
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
defaults:
run:
shell: bash
working-directory: classic/benchmark
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Set up Python dependency cache
# On Windows, unpacking cached dependencies takes longer than just installing them
if: runner.os != 'Windows'
uses: actions/cache@v4
with:
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('classic/benchmark/poetry.lock') }}
- name: Install Poetry (Unix)
if: runner.os != 'Windows'
run: |
curl -sSL https://install.python-poetry.org | python3 -
if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi
- name: Install Poetry (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: |
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
$env:PATH += ";$env:APPDATA\Python\Scripts"
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
- name: Install Python dependencies
run: poetry install
- name: Run pytest with coverage
run: |
poetry run pytest -vv \
--cov=agbenchmark --cov-branch --cov-report term-missing --cov-report xml \
--durations=10 \
--junitxml=junit.xml -o junit_family=legacy \
tests
env:
CI: true
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
- name: Upload test results to Codecov
if: ${{ !cancelled() }} # Run even if tests fail
uses: codecov/test-results-action@v1
with:
token: ${{ secrets.CODECOV_TOKEN }}
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: agbenchmark,${{ runner.os }}
self-test-with-agent:
runs-on: ubuntu-latest
strategy:
matrix:
agent-name: [forge]
fail-fast: false
timeout-minutes: 20
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}
- name: Install Poetry
run: |
curl -sSL https://install.python-poetry.org | python -
- name: Run regression tests
working-directory: classic
run: |
./run agent start ${{ matrix.agent-name }}
cd ${{ matrix.agent-name }}
set +e # Ignore non-zero exit codes and continue execution
echo "Running the following command: poetry run agbenchmark --maintain --mock"
poetry run agbenchmark --maintain --mock
EXIT_CODE=$?
set -e # Stop ignoring non-zero exit codes
# If the exit code was 5 (no tests were collected), swallow it; propagate any other failure
if [ $EXIT_CODE -eq 5 ]; then
echo "regression_tests.json is empty."
elif [ $EXIT_CODE -ne 0 ]; then
exit $EXIT_CODE
fi
echo "Running the following command: poetry run agbenchmark --mock"
poetry run agbenchmark --mock
echo "Running the following command: poetry run agbenchmark --mock --category=data"
poetry run agbenchmark --mock --category=data
echo "Running the following command: poetry run agbenchmark --mock --category=coding"
poetry run agbenchmark --mock --category=coding
# echo "Running the following command: poetry run agbenchmark --test=WriteFile"
# poetry run agbenchmark --test=WriteFile
cd ../benchmark
poetry install
echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed"
export BUILD_SKILL_TREE=true
# poetry run agbenchmark --mock
# CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
# if [ -n "$CHANGED" ]; then
# echo "There are unstaged changes. Please run agbenchmark and commit those changes, since they are needed."
# echo "$CHANGED"
# exit 1
# else
# echo "No unstaged changes."
# fi
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}

View File

@@ -1,55 +0,0 @@
name: Classic - Publish to PyPI
on:
workflow_dispatch:
jobs:
deploy:
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
submodules: true
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: 3.8
- name: Install Poetry
working-directory: ./classic/benchmark/
run: |
curl -sSL https://install.python-poetry.org | python3 -
echo "$HOME/.poetry/bin" >> $GITHUB_PATH
- name: Build project for distribution
working-directory: ./classic/benchmark/
run: poetry build
- name: Install dependencies
working-directory: ./classic/benchmark/
run: poetry install
- name: Check Version
working-directory: ./classic/benchmark/
id: check-version
run: |
echo version=$(poetry version --short) >> $GITHUB_OUTPUT
- name: Create Release
uses: ncipollo/release-action@v1
with:
artifacts: "classic/benchmark/dist/*"
token: ${{ secrets.GITHUB_TOKEN }}
draft: false
generateReleaseNotes: false
tag: agbenchmark-v${{ steps.check-version.outputs.version }}
commit: master
- name: Build and publish
working-directory: ./classic/benchmark/
run: poetry publish -u __token__ -p ${{ secrets.PYPI_API_TOKEN }}

View File

@@ -1,243 +0,0 @@
name: Classic - Forge CI
on:
push:
branches: [ master, dev, ci-test* ]
paths:
- '.github/workflows/classic-forge-ci.yml'
- 'classic/forge/**'
- '!classic/forge/tests/vcr_cassettes'
pull_request:
branches: [ master, dev, release-* ]
paths:
- '.github/workflows/classic-forge-ci.yml'
- 'classic/forge/**'
- '!classic/forge/tests/vcr_cassettes'
concurrency:
group: ${{ format('forge-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
defaults:
run:
shell: bash
working-directory: classic/forge
jobs:
test:
permissions:
contents: read
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
platform-os: [ubuntu, macos, macos-arm64, windows]
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
steps:
# Quite slow on macOS (2~4 minutes to set up Docker)
# - name: Set up Docker (macOS)
# if: runner.os == 'macOS'
# uses: crazy-max/ghaction-setup-docker@v3
- name: Start MinIO service (Linux)
if: runner.os == 'Linux'
working-directory: '.'
run: |
docker pull minio/minio:edge-cicd
docker run -d -p 9000:9000 minio/minio:edge-cicd
- name: Start MinIO service (macOS)
if: runner.os == 'macOS'
working-directory: ${{ runner.temp }}
run: |
brew install minio/stable/minio
mkdir data
minio server ./data &
# No MinIO on Windows:
# - Windows doesn't support running Linux Docker containers
# - It doesn't seem possible to start background processes on Windows. They are
# killed after the step returns.
# See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Checkout cassettes
if: ${{ startsWith(github.event_name, 'pull_request') }}
env:
PR_BASE: ${{ github.event.pull_request.base.ref }}
PR_BRANCH: ${{ github.event.pull_request.head.ref }}
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
run: |
cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
cassette_base_branch="${PR_BASE}"
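# e.g. a PR by 'octocat' from branch 'fix-cassettes' maps to mirror branch 'octocat-fix-cassettes' in the cassette submodule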
cd tests/vcr_cassettes
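# ls-remote --exit-code returns non-zero when the queried branch does not exist on the remote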
if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then
cassette_base_branch="master"
fi
if git ls-remote --exit-code --heads origin $cassette_branch ; then
git fetch origin $cassette_branch
git fetch origin $cassette_base_branch
git checkout $cassette_branch
# Pick non-conflicting cassette updates from the base branch
git merge --no-commit --strategy-option=ours origin/$cassette_base_branch
echo "Using cassettes from mirror branch '$cassette_branch'," \
"synced to upstream branch '$cassette_base_branch'."
else
git checkout -b $cassette_branch
echo "Branch '$cassette_branch' does not exist in cassette submodule." \
"Using cassettes from '$cassette_base_branch'."
fi
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Set up Python dependency cache
# On Windows, unpacking cached dependencies takes longer than just installing them
if: runner.os != 'Windows'
uses: actions/cache@v4
with:
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('classic/forge/poetry.lock') }}
- name: Install Poetry (Unix)
if: runner.os != 'Windows'
run: |
curl -sSL https://install.python-poetry.org | python3 -
if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi
- name: Install Poetry (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: |
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
$env:PATH += ";$env:APPDATA\Python\Scripts"
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
- name: Install Python dependencies
run: poetry install
- name: Run pytest with coverage
run: |
poetry run pytest -vv \
--cov=forge --cov-branch --cov-report term-missing --cov-report xml \
--durations=10 \
--junitxml=junit.xml -o junit_family=legacy \
forge
env:
CI: true
PLAIN_OUTPUT: True
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
AWS_ACCESS_KEY_ID: minioadmin
AWS_SECRET_ACCESS_KEY: minioadmin
- name: Upload test results to Codecov
if: ${{ !cancelled() }} # Run even if tests fail
uses: codecov/test-results-action@v1
with:
token: ${{ secrets.CODECOV_TOKEN }}
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: forge,${{ runner.os }}
- id: setup_git_auth
name: Set up git token authentication
# Cassettes may be pushed even when tests fail
if: success() || failure()
run: |
config_key="http.${{ github.server_url }}/.extraheader"
if [ "${{ runner.os }}" = 'macOS' ]; then
base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64)
else
base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)
fi
git config "$config_key" \
"Authorization: Basic $base64_pat"
cd tests/vcr_cassettes
git config "$config_key" \
"Authorization: Basic $base64_pat"
echo "config_key=$config_key" >> $GITHUB_OUTPUT
- id: push_cassettes
name: Push updated cassettes
# For pull requests, push updated cassettes even when tests fail
if: github.event_name == 'push' || (! github.event.pull_request.head.repo.fork && (success() || failure()))
env:
PR_BRANCH: ${{ github.event.pull_request.head.ref }}
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
run: |
if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then
is_pull_request=true
cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
else
cassette_branch="${{ github.ref_name }}"
fi
cd tests/vcr_cassettes
# Commit & push changes to cassettes if any
if ! git diff --quiet; then
git add .
git commit -m "Auto-update cassettes"
git push origin HEAD:$cassette_branch
if [ "$is_pull_request" != "true" ]; then
cd ../..
git add tests/vcr_cassettes
git commit -m "Update cassette submodule"
git push origin HEAD:$cassette_branch
fi
echo "updated=true" >> $GITHUB_OUTPUT
else
echo "updated=false" >> $GITHUB_OUTPUT
echo "No cassette changes to commit"
fi
- name: Post Set up git token auth
if: steps.setup_git_auth.outcome == 'success'
run: |
git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
- name: Apply "behaviour change" label and comment on PR
if: ${{ startsWith(github.event_name, 'pull_request') }}
run: |
PR_NUMBER="${{ github.event.pull_request.number }}"
TOKEN="${{ secrets.PAT_REVIEW }}"
REPO="${{ github.repository }}"
if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then
echo "Adding label and comment..."
echo $TOKEN | gh auth login --with-token
gh issue edit $PR_NUMBER --add-label "behaviour change"
gh issue comment $PR_NUMBER --body "You changed AutoGPT's behaviour on ${{ runner.os }}. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
fi
- name: Upload logs to artifact
if: always()
uses: actions/upload-artifact@v4
with:
name: test-logs
path: classic/forge/logs/

View File

@@ -1,60 +0,0 @@
name: Classic - Frontend CI/CD
on:
push:
branches:
- master
- dev
- 'ci-test*' # This will match any branch that starts with "ci-test"
paths:
- 'classic/frontend/**'
- '.github/workflows/classic-frontend-ci.yml'
pull_request:
paths:
- 'classic/frontend/**'
- '.github/workflows/classic-frontend-ci.yml'
jobs:
build:
permissions:
contents: write
pull-requests: write
runs-on: ubuntu-latest
env:
BUILD_BRANCH: ${{ format('classic-frontend-build/{0}', github.ref_name) }}
steps:
- name: Checkout Repo
uses: actions/checkout@v4
- name: Setup Flutter
uses: subosito/flutter-action@v2
with:
flutter-version: '3.13.2'
- name: Build Flutter to Web
run: |
cd classic/frontend
flutter build web --base-href /app/
# - name: Commit and Push to ${{ env.BUILD_BRANCH }}
# if: github.event_name == 'push'
# run: |
# git config --local user.email "action@github.com"
# git config --local user.name "GitHub Action"
# git add classic/frontend/build/web
# git checkout -B ${{ env.BUILD_BRANCH }}
# git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
# git push -f origin ${{ env.BUILD_BRANCH }}
- name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
if: github.event_name == 'push'
uses: peter-evans/create-pull-request@v7
with:
add-paths: classic/frontend/build/web
base: ${{ github.ref_name }}
branch: ${{ env.BUILD_BRANCH }}
delete-branch: true
title: "Update frontend build in `${{ github.ref_name }}`"
body: "This PR updates the frontend build based on commit ${{ github.sha }}."
commit-message: "Update frontend build based on commit ${{ github.sha }}"

View File

@@ -1,151 +0,0 @@
name: Classic - Python checks
on:
push:
branches: [ master, dev, ci-test* ]
paths:
- '.github/workflows/classic-python-checks-ci.yml'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
- 'classic/benchmark/**'
- '**.py'
- '!classic/forge/tests/vcr_cassettes'
pull_request:
branches: [ master, dev, release-* ]
paths:
- '.github/workflows/classic-python-checks-ci.yml'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
- 'classic/benchmark/**'
- '**.py'
- '!classic/forge/tests/vcr_cassettes'
concurrency:
group: ${{ format('classic-python-checks-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
defaults:
run:
shell: bash
jobs:
get-changed-parts:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- id: changes-in
name: Determine affected subprojects
uses: dorny/paths-filter@v3
with:
filters: |
original_autogpt:
- classic/original_autogpt/autogpt/**
- classic/original_autogpt/tests/**
- classic/original_autogpt/poetry.lock
forge:
- classic/forge/forge/**
- classic/forge/tests/**
- classic/forge/poetry.lock
benchmark:
- classic/benchmark/agbenchmark/**
- classic/benchmark/tests/**
- classic/benchmark/poetry.lock
outputs:
changed-parts: ${{ steps.changes-in.outputs.changes }}
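# 'changes' is a JSON array of the filter names that matched, e.g. '["forge","benchmark"]'; the jobs below consume it via fromJson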
lint:
needs: get-changed-parts
runs-on: ubuntu-latest
env:
min-python-version: "3.10"
strategy:
matrix:
sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
fail-fast: false
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}
- name: Set up Python dependency cache
uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: ${{ runner.os }}-poetry-${{ hashFiles(format('classic/{0}/poetry.lock', matrix.sub-package)) }}
- name: Install Poetry
run: curl -sSL https://install.python-poetry.org | python3 -
# Install dependencies
- name: Install Python dependencies
run: poetry -C classic/${{ matrix.sub-package }} install
# Lint
- name: Lint (isort)
run: poetry run isort --check .
working-directory: classic/${{ matrix.sub-package }}
- name: Lint (Black)
if: success() || failure()
run: poetry run black --check .
working-directory: classic/${{ matrix.sub-package }}
- name: Lint (Flake8)
if: success() || failure()
run: poetry run flake8 .
working-directory: classic/${{ matrix.sub-package }}
types:
needs: get-changed-parts
runs-on: ubuntu-latest
env:
min-python-version: "3.10"
strategy:
matrix:
sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
fail-fast: false
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}
- name: Set up Python dependency cache
uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: ${{ runner.os }}-poetry-${{ hashFiles(format('classic/{0}/poetry.lock', matrix.sub-package)) }}
- name: Install Poetry
run: curl -sSL https://install.python-poetry.org | python3 -
# Install dependencies
- name: Install Python dependencies
run: poetry -C classic/${{ matrix.sub-package }} install
# Typecheck
- name: Typecheck
if: success() || failure()
run: poetry run pyright
working-directory: classic/${{ matrix.sub-package }}

View File

@@ -1,97 +0,0 @@
name: Auto Fix CI Failures
on:
workflow_run:
workflows: ["CI"]
types:
- completed
permissions:
contents: write
pull-requests: write
actions: read
issues: write
id-token: write # Required for OIDC token exchange
jobs:
auto-fix:
if: |
github.event.workflow_run.conclusion == 'failure' &&
github.event.workflow_run.pull_requests[0] &&
!startsWith(github.event.workflow_run.head_branch, 'claude-auto-fix-ci-')
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
ref: ${{ github.event.workflow_run.head_branch }}
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Setup git identity
run: |
git config --global user.email "claude[bot]@users.noreply.github.com"
git config --global user.name "claude[bot]"
- name: Create fix branch
id: branch
run: |
BRANCH_NAME="claude-auto-fix-ci-${{ github.event.workflow_run.head_branch }}-${{ github.run_id }}"
git checkout -b "$BRANCH_NAME"
echo "branch_name=$BRANCH_NAME" >> $GITHUB_OUTPUT
- name: Get CI failure details
id: failure_details
uses: actions/github-script@v7
with:
script: |
const run = await github.rest.actions.getWorkflowRun({
owner: context.repo.owner,
repo: context.repo.repo,
run_id: ${{ github.event.workflow_run.id }}
});
const jobs = await github.rest.actions.listJobsForWorkflowRun({
owner: context.repo.owner,
repo: context.repo.repo,
run_id: ${{ github.event.workflow_run.id }}
});
const failedJobs = jobs.data.jobs.filter(job => job.conclusion === 'failure');
let errorLogs = [];
for (const job of failedJobs) {
const logs = await github.rest.actions.downloadJobLogsForWorkflowRun({
owner: context.repo.owner,
repo: context.repo.repo,
job_id: job.id
});
errorLogs.push({
jobName: job.name,
logs: logs.data
});
}
return {
runUrl: run.data.html_url,
failedJobs: failedJobs.map(j => j.name),
errorLogs: errorLogs
};
- name: Fix CI failures with Claude
id: claude
uses: anthropics/claude-code-action@v1
with:
prompt: |
/fix-ci
Failed CI Run: ${{ fromJSON(steps.failure_details.outputs.result).runUrl }}
Failed Jobs: ${{ join(fromJSON(steps.failure_details.outputs.result).failedJobs, ', ') }}
PR Number: ${{ github.event.workflow_run.pull_requests[0].number }}
Branch Name: ${{ steps.branch.outputs.branch_name }}
Base Branch: ${{ github.event.workflow_run.head_branch }}
Repository: ${{ github.repository }}
Error logs:
${{ toJSON(fromJSON(steps.failure_details.outputs.result).errorLogs) }}
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
claude_args: "--allowedTools 'Edit,MultiEdit,Write,Read,Glob,Grep,LS,Bash(git:*),Bash(bun:*),Bash(npm:*),Bash(npx:*),Bash(gh:*)'"

View File

@@ -1,379 +0,0 @@
# Claude Dependabot PR Review Workflow
#
# This workflow automatically runs Claude analysis on Dependabot PRs to:
# - Identify dependency changes and their versions
# - Look up changelogs for updated packages
# - Assess breaking changes and security impacts
# - Provide actionable recommendations for the development team
#
# Triggered on: Dependabot PRs (opened, synchronize)
# Requirements: ANTHROPIC_API_KEY secret must be configured
name: Claude Dependabot PR Review
on:
pull_request:
types: [opened, synchronize]
jobs:
dependabot-review:
# Only run on Dependabot PRs
if: github.actor == 'dependabot[bot]'
runs-on: ubuntu-latest
timeout-minutes: 30
permissions:
contents: write
pull-requests: read
issues: read
id-token: write
actions: read # Required for CI access
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 1
# Backend Python/Poetry setup (mirrors platform-backend-ci.yml)
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.11" # Use standard version matching CI
- name: Set up Python dependency cache
uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
- name: Install Poetry
run: |
# Extract Poetry version from backend/poetry.lock (matches CI)
cd autogpt_platform/backend
HEAD_POETRY_VERSION=$(python3 ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
echo "Found Poetry version ${HEAD_POETRY_VERSION} in backend/poetry.lock"
# Install Poetry
curl -sSL https://install.python-poetry.org | POETRY_VERSION=$HEAD_POETRY_VERSION python3 -
# Add Poetry to PATH
echo "$HOME/.local/bin" >> $GITHUB_PATH
- name: Check poetry.lock
working-directory: autogpt_platform/backend
run: |
poetry lock
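# --ignore-matching-lines skips comment lines (e.g. the "@generated" banner) so only real dependency changes count as a diff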
if ! git diff --quiet --ignore-matching-lines="^# " poetry.lock; then
echo "Warning: poetry.lock not up to date, but continuing for setup"
git checkout poetry.lock # Reset for clean setup
fi
- name: Install Python dependencies
working-directory: autogpt_platform/backend
run: poetry install
- name: Generate Prisma Client
working-directory: autogpt_platform/backend
run: poetry run prisma generate
# Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "22"
- name: Enable corepack
run: corepack enable
- name: Set pnpm store directory
run: |
pnpm config set store-dir ~/.pnpm-store
echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV
- name: Cache frontend dependencies
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-
- name: Install JavaScript dependencies
working-directory: autogpt_platform/frontend
run: pnpm install --frozen-lockfile
# Install Playwright browsers for frontend testing
# NOTE: Disabled to save ~1 minute of setup time. Re-enable if the agent needs browser automation (e.g., for MCP)
# - name: Install Playwright browsers
# working-directory: autogpt_platform/frontend
# run: pnpm playwright install --with-deps chromium
# Docker setup for development environment
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Copy default environment files
working-directory: autogpt_platform
run: |
# Copy default environment files for development
cp .env.default .env
cp backend/.env.default backend/.env
cp frontend/.env.default frontend/.env
# Phase 1: Cache and load Docker images for faster setup
- name: Set up Docker image cache
id: docker-cache
uses: actions/cache@v4
with:
path: ~/docker-cache
# Use a versioned key for cache invalidation when image list changes
key: docker-images-v2-${{ runner.os }}-${{ hashFiles('.github/workflows/copilot-setup-steps.yml') }}
restore-keys: |
docker-images-v2-${{ runner.os }}-
docker-images-v1-${{ runner.os }}-
- name: Load or pull Docker images
working-directory: autogpt_platform
run: |
mkdir -p ~/docker-cache
# Define image list for easy maintenance
IMAGES=(
"redis:latest"
"rabbitmq:management"
"clamav/clamav-debian:latest"
"busybox:latest"
"kong:2.8.1"
"supabase/gotrue:v2.170.0"
"supabase/postgres:15.8.1.049"
"supabase/postgres-meta:v0.86.1"
"supabase/studio:20250224-d10db0f"
)
# Check if any cached tar files exist (more reliable than cache-hit)
if ls ~/docker-cache/*.tar 1> /dev/null 2>&1; then
echo "Docker cache found, loading images in parallel..."
for image in "${IMAGES[@]}"; do
# Convert image name to filename (replace : and / with -)
filename=$(echo "$image" | tr ':/' '--')
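# e.g. "supabase/postgres:15.8.1.049" -> cache file "supabase-postgres-15.8.1.049.tar"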
if [ -f ~/docker-cache/${filename}.tar ]; then
echo "Loading $image..."
docker load -i ~/docker-cache/${filename}.tar || echo "Warning: Failed to load $image from cache" &
fi
done
wait
echo "All cached images loaded"
else
echo "No Docker cache found, pulling images in parallel..."
# Pull all images in parallel
for image in "${IMAGES[@]}"; do
docker pull "$image" &
done
wait
# Only save cache on main branches (not PRs) to avoid cache pollution
if [[ "${{ github.ref }}" == "refs/heads/master" ]] || [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then
echo "Saving Docker images to cache in parallel..."
for image in "${IMAGES[@]}"; do
# Convert image name to filename (replace : and / with -)
filename=$(echo "$image" | tr ':/' '--')
echo "Saving $image..."
docker save -o ~/docker-cache/${filename}.tar "$image" || echo "Warning: Failed to save $image" &
done
wait
echo "Docker image cache saved"
else
echo "Skipping cache save for PR/feature branch"
fi
fi
echo "Docker images ready for use"
# Phase 2: Build migrate service with GitHub Actions cache
- name: Build migrate Docker image with cache
working-directory: autogpt_platform
run: |
# Build the migrate image with buildx for GHA caching
docker buildx build \
--cache-from type=gha \
--cache-to type=gha,mode=max \
--target migrate \
--tag autogpt_platform-migrate:latest \
--load \
-f backend/Dockerfile \
..
# Start services using pre-built images
- name: Start Docker services for development
working-directory: autogpt_platform
run: |
# Start essential services (migrate image already built with correct tag)
docker compose --profile local up deps --no-build --detach
echo "Waiting for services to be ready..."
# Wait for database to be ready
echo "Checking database readiness..."
timeout 30 sh -c 'until docker compose exec -T db pg_isready -U postgres 2>/dev/null; do
echo " Waiting for database..."
sleep 2
done' && echo "✅ Database is ready" || echo "⚠️ Database ready check timeout after 30s, continuing..."
# Check migrate service status
echo "Checking migration status..."
docker compose ps migrate || echo " Migrate service not visible in ps output"
# Wait for migrate service to complete
echo "Waiting for migrations to complete..."
timeout 30 bash -c '
ATTEMPTS=0
while [ $ATTEMPTS -lt 15 ]; do
ATTEMPTS=$((ATTEMPTS + 1))
# Check using docker directly (more reliable than docker compose ps)
CONTAINER_STATUS=$(docker ps -a --filter "label=com.docker.compose.service=migrate" --format "{{.Status}}" | head -1)
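# CONTAINER_STATUS is Docker's status string, e.g. "Up 3 seconds" or "Exited (0) 1 second ago"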
if [ -z "$CONTAINER_STATUS" ]; then
echo " Attempt $ATTEMPTS: Migrate container not found yet..."
elif echo "$CONTAINER_STATUS" | grep -q "Exited (0)"; then
echo "✅ Migrations completed successfully"
docker compose logs migrate --tail=5 2>/dev/null || true
exit 0
elif echo "$CONTAINER_STATUS" | grep -q "Exited ([1-9]"; then
EXIT_CODE=$(echo "$CONTAINER_STATUS" | grep -oE "Exited \([0-9]+\)" | grep -oE "[0-9]+")
echo "❌ Migrations failed with exit code: $EXIT_CODE"
echo "Migration logs:"
docker compose logs migrate --tail=20 2>/dev/null || true
exit 1
elif echo "$CONTAINER_STATUS" | grep -q "Up"; then
echo " Attempt $ATTEMPTS: Migrate container is running... ($CONTAINER_STATUS)"
else
echo " Attempt $ATTEMPTS: Migrate container status: $CONTAINER_STATUS"
fi
sleep 2
done
echo "⚠️ Timeout: Could not determine migration status after 30 seconds"
echo "Final container check:"
docker ps -a --filter "label=com.docker.compose.service=migrate" || true
echo "Migration logs (if available):"
docker compose logs migrate --tail=10 2>/dev/null || echo " No logs available"
' || echo "⚠️ Migration check completed with warnings, continuing..."
# Brief wait for other services to stabilize
echo "Waiting 5 seconds for other services to stabilize..."
sleep 5
# Verify installations and provide environment info
- name: Verify setup and show environment info
run: |
echo "=== Python Setup ==="
python --version
poetry --version
echo "=== Node.js Setup ==="
node --version
pnpm --version
echo "=== Additional Tools ==="
docker --version
docker compose version
gh --version || true
echo "=== Services Status ==="
cd autogpt_platform
docker compose ps || true
echo "=== Backend Dependencies ==="
cd backend
poetry show | head -10 || true
echo "=== Frontend Dependencies ==="
cd ../frontend
pnpm list --depth=0 | head -10 || true
echo "=== Environment Files ==="
ls -la ../.env* || true
ls -la .env* || true
ls -la ../backend/.env* || true
echo "✅ AutoGPT Platform development environment setup complete!"
echo "🚀 Ready for development with Docker services running"
echo "📝 Backend server: poetry run serve (port 8000)"
echo "🌐 Frontend server: pnpm dev (port 3000)"
- name: Run Claude Dependabot Analysis
id: claude_review
uses: anthropics/claude-code-action@v1
with:
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
claude_args: |
--allowedTools "Bash(npm:*),Bash(pnpm:*),Bash(poetry:*),Bash(git:*),Edit,Replace,NotebookEditCell,mcp__github_inline_comment__create_inline_comment,Bash(gh pr comment:*), Bash(gh pr diff:*), Bash(gh pr view:*)"
prompt: |
You are Claude, an AI assistant specialized in reviewing Dependabot dependency update PRs.
Your primary tasks are:
1. **Analyze the dependency changes** in this Dependabot PR
2. **Look up changelogs** for all updated dependencies to understand what changed
3. **Identify breaking changes** and assess potential impact on the AutoGPT codebase
4. **Provide actionable recommendations** for the development team
## Analysis Process:
1. **Identify Changed Dependencies**:
- Use git diff to see what dependencies were updated
- Parse package.json, poetry.lock, requirements files, etc.
- List all package versions: old → new
2. **Changelog Research**:
- For each updated dependency, look up its changelog/release notes
- Use WebFetch to access GitHub releases, NPM package pages, and PyPI project pages; the PR description should also contain some details
- Focus on versions between the old and new versions
- Identify: breaking changes, deprecations, security fixes, new features
3. **Breaking Change Assessment**:
- Categorize changes: BREAKING, MAJOR, MINOR, PATCH, SECURITY
- Assess impact on AutoGPT's usage patterns
- Check if AutoGPT uses affected APIs/features
- Look for migration guides or upgrade instructions
4. **Codebase Impact Analysis**:
- Search the AutoGPT codebase for usage of changed APIs
- Identify files that might be affected by breaking changes
- Check test files for deprecated usage patterns
- Look for configuration changes needed
## Output Format:
Provide a comprehensive review comment with:
### 🔍 Dependency Analysis Summary
- List of updated packages with version changes
- Overall risk assessment (LOW/MEDIUM/HIGH)
### 📋 Detailed Changelog Review
For each updated dependency:
- **Package**: name (old_version → new_version)
- **Changes**: Summary of key changes
- **Breaking Changes**: List any breaking changes
- **Security Fixes**: Note security improvements
- **Migration Notes**: Any upgrade steps needed
### ⚠️ Impact Assessment
- **Breaking Changes Found**: Yes/No with details
- **Affected Files**: List AutoGPT files that may need updates
- **Test Impact**: Any tests that may need updating
- **Configuration Changes**: Required config updates
### 🛠️ Recommendations
- **Action Required**: What the team should do
- **Testing Focus**: Areas to test thoroughly
- **Follow-up Tasks**: Any additional work needed
- **Merge Recommendation**: APPROVE/REVIEW_NEEDED/HOLD
### 📚 Useful Links
- Links to relevant changelogs, migration guides, documentation
Be thorough but concise. Focus on actionable insights that help the development team make informed decisions about the dependency updates.

View File

@@ -1,331 +0,0 @@
name: Claude Code
on:
issue_comment:
types: [created]
pull_request_review_comment:
types: [created]
issues:
types: [opened, assigned]
pull_request_review:
types: [submitted]
jobs:
claude:
if: |
(
(github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
(github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
(github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
(github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
) && (
github.event.comment.author_association == 'OWNER' ||
github.event.comment.author_association == 'MEMBER' ||
github.event.comment.author_association == 'COLLABORATOR' ||
github.event.review.author_association == 'OWNER' ||
github.event.review.author_association == 'MEMBER' ||
github.event.review.author_association == 'COLLABORATOR' ||
github.event.issue.author_association == 'OWNER' ||
github.event.issue.author_association == 'MEMBER' ||
github.event.issue.author_association == 'COLLABORATOR'
)
runs-on: ubuntu-latest
timeout-minutes: 45
permissions:
contents: write
pull-requests: read
issues: read
id-token: write
actions: read # Required for CI access
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@v1.3.1
with:
large-packages: false # slow
docker-images: false # limited benefit
# Backend Python/Poetry setup (mirrors platform-backend-ci.yml)
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.11" # Use standard version matching CI
- name: Set up Python dependency cache
uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
- name: Install Poetry
run: |
# Extract Poetry version from backend/poetry.lock (matches CI)
cd autogpt_platform/backend
HEAD_POETRY_VERSION=$(python3 ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
echo "Found Poetry version ${HEAD_POETRY_VERSION} in backend/poetry.lock"
# Install Poetry
curl -sSL https://install.python-poetry.org | POETRY_VERSION=$HEAD_POETRY_VERSION python3 -
# Add Poetry to PATH
echo "$HOME/.local/bin" >> $GITHUB_PATH
- name: Check poetry.lock
working-directory: autogpt_platform/backend
run: |
poetry lock
if ! git diff --quiet --ignore-matching-lines="^# " poetry.lock; then
echo "Warning: poetry.lock not up to date, but continuing for setup"
git checkout poetry.lock # Reset for clean setup
fi
- name: Install Python dependencies
working-directory: autogpt_platform/backend
run: poetry install
- name: Generate Prisma Client
working-directory: autogpt_platform/backend
run: poetry run prisma generate
# Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "22"
- name: Enable corepack
run: corepack enable
- name: Set pnpm store directory
run: |
pnpm config set store-dir ~/.pnpm-store
echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV
- name: Cache frontend dependencies
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-
- name: Install JavaScript dependencies
working-directory: autogpt_platform/frontend
run: pnpm install --frozen-lockfile
# Install Playwright browsers for frontend testing
# NOTE: Disabled to save ~1 minute of setup time. Re-enable if the agent needs browser automation (e.g., for MCP)
# - name: Install Playwright browsers
# working-directory: autogpt_platform/frontend
# run: pnpm playwright install --with-deps chromium
# Docker setup for development environment
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Copy default environment files
working-directory: autogpt_platform
run: |
# Copy default environment files for development
cp .env.default .env
cp backend/.env.default backend/.env
cp frontend/.env.default frontend/.env
# Phase 1: Cache and load Docker images for faster setup
- name: Set up Docker image cache
id: docker-cache
uses: actions/cache@v4
with:
path: ~/docker-cache
# Use a versioned key for cache invalidation when image list changes
key: docker-images-v2-${{ runner.os }}-${{ hashFiles('.github/workflows/copilot-setup-steps.yml') }}
restore-keys: |
docker-images-v2-${{ runner.os }}-
docker-images-v1-${{ runner.os }}-
- name: Load or pull Docker images
working-directory: autogpt_platform
run: |
mkdir -p ~/docker-cache
# Define image list for easy maintenance
IMAGES=(
"redis:latest"
"rabbitmq:management"
"clamav/clamav-debian:latest"
"busybox:latest"
"kong:2.8.1"
"supabase/gotrue:v2.170.0"
"supabase/postgres:15.8.1.049"
"supabase/postgres-meta:v0.86.1"
"supabase/studio:20250224-d10db0f"
)
# Check if any cached tar files exist (more reliable than cache-hit)
if ls ~/docker-cache/*.tar 1> /dev/null 2>&1; then
echo "Docker cache found, loading images in parallel..."
for image in "${IMAGES[@]}"; do
# Convert image name to filename (replace : and / with -)
filename=$(echo "$image" | tr ':/' '--')
if [ -f ~/docker-cache/${filename}.tar ]; then
echo "Loading $image..."
docker load -i ~/docker-cache/${filename}.tar || echo "Warning: Failed to load $image from cache" &
fi
done
wait
echo "All cached images loaded"
else
echo "No Docker cache found, pulling images in parallel..."
# Pull all images in parallel
for image in "${IMAGES[@]}"; do
docker pull "$image" &
done
wait
# Only save cache on main branches (not PRs) to avoid cache pollution
if [[ "${{ github.ref }}" == "refs/heads/master" ]] || [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then
echo "Saving Docker images to cache in parallel..."
for image in "${IMAGES[@]}"; do
# Convert image name to filename (replace : and / with -)
filename=$(echo "$image" | tr ':/' '--')
echo "Saving $image..."
docker save -o ~/docker-cache/${filename}.tar "$image" || echo "Warning: Failed to save $image" &
done
wait
echo "Docker image cache saved"
else
echo "Skipping cache save for PR/feature branch"
fi
fi
echo "Docker images ready for use"
# Phase 2: Build migrate service with GitHub Actions cache
- name: Build migrate Docker image with cache
working-directory: autogpt_platform
run: |
# Build the migrate image with buildx for GHA caching
docker buildx build \
--cache-from type=gha \
--cache-to type=gha,mode=max \
--target migrate \
--tag autogpt_platform-migrate:latest \
--load \
-f backend/Dockerfile \
..
# Start services using pre-built images
- name: Start Docker services for development
working-directory: autogpt_platform
run: |
# Start essential services (migrate image already built with correct tag)
docker compose --profile local up deps --no-build --detach
echo "Waiting for services to be ready..."
# Wait for database to be ready
echo "Checking database readiness..."
timeout 30 sh -c 'until docker compose exec -T db pg_isready -U postgres 2>/dev/null; do
echo " Waiting for database..."
sleep 2
done' && echo "✅ Database is ready" || echo "⚠️ Database ready check timeout after 30s, continuing..."
# Check migrate service status
echo "Checking migration status..."
docker compose ps migrate || echo " Migrate service not visible in ps output"
# Wait for migrate service to complete
echo "Waiting for migrations to complete..."
timeout 30 bash -c '
ATTEMPTS=0
while [ $ATTEMPTS -lt 15 ]; do
ATTEMPTS=$((ATTEMPTS + 1))
# Check using docker directly (more reliable than docker compose ps)
CONTAINER_STATUS=$(docker ps -a --filter "label=com.docker.compose.service=migrate" --format "{{.Status}}" | head -1)
if [ -z "$CONTAINER_STATUS" ]; then
echo " Attempt $ATTEMPTS: Migrate container not found yet..."
elif echo "$CONTAINER_STATUS" | grep -q "Exited (0)"; then
echo "✅ Migrations completed successfully"
docker compose logs migrate --tail=5 2>/dev/null || true
exit 0
elif echo "$CONTAINER_STATUS" | grep -q "Exited ([1-9]"; then
EXIT_CODE=$(echo "$CONTAINER_STATUS" | grep -oE "Exited \([0-9]+\)" | grep -oE "[0-9]+")
echo "❌ Migrations failed with exit code: $EXIT_CODE"
echo "Migration logs:"
docker compose logs migrate --tail=20 2>/dev/null || true
exit 1
elif echo "$CONTAINER_STATUS" | grep -q "Up"; then
echo " Attempt $ATTEMPTS: Migrate container is running... ($CONTAINER_STATUS)"
else
echo " Attempt $ATTEMPTS: Migrate container status: $CONTAINER_STATUS"
fi
sleep 2
done
echo "⚠️ Timeout: Could not determine migration status after 30 seconds"
echo "Final container check:"
docker ps -a --filter "label=com.docker.compose.service=migrate" || true
echo "Migration logs (if available):"
docker compose logs migrate --tail=10 2>/dev/null || echo " No logs available"
' || echo "⚠️ Migration check completed with warnings, continuing..."
# Brief wait for other services to stabilize
echo "Waiting 5 seconds for other services to stabilize..."
sleep 5
# Verify installations and provide environment info
- name: Verify setup and show environment info
run: |
echo "=== Python Setup ==="
python --version
poetry --version
echo "=== Node.js Setup ==="
node --version
pnpm --version
echo "=== Additional Tools ==="
docker --version
docker compose version
gh --version || true
echo "=== Services Status ==="
cd autogpt_platform
docker compose ps || true
echo "=== Backend Dependencies ==="
cd backend
poetry show | head -10 || true
echo "=== Frontend Dependencies ==="
cd ../frontend
pnpm list --depth=0 | head -10 || true
echo "=== Environment Files ==="
ls -la ../.env* || true
ls -la .env* || true
ls -la ../backend/.env* || true
echo "✅ AutoGPT Platform development environment setup complete!"
echo "🚀 Ready for development with Docker services running"
echo "📝 Backend server: poetry run serve (port 8000)"
echo "🌐 Frontend server: pnpm dev (port 3000)"
- name: Run Claude Code
id: claude
uses: anthropics/claude-code-action@v1
with:
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
claude_args: |
--allowedTools "Bash(npm:*),Bash(pnpm:*),Bash(poetry:*),Bash(git:*),Edit,Replace,NotebookEditCell,mcp__github_inline_comment__create_inline_comment,Bash(gh pr comment:*), Bash(gh pr diff:*), Bash(gh pr view:*), Bash(gh pr edit:*)"
--model opus
additional_permissions: |
actions: read

View File

@@ -0,0 +1,34 @@
name: 'Close stale issues'
on:
schedule:
- cron: '30 1 * * *'
workflow_dispatch:
permissions:
issues: write
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v9
with:
# operations-per-run: 5000
stale-issue-message: >
This issue has automatically been marked as _stale_ because it has not had
any activity in the last 50 days. You can _unstale_ it by commenting or
removing the label. Otherwise, this issue will be closed in 10 days.
stale-pr-message: >
This pull request has automatically been marked as _stale_ because it has
not had any activity in the last 50 days. You can _unstale_ it by commenting
or removing the label.
close-issue-message: >
This issue was closed automatically because it has been stale for 10 days
with no activity.
days-before-stale: 50
days-before-close: 10
# Do not touch meta issues:
exempt-issue-labels: meta,fridge,project management
# Do not affect pull requests:
days-before-pr-stale: -1
days-before-pr-close: -1

View File

@@ -1,98 +0,0 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ "master", "release-*", "dev" ]
pull_request:
branches: [ "master", "release-*", "dev" ]
merge_group:
schedule:
- cron: '15 4 * * 0'
jobs:
analyze:
name: Analyze (${{ matrix.language }})
# Runner size impacts CodeQL analysis time. To learn more, please see:
# - https://gh.io/recommended-hardware-resources-for-running-codeql
# - https://gh.io/supported-runners-and-hardware-resources
# - https://gh.io/using-larger-runners (GitHub.com only)
# Consider using larger runners or machines with greater resources for possible analysis time improvements.
runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
permissions:
# required for all workflows
security-events: write
# required to fetch internal or private CodeQL packs
packages: read
# only required for workflows in private repositories
actions: read
contents: read
strategy:
fail-fast: false
matrix:
include:
- language: typescript
build-mode: none
- language: python
build-mode: none
# CodeQL supports the following values for 'language': 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift'
# Use `c-cpp` to analyze code written in C, C++ or both
# Use 'java-kotlin' to analyze code written in Java, Kotlin or both
# Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
# To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
# see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
# If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
# your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
steps:
- name: Checkout repository
uses: actions/checkout@v4
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
build-mode: ${{ matrix.build-mode }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
config: |
paths-ignore:
- classic/frontend/build/**
# For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality
# If the analyze step fails for one of the languages you are analyzing with
# "We were unable to automatically build your code", modify the matrix above
# to set the build mode to "manual" for that language. Then modify this step
# to build your code.
# Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
- if: matrix.build-mode == 'manual'
shell: bash
run: |
echo 'If you are using a "manual" build mode for one or more of the' \
'languages you are analyzing, replace this with the commands to build' \
'your code, for example:'
echo ' make bootstrap'
echo ' make release'
exit 1
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
with:
category: "/language:${{matrix.language}}"

View File

@@ -1,302 +0,0 @@
name: "Copilot Setup Steps"
# Automatically run the setup steps when they are changed to allow for easy validation, and
# allow manual testing through the repository's "Actions" tab
on:
workflow_dispatch:
push:
paths:
- .github/workflows/copilot-setup-steps.yml
pull_request:
paths:
- .github/workflows/copilot-setup-steps.yml
jobs:
# The job MUST be called `copilot-setup-steps` or it will not be picked up by Copilot.
copilot-setup-steps:
runs-on: ubuntu-latest
timeout-minutes: 45
# Set the permissions to the lowest permissions possible needed for your steps.
# Copilot will be given its own token for its operations.
permissions:
# If you want to clone the repository as part of your setup steps, for example to install dependencies, you'll need the `contents: read` permission. If you don't clone the repository in your setup steps, Copilot will do this for you automatically after the steps complete.
contents: read
# You can define any steps you want, and they will run before the agent starts.
# If you do not check out your code, Copilot will do this for you.
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
# Backend Python/Poetry setup (mirrors platform-backend-ci.yml)
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.11" # Use standard version matching CI
- name: Set up Python dependency cache
uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
- name: Install Poetry
run: |
# Extract Poetry version from backend/poetry.lock (matches CI)
cd autogpt_platform/backend
HEAD_POETRY_VERSION=$(python3 ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
echo "Found Poetry version ${HEAD_POETRY_VERSION} in backend/poetry.lock"
# Install Poetry
curl -sSL https://install.python-poetry.org | POETRY_VERSION=$HEAD_POETRY_VERSION python3 -
# Add Poetry to PATH
echo "$HOME/.local/bin" >> $GITHUB_PATH
- name: Check poetry.lock
working-directory: autogpt_platform/backend
run: |
poetry lock
if ! git diff --quiet --ignore-matching-lines="^# " poetry.lock; then
echo "Warning: poetry.lock not up to date, but continuing for setup"
git checkout poetry.lock # Reset for clean setup
fi
- name: Install Python dependencies
working-directory: autogpt_platform/backend
run: poetry install
- name: Generate Prisma Client
working-directory: autogpt_platform/backend
run: poetry run prisma generate
# Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "22"
- name: Enable corepack
run: corepack enable
- name: Set pnpm store directory
run: |
pnpm config set store-dir ~/.pnpm-store
echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV
- name: Cache frontend dependencies
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-
- name: Install JavaScript dependencies
working-directory: autogpt_platform/frontend
run: pnpm install --frozen-lockfile
# Install Playwright browsers for frontend testing
# NOTE: Disabled to save ~1 minute of setup time. Re-enable if Copilot needs browser automation (e.g., for MCP)
# - name: Install Playwright browsers
# working-directory: autogpt_platform/frontend
# run: pnpm playwright install --with-deps chromium
# Docker setup for development environment
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Copy default environment files
working-directory: autogpt_platform
run: |
# Copy default environment files for development
cp .env.default .env
cp backend/.env.default backend/.env
cp frontend/.env.default frontend/.env
# Phase 1: Cache and load Docker images for faster setup
- name: Set up Docker image cache
id: docker-cache
uses: actions/cache@v4
with:
path: ~/docker-cache
# Use a versioned key for cache invalidation when image list changes
key: docker-images-v2-${{ runner.os }}-${{ hashFiles('.github/workflows/copilot-setup-steps.yml') }}
restore-keys: |
docker-images-v2-${{ runner.os }}-
docker-images-v1-${{ runner.os }}-
- name: Load or pull Docker images
working-directory: autogpt_platform
run: |
mkdir -p ~/docker-cache
# Define image list for easy maintenance
IMAGES=(
"redis:latest"
"rabbitmq:management"
"clamav/clamav-debian:latest"
"busybox:latest"
"kong:2.8.1"
"supabase/gotrue:v2.170.0"
"supabase/postgres:15.8.1.049"
"supabase/postgres-meta:v0.86.1"
"supabase/studio:20250224-d10db0f"
)
# Check if any cached tar files exist (more reliable than cache-hit)
if ls ~/docker-cache/*.tar 1> /dev/null 2>&1; then
echo "Docker cache found, loading images in parallel..."
for image in "${IMAGES[@]}"; do
# Convert image name to filename (replace : and / with -)
filename=$(echo "$image" | tr ':/' '--')
if [ -f ~/docker-cache/${filename}.tar ]; then
echo "Loading $image..."
docker load -i ~/docker-cache/${filename}.tar || echo "Warning: Failed to load $image from cache" &
fi
done
wait
echo "All cached images loaded"
else
echo "No Docker cache found, pulling images in parallel..."
# Pull all images in parallel
for image in "${IMAGES[@]}"; do
docker pull "$image" &
done
wait
# Only save cache on main branches (not PRs) to avoid cache pollution
if [[ "${{ github.ref }}" == "refs/heads/master" ]] || [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then
echo "Saving Docker images to cache in parallel..."
for image in "${IMAGES[@]}"; do
# Convert image name to filename (replace : and / with -)
filename=$(echo "$image" | tr ':/' '--')
echo "Saving $image..."
docker save -o ~/docker-cache/${filename}.tar "$image" || echo "Warning: Failed to save $image" &
done
wait
echo "Docker image cache saved"
else
echo "Skipping cache save for PR/feature branch"
fi
fi
echo "Docker images ready for use"
# Phase 2: Build migrate service with GitHub Actions cache
- name: Build migrate Docker image with cache
working-directory: autogpt_platform
run: |
# Build the migrate image with buildx for GHA caching
docker buildx build \
--cache-from type=gha \
--cache-to type=gha,mode=max \
--target migrate \
--tag autogpt_platform-migrate:latest \
--load \
-f backend/Dockerfile \
..
# Start services using pre-built images
- name: Start Docker services for development
working-directory: autogpt_platform
run: |
# Start essential services (migrate image already built with correct tag)
docker compose --profile local up deps --no-build --detach
echo "Waiting for services to be ready..."
# Wait for database to be ready
echo "Checking database readiness..."
timeout 30 sh -c 'until docker compose exec -T db pg_isready -U postgres 2>/dev/null; do
echo " Waiting for database..."
sleep 2
done' && echo "✅ Database is ready" || echo "⚠️ Database ready check timeout after 30s, continuing..."
# Check migrate service status
echo "Checking migration status..."
docker compose ps migrate || echo " Migrate service not visible in ps output"
# Wait for migrate service to complete
echo "Waiting for migrations to complete..."
timeout 30 bash -c '
ATTEMPTS=0
while [ $ATTEMPTS -lt 15 ]; do
ATTEMPTS=$((ATTEMPTS + 1))
# Check using docker directly (more reliable than docker compose ps)
CONTAINER_STATUS=$(docker ps -a --filter "label=com.docker.compose.service=migrate" --format "{{.Status}}" | head -1)
if [ -z "$CONTAINER_STATUS" ]; then
echo " Attempt $ATTEMPTS: Migrate container not found yet..."
elif echo "$CONTAINER_STATUS" | grep -q "Exited (0)"; then
echo "✅ Migrations completed successfully"
docker compose logs migrate --tail=5 2>/dev/null || true
exit 0
elif echo "$CONTAINER_STATUS" | grep -q "Exited ([1-9]"; then
EXIT_CODE=$(echo "$CONTAINER_STATUS" | grep -oE "Exited \([0-9]+\)" | grep -oE "[0-9]+")
echo "❌ Migrations failed with exit code: $EXIT_CODE"
echo "Migration logs:"
docker compose logs migrate --tail=20 2>/dev/null || true
exit 1
elif echo "$CONTAINER_STATUS" | grep -q "Up"; then
echo " Attempt $ATTEMPTS: Migrate container is running... ($CONTAINER_STATUS)"
else
echo " Attempt $ATTEMPTS: Migrate container status: $CONTAINER_STATUS"
fi
sleep 2
done
echo "⚠️ Timeout: Could not determine migration status after 30 seconds"
echo "Final container check:"
docker ps -a --filter "label=com.docker.compose.service=migrate" || true
echo "Migration logs (if available):"
docker compose logs migrate --tail=10 2>/dev/null || echo " No logs available"
' || echo "⚠️ Migration check completed with warnings, continuing..."
# Brief wait for other services to stabilize
echo "Waiting 5 seconds for other services to stabilize..."
sleep 5
# Verify installations and provide environment info
- name: Verify setup and show environment info
run: |
echo "=== Python Setup ==="
python --version
poetry --version
echo "=== Node.js Setup ==="
node --version
pnpm --version
echo "=== Additional Tools ==="
docker --version
docker compose version
gh --version || true
echo "=== Services Status ==="
cd autogpt_platform
docker compose ps || true
echo "=== Backend Dependencies ==="
cd backend
poetry show | head -10 || true
echo "=== Frontend Dependencies ==="
cd ../frontend
pnpm list --depth=0 | head -10 || true
echo "=== Environment Files ==="
ls -la ../.env* || true
ls -la .env* || true
ls -la ../backend/.env* || true
echo "✅ AutoGPT Platform development environment setup complete!"
echo "🚀 Ready for development with Docker services running"
echo "📝 Backend server: poetry run serve (port 8000)"
echo "🌐 Frontend server: pnpm dev (port 3000)"

60
.github/workflows/frontend-ci.yml vendored Normal file
View File

@@ -0,0 +1,60 @@
name: Frontend CI/CD
on:
push:
branches:
- master
- development
- 'ci-test*' # This will match any branch that starts with "ci-test"
paths:
- 'frontend/**'
- '.github/workflows/frontend-ci.yml'
pull_request:
paths:
- 'frontend/**'
- '.github/workflows/frontend-ci.yml'
jobs:
build:
permissions:
contents: write
pull-requests: write
runs-on: ubuntu-latest
env:
BUILD_BRANCH: ${{ format('frontend-build/{0}', github.ref_name) }}
steps:
- name: Checkout Repo
uses: actions/checkout@v4
- name: Setup Flutter
uses: subosito/flutter-action@v2
with:
flutter-version: '3.13.2'
- name: Build Flutter to Web
run: |
cd frontend
flutter build web --base-href /app/
# - name: Commit and Push to ${{ env.BUILD_BRANCH }}
# if: github.event_name == 'push'
# run: |
# git config --local user.email "action@github.com"
# git config --local user.name "GitHub Action"
# git add frontend/build/web
# git checkout -B ${{ env.BUILD_BRANCH }}
# git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
# git push -f origin ${{ env.BUILD_BRANCH }}
- name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
if: github.event_name == 'push'
uses: peter-evans/create-pull-request@v6
with:
add-paths: frontend/build/web
base: ${{ github.ref_name }}
branch: ${{ env.BUILD_BRANCH }}
delete-branch: true
title: "Update frontend build in `${{ github.ref_name }}`"
body: "This PR updates the frontend build based on commit ${{ github.sha }}."
commit-message: "Update frontend build based on commit ${{ github.sha }}"

133
.github/workflows/hackathon.yml vendored Normal file
View File

@@ -0,0 +1,133 @@
name: Hackathon
on:
workflow_dispatch:
inputs:
agents:
description: "Agents to run (comma-separated)"
required: false
default: "autogpt" # Default agents if none are specified
jobs:
matrix-setup:
runs-on: ubuntu-latest
# Service containers to run with `matrix-setup`
services:
# Label used to access the service container
postgres:
# Docker Hub image
image: postgres
# Provide the password for postgres
env:
POSTGRES_PASSWORD: postgres
# Set health checks to wait until postgres has started
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
ports:
# Maps tcp port 5432 on service container to the host
- 5432:5432
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
env-name: ${{ steps.set-matrix.outputs.env-name }}
steps:
- id: set-matrix
run: |
if [ "${{ github.event_name }}" == "schedule" ]; then
echo "::set-output name=env-name::production"
echo "::set-output name=matrix::[ 'irrelevant']"
elif [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
IFS=',' read -ra matrix_array <<< "${{ github.event.inputs.agents }}"
matrix_string="[ \"$(echo "${matrix_array[@]}" | sed 's/ /", "/g')\" ]"
echo "::set-output name=env-name::production"
echo "::set-output name=matrix::$matrix_string"
else
echo "::set-output name=env-name::testing"
echo "::set-output name=matrix::[ 'irrelevant' ]"
fi
tests:
environment:
name: "${{ needs.matrix-setup.outputs.env-name }}"
needs: matrix-setup
env:
min-python-version: "3.10"
name: "${{ matrix.agent-name }}"
runs-on: ubuntu-latest
services:
# Label used to access the service container
postgres:
# Docker Hub image
image: postgres
# Provide the password for postgres
env:
POSTGRES_PASSWORD: postgres
# Set health checks to wait until postgres has started
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
ports:
# Maps tcp port 5432 on service container to the host
- 5432:5432
timeout-minutes: 50
strategy:
fail-fast: false
matrix:
agent-name: ${{fromJson(needs.matrix-setup.outputs.matrix)}}
steps:
- name: Print Environment Name
run: |
echo "Matrix Setup Environment Name: ${{ needs.matrix-setup.outputs.env-name }}"
- name: Check Docker Container
id: check
run: docker ps
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}
- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Install Poetry
run: |
curl -sSL https://install.python-poetry.org | python -
- name: Install Node.js
uses: actions/setup-node@v4
with:
node-version: v18.15
- name: Run benchmark
run: |
link=$(jq -r '.["github_repo_url"]' arena/$AGENT_NAME.json)
branch=$(jq -r '.["branch_to_benchmark"]' arena/$AGENT_NAME.json)
git clone "$link" -b "$branch" "$AGENT_NAME"
cd $AGENT_NAME
cp ./autogpts/$AGENT_NAME/.env.example ./autogpts/$AGENT_NAME/.env || echo "file not found"
./run agent start $AGENT_NAME
cd ../benchmark
poetry install
poetry run agbenchmark --no-dep
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
SERP_API_KEY: ${{ secrets.SERP_API_KEY }}
SERPAPI_API_KEY: ${{ secrets.SERP_API_KEY }}
WEAVIATE_API_KEY: ${{ secrets.WEAVIATE_API_KEY }}
WEAVIATE_URL: ${{ secrets.WEAVIATE_URL }}
GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
GOOGLE_CUSTOM_SEARCH_ENGINE_ID: ${{ secrets.GOOGLE_CUSTOM_SEARCH_ENGINE_ID }}
AGENT_NAME: ${{ matrix.agent-name }}
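The `set-matrix` step above turns the comma-separated `agents` input into a JSON array string for `fromJson`. A worked example of the same transformation, which assumes agent names contain no spaces:

agents="autogpt,forge"
IFS=',' read -ra matrix_array <<< "$agents"
matrix_string="[ \"$(echo "${matrix_array[@]}" | sed 's/ /", "/g')\" ]"
echo "$matrix_string"
# -> [ "autogpt", "forge" ]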

View File

@@ -1,60 +0,0 @@
name: AutoGPT Platform - Deploy Dev Environment
on:
push:
branches: [ dev ]
paths:
- 'autogpt_platform/**'
workflow_dispatch:
inputs:
git_ref:
description: 'Git ref (branch/tag) of AutoGPT to deploy'
required: true
default: 'master'
type: string
permissions:
contents: 'read'
id-token: 'write'
jobs:
migrate:
environment: develop
name: Run migrations for AutoGPT Platform
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
ref: ${{ github.event.inputs.git_ref || github.ref_name }}
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Install Python dependencies
run: |
python -m pip install --upgrade pip
pip install prisma
- name: Run Backend Migrations
working-directory: ./autogpt_platform/backend
run: |
python -m prisma migrate deploy
env:
DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}
DIRECT_URL: ${{ secrets.BACKEND_DATABASE_URL }}
trigger:
needs: migrate
runs-on: ubuntu-latest
steps:
- name: Trigger deploy workflow
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DEPLOY_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: build_deploy_dev
client-payload: '{"ref": "${{ github.event.inputs.git_ref || github.ref }}", "repository": "${{ github.repository }}"}'
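The `repository-dispatch` action used above is a thin wrapper over GitHub's repository dispatch REST endpoint. For reference, a rough `curl` equivalent of the dev trigger (token and payload values are placeholders):

curl -X POST \
  -H "Authorization: Bearer $DEPLOY_TOKEN" \
  -H "Accept: application/vnd.github+json" \
  "https://api.github.com/repos/Significant-Gravitas/AutoGPT_cloud_infrastructure/dispatches" \
  -d '{"event_type": "build_deploy_dev", "client_payload": {"ref": "dev", "repository": "Significant-Gravitas/AutoGPT"}}'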

View File

@@ -1,54 +0,0 @@
name: AutoGPT Platform - Deploy Prod Environment
on:
release:
types: [published]
workflow_dispatch:
permissions:
contents: 'read'
id-token: 'write'
jobs:
migrate:
environment: production
name: Run migrations for AutoGPT Platform
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
ref: ${{ github.ref_name || 'master' }}
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Install Python dependencies
run: |
python -m pip install --upgrade pip
pip install prisma
- name: Run Backend Migrations
working-directory: ./autogpt_platform/backend
run: |
python -m prisma migrate deploy
env:
DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}
DIRECT_URL: ${{ secrets.BACKEND_DATABASE_URL }}
trigger:
needs: migrate
runs-on: ubuntu-latest
steps:
- name: Trigger deploy workflow
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DEPLOY_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: build_deploy_prod
client-payload: |
{"ref": "${{ github.ref_name || 'master' }}", "repository": "${{ github.repository }}"}

View File

@@ -1,225 +0,0 @@
name: AutoGPT Platform - Backend CI
on:
push:
branches: [master, dev, ci-test*]
paths:
- ".github/workflows/platform-backend-ci.yml"
- "autogpt_platform/backend/**"
- "autogpt_platform/autogpt_libs/**"
pull_request:
branches: [master, dev, release-*]
paths:
- ".github/workflows/platform-backend-ci.yml"
- "autogpt_platform/backend/**"
- "autogpt_platform/autogpt_libs/**"
merge_group:
concurrency:
group: ${{ format('backend-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
defaults:
run:
shell: bash
working-directory: autogpt_platform/backend
jobs:
test:
permissions:
contents: read
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
python-version: ["3.11", "3.12", "3.13"]
runs-on: ubuntu-latest
services:
redis:
image: redis:latest
ports:
- 6379:6379
rabbitmq:
image: rabbitmq:3.12-management
ports:
- 5672:5672
- 15672:15672
env:
RABBITMQ_DEFAULT_USER: ${{ env.RABBITMQ_DEFAULT_USER }}
RABBITMQ_DEFAULT_PASS: ${{ env.RABBITMQ_DEFAULT_PASS }}
clamav:
image: clamav/clamav-debian:latest
ports:
- 3310:3310
env:
CLAMAV_NO_FRESHCLAMD: false
CLAMD_CONF_StreamMaxLength: 50M
CLAMD_CONF_MaxFileSize: 100M
CLAMD_CONF_MaxScanSize: 100M
CLAMD_CONF_MaxThreads: 4
CLAMD_CONF_ReadTimeout: 300
options: >-
--health-cmd "clamdscan --version || exit 1"
--health-interval 30s
--health-timeout 10s
--health-retries 5
--health-start-period 180s
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Setup Supabase
uses: supabase/setup-cli@v1
with:
version: 1.178.1
- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Set up Python dependency cache
uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
- name: Install Poetry (Unix)
run: |
# Extract Poetry version from backend/poetry.lock
HEAD_POETRY_VERSION=$(python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
echo "Found Poetry version ${HEAD_POETRY_VERSION} in backend/poetry.lock"
if [ -n "$BASE_REF" ]; then
BASE_BRANCH=${BASE_REF/refs\/heads\//}
BASE_POETRY_VERSION=$( (git show "origin/$BASE_BRANCH":./poetry.lock; true) | python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry -)
echo "Found Poetry version ${BASE_POETRY_VERSION} in backend/poetry.lock on ${BASE_REF}"
POETRY_VERSION=$(printf '%s\n' "$HEAD_POETRY_VERSION" "$BASE_POETRY_VERSION" | sort -V | tail -n1)
else
POETRY_VERSION=$HEAD_POETRY_VERSION
fi
echo "Using Poetry version ${POETRY_VERSION}"
# Install Poetry
curl -sSL https://install.python-poetry.org | POETRY_VERSION=$POETRY_VERSION python3 -
if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi
env:
BASE_REF: ${{ github.base_ref || github.event.merge_group.base_ref }}
- name: Check poetry.lock
run: |
poetry lock
if ! git diff --quiet --ignore-matching-lines="^# " poetry.lock; then
echo "Error: poetry.lock not up to date."
echo
git diff poetry.lock
exit 1
fi
- name: Install Python dependencies
run: poetry install
- name: Generate Prisma Client
run: poetry run prisma generate
- id: supabase
name: Start Supabase
working-directory: .
run: |
supabase init
supabase start --exclude postgres-meta,realtime,storage-api,imgproxy,inbucket,studio,edge-runtime,logflare,vector,supavisor
supabase status -o env | sed 's/="/=/; s/"$//' >> $GITHUB_OUTPUT
# outputs:
# DB_URL, API_URL, GRAPHQL_URL, ANON_KEY, SERVICE_ROLE_KEY, JWT_SECRET
- name: Wait for ClamAV to be ready
run: |
echo "Waiting for ClamAV daemon to start..."
max_attempts=60
attempt=0
until nc -z localhost 3310 || [ $attempt -eq $max_attempts ]; do
echo "ClamAV is unavailable - sleeping (attempt $((attempt+1))/$max_attempts)"
sleep 5
attempt=$((attempt+1))
done
if [ $attempt -eq $max_attempts ]; then
echo "ClamAV failed to start after $((max_attempts*5)) seconds"
echo "Checking ClamAV service logs..."
docker logs $(docker ps -q --filter "ancestor=clamav/clamav-debian:latest") 2>&1 | tail -50 || echo "No ClamAV container found"
exit 1
fi
echo "ClamAV is ready!"
# Verify ClamAV is responsive
echo "Testing ClamAV connection..."
timeout 10 bash -c 'echo "PING" | nc localhost 3310' || {
echo "ClamAV is not responding to PING"
docker logs $(docker ps -q --filter "ancestor=clamav/clamav-debian:latest") 2>&1 | tail -50 || echo "No ClamAV container found"
exit 1
}
- name: Run Database Migrations
run: poetry run prisma migrate dev --name updates
env:
DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
DIRECT_URL: ${{ steps.supabase.outputs.DB_URL }}
- id: lint
name: Run Linter
run: poetry run lint
- name: Run pytest with coverage
run: |
if [[ "${{ runner.debug }}" == "1" ]]; then
poetry run pytest -s -vv -o log_cli=true -o log_cli_level=DEBUG
else
poetry run pytest -s -vv
fi
if: success() || (failure() && steps.lint.outcome == 'failure')
env:
LOG_LEVEL: ${{ runner.debug && 'DEBUG' || 'INFO' }}
DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
DIRECT_URL: ${{ steps.supabase.outputs.DB_URL }}
SUPABASE_URL: ${{ steps.supabase.outputs.API_URL }}
SUPABASE_SERVICE_ROLE_KEY: ${{ steps.supabase.outputs.SERVICE_ROLE_KEY }}
JWT_VERIFY_KEY: ${{ steps.supabase.outputs.JWT_SECRET }}
REDIS_HOST: "localhost"
REDIS_PORT: "6379"
ENCRYPTION_KEY: "dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw=" # DO NOT USE IN PRODUCTION!!
env:
CI: true
PLAIN_OUTPUT: True
RUN_ENV: local
PORT: 8080
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
# We know these are here, don't report this as a security vulnerability
# This is used as the default credential for the entire system's RabbitMQ instance
# If you want to replace this, you can do so by making our entire system generate
# new credentials for each local user and update the environment variables in
# the backend service, docker composes, and examples
RABBITMQ_DEFAULT_USER: "rabbitmq_user_default"
RABBITMQ_DEFAULT_PASS: "k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7"
# - name: Upload coverage reports to Codecov
# uses: codecov/codecov-action@v4
# with:
# token: ${{ secrets.CODECOV_TOKEN }}
# flags: backend,${{ runner.os }}
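One subtlety in the "Install Poetry (Unix)" step above: when a PR also bumps Poetry, the workflow installs the newer of the base and head versions by piping both through a version-aware sort. In isolation:

printf '%s\n' "1.8.3" "1.12.0" | sort -V | tail -n1
# -> 1.12.0  (a plain lexicographic sort would wrongly pick 1.8.3)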

View File

@@ -1,198 +0,0 @@
name: AutoGPT Platform - Dev Deploy PR Event Dispatcher
on:
pull_request:
types: [closed]
issue_comment:
types: [created]
permissions:
issues: write
pull-requests: write
jobs:
dispatch:
runs-on: ubuntu-latest
steps:
- name: Check comment permissions and deployment status
id: check_status
if: github.event_name == 'issue_comment' && github.event.issue.pull_request
uses: actions/github-script@v7
with:
script: |
const commentBody = context.payload.comment.body.trim();
const commentUser = context.payload.comment.user.login;
const prAuthor = context.payload.issue.user.login;
const authorAssociation = context.payload.comment.author_association;
// Check permissions
const hasPermission = (
authorAssociation === 'OWNER' ||
authorAssociation === 'MEMBER' ||
authorAssociation === 'COLLABORATOR'
);
core.setOutput('comment_body', commentBody);
core.setOutput('has_permission', hasPermission);
if (!hasPermission && (commentBody === '!deploy' || commentBody === '!undeploy')) {
core.setOutput('permission_denied', 'true');
return;
}
if (commentBody !== '!deploy' && commentBody !== '!undeploy') {
return;
}
// Process deploy command
if (commentBody === '!deploy') {
core.setOutput('should_deploy', 'true');
}
// Process undeploy command
else if (commentBody === '!undeploy') {
core.setOutput('should_undeploy', 'true');
}
- name: Post permission denied comment
if: steps.check_status.outputs.permission_denied == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `❌ **Permission denied**: Only the repository owners, members, or collaborators can use deployment commands.`
});
- name: Get PR details for deployment
id: pr_details
if: steps.check_status.outputs.should_deploy == 'true' || steps.check_status.outputs.should_undeploy == 'true'
uses: actions/github-script@v7
with:
script: |
const pr = await github.rest.pulls.get({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: context.issue.number
});
core.setOutput('pr_number', pr.data.number);
core.setOutput('pr_title', pr.data.title);
core.setOutput('pr_state', pr.data.state);
- name: Dispatch Deploy Event
if: steps.check_status.outputs.should_deploy == 'true'
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: pr-event
client-payload: |
{
"action": "deploy",
"pr_number": "${{ steps.pr_details.outputs.pr_number }}",
"pr_title": "${{ steps.pr_details.outputs.pr_title }}",
"pr_state": "${{ steps.pr_details.outputs.pr_state }}",
"repo": "${{ github.repository }}"
}
- name: Post deploy success comment
if: steps.check_status.outputs.should_deploy == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `🚀 **Deploying PR #${{ steps.pr_details.outputs.pr_number }}** to development environment...`
});
- name: Dispatch Undeploy Event (from comment)
if: steps.check_status.outputs.should_undeploy == 'true'
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: pr-event
client-payload: |
{
"action": "undeploy",
"pr_number": "${{ steps.pr_details.outputs.pr_number }}",
"pr_title": "${{ steps.pr_details.outputs.pr_title }}",
"pr_state": "${{ steps.pr_details.outputs.pr_state }}",
"repo": "${{ github.repository }}"
}
- name: Post undeploy success comment
if: steps.check_status.outputs.should_undeploy == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `🗑️ **Undeploying PR #${{ steps.pr_details.outputs.pr_number }}** from development environment...`
});
- name: Check deployment status on PR close
id: check_pr_close
if: github.event_name == 'pull_request' && github.event.action == 'closed'
uses: actions/github-script@v7
with:
script: |
const comments = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number
});
let lastDeployIndex = -1;
let lastUndeployIndex = -1;
comments.data.forEach((comment, index) => {
if (comment.body.trim() === '!deploy') {
lastDeployIndex = index;
} else if (comment.body.trim() === '!undeploy') {
lastUndeployIndex = index;
}
});
// Should undeploy if there's a !deploy without a subsequent !undeploy
const shouldUndeploy = lastDeployIndex !== -1 && lastDeployIndex > lastUndeployIndex;
core.setOutput('should_undeploy', shouldUndeploy);
- name: Dispatch Undeploy Event (PR closed with active deployment)
if: >-
github.event_name == 'pull_request' &&
github.event.action == 'closed' &&
steps.check_pr_close.outputs.should_undeploy == 'true'
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: pr-event
client-payload: |
{
"action": "undeploy",
"pr_number": "${{ github.event.pull_request.number }}",
"pr_title": "${{ github.event.pull_request.title }}",
"pr_state": "${{ github.event.pull_request.state }}",
"repo": "${{ github.repository }}"
}
- name: Post PR close undeploy comment
if: >-
github.event_name == 'pull_request' &&
github.event.action == 'closed' &&
steps.check_pr_close.outputs.should_undeploy == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `🧹 **Auto-undeploying**: PR closed with active deployment. Cleaning up development environment for PR #${{ github.event.pull_request.number }}.`
});
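The PR-close handler above decides whether to auto-undeploy by comparing the positions of the last `!deploy` and `!undeploy` comments. The same check can be approximated from a shell with the GitHub CLI; a sketch, with repo and PR number as placeholders (and ignoring comment pagination):

last=$(gh api "repos/Significant-Gravitas/AutoGPT/issues/1234/comments" \
  --jq '[.[].body | select(. == "!deploy" or . == "!undeploy")] | last')
[ "$last" = "!deploy" ] && echo "active deployment -> undeploy on close"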

View File

@@ -1,239 +0,0 @@
name: AutoGPT Platform - Frontend CI
on:
push:
branches: [master, dev]
paths:
- ".github/workflows/platform-frontend-ci.yml"
- "autogpt_platform/frontend/**"
pull_request:
paths:
- ".github/workflows/platform-frontend-ci.yml"
- "autogpt_platform/frontend/**"
merge_group:
concurrency:
group: ${{ github.workflow }}-${{ github.event_name == 'merge_group' && format('merge-queue-{0}', github.ref) || format('{0}-{1}', github.ref, github.event.pull_request.number || github.sha) }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
defaults:
run:
shell: bash
working-directory: autogpt_platform/frontend
jobs:
setup:
runs-on: ubuntu-latest
outputs:
cache-key: ${{ steps.cache-key.outputs.key }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "22.18.0"
- name: Enable corepack
run: corepack enable
- name: Generate cache key
id: cache-key
run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}" >> $GITHUB_OUTPUT
- name: Cache dependencies
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ steps.cache-key.outputs.key }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-
- name: Install dependencies
run: pnpm install --frozen-lockfile
lint:
runs-on: ubuntu-latest
needs: setup
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "22.18.0"
- name: Enable corepack
run: corepack enable
- name: Restore dependencies cache
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Run lint
run: pnpm lint
chromatic:
runs-on: ubuntu-latest
needs: setup
# Only run on dev branch pushes or PRs targeting dev
if: github.ref == 'refs/heads/dev' || github.base_ref == 'dev'
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "22.18.0"
- name: Enable corepack
run: corepack enable
- name: Restore dependencies cache
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Run Chromatic
uses: chromaui/action@latest
with:
projectToken: chpt_9e7c1a76478c9c8
onlyChanged: true
workingDir: autogpt_platform/frontend
token: ${{ secrets.GITHUB_TOKEN }}
exitOnceUploaded: true
test:
runs-on: big-boi
needs: setup
strategy:
fail-fast: false
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
submodules: recursive
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "22.18.0"
- name: Enable corepack
run: corepack enable
- name: Copy default supabase .env
run: |
cp ../.env.default ../.env
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Cache Docker layers
uses: actions/cache@v4
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-frontend-test-${{ hashFiles('autogpt_platform/docker-compose.yml', 'autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/pyproject.toml', 'autogpt_platform/backend/poetry.lock') }}
restore-keys: |
${{ runner.os }}-buildx-frontend-test-
- name: Run docker compose
run: |
NEXT_PUBLIC_PW_TEST=true docker compose -f ../docker-compose.yml up -d
env:
DOCKER_BUILDKIT: 1
BUILDX_CACHE_FROM: type=local,src=/tmp/.buildx-cache
BUILDX_CACHE_TO: type=local,dest=/tmp/.buildx-cache-new,mode=max
- name: Move cache
run: |
rm -rf /tmp/.buildx-cache
if [ -d "/tmp/.buildx-cache-new" ]; then
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
fi
- name: Wait for services to be ready
run: |
echo "Waiting for rest_server to be ready..."
timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
echo "Waiting for database to be ready..."
timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."
- name: Create E2E test data
run: |
echo "Creating E2E test data..."
# First try to run the script from inside the container
if docker compose -f ../docker-compose.yml exec -T rest_server test -f /app/autogpt_platform/backend/test/e2e_test_data.py; then
echo "✅ Found e2e_test_data.py in container, running it..."
docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python backend/test/e2e_test_data.py" || {
echo "❌ E2E test data creation failed!"
docker compose -f ../docker-compose.yml logs --tail=50 rest_server
exit 1
}
else
echo "⚠️ e2e_test_data.py not found in container, copying and running..."
# Copy the script into the container and run it
docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.yml ps -q rest_server):/tmp/e2e_test_data.py || {
echo "❌ Failed to copy script to container"
exit 1
}
docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
echo "❌ E2E test data creation failed!"
docker compose -f ../docker-compose.yml logs --tail=50 rest_server
exit 1
}
fi
- name: Restore dependencies cache
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Install Browser 'chromium'
run: pnpm playwright install --with-deps chromium
- name: Run Playwright tests
run: pnpm test:no-build
- name: Upload Playwright artifacts
if: failure()
uses: actions/upload-artifact@v4
with:
name: playwright-report
path: playwright-report
- name: Print Final Docker Compose logs
if: always()
run: docker compose -f ../docker-compose.yml logs
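The E2E-data step above uses a copy-and-run fallback for when the script is not baked into the image. Distilled to its core (service name and paths as in the workflow, run from the frontend directory):

if docker compose -f ../docker-compose.yml exec -T rest_server \
     test -f /app/autogpt_platform/backend/test/e2e_test_data.py; then
  docker compose -f ../docker-compose.yml exec -T rest_server \
    python /app/autogpt_platform/backend/test/e2e_test_data.py
else
  docker cp ../backend/test/e2e_test_data.py \
    "$(docker compose -f ../docker-compose.yml ps -q rest_server)":/tmp/e2e_test_data.py
  docker compose -f ../docker-compose.yml exec -T rest_server python /tmp/e2e_test_data.py
fi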

View File

@@ -1,136 +0,0 @@
name: AutoGPT Platform - Fullstack CI
on:
push:
branches: [master, dev]
paths:
- ".github/workflows/platform-fullstack-ci.yml"
- "autogpt_platform/**"
pull_request:
paths:
- ".github/workflows/platform-fullstack-ci.yml"
- "autogpt_platform/**"
merge_group:
concurrency:
group: ${{ github.workflow }}-${{ github.event_name == 'merge_group' && format('merge-queue-{0}', github.ref) || github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
defaults:
run:
shell: bash
working-directory: autogpt_platform/frontend
jobs:
setup:
runs-on: ubuntu-latest
outputs:
cache-key: ${{ steps.cache-key.outputs.key }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "22.18.0"
- name: Enable corepack
run: corepack enable
- name: Generate cache key
id: cache-key
run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}" >> $GITHUB_OUTPUT
- name: Cache dependencies
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ steps.cache-key.outputs.key }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-
- name: Install dependencies
run: pnpm install --frozen-lockfile
types:
runs-on: ubuntu-latest
needs: setup
strategy:
fail-fast: false
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
submodules: recursive
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "22.18.0"
- name: Enable corepack
run: corepack enable
- name: Copy default supabase .env
run: |
cp ../.env.default ../.env
- name: Copy backend .env
run: |
cp ../backend/.env.default ../backend/.env
- name: Run docker compose
run: |
docker compose -f ../docker-compose.yml --profile local --profile deps_backend up -d
- name: Restore dependencies cache
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
restore-keys: |
${{ runner.os }}-pnpm-
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Setup .env
run: cp .env.default .env
- name: Wait for services to be ready
run: |
echo "Waiting for rest_server to be ready..."
timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
echo "Waiting for database to be ready..."
timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."
- name: Generate API queries
run: pnpm generate:api:force
- name: Check for API schema changes
run: |
if ! git diff --exit-code src/app/api/openapi.json; then
echo "❌ API schema changes detected in src/app/api/openapi.json"
echo ""
echo "The openapi.json file has been modified after running 'pnpm generate:api-all'."
echo "This usually means changes have been made in the BE endpoints without updating the Frontend."
echo "The API schema is now out of sync with the Front-end queries."
echo ""
echo "To fix this:"
echo "1. Pull the backend 'docker compose pull && docker compose up -d --build --force-recreate'"
echo "2. Run 'pnpm generate:api' locally"
echo "3. Run 'pnpm types' locally"
echo "4. Fix any TypeScript errors that may have been introduced"
echo "5. Commit and push your changes"
echo ""
exit 1
else
echo "✅ No API schema changes detected"
fi
- name: Run Typescript checks
run: pnpm types
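The schema-drift check above hinges on `git diff --exit-code`, which exits non-zero when the named file differs from the committed version, so regeneration drift fails the job. In isolation:

pnpm generate:api:force   # regenerate the client against the running backend
git diff --exit-code src/app/api/openapi.json \
  && echo "openapi.json in sync" \
  || echo "drift detected: regenerate, fix types, commit"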

66
.github/workflows/pr-label.yml vendored Normal file
View File

@@ -0,0 +1,66 @@
name: "Pull Request auto-label"
on:
# So that PRs touching the same files as the push are updated
push:
branches: [ master, development, release-* ]
paths-ignore:
- 'autogpts/autogpt/tests/vcr_cassettes'
- 'benchmark/reports/**'
# So that the `dirtyLabel` is removed if conflicts are resolved
# We recommend `pull_request_target` so that github secrets are available.
# In `pull_request` we wouldn't be able to change labels of fork PRs
pull_request_target:
types: [ opened, synchronize ]
concurrency:
group: ${{ format('pr-label-{0}', github.event.pull_request.number || github.sha) }}
cancel-in-progress: true
jobs:
conflicts:
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: write
steps:
- name: Update PRs with conflict labels
uses: eps1lon/actions-label-merge-conflict@releases/2.x
with:
dirtyLabel: "conflicts"
#removeOnDirtyLabel: "PR: ready to ship"
repoToken: "${{ secrets.GITHUB_TOKEN }}"
commentOnDirty: "This pull request has conflicts with the base branch, please resolve those so we can evaluate the pull request."
commentOnClean: "Conflicts have been resolved! 🎉 A maintainer will review the pull request shortly."
size:
if: ${{ github.event_name == 'pull_request_target' }}
permissions:
issues: write
pull-requests: write
runs-on: ubuntu-latest
steps:
- uses: codelytv/pr-size-labeler@v1
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
xs_label: 'size/xs'
xs_max_size: 2
s_label: 'size/s'
s_max_size: 10
m_label: 'size/m'
m_max_size: 100
l_label: 'size/l'
l_max_size: 500
xl_label: 'size/xl'
message_if_xl:
scope:
if: ${{ github.event_name == 'pull_request_target' }}
permissions:
contents: read
pull-requests: write
runs-on: ubuntu-latest
steps:
- uses: actions/labeler@v5
with:
sync-labels: true

View File

@@ -1,34 +0,0 @@
name: Repo - Close stale issues
on:
schedule:
- cron: '30 1 * * *'
workflow_dispatch:
permissions:
issues: write
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v10
with:
# operations-per-run: 5000
stale-issue-message: >
This issue has automatically been marked as _stale_ because it has not had
any activity in the last 170 days. You can _unstale_ it by commenting or
removing the label. Otherwise, this issue will be closed in 10 days.
stale-pr-message: >
This pull request has automatically been marked as _stale_ because it has
not had any activity in the last 50 days. You can _unstale_ it by commenting
or removing the label.
close-issue-message: >
This issue was closed automatically because it has been stale for 10 days
with no activity.
days-before-stale: 170
days-before-close: 10
# Do not touch meta issues:
exempt-issue-labels: meta,fridge,project management
# Do not affect pull requests:
days-before-pr-stale: -1
days-before-pr-close: -1

View File

@@ -1,21 +0,0 @@
name: Repo - Enforce dev as base branch
on:
pull_request_target:
branches: [ master ]
types: [ opened ]
jobs:
check_pr_target:
runs-on: ubuntu-latest
permissions:
pull-requests: write
steps:
- name: Check if PR is from dev or hotfix
if: ${{ !(startsWith(github.event.pull_request.head.ref, 'hotfix/') || github.event.pull_request.head.ref == 'dev') }}
run: |
gh pr comment ${{ github.event.number }} --repo "$REPO" \
--body $'This PR targets the `master` branch but does not come from `dev` or a `hotfix/*` branch.\n\nAutomatically setting the base branch to `dev`.'
gh pr edit ${{ github.event.number }} --base dev --repo "$REPO"
env:
GITHUB_TOKEN: ${{ github.token }}
REPO: ${{ github.repository }}

View File

@@ -1,66 +0,0 @@
name: Repo - Pull Request auto-label
on:
# So that PRs touching the same files as the push are updated
push:
branches: [ master, dev, release-* ]
paths-ignore:
- 'classic/forge/tests/vcr_cassettes'
- 'classic/benchmark/reports/**'
# So that the `dirtyLabel` is removed if conflicts are resolved
# We recommend `pull_request_target` so that github secrets are available.
# In `pull_request` we wouldn't be able to change labels of fork PRs
pull_request_target:
types: [ opened, synchronize ]
concurrency:
group: ${{ format('pr-label-{0}', github.event.pull_request.number || github.sha) }}
cancel-in-progress: true
jobs:
conflicts:
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: write
steps:
- name: Update PRs with conflict labels
uses: eps1lon/actions-label-merge-conflict@releases/2.x
with:
dirtyLabel: "conflicts"
#removeOnDirtyLabel: "PR: ready to ship"
repoToken: "${{ secrets.GITHUB_TOKEN }}"
commentOnDirty: "This pull request has conflicts with the base branch, please resolve those so we can evaluate the pull request."
commentOnClean: "Conflicts have been resolved! 🎉 A maintainer will review the pull request shortly."
size:
if: ${{ github.event_name == 'pull_request_target' }}
permissions:
issues: write
pull-requests: write
runs-on: ubuntu-latest
steps:
- uses: codelytv/pr-size-labeler@v1
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
xs_label: 'size/xs'
xs_max_size: 2
s_label: 'size/s'
s_max_size: 10
m_label: 'size/m'
m_max_size: 100
l_label: 'size/l'
l_max_size: 500
xl_label: 'size/xl'
message_if_xl:
scope:
if: ${{ github.event_name == 'pull_request_target' }}
permissions:
contents: read
pull-requests: write
runs-on: ubuntu-latest
steps:
- uses: actions/labeler@v6
with:
sync-labels: true

View File

@@ -1,4 +1,4 @@
name: Repo - Github Stats
name: github-repo-stats
on:
schedule:

View File

@@ -1,32 +0,0 @@
name: Repo - PR Status Checker
on:
pull_request:
types: [opened, synchronize, reopened]
merge_group:
jobs:
status-check:
name: Check PR Status
runs-on: ubuntu-latest
steps:
# - name: Wait some time for all actions to start
# run: sleep 30
- uses: actions/checkout@v4
# with:
# fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install requests
- name: Check PR Status
run: |
echo "Current directory before running Python script:"
pwd
echo "Attempting to run Python script:"
python .github/workflows/scripts/check_actions_status.py
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -1,116 +0,0 @@
import json
import os
import requests
import sys
import time
from typing import Dict, List, Tuple
CHECK_INTERVAL = 30
def get_environment_variables() -> Tuple[str, str, str, str, str]:
"""Retrieve and return necessary environment variables."""
try:
with open(os.environ["GITHUB_EVENT_PATH"]) as f:
event = json.load(f)
# Handle both PR and merge group events
if "pull_request" in event:
sha = event["pull_request"]["head"]["sha"]
else:
sha = os.environ["GITHUB_SHA"]
return (
os.environ["GITHUB_API_URL"],
os.environ["GITHUB_REPOSITORY"],
sha,
os.environ["GITHUB_TOKEN"],
os.environ["GITHUB_RUN_ID"],
)
except KeyError as e:
print(f"Error: Missing required environment variable or event data: {e}")
sys.exit(1)
def make_api_request(url: str, headers: Dict[str, str]) -> Dict:
"""Make an API request and return the JSON response."""
try:
print("Making API request to:", url)
response = requests.get(url, headers=headers, timeout=10)
response.raise_for_status()
return response.json()
except requests.RequestException as e:
print(f"Error: API request failed. {e}")
sys.exit(1)
def process_check_runs(check_runs: List[Dict]) -> Tuple[bool, bool]:
"""Process check runs and return their status."""
runs_in_progress = False
all_others_passed = True
for run in check_runs:
if str(run["name"]) != "Check PR Status":
status = run["status"]
conclusion = run["conclusion"]
if status == "completed":
if conclusion not in ["success", "skipped", "neutral"]:
all_others_passed = False
print(
f"Check run {run['name']} (ID: {run['id']}) has conclusion: {conclusion}"
)
else:
runs_in_progress = True
print(f"Check run {run['name']} (ID: {run['id']}) is still {status}.")
all_others_passed = False
else:
print(
f"Skipping check run {run['name']} (ID: {run['id']}) as it is the current run."
)
return runs_in_progress, all_others_passed
def main():
api_url, repo, sha, github_token, current_run_id = get_environment_variables()
endpoint = f"{api_url}/repos/{repo}/commits/{sha}/check-runs"
headers = {
"Accept": "application/vnd.github.v3+json",
}
if github_token:
headers["Authorization"] = f"token {github_token}"
print(f"Current run ID: {current_run_id}")
while True:
data = make_api_request(endpoint, headers)
check_runs = data["check_runs"]
print("Processing check runs...")
print(check_runs)
runs_in_progress, all_others_passed = process_check_runs(check_runs)
if not runs_in_progress:
break
print(
"Some check runs are still in progress. "
f"Waiting {CHECK_INTERVAL} seconds before checking again..."
)
time.sleep(CHECK_INTERVAL)
if all_others_passed:
print("All other completed check runs have passed. This check passes.")
sys.exit(0)
else:
print("Some check runs have failed or have not completed. This check fails.")
sys.exit(1)
if __name__ == "__main__":
main()
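For reference, the check-runs endpoint polled by the script above can be inspected directly; a one-liner with `curl` and `jq`, where owner, repo, and SHA are placeholders:

curl -s -H "Authorization: Bearer $GITHUB_TOKEN" \
  -H "Accept: application/vnd.github.v3+json" \
  "https://api.github.com/repos/Significant-Gravitas/AutoGPT/commits/$SHA/check-runs" \
  | jq '.check_runs[] | {name, status, conclusion}'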

View File

@@ -1,60 +0,0 @@
#!/usr/bin/env python3
import sys
if sys.version_info < (3, 11):
print("Python version 3.11 or higher required")
sys.exit(1)
import tomllib
def get_package_version(package_name: str, lockfile_path: str) -> str | None:
"""Extract package version from poetry.lock file."""
try:
if lockfile_path == "-":
data = tomllib.load(sys.stdin.buffer)
else:
with open(lockfile_path, "rb") as f:
data = tomllib.load(f)
except FileNotFoundError:
print(f"Error: File '{lockfile_path}' not found", file=sys.stderr)
sys.exit(1)
except tomllib.TOMLDecodeError as e:
print(f"Error parsing TOML file: {e}", file=sys.stderr)
sys.exit(1)
except Exception as e:
print(f"Error reading file: {e}", file=sys.stderr)
sys.exit(1)
# Look for the package in the packages list
packages = data.get("package", [])
for package in packages:
if package.get("name", "").lower() == package_name.lower():
return package.get("version")
return None
def main():
if len(sys.argv) not in (2, 3):
print(
"Usages: python get_package_version_from_lockfile.py <package name> [poetry.lock path]\n"
" cat poetry.lock | python get_package_version_from_lockfile.py <package name> -",
file=sys.stderr,
)
sys.exit(1)
package_name = sys.argv[1]
lockfile_path = sys.argv[2] if len(sys.argv) == 3 else "poetry.lock"
version = get_package_version(package_name, lockfile_path)
if version:
print(version)
else:
print(f"Package '{package_name}' not found in {lockfile_path}", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()
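Typical invocations of the lockfile helper above, matching how the CI workflows call it (the printed version is illustrative):

# Read from a file path
python .github/workflows/scripts/get_package_version_from_lockfile.py \
  poetry autogpt_platform/backend/poetry.lock
# -> 1.8.3  (example output)

# Read from stdin, as the backend CI does for the base branch
git show "origin/dev":./poetry.lock \
  | python .github/workflows/scripts/get_package_version_from_lockfile.py poetry -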

28
.gitignore vendored
View File

@@ -1,13 +1,13 @@
## Original ignores
.github_access_token
classic/original_autogpt/keys.py
classic/original_autogpt/*.json
autogpt/keys.py
autogpt/*.json
auto_gpt_workspace/*
*.mpeg
.env
# Root .env files
/.env
azure.yaml
ai_settings.yaml
last_run_ai_settings.yaml
.vscode
.idea/*
auto-gpt.json
@@ -34,6 +34,7 @@ dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
@@ -123,6 +124,7 @@ celerybeat.pid
# Environments
.direnv/
.env
.venv
env/
venv*/
@@ -158,24 +160,20 @@ openai/
CURRENT_BULLETIN.md
# AgBenchmark
classic/benchmark/agbenchmark/reports/
agbenchmark/reports/
# Nodejs
package-lock.json
package.json
# Allow for locally private items
# private
pri*
pri*
# ignore
ig*
.github_access_token
LICENSE.rtf
autogpt_platform/backend/settings.py
/.auth
/autogpt_platform/frontend/.auth
arena/TestAgent.json
*.ign.*
.test-contents
.claude/settings.local.json
/autogpt_platform/backend/logs
# evo.ninja
autogpts/evo.ninja/*
!autogpts/evo.ninja/setup

4
.gitmodules vendored
View File

@@ -1,3 +1,3 @@
[submodule "classic/forge/tests/vcr_cassettes"]
path = classic/forge/tests/vcr_cassettes
[submodule "autogpts/autogpt/tests/vcr_cassettes"]
path = autogpts/autogpt/tests/vcr_cassettes
url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes

View File

@@ -1,3 +1,6 @@
[pr_reviewer]
num_code_suggestions=0
[pr_code_suggestions]
commitable_code_suggestions=false
num_code_suggestions=0

View File

@@ -1,278 +0,0 @@
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: check-added-large-files
        args: ["--maxkb=500"]
      - id: fix-byte-order-marker
      - id: check-case-conflict
      - id: check-merge-conflict
      - id: check-symlinks
      - id: debug-statements

  - repo: https://github.com/Yelp/detect-secrets
    rev: v1.5.0
    hooks:
      - id: detect-secrets
        name: Detect secrets
        description: Detects high entropy strings that are likely to be passwords.
        files: ^autogpt_platform/
        stages: [pre-push]

  - repo: local
    # For proper type checking, all dependencies need to be up-to-date.
    # It's also a good idea to check that poetry.lock is consistent with pyproject.toml.
    hooks:
      - id: poetry-install
        name: Check & Install dependencies - AutoGPT Platform - Backend
        alias: poetry-install-platform-backend
        entry: poetry -C autogpt_platform/backend install
        # include autogpt_libs source (since it's a path dependency)
        files: ^autogpt_platform/(backend|autogpt_libs)/poetry\.lock$
        types: [file]
        language: system
        pass_filenames: false
      - id: poetry-install
        name: Check & Install dependencies - AutoGPT Platform - Libs
        alias: poetry-install-platform-libs
        entry: poetry -C autogpt_platform/autogpt_libs install
        files: ^autogpt_platform/autogpt_libs/poetry\.lock$
        types: [file]
        language: system
        pass_filenames: false
      - id: poetry-install
        name: Check & Install dependencies - Classic - AutoGPT
        alias: poetry-install-classic-autogpt
        entry: poetry -C classic/original_autogpt install
        # include forge source (since it's a path dependency)
        files: ^classic/(original_autogpt|forge)/poetry\.lock$
        types: [file]
        language: system
        pass_filenames: false
      - id: poetry-install
        name: Check & Install dependencies - Classic - Forge
        alias: poetry-install-classic-forge
        entry: poetry -C classic/forge install
        files: ^classic/forge/poetry\.lock$
        types: [file]
        language: system
        pass_filenames: false
      - id: poetry-install
        name: Check & Install dependencies - Classic - Benchmark
        alias: poetry-install-classic-benchmark
        entry: poetry -C classic/benchmark install
        files: ^classic/benchmark/poetry\.lock$
        types: [file]
        language: system
        pass_filenames: false

  - repo: local
    # For proper type checking, Prisma client must be up-to-date.
    hooks:
      - id: prisma-generate
        name: Prisma Generate - AutoGPT Platform - Backend
        alias: prisma-generate-platform-backend
        entry: bash -c 'cd autogpt_platform/backend && poetry run prisma generate'
        # include everything that triggers poetry install + the prisma schema
        files: ^autogpt_platform/((backend|autogpt_libs)/poetry\.lock|backend/schema.prisma)$
        types: [file]
        language: system
        pass_filenames: false

  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.7.2
    hooks:
      - id: ruff
        name: Lint (Ruff) - AutoGPT Platform - Backend
        alias: ruff-lint-platform-backend
        files: ^autogpt_platform/backend/
        args: [--fix]
      - id: ruff
        name: Lint (Ruff) - AutoGPT Platform - Libs
        alias: ruff-lint-platform-libs
        files: ^autogpt_platform/autogpt_libs/
        args: [--fix]
      - id: ruff-format
        name: Format (Ruff) - AutoGPT Platform - Libs
        alias: ruff-lint-platform-libs
        files: ^autogpt_platform/autogpt_libs/

  - repo: local
    # isort needs the context of which packages are installed to function, so we
    # can't use a vendored isort pre-commit hook (which runs in its own isolated venv).
    hooks:
      - id: isort
        name: Lint (isort) - AutoGPT Platform - Backend
        alias: isort-platform-backend
        entry: poetry -P autogpt_platform/backend run isort -p backend
        files: ^autogpt_platform/backend/
        types: [file, python]
        language: system
      - id: isort
        name: Lint (isort) - Classic - AutoGPT
        alias: isort-classic-autogpt
        entry: poetry -P classic/original_autogpt run isort -p autogpt
        files: ^classic/original_autogpt/
        types: [file, python]
        language: system
      - id: isort
        name: Lint (isort) - Classic - Forge
        alias: isort-classic-forge
        entry: poetry -P classic/forge run isort -p forge
        files: ^classic/forge/
        types: [file, python]
        language: system
      - id: isort
        name: Lint (isort) - Classic - Benchmark
        alias: isort-classic-benchmark
        entry: poetry -P classic/benchmark run isort -p agbenchmark
        files: ^classic/benchmark/
        types: [file, python]
        language: system

  - repo: https://github.com/psf/black
    rev: 24.10.0
    # Black has sensible defaults, doesn't need package context, and ignores
    # everything in .gitignore, so it works fine without any config or arguments.
    hooks:
      - id: black
        name: Format (Black)

  - repo: https://github.com/PyCQA/flake8
    rev: 7.0.0
    # To have flake8 load the config of the individual subprojects, we have to call
    # them separately.
    hooks:
      - id: flake8
        name: Lint (Flake8) - Classic - AutoGPT
        alias: flake8-classic-autogpt
        files: ^classic/original_autogpt/(autogpt|scripts|tests)/
        args: [--config=classic/original_autogpt/.flake8]
      - id: flake8
        name: Lint (Flake8) - Classic - Forge
        alias: flake8-classic-forge
        files: ^classic/forge/(forge|tests)/
        args: [--config=classic/forge/.flake8]
      - id: flake8
        name: Lint (Flake8) - Classic - Benchmark
        alias: flake8-classic-benchmark
        files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
        args: [--config=classic/benchmark/.flake8]

  - repo: local
    hooks:
      - id: prettier
        name: Format (Prettier) - AutoGPT Platform - Frontend
        alias: format-platform-frontend
        entry: bash -c 'cd autogpt_platform/frontend && npx prettier --write $(echo "$@" | sed "s|autogpt_platform/frontend/||g")' --
        files: ^autogpt_platform/frontend/
        types: [file]
        language: system

  - repo: local
    # To have watertight type checking, we check *all* the files in an affected
    # project. To trigger on poetry.lock we also reset the file `types` filter.
    hooks:
      - id: pyright
        name: Typecheck - AutoGPT Platform - Backend
        alias: pyright-platform-backend
        entry: poetry -C autogpt_platform/backend run pyright
        # include forge source (since it's a path dependency) but exclude *_test.py files:
        files: ^autogpt_platform/(backend/((backend|test)/|(\w+\.py|poetry\.lock)$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
        types: [file]
        language: system
        pass_filenames: false
      - id: pyright
        name: Typecheck - AutoGPT Platform - Libs
        alias: pyright-platform-libs
        entry: poetry -C autogpt_platform/autogpt_libs run pyright
        files: ^autogpt_platform/autogpt_libs/(autogpt_libs/|poetry\.lock$)
        types: [file]
        language: system
        pass_filenames: false
      - id: pyright
        name: Typecheck - Classic - AutoGPT
        alias: pyright-classic-autogpt
        entry: poetry -C classic/original_autogpt run pyright
        # include forge source (since it's a path dependency) but exclude *_test.py files:
        files: ^(classic/original_autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
        types: [file]
        language: system
        pass_filenames: false
      - id: pyright
        name: Typecheck - Classic - Forge
        alias: pyright-classic-forge
        entry: poetry -C classic/forge run pyright
        files: ^classic/forge/(forge/|poetry\.lock$)
        types: [file]
        language: system
        pass_filenames: false
      - id: pyright
        name: Typecheck - Classic - Benchmark
        alias: pyright-classic-benchmark
        entry: poetry -C classic/benchmark run pyright
        files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
        types: [file]
        language: system
        pass_filenames: false

  - repo: local
    hooks:
      - id: tsc
        name: Typecheck - AutoGPT Platform - Frontend
        entry: bash -c 'cd autogpt_platform/frontend && pnpm types'
        files: ^autogpt_platform/frontend/
        types: [file]
        language: system
        pass_filenames: false

  # - repo: local
  #   hooks:
  #     - id: pytest
  #       name: Run tests - AutoGPT Platform - Backend
  #       alias: pytest-platform-backend
  #       entry: bash -c 'cd autogpt_platform/backend && poetry run pytest'
  #       # include autogpt_libs source (since it's a path dependency) but exclude *_test.py files:
  #       files: ^autogpt_platform/(backend/((backend|test)/|poetry\.lock$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
  #       language: system
  #       pass_filenames: false
  #     - id: pytest
  #       name: Run tests - Classic - AutoGPT (excl. slow tests)
  #       alias: pytest-classic-autogpt
  #       entry: bash -c 'cd classic/original_autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
  #       # include forge source (since it's a path dependency) but exclude *_test.py files:
  #       files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
  #       language: system
  #       pass_filenames: false
  #     - id: pytest
  #       name: Run tests - Classic - Forge (excl. slow tests)
  #       alias: pytest-classic-forge
  #       entry: bash -c 'cd classic/forge && poetry run pytest --cov=forge -m "not slow"'
  #       files: ^classic/forge/(forge/|tests/|poetry\.lock$)
  #       language: system
  #       pass_filenames: false
  #     - id: pytest
  #       name: Run tests - Classic - Benchmark
  #       alias: pytest-classic-benchmark
  #       entry: bash -c 'cd classic/benchmark && poetry run pytest --cov=benchmark'
  #       files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
  #       language: system
  #       pass_filenames: false
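Each `files:` value above is a Python regular expression tested against repo-relative paths; since these patterns are `^`-anchored, `re.match` reproduces the filtering. A small sanity-check sketch (the example paths are illustrative, not taken from the repo) of how the backend pyright hook's pattern behaves:

```python
import re

# `files` pattern of the "Typecheck - AutoGPT Platform - Backend" hook above.
PATTERN = re.compile(
    r"^autogpt_platform/(backend/((backend|test)/|(\w+\.py|poetry\.lock)$)"
    r"|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)"
)

for path in [
    "autogpt_platform/backend/backend/app.py",              # backend source: matches
    "autogpt_platform/autogpt_libs/poetry.lock",            # path-dependency lockfile: matches
    "autogpt_platform/autogpt_libs/autogpt_libs/x_test.py", # *_test.py excluded: no match
]:
    print(f"{path}: {bool(PATTERN.match(path))}")
```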

View File

@@ -1,62 +0,0 @@
{
  "folders": [
    {
      "name": "frontend",
      "path": "../autogpt_platform/frontend"
    },
    {
      "name": "backend",
      "path": "../autogpt_platform/backend"
    },
    {
      "name": "market",
      "path": "../autogpt_platform/market"
    },
    {
      "name": "lib",
      "path": "../autogpt_platform/autogpt_libs"
    },
    {
      "name": "infra",
      "path": "../autogpt_platform/infra"
    },
    {
      "name": "docs",
      "path": "../docs"
    },
    {
      "name": "classic - autogpt",
      "path": "../classic/original_autogpt"
    },
    {
      "name": "classic - benchmark",
      "path": "../classic/benchmark"
    },
    {
      "name": "classic - forge",
      "path": "../classic/forge"
    },
    {
      "name": "classic - frontend",
      "path": "../classic/frontend"
    },
    {
      "name": "[root]",
      "path": ".."
    }
  ],
  "settings": {
    "python.analysis.typeCheckingMode": "basic"
  },
  "extensions": {
    "recommendations": [
      "charliermarsh.ruff",
      "dart-code.flutter",
      "ms-python.black-formatter",
      "ms-python.vscode-pylance",
      "prisma.prisma",
      "qwtel.sqlite-viewer"
    ]
  }
}

67
.vscode/launch.json vendored
View File

@@ -1,67 +0,0 @@
{
  "version": "0.2.0",
  "configurations": [
    {
      "name": "Frontend: Server Side",
      "type": "node-terminal",
      "request": "launch",
      "cwd": "${workspaceFolder}/autogpt_platform/frontend",
      "command": "pnpm dev"
    },
    {
      "name": "Frontend: Client Side",
      "type": "msedge",
      "request": "launch",
      "url": "http://localhost:3000"
    },
    {
      "name": "Frontend: Full Stack",
      "type": "node-terminal",
      "request": "launch",
      "command": "pnpm dev",
      "cwd": "${workspaceFolder}/autogpt_platform/frontend",
      "serverReadyAction": {
        "pattern": "- Local:.+(https?://.+)",
        "uriFormat": "%s",
        "action": "debugWithChrome"
      }
    },
    {
      "name": "Backend",
      "type": "debugpy",
      "request": "launch",
      "module": "backend.app",
      "env": {
        "OBJC_DISABLE_INITIALIZE_FORK_SAFETY": "YES"
      },
      "envFile": "${workspaceFolder}/backend/.env",
      "justMyCode": false,
      "cwd": "${workspaceFolder}/autogpt_platform/backend"
    },
    {
      "name": "Marketplace",
      "type": "debugpy",
      "request": "launch",
      "module": "autogpt_platform.market.main",
      "env": {
        "ENV": "dev"
      },
      "envFile": "${workspaceFolder}/market/.env",
      "justMyCode": false,
      "cwd": "${workspaceFolder}/market"
    }
  ],
  "compounds": [
    {
      "name": "Everything",
      "configurations": ["Backend", "Frontend: Full Stack"],
      // "preLaunchTask": "${defaultBuildTask}",
      "stopAll": true,
      "presentation": {
        "hidden": false,
        "order": 0
      }
    }
  ]
}

View File

@@ -1,53 +0,0 @@
# AutoGPT Platform Contribution Guide
This guide provides context for Codex when updating the **autogpt_platform** folder.
## Directory overview
- `autogpt_platform/backend`: FastAPI-based backend service.
- `autogpt_platform/autogpt_libs`: shared Python libraries.
- `autogpt_platform/frontend`: Next.js + TypeScript frontend.
- `autogpt_platform/docker-compose.yml`: the development stack.
See `docs/content/platform/getting-started.md` for setup instructions.
## Code style
- Format Python code with `poetry run format`.
- Format frontend code using `pnpm format`.
## Testing
- Backend: `poetry run test` (runs pytest with a docker based postgres + prisma).
- Frontend: `pnpm test` or `pnpm test-ui` for Playwright tests. See `docs/content/platform/contributing/tests.md` for tips.
Always run the relevant linters and tests before committing.
Use conventional commit messages for all commits (e.g. `feat(backend): add API`).
Types:
- feat
- fix
- refactor
- ci
- dx (developer experience)
Scopes:
- platform
- platform/library
- platform/marketplace
- backend
- backend/executor
- frontend
- frontend/library
- frontend/marketplace
- blocks
## Pull requests
- Use the template in `.github/PULL_REQUEST_TEMPLATE.md`.
- Rely on the pre-commit checks for linting and formatting.
- Fill out the **Changes** section and the checklist.
- Use conventional commit titles with a scope (e.g. `feat(frontend): add feature`).
- Keep out-of-scope changes under 20% of the PR.
- Ensure PR descriptions are complete.
- For changes touching `data/*.py`, validate user ID checks or explain why not needed.
- If adding protected frontend routes, update `frontend/lib/supabase/middleware.ts`.
- Use the Linear ticket branch structure if given, e.g. `codex/open-1668-resume-dropped-runs`.

View File

@@ -74,7 +74,7 @@ Lists all the available agents.
**Output**:
```
🎉 New agent 'my_agent' created and switched to the new directory in agents folder.
🎉 New agent 'my_agent' created and switched to the new directory in autogpts folder.
```
Creates a new agent named 'my_agent'.

View File

@@ -1,38 +1,34 @@
# AutoGPT Contribution Guide
If you are reading this, you are probably looking for the full **[contribution guide]**,
which is part of our [wiki].
If you are reading this, you are probably looking for our **[contribution guide]**,
which is part of our [knowledge base].
[contribution guide]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing
[wiki]: https://github.com/Significant-Gravitas/AutoGPT/wiki
[roadmap]: https://github.com/Significant-Gravitas/AutoGPT/discussions/6971
You can find our immediate priorities and their progress on our public [kanban board].
[contribution guide]: https://github.com/Significant-Gravitas/Nexus/wiki/Contributing
[knowledge base]: https://github.com/Significant-Gravitas/Nexus/wiki
[kanban board]: https://github.com/orgs/Significant-Gravitas/projects/1
## Contributing to the AutoGPT Platform Folder
All contributions to [the autogpt_platform folder](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform) will be under our [Contributor License Agreement](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform/Contributor%20License%20Agreement%20(CLA).md). By making a pull request contributing to this folder, you agree to the terms of our CLA for your contribution. All contributions to other folders will be under the MIT license.
## In short
1. Avoid duplicate work, issues, PRs etc.
2. We encourage you to collaborate with fellow community members on some of our bigger
[todo's][roadmap]!
[todo's][kanban board]!
* We highly recommend posting your idea and discussing it in the [dev channel].
3. Create a draft PR when starting work on bigger changes.
4. Adhere to the [Code Guidelines]
4. Create a draft PR when starting work on bigger changes.
3. Please also consider contributing something other than code; see the
[contribution guide] for options.
5. Clearly explain your changes when submitting a PR.
6. Don't submit broken code: test/validate your changes.
6. Don't submit stuff that's broken.
7. Avoid making unnecessary changes, especially if they're purely based on your personal
preferences. Doing so is the maintainers' job. ;-)
8. Please also consider contributing something other than code; see the
[contribution guide] for options.
[dev channel]: https://discord.com/channels/1092243196446249134/1095817829405704305
[code guidelines]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing#code-guidelines
If you wish to get involved with the project (beyond just contributing PRs), please read the
wiki page about [Catalyzing](https://github.com/Significant-Gravitas/AutoGPT/wiki/Catalyzing).
wiki [catalyzing](https://github.com/Significant-Gravitas/Nexus/wiki/Catalyzing) page.
In fact, why not just look through the whole wiki (it's only a few pages) and
hop on our Discord. See you there! :-)
❤️ & 🔆
The team @ AutoGPT
❤️ & 🔆
The team @ AutoGPT
https://discord.gg/autogpt

199
LICENSE
View File

@@ -1,204 +1,7 @@
All portions of this repository are under one of two licenses.
- Everything inside the autogpt_platform folder is under the Polyform Shield License.
- Everything outside the autogpt_platform folder is under the MIT License.
More info:
**Polyform Shield License:**
Code and content within the `autogpt_platform` folder is licensed under the Polyform Shield License. This new project is our in-development platform for building, deploying and managing agents.
Read more about this effort here: https://agpt.co/blog/introducing-the-autogpt-platform
**MIT License:**
All other portions of the AutoGPT repository (i.e., everything outside the `autogpt_platform` folder) are licensed under the MIT License. This includes:
- The Original, stand-alone AutoGPT Agent
- Forge: https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/forge
- AG Benchmark: https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/benchmark
- AutoGPT Classic GUI: https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/frontend.
We also publish additional work under the MIT Licence in other repositories, such as GravitasML (https://github.com/Significant-Gravitas/gravitasml) which is developed for and used in the AutoGPT Platform, and our [Code Ability](https://github.com/Significant-Gravitas/AutoGPT-Code-Ability) project.
Both licences are available to read below:
=====================================================
-----------------------------------------------------
=====================================================
# PolyForm Shield License 1.0.0
<https://polyformproject.org/licenses/shield/1.0.0>
## Acceptance
In order to get any license under these terms, you must agree
to them as both strict obligations and conditions to all
your licenses.
## Copyright License
The licensor grants you a copyright license for the
software to do everything you might do with the software
that would otherwise infringe the licensor's copyright
in it for any permitted purpose. However, you may
only distribute the software according to [Distribution
License](#distribution-license) and make changes or new works
based on the software according to [Changes and New Works
License](#changes-and-new-works-license).
## Distribution License
The licensor grants you an additional copyright license
to distribute copies of the software. Your license
to distribute covers distributing the software with
changes and new works permitted by [Changes and New Works
License](#changes-and-new-works-license).
## Notices
You must ensure that anyone who gets a copy of any part of
the software from you also gets a copy of these terms or the
URL for them above, as well as copies of any plain-text lines
beginning with `Required Notice:` that the licensor provided
with the software. For example:
> Required Notice: Copyright Yoyodyne, Inc. (http://example.com)
## Changes and New Works License
The licensor grants you an additional copyright license to
make changes and new works based on the software for any
permitted purpose.
## Patent License
The licensor grants you a patent license for the software that
covers patent claims the licensor can license, or becomes able
to license, that you would infringe by using the software.
## Noncompete
Any purpose is a permitted purpose, except for providing any
product that competes with the software or any product the
licensor or any of its affiliates provides using the software.
## Competition
Goods and services compete even when they provide functionality
through different kinds of interfaces or for different technical
platforms. Applications can compete with services, libraries
with plugins, frameworks with development tools, and so on,
even if they're written in different programming languages
or for different computer architectures. Goods and services
compete even when provided free of charge. If you market a
product as a practical substitute for the software or another
product, it definitely competes.
## New Products
If you are using the software to provide a product that does
not compete, but the licensor or any of its affiliates brings
your product into competition by providing a new version of
the software or another product using the software, you may
continue using versions of the software available under these
terms beforehand to provide your competing product, but not
any later versions.
## Discontinued Products
You may begin using the software to compete with a product
or service that the licensor or any of its affiliates has
stopped providing, unless the licensor includes a plain-text
line beginning with `Licensor Line of Business:` with the
software that mentions that line of business. For example:
> Licensor Line of Business: YoyodyneCMS Content Management
System (http://example.com/cms)
## Sales of Business
If the licensor or any of its affiliates sells a line of
business developing the software or using the software
to provide a product, the buyer can also enforce
[Noncompete](#noncompete) for that product.
## Fair Use
You may have "fair use" rights for the software under the
law. These terms do not limit them.
## No Other Rights
These terms do not allow you to sublicense or transfer any of
your licenses to anyone else, or prevent the licensor from
granting licenses to anyone else. These terms do not imply
any other licenses.
## Patent Defense
If you make any written claim that the software infringes or
contributes to infringement of any patent, your patent license
for the software granted under these terms ends immediately. If
your company makes such a claim, your patent license ends
immediately for work on behalf of your company.
## Violations
The first time you are notified in writing that you have
violated any of these terms, or done anything with the software
not covered by your licenses, your licenses can nonetheless
continue if you come into full compliance with these terms,
and take practical steps to correct past violations, within
32 days of receiving notice. Otherwise, all your licenses
end immediately.
## No Liability
***As far as the law allows, the software comes as is, without
any warranty or condition, and the licensor will not be liable
to you for any damages arising out of these terms or the use
or nature of the software, under any kind of legal claim.***
## Definitions
The **licensor** is the individual or entity offering these
terms, and the **software** is the software the licensor makes
available under these terms.
A **product** can be a good or service, or a combination
of them.
**You** refers to the individual or entity agreeing to these
terms.
**Your company** is any legal entity, sole proprietorship,
or other kind of organization that you work for, plus all
its affiliates.
**Affiliates** means the other organizations than an
organization has control over, is under the control of, or is
under common control with.
**Control** means ownership of substantially all the assets of
an entity, or the power to direct its management and policies
by vote, contract, or otherwise. Control can be direct or
indirect.
**Your licenses** are all the licenses granted to you for the
software under these terms.
**Use** means anything you do with the software requiring one
of your licenses.
=====================================================
-----------------------------------------------------
=====================================================
MIT License
Copyright (c) 2023 Toran Bruce Richards
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
@@ -206,11 +9,9 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE

200
QUICKSTART.md Normal file
View File

@@ -0,0 +1,200 @@
# Quickstart Guide
> For the complete getting started tutorial, see [this tutorial series](https://aiedge.medium.com/autogpt-forge-e3de53cc58ec).
Welcome to the Quickstart Guide! This guide will walk you through the process of setting up and running your own AutoGPT agent. Whether you're a seasoned AI developer or just starting out, this guide will provide you with the necessary steps to jumpstart your journey in the world of AI development with AutoGPT.
## System Requirements
This project supports Linux (Debian-based), Mac, and Windows Subsystem for Linux (WSL). If you are using a Windows system, you will need to install WSL. You can find the installation instructions for WSL [here](https://learn.microsoft.com/en-us/windows/wsl/).
## Getting Setup
1. **Fork the Repository**
To fork the repository, follow these steps:
- Navigate to the main page of the repository.
![Repository](docs/content/imgs/quickstart/001_repo.png)
- In the top-right corner of the page, click Fork.
![Create Fork UI](docs/content/imgs/quickstart/002_fork.png)
- On the next page, select your GitHub account to create the fork under.
- Wait for the forking process to complete. You now have a copy of the repository in your GitHub account.
2. **Clone the Repository**
To clone the repository, you need to have Git installed on your system. If you don't have Git installed, you can download it from [here](https://git-scm.com/downloads). Once you have Git installed, follow these steps:
- Open your terminal.
- Navigate to the directory where you want to clone the repository.
- Run the `git clone` command for the fork you just created.
![Clone the Repository](docs/content/imgs/quickstart/003_clone.png)
- Then open your project in your IDE.
![Open the Project in your IDE](docs/content/imgs/quickstart/004_ide.png)
3. **Setup the Project**
Next we need to set up the required dependencies. We provide a tool that helps you with all the tasks you need to perform on the repo.
It can be accessed by running the `run` command, i.e. typing `./run` in the terminal.
The first command you need to use is `./run setup`. This will guide you through the process of setting up your system.
Initially you will get instructions for installing Flutter and Chrome, and for setting up your GitHub access token, like the following image:
> Note for advanced users: the GitHub access token is only needed for the `./run arena enter` command, so the system can automatically create a PR.
![Setup the Project](docs/content/imgs/quickstart/005_setup.png)
### For Windows Users
If you're a Windows user and experience issues after installing WSL, follow the steps below to resolve them.
#### Update WSL
Run the following command in PowerShell or Command Prompt to:
1. Enable the optional WSL and Virtual Machine Platform components.
2. Download and install the latest Linux kernel.
3. Set WSL 2 as the default.
4. Download and install the Ubuntu Linux distribution (a reboot may be required).
```shell
wsl --install
```
For more detailed information and additional steps, refer to [Microsoft's WSL Setup Environment Documentation](https://learn.microsoft.com/en-us/windows/wsl/setup/environment).
#### Resolve FileNotFoundError or "No such file or directory" Errors
When you run `./run setup`, if you encounter errors like `No such file or directory` or `FileNotFoundError`, it might be because Windows-style line endings (CRLF - Carriage Return Line Feed) are not compatible with Unix/Linux style line endings (LF - Line Feed).
To resolve this, you can use the `dos2unix` utility to convert the line endings in your script from CRLF to LF. Here's how to install and run `dos2unix` on the script:
```shell
sudo apt update
sudo apt install dos2unix
dos2unix ./run
```
After executing the above commands, running `./run setup` should work successfully.
#### Store Project Files within the WSL File System
If you continue to experience issues, consider storing your project files within the WSL file system instead of the Windows file system. This method avoids issues related to path translations and permissions and provides a more consistent development environment.
You can keep running the command to get feedback on where you are up to with your setup.
When setup has been completed, the command will return an output like this:
![Setup Complete](docs/content/imgs/quickstart/006_setup_complete.png)
## Creating Your Agent
After completing the setup, the next step is to create your agent template.
Execute the command `./run agent create YOUR_AGENT_NAME`, where `YOUR_AGENT_NAME` should be replaced with a name of your choosing.
Tips for naming your agent:
* Give it its own unique name, or name it after yourself
* Include an important aspect of your agent in the name, such as its purpose
Examples: `SwiftyosAssistant`, `PwutsPRAgent`, `Narvis`, `evo.ninja`
![Create an Agent](docs/content/imgs/quickstart/007_create_agent.png)
### Optional: Entering the Arena
Entering the Arena is an optional step intended for those who wish to actively participate in the agent leaderboard. If you decide to participate, you can enter the Arena by running `./run arena enter YOUR_AGENT_NAME`. This step is not mandatory for the development or testing of your agent.
Entries with names like `agent`, `ExampleAgent`, `test_agent` or `MyExampleGPT` will NOT be merged. We also don't accept copycat entries that use the name of other projects, like `AutoGPT` or `evo.ninja`.
![Enter the Arena](docs/content/imgs/quickstart/008_enter_arena.png)
> **Note**
> For advanced users, create a new branch and create a file called YOUR_AGENT_NAME.json in the arena directory. Then commit this and create a PR to merge into the main repo. Only single file entries will be permitted. The json file needs the following format:
> ```json
> {
> "github_repo_url": "https://github.com/Swiftyos/YourAgentName",
> "timestamp": "2023-09-18T10:03:38.051498",
> "commit_hash_to_benchmark": "ac36f7bfc7f23ad8800339fa55943c1405d80d5e",
> "branch_to_benchmark": "master"
> }
> ```
> - `github_repo_url`: the URL of your fork
> - `timestamp`: timestamp of the last update of this file
> - `commit_hash_to_benchmark`: the commit hash of your entry. Update this each time you have something ready to be officially entered into the hackathon
> - `branch_to_benchmark`: the branch you are using to develop your agent on; the default is master. A sketch of a helper script that generates such a file is shown below.
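As an illustration only, here is a minimal Python sketch of a helper that generates such an entry file. It is hypothetical rather than part of the official tooling, and the repo URL, agent name, and output path are placeholders to adjust:

```python
import json
import subprocess
from datetime import datetime, timezone

entry = {
    # Placeholder URL: point this at your own fork.
    "github_repo_url": "https://github.com/<your-username>/YourAgentName",
    "timestamp": datetime.now(timezone.utc).replace(tzinfo=None).isoformat(),
    # Commit hash of the branch state you want benchmarked.
    "commit_hash_to_benchmark": subprocess.check_output(
        ["git", "rev-parse", "HEAD"], text=True
    ).strip(),
    "branch_to_benchmark": "master",
}

# Placeholder path: name the file after your agent.
with open("arena/YourAgentName.json", "w") as fp:
    json.dump(entry, fp, indent=4)
```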
## Running your Agent
Your agent can be started using `./run agent start YOUR_AGENT_NAME`.
This starts the agent on `http://localhost:8000/`.
![Start the Agent](docs/content/imgs/quickstart/009_start_agent.png)
The frontend can be accessed at `http://localhost:8000/`; you will first need to log in using either a Google account or your GitHub account.
![Login](docs/content/imgs/quickstart/010_login.png)
Upon logging in you will get a page that looks something like this, with your task history down the left-hand side of the page and the 'chat' window to send tasks to your agent.
![Login](docs/content/imgs/quickstart/011_home.png)
When you have finished with your agent, or if you just need to restart it, use Ctrl-C to end the session, then re-run the start command.
If you are having issues and want to ensure the agent has been stopped, there is a `./run agent stop` command which will kill the process using port 8000, which should be the agent.
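That stop command amounts to port-based process cleanup. A rough Python sketch of the idea, assuming the `lsof` CLI is available (the actual `./run` implementation may differ):

```python
import os
import signal
import subprocess

# Find any process listening on port 8000 (which should be the agent)
# and ask it to terminate. Assumes `lsof` is installed on the system.
result = subprocess.run(
    ["lsof", "-t", "-i", ":8000"], capture_output=True, text=True
)
for pid in result.stdout.split():
    os.kill(int(pid), signal.SIGTERM)
```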
## Benchmarking your Agent
The benchmarking system can also be accessed using the CLI:
```bash
agpt % ./run benchmark
Usage: cli.py benchmark [OPTIONS] COMMAND [ARGS]...
Commands to start the benchmark and list tests and categories
Options:
--help Show this message and exit.
Commands:
categories Benchmark categories group command
start Starts the benchmark command
tests Benchmark tests group command
agpt % ./run benchmark categories
Usage: cli.py benchmark categories [OPTIONS] COMMAND [ARGS]...
Benchmark categories group command
Options:
--help Show this message and exit.
Commands:
list List benchmark categories command
agpt % ./run benchmark tests
Usage: cli.py benchmark tests [OPTIONS] COMMAND [ARGS]...
Benchmark tests group command
Options:
--help Show this message and exit.
Commands:
details Benchmark test details command
list List benchmark tests command
```
The benchmark has been split into different categories of skills you can test your agent on. You can see what categories are available with
```bash
./run benchmark categories list
# And what tests are available with
./run benchmark tests list
```
![Login](docs/content/imgs/quickstart/012_tests.png)
Finally you can run the benchmark with
```bash
./run benchmark start YOUR_AGENT_NAME
```

182
README.md
View File

@@ -1,140 +1,10 @@
# AutoGPT: Build, Deploy, and Run AI Agents
# AutoGPT: build & use AI agents
[![Discord Follow](https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fdiscord.com%2Fapi%2Finvites%2Fautogpt%3Fwith_counts%3Dtrue&query=%24.approximate_member_count&label=total%20members&logo=discord&logoColor=white&color=7289da)](https://discord.gg/autogpt) &ensp;
[![Discord Follow](https://dcbadge.vercel.app/api/server/autogpt?style=flat)](https://discord.gg/autogpt) &ensp;
[![Twitter Follow](https://img.shields.io/twitter/follow/Auto_GPT?style=social)](https://twitter.com/Auto_GPT) &ensp;
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
<!-- Keep these links. Translations will automatically update with the README. -->
[Deutsch](https://zdoc.app/de/Significant-Gravitas/AutoGPT) |
[Español](https://zdoc.app/es/Significant-Gravitas/AutoGPT) |
[français](https://zdoc.app/fr/Significant-Gravitas/AutoGPT) |
[日本語](https://zdoc.app/ja/Significant-Gravitas/AutoGPT) |
[한국어](https://zdoc.app/ko/Significant-Gravitas/AutoGPT) |
[Português](https://zdoc.app/pt/Significant-Gravitas/AutoGPT) |
[Русский](https://zdoc.app/ru/Significant-Gravitas/AutoGPT) |
[中文](https://zdoc.app/zh/Significant-Gravitas/AutoGPT)
**AutoGPT** is a powerful platform that allows you to create, deploy, and manage continuous AI agents that automate complex workflows.
## Hosting Options
- Download to self-host (Free!)
- [Join the Waitlist](https://bit.ly/3ZDijAI) for the cloud-hosted beta (Closed Beta - Public release Coming Soon!)
## How to Self-Host the AutoGPT Platform
> [!NOTE]
> Setting up and hosting the AutoGPT Platform yourself is a technical process.
> If you'd rather something that just works, we recommend [joining the waitlist](https://bit.ly/3ZDijAI) for the cloud-hosted beta.
### System Requirements
Before proceeding with the installation, ensure your system meets the following requirements:
#### Hardware Requirements
- CPU: 4+ cores recommended
- RAM: Minimum 8GB, 16GB recommended
- Storage: At least 10GB of free space
#### Software Requirements
- Operating Systems:
- Linux (Ubuntu 20.04 or newer recommended)
- macOS (10.15 or newer)
- Windows 10/11 with WSL2
- Required Software (with minimum versions):
- Docker Engine (20.10.0 or newer)
- Docker Compose (2.0.0 or newer)
- Git (2.30 or newer)
- Node.js (16.x or newer)
- npm (8.x or newer)
- VSCode (1.60 or newer) or any modern code editor
#### Network Requirements
- Stable internet connection
- Access to required ports (will be configured in Docker)
- Ability to make outbound HTTPS connections
### Updated Setup Instructions:
We've moved to a fully maintained and regularly updated documentation site.
👉 [Follow the official self-hosting guide here](https://docs.agpt.co/platform/getting-started/)
This tutorial assumes you have Docker, VSCode, git and npm installed.
---
#### ⚡ Quick Setup with One-Line Script (Recommended for Local Hosting)
Skip the manual steps and get started in minutes using our automatic setup script.
For macOS/Linux:
```
curl -fsSL https://setup.agpt.co/install.sh -o install.sh && bash install.sh
```
For Windows (PowerShell):
```
powershell -c "iwr https://setup.agpt.co/install.bat -o install.bat; ./install.bat"
```
This will install dependencies, configure Docker, and launch your local instance — all in one go.
### 🧱 AutoGPT Frontend
The AutoGPT frontend is where users interact with our powerful AI automation platform. It offers multiple ways to engage with and leverage our AI agents. This is the interface where you'll bring your AI automation ideas to life:
**Agent Builder:** For those who want to customize, our intuitive, low-code interface allows you to design and configure your own AI agents.
**Workflow Management:** Build, modify, and optimize your automation workflows with ease. You build your agent by connecting blocks, where each block performs a single action.
**Deployment Controls:** Manage the lifecycle of your agents, from testing to production.
**Ready-to-Use Agents:** Don't want to build? Simply select from our library of pre-configured agents and put them to work immediately.
**Agent Interaction:** Whether you've built your own or are using pre-configured agents, easily run and interact with them through our user-friendly interface.
**Monitoring and Analytics:** Keep track of your agents' performance and gain insights to continually improve your automation processes.
[Read this guide](https://docs.agpt.co/platform/new_blocks/) to learn how to build your own custom blocks.
### 💽 AutoGPT Server
The AutoGPT Server is the powerhouse of our platform. This is where your agents run. Once deployed, agents can be triggered by external sources and can operate continuously. It contains all the essential components that make AutoGPT run smoothly.
**Source Code:** The core logic that drives our agents and automation processes.
**Infrastructure:** Robust systems that ensure reliable and scalable performance.
**Marketplace:** A comprehensive marketplace where you can find and deploy a wide range of pre-built agents.
### 🐙 Example Agents
Here are two examples of what you can do with AutoGPT:
1. **Generate Viral Videos from Trending Topics**
- This agent reads topics on Reddit.
- It identifies trending topics.
- It then automatically creates a short-form video based on the content.
2. **Identify Top Quotes from Videos for Social Media**
- This agent subscribes to your YouTube channel.
- When you post a new video, it transcribes it.
- It uses AI to identify the most impactful quotes to generate a summary.
- Then, it writes a post to automatically publish to your social media.
These examples show just a glimpse of what you can achieve with AutoGPT! You can create customized workflows to build agents for any use case.
---
### **License Overview:**
🛡️ **Polyform Shield License:**
All code and content within the `autogpt_platform` folder is licensed under the Polyform Shield License. This new project is our in-development platform for building, deploying and managing agents.</br>_[Read more about this effort](https://agpt.co/blog/introducing-the-autogpt-platform)_
🦉 **MIT License:**
All other portions of the AutoGPT repository (i.e., everything outside the `autogpt_platform` folder) are licensed under the MIT License. This includes the original stand-alone AutoGPT Agent, along with projects such as [Forge](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/forge), [agbenchmark](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/benchmark) and the [AutoGPT Classic GUI](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/frontend).</br>We also publish additional work under the MIT Licence in other repositories, such as [GravitasML](https://github.com/Significant-Gravitas/gravitasml) which is developed for and used in the AutoGPT Platform. See also our MIT Licenced [Code Ability](https://github.com/Significant-Gravitas/AutoGPT-Code-Ability) project.
---
### Mission
Our mission is to provide the tools, so that you can focus on what matters:
**AutoGPT** is the vision of the power of AI accessible to everyone, to use and to build on. Our mission is to provide the tools, so that you can focus on what matters:
- 🏗️ **Building** - Lay the foundation for something amazing.
- 🧪 **Testing** - Fine-tune your agent to perfection.
@@ -145,21 +15,26 @@ Be part of the revolution! **AutoGPT** is here to stay, at the forefront of AI i
**📖 [Documentation](https://docs.agpt.co)**
&ensp;|&ensp;
**🚀 [Contributing](CONTRIBUTING.md)**
&ensp;|&ensp;
**🛠️ [Build your own Agent - Quickstart](QUICKSTART.md)**
---
## 🤖 AutoGPT Classic
> Below is information about the classic version of AutoGPT.
## 🥇 Current Best Agent: evo.ninja
[Current Best Agent]: #-current-best-agent-evoninja
**🛠️ [Build your own Agent - Quickstart](classic/FORGE-QUICKSTART.md)**
The AutoGPT Arena Hackathon saw [**evo.ninja**](https://github.com/polywrap/evo.ninja) earn the top spot on our Arena Leaderboard, proving itself as the best open-source generalist agent. Try it now at https://evo.ninja!
📈 To challenge evo.ninja, AutoGPT, and others, submit your benchmark run to the [Leaderboard](#-leaderboard), and maybe your agent will be up here next!
## 🧱 Building blocks
### 🏗️ Forge
**Forge your own agent!** &ndash; Forge is a ready-to-go toolkit to build your own agent application. It handles most of the boilerplate code, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from [`forge`](/classic/forge/) can also be used individually to speed up development and reduce boilerplate in your agent project.
**Forge your own agent!** &ndash; Forge is a ready-to-go template for your agent application. All the boilerplate code is already handled, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from the [`forge.sdk`](/autogpts/forge/forge/sdk) can also be used individually to speed up development and reduce boilerplate in your agent project.
🚀 [**Getting Started with Forge**](https://github.com/Significant-Gravitas/AutoGPT/blob/master/classic/forge/tutorials/001_getting_started.md) &ndash;
🚀 [**Getting Started with Forge**](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpts/forge/tutorials/001_getting_started.md) &ndash;
This guide will walk you through the process of creating your own agent and using the benchmark and user interface.
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/forge) about Forge
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/autogpts/forge) about Forge
### 🎯 Benchmark
@@ -169,17 +44,24 @@ This guide will walk you through the process of creating your own agent and usin
📦 [`agbenchmark`](https://pypi.org/project/agbenchmark/) on Pypi
&ensp;|&ensp;
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/benchmark) about the Benchmark
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/blob/master/benchmark) about the Benchmark
#### 🏆 [Leaderboard][leaderboard]
[leaderboard]: https://leaderboard.agpt.co
Submit your benchmark run through the UI and claim your place on the AutoGPT Arena Leaderboard! The best scoring general agent earns the title of **[Current Best Agent]**, and will be adopted into our repo so people can easily run it through the [CLI].
[![Screenshot of the AutoGPT Arena leaderboard](https://github.com/Significant-Gravitas/AutoGPT/assets/12185583/60813392-9ddb-4cca-bb44-b477dbae225d)][leaderboard]
### 💻 UI
**Makes agents easy to use!** The `frontend` gives you a user-friendly interface to control and monitor your agents. It connects to agents through the [agent protocol](#-agent-protocol), ensuring compatibility with many agents from both inside and outside of our ecosystem.
<!-- TODO: insert screenshot of front end -->
<!-- TODO: instert screenshot of front end -->
The frontend works out-of-the-box with all agents in the repo. Just use the [CLI] to run your agent of choice!
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/frontend) about the Frontend
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/frontend) about the Frontend
### ⌨️ CLI
@@ -196,6 +78,7 @@ Options:
Commands:
agent Commands to create, start and stop agents
arena Commands to enter the arena
benchmark Commands to start the benchmark and list tests and categories
setup Installs dependencies needed for your system.
```
@@ -208,7 +91,7 @@ Just clone the repo, install dependencies with `./run setup`, and you should be
[![Join us on Discord](https://invidget.switchblade.xyz/autogpt)](https://discord.gg/autogpt)
To report a bug or request a feature, create a [GitHub Issue](https://github.com/Significant-Gravitas/AutoGPT/issues/new/choose). Please ensure someone else hasn't created an issue for the same topic.
To report a bug or request a feature, create a [GitHub Issue](https://github.com/Significant-Gravitas/AutoGPT/issues/new/choose). Please ensure someone else hasn’t created an issue for the same topic.
## 🤝 Sister projects
@@ -218,8 +101,6 @@ To maintain a uniform standard and ensure seamless compatibility with many curre
---
## Stars stats
<p align="center">
<a href="https://star-history.com/#Significant-Gravitas/AutoGPT">
<picture>
@@ -229,10 +110,3 @@ To maintain a uniform standard and ensure seamless compatibility with many curre
</picture>
</a>
</p>
## ⚡ Contributors
<a href="https://github.com/Significant-Gravitas/AutoGPT/graphs/contributors" alt="View Contributors">
<img src="https://contrib.rocks/image?repo=Significant-Gravitas/AutoGPT&max=1000&columns=10" alt="Contributors" />
</a>

View File

@@ -1,48 +1,66 @@
# Security Policy
## Reporting Security Issues
- [**Using AutoGPT Securely**](#using-AutoGPT-securely)
- [Restrict Workspace](#restrict-workspace)
- [Untrusted inputs](#untrusted-inputs)
- [Data privacy](#data-privacy)
- [Untrusted environments or networks](#untrusted-environments-or-networks)
- [Multi-Tenant environments](#multi-tenant-environments)
- [**Reporting a Vulnerability**](#reporting-a-vulnerability)
We take the security of our project seriously. If you believe you have found a security vulnerability, please report it to us privately. **Please do not report security vulnerabilities through public GitHub issues, discussions, or pull requests.**
## Using AutoGPT Securely
> **Important Note**: Any code within the `classic/` folder is considered legacy, unsupported, and out of scope for security reports. We will not address security vulnerabilities in this deprecated code.
### Restrict Workspace
Instead, please report them via:
- [GitHub Security Advisory](https://github.com/Significant-Gravitas/AutoGPT/security/advisories/new)
<!--- [Huntr.dev](https://huntr.com/repos/significant-gravitas/autogpt) - where you may be eligible for a bounty-->
Since agents can read and write files, it is important to keep them restricted to a specific workspace. This happens by default *unless* RESTRICT_TO_WORKSPACE is set to False.
### Reporting Process
1. **Submit Report**: Use one of the above channels to submit your report
2. **Response Time**: Our team will acknowledge receipt of your report within 14 business days.
3. **Collaboration**: We will collaborate with you to understand and validate the issue
4. **Resolution**: We will work on a fix and coordinate the release process
Disabling RESTRICT_TO_WORKSPACE can increase security risks. However, if you still need to disable it, consider running AutoGPT inside a [sandbox](https://developers.google.com/code-sandboxing), to mitigate some of these risks.
### Disclosure Policy
- Please provide detailed reports with reproducible steps
- Include the version/commit hash where you discovered the vulnerability
- Allow us a 90-day security fix window before any public disclosure
- After patch is released, allow 30 days for users to update before public disclosure (for a total of 120 days max between update time and fix time)
- Share any potential mitigations or workarounds if known
### Untrusted inputs
## Supported Versions
Only the following versions are eligible for security updates:
When handling untrusted inputs, it's crucial to isolate the execution and carefully pre-process inputs to mitigate script injection risks.
| Version | Supported |
|---------|-----------|
| Latest release on master branch | ✅ |
| Development commits (pre-master) | ✅ |
| Classic folder (deprecated) | ❌ |
| All other versions | ❌ |
For maximum security when handling untrusted inputs, you may need to employ the following:
## Security Best Practices
When using this project:
1. Always use the latest stable version
2. Review security advisories before updating
3. Follow our security documentation and guidelines
4. Keep your dependencies up to date
5. Do not use code from the `classic/` folder as it is deprecated and unsupported
* Sandboxing: Isolate the process.
* Updates: Keep your libraries (including AutoGPT) updated with the latest security patches.
* Input Sanitation: Before feeding data to the model, sanitize inputs rigorously. This involves techniques such as:
* Validation: Enforce strict rules on allowed characters and data types.
* Filtering: Remove potentially malicious scripts or code fragments.
* Encoding: Convert special characters into safe representations.
* Verification: Run tooling that identifies potential script injections (e.g. [models that detect prompt injection attempts](https://python.langchain.com/docs/guides/safety/hugging_face_prompt_injection)).
## Past Security Advisories
For a list of past security advisories, please visit our [Security Advisory Page](https://github.com/Significant-Gravitas/AutoGPT/security/advisories) and [Huntr Disclosures Page](https://huntr.com/repos/significant-gravitas/autogpt).
### Data privacy
---
Last updated: November 2024
To protect sensitive data from potential leaks or unauthorized access, it is crucial to sandbox the agent execution. This means running it in a secure, isolated environment, which helps mitigate many attack vectors.
### Untrusted environments or networks
Since AutoGPT performs network calls to the OpenAI API, it is important to always run it in trusted environments and networks. Running it in untrusted environments can expose your API KEY to attackers.
Additionally, running it on an untrusted network can expose your data to potential network attacks.
However, even when running on trusted networks, it is important to always encrypt sensitive data while sending it over the network.
### Multi-Tenant environments
If you intend to run multiple AutoGPT brains in parallel, it is your responsibility to ensure the models do not interact or access each other's data.
The primary areas of concern are tenant isolation, resource allocation, model sharing and hardware attacks.
- Tenant Isolation: you must make sure that the tenants run separately to prevent unwanted access to the data from other tenants. Keeping model network traffic separate is also important because you not only prevent unauthorized access to data, but also prevent malicious users or tenants sending prompts to execute under another tenant's identity.
- Resource Allocation: a denial of service caused by one tenant can affect the overall system health. Implement safeguards like rate limits, access controls, and health monitoring.
- Data Sharing: in a multi-tenant design with data sharing, ensure tenants and users understand the security risks and sandbox agent execution to mitigate risks.
- Hardware Attacks: the hardware (GPUs or TPUs) can also be attacked. [Research](https://scholar.google.com/scholar?q=gpu+side+channel) has shown that side channel attacks on GPUs are possible, which can make data leak from other brains or processes running on the same system at the same time.
## Reporting a Vulnerability
Beware that none of the topics under [Using AutoGPT Securely](#using-AutoGPT-securely) are considered vulnerabilities in AutoGPT.
However, if you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
Please disclose it as a private [security advisory](https://github.com/Significant-Gravitas/AutoGPT/security/advisories/new).
A team of volunteers on a reasonable-effort basis maintains this project. As such, please give us at least 90 days to work on a fix before public exposure.

6
arena/480bot.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/480/AutoGPT",
"timestamp": "2023-10-22T06:49:52.536177",
"commit_hash_to_benchmark": "16e266c65fb4620a1b1397532c503fa426ec191d",
"branch_to_benchmark": "master"
}

6
arena/AGENT_GORDON.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/filipjakubowski/AutoGPT",
"timestamp": "2023-11-01T17:13:24.272333",
"commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4",
"branch_to_benchmark": "master"
}

6
arena/AGENT_JARVIS.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/filipjakubowski/AutoGPT",
"timestamp": "2023-11-04T10:13:11.039444",
"commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4",
"branch_to_benchmark": "master"
}

6
arena/AI.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/QingquanBao/AutoGPT",
"timestamp": "2023-11-01T16:20:51.086235",
"commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4",
"branch_to_benchmark": "master"
}

7
arena/AKBAgent.json Normal file
View File

@@ -0,0 +1,7 @@
{
"github_repo_url": "https://github.com/imakb/AKBAgent",
"timestamp": "2023-10-31T00:03:23.000000",
"commit_hash_to_benchmark": "c65b71d51d8f849663172c5a128953b4ca92b2b0",
"branch_to_benchmark": "AKBAgent"
}

6
arena/ASSISTANT.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/hongzzz/AutoGPT",
"timestamp": "2023-10-13T03:22:59.347424",
"commit_hash_to_benchmark": "38790a27ed2c1b63a301b6a67e7590f2d30de53e",
"branch_to_benchmark": "master"
}

6
arena/AUTO_ENGINEER.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/kaiomagalhaes/AutoGPT",
"timestamp": "2023-10-04T15:25:30.458687",
"commit_hash_to_benchmark": "1bd85cbc09473c0252928fb849ae8373607d6065",
"branch_to_benchmark": "master"
}

View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/Jonobinsoftware/AutoGPT-Tutorial",
"timestamp": "2023-10-10T06:01:23.439061",
"commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3",
"branch_to_benchmark": "master"
}

View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/aivaras-mazylis/AutoGPT",
"timestamp": "2023-10-17T13:16:16.327237",
"commit_hash_to_benchmark": "1eadc64dc0a693c7c9de77ddaef857f3a36f7950",
"branch_to_benchmark": "master"
}

6
arena/AgGPT.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/althaf004/AutoGPT",
"timestamp": "2023-09-26T03:40:03.658369",
"commit_hash_to_benchmark": "4a8da53d85d466f2eb325c745a2c03cf88792e7d",
"branch_to_benchmark": "master"
}

6
arena/AgentJPark.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/againeureka/AutoGPT",
"timestamp": "2023-10-12T02:20:01.005361",
"commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31",
"branch_to_benchmark": "master"
}

6
arena/AgentKD.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/kitdesai/AgentKD",
"timestamp": "2023-10-14T02:35:09.979434",
"commit_hash_to_benchmark": "93e3ec36ed6cd9e5e60585f016ad3bef4e1c52cb",
"branch_to_benchmark": "master"
}

6
arena/Ahmad.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/JawadAbu/AutoGPT.git",
"timestamp": "2023-11-05T12:35:35.352028",
"commit_hash_to_benchmark": "a1d60878141116641ea864ef6de7ca6142e9534c",
"branch_to_benchmark": "master"
}

6
arena/Alfred.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/Shadowless422/Alfred",
"timestamp": "2023-10-03T10:42:45.473477",
"commit_hash_to_benchmark": "949ab477a87cfb7a3668d7961e9443922081e098",
"branch_to_benchmark": "master"
}

6
arena/AlphaCISO.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/alphaciso/AutoGPT",
"timestamp": "2023-10-21T08:26:41.961187",
"commit_hash_to_benchmark": "415b4ceed1417d0b21d87d7d4ea0cd38943e264f",
"branch_to_benchmark": "master"
}

6
arena/AndersLensway.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/4nd3rs/AutoGPT",
"timestamp": "2023-10-11T11:00:08.150159",
"commit_hash_to_benchmark": "57bcbdf45c6c1493a4e5f6a4e72594ea13c10f93",
"branch_to_benchmark": "master"
}

1
arena/AntlerTestGPT.json Normal file
View File

@@ -0,0 +1 @@
{"github_repo_url": "https://github.com/pjw1/AntlerAI", "timestamp": "2023-10-07T11:46:39Z", "commit_hash_to_benchmark": "f81e086e5647370854ec639c531c900775a99207", "branch_to_benchmark": "master"}

6
arena/AppleGPT.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/Nimit3-droid/AutoGPT",
"timestamp": "2023-10-03T11:59:15.495902",
"commit_hash_to_benchmark": "d8d7fc4858a8d13407f6d7da360c6b5d398f2175",
"branch_to_benchmark": "master"
}

1
arena/AquaAgent.json Normal file
View File

@@ -0,0 +1 @@
{"github_repo_url": "https://github.com/somnistudio/SomniGPT", "timestamp": "2023-10-06T16:40:14Z", "commit_hash_to_benchmark": "47eb5124fa97187d7f3fa4036e422cd771cf0ae7", "branch_to_benchmark": "master"}

View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/AmahAjavon/AutoGPT",
"timestamp": "2023-10-28T20:32:15.845741",
"commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
"branch_to_benchmark": "master"
}

6
arena/AskOpie.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/arunqa/AutoGPT",
"timestamp": "2023-09-26T05:13:24.466017",
"commit_hash_to_benchmark": "4a8da53d85d466f2eb325c745a2c03cf88792e7d",
"branch_to_benchmark": "master"
}

6
arena/Auto.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/Nikhil8652/AutoGPT",
"timestamp": "2023-10-16T09:12:17.452121",
"commit_hash_to_benchmark": "2f79caa6b901d006a78c1ac9e69db4465c0f971a",
"branch_to_benchmark": "master"
}

6
arena/AutoGPT-ariel.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/RedTachyon/AutoGPT",
"timestamp": "2023-10-21T22:31:30.871023",
"commit_hash_to_benchmark": "eda21d51921899756bf866cf5c4d0f2dcd3e2e23",
"branch_to_benchmark": "master"
}

1
arena/AutoGPT2.json Normal file
View File

@@ -0,0 +1 @@
{"github_repo_url": "https://github.com/SarahGrevy/AutoGPT", "timestamp": "2023-10-20T17:21:22Z", "commit_hash_to_benchmark": "32300906c9aafea8c550fa2f9edcc113fbfc512c", "branch_to_benchmark": "master"}

6
arena/AutoGenius.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/JasonDRZ/AutoGPT",
"timestamp": "2023-10-26T13:27:58.805270",
"commit_hash_to_benchmark": "ab2a61833584c42ededa805cbac50718c72aa5ae",
"branch_to_benchmark": "master"
}

6
arena/AutoTDD.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/vshneer/AutoTDD",
"timestamp": "2023-10-11T19:14:30.939747",
"commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31",
"branch_to_benchmark": "master"
}

View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/cagdasbas/AutoGPT",
"timestamp": "2023-10-15T08:43:40.193080",
"commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff",
"branch_to_benchmark": "master"
}

6
arena/AwareAgent.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/LuisLechugaRuiz/AwareAgent",
"timestamp": "2023-10-26T10:10:01.481205",
"commit_hash_to_benchmark": "c180063dde49af02ed95ec4c019611da0a5540d7",
"branch_to_benchmark": "master"
}

6
arena/Bagi_agent.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/xpineda/AutoGPT_xabyvng.git",
"timestamp": "2023-10-20T09:21:48.837635",
"commit_hash_to_benchmark": "2187f66149ffa4bb99f9ca6a11b592fe4d683791",
"branch_to_benchmark": "master"
}

6
arena/BanglaSgAgent.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/aniruddha-adhikary/AutoGPT",
"timestamp": "2023-09-27T15:32:24.056105",
"commit_hash_to_benchmark": "6f289e6dfa8246f8993b76c933527f3707b8d7e5",
"branch_to_benchmark": "master"
}

6
arena/Baptiste.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/Baptistecaille/AutoGPT",
"timestamp": "2023-10-01T19:44:23.416591",
"commit_hash_to_benchmark": "3da29eae45683457131ee8736bedae7e2a74fbba",
"branch_to_benchmark": "master"
}

1
arena/Bravo06.json Normal file
View File

@@ -0,0 +1 @@
{"github_repo_url": "https://github.com/jafar-albadarneh/Bravo06GPT", "timestamp": "2023-10-04T23:01:27Z", "commit_hash_to_benchmark": "f8c177b4b0e4ca45a3a104011b866c0415c648f1", "branch_to_benchmark": "master"}

1
arena/Brillante-AI.json Normal file
View File

@@ -0,0 +1 @@
{"github_repo_url": "https://github.com/dabeer021/Brillante-AI", "timestamp": "2023-10-02T19:05:04Z", "commit_hash_to_benchmark": "163ab75379e1ee7792f50d4d70a1f482ca9cb6a1", "branch_to_benchmark": "master"}

6
arena/Bunny.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/razorhasbeen/AutoGPT",
"timestamp": "2023-10-03T11:50:56.725628",
"commit_hash_to_benchmark": "d8d7fc4858a8d13407f6d7da360c6b5d398f2175",
"branch_to_benchmark": "master"
}

Some files were not shown because too many files have changed in this diff.
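
Note: every arena/*.json file added in this diff shares the same four-field submission schema (github_repo_url, timestamp, commit_hash_to_benchmark, branch_to_benchmark). As a minimal sketch, assuming only the Python standard library (the check_arena_entry helper below is hypothetical and not part of this diff), such entries could be sanity-checked like this:

import json
from pathlib import Path

# Fields shared by every arena submission added in this diff.
REQUIRED_FIELDS = {
    "github_repo_url",
    "timestamp",
    "commit_hash_to_benchmark",
    "branch_to_benchmark",
}

def check_arena_entry(path: Path) -> list[str]:
    """Return any problems found in a single arena/*.json file."""
    problems = []
    data = json.loads(path.read_text())
    missing = REQUIRED_FIELDS - data.keys()
    if missing:
        problems.append(f"{path.name}: missing fields {sorted(missing)}")
    commit = data.get("commit_hash_to_benchmark", "")
    if len(commit) != 40:  # a full SHA-1 hash is 40 hex characters
        problems.append(f"{path.name}: commit hash is not a full SHA")
    return problems

if __name__ == "__main__":
    for entry in sorted(Path("arena").glob("*.json")):
        for problem in check_arena_entry(entry):
            print(problem)

Run from the repository root; any file missing a field or carrying a truncated commit hash is reported.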