Mirror of https://github.com/Significant-Gravitas/AutoGPT.git
Synced 2026-04-08 03:00:28 -04:00

Compare commits: test-scree...fix/execut (4 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 9b20f4cd13 | |
| | a3d0f9cbd2 | |
| | 02ddb51446 | |
| | 750e096f15 | |
.deepsource.toml (Normal file, 18 lines)
@@ -0,0 +1,18 @@
version = 1

test_patterns = ["**/*.spec.ts","**/*_test.py","**/*_tests.py","**/test_*.py"]

exclude_patterns = ["classic/**"]

[[analyzers]]
name = "javascript"

[analyzers.meta]
plugins = ["react"]
environment = ["nodejs"]

[[analyzers]]
name = "python"

[analyzers.meta]
runtime_version = "3.x.x"
.dockerignore (Normal file, 65 lines)
@@ -0,0 +1,65 @@
# Ignore everything by default, selectively add things to context
*

# Platform - Libs
!autogpt_platform/autogpt_libs/autogpt_libs/
!autogpt_platform/autogpt_libs/pyproject.toml
!autogpt_platform/autogpt_libs/poetry.lock
!autogpt_platform/autogpt_libs/README.md

# Platform - Backend
!autogpt_platform/backend/backend/
!autogpt_platform/backend/test/e2e_test_data.py
!autogpt_platform/backend/migrations/
!autogpt_platform/backend/schema.prisma
!autogpt_platform/backend/pyproject.toml
!autogpt_platform/backend/poetry.lock
!autogpt_platform/backend/README.md
!autogpt_platform/backend/.env

# Platform - Market
!autogpt_platform/market/market/
!autogpt_platform/market/scripts.py
!autogpt_platform/market/schema.prisma
!autogpt_platform/market/pyproject.toml
!autogpt_platform/market/poetry.lock
!autogpt_platform/market/README.md

# Platform - Frontend
!autogpt_platform/frontend/src/
!autogpt_platform/frontend/public/
!autogpt_platform/frontend/scripts/
!autogpt_platform/frontend/package.json
!autogpt_platform/frontend/pnpm-lock.yaml
!autogpt_platform/frontend/tsconfig.json
!autogpt_platform/frontend/README.md
## config
!autogpt_platform/frontend/*.config.*
!autogpt_platform/frontend/.env.*
!autogpt_platform/frontend/.env

# Classic - AutoGPT
!classic/original_autogpt/autogpt/
!classic/original_autogpt/pyproject.toml
!classic/original_autogpt/poetry.lock
!classic/original_autogpt/README.md
!classic/original_autogpt/tests/

# Classic - Benchmark
!classic/benchmark/agbenchmark/
!classic/benchmark/pyproject.toml
!classic/benchmark/poetry.lock
!classic/benchmark/README.md

# Classic - Forge
!classic/forge/
!classic/forge/pyproject.toml
!classic/forge/poetry.lock
!classic/forge/README.md

# Classic - Frontend
!classic/frontend/build/web/

# Explicitly re-ignore some folders
.*
**/__pycache__
.gitattributes (vendored, Normal file, 10 lines)
@@ -0,0 +1,10 @@
classic/frontend/build/** linguist-generated

**/poetry.lock linguist-generated

docs/_javascript/** linguist-vendored

# Exclude VCR cassettes from stats
classic/forge/tests/vcr_cassettes/**/**.y*ml linguist-generated

* text=auto
.github/CODEOWNERS (vendored, Normal file, 7 lines)
@@ -0,0 +1,7 @@
* @Significant-Gravitas/maintainers
.github/workflows/ @Significant-Gravitas/devops
classic/forge/ @Significant-Gravitas/forge-maintainers
classic/benchmark/ @Significant-Gravitas/benchmark-maintainers
classic/frontend/ @Significant-Gravitas/frontend-maintainers
autogpt_platform/infra @Significant-Gravitas/devops
.github/CODEOWNERS @Significant-Gravitas/admins
.github/ISSUE_TEMPLATE/1.bug.yml (vendored, Normal file, 173 lines)
@@ -0,0 +1,173 @@
name: Bug report 🐛
description: Create a bug report for AutoGPT.
labels: ['status: needs triage']
body:
  - type: markdown
    attributes:
      value: |
        ### ⚠️ Before you continue
        * Check out our [backlog], [roadmap] and join our [discord] to discuss what's going on
        * If you need help, you can ask in the [discussions] section or in [#tech-support]
        * **Thoroughly search the [existing issues] before creating a new one**
        * Read our [wiki page on Contributing]
        [backlog]: https://github.com/orgs/Significant-Gravitas/projects/1
        [roadmap]: https://github.com/orgs/Significant-Gravitas/projects/2
        [discord]: https://discord.gg/autogpt
        [discussions]: https://github.com/Significant-Gravitas/AutoGPT/discussions
        [#tech-support]: https://discord.com/channels/1092243196446249134/1092275629602394184
        [existing issues]: https://github.com/Significant-Gravitas/AutoGPT/issues?q=is%3Aissue
        [wiki page on Contributing]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing

  - type: checkboxes
    attributes:
      label: ⚠️ Search for existing issues first ⚠️
      description: >
        Please [search the history](https://github.com/Significant-Gravitas/AutoGPT/issues)
        to see if an issue already exists for the same problem.
      options:
        - label: I have searched the existing issues, and there is no existing issue for my problem
          required: true

  - type: markdown
    attributes:
      value: |
        Please confirm that the issue is described well and precisely in the title above ⬆️.
        A good rule of thumb: What would you type if you were searching for the issue?

        For example:
        BAD - my AutoGPT keeps looping
        GOOD - After performing execute_python_file, AutoGPT goes into a loop where it keeps trying to execute the file.

        ⚠️ SUPER-busy repo, please help the volunteer maintainers.
        The less time we spend here, the more time we can spend building AutoGPT.

        Please help us help you by following these steps:
        - Search for existing issues; adding a comment to an existing issue is tidier than opening a new one,
          and newer issues are not reviewed any earlier (review order depends on the current priorities set by our wonderful team)
        - If you are unsure whether your issue is already known, ask on our Discord (https://discord.gg/autogpt)
        - Provide relevant info:
          - Provide the commit hash (`git rev-parse HEAD` gets it) if possible
          - If it's a pip/packages issue, mention this in the title and provide the pip and Python versions
          - If it's a crash, provide the traceback and describe the error you got as precisely as possible in the title.

  - type: dropdown
    attributes:
      label: Which Operating System are you using?
      description: >
        Please select the operating system you were using to run AutoGPT when this problem occurred.
      options:
        - Windows
        - Linux
        - MacOS
        - Docker
        - Devcontainer / Codespace
        - Windows Subsystem for Linux (WSL)
        - Other
    validations:
      required: true
    nested_fields:
      - type: text
        attributes:
          label: Specify the system
          description: Please specify the system you are working on.

  - type: dropdown
    attributes:
      label: Which version of AutoGPT are you using?
      description: |
        Please select which version of AutoGPT you were using when this issue occurred.
        If you downloaded the code from the [releases page](https://github.com/Significant-Gravitas/AutoGPT/releases/), make sure you were using the latest code.
        **If you weren't, please try with the [latest code](https://github.com/Significant-Gravitas/AutoGPT/releases/)**.
        If you installed with git, you can run `git branch` to see which version of AutoGPT you are running.
      options:
        - Latest Release
        - Stable (branch)
        - Master (branch)
    validations:
      required: true

  - type: dropdown
    attributes:
      label: What LLM Provider do you use?
      description: >
        If you are using AutoGPT with `SMART_LLM=gpt-3.5-turbo`, your problems may be caused by
        the [limitations](https://github.com/Significant-Gravitas/AutoGPT/issues?q=is%3Aissue+label%3A%22AI+model+limitation%22) of GPT-3.5.
      options:
        - Azure
        - Groq
        - Anthropic
        - Llamafile
        - Other (detail in issue)
    validations:
      required: true

  - type: dropdown
    attributes:
      label: Which area covers your issue best?
      description: >
        Select the area related to the issue you are reporting.
      options:
        - Installation and setup
        - Memory
        - Performance
        - Prompt
        - Commands
        - Plugins
        - AI Model Limitations
        - Challenges
        - Documentation
        - Logging
        - Agents
        - Other
    validations:
      required: true
      autolabels: true
    nested_fields:
      - type: text
        attributes:
          label: Specify the area
          description: Please specify the area you think is best related to the issue.

  - type: input
    attributes:
      label: What commit or version are you using?
      description: To help us reproduce the issue, it helps to know which version of the software you were using when it happened. Please run `git log -n 1 --pretty=format:"%H"` to output the full commit hash.
    validations:
      required: true

  - type: textarea
    attributes:
      label: Describe your issue.
      description: Describe the problem you are experiencing. Try to describe only the issue itself and phrase it short but clear. ⚠️ Provide NO other data in this field
    validations:
      required: true

  # The following are optional file content uploads
  - type: markdown
    attributes:
      value: |
        ⚠️ The following is OPTIONAL; please keep in mind that the log files may contain personal information such as credentials. ⚠️

        The log files are located in the folder 'logs' inside the main AutoGPT folder.

  - type: textarea
    attributes:
      label: Upload Activity Log Content
      description: |
        Upload the activity log content; this can help us understand the issue better.
        To do this, go to the 'logs' folder in your main AutoGPT folder, open activity.log, and copy/paste the contents into this field.
        ⚠️ The activity log may contain personal data given to AutoGPT by you in prompt or input, as well as
        any personal information that AutoGPT collected from files during the last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
    validations:
      required: false

  - type: textarea
    attributes:
      label: Upload Error Log Content
      description: |
        Upload the error log content; this will help us understand the issue better.
        To do this, go to the 'logs' folder in your main AutoGPT folder, open error.log, and copy/paste the contents into this field.
        ⚠️ The error log may contain personal data given to AutoGPT by you in prompt or input, as well as
        any personal information that AutoGPT collected from files during the last run. Do not add the error log if you are not comfortable with sharing it. ⚠️
    validations:
      required: false
.github/ISSUE_TEMPLATE/2.feature.yml (vendored, Normal file, 28 lines)
@@ -0,0 +1,28 @@
name: Feature request 🚀
description: Suggest a new idea for AutoGPT!
labels: ['status: needs triage']
body:
  - type: markdown
    attributes:
      value: |
        First, check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing)
        Please provide a searchable summary of the issue in the title above ⬆️.
  - type: checkboxes
    attributes:
      label: Duplicates
      description: Please [search the history](https://github.com/Significant-Gravitas/AutoGPT/issues) to see if an issue already exists for the same problem.
      options:
        - label: I have searched the existing issues
          required: true
  - type: textarea
    attributes:
      label: Summary 💡
      description: Describe how it should work.
  - type: textarea
    attributes:
      label: Examples 🌈
      description: Provide a link to other implementations, or screenshots of the expected behavior.
  - type: textarea
    attributes:
      label: Motivation 🔦
      description: What are you trying to accomplish? How has the lack of this feature affected you? Providing context helps us come up with a solution that is more useful in the real world.
.github/PULL_REQUEST_TEMPLATE.md (vendored, Normal file, 39 lines)
@@ -0,0 +1,39 @@
<!-- Clearly explain the need for these changes: -->

### Changes 🏗️

<!-- Concisely describe all of the changes made in this pull request: -->

### Checklist 📋

#### For code changes:
- [ ] I have clearly listed my changes in the PR description
- [ ] I have made a test plan
- [ ] I have tested my changes according to the test plan:
  <!-- Put your test plan here: -->
  - [ ] ...

<details>
  <summary>Example test plan</summary>

  - [ ] Create from scratch and execute an agent with at least 3 blocks
  - [ ] Import an agent from file upload, and confirm it executes correctly
  - [ ] Upload agent to marketplace
  - [ ] Import an agent from marketplace and confirm it executes correctly
  - [ ] Edit an agent from monitor, and confirm it executes correctly
</details>

#### For configuration changes:

- [ ] `.env.default` is updated or already compatible with my changes
- [ ] `docker-compose.yml` is updated or already compatible with my changes
- [ ] I have included a list of my configuration changes in the PR description (under **Changes**)

<details>
  <summary>Examples of configuration changes</summary>

  - Changing ports
  - Adding new services that need to communicate with each other
  - Secrets or environment variable changes
  - New infrastructure changes, such as databases
</details>
.github/copilot-instructions.md (vendored, Normal file, 322 lines)
@@ -0,0 +1,322 @@
# GitHub Copilot Instructions for AutoGPT

This file provides comprehensive onboarding information for the GitHub Copilot coding agent to work efficiently with the AutoGPT repository.

## Repository Overview

**AutoGPT** is a powerful platform for creating, deploying, and managing continuous AI agents that automate complex workflows. This is a large monorepo (~150MB) containing multiple components:

- **AutoGPT Platform** (`autogpt_platform/`) - Main focus: Modern AI agent platform (Polyform Shield License)
- **Classic AutoGPT** (`classic/`) - Legacy agent system (MIT License)
- **Documentation** (`docs/`) - MkDocs-based documentation site
- **Infrastructure** - Docker configurations, CI/CD, and development tools

**Primary Languages & Frameworks:**

- **Backend**: Python 3.10-3.13, FastAPI, Prisma ORM, PostgreSQL, RabbitMQ
- **Frontend**: TypeScript, Next.js 15, React, Tailwind CSS, Radix UI
- **Development**: Docker, Poetry, pnpm, Playwright, Storybook

## Build and Validation Instructions

### Essential Setup Commands

**Always run these commands in the correct directory and in this order:**

1. **Initial Setup** (required once):

   ```bash
   # Clone and enter repository
   git clone <repo> && cd AutoGPT

   # Start all services (database, redis, rabbitmq, clamav)
   cd autogpt_platform && docker compose --profile local up deps --build --detach
   ```

2. **Backend Setup** (always run before backend development):

   ```bash
   cd autogpt_platform/backend
   poetry install                  # Install dependencies
   poetry run prisma migrate dev   # Run database migrations
   poetry run prisma generate      # Generate Prisma client
   ```

3. **Frontend Setup** (always run before frontend development):
   ```bash
   cd autogpt_platform/frontend
   pnpm install   # Install dependencies
   ```

### Runtime Requirements

**Critical:** Always ensure Docker services are running before starting development:

```bash
cd autogpt_platform && docker compose --profile local up deps --build --detach
```

**Python Version:** Use Python 3.11 (required; managed by Poetry via pyproject.toml)
**Node.js Version:** Use Node.js 21+ with the pnpm package manager

### Development Commands

**Backend Development:**

```bash
cd autogpt_platform/backend
poetry run serve                    # Start development server (port 8000)
poetry run test                     # Run all tests (takes ~5 minutes)
poetry run pytest path/to/test.py   # Run a specific test
poetry run format                   # Format code (Black + isort) - always run first
poetry run lint                     # Lint code (ruff) - run after format
```

**Frontend Development:**

```bash
cd autogpt_platform/frontend
pnpm dev         # Start development server (port 3000) - use for active development
pnpm build       # Build for production (only needed for E2E tests or deployment)
pnpm test        # Run Playwright E2E tests (requires build first)
pnpm test-ui     # Run tests with UI
pnpm format      # Format and lint code
pnpm storybook   # Start component development server
```

### Testing Strategy

**Backend Tests:**

- **Block Tests**: `poetry run pytest backend/blocks/test/test_block.py -xvs` (validates all blocks)
- **Specific Block**: `poetry run pytest 'backend/blocks/test/test_block.py::test_available_blocks[BlockName]' -xvs`
- **Snapshot Tests**: Use `--snapshot-update` when output changes; always review the result with `git diff`

**Frontend Tests:**

- **E2E Tests**: Always run `pnpm dev` before `pnpm test` (Playwright requires a running instance)
- **Component Tests**: Use Storybook for isolated component development

### Critical Validation Steps

**Before committing changes:**

1. Run `poetry run format` (backend) and `pnpm format` (frontend)
2. Ensure all tests pass in modified areas
3. Verify Docker services are still running
4. Check that database migrations apply cleanly

**Common Issues & Workarounds:**

- **Prisma issues**: Run `poetry run prisma generate` after schema changes
- **Permission errors**: Ensure Docker has proper permissions
- **Port conflicts**: Check the `docker-compose.yml` file for the current list of exposed ports. You can list all mapped ports with `docker compose ps`, for instance.
- **Test timeouts**: Backend tests can take 5+ minutes; use the `-x` flag to stop on the first failure

## Project Layout & Architecture

### Core Architecture

**AutoGPT Platform** (`autogpt_platform/`):

- `backend/` - FastAPI server with async support
  - `backend/backend/` - Core API logic
  - `backend/blocks/` - Agent execution blocks
  - `backend/data/` - Database models and schemas
  - `schema.prisma` - Database schema definition
- `frontend/` - Next.js application
  - `src/app/` - App Router pages and layouts
  - `src/components/` - Reusable React components
  - `src/lib/` - Utilities and configurations
- `autogpt_libs/` - Shared Python utilities
- `docker-compose.yml` - Development stack orchestration

**Key Configuration Files:**

- `pyproject.toml` - Python dependencies and tooling
- `package.json` - Node.js dependencies and scripts
- `schema.prisma` - Database schema and migrations
- `next.config.mjs` - Next.js configuration
- `tailwind.config.ts` - Styling configuration

### Security & Middleware

**Cache Protection**: The backend includes middleware that prevents sensitive data from being cached in browsers/proxies
**Authentication**: JWT-based with Supabase integration
**User ID Validation**: All data access requires user ID checks - verify this for any `data/*.py` changes (see the sketch below)
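
To make that expectation concrete, here is a minimal sketch of a user-ID-scoped read; the model, field, and helper names are illustrative assumptions, not the platform's actual API:

```python
# Hypothetical sketch of the user ID check convention; the model and field
# names below are assumptions, not the platform's actual schema.
class NotAuthorizedError(Exception):
    """Raised when a record does not exist or belongs to another user."""


async def get_graph_for_user(db, graph_id: str, user_id: str):
    # Scope the query by user_id so one user can never read another's data.
    graph = await db.graph.find_first(where={"id": graph_id, "userId": user_id})
    if graph is None:
        # Treat "not found" and "not yours" identically to avoid leaking existence.
        raise NotAuthorizedError(f"No graph {graph_id} for user {user_id}")
    return graph
```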

### Development Workflow

**GitHub Actions**: Multiple CI/CD workflows in `.github/workflows/`

- `platform-backend-ci.yml` - Backend testing and validation
- `platform-frontend-ci.yml` - Frontend testing and validation
- `platform-fullstack-ci.yml` - End-to-end integration tests

**Pre-commit Hooks**: Run linting and formatting checks
**Conventional Commits**: Use the format `type(scope): description` (e.g., `feat(backend): add API`)

### Key Source Files

**Backend Entry Points:**

- `backend/backend/server/server.py` - FastAPI application setup
- `backend/backend/data/` - Database models and user management
- `backend/blocks/` - Agent execution blocks and logic

**Frontend Entry Points:**

- `frontend/src/app/layout.tsx` - Root application layout
- `frontend/src/app/page.tsx` - Home page
- `frontend/src/lib/supabase/` - Authentication and database client

**Protected Routes**: Update `frontend/lib/supabase/middleware.ts` when adding protected routes

### Agent Block System

Agents are built using a visual block-based system where each block performs a single action. Blocks are defined in `backend/blocks/` and must include:

- Block definition with input/output schemas
- Execution logic with proper error handling
- Tests validating functionality

### Database & ORM

**Prisma ORM** with a PostgreSQL backend, including pgvector for embeddings:

- Schema in `schema.prisma`
- Migrations in `backend/migrations/`
- Always run `prisma migrate dev` and `prisma generate` after schema changes

## Environment Configuration

### Configuration Files Priority Order

1. **Backend**: `/backend/.env.default` → `/backend/.env` (user overrides)
2. **Frontend**: `/frontend/.env.default` → `/frontend/.env` (user overrides)
3. **Platform**: `/.env.default` (Supabase/shared) → `/.env` (user overrides)
4. Docker Compose `environment:` sections override file-based config
5. Shell environment variables have the highest precedence (illustrated below)
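
As an illustration of this layering only (not the platform's actual loader), later sources can be modeled as lower-priority mappings; the file paths below are examples:

```python
# Illustration of the precedence order above, using python-dotenv.
import os
from collections import ChainMap

from dotenv import dotenv_values

# In a ChainMap, earlier mappings win, so the shell environment (5) beats
# user overrides, which beat checked-in defaults. Inside a container, the
# docker-compose `environment:` entries (4) surface through os.environ.
config = ChainMap(
    os.environ,
    dotenv_values("backend/.env"),          # user overrides (example path)
    dotenv_values("backend/.env.default"),  # checked-in defaults (example path)
)

print(config.get("DATABASE_URL"))
```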

### Docker Environment Setup

- All services use hardcoded defaults (no `${VARIABLE}` substitutions)
- The `env_file` directive loads variables INTO containers at runtime
- Backend/Frontend services use YAML anchors for consistent configuration
- Copy `.env.default` files to `.env` for local development customization

## Advanced Development Patterns

### Adding New Blocks

1. Create a file in `/backend/backend/blocks/`
2. Inherit from the `Block` base class, with input/output schemas
3. Implement the `run` method with proper error handling
4. Generate the block UUID using `uuid.uuid4()`
5. Register it in the block registry
6. Write tests alongside the block implementation
7. Consider how inputs/outputs connect with other blocks in the graph editor (see the sketch below)
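
A minimal sketch of these steps follows. It assumes a `Block` base class and `BlockSchema` input/output models along the lines described above; the exact import path, constructor arguments, and registration mechanism are assumptions, so mirror an existing block in `/backend/backend/blocks/` rather than copying this verbatim:

```python
# Hypothetical block following steps 1-6; the base-class API is an assumption.
from backend.data.block import Block, BlockSchema  # assumed import path


class WordCountBlock(Block):
    class Input(BlockSchema):
        text: str

    class Output(BlockSchema):
        word_count: int
        error: str

    def __init__(self):
        super().__init__(
            # Step 4: generated once with uuid.uuid4(), then hard-coded:
            id="aaaaaaaa-bbbb-4ccc-8ddd-eeeeeeeeeeee",
            input_schema=WordCountBlock.Input,
            output_schema=WordCountBlock.Output,
        )

    def run(self, input_data: Input, **kwargs):
        # Step 3: execution logic with error handling; outputs are yielded
        # as (output_name, value) pairs.
        try:
            yield "word_count", len(input_data.text.split())
        except Exception as e:
            yield "error", str(e)
```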

### API Development

1. Update routes in `/backend/backend/server/routers/`
2. Add/update Pydantic models in the same directory
3. Write tests alongside the route files
4. For `data/*.py` changes, validate user ID checks
5. Run `poetry run test` to verify changes

### Frontend Development

**📖 Complete Frontend Guide**: See `autogpt_platform/frontend/CONTRIBUTING.md` and `autogpt_platform/frontend/.cursorrules` for comprehensive patterns and conventions.

**Quick Reference:**

**Component Structure:**

- Separate render logic from data/behavior
- Structure: `ComponentName/ComponentName.tsx` + `useComponentName.ts` + `helpers.ts`
- Exception: Small components (3-4 lines of logic) can be inline
- Render-only components can be direct files without folders

**Data Fetching:**

- Use generated API hooks from `@/app/api/__generated__/endpoints/`
- Generated via Orval from the backend OpenAPI spec
- Pattern: `use{Method}{Version}{OperationName}`
- Example: `useGetV2ListLibraryAgents`
- Regenerate with: `pnpm generate:api`
- **Never** use the deprecated `BackendAPI` or `src/lib/autogpt-server-api/*`

**Code Conventions:**

- Use function declarations for components and handlers (not arrow functions)
- Use arrow functions only for small inline lambdas (map, filter, etc.)
- Components: `PascalCase`; Hooks: `camelCase` with a `use` prefix
- No barrel files or `index.ts` re-exports
- Minimal comments (code should be self-documenting)

**Styling:**

- Use Tailwind CSS utilities only
- Use design system components from `src/components/` (atoms, molecules, organisms)
- Never use `src/components/__legacy__/*`
- Only use Phosphor Icons (`@phosphor-icons/react`)
- Prefer design tokens over hardcoded values

**Error Handling:**

- Render errors: Use the `<ErrorCard />` component
- Mutation errors: Display with toast notifications
- Manual exceptions: Use `Sentry.captureException()`
- Global error boundaries are already configured

**Testing:**

- Add/update Storybook stories for UI components (`pnpm storybook`)
- Run Playwright E2E tests with `pnpm test`
- Verify in Chromatic after the PR

**Architecture:**

- Default to client components ("use client")
- Use server components only for SEO or extreme TTFB needs
- Use React Query for server state (via generated hooks)
- Co-locate UI state in components/hooks

### Security Guidelines

**Cache Protection Middleware** (`/backend/backend/server/middleware/security.py`):

- Default: Disables caching for ALL endpoints with `Cache-Control: no-store, no-cache, must-revalidate, private`
- Uses an allow-list approach for cacheable paths (static assets, health checks, public pages)
- Prevents sensitive data from being cached in browsers/proxies
- Add new cacheable endpoints to `CACHEABLE_PATHS` (sketched below)
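
A minimal sketch of that default-deny, allow-list pattern, assuming a FastAPI HTTP middleware (the real implementation lives in `security.py`; the allow-list entries below are examples):

```python
# Sketch of the cache-protection middleware; example paths are assumptions.
from fastapi import FastAPI, Request

app = FastAPI()

CACHEABLE_PATHS = ("/health", "/static")  # example allow-list entries


@app.middleware("http")
async def cache_protection(request: Request, call_next):
    response = await call_next(request)
    if not request.url.path.startswith(CACHEABLE_PATHS):
        # Anything not explicitly allow-listed must not be cached.
        response.headers["Cache-Control"] = (
            "no-store, no-cache, must-revalidate, private"
        )
    return response
```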

### CI/CD Alignment

The repository has comprehensive CI workflows that test:

- **Backend**: Python 3.11-3.13, services (Redis/RabbitMQ/ClamAV), Prisma migrations, Poetry lock validation
- **Frontend**: Node.js 21, pnpm, Playwright with a Docker Compose stack, API schema validation
- **Integration**: Full-stack type checking and E2E testing

Match these patterns when developing locally; the Copilot setup environment mirrors these CI configurations.

## Collaboration with Other AI Assistants

This repository is actively developed with assistance from Claude (via CLAUDE.md files). When working on this codebase:

- Check for existing CLAUDE.md files that provide additional context
- Follow established patterns and conventions already in the codebase
- Maintain consistency with the existing code style and architecture
- Consider that changes may be reviewed and extended by both human developers and AI assistants

## Trust These Instructions

These instructions are comprehensive and tested. Only perform additional searches if:

1. The information here is incomplete for your specific task
2. You encounter errors not covered by the workarounds
3. You need to understand implementation details not covered above

For detailed platform development patterns, refer to `autogpt_platform/CLAUDE.md` and `AGENTS.md` in the repository root.
.github/dependabot.yml (vendored, Normal file, 153 lines)
@@ -0,0 +1,153 @@
version: 2
updates:
  # autogpt_libs (Poetry project)
  - package-ecosystem: "pip"
    directory: "autogpt_platform/autogpt_libs"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 10
    target-branch: "dev"
    commit-message:
      prefix: "chore(libs/deps)"
      prefix-development: "chore(libs/deps-dev)"
    ignore:
      - dependency-name: "poetry"
    groups:
      production-dependencies:
        dependency-type: "production"
        update-types:
          - "minor"
          - "patch"
      development-dependencies:
        dependency-type: "development"
        update-types:
          - "minor"
          - "patch"

  # backend (Poetry project)
  - package-ecosystem: "pip"
    directory: "autogpt_platform/backend"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 10
    target-branch: "dev"
    commit-message:
      prefix: "chore(backend/deps)"
      prefix-development: "chore(backend/deps-dev)"
    ignore:
      - dependency-name: "poetry"
    groups:
      production-dependencies:
        dependency-type: "production"
        update-types:
          - "minor"
          - "patch"
      development-dependencies:
        dependency-type: "development"
        update-types:
          - "minor"
          - "patch"

  # frontend (Next.js project)
  - package-ecosystem: "npm"
    directory: "autogpt_platform/frontend"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 10
    target-branch: "dev"
    commit-message:
      prefix: "chore(frontend/deps)"
      prefix-development: "chore(frontend/deps-dev)"
    groups:
      production-dependencies:
        dependency-type: "production"
        update-types:
          - "minor"
          - "patch"
      development-dependencies:
        dependency-type: "development"
        update-types:
          - "minor"
          - "patch"

  # infra (Terraform)
  - package-ecosystem: "terraform"
    directory: "autogpt_platform/infra"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 5
    target-branch: "dev"
    commit-message:
      prefix: "chore(infra/deps)"
      prefix-development: "chore(infra/deps-dev)"

    groups:
      production-dependencies:
        dependency-type: "production"
        update-types:
          - "minor"
          - "patch"
      development-dependencies:
        dependency-type: "development"
        update-types:
          - "minor"
          - "patch"

  # GitHub Actions
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 5
    target-branch: "dev"
    groups:
      production-dependencies:
        dependency-type: "production"
        update-types:
          - "minor"
          - "patch"
      development-dependencies:
        dependency-type: "development"
        update-types:
          - "minor"
          - "patch"

  # Docker
  - package-ecosystem: "docker"
    directory: "autogpt_platform/"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 5
    target-branch: "dev"
    groups:
      production-dependencies:
        dependency-type: "production"
        update-types:
          - "minor"
          - "patch"
      development-dependencies:
        dependency-type: "development"
        update-types:
          - "minor"
          - "patch"

  # Docs
  - package-ecosystem: "pip"
    directory: "docs/"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 1
    target-branch: "dev"
    commit-message:
      prefix: "chore(docs/deps)"
    groups:
      production-dependencies:
        dependency-type: "production"
        update-types:
          - "minor"
          - "patch"
      development-dependencies:
        dependency-type: "development"
        update-types:
          - "minor"
          - "patch"
.github/labeler.yml (vendored, Normal file, 33 lines)
@@ -0,0 +1,33 @@
Classic AutoGPT Agent:
- changed-files:
  - any-glob-to-any-file: classic/original_autogpt/**

Classic Benchmark:
- changed-files:
  - any-glob-to-any-file: classic/benchmark/**

Classic Frontend:
- changed-files:
  - any-glob-to-any-file: classic/frontend/**

Forge:
- changed-files:
  - any-glob-to-any-file: classic/forge/**

documentation:
- changed-files:
  - any-glob-to-any-file: docs/**

platform/frontend:
- changed-files:
  - any-glob-to-any-file: autogpt_platform/frontend/**

platform/backend:
- changed-files:
  - all-globs-to-any-file:
    - autogpt_platform/backend/**
    - '!autogpt_platform/backend/backend/blocks/**'

platform/blocks:
- changed-files:
  - any-glob-to-any-file: autogpt_platform/backend/backend/blocks/**
.github/workflows/classic-autogpt-ci.yml (vendored, Normal file, 145 lines)
@@ -0,0 +1,145 @@
name: Classic - AutoGPT CI

on:
  push:
    branches: [ master, dev, ci-test* ]
    paths:
      - '.github/workflows/classic-autogpt-ci.yml'
      - 'classic/original_autogpt/**'
  pull_request:
    branches: [ master, dev, release-* ]
    paths:
      - '.github/workflows/classic-autogpt-ci.yml'
      - 'classic/original_autogpt/**'

concurrency:
  group: ${{ format('classic-autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

defaults:
  run:
    shell: bash
    working-directory: classic/original_autogpt

jobs:
  test:
    permissions:
      contents: read
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10"]
        platform-os: [ubuntu, macos, macos-arm64, windows]
    runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}

    steps:
      # Quite slow on macOS (2~4 minutes to set up Docker)
      # - name: Set up Docker (macOS)
      #   if: runner.os == 'macOS'
      #   uses: crazy-max/ghaction-setup-docker@v3

      - name: Start MinIO service (Linux)
        if: runner.os == 'Linux'
        working-directory: '.'
        run: |
          docker pull minio/minio:edge-cicd
          docker run -d -p 9000:9000 minio/minio:edge-cicd

      - name: Start MinIO service (macOS)
        if: runner.os == 'macOS'
        working-directory: ${{ runner.temp }}
        run: |
          brew install minio/stable/minio
          mkdir data
          minio server ./data &

      # No MinIO on Windows:
      # - Windows doesn't support running Linux Docker containers
      # - It doesn't seem possible to start background processes on Windows. They are
      #   killed after the step returns.
      #   See: https://github.com/actions/runner/issues/598#issuecomment-2011890429

      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      - name: Configure git user Auto-GPT-Bot
        run: |
          git config --global user.name "Auto-GPT-Bot"
          git config --global user.email "github-bot@agpt.co"

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - id: get_date
        name: Get date
        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

      - name: Set up Python dependency cache
        # On Windows, unpacking cached dependencies takes longer than just installing them
        if: runner.os != 'Windows'
        uses: actions/cache@v4
        with:
          path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
          key: poetry-${{ runner.os }}-${{ hashFiles('classic/original_autogpt/poetry.lock') }}

      - name: Install Poetry (Unix)
        if: runner.os != 'Windows'
        run: |
          curl -sSL https://install.python-poetry.org | python3 -

          if [ "${{ runner.os }}" = "macOS" ]; then
            PATH="$HOME/.local/bin:$PATH"
            echo "$HOME/.local/bin" >> $GITHUB_PATH
          fi

      - name: Install Poetry (Windows)
        if: runner.os == 'Windows'
        shell: pwsh
        run: |
          (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -

          $env:PATH += ";$env:APPDATA\Python\Scripts"
          echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH

      - name: Install Python dependencies
        run: poetry install

      - name: Run pytest with coverage
        run: |
          poetry run pytest -vv \
            --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
            --numprocesses=logical --durations=10 \
            --junitxml=junit.xml -o junit_family=legacy \
            tests/unit tests/integration
        env:
          CI: true
          PLAIN_OUTPUT: True
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
          AWS_ACCESS_KEY_ID: minioadmin
          AWS_SECRET_ACCESS_KEY: minioadmin

      - name: Upload test results to Codecov
        if: ${{ !cancelled() }} # Run even if tests fail
        uses: codecov/test-results-action@v1
        with:
          token: ${{ secrets.CODECOV_TOKEN }}

      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          flags: autogpt-agent,${{ runner.os }}

      - name: Upload logs to artifact
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: test-logs
          path: classic/original_autogpt/logs/
.github/workflows/classic-autogpt-docker-cache-clean.yml (vendored, Normal file, 60 lines)
@@ -0,0 +1,60 @@
name: Classic - Purge Auto-GPT Docker CI cache

on:
  schedule:
    - cron: 20 4 * * 1,4

env:
  BASE_BRANCH: dev
  IMAGE_NAME: auto-gpt

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        build-type: [release, dev]
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - id: build
        name: Build image
        uses: docker/build-push-action@v6
        with:
          context: classic/
          file: classic/Dockerfile.autogpt
          build-args: BUILD_TYPE=${{ matrix.build-type }}
          load: true # save to docker images
          # use GHA cache as read-only
          cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max

      - name: Generate build report
        env:
          event_name: ${{ github.event_name }}
          event_ref: ${{ github.event.schedule }}

          build_type: ${{ matrix.build-type }}

          prod_branch: master
          dev_branch: dev
          repository: ${{ github.repository }}
          base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}

          current_ref: ${{ github.ref_name }}
          commit_hash: ${{ github.sha }}
          source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }}
          push_forced_label:

          new_commits_json: ${{ null }}
          compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}

          github_context_json: ${{ toJSON(github) }}
          job_env_json: ${{ toJSON(env) }}
          vars_json: ${{ toJSON(vars) }}

        run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
        continue-on-error: true
.github/workflows/classic-autogpt-docker-ci.yml (vendored, Normal file, 166 lines)
@@ -0,0 +1,166 @@
name: Classic - AutoGPT Docker CI

on:
  push:
    branches: [master, dev]
    paths:
      - '.github/workflows/classic-autogpt-docker-ci.yml'
      - 'classic/original_autogpt/**'
      - 'classic/forge/**'
  pull_request:
    branches: [ master, dev, release-* ]
    paths:
      - '.github/workflows/classic-autogpt-docker-ci.yml'
      - 'classic/original_autogpt/**'
      - 'classic/forge/**'

concurrency:
  group: ${{ format('classic-autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ github.event_name == 'pull_request' }}

defaults:
  run:
    working-directory: classic/original_autogpt

env:
  IMAGE_NAME: auto-gpt
  DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER && format('{0}/', secrets.DOCKER_USER) || '' }}auto-gpt
  DEV_IMAGE_TAG: latest-dev

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        build-type: [release, dev]
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - if: runner.debug
        run: |
          ls -al
          du -hs *

      - id: build
        name: Build image
        uses: docker/build-push-action@v6
        with:
          context: classic/
          file: classic/Dockerfile.autogpt
          build-args: BUILD_TYPE=${{ matrix.build-type }}
          tags: ${{ env.IMAGE_NAME }}
          labels: GIT_REVISION=${{ github.sha }}
          load: true # save to docker images
          # cache layers in GitHub Actions cache to speed up builds
          cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }}
          cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max

      - name: Generate build report
        env:
          event_name: ${{ github.event_name }}
          event_ref: ${{ github.event.ref }}
          event_ref_type: ${{ github.event.ref_type }}

          build_type: ${{ matrix.build-type }}

          prod_branch: master
          dev_branch: dev
          repository: ${{ github.repository }}
          base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}

          current_ref: ${{ github.ref_name }}
          commit_hash: ${{ github.event.after }}
          source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
          push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}

          new_commits_json: ${{ toJSON(github.event.commits) }}
          compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}

          github_context_json: ${{ toJSON(github) }}
          job_env_json: ${{ toJSON(env) }}
          vars_json: ${{ toJSON(vars) }}

        run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
        continue-on-error: true

  test:
    runs-on: ubuntu-latest
    timeout-minutes: 10

    services:
      minio:
        image: minio/minio:edge-cicd
        options: >
          --name=minio
          --health-interval=10s --health-timeout=5s --health-retries=3
          --health-cmd="curl -f http://localhost:9000/minio/health/live"

    steps:
      - name: Check out repository
        uses: actions/checkout@v4
        with:
          submodules: true

      - if: github.event_name == 'push'
        name: Log in to Docker hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_USER }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - id: build
        name: Build image
        uses: docker/build-push-action@v6
        with:
          context: classic/
          file: classic/Dockerfile.autogpt
          build-args: BUILD_TYPE=dev # include pytest
          tags: >
            ${{ env.IMAGE_NAME }},
            ${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}
          labels: GIT_REVISION=${{ github.sha }}
          load: true # save to docker images
          # cache layers in GitHub Actions cache to speed up builds
          cache-from: type=gha,scope=autogpt-docker-dev
          cache-to: type=gha,scope=autogpt-docker-dev,mode=max

      - id: test
        name: Run tests
        env:
          CI: true
          PLAIN_OUTPUT: True
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          S3_ENDPOINT_URL: http://minio:9000
          AWS_ACCESS_KEY_ID: minioadmin
          AWS_SECRET_ACCESS_KEY: minioadmin
        run: |
          set +e
          docker run --env CI --env OPENAI_API_KEY \
            --network container:minio \
            --env S3_ENDPOINT_URL --env AWS_ACCESS_KEY_ID --env AWS_SECRET_ACCESS_KEY \
            --entrypoint poetry ${{ env.IMAGE_NAME }} run \
            pytest -v --cov=autogpt --cov-branch --cov-report term-missing \
            --numprocesses=4 --durations=10 \
            tests/unit tests/integration 2>&1 | tee test_output.txt

          test_failure=${PIPESTATUS[0]}

          cat << $EOF >> $GITHUB_STEP_SUMMARY
          # Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌')
          \`\`\`
          $(cat test_output.txt)
          \`\`\`
          $EOF

          exit $test_failure

      - if: github.event_name == 'push' && github.ref_name == 'master'
        name: Push image to Docker Hub
        run: docker push ${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}
.github/workflows/classic-autogpt-docker-release.yml (vendored, Normal file, 87 lines)
@@ -0,0 +1,87 @@
name: Classic - AutoGPT Docker Release

on:
  release:
    types: [published, edited]

  workflow_dispatch:
    inputs:
      no_cache:
        type: boolean
        description: 'Build from scratch, without using cached layers'

env:
  IMAGE_NAME: auto-gpt
  DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER }}/auto-gpt

jobs:
  build:
    if: startsWith(github.ref, 'refs/tags/autogpt-')
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Log in to Docker hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_USER }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      # slashes are not allowed in image tags, but can appear in git branch or tag names
      - id: sanitize_tag
        name: Sanitize image tag
        run: |
          tag=${raw_tag//\//-}
          echo tag=${tag#autogpt-} >> $GITHUB_OUTPUT
        env:
          raw_tag: ${{ github.ref_name }}

      - id: build
        name: Build image
        uses: docker/build-push-action@v6
        with:
          context: classic/
          file: Dockerfile.autogpt
          build-args: BUILD_TYPE=release
          load: true # save to docker images
          # push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
          tags: >
            ${{ env.IMAGE_NAME }},
            ${{ env.DEPLOY_IMAGE_NAME }}:latest,
            ${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
          labels: GIT_REVISION=${{ github.sha }}

          # cache layers in GitHub Actions cache to speed up builds
          cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release
          cache-to: type=gha,scope=autogpt-docker-release,mode=max

      - name: Push image to Docker Hub
        run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}

      - name: Generate build report
        env:
          event_name: ${{ github.event_name }}
          event_ref: ${{ github.event.ref }}
          event_ref_type: ${{ github.event.ref_type }}
          inputs_no_cache: ${{ inputs.no_cache }}

          prod_branch: master
          dev_branch: dev
          repository: ${{ github.repository }}
          base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}

          ref_type: ${{ github.ref_type }}
          current_ref: ${{ github.ref_name }}
          commit_hash: ${{ github.sha }}
          source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}

          github_context_json: ${{ toJSON(github) }}
          job_env_json: ${{ toJSON(env) }}
          vars_json: ${{ toJSON(vars) }}

        run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY
        continue-on-error: true
.github/workflows/classic-autogpts-ci.yml (vendored, Normal file, 76 lines)
@@ -0,0 +1,76 @@
name: Classic - Agent smoke tests

on:
  workflow_dispatch:
  schedule:
    - cron: '0 8 * * *'
  push:
    branches: [ master, dev, ci-test* ]
    paths:
      - '.github/workflows/classic-autogpts-ci.yml'
      - 'classic/original_autogpt/**'
      - 'classic/forge/**'
      - 'classic/benchmark/**'
      - 'classic/run'
      - 'classic/cli.py'
      - 'classic/setup.py'
      - '!**/*.md'
  pull_request:
    branches: [ master, dev, release-* ]
    paths:
      - '.github/workflows/classic-autogpts-ci.yml'
      - 'classic/original_autogpt/**'
      - 'classic/forge/**'
      - 'classic/benchmark/**'
      - 'classic/run'
      - 'classic/cli.py'
      - 'classic/setup.py'
      - '!**/*.md'

defaults:
  run:
    shell: bash
    working-directory: classic

jobs:
  serve-agent-protocol:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        agent-name: [ original_autogpt ]
      fail-fast: false
    timeout-minutes: 20
    env:
      min-python-version: '3.10'
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      - name: Set up Python ${{ env.min-python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.min-python-version }}

      - name: Install Poetry
        working-directory: ./classic/${{ matrix.agent-name }}/
        run: |
          curl -sSL https://install.python-poetry.org | python -

      - name: Run regression tests
        run: |
          ./run agent start ${{ matrix.agent-name }}
          cd ${{ matrix.agent-name }}
          poetry run agbenchmark --mock --test=BasicRetrieval --test=Battleship --test=WebArenaTask_0
          poetry run agbenchmark --test=WriteFile
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          AGENT_NAME: ${{ matrix.agent-name }}
          REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
          HELICONE_CACHE_ENABLED: false
          HELICONE_PROPERTY_AGENT: ${{ matrix.agent-name }}
          REPORTS_FOLDER: ${{ format('../../reports/{0}', matrix.agent-name) }}
          TELEMETRY_ENVIRONMENT: autogpt-ci
          TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
.github/workflows/classic-benchmark-ci.yml (vendored, Normal file, 176 lines)
@@ -0,0 +1,176 @@
name: Classic - AGBenchmark CI

on:
  push:
    branches: [ master, dev, ci-test* ]
    paths:
      - 'classic/benchmark/**'
      - '!classic/benchmark/reports/**'
      - .github/workflows/classic-benchmark-ci.yml
  pull_request:
    branches: [ master, dev, release-* ]
    paths:
      - 'classic/benchmark/**'
      - '!classic/benchmark/reports/**'
      - .github/workflows/classic-benchmark-ci.yml

concurrency:
  group: ${{ format('benchmark-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

defaults:
  run:
    shell: bash

env:
  min-python-version: '3.10'

jobs:
  test:
    permissions:
      contents: read
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10"]
        platform-os: [ubuntu, macos, macos-arm64, windows]
    runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
    defaults:
      run:
        shell: bash
        working-directory: classic/benchmark
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Set up Python dependency cache
        # On Windows, unpacking cached dependencies takes longer than just installing them
        if: runner.os != 'Windows'
        uses: actions/cache@v4
        with:
          path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
          key: poetry-${{ runner.os }}-${{ hashFiles('classic/benchmark/poetry.lock') }}

      - name: Install Poetry (Unix)
        if: runner.os != 'Windows'
        run: |
          curl -sSL https://install.python-poetry.org | python3 -

          if [ "${{ runner.os }}" = "macOS" ]; then
            PATH="$HOME/.local/bin:$PATH"
            echo "$HOME/.local/bin" >> $GITHUB_PATH
          fi

      - name: Install Poetry (Windows)
        if: runner.os == 'Windows'
        shell: pwsh
        run: |
          (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -

          $env:PATH += ";$env:APPDATA\Python\Scripts"
          echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH

      - name: Install Python dependencies
        run: poetry install

      - name: Run pytest with coverage
        run: |
          poetry run pytest -vv \
            --cov=agbenchmark --cov-branch --cov-report term-missing --cov-report xml \
            --durations=10 \
            --junitxml=junit.xml -o junit_family=legacy \
            tests
        env:
          CI: true
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}

      - name: Upload test results to Codecov
        if: ${{ !cancelled() }} # Run even if tests fail
        uses: codecov/test-results-action@v1
        with:
          token: ${{ secrets.CODECOV_TOKEN }}

      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          flags: agbenchmark,${{ runner.os }}

  self-test-with-agent:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        agent-name: [forge]
      fail-fast: false
    timeout-minutes: 20
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      - name: Set up Python ${{ env.min-python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.min-python-version }}

      - name: Install Poetry
        run: |
          curl -sSL https://install.python-poetry.org | python -

      - name: Run regression tests
        working-directory: classic
        run: |
          ./run agent start ${{ matrix.agent-name }}
          cd ${{ matrix.agent-name }}

          set +e # Ignore non-zero exit codes and continue execution
          echo "Running the following command: poetry run agbenchmark --maintain --mock"
          poetry run agbenchmark --maintain --mock
          EXIT_CODE=$?
          set -e # Stop ignoring non-zero exit codes
          # Check if the exit code was 5, and if so, exit with 0 instead
          if [ $EXIT_CODE -eq 5 ]; then
            echo "regression_tests.json is empty."
          fi

          echo "Running the following command: poetry run agbenchmark --mock"
          poetry run agbenchmark --mock

          echo "Running the following command: poetry run agbenchmark --mock --category=data"
          poetry run agbenchmark --mock --category=data

          echo "Running the following command: poetry run agbenchmark --mock --category=coding"
          poetry run agbenchmark --mock --category=coding

          # echo "Running the following command: poetry run agbenchmark --test=WriteFile"
          # poetry run agbenchmark --test=WriteFile
          cd ../benchmark
          poetry install
          echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed"
          export BUILD_SKILL_TREE=true

          # poetry run agbenchmark --mock

          # CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
          # if [ ! -z "$CHANGED" ]; then
          #   echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
          #   echo "$CHANGED"
          #   exit 1
          # else
          #   echo "No unstaged changes."
          # fi
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
          TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
55
.github/workflows/classic-benchmark_publish_package.yml
vendored
Normal file
@@ -0,0 +1,55 @@
name: Classic - Publish to PyPI

on:
  workflow_dispatch:

jobs:
  deploy:
    runs-on: ubuntu-latest
    permissions:
      contents: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          submodules: true
          fetch-depth: 0

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: 3.8

      - name: Install Poetry
        working-directory: ./classic/benchmark/
        run: |
          curl -sSL https://install.python-poetry.org | python3 -
          echo "$HOME/.poetry/bin" >> $GITHUB_PATH

      - name: Build project for distribution
        working-directory: ./classic/benchmark/
        run: poetry build

      - name: Install dependencies
        working-directory: ./classic/benchmark/
        run: poetry install

      - name: Check Version
        working-directory: ./classic/benchmark/
        id: check-version
        run: |
          echo version=$(poetry version --short) >> $GITHUB_OUTPUT

      - name: Create Release
        uses: ncipollo/release-action@v1
        with:
          artifacts: "classic/benchmark/dist/*"
          token: ${{ secrets.GITHUB_TOKEN }}
          draft: false
          generateReleaseNotes: false
          tag: agbenchmark-v${{ steps.check-version.outputs.version }}
          commit: master

      - name: Build and publish
        working-directory: ./classic/benchmark/
        run: poetry publish -u __token__ -p ${{ secrets.PYPI_API_TOKEN }}
243
.github/workflows/classic-forge-ci.yml
vendored
Normal file
@@ -0,0 +1,243 @@
name: Classic - Forge CI

on:
  push:
    branches: [ master, dev, ci-test* ]
    paths:
      - '.github/workflows/classic-forge-ci.yml'
      - 'classic/forge/**'
      - '!classic/forge/tests/vcr_cassettes'
  pull_request:
    branches: [ master, dev, release-* ]
    paths:
      - '.github/workflows/classic-forge-ci.yml'
      - 'classic/forge/**'
      - '!classic/forge/tests/vcr_cassettes'

concurrency:
  group: ${{ format('forge-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

defaults:
  run:
    shell: bash
    working-directory: classic/forge

jobs:
  test:
    permissions:
      contents: read
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10"]
        platform-os: [ubuntu, macos, macos-arm64, windows]
    runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}

    steps:
      # Quite slow on macOS (2~4 minutes to set up Docker)
      # - name: Set up Docker (macOS)
      #   if: runner.os == 'macOS'
      #   uses: crazy-max/ghaction-setup-docker@v3

      - name: Start MinIO service (Linux)
        if: runner.os == 'Linux'
        working-directory: '.'
        run: |
          docker pull minio/minio:edge-cicd
          docker run -d -p 9000:9000 minio/minio:edge-cicd

      - name: Start MinIO service (macOS)
        if: runner.os == 'macOS'
        working-directory: ${{ runner.temp }}
        run: |
          brew install minio/stable/minio
          mkdir data
          minio server ./data &

      # No MinIO on Windows:
      # - Windows doesn't support running Linux Docker containers
      # - It doesn't seem possible to start background processes on Windows. They are
      #   killed after the step returns.
      #   See: https://github.com/actions/runner/issues/598#issuecomment-2011890429

      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      - name: Checkout cassettes
        if: ${{ startsWith(github.event_name, 'pull_request') }}
        env:
          PR_BASE: ${{ github.event.pull_request.base.ref }}
          PR_BRANCH: ${{ github.event.pull_request.head.ref }}
          PR_AUTHOR: ${{ github.event.pull_request.user.login }}
        run: |
          cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
          cassette_base_branch="${PR_BASE}"
          cd tests/vcr_cassettes

          if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then
            cassette_base_branch="master"
          fi

          if git ls-remote --exit-code --heads origin $cassette_branch ; then
            git fetch origin $cassette_branch
            git fetch origin $cassette_base_branch

            git checkout $cassette_branch

            # Pick non-conflicting cassette updates from the base branch
            git merge --no-commit --strategy-option=ours origin/$cassette_base_branch
            echo "Using cassettes from mirror branch '$cassette_branch'," \
              "synced to upstream branch '$cassette_base_branch'."
          else
            git checkout -b $cassette_branch
            echo "Branch '$cassette_branch' does not exist in cassette submodule." \
              "Using cassettes from '$cassette_base_branch'."
          fi

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Set up Python dependency cache
        # On Windows, unpacking cached dependencies takes longer than just installing them
        if: runner.os != 'Windows'
        uses: actions/cache@v4
        with:
          path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
          key: poetry-${{ runner.os }}-${{ hashFiles('classic/forge/poetry.lock') }}

      - name: Install Poetry (Unix)
        if: runner.os != 'Windows'
        run: |
          curl -sSL https://install.python-poetry.org | python3 -

          if [ "${{ runner.os }}" = "macOS" ]; then
            PATH="$HOME/.local/bin:$PATH"
            echo "$HOME/.local/bin" >> $GITHUB_PATH
          fi

      - name: Install Poetry (Windows)
        if: runner.os == 'Windows'
        shell: pwsh
        run: |
          (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -

          $env:PATH += ";$env:APPDATA\Python\Scripts"
          echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH

      - name: Install Python dependencies
        run: poetry install

      - name: Run pytest with coverage
        run: |
          poetry run pytest -vv \
            --cov=forge --cov-branch --cov-report term-missing --cov-report xml \
            --durations=10 \
            --junitxml=junit.xml -o junit_family=legacy \
            forge
        env:
          CI: true
          PLAIN_OUTPUT: True
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
          AWS_ACCESS_KEY_ID: minioadmin
          AWS_SECRET_ACCESS_KEY: minioadmin

      - name: Upload test results to Codecov
        if: ${{ !cancelled() }}  # Run even if tests fail
        uses: codecov/test-results-action@v1
        with:
          token: ${{ secrets.CODECOV_TOKEN }}

      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          flags: forge,${{ runner.os }}

      - id: setup_git_auth
        name: Set up git token authentication
        # Cassettes may be pushed even when tests fail
        if: success() || failure()
        run: |
          config_key="http.${{ github.server_url }}/.extraheader"
          if [ "${{ runner.os }}" = 'macOS' ]; then
            base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64)
          else
            base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)
          fi

          git config "$config_key" \
            "Authorization: Basic $base64_pat"

          cd tests/vcr_cassettes
          git config "$config_key" \
            "Authorization: Basic $base64_pat"

          echo "config_key=$config_key" >> $GITHUB_OUTPUT

      - id: push_cassettes
        name: Push updated cassettes
        # For pull requests, push updated cassettes even when tests fail
        if: github.event_name == 'push' || (! github.event.pull_request.head.repo.fork && (success() || failure()))
        env:
          PR_BRANCH: ${{ github.event.pull_request.head.ref }}
          PR_AUTHOR: ${{ github.event.pull_request.user.login }}
        run: |
          if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then
            is_pull_request=true
            cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
          else
            cassette_branch="${{ github.ref_name }}"
          fi

          cd tests/vcr_cassettes
          # Commit & push changes to cassettes if any
          if ! git diff --quiet; then
            git add .
            git commit -m "Auto-update cassettes"
            git push origin HEAD:$cassette_branch
            if [ ! $is_pull_request ]; then
              cd ../..
              git add tests/vcr_cassettes
              git commit -m "Update cassette submodule"
              git push origin HEAD:$cassette_branch
            fi
            echo "updated=true" >> $GITHUB_OUTPUT
          else
            echo "updated=false" >> $GITHUB_OUTPUT
            echo "No cassette changes to commit"
          fi

      - name: Post Set up git token auth
        if: steps.setup_git_auth.outcome == 'success'
        run: |
          git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
          git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'

      - name: Apply "behaviour change" label and comment on PR
        if: ${{ startsWith(github.event_name, 'pull_request') }}
        run: |
          PR_NUMBER="${{ github.event.pull_request.number }}"
          TOKEN="${{ secrets.PAT_REVIEW }}"
          REPO="${{ github.repository }}"

          if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then
            echo "Adding label and comment..."
            echo $TOKEN | gh auth login --with-token
            gh issue edit $PR_NUMBER --add-label "behaviour change"
            gh issue comment $PR_NUMBER --body "You changed AutoGPT's behaviour on ${{ runner.os }}. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
          fi

      - name: Upload logs to artifact
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: test-logs
          path: classic/forge/logs/
60
.github/workflows/classic-frontend-ci.yml
vendored
Normal file
@@ -0,0 +1,60 @@
name: Classic - Frontend CI/CD

on:
  push:
    branches:
      - master
      - dev
      - 'ci-test*' # This will match any branch that starts with "ci-test"
    paths:
      - 'classic/frontend/**'
      - '.github/workflows/classic-frontend-ci.yml'
  pull_request:
    paths:
      - 'classic/frontend/**'
      - '.github/workflows/classic-frontend-ci.yml'

jobs:
  build:
    permissions:
      contents: write
      pull-requests: write
    runs-on: ubuntu-latest
    env:
      BUILD_BRANCH: ${{ format('classic-frontend-build/{0}', github.ref_name) }}

    steps:
      - name: Checkout Repo
        uses: actions/checkout@v4

      - name: Setup Flutter
        uses: subosito/flutter-action@v2
        with:
          flutter-version: '3.13.2'

      - name: Build Flutter to Web
        run: |
          cd classic/frontend
          flutter build web --base-href /app/

      # - name: Commit and Push to ${{ env.BUILD_BRANCH }}
      #   if: github.event_name == 'push'
      #   run: |
      #     git config --local user.email "action@github.com"
      #     git config --local user.name "GitHub Action"
      #     git add classic/frontend/build/web
      #     git checkout -B ${{ env.BUILD_BRANCH }}
      #     git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
      #     git push -f origin ${{ env.BUILD_BRANCH }}

      - name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
        if: github.event_name == 'push'
        uses: peter-evans/create-pull-request@v7
        with:
          add-paths: classic/frontend/build/web
          base: ${{ github.ref_name }}
          branch: ${{ env.BUILD_BRANCH }}
          delete-branch: true
          title: "Update frontend build in `${{ github.ref_name }}`"
          body: "This PR updates the frontend build based on commit ${{ github.sha }}."
          commit-message: "Update frontend build based on commit ${{ github.sha }}"
151
.github/workflows/classic-python-checks.yml
vendored
Normal file
@@ -0,0 +1,151 @@
name: Classic - Python checks

on:
  push:
    branches: [ master, dev, ci-test* ]
    paths:
      - '.github/workflows/classic-python-checks-ci.yml'
      - 'classic/original_autogpt/**'
      - 'classic/forge/**'
      - 'classic/benchmark/**'
      - '**.py'
      - '!classic/forge/tests/vcr_cassettes'
  pull_request:
    branches: [ master, dev, release-* ]
    paths:
      - '.github/workflows/classic-python-checks-ci.yml'
      - 'classic/original_autogpt/**'
      - 'classic/forge/**'
      - 'classic/benchmark/**'
      - '**.py'
      - '!classic/forge/tests/vcr_cassettes'

concurrency:
  group: ${{ format('classic-python-checks-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

defaults:
  run:
    shell: bash

jobs:
  get-changed-parts:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - id: changes-in
        name: Determine affected subprojects
        uses: dorny/paths-filter@v3
        with:
          filters: |
            original_autogpt:
              - classic/original_autogpt/autogpt/**
              - classic/original_autogpt/tests/**
              - classic/original_autogpt/poetry.lock
            forge:
              - classic/forge/forge/**
              - classic/forge/tests/**
              - classic/forge/poetry.lock
            benchmark:
              - classic/benchmark/agbenchmark/**
              - classic/benchmark/tests/**
              - classic/benchmark/poetry.lock
    outputs:
      changed-parts: ${{ steps.changes-in.outputs.changes }}

  lint:
    needs: get-changed-parts
    runs-on: ubuntu-latest
    env:
      min-python-version: "3.10"

    strategy:
      matrix:
        sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
      fail-fast: false

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Python ${{ env.min-python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.min-python-version }}

      - name: Set up Python dependency cache
        uses: actions/cache@v4
        with:
          path: ~/.cache/pypoetry
          key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }}

      - name: Install Poetry
        run: curl -sSL https://install.python-poetry.org | python3 -

      # Install dependencies

      - name: Install Python dependencies
        run: poetry -C classic/${{ matrix.sub-package }} install

      # Lint

      - name: Lint (isort)
        run: poetry run isort --check .
        working-directory: classic/${{ matrix.sub-package }}

      - name: Lint (Black)
        if: success() || failure()
        run: poetry run black --check .
        working-directory: classic/${{ matrix.sub-package }}

      - name: Lint (Flake8)
        if: success() || failure()
        run: poetry run flake8 .
        working-directory: classic/${{ matrix.sub-package }}

  types:
    needs: get-changed-parts
    runs-on: ubuntu-latest
    env:
      min-python-version: "3.10"

    strategy:
      matrix:
        sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
      fail-fast: false

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Python ${{ env.min-python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.min-python-version }}

      - name: Set up Python dependency cache
        uses: actions/cache@v4
        with:
          path: ~/.cache/pypoetry
          key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }}

      - name: Install Poetry
        run: curl -sSL https://install.python-poetry.org | python3 -

      # Install dependencies

      - name: Install Python dependencies
        run: poetry -C classic/${{ matrix.sub-package }} install

      # Typecheck

      - name: Typecheck
        if: success() || failure()
        run: poetry run pyright
        working-directory: classic/${{ matrix.sub-package }}
97
.github/workflows/claude-ci-failure-auto-fix.yml
vendored
Normal file
@@ -0,0 +1,97 @@
name: Auto Fix CI Failures

on:
  workflow_run:
    workflows: ["CI"]
    types:
      - completed

permissions:
  contents: write
  pull-requests: write
  actions: read
  issues: write
  id-token: write # Required for OIDC token exchange

jobs:
  auto-fix:
    if: |
      github.event.workflow_run.conclusion == 'failure' &&
      github.event.workflow_run.pull_requests[0] &&
      !startsWith(github.event.workflow_run.head_branch, 'claude-auto-fix-ci-')
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.workflow_run.head_branch }}
          fetch-depth: 0
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Setup git identity
        run: |
          git config --global user.email "claude[bot]@users.noreply.github.com"
          git config --global user.name "claude[bot]"

      - name: Create fix branch
        id: branch
        run: |
          BRANCH_NAME="claude-auto-fix-ci-${{ github.event.workflow_run.head_branch }}-${{ github.run_id }}"
          git checkout -b "$BRANCH_NAME"
          echo "branch_name=$BRANCH_NAME" >> $GITHUB_OUTPUT

      - name: Get CI failure details
        id: failure_details
        uses: actions/github-script@v7
        with:
          script: |
            const run = await github.rest.actions.getWorkflowRun({
              owner: context.repo.owner,
              repo: context.repo.repo,
              run_id: ${{ github.event.workflow_run.id }}
            });

            const jobs = await github.rest.actions.listJobsForWorkflowRun({
              owner: context.repo.owner,
              repo: context.repo.repo,
              run_id: ${{ github.event.workflow_run.id }}
            });

            const failedJobs = jobs.data.jobs.filter(job => job.conclusion === 'failure');

            let errorLogs = [];
            for (const job of failedJobs) {
              const logs = await github.rest.actions.downloadJobLogsForWorkflowRun({
                owner: context.repo.owner,
                repo: context.repo.repo,
                job_id: job.id
              });
              errorLogs.push({
                jobName: job.name,
                logs: logs.data
              });
            }

            return {
              runUrl: run.data.html_url,
              failedJobs: failedJobs.map(j => j.name),
              errorLogs: errorLogs
            };

      - name: Fix CI failures with Claude
        id: claude
        uses: anthropics/claude-code-action@v1
        with:
          prompt: |
            /fix-ci
            Failed CI Run: ${{ fromJSON(steps.failure_details.outputs.result).runUrl }}
            Failed Jobs: ${{ join(fromJSON(steps.failure_details.outputs.result).failedJobs, ', ') }}
            PR Number: ${{ github.event.workflow_run.pull_requests[0].number }}
            Branch Name: ${{ steps.branch.outputs.branch_name }}
            Base Branch: ${{ github.event.workflow_run.head_branch }}
            Repository: ${{ github.repository }}

            Error logs:
            ${{ toJSON(fromJSON(steps.failure_details.outputs.result).errorLogs) }}
          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
          claude_args: "--allowedTools 'Edit,MultiEdit,Write,Read,Glob,Grep,LS,Bash(git:*),Bash(bun:*),Bash(npm:*),Bash(npx:*),Bash(gh:*)'"
379
.github/workflows/claude-dependabot.yml
vendored
Normal file
@@ -0,0 +1,379 @@
# Claude Dependabot PR Review Workflow
#
# This workflow automatically runs Claude analysis on Dependabot PRs to:
# - Identify dependency changes and their versions
# - Look up changelogs for updated packages
# - Assess breaking changes and security impacts
# - Provide actionable recommendations for the development team
#
# Triggered on: Dependabot PRs (opened, synchronize)
# Requirements: ANTHROPIC_API_KEY secret must be configured

name: Claude Dependabot PR Review

on:
  pull_request:
    types: [opened, synchronize]

jobs:
  dependabot-review:
    # Only run on Dependabot PRs
    if: github.actor == 'dependabot[bot]'
    runs-on: ubuntu-latest
    timeout-minutes: 30

    permissions:
      contents: write
      pull-requests: read
      issues: read
      id-token: write
      actions: read # Required for CI access
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 1

      # Backend Python/Poetry setup (mirrors platform-backend-ci.yml)
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.11" # Use standard version matching CI

      - name: Set up Python dependency cache
        uses: actions/cache@v4
        with:
          path: ~/.cache/pypoetry
          key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}

      - name: Install Poetry
        run: |
          # Extract Poetry version from backend/poetry.lock (matches CI)
          cd autogpt_platform/backend
          HEAD_POETRY_VERSION=$(python3 ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
          echo "Found Poetry version ${HEAD_POETRY_VERSION} in backend/poetry.lock"

          # Install Poetry
          curl -sSL https://install.python-poetry.org | POETRY_VERSION=$HEAD_POETRY_VERSION python3 -

          # Add Poetry to PATH
          echo "$HOME/.local/bin" >> $GITHUB_PATH

      - name: Check poetry.lock
        working-directory: autogpt_platform/backend
        run: |
          poetry lock
          if ! git diff --quiet --ignore-matching-lines="^# " poetry.lock; then
            echo "Warning: poetry.lock not up to date, but continuing for setup"
            git checkout poetry.lock # Reset for clean setup
          fi

      - name: Install Python dependencies
        working-directory: autogpt_platform/backend
        run: poetry install

      - name: Generate Prisma Client
        working-directory: autogpt_platform/backend
        run: poetry run prisma generate

      # Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "22"

      - name: Enable corepack
        run: corepack enable

      - name: Set pnpm store directory
        run: |
          pnpm config set store-dir ~/.pnpm-store
          echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV

      - name: Cache frontend dependencies
        uses: actions/cache@v4
        with:
          path: ~/.pnpm-store
          key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
          restore-keys: |
            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
            ${{ runner.os }}-pnpm-

      - name: Install JavaScript dependencies
        working-directory: autogpt_platform/frontend
        run: pnpm install --frozen-lockfile

      # Install Playwright browsers for frontend testing
      # NOTE: Disabled to save ~1 minute of setup time. Re-enable if Copilot needs browser automation (e.g., for MCP)
      # - name: Install Playwright browsers
      #   working-directory: autogpt_platform/frontend
      #   run: pnpm playwright install --with-deps chromium

      # Docker setup for development environment
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Copy default environment files
        working-directory: autogpt_platform
        run: |
          # Copy default environment files for development
          cp .env.default .env
          cp backend/.env.default backend/.env
          cp frontend/.env.default frontend/.env

      # Phase 1: Cache and load Docker images for faster setup
      - name: Set up Docker image cache
        id: docker-cache
        uses: actions/cache@v4
        with:
          path: ~/docker-cache
          # Use a versioned key for cache invalidation when image list changes
          key: docker-images-v2-${{ runner.os }}-${{ hashFiles('.github/workflows/copilot-setup-steps.yml') }}
          restore-keys: |
            docker-images-v2-${{ runner.os }}-
            docker-images-v1-${{ runner.os }}-

      - name: Load or pull Docker images
        working-directory: autogpt_platform
        run: |
          mkdir -p ~/docker-cache

          # Define image list for easy maintenance
          IMAGES=(
            "redis:latest"
            "rabbitmq:management"
            "clamav/clamav-debian:latest"
            "busybox:latest"
            "kong:2.8.1"
            "supabase/gotrue:v2.170.0"
            "supabase/postgres:15.8.1.049"
            "supabase/postgres-meta:v0.86.1"
            "supabase/studio:20250224-d10db0f"
          )

          # Check if any cached tar files exist (more reliable than cache-hit)
          if ls ~/docker-cache/*.tar 1> /dev/null 2>&1; then
            echo "Docker cache found, loading images in parallel..."
            for image in "${IMAGES[@]}"; do
              # Convert image name to filename (replace : and / with -)
              filename=$(echo "$image" | tr ':/' '--')
              if [ -f ~/docker-cache/${filename}.tar ]; then
                echo "Loading $image..."
                docker load -i ~/docker-cache/${filename}.tar || echo "Warning: Failed to load $image from cache" &
              fi
            done
            wait
            echo "All cached images loaded"
          else
            echo "No Docker cache found, pulling images in parallel..."
            # Pull all images in parallel
            for image in "${IMAGES[@]}"; do
              docker pull "$image" &
            done
            wait

            # Only save cache on main branches (not PRs) to avoid cache pollution
            if [[ "${{ github.ref }}" == "refs/heads/master" ]] || [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then
              echo "Saving Docker images to cache in parallel..."
              for image in "${IMAGES[@]}"; do
                # Convert image name to filename (replace : and / with -)
                filename=$(echo "$image" | tr ':/' '--')
                echo "Saving $image..."
                docker save -o ~/docker-cache/${filename}.tar "$image" || echo "Warning: Failed to save $image" &
              done
              wait
              echo "Docker image cache saved"
            else
              echo "Skipping cache save for PR/feature branch"
            fi
          fi

          echo "Docker images ready for use"

      # Phase 2: Build migrate service with GitHub Actions cache
      - name: Build migrate Docker image with cache
        working-directory: autogpt_platform
        run: |
          # Build the migrate image with buildx for GHA caching
          docker buildx build \
            --cache-from type=gha \
            --cache-to type=gha,mode=max \
            --target migrate \
            --tag autogpt_platform-migrate:latest \
            --load \
            -f backend/Dockerfile \
            ..

      # Start services using pre-built images
      - name: Start Docker services for development
        working-directory: autogpt_platform
        run: |
          # Start essential services (migrate image already built with correct tag)
          docker compose --profile local up deps --no-build --detach
          echo "Waiting for services to be ready..."

          # Wait for database to be ready
          echo "Checking database readiness..."
          timeout 30 sh -c 'until docker compose exec -T db pg_isready -U postgres 2>/dev/null; do
            echo "  Waiting for database..."
            sleep 2
          done' && echo "✅ Database is ready" || echo "⚠️ Database ready check timeout after 30s, continuing..."

          # Check migrate service status
          echo "Checking migration status..."
          docker compose ps migrate || echo "  Migrate service not visible in ps output"

          # Wait for migrate service to complete
          echo "Waiting for migrations to complete..."
          timeout 30 bash -c '
            ATTEMPTS=0
            while [ $ATTEMPTS -lt 15 ]; do
              ATTEMPTS=$((ATTEMPTS + 1))

              # Check using docker directly (more reliable than docker compose ps)
              CONTAINER_STATUS=$(docker ps -a --filter "label=com.docker.compose.service=migrate" --format "{{.Status}}" | head -1)

              if [ -z "$CONTAINER_STATUS" ]; then
                echo "  Attempt $ATTEMPTS: Migrate container not found yet..."
              elif echo "$CONTAINER_STATUS" | grep -q "Exited (0)"; then
                echo "✅ Migrations completed successfully"
                docker compose logs migrate --tail=5 2>/dev/null || true
                exit 0
              elif echo "$CONTAINER_STATUS" | grep -q "Exited ([1-9]"; then
                EXIT_CODE=$(echo "$CONTAINER_STATUS" | grep -oE "Exited \([0-9]+\)" | grep -oE "[0-9]+")
                echo "❌ Migrations failed with exit code: $EXIT_CODE"
                echo "Migration logs:"
                docker compose logs migrate --tail=20 2>/dev/null || true
                exit 1
              elif echo "$CONTAINER_STATUS" | grep -q "Up"; then
                echo "  Attempt $ATTEMPTS: Migrate container is running... ($CONTAINER_STATUS)"
              else
                echo "  Attempt $ATTEMPTS: Migrate container status: $CONTAINER_STATUS"
              fi

              sleep 2
            done

            echo "⚠️ Timeout: Could not determine migration status after 30 seconds"
            echo "Final container check:"
            docker ps -a --filter "label=com.docker.compose.service=migrate" || true
            echo "Migration logs (if available):"
            docker compose logs migrate --tail=10 2>/dev/null || echo "  No logs available"
          ' || echo "⚠️ Migration check completed with warnings, continuing..."

          # Brief wait for other services to stabilize
          echo "Waiting 5 seconds for other services to stabilize..."
          sleep 5

      # Verify installations and provide environment info
      - name: Verify setup and show environment info
        run: |
          echo "=== Python Setup ==="
          python --version
          poetry --version

          echo "=== Node.js Setup ==="
          node --version
          pnpm --version

          echo "=== Additional Tools ==="
          docker --version
          docker compose version
          gh --version || true

          echo "=== Services Status ==="
          cd autogpt_platform
          docker compose ps || true

          echo "=== Backend Dependencies ==="
          cd backend
          poetry show | head -10 || true

          echo "=== Frontend Dependencies ==="
          cd ../frontend
          pnpm list --depth=0 | head -10 || true

          echo "=== Environment Files ==="
          ls -la ../.env* || true
          ls -la .env* || true
          ls -la ../backend/.env* || true

          echo "✅ AutoGPT Platform development environment setup complete!"
          echo "🚀 Ready for development with Docker services running"
          echo "📝 Backend server: poetry run serve (port 8000)"
          echo "🌐 Frontend server: pnpm dev (port 3000)"

      - name: Run Claude Dependabot Analysis
        id: claude_review
        uses: anthropics/claude-code-action@v1
        with:
          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
          claude_args: |
            --allowedTools "Bash(npm:*),Bash(pnpm:*),Bash(poetry:*),Bash(git:*),Edit,Replace,NotebookEditCell,mcp__github_inline_comment__create_inline_comment,Bash(gh pr comment:*), Bash(gh pr diff:*), Bash(gh pr view:*)"
          prompt: |
            You are Claude, an AI assistant specialized in reviewing Dependabot dependency update PRs.

            Your primary tasks are:
            1. **Analyze the dependency changes** in this Dependabot PR
            2. **Look up changelogs** for all updated dependencies to understand what changed
            3. **Identify breaking changes** and assess potential impact on the AutoGPT codebase
            4. **Provide actionable recommendations** for the development team

            ## Analysis Process:

            1. **Identify Changed Dependencies**:
               - Use git diff to see what dependencies were updated
               - Parse package.json, poetry.lock, requirements files, etc.
               - List all package versions: old → new

            2. **Changelog Research**:
               - For each updated dependency, look up its changelog/release notes
               - Use WebFetch to access GitHub releases, NPM package pages, PyPI project pages. The PR should also have some details
               - Focus on versions between the old and new versions
               - Identify: breaking changes, deprecations, security fixes, new features

            3. **Breaking Change Assessment**:
               - Categorize changes: BREAKING, MAJOR, MINOR, PATCH, SECURITY
               - Assess impact on AutoGPT's usage patterns
               - Check if AutoGPT uses affected APIs/features
               - Look for migration guides or upgrade instructions

            4. **Codebase Impact Analysis**:
               - Search the AutoGPT codebase for usage of changed APIs
               - Identify files that might be affected by breaking changes
               - Check test files for deprecated usage patterns
               - Look for configuration changes needed

            ## Output Format:

            Provide a comprehensive review comment with:

            ### 🔍 Dependency Analysis Summary
            - List of updated packages with version changes
            - Overall risk assessment (LOW/MEDIUM/HIGH)

            ### 📋 Detailed Changelog Review
            For each updated dependency:
            - **Package**: name (old_version → new_version)
            - **Changes**: Summary of key changes
            - **Breaking Changes**: List any breaking changes
            - **Security Fixes**: Note security improvements
            - **Migration Notes**: Any upgrade steps needed

            ### ⚠️ Impact Assessment
            - **Breaking Changes Found**: Yes/No with details
            - **Affected Files**: List AutoGPT files that may need updates
            - **Test Impact**: Any tests that may need updating
            - **Configuration Changes**: Required config updates

            ### 🛠️ Recommendations
            - **Action Required**: What the team should do
            - **Testing Focus**: Areas to test thoroughly
            - **Follow-up Tasks**: Any additional work needed
            - **Merge Recommendation**: APPROVE/REVIEW_NEEDED/HOLD

            ### 📚 Useful Links
            - Links to relevant changelogs, migration guides, documentation

            Be thorough but concise. Focus on actionable insights that help the development team make informed decisions about the dependency updates.
331
.github/workflows/claude.yml
vendored
Normal file
@@ -0,0 +1,331 @@
name: Claude Code

on:
  issue_comment:
    types: [created]
  pull_request_review_comment:
    types: [created]
  issues:
    types: [opened, assigned]
  pull_request_review:
    types: [submitted]

jobs:
  claude:
    if: |
      (
        (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
        (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
        (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
        (github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
      ) && (
        github.event.comment.author_association == 'OWNER' ||
        github.event.comment.author_association == 'MEMBER' ||
        github.event.comment.author_association == 'COLLABORATOR' ||
        github.event.review.author_association == 'OWNER' ||
        github.event.review.author_association == 'MEMBER' ||
        github.event.review.author_association == 'COLLABORATOR' ||
        github.event.issue.author_association == 'OWNER' ||
        github.event.issue.author_association == 'MEMBER' ||
        github.event.issue.author_association == 'COLLABORATOR'
      )
    runs-on: ubuntu-latest
    timeout-minutes: 45

    permissions:
      contents: write
      pull-requests: read
      issues: read
      id-token: write
      actions: read # Required for CI access
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 1

      - name: Free Disk Space (Ubuntu)
        uses: jlumbroso/free-disk-space@v1.3.1
        with:
          large-packages: false # slow
          docker-images: false # limited benefit

      # Backend Python/Poetry setup (mirrors platform-backend-ci.yml)
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.11" # Use standard version matching CI

      - name: Set up Python dependency cache
        uses: actions/cache@v4
        with:
          path: ~/.cache/pypoetry
          key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}

      - name: Install Poetry
        run: |
          # Extract Poetry version from backend/poetry.lock (matches CI)
          cd autogpt_platform/backend
          HEAD_POETRY_VERSION=$(python3 ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
          echo "Found Poetry version ${HEAD_POETRY_VERSION} in backend/poetry.lock"

          # Install Poetry
          curl -sSL https://install.python-poetry.org | POETRY_VERSION=$HEAD_POETRY_VERSION python3 -

          # Add Poetry to PATH
          echo "$HOME/.local/bin" >> $GITHUB_PATH

      - name: Check poetry.lock
        working-directory: autogpt_platform/backend
        run: |
          poetry lock
          if ! git diff --quiet --ignore-matching-lines="^# " poetry.lock; then
            echo "Warning: poetry.lock not up to date, but continuing for setup"
            git checkout poetry.lock # Reset for clean setup
          fi

      - name: Install Python dependencies
        working-directory: autogpt_platform/backend
        run: poetry install

      - name: Generate Prisma Client
        working-directory: autogpt_platform/backend
        run: poetry run prisma generate

      # Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "22"

      - name: Enable corepack
        run: corepack enable

      - name: Set pnpm store directory
        run: |
          pnpm config set store-dir ~/.pnpm-store
          echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV

      - name: Cache frontend dependencies
        uses: actions/cache@v4
        with:
          path: ~/.pnpm-store
          key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
          restore-keys: |
            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
            ${{ runner.os }}-pnpm-

      - name: Install JavaScript dependencies
        working-directory: autogpt_platform/frontend
        run: pnpm install --frozen-lockfile

      # Install Playwright browsers for frontend testing
      # NOTE: Disabled to save ~1 minute of setup time. Re-enable if Copilot needs browser automation (e.g., for MCP)
      # - name: Install Playwright browsers
      #   working-directory: autogpt_platform/frontend
      #   run: pnpm playwright install --with-deps chromium

      # Docker setup for development environment
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Copy default environment files
        working-directory: autogpt_platform
        run: |
          # Copy default environment files for development
          cp .env.default .env
          cp backend/.env.default backend/.env
          cp frontend/.env.default frontend/.env

      # Phase 1: Cache and load Docker images for faster setup
      - name: Set up Docker image cache
        id: docker-cache
        uses: actions/cache@v4
        with:
          path: ~/docker-cache
          # Use a versioned key for cache invalidation when image list changes
          key: docker-images-v2-${{ runner.os }}-${{ hashFiles('.github/workflows/copilot-setup-steps.yml') }}
          restore-keys: |
            docker-images-v2-${{ runner.os }}-
            docker-images-v1-${{ runner.os }}-

      - name: Load or pull Docker images
        working-directory: autogpt_platform
        run: |
          mkdir -p ~/docker-cache

          # Define image list for easy maintenance
          IMAGES=(
            "redis:latest"
            "rabbitmq:management"
            "clamav/clamav-debian:latest"
            "busybox:latest"
            "kong:2.8.1"
            "supabase/gotrue:v2.170.0"
            "supabase/postgres:15.8.1.049"
            "supabase/postgres-meta:v0.86.1"
            "supabase/studio:20250224-d10db0f"
          )

          # Check if any cached tar files exist (more reliable than cache-hit)
          if ls ~/docker-cache/*.tar 1> /dev/null 2>&1; then
            echo "Docker cache found, loading images in parallel..."
            for image in "${IMAGES[@]}"; do
              # Convert image name to filename (replace : and / with -)
              filename=$(echo "$image" | tr ':/' '--')
              if [ -f ~/docker-cache/${filename}.tar ]; then
                echo "Loading $image..."
                docker load -i ~/docker-cache/${filename}.tar || echo "Warning: Failed to load $image from cache" &
              fi
            done
            wait
            echo "All cached images loaded"
          else
            echo "No Docker cache found, pulling images in parallel..."
            # Pull all images in parallel
            for image in "${IMAGES[@]}"; do
              docker pull "$image" &
            done
            wait

            # Only save cache on main branches (not PRs) to avoid cache pollution
            if [[ "${{ github.ref }}" == "refs/heads/master" ]] || [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then
              echo "Saving Docker images to cache in parallel..."
              for image in "${IMAGES[@]}"; do
                # Convert image name to filename (replace : and / with -)
                filename=$(echo "$image" | tr ':/' '--')
                echo "Saving $image..."
                docker save -o ~/docker-cache/${filename}.tar "$image" || echo "Warning: Failed to save $image" &
              done
              wait
              echo "Docker image cache saved"
            else
              echo "Skipping cache save for PR/feature branch"
            fi
          fi

          echo "Docker images ready for use"

      # Phase 2: Build migrate service with GitHub Actions cache
      - name: Build migrate Docker image with cache
        working-directory: autogpt_platform
        run: |
          # Build the migrate image with buildx for GHA caching
          docker buildx build \
            --cache-from type=gha \
            --cache-to type=gha,mode=max \
            --target migrate \
            --tag autogpt_platform-migrate:latest \
            --load \
            -f backend/Dockerfile \
            ..

      # Start services using pre-built images
      - name: Start Docker services for development
        working-directory: autogpt_platform
        run: |
          # Start essential services (migrate image already built with correct tag)
          docker compose --profile local up deps --no-build --detach
          echo "Waiting for services to be ready..."

          # Wait for database to be ready
          echo "Checking database readiness..."
          timeout 30 sh -c 'until docker compose exec -T db pg_isready -U postgres 2>/dev/null; do
            echo "  Waiting for database..."
            sleep 2
          done' && echo "✅ Database is ready" || echo "⚠️ Database ready check timeout after 30s, continuing..."

          # Check migrate service status
          echo "Checking migration status..."
          docker compose ps migrate || echo "  Migrate service not visible in ps output"

          # Wait for migrate service to complete
          echo "Waiting for migrations to complete..."
          timeout 30 bash -c '
            ATTEMPTS=0
            while [ $ATTEMPTS -lt 15 ]; do
              ATTEMPTS=$((ATTEMPTS + 1))

              # Check using docker directly (more reliable than docker compose ps)
              CONTAINER_STATUS=$(docker ps -a --filter "label=com.docker.compose.service=migrate" --format "{{.Status}}" | head -1)

              if [ -z "$CONTAINER_STATUS" ]; then
                echo "  Attempt $ATTEMPTS: Migrate container not found yet..."
              elif echo "$CONTAINER_STATUS" | grep -q "Exited (0)"; then
                echo "✅ Migrations completed successfully"
                docker compose logs migrate --tail=5 2>/dev/null || true
                exit 0
              elif echo "$CONTAINER_STATUS" | grep -q "Exited ([1-9]"; then
                EXIT_CODE=$(echo "$CONTAINER_STATUS" | grep -oE "Exited \([0-9]+\)" | grep -oE "[0-9]+")
                echo "❌ Migrations failed with exit code: $EXIT_CODE"
                echo "Migration logs:"
                docker compose logs migrate --tail=20 2>/dev/null || true
                exit 1
              elif echo "$CONTAINER_STATUS" | grep -q "Up"; then
                echo "  Attempt $ATTEMPTS: Migrate container is running... ($CONTAINER_STATUS)"
              else
                echo "  Attempt $ATTEMPTS: Migrate container status: $CONTAINER_STATUS"
              fi

              sleep 2
            done

            echo "⚠️ Timeout: Could not determine migration status after 30 seconds"
            echo "Final container check:"
            docker ps -a --filter "label=com.docker.compose.service=migrate" || true
            echo "Migration logs (if available):"
            docker compose logs migrate --tail=10 2>/dev/null || echo "  No logs available"
          ' || echo "⚠️ Migration check completed with warnings, continuing..."

          # Brief wait for other services to stabilize
          echo "Waiting 5 seconds for other services to stabilize..."
          sleep 5

      # Verify installations and provide environment info
      - name: Verify setup and show environment info
        run: |
          echo "=== Python Setup ==="
          python --version
          poetry --version

          echo "=== Node.js Setup ==="
          node --version
          pnpm --version

          echo "=== Additional Tools ==="
          docker --version
          docker compose version
          gh --version || true

          echo "=== Services Status ==="
          cd autogpt_platform
          docker compose ps || true

          echo "=== Backend Dependencies ==="
          cd backend
          poetry show | head -10 || true

          echo "=== Frontend Dependencies ==="
          cd ../frontend
          pnpm list --depth=0 | head -10 || true

          echo "=== Environment Files ==="
          ls -la ../.env* || true
          ls -la .env* || true
          ls -la ../backend/.env* || true

          echo "✅ AutoGPT Platform development environment setup complete!"
          echo "🚀 Ready for development with Docker services running"
          echo "📝 Backend server: poetry run serve (port 8000)"
          echo "🌐 Frontend server: pnpm dev (port 3000)"

      - name: Run Claude Code
        id: claude
        uses: anthropics/claude-code-action@v1
        with:
          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
          claude_args: |
            --allowedTools "Bash(npm:*),Bash(pnpm:*),Bash(poetry:*),Bash(git:*),Edit,Replace,NotebookEditCell,mcp__github_inline_comment__create_inline_comment,Bash(gh pr comment:*), Bash(gh pr diff:*), Bash(gh pr view:*), Bash(gh pr edit:*)"
            --model opus
          additional_permissions: |
            actions: read
98
.github/workflows/codeql.yml
vendored
Normal file
@@ -0,0 +1,98 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
  push:
    branches: [ "master", "release-*", "dev" ]
  pull_request:
    branches: [ "master", "release-*", "dev" ]
  merge_group:
  schedule:
    - cron: '15 4 * * 0'

jobs:
  analyze:
    name: Analyze (${{ matrix.language }})
    # Runner size impacts CodeQL analysis time. To learn more, please see:
    #   - https://gh.io/recommended-hardware-resources-for-running-codeql
    #   - https://gh.io/supported-runners-and-hardware-resources
    #   - https://gh.io/using-larger-runners (GitHub.com only)
    # Consider using larger runners or machines with greater resources for possible analysis time improvements.
    runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
    permissions:
      # required for all workflows
      security-events: write

      # required to fetch internal or private CodeQL packs
      packages: read

      # only required for workflows in private repositories
      actions: read
      contents: read

    strategy:
      fail-fast: false
      matrix:
        include:
          - language: typescript
            build-mode: none
          - language: python
            build-mode: none
        # CodeQL supports the following values keywords for 'language': 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift'
        # Use `c-cpp` to analyze code written in C, C++ or both
        # Use 'java-kotlin' to analyze code written in Java, Kotlin or both
        # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
        # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
        # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
        # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
        # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v3
        with:
          languages: ${{ matrix.language }}
          build-mode: ${{ matrix.build-mode }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.
          config: |
            paths-ignore:
              - classic/frontend/build/**

          # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
          # queries: security-extended,security-and-quality

      # If the analyze step fails for one of the languages you are analyzing with
      # "We were unable to automatically build your code", modify the matrix above
      # to set the build mode to "manual" for that language. Then modify this step
      # to build your code.
      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
      - if: matrix.build-mode == 'manual'
        shell: bash
        run: |
          echo 'If you are using a "manual" build mode for one or more of the' \
            'languages you are analyzing, replace this with the commands to build' \
            'your code, for example:'
          echo '  make bootstrap'
          echo '  make release'
          exit 1

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v3
        with:
          category: "/language:${{matrix.language}}"
302
.github/workflows/copilot-setup-steps.yml
vendored
Normal file
@@ -0,0 +1,302 @@
name: "Copilot Setup Steps"

# Automatically run the setup steps when they are changed to allow for easy validation, and
# allow manual testing through the repository's "Actions" tab
on:
  workflow_dispatch:
  push:
    paths:
      - .github/workflows/copilot-setup-steps.yml
  pull_request:
    paths:
      - .github/workflows/copilot-setup-steps.yml

jobs:
  # The job MUST be called `copilot-setup-steps` or it will not be picked up by Copilot.
  copilot-setup-steps:
    runs-on: ubuntu-latest
    timeout-minutes: 45

    # Set the permissions to the lowest permissions possible needed for your steps.
    # Copilot will be given its own token for its operations.
    permissions:
      # If you want to clone the repository as part of your setup steps, for example to install dependencies, you'll need the `contents: read` permission. If you don't clone the repository in your setup steps, Copilot will do this for you automatically after the steps complete.
      contents: read

    # You can define any steps you want, and they will run before the agent starts.
    # If you do not check out your code, Copilot will do this for you.
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      # Backend Python/Poetry setup (mirrors platform-backend-ci.yml)
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.11" # Use standard version matching CI

      - name: Set up Python dependency cache
        uses: actions/cache@v4
        with:
          path: ~/.cache/pypoetry
          key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}

      - name: Install Poetry
        run: |
          # Extract Poetry version from backend/poetry.lock (matches CI)
          cd autogpt_platform/backend
          HEAD_POETRY_VERSION=$(python3 ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
          echo "Found Poetry version ${HEAD_POETRY_VERSION} in backend/poetry.lock"

          # Install Poetry
          curl -sSL https://install.python-poetry.org | POETRY_VERSION=$HEAD_POETRY_VERSION python3 -

          # Add Poetry to PATH
          echo "$HOME/.local/bin" >> $GITHUB_PATH

      - name: Check poetry.lock
        working-directory: autogpt_platform/backend
        run: |
          poetry lock
          if ! git diff --quiet --ignore-matching-lines="^# " poetry.lock; then
            echo "Warning: poetry.lock not up to date, but continuing for setup"
            git checkout poetry.lock # Reset for clean setup
          fi

      - name: Install Python dependencies
        working-directory: autogpt_platform/backend
        run: poetry install

      - name: Generate Prisma Client
        working-directory: autogpt_platform/backend
        run: poetry run prisma generate

      # Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "22"

      - name: Enable corepack
        run: corepack enable

      - name: Set pnpm store directory
        run: |
          pnpm config set store-dir ~/.pnpm-store
          echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV

      - name: Cache frontend dependencies
        uses: actions/cache@v4
        with:
          path: ~/.pnpm-store
          key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
          restore-keys: |
            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
            ${{ runner.os }}-pnpm-

      - name: Install JavaScript dependencies
        working-directory: autogpt_platform/frontend
        run: pnpm install --frozen-lockfile

      # Install Playwright browsers for frontend testing
      # NOTE: Disabled to save ~1 minute of setup time. Re-enable if Copilot needs browser automation (e.g., for MCP)
      # - name: Install Playwright browsers
      #   working-directory: autogpt_platform/frontend
      #   run: pnpm playwright install --with-deps chromium

      # Docker setup for development environment
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Copy default environment files
        working-directory: autogpt_platform
        run: |
          # Copy default environment files for development
          cp .env.default .env
          cp backend/.env.default backend/.env
          cp frontend/.env.default frontend/.env

      # Phase 1: Cache and load Docker images for faster setup
      - name: Set up Docker image cache
        id: docker-cache
        uses: actions/cache@v4
        with:
          path: ~/docker-cache
          # Use a versioned key for cache invalidation when image list changes
          key: docker-images-v2-${{ runner.os }}-${{ hashFiles('.github/workflows/copilot-setup-steps.yml') }}
          restore-keys: |
            docker-images-v2-${{ runner.os }}-
            docker-images-v1-${{ runner.os }}-

      - name: Load or pull Docker images
        working-directory: autogpt_platform
        run: |
          mkdir -p ~/docker-cache

          # Define image list for easy maintenance
          IMAGES=(
            "redis:latest"
            "rabbitmq:management"
            "clamav/clamav-debian:latest"
            "busybox:latest"
            "kong:2.8.1"
            "supabase/gotrue:v2.170.0"
            "supabase/postgres:15.8.1.049"
            "supabase/postgres-meta:v0.86.1"
            "supabase/studio:20250224-d10db0f"
          )

          # Check if any cached tar files exist (more reliable than cache-hit)
          if ls ~/docker-cache/*.tar 1> /dev/null 2>&1; then
            echo "Docker cache found, loading images in parallel..."
            for image in "${IMAGES[@]}"; do
              # Convert image name to filename (replace : and / with -)
              filename=$(echo "$image" | tr ':/' '--')
              if [ -f ~/docker-cache/${filename}.tar ]; then
                echo "Loading $image..."
                docker load -i ~/docker-cache/${filename}.tar || echo "Warning: Failed to load $image from cache" &
              fi
            done
            wait
            echo "All cached images loaded"
          else
            echo "No Docker cache found, pulling images in parallel..."
            # Pull all images in parallel
            for image in "${IMAGES[@]}"; do
              docker pull "$image" &
            done
            wait

            # Only save cache on main branches (not PRs) to avoid cache pollution
            if [[ "${{ github.ref }}" == "refs/heads/master" ]] || [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then
              echo "Saving Docker images to cache in parallel..."
              for image in "${IMAGES[@]}"; do
                # Convert image name to filename (replace : and / with -)
                filename=$(echo "$image" | tr ':/' '--')
                echo "Saving $image..."
                docker save -o ~/docker-cache/${filename}.tar "$image" || echo "Warning: Failed to save $image" &
              done
              wait
              echo "Docker image cache saved"
            else
              echo "Skipping cache save for PR/feature branch"
            fi
          fi

          echo "Docker images ready for use"

      # Phase 2: Build migrate service with GitHub Actions cache
      - name: Build migrate Docker image with cache
        working-directory: autogpt_platform
        run: |
          # Build the migrate image with buildx for GHA caching
          docker buildx build \
            --cache-from type=gha \
            --cache-to type=gha,mode=max \
            --target migrate \
            --tag autogpt_platform-migrate:latest \
            --load \
            -f backend/Dockerfile \
            ..

      # Start services using pre-built images
      - name: Start Docker services for development
        working-directory: autogpt_platform
        run: |
          # Start essential services (migrate image already built with correct tag)
          docker compose --profile local up deps --no-build --detach
          echo "Waiting for services to be ready..."

          # Wait for database to be ready
          echo "Checking database readiness..."
          timeout 30 sh -c 'until docker compose exec -T db pg_isready -U postgres 2>/dev/null; do
            echo "  Waiting for database..."
            sleep 2
          done' && echo "✅ Database is ready" || echo "⚠️ Database ready check timeout after 30s, continuing..."

          # Check migrate service status
          echo "Checking migration status..."
          docker compose ps migrate || echo "  Migrate service not visible in ps output"

          # Wait for migrate service to complete
          echo "Waiting for migrations to complete..."
          timeout 30 bash -c '
            ATTEMPTS=0
            while [ $ATTEMPTS -lt 15 ]; do
              ATTEMPTS=$((ATTEMPTS + 1))

              # Check using docker directly (more reliable than docker compose ps)
              CONTAINER_STATUS=$(docker ps -a --filter "label=com.docker.compose.service=migrate" --format "{{.Status}}" | head -1)

              if [ -z "$CONTAINER_STATUS" ]; then
                echo "  Attempt $ATTEMPTS: Migrate container not found yet..."
              elif echo "$CONTAINER_STATUS" | grep -q "Exited (0)"; then
                echo "✅ Migrations completed successfully"
                docker compose logs migrate --tail=5 2>/dev/null || true
                exit 0
              elif echo "$CONTAINER_STATUS" | grep -q "Exited ([1-9]"; then
                EXIT_CODE=$(echo "$CONTAINER_STATUS" | grep -oE "Exited \([0-9]+\)" | grep -oE "[0-9]+")
                echo "❌ Migrations failed with exit code: $EXIT_CODE"
                echo "Migration logs:"
                docker compose logs migrate --tail=20 2>/dev/null || true
                exit 1
              elif echo "$CONTAINER_STATUS" | grep -q "Up"; then
                echo "  Attempt $ATTEMPTS: Migrate container is running... ($CONTAINER_STATUS)"
              else
                echo "  Attempt $ATTEMPTS: Migrate container status: $CONTAINER_STATUS"
              fi

              sleep 2
            done

            echo "⚠️ Timeout: Could not determine migration status after 30 seconds"
            echo "Final container check:"
            docker ps -a --filter "label=com.docker.compose.service=migrate" || true
            echo "Migration logs (if available):"
            docker compose logs migrate --tail=10 2>/dev/null || echo "  No logs available"
          ' || echo "⚠️ Migration check completed with warnings, continuing..."

          # Brief wait for other services to stabilize
          echo "Waiting 5 seconds for other services to stabilize..."
          sleep 5

      # Verify installations and provide environment info
      - name: Verify setup and show environment info
        run: |
          echo "=== Python Setup ==="
          python --version
          poetry --version

          echo "=== Node.js Setup ==="
          node --version
          pnpm --version

          echo "=== Additional Tools ==="
          docker --version
          docker compose version
          gh --version || true

          echo "=== Services Status ==="
          cd autogpt_platform
          docker compose ps || true

          echo "=== Backend Dependencies ==="
          cd backend
          poetry show | head -10 || true

          echo "=== Frontend Dependencies ==="
          cd ../frontend
          pnpm list --depth=0 | head -10 || true

          echo "=== Environment Files ==="
          ls -la ../.env* || true
          ls -la .env* || true
          ls -la ../backend/.env* || true

          echo "✅ AutoGPT Platform development environment setup complete!"
          echo "🚀 Ready for development with Docker services running"
          echo "📝 Backend server: poetry run serve (port 8000)"
          echo "🌐 Frontend server: pnpm dev (port 3000)"
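
Since the workflow exposes workflow_dispatch, the setup steps can be validated without editing the file (a sketch assuming an authenticated GitHub CLI):

# Trigger the setup workflow manually and follow the resulting run.
gh workflow run copilot-setup-steps.yml
gh run watch "$(gh run list --workflow=copilot-setup-steps.yml --limit 1 \
  --json databaseId --jq '.[0].databaseId')"
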
60
.github/workflows/platform-autogpt-deploy-dev.yaml
vendored
Normal file
@@ -0,0 +1,60 @@
name: AutoGPT Platform - Deploy Dev Environment

on:
  push:
    branches: [ dev ]
    paths:
      - 'autogpt_platform/**'
  workflow_dispatch:
    inputs:
      git_ref:
        description: 'Git ref (branch/tag) of AutoGPT to deploy'
        required: true
        default: 'master'
        type: string

permissions:
  contents: 'read'
  id-token: 'write'

jobs:
  migrate:
    environment: develop
    name: Run migrations for AutoGPT Platform
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.inputs.git_ref || github.ref_name }}

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install Python dependencies
        run: |
          python -m pip install --upgrade pip
          pip install prisma

      - name: Run Backend Migrations
        working-directory: ./autogpt_platform/backend
        run: |
          python -m prisma migrate deploy
        env:
          DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}
          DIRECT_URL: ${{ secrets.BACKEND_DATABASE_URL }}

  trigger:
    needs: migrate
    runs-on: ubuntu-latest
    steps:
      - name: Trigger deploy workflow
        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ secrets.DEPLOY_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
          event-type: build_deploy_dev
          client-payload: '{"ref": "${{ github.event.inputs.git_ref || github.ref }}", "repository": "${{ github.repository }}"}'
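
The workflow_dispatch input above can be exercised from the CLI (a sketch assuming an authenticated gh; the ref value is an example):

# Deploy a specific branch or tag to the dev environment.
gh workflow run platform-autogpt-deploy-dev.yaml -f git_ref=dev
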
54
.github/workflows/platform-autogpt-deploy-prod.yml
vendored
Normal file
@@ -0,0 +1,54 @@
name: AutoGPT Platform - Deploy Prod Environment

on:
  release:
    types: [published]
  workflow_dispatch:

permissions:
  contents: 'read'
  id-token: 'write'

jobs:
  migrate:
    environment: production
    name: Run migrations for AutoGPT Platform
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          ref: ${{ github.ref_name || 'master' }}

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install Python dependencies
        run: |
          python -m pip install --upgrade pip
          pip install prisma

      - name: Run Backend Migrations
        working-directory: ./autogpt_platform/backend
        run: |
          python -m prisma migrate deploy
        env:
          DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}
          DIRECT_URL: ${{ secrets.BACKEND_DATABASE_URL }}


  trigger:
    needs: migrate
    runs-on: ubuntu-latest
    steps:
      - name: Trigger deploy workflow
        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ secrets.DEPLOY_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
          event-type: build_deploy_prod
          client-payload: |
            {"ref": "${{ github.ref_name || 'master' }}", "repository": "${{ github.repository }}"}
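
For reference, the repository-dispatch event sent by the trigger job is equivalent to a direct API call along these lines (a sketch; the token is a placeholder for a PAT with access to the infrastructure repository):

curl -X POST \
  -H "Accept: application/vnd.github+json" \
  -H "Authorization: Bearer $DEPLOY_TOKEN" \
  https://api.github.com/repos/Significant-Gravitas/AutoGPT_cloud_infrastructure/dispatches \
  -d '{"event_type": "build_deploy_prod", "client_payload": {"ref": "master", "repository": "Significant-Gravitas/AutoGPT"}}'
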
225
.github/workflows/platform-backend-ci.yml
vendored
Normal file
@@ -0,0 +1,225 @@
name: AutoGPT Platform - Backend CI

on:
  push:
    branches: [master, dev, ci-test*]
    paths:
      - ".github/workflows/platform-backend-ci.yml"
      - "autogpt_platform/backend/**"
      - "autogpt_platform/autogpt_libs/**"
  pull_request:
    branches: [master, dev, release-*]
    paths:
      - ".github/workflows/platform-backend-ci.yml"
      - "autogpt_platform/backend/**"
      - "autogpt_platform/autogpt_libs/**"
  merge_group:

concurrency:
  group: ${{ format('backend-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

defaults:
  run:
    shell: bash
    working-directory: autogpt_platform/backend

jobs:
  test:
    permissions:
      contents: read
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.11", "3.12", "3.13"]
    runs-on: ubuntu-latest

    services:
      redis:
        image: redis:latest
        ports:
          - 6379:6379
      rabbitmq:
        image: rabbitmq:3.12-management
        ports:
          - 5672:5672
          - 15672:15672
        env:
          RABBITMQ_DEFAULT_USER: ${{ env.RABBITMQ_DEFAULT_USER }}
          RABBITMQ_DEFAULT_PASS: ${{ env.RABBITMQ_DEFAULT_PASS }}
      clamav:
        image: clamav/clamav-debian:latest
        ports:
          - 3310:3310
        env:
          CLAMAV_NO_FRESHCLAMD: false
          CLAMD_CONF_StreamMaxLength: 50M
          CLAMD_CONF_MaxFileSize: 100M
          CLAMD_CONF_MaxScanSize: 100M
          CLAMD_CONF_MaxThreads: 4
          CLAMD_CONF_ReadTimeout: 300
        options: >-
          --health-cmd "clamdscan --version || exit 1"
          --health-interval 30s
          --health-timeout 10s
          --health-retries 5
          --health-start-period 180s

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Setup Supabase
        uses: supabase/setup-cli@v1
        with:
          version: 1.178.1

      - id: get_date
        name: Get date
        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

      - name: Set up Python dependency cache
        uses: actions/cache@v4
        with:
          path: ~/.cache/pypoetry
          key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}

      - name: Install Poetry (Unix)
        run: |
          # Extract Poetry version from backend/poetry.lock
          HEAD_POETRY_VERSION=$(python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
          echo "Found Poetry version ${HEAD_POETRY_VERSION} in backend/poetry.lock"

          if [ -n "$BASE_REF" ]; then
            BASE_BRANCH=${BASE_REF/refs\/heads\//}
            BASE_POETRY_VERSION=$((git show "origin/$BASE_BRANCH":./poetry.lock; true) | python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry -)
            echo "Found Poetry version ${BASE_POETRY_VERSION} in backend/poetry.lock on ${BASE_REF}"
            POETRY_VERSION=$(printf '%s\n' "$HEAD_POETRY_VERSION" "$BASE_POETRY_VERSION" | sort -V | tail -n1)
          else
            POETRY_VERSION=$HEAD_POETRY_VERSION
          fi
          echo "Using Poetry version ${POETRY_VERSION}"

          # Install Poetry
          curl -sSL https://install.python-poetry.org | POETRY_VERSION=$POETRY_VERSION python3 -

          if [ "${{ runner.os }}" = "macOS" ]; then
            PATH="$HOME/.local/bin:$PATH"
            echo "$HOME/.local/bin" >> $GITHUB_PATH
          fi
        env:
          BASE_REF: ${{ github.base_ref || github.event.merge_group.base_ref }}

      - name: Check poetry.lock
        run: |
          poetry lock

          if ! git diff --quiet --ignore-matching-lines="^# " poetry.lock; then
            echo "Error: poetry.lock not up to date."
            echo
            git diff poetry.lock
            exit 1
          fi

      - name: Install Python dependencies
        run: poetry install

      - name: Generate Prisma Client
        run: poetry run prisma generate

      - id: supabase
        name: Start Supabase
        working-directory: .
        run: |
          supabase init
          supabase start --exclude postgres-meta,realtime,storage-api,imgproxy,inbucket,studio,edge-runtime,logflare,vector,supavisor
          supabase status -o env | sed 's/="/=/; s/"$//' >> $GITHUB_OUTPUT
          # outputs:
          # DB_URL, API_URL, GRAPHQL_URL, ANON_KEY, SERVICE_ROLE_KEY, JWT_SECRET

      - name: Wait for ClamAV to be ready
        run: |
          echo "Waiting for ClamAV daemon to start..."
          max_attempts=60
          attempt=0

          until nc -z localhost 3310 || [ $attempt -eq $max_attempts ]; do
            echo "ClamAV is unavailable - sleeping (attempt $((attempt+1))/$max_attempts)"
            sleep 5
            attempt=$((attempt+1))
          done

          if [ $attempt -eq $max_attempts ]; then
            echo "ClamAV failed to start after $((max_attempts*5)) seconds"
            echo "Checking ClamAV service logs..."
            docker logs $(docker ps -q --filter "ancestor=clamav/clamav-debian:latest") 2>&1 | tail -50 || echo "No ClamAV container found"
            exit 1
          fi

          echo "ClamAV is ready!"

          # Verify ClamAV is responsive
          echo "Testing ClamAV connection..."
          timeout 10 bash -c 'echo "PING" | nc localhost 3310' || {
            echo "ClamAV is not responding to PING"
            docker logs $(docker ps -q --filter "ancestor=clamav/clamav-debian:latest") 2>&1 | tail -50 || echo "No ClamAV container found"
            exit 1
          }

      - name: Run Database Migrations
        run: poetry run prisma migrate dev --name updates
        env:
          DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
          DIRECT_URL: ${{ steps.supabase.outputs.DB_URL }}

      - id: lint
        name: Run Linter
        run: poetry run lint

      - name: Run pytest with coverage
        run: |
          if [[ "${{ runner.debug }}" == "1" ]]; then
            poetry run pytest -s -vv -o log_cli=true -o log_cli_level=DEBUG
          else
            poetry run pytest -s -vv
          fi
        if: success() || (failure() && steps.lint.outcome == 'failure')
        env:
          LOG_LEVEL: ${{ runner.debug && 'DEBUG' || 'INFO' }}
          DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
          DIRECT_URL: ${{ steps.supabase.outputs.DB_URL }}
          SUPABASE_URL: ${{ steps.supabase.outputs.API_URL }}
          SUPABASE_SERVICE_ROLE_KEY: ${{ steps.supabase.outputs.SERVICE_ROLE_KEY }}
          JWT_VERIFY_KEY: ${{ steps.supabase.outputs.JWT_SECRET }}
          REDIS_HOST: "localhost"
          REDIS_PORT: "6379"
          ENCRYPTION_KEY: "dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw=" # DO NOT USE IN PRODUCTION!!

    env:
      CI: true
      PLAIN_OUTPUT: True
      RUN_ENV: local
      PORT: 8080
      OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
      # We know these are here, don't report this as a security vulnerability
      # This is used as the default credential for the entire system's RabbitMQ instance
      # If you want to replace this, you can do so by making our entire system generate
      # new credentials for each local user and update the environment variables in
      # the backend service, docker composes, and examples
      RABBITMQ_DEFAULT_USER: "rabbitmq_user_default"
      RABBITMQ_DEFAULT_PASS: "k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7"

    # - name: Upload coverage reports to Codecov
    #   uses: codecov/codecov-action@v4
    #   with:
    #     token: ${{ secrets.CODECOV_TOKEN }}
    #     flags: backend,${{ runner.os }}
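
The Poetry-version selection above relies on sort -V picking the newer of the head and base lockfile versions; the behaviour is easy to sanity-check in isolation (illustrative version numbers):

# sort -V orders by semantic version, so tail -n1 yields the newer one.
printf '%s\n' "1.8.3" "2.1.1" | sort -V | tail -n1   # prints 2.1.1
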
198
.github/workflows/platform-dev-deploy-event-dispatcher.yml
vendored
Normal file
@@ -0,0 +1,198 @@
name: AutoGPT Platform - Dev Deploy PR Event Dispatcher

on:
  pull_request:
    types: [closed]
  issue_comment:
    types: [created]

permissions:
  issues: write
  pull-requests: write

jobs:
  dispatch:
    runs-on: ubuntu-latest
    steps:
      - name: Check comment permissions and deployment status
        id: check_status
        if: github.event_name == 'issue_comment' && github.event.issue.pull_request
        uses: actions/github-script@v7
        with:
          script: |
            const commentBody = context.payload.comment.body.trim();
            const commentUser = context.payload.comment.user.login;
            const prAuthor = context.payload.issue.user.login;
            const authorAssociation = context.payload.comment.author_association;

            // Check permissions
            const hasPermission = (
              authorAssociation === 'OWNER' ||
              authorAssociation === 'MEMBER' ||
              authorAssociation === 'COLLABORATOR'
            );

            core.setOutput('comment_body', commentBody);
            core.setOutput('has_permission', hasPermission);

            if (!hasPermission && (commentBody === '!deploy' || commentBody === '!undeploy')) {
              core.setOutput('permission_denied', 'true');
              return;
            }

            if (commentBody !== '!deploy' && commentBody !== '!undeploy') {
              return;
            }

            // Process deploy command
            if (commentBody === '!deploy') {
              core.setOutput('should_deploy', 'true');
            }
            // Process undeploy command
            else if (commentBody === '!undeploy') {
              core.setOutput('should_undeploy', 'true');
            }

      - name: Post permission denied comment
        if: steps.check_status.outputs.permission_denied == 'true'
        uses: actions/github-script@v7
        with:
          script: |
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              body: `❌ **Permission denied**: Only the repository owners, members, or collaborators can use deployment commands.`
            });

      - name: Get PR details for deployment
        id: pr_details
        if: steps.check_status.outputs.should_deploy == 'true' || steps.check_status.outputs.should_undeploy == 'true'
        uses: actions/github-script@v7
        with:
          script: |
            const pr = await github.rest.pulls.get({
              owner: context.repo.owner,
              repo: context.repo.repo,
              pull_number: context.issue.number
            });
            core.setOutput('pr_number', pr.data.number);
            core.setOutput('pr_title', pr.data.title);
            core.setOutput('pr_state', pr.data.state);

      - name: Dispatch Deploy Event
        if: steps.check_status.outputs.should_deploy == 'true'
        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ secrets.DISPATCH_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
          event-type: pr-event
          client-payload: |
            {
              "action": "deploy",
              "pr_number": "${{ steps.pr_details.outputs.pr_number }}",
              "pr_title": "${{ steps.pr_details.outputs.pr_title }}",
              "pr_state": "${{ steps.pr_details.outputs.pr_state }}",
              "repo": "${{ github.repository }}"
            }

      - name: Post deploy success comment
        if: steps.check_status.outputs.should_deploy == 'true'
        uses: actions/github-script@v7
        with:
          script: |
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              body: `🚀 **Deploying PR #${{ steps.pr_details.outputs.pr_number }}** to development environment...`
            });

      - name: Dispatch Undeploy Event (from comment)
        if: steps.check_status.outputs.should_undeploy == 'true'
        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ secrets.DISPATCH_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
          event-type: pr-event
          client-payload: |
            {
              "action": "undeploy",
              "pr_number": "${{ steps.pr_details.outputs.pr_number }}",
              "pr_title": "${{ steps.pr_details.outputs.pr_title }}",
              "pr_state": "${{ steps.pr_details.outputs.pr_state }}",
              "repo": "${{ github.repository }}"
            }

      - name: Post undeploy success comment
        if: steps.check_status.outputs.should_undeploy == 'true'
        uses: actions/github-script@v7
        with:
          script: |
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              body: `🗑️ **Undeploying PR #${{ steps.pr_details.outputs.pr_number }}** from development environment...`
            });

      - name: Check deployment status on PR close
        id: check_pr_close
        if: github.event_name == 'pull_request' && github.event.action == 'closed'
        uses: actions/github-script@v7
        with:
          script: |
            const comments = await github.rest.issues.listComments({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number
            });

            let lastDeployIndex = -1;
            let lastUndeployIndex = -1;

            comments.data.forEach((comment, index) => {
              if (comment.body.trim() === '!deploy') {
                lastDeployIndex = index;
              } else if (comment.body.trim() === '!undeploy') {
                lastUndeployIndex = index;
              }
            });

            // Should undeploy if there's a !deploy without a subsequent !undeploy
            const shouldUndeploy = lastDeployIndex !== -1 && lastDeployIndex > lastUndeployIndex;
            core.setOutput('should_undeploy', shouldUndeploy);

      - name: Dispatch Undeploy Event (PR closed with active deployment)
        if: >-
          github.event_name == 'pull_request' &&
          github.event.action == 'closed' &&
          steps.check_pr_close.outputs.should_undeploy == 'true'
        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ secrets.DISPATCH_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
          event-type: pr-event
          client-payload: |
            {
              "action": "undeploy",
              "pr_number": "${{ github.event.pull_request.number }}",
              "pr_title": "${{ github.event.pull_request.title }}",
              "pr_state": "${{ github.event.pull_request.state }}",
              "repo": "${{ github.repository }}"
            }

      - name: Post PR close undeploy comment
        if: >-
          github.event_name == 'pull_request' &&
          github.event.action == 'closed' &&
          steps.check_pr_close.outputs.should_undeploy == 'true'
        uses: actions/github-script@v7
        with:
          script: |
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              body: `🧹 **Auto-undeploying**: PR closed with active deployment. Cleaning up development environment for PR #${{ github.event.pull_request.number }}.`
            });
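
The comment commands handled above can also be issued from the CLI (a sketch assuming an authenticated gh; the PR number is an example):

# Request a dev deployment of PR 1234, and later tear it down.
gh pr comment 1234 --body '!deploy'
gh pr comment 1234 --body '!undeploy'
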
239
.github/workflows/platform-frontend-ci.yml
vendored
Normal file
@@ -0,0 +1,239 @@
name: AutoGPT Platform - Frontend CI

on:
  push:
    branches: [master, dev]
    paths:
      - ".github/workflows/platform-frontend-ci.yml"
      - "autogpt_platform/frontend/**"
  pull_request:
    paths:
      - ".github/workflows/platform-frontend-ci.yml"
      - "autogpt_platform/frontend/**"
  merge_group:

concurrency:
  group: ${{ github.workflow }}-${{ github.event_name == 'merge_group' && format('merge-queue-{0}', github.ref) || format('{0}-{1}', github.ref, github.event.pull_request.number || github.sha) }}
  cancel-in-progress: ${{ github.event_name == 'pull_request' }}

defaults:
  run:
    shell: bash
    working-directory: autogpt_platform/frontend

jobs:
  setup:
    runs-on: ubuntu-latest
    outputs:
      cache-key: ${{ steps.cache-key.outputs.key }}

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "22.18.0"

      - name: Enable corepack
        run: corepack enable

      - name: Generate cache key
        id: cache-key
        run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}" >> $GITHUB_OUTPUT

      - name: Cache dependencies
        uses: actions/cache@v4
        with:
          path: ~/.pnpm-store
          key: ${{ steps.cache-key.outputs.key }}
          restore-keys: |
            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
            ${{ runner.os }}-pnpm-

      - name: Install dependencies
        run: pnpm install --frozen-lockfile

  lint:
    runs-on: ubuntu-latest
    needs: setup

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "22.18.0"

      - name: Enable corepack
        run: corepack enable

      - name: Restore dependencies cache
        uses: actions/cache@v4
        with:
          path: ~/.pnpm-store
          key: ${{ needs.setup.outputs.cache-key }}
          restore-keys: |
            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
            ${{ runner.os }}-pnpm-

      - name: Install dependencies
        run: pnpm install --frozen-lockfile

      - name: Run lint
        run: pnpm lint

  chromatic:
    runs-on: ubuntu-latest
    needs: setup
    # Only run on dev branch pushes or PRs targeting dev
    if: github.ref == 'refs/heads/dev' || github.base_ref == 'dev'

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "22.18.0"

      - name: Enable corepack
        run: corepack enable

      - name: Restore dependencies cache
        uses: actions/cache@v4
        with:
          path: ~/.pnpm-store
          key: ${{ needs.setup.outputs.cache-key }}
          restore-keys: |
            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
            ${{ runner.os }}-pnpm-

      - name: Install dependencies
        run: pnpm install --frozen-lockfile

      - name: Run Chromatic
        uses: chromaui/action@latest
        with:
          projectToken: chpt_9e7c1a76478c9c8
          onlyChanged: true
          workingDir: autogpt_platform/frontend
          token: ${{ secrets.GITHUB_TOKEN }}
          exitOnceUploaded: true

  test:
    runs-on: big-boi
    needs: setup
    strategy:
      fail-fast: false

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          submodules: recursive

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "22.18.0"

      - name: Enable corepack
        run: corepack enable

      - name: Copy default supabase .env
        run: |
          cp ../.env.default ../.env

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Cache Docker layers
        uses: actions/cache@v4
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-frontend-test-${{ hashFiles('autogpt_platform/docker-compose.yml', 'autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/pyproject.toml', 'autogpt_platform/backend/poetry.lock') }}
          restore-keys: |
            ${{ runner.os }}-buildx-frontend-test-

      - name: Run docker compose
        run: |
          NEXT_PUBLIC_PW_TEST=true docker compose -f ../docker-compose.yml up -d
        env:
          DOCKER_BUILDKIT: 1
          BUILDX_CACHE_FROM: type=local,src=/tmp/.buildx-cache
          BUILDX_CACHE_TO: type=local,dest=/tmp/.buildx-cache-new,mode=max

      - name: Move cache
        run: |
          rm -rf /tmp/.buildx-cache
          if [ -d "/tmp/.buildx-cache-new" ]; then
            mv /tmp/.buildx-cache-new /tmp/.buildx-cache
          fi

      - name: Wait for services to be ready
        run: |
          echo "Waiting for rest_server to be ready..."
          timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
          echo "Waiting for database to be ready..."
          timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."

      - name: Create E2E test data
        run: |
          echo "Creating E2E test data..."
          # First try to run the script from inside the container
          if docker compose -f ../docker-compose.yml exec -T rest_server test -f /app/autogpt_platform/backend/test/e2e_test_data.py; then
            echo "✅ Found e2e_test_data.py in container, running it..."
            docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python backend/test/e2e_test_data.py" || {
              echo "❌ E2E test data creation failed!"
              docker compose -f ../docker-compose.yml logs --tail=50 rest_server
              exit 1
            }
          else
            echo "⚠️ e2e_test_data.py not found in container, copying and running..."
            # Copy the script into the container and run it
            docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.yml ps -q rest_server):/tmp/e2e_test_data.py || {
              echo "❌ Failed to copy script to container"
              exit 1
            }
            docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
              echo "❌ E2E test data creation failed!"
              docker compose -f ../docker-compose.yml logs --tail=50 rest_server
              exit 1
            }
          fi

      - name: Restore dependencies cache
        uses: actions/cache@v4
        with:
          path: ~/.pnpm-store
          key: ${{ needs.setup.outputs.cache-key }}
          restore-keys: |
            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
            ${{ runner.os }}-pnpm-

      - name: Install dependencies
        run: pnpm install --frozen-lockfile

      - name: Install Browser 'chromium'
        run: pnpm playwright install --with-deps chromium

      - name: Run Playwright tests
        run: pnpm test:no-build

      - name: Upload Playwright artifacts
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: playwright-report
          path: playwright-report

      - name: Print Final Docker Compose logs
        if: always()
        run: docker compose -f ../docker-compose.yml logs
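
The test job can be approximated locally (a sketch, assuming the compose stack from the repo is already up and the E2E test data has been seeded as above):

cd autogpt_platform/frontend
pnpm install --frozen-lockfile
pnpm playwright install --with-deps chromium
pnpm test:no-build
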
136
.github/workflows/platform-fullstack-ci.yml
vendored
Normal file
@@ -0,0 +1,136 @@
name: AutoGPT Platform - Frontend CI

on:
  push:
    branches: [master, dev]
    paths:
      - ".github/workflows/platform-fullstack-ci.yml"
      - "autogpt_platform/**"
  pull_request:
    paths:
      - ".github/workflows/platform-fullstack-ci.yml"
      - "autogpt_platform/**"
  merge_group:

concurrency:
  group: ${{ github.workflow }}-${{ github.event_name == 'merge_group' && format('merge-queue-{0}', github.ref) || github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha }}
  cancel-in-progress: ${{ github.event_name == 'pull_request' }}

defaults:
  run:
    shell: bash
    working-directory: autogpt_platform/frontend

jobs:
  setup:
    runs-on: ubuntu-latest
    outputs:
      cache-key: ${{ steps.cache-key.outputs.key }}

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "22.18.0"

      - name: Enable corepack
        run: corepack enable

      - name: Generate cache key
        id: cache-key
        run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}" >> $GITHUB_OUTPUT

      - name: Cache dependencies
        uses: actions/cache@v4
        with:
          path: ~/.pnpm-store
          key: ${{ steps.cache-key.outputs.key }}
          restore-keys: |
            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
            ${{ runner.os }}-pnpm-

      - name: Install dependencies
        run: pnpm install --frozen-lockfile

  types:
    runs-on: ubuntu-latest
    needs: setup
    strategy:
      fail-fast: false

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          submodules: recursive

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "22.18.0"

      - name: Enable corepack
        run: corepack enable

      - name: Copy default supabase .env
        run: |
          cp ../.env.default ../.env

      - name: Copy backend .env
        run: |
          cp ../backend/.env.default ../backend/.env

      - name: Run docker compose
        run: |
          docker compose -f ../docker-compose.yml --profile local --profile deps_backend up -d

      - name: Restore dependencies cache
        uses: actions/cache@v4
        with:
          path: ~/.pnpm-store
          key: ${{ needs.setup.outputs.cache-key }}
          restore-keys: |
            ${{ runner.os }}-pnpm-

      - name: Install dependencies
        run: pnpm install --frozen-lockfile

      - name: Setup .env
        run: cp .env.default .env

      - name: Wait for services to be ready
        run: |
          echo "Waiting for rest_server to be ready..."
          timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
          echo "Waiting for database to be ready..."
          timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."

      - name: Generate API queries
        run: pnpm generate:api:force

      - name: Check for API schema changes
        run: |
          if ! git diff --exit-code src/app/api/openapi.json; then
            echo "❌ API schema changes detected in src/app/api/openapi.json"
            echo ""
            echo "The openapi.json file has been modified after running 'pnpm generate:api-all'."
            echo "This usually means changes have been made in the BE endpoints without updating the Frontend."
            echo "The API schema is now out of sync with the Front-end queries."
            echo ""
            echo "To fix this:"
            echo "1. Pull the backend 'docker compose pull && docker compose up -d --build --force-recreate'"
            echo "2. Run 'pnpm generate:api' locally"
            echo "3. Run 'pnpm types' locally"
            echo "4. Fix any TypeScript errors that may have been introduced"
            echo "5. Commit and push your changes"
            echo ""
            exit 1
          else
            echo "✅ No API schema changes detected"
          fi

      - name: Run Typescript checks
        run: pnpm types
34
.github/workflows/repo-close-stale-issues.yml
vendored
Normal file
@@ -0,0 +1,34 @@
name: Repo - Close stale issues
on:
  schedule:
    - cron: '30 1 * * *'
  workflow_dispatch:

permissions:
  issues: write

jobs:
  stale:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/stale@v9
        with:
          # operations-per-run: 5000
          stale-issue-message: >
            This issue has automatically been marked as _stale_ because it has not had
            any activity in the last 170 days. You can _unstale_ it by commenting or
            removing the label. Otherwise, this issue will be closed in 10 days.
          stale-pr-message: >
            This pull request has automatically been marked as _stale_ because it has
            not had any activity in the last 50 days. You can _unstale_ it by commenting
            or removing the label.
          close-issue-message: >
            This issue was closed automatically because it has been stale for 10 days
            with no activity.
          days-before-stale: 170
          days-before-close: 10
          # Do not touch meta issues:
          exempt-issue-labels: meta,fridge,project management
          # Do not affect pull requests:
          days-before-pr-stale: -1
          days-before-pr-close: -1
21
.github/workflows/repo-pr-enforce-base-branch.yml
vendored
Normal file
@@ -0,0 +1,21 @@
name: Repo - Enforce dev as base branch
on:
  pull_request_target:
    branches: [ master ]
    types: [ opened ]

jobs:
  check_pr_target:
    runs-on: ubuntu-latest
    permissions:
      pull-requests: write
    steps:
      - name: Check if PR is from dev or hotfix
        if: ${{ !(startsWith(github.event.pull_request.head.ref, 'hotfix/') || github.event.pull_request.head.ref == 'dev') }}
        run: |
          gh pr comment ${{ github.event.number }} --repo "$REPO" \
            --body $'This PR targets the `master` branch but does not come from `dev` or a `hotfix/*` branch.\n\nAutomatically setting the base branch to `dev`.'
          gh pr edit ${{ github.event.number }} --base dev --repo "$REPO"
        env:
          GITHUB_TOKEN: ${{ github.token }}
          REPO: ${{ github.repository }}
66
.github/workflows/repo-pr-label.yml
vendored
Normal file
@@ -0,0 +1,66 @@
name: Repo - Pull Request auto-label

on:
  # So that PRs touching the same files as the push are updated
  push:
    branches: [ master, dev, release-* ]
    paths-ignore:
      - 'classic/forge/tests/vcr_cassettes'
      - 'classic/benchmark/reports/**'
  # So that the `dirtyLabel` is removed if conflicts are resolved
  # We recommend `pull_request_target` so that github secrets are available.
  # In `pull_request` we wouldn't be able to change labels of fork PRs
  pull_request_target:
    types: [ opened, synchronize ]

concurrency:
  group: ${{ format('pr-label-{0}', github.event.pull_request.number || github.sha) }}
  cancel-in-progress: true

jobs:
  conflicts:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: write
    steps:
      - name: Update PRs with conflict labels
        uses: eps1lon/actions-label-merge-conflict@releases/2.x
        with:
          dirtyLabel: "conflicts"
          #removeOnDirtyLabel: "PR: ready to ship"
          repoToken: "${{ secrets.GITHUB_TOKEN }}"
          commentOnDirty: "This pull request has conflicts with the base branch, please resolve those so we can evaluate the pull request."
          commentOnClean: "Conflicts have been resolved! 🎉 A maintainer will review the pull request shortly."

  size:
    if: ${{ github.event_name == 'pull_request_target' }}
    permissions:
      issues: write
      pull-requests: write
    runs-on: ubuntu-latest
    steps:
      - uses: codelytv/pr-size-labeler@v1
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          xs_label: 'size/xs'
          xs_max_size: 2
          s_label: 'size/s'
          s_max_size: 10
          m_label: 'size/m'
          m_max_size: 100
          l_label: 'size/l'
          l_max_size: 500
          xl_label: 'size/xl'
          message_if_xl:

  scope:
    if: ${{ github.event_name == 'pull_request_target' }}
    permissions:
      contents: read
      pull-requests: write
    runs-on: ubuntu-latest
    steps:
      - uses: actions/labeler@v5
        with:
          sync-labels: true
20
.github/workflows/repo-stats.yml
vendored
Normal file
@@ -0,0 +1,20 @@
name: Repo - Github Stats

on:
  schedule:
    # Run this once per day, towards the end of the day for keeping the most
    # recent data point most meaningful (hours are interpreted in UTC).
    - cron: "0 23 * * *"
  workflow_dispatch: # Allow for running this manually.

jobs:
  j1:
    name: github-repo-stats
    runs-on: ubuntu-latest
    steps:
      - name: run-ghrs
        # Use latest release.
        uses: jgehrcke/github-repo-stats@HEAD
        with:
          ghtoken: ${{ secrets.ghrs_github_api_token }}
32
.github/workflows/repo-workflow-checker.yml
vendored
Normal file
@@ -0,0 +1,32 @@
name: Repo - PR Status Checker
on:
  pull_request:
    types: [opened, synchronize, reopened]
  merge_group:

jobs:
  status-check:
    name: Check PR Status
    runs-on: ubuntu-latest
    steps:
      # - name: Wait some time for all actions to start
      #   run: sleep 30
      - uses: actions/checkout@v4
      # with:
      #   fetch-depth: 0
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install requests
      - name: Check PR Status
        run: |
          echo "Current directory before running Python script:"
          pwd
          echo "Attempting to run Python script:"
          python .github/workflows/scripts/check_actions_status.py
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
116
.github/workflows/scripts/check_actions_status.py
vendored
Normal file
@@ -0,0 +1,116 @@
import json
import os
import sys
import time
from typing import Dict, List, Tuple

import requests

CHECK_INTERVAL = 30


def get_environment_variables() -> Tuple[str, str, str, str, str]:
    """Retrieve and return necessary environment variables."""
    try:
        with open(os.environ["GITHUB_EVENT_PATH"]) as f:
            event = json.load(f)

        # Handle both PR and merge group events
        if "pull_request" in event:
            sha = event["pull_request"]["head"]["sha"]
        else:
            sha = os.environ["GITHUB_SHA"]

        return (
            os.environ["GITHUB_API_URL"],
            os.environ["GITHUB_REPOSITORY"],
            sha,
            os.environ["GITHUB_TOKEN"],
            os.environ["GITHUB_RUN_ID"],
        )
    except KeyError as e:
        print(f"Error: Missing required environment variable or event data: {e}")
        sys.exit(1)


def make_api_request(url: str, headers: Dict[str, str]) -> Dict:
    """Make an API request and return the JSON response."""
    try:
        print("Making API request to:", url)
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()
        return response.json()
    except requests.RequestException as e:
        print(f"Error: API request failed. {e}")
        sys.exit(1)


def process_check_runs(check_runs: List[Dict]) -> Tuple[bool, bool]:
    """Process check runs and return their status."""
    runs_in_progress = False
    all_others_passed = True

    for run in check_runs:
        if str(run["name"]) != "Check PR Status":
            status = run["status"]
            conclusion = run["conclusion"]

            if status == "completed":
                if conclusion not in ["success", "skipped", "neutral"]:
                    all_others_passed = False
                    print(
                        f"Check run {run['name']} (ID: {run['id']}) has conclusion: {conclusion}"
                    )
            else:
                runs_in_progress = True
                print(f"Check run {run['name']} (ID: {run['id']}) is still {status}.")
                all_others_passed = False
        else:
            print(
                f"Skipping check run {run['name']} (ID: {run['id']}) as it is the current run."
            )

    return runs_in_progress, all_others_passed


def main():
    api_url, repo, sha, github_token, current_run_id = get_environment_variables()

    endpoint = f"{api_url}/repos/{repo}/commits/{sha}/check-runs"
    headers = {
        "Accept": "application/vnd.github.v3+json",
    }
    if github_token:
        headers["Authorization"] = f"token {github_token}"

    print(f"Current run ID: {current_run_id}")

    while True:
        data = make_api_request(endpoint, headers)

        check_runs = data["check_runs"]

        print("Processing check runs...")

        print(check_runs)

        runs_in_progress, all_others_passed = process_check_runs(check_runs)

        if not runs_in_progress:
            break

        print(
            "Some check runs are still in progress. "
            f"Waiting {CHECK_INTERVAL} seconds before checking again..."
        )
        time.sleep(CHECK_INTERVAL)

    if all_others_passed:
        print("All other completed check runs have passed. This check passes.")
        sys.exit(0)
    else:
        print("Some check runs have failed or have not completed. This check fails.")
        sys.exit(1)


if __name__ == "__main__":
    main()
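
The script only reads standard Actions environment variables, so it can be dry-run outside CI (a sketch; the SHA, token, and event payload path are placeholders):

GITHUB_API_URL="https://api.github.com" \
GITHUB_REPOSITORY="Significant-Gravitas/AutoGPT" \
GITHUB_SHA="<commit-sha>" \
GITHUB_TOKEN="<token>" \
GITHUB_RUN_ID="0" \
GITHUB_EVENT_PATH="event.json" \
python .github/workflows/scripts/check_actions_status.py
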
98
.github/workflows/scripts/docker-ci-summary.sh
vendored
Executable file
@@ -0,0 +1,98 @@
#!/bin/bash
meta=$(docker image inspect "$IMAGE_NAME" | jq '.[0]')
head_compare_url=$(sed "s/{base}/$base_branch/; s/{head}/$current_ref/" <<< $compare_url_template)
ref_compare_url=$(sed "s/{base}/$base_branch/; s/{head}/$commit_hash/" <<< $compare_url_template)

EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64)

cat << $EOF
# Docker Build summary 🔨

**Source:** branch \`$current_ref\` -> [$repository@\`${commit_hash:0:7}\`]($source_url)

**Build type:** \`$build_type\`

**Image size:** $((`jq -r .Size <<< $meta` / 10**6))MB

## Image details

**Tags:**
$(jq -r '.RepoTags | map("* `\(.)`") | join("\n")' <<< $meta)

<details>
<summary><h3>Layers</h3></summary>

| Age | Size | Created by instruction |
| --------- | ------ | ---------------------- |
$(docker history --no-trunc --format "{{.CreatedSince}}\t{{.Size}}\t\`{{.CreatedBy}}\`\t{{.Comment}}" $IMAGE_NAME \
| grep 'buildkit.dockerfile' `# filter for layers created in this build process`\
| cut -f-3 `# yeet Comment column`\
| sed 's/ ago//' `# fix Layer age`\
| sed 's/ # buildkit//' `# remove buildkit comment from instructions`\
| sed 's/\$/\\$/g' `# escape variable and shell expansions`\
| sed 's/|/\\|/g' `# escape pipes so they don't interfere with column separators`\
| column -t -s$'\t' -o' | ' `# align columns and add separator`\
| sed 's/^/| /; s/$/ |/' `# add table row start and end pipes`)
</details>

<details>
<summary><h3>ENV</h3></summary>

| Variable | Value |
| -------- | -------- |
$(jq -r \
  '.Config.Env
  | map(
      split("=")
      | "\(.[0]) | `\(.[1] | gsub("\\s+"; " "))`"
    )
  | map("| \(.) |")
  | .[]' <<< $meta
)
</details>

<details>
<summary>Raw metadata</summary>

\`\`\`JSON
$meta
\`\`\`
</details>

## Build details
**Build trigger:** $push_forced_label $event_name \`$event_ref\`

<details>
<summary><code>github</code> context</summary>

\`\`\`JSON
$github_context_json
\`\`\`
</details>

### Source
**HEAD:** [$repository@\`${commit_hash:0:7}\`]($source_url) on branch [$current_ref]($ref_compare_url)

**Diff with previous HEAD:** $head_compare_url

#### New commits
$(jq -r 'map([
  "**Commit [`\(.id[0:7])`](\(.url)) by \(if .author.username then "@"+.author.username else .author.name end):**",
  .message,
  (if .committer.name != .author.name then "\n> <sub>**Committer:** \(.committer.name) <\(.committer.email)></sub>" else "" end),
  "<sub>**Timestamp:** \(.timestamp)</sub>"
] | map("> \(.)\n") | join("")) | join("\n")' <<< $new_commits_json)

### Job environment

#### \`vars\` context:
\`\`\`JSON
$vars_json
\`\`\`

#### \`env\` context:
\`\`\`JSON
$job_env_json
\`\`\`

$EOF
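
The script takes all of its inputs from environment variables set by the calling workflow; a hypothetical standalone invocation (all values are placeholders, and the output destination is presumably the job summary) looks like:

IMAGE_NAME="autogpt:latest" build_type="dev" repository="Significant-Gravitas/AutoGPT" \
current_ref="dev" commit_hash="0123abcdef" base_branch="master" \
compare_url_template="https://github.com/Significant-Gravitas/AutoGPT/compare/{base}...{head}" \
.github/workflows/scripts/docker-ci-summary.sh >> "$GITHUB_STEP_SUMMARY"
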
85
.github/workflows/scripts/docker-release-summary.sh
vendored
Executable file
@@ -0,0 +1,85 @@
#!/bin/bash
meta=$(docker image inspect "$IMAGE_NAME" | jq '.[0]')

EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64)

cat << $EOF
# Docker Release Build summary 🚀🔨

**Source:** $ref_type \`$current_ref\` -> [$repository@\`${commit_hash:0:7}\`]($source_url)

**Image size:** $((`jq -r .Size <<< $meta` / 10**6))MB

## Image details

**Tags:**
$(jq -r '.RepoTags | map("* `\(.)`") | join("\n")' <<< $meta)

<details>
<summary><h3>Layers</h3></summary>

| Age | Size | Created by instruction |
| --------- | ------ | ---------------------- |
$(docker history --no-trunc --format "{{.CreatedSince}}\t{{.Size}}\t\`{{.CreatedBy}}\`\t{{.Comment}}" $IMAGE_NAME \
  | grep 'buildkit.dockerfile' `# filter for layers created in this build process`\
  | cut -f-3 `# yeet Comment column`\
  | sed 's/ ago//' `# fix Layer age`\
  | sed 's/ # buildkit//' `# remove buildkit comment from instructions`\
  | sed 's/\$/\\$/g' `# escape variable and shell expansions`\
  | sed 's/|/\\|/g' `# escape pipes so they don't interfere with column separators`\
  | column -t -s$'\t' -o' | ' `# align columns and add separator`\
  | sed 's/^/| /; s/$/ |/' `# add table row start and end pipes`)

</details>

<details>
<summary><h3>ENV</h3></summary>

| Variable | Value |
| -------- | -------- |
$(jq -r \
  '.Config.Env
  | map(
      split("=")
      | "\(.[0]) | `\(.[1] | gsub("\\s+"; " "))`"
    )
  | map("| \(.) |")
  | .[]' <<< $meta
)

</details>

<details>
<summary>Raw metadata</summary>

\`\`\`JSON
$meta
\`\`\`
</details>

## Build details
**Build trigger:** $event_name \`$current_ref\`

| Parameter | Value |
| -------------- | ------------ |
| \`no_cache\` | \`$inputs_no_cache\` |

<details>
<summary><code>github</code> context</summary>

\`\`\`JSON
$github_context_json
\`\`\`
</details>

### Job environment

#### \`vars\` context:
\`\`\`JSON
$vars_json
\`\`\`

#### \`env\` context:
\`\`\`JSON
$job_env_json
\`\`\`

$EOF
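Both summary scripts share the jq program that turns the image's `Config.Env` list into Markdown table rows; isolated with a fabricated sample payload, the transformation looks like this:

```bash
# Sample input standing in for `docker image inspect` output (invented values):
echo '{"Config":{"Env":["PATH=/usr/local/bin","LANG=C.UTF-8"]}}' | jq -r '
  .Config.Env
  | map(split("=") | "\(.[0]) | `\(.[1] | gsub("\\s+"; " "))`")
  | map("| \(.) |")
  | .[]'
# Prints:
# | PATH | `/usr/local/bin` |
# | LANG | `C.UTF-8` |
```

Note that `split("=")` keeps only the first segment after the variable name, so a value that itself contains `=` would be truncated in the table — an acceptable trade-off for a human-readable summary.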
60
.github/workflows/scripts/get_package_version_from_lockfile.py
vendored
Normal file
@@ -0,0 +1,60 @@
#!/usr/bin/env python3
import sys

if sys.version_info < (3, 11):
    print("Python version 3.11 or higher required")
    sys.exit(1)

import tomllib


def get_package_version(package_name: str, lockfile_path: str) -> str | None:
    """Extract package version from poetry.lock file."""
    try:
        if lockfile_path == "-":
            data = tomllib.load(sys.stdin.buffer)
        else:
            with open(lockfile_path, "rb") as f:
                data = tomllib.load(f)
    except FileNotFoundError:
        print(f"Error: File '{lockfile_path}' not found", file=sys.stderr)
        sys.exit(1)
    except tomllib.TOMLDecodeError as e:
        print(f"Error parsing TOML file: {e}", file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        print(f"Error reading file: {e}", file=sys.stderr)
        sys.exit(1)

    # Look for the package in the packages list
    packages = data.get("package", [])
    for package in packages:
        if package.get("name", "").lower() == package_name.lower():
            return package.get("version")

    return None


def main():
    if len(sys.argv) not in (2, 3):
        print(
            "Usage: python get_package_version_from_lockfile.py <package name> [poetry.lock path]\n"
            "       cat poetry.lock | python get_package_version_from_lockfile.py <package name> -",
            file=sys.stderr,
        )
        sys.exit(1)

    package_name = sys.argv[1]
    lockfile_path = sys.argv[2] if len(sys.argv) == 3 else "poetry.lock"

    version = get_package_version(package_name, lockfile_path)

    if version:
        print(version)
    else:
        print(f"Package '{package_name}' not found in {lockfile_path}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
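The usage message above translates to these two invocation forms (package name and lockfile path chosen for illustration):

```bash
# Read a lockfile by path (the second argument defaults to ./poetry.lock):
python .github/workflows/scripts/get_package_version_from_lockfile.py \
    prisma autogpt_platform/backend/poetry.lock

# Or stream the lockfile over stdin, using "-" as the path:
cat autogpt_platform/backend/poetry.lock |
    python .github/workflows/scripts/get_package_version_from_lockfile.py prisma -
```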
181
.gitignore
vendored
Normal file
@@ -0,0 +1,181 @@
## Original ignores
.github_access_token
classic/original_autogpt/keys.py
classic/original_autogpt/*.json
auto_gpt_workspace/*
*.mpeg
.env
# Root .env files
/.env
azure.yaml
.vscode
.idea/*
auto-gpt.json
log.txt
log-ingestion.txt
/logs
*.log
*.mp3
mem.sqlite3
venvAutoGPT

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/
site/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.direnv/
.venv
env/
venv*/
ENV/
env.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/
llama-*
vicuna-*

# mac
.DS_Store

openai/

# news
CURRENT_BULLETIN.md

# AgBenchmark
classic/benchmark/agbenchmark/reports/

# Nodejs
package-lock.json


# Allow for locally private items
# private
pri*
# ignore
ig*
.github_access_token
LICENSE.rtf
autogpt_platform/backend/settings.py
/.auth
/autogpt_platform/frontend/.auth

*.ign.*
.test-contents
.claude/settings.local.json
/autogpt_platform/backend/logs
3
.gitmodules
vendored
Normal file
@@ -0,0 +1,3 @@
[submodule "classic/forge/tests/vcr_cassettes"]
	path = classic/forge/tests/vcr_cassettes
	url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
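Because the VCR cassettes live in a submodule, a plain clone leaves `classic/forge/tests/vcr_cassettes` empty; populating it is standard git usage:

```bash
# Fetch the cassettes submodule after a normal clone:
git submodule update --init classic/forge/tests/vcr_cassettes
# Or bring in all submodules at clone time:
git clone --recurse-submodules https://github.com/Significant-Gravitas/AutoGPT.git
```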
3
.pr_agent.toml
Normal file
@@ -0,0 +1,3 @@
[pr_code_suggestions]
commitable_code_suggestions=false
num_code_suggestions=0
278
.pre-commit-config.yaml
Normal file
@@ -0,0 +1,278 @@
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: check-added-large-files
        args: ["--maxkb=500"]
      - id: fix-byte-order-marker
      - id: check-case-conflict
      - id: check-merge-conflict
      - id: check-symlinks
      - id: debug-statements

  - repo: https://github.com/Yelp/detect-secrets
    rev: v1.5.0
    hooks:
      - id: detect-secrets
        name: Detect secrets
        description: Detects high entropy strings that are likely to be passwords.
        files: ^autogpt_platform/
        stages: [pre-push]

  - repo: local
    # For proper type checking, all dependencies need to be up-to-date.
    # It's also a good idea to check that poetry.lock is consistent with pyproject.toml.
    hooks:
      - id: poetry-install
        name: Check & Install dependencies - AutoGPT Platform - Backend
        alias: poetry-install-platform-backend
        entry: poetry -C autogpt_platform/backend install
        # include autogpt_libs source (since it's a path dependency)
        files: ^autogpt_platform/(backend|autogpt_libs)/poetry\.lock$
        types: [file]
        language: system
        pass_filenames: false

      - id: poetry-install
        name: Check & Install dependencies - AutoGPT Platform - Libs
        alias: poetry-install-platform-libs
        entry: poetry -C autogpt_platform/autogpt_libs install
        files: ^autogpt_platform/autogpt_libs/poetry\.lock$
        types: [file]
        language: system
        pass_filenames: false

      - id: poetry-install
        name: Check & Install dependencies - Classic - AutoGPT
        alias: poetry-install-classic-autogpt
        entry: poetry -C classic/original_autogpt install
        # include forge source (since it's a path dependency)
        files: ^classic/(original_autogpt|forge)/poetry\.lock$
        types: [file]
        language: system
        pass_filenames: false

      - id: poetry-install
        name: Check & Install dependencies - Classic - Forge
        alias: poetry-install-classic-forge
        entry: poetry -C classic/forge install
        files: ^classic/forge/poetry\.lock$
        types: [file]
        language: system
        pass_filenames: false

      - id: poetry-install
        name: Check & Install dependencies - Classic - Benchmark
        alias: poetry-install-classic-benchmark
        entry: poetry -C classic/benchmark install
        files: ^classic/benchmark/poetry\.lock$
        types: [file]
        language: system
        pass_filenames: false

  - repo: local
    # For proper type checking, Prisma client must be up-to-date.
    hooks:
      - id: prisma-generate
        name: Prisma Generate - AutoGPT Platform - Backend
        alias: prisma-generate-platform-backend
        entry: bash -c 'cd autogpt_platform/backend && poetry run prisma generate'
        # include everything that triggers poetry install + the prisma schema
        files: ^autogpt_platform/((backend|autogpt_libs)/poetry\.lock|backend/schema.prisma)$
        types: [file]
        language: system
        pass_filenames: false

  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.7.2
    hooks:
      - id: ruff
        name: Lint (Ruff) - AutoGPT Platform - Backend
        alias: ruff-lint-platform-backend
        files: ^autogpt_platform/backend/
        args: [--fix]

      - id: ruff
        name: Lint (Ruff) - AutoGPT Platform - Libs
        alias: ruff-lint-platform-libs
        files: ^autogpt_platform/autogpt_libs/
        args: [--fix]

      - id: ruff-format
        name: Format (Ruff) - AutoGPT Platform - Libs
        alias: ruff-lint-platform-libs
        files: ^autogpt_platform/autogpt_libs/

  - repo: local
    # isort needs the context of which packages are installed to function, so we
    # can't use a vendored isort pre-commit hook (which runs in its own isolated venv).
    hooks:
      - id: isort
        name: Lint (isort) - AutoGPT Platform - Backend
        alias: isort-platform-backend
        entry: poetry -P autogpt_platform/backend run isort -p backend
        files: ^autogpt_platform/backend/
        types: [file, python]
        language: system

      - id: isort
        name: Lint (isort) - Classic - AutoGPT
        alias: isort-classic-autogpt
        entry: poetry -P classic/original_autogpt run isort -p autogpt
        files: ^classic/original_autogpt/
        types: [file, python]
        language: system

      - id: isort
        name: Lint (isort) - Classic - Forge
        alias: isort-classic-forge
        entry: poetry -P classic/forge run isort -p forge
        files: ^classic/forge/
        types: [file, python]
        language: system

      - id: isort
        name: Lint (isort) - Classic - Benchmark
        alias: isort-classic-benchmark
        entry: poetry -P classic/benchmark run isort -p agbenchmark
        files: ^classic/benchmark/
        types: [file, python]
        language: system

  - repo: https://github.com/psf/black
    rev: 24.10.0
    # Black has sensible defaults, doesn't need package context, and ignores
    # everything in .gitignore, so it works fine without any config or arguments.
    hooks:
      - id: black
        name: Format (Black)

  - repo: https://github.com/PyCQA/flake8
    rev: 7.0.0
    # To have flake8 load the config of the individual subprojects, we have to call
    # them separately.
    hooks:
      - id: flake8
        name: Lint (Flake8) - Classic - AutoGPT
        alias: flake8-classic-autogpt
        files: ^classic/original_autogpt/(autogpt|scripts|tests)/
        args: [--config=classic/original_autogpt/.flake8]

      - id: flake8
        name: Lint (Flake8) - Classic - Forge
        alias: flake8-classic-forge
        files: ^classic/forge/(forge|tests)/
        args: [--config=classic/forge/.flake8]

      - id: flake8
        name: Lint (Flake8) - Classic - Benchmark
        alias: flake8-classic-benchmark
        files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
        args: [--config=classic/benchmark/.flake8]

  - repo: local
    hooks:
      - id: prettier
        name: Format (Prettier) - AutoGPT Platform - Frontend
        alias: format-platform-frontend
        entry: bash -c 'cd autogpt_platform/frontend && npx prettier --write $(echo "$@" | sed "s|autogpt_platform/frontend/||g")' --
        files: ^autogpt_platform/frontend/
        types: [file]
        language: system

  - repo: local
    # To have watertight type checking, we check *all* the files in an affected
    # project. To trigger on poetry.lock we also reset the file `types` filter.
    hooks:
      - id: pyright
        name: Typecheck - AutoGPT Platform - Backend
        alias: pyright-platform-backend
        entry: poetry -C autogpt_platform/backend run pyright
        # include forge source (since it's a path dependency) but exclude *_test.py files:
        files: ^autogpt_platform/(backend/((backend|test)/|(\w+\.py|poetry\.lock)$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
        types: [file]
        language: system
        pass_filenames: false

      - id: pyright
        name: Typecheck - AutoGPT Platform - Libs
        alias: pyright-platform-libs
        entry: poetry -C autogpt_platform/autogpt_libs run pyright
        files: ^autogpt_platform/autogpt_libs/(autogpt_libs/|poetry\.lock$)
        types: [file]
        language: system
        pass_filenames: false

      - id: pyright
        name: Typecheck - Classic - AutoGPT
        alias: pyright-classic-autogpt
        entry: poetry -C classic/original_autogpt run pyright
        # include forge source (since it's a path dependency) but exclude *_test.py files:
        files: ^(classic/original_autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
        types: [file]
        language: system
        pass_filenames: false

      - id: pyright
        name: Typecheck - Classic - Forge
        alias: pyright-classic-forge
        entry: poetry -C classic/forge run pyright
        files: ^classic/forge/(forge/|poetry\.lock$)
        types: [file]
        language: system
        pass_filenames: false

      - id: pyright
        name: Typecheck - Classic - Benchmark
        alias: pyright-classic-benchmark
        entry: poetry -C classic/benchmark run pyright
        files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
        types: [file]
        language: system
        pass_filenames: false

  - repo: local
    hooks:
      - id: tsc
        name: Typecheck - AutoGPT Platform - Frontend
        entry: bash -c 'cd autogpt_platform/frontend && pnpm types'
        files: ^autogpt_platform/frontend/
        types: [file]
        language: system
        pass_filenames: false

  # - repo: local
  #   hooks:
  #     - id: pytest
  #       name: Run tests - AutoGPT Platform - Backend
  #       alias: pytest-platform-backend
  #       entry: bash -c 'cd autogpt_platform/backend && poetry run pytest'
  #       # include autogpt_libs source (since it's a path dependency) but exclude *_test.py files:
  #       files: ^autogpt_platform/(backend/((backend|test)/|poetry\.lock$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
  #       language: system
  #       pass_filenames: false

  #     - id: pytest
  #       name: Run tests - Classic - AutoGPT (excl. slow tests)
  #       alias: pytest-classic-autogpt
  #       entry: bash -c 'cd classic/original_autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
  #       # include forge source (since it's a path dependency) but exclude *_test.py files:
  #       files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
  #       language: system
  #       pass_filenames: false

  #     - id: pytest
  #       name: Run tests - Classic - Forge (excl. slow tests)
  #       alias: pytest-classic-forge
  #       entry: bash -c 'cd classic/forge && poetry run pytest --cov=forge -m "not slow"'
  #       files: ^classic/forge/(forge/|tests/|poetry\.lock$)
  #       language: system
  #       pass_filenames: false

  #     - id: pytest
  #       name: Run tests - Classic - Benchmark
  #       alias: pytest-classic-benchmark
  #       entry: bash -c 'cd classic/benchmark && poetry run pytest --cov=benchmark'
  #       files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
  #       language: system
  #       pass_filenames: false
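Two practical notes on this config: the detect-secrets hook is bound to the pre-push stage, so it only runs if the pre-push hook type is installed, and the `alias` fields let you invoke a single project-specific hook by name. With standard pre-commit commands:

```bash
# Install both hook types this config uses:
pre-commit install --hook-type pre-commit --hook-type pre-push
# Run every hook once across the whole repo:
pre-commit run --all-files
# Run one hook via its alias, e.g. the backend Ruff lint:
pre-commit run ruff-lint-platform-backend --all-files
```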
62
.vscode/all-projects.code-workspace
vendored
Normal file
@@ -0,0 +1,62 @@
{
  "folders": [
    {
      "name": "frontend",
      "path": "../autogpt_platform/frontend"
    },
    {
      "name": "backend",
      "path": "../autogpt_platform/backend"
    },
    {
      "name": "market",
      "path": "../autogpt_platform/market"
    },
    {
      "name": "lib",
      "path": "../autogpt_platform/autogpt_libs"
    },
    {
      "name": "infra",
      "path": "../autogpt_platform/infra"
    },
    {
      "name": "docs",
      "path": "../docs"
    },

    {
      "name": "classic - autogpt",
      "path": "../classic/original_autogpt"
    },
    {
      "name": "classic - benchmark",
      "path": "../classic/benchmark"
    },
    {
      "name": "classic - forge",
      "path": "../classic/forge"
    },
    {
      "name": "classic - frontend",
      "path": "../classic/frontend"
    },
    {
      "name": "[root]",
      "path": ".."
    }
  ],
  "settings": {
    "python.analysis.typeCheckingMode": "basic"
  },
  "extensions": {
    "recommendations": [
      "charliermarsh.ruff",
      "dart-code.flutter",
      "ms-python.black-formatter",
      "ms-python.vscode-pylance",
      "prisma.prisma",
      "qwtel.sqlite-viewer"
    ]
  }
}
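To get all of these folders in one window, open the workspace file itself rather than the repository root (standard VS Code CLI):

```bash
code .vscode/all-projects.code-workspace
```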
67
.vscode/launch.json
vendored
Normal file
@@ -0,0 +1,67 @@
{
  "version": "0.2.0",
  "configurations": [
    {
      "name": "Frontend: Server Side",
      "type": "node-terminal",
      "request": "launch",
      "cwd": "${workspaceFolder}/autogpt_platform/frontend",
      "command": "pnpm dev"
    },
    {
      "name": "Frontend: Client Side",
      "type": "msedge",
      "request": "launch",
      "url": "http://localhost:3000"
    },
    {
      "name": "Frontend: Full Stack",
      "type": "node-terminal",
      "request": "launch",
      "command": "pnpm dev",
      "cwd": "${workspaceFolder}/autogpt_platform/frontend",
      "serverReadyAction": {
        "pattern": "- Local:.+(https?://.+)",
        "uriFormat": "%s",
        "action": "debugWithChrome"
      }
    },
    {
      "name": "Backend",
      "type": "debugpy",
      "request": "launch",
      "module": "backend.app",
      "env": {
        "OBJC_DISABLE_INITIALIZE_FORK_SAFETY": "YES"
      },
      "envFile": "${workspaceFolder}/backend/.env",
      "justMyCode": false,
      "cwd": "${workspaceFolder}/autogpt_platform/backend"
    },
    {
      "name": "Marketplace",
      "type": "debugpy",
      "request": "launch",
      "module": "autogpt_platform.market.main",
      "env": {
        "ENV": "dev"
      },
      "envFile": "${workspaceFolder}/market/.env",
      "justMyCode": false,
      "cwd": "${workspaceFolder}/market"
    }
  ],
  "compounds": [
    {
      "name": "Everything",
      "configurations": ["Backend", "Frontend: Full Stack"],
      // "preLaunchTask": "${defaultBuildTask}",
      "stopAll": true,
      "presentation": {
        "hidden": false,
        "order": 0
      }
    }
  ]
}
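For reference, the "Backend" configuration amounts to launching the `backend.app` module from the backend folder under debugpy; a rough command-line equivalent, minus the attached debugger, would be:

```bash
# Approximates the "Backend" launch config (module and cwd from launch.json;
# the poetry wrapper is an assumption about the local environment):
cd autogpt_platform/backend
poetry run python -m backend.app
```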
53
AGENTS.md
Normal file
@@ -0,0 +1,53 @@
# AutoGPT Platform Contribution Guide

This guide provides context for Codex when updating the **autogpt_platform** folder.

## Directory overview

- `autogpt_platform/backend` – FastAPI based backend service.
- `autogpt_platform/autogpt_libs` – Shared Python libraries.
- `autogpt_platform/frontend` – Next.js + Typescript frontend.
- `autogpt_platform/docker-compose.yml` – development stack.

See `docs/content/platform/getting-started.md` for setup instructions.

## Code style

- Format Python code with `poetry run format`.
- Format frontend code using `pnpm format`.

## Testing

- Backend: `poetry run test` (runs pytest with a docker based postgres + prisma).
- Frontend: `pnpm test` or `pnpm test-ui` for Playwright tests. See `docs/content/platform/contributing/tests.md` for tips.

Always run the relevant linters and tests before committing.
Use conventional commit messages for all commits (e.g. `feat(backend): add API`).
Types:
- feat
- fix
- refactor
- ci
- dx (developer experience)
Scopes:
- platform
- platform/library
- platform/marketplace
- backend
- backend/executor
- frontend
- frontend/library
- frontend/marketplace
- blocks

## Pull requests

- Use the template in `.github/PULL_REQUEST_TEMPLATE.md`.
- Rely on the pre-commit checks for linting and formatting.
- Fill out the **Changes** section and the checklist.
- Use conventional commit titles with a scope (e.g. `feat(frontend): add feature`).
- Keep out-of-scope changes under 20% of the PR.
- Ensure PR descriptions are complete.
- For changes touching `data/*.py`, validate user ID checks or explain why not needed.
- If adding protected frontend routes, update `frontend/lib/supabase/middleware.ts`.
- Use the Linear ticket branch structure if one is given (e.g. `codex/open-1668-resume-dropped-runs`).
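Combining the commit types and scopes listed in AGENTS.md, typical commit commands would look like this (the messages are invented examples, not taken from the repository history):

```bash
git commit -m "fix(backend/executor): resume dropped runs after restart"
git commit -m "dx(platform): document the local docker-compose workflow"
```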
21
CITATION.cff
Normal file
@@ -0,0 +1,21 @@
# This CITATION.cff file was generated with cffinit.
# Visit https://bit.ly/cffinit to generate yours today!

cff-version: 1.2.0
title: AutoGPT
message: >-
  If you use this software, please cite it using the
  metadata from this file.
type: software
authors:
  - name: Significant Gravitas
    website: 'https://agpt.co'
repository-code: 'https://github.com/Significant-Gravitas/AutoGPT'
url: 'https://agpt.co'
abstract: >-
  A collection of tools and experimental open-source attempts to make GPT-4 fully
  autonomous.
keywords:
  - AI
  - Agent
license: MIT
40
CODE_OF_CONDUCT.md
Normal file
@@ -0,0 +1,40 @@
# Code of Conduct for AutoGPT

## 1. Purpose

The purpose of this Code of Conduct is to provide guidelines for contributors to the AutoGPT projects on GitHub. We aim to create a positive and inclusive environment where all participants can contribute and collaborate effectively. By participating in this project, you agree to abide by this Code of Conduct.

## 2. Scope

This Code of Conduct applies to all contributors, maintainers, and users of the AutoGPT project. It extends to all project spaces, including but not limited to issues, pull requests, code reviews, comments, and other forms of communication within the project.

## 3. Our Standards

We encourage the following behavior:

* Being respectful and considerate to others
* Actively seeking diverse perspectives
* Providing constructive feedback and assistance
* Demonstrating empathy and understanding

We discourage the following behavior:

* Harassment or discrimination of any kind
* Disrespectful, offensive, or inappropriate language or content
* Personal attacks or insults
* Unwarranted criticism or negativity

## 4. Reporting and Enforcement

If you witness or experience any violations of this Code of Conduct, please report them to the project maintainers by email or other appropriate means. The maintainers will investigate and take appropriate action, which may include warnings, temporary or permanent bans, or other measures as necessary.

Maintainers are responsible for ensuring compliance with this Code of Conduct and may take action to address any violations.

## 5. Acknowledgements

This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org/version/2/0/code_of_conduct.html).

## 6. Contact

If you have any questions or concerns, please contact the project maintainers on Discord:
https://discord.gg/autogpt
38
CONTRIBUTING.md
Normal file
@@ -0,0 +1,38 @@
# AutoGPT Contribution Guide
If you are reading this, you are probably looking for the full **[contribution guide]**,
which is part of our [wiki].

[contribution guide]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing
[wiki]: https://github.com/Significant-Gravitas/AutoGPT/wiki
[roadmap]: https://github.com/Significant-Gravitas/AutoGPT/discussions/6971
[kanban board]: https://github.com/orgs/Significant-Gravitas/projects/1

## Contributing to the AutoGPT Platform Folder
All contributions to [the autogpt_platform folder](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform) will be under our [Contribution License Agreement](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform/Contributor%20License%20Agreement%20(CLA).md). By making a pull request contributing to this folder, you agree to the terms of our CLA for your contribution. All contributions to other folders will be under the MIT license.

## In short
1. Avoid duplicate work, issues, PRs etc.
2. We encourage you to collaborate with fellow community members on some of our bigger
   [todo's][roadmap]!
   * We highly recommend posting your idea and discussing it in the [dev channel].
3. Create a draft PR when starting work on bigger changes.
4. Adhere to the [Code Guidelines].
5. Clearly explain your changes when submitting a PR.
6. Don't submit broken code: test/validate your changes.
7. Avoid making unnecessary changes, especially if they're purely based on your personal
   preferences. Doing so is the maintainers' job. ;-)
8. Please also consider contributing something other than code; see the
   [contribution guide] for options.

[dev channel]: https://discord.com/channels/1092243196446249134/1095817829405704305
[code guidelines]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing#code-guidelines

If you wish to get involved with the project (beyond just contributing PRs), please read the
wiki page about [Catalyzing](https://github.com/Significant-Gravitas/AutoGPT/wiki/Catalyzing).

In fact, why not just look through the whole wiki (it's only a few pages) and
hop on our Discord. See you there! :-)

❤️ & 🔆
The team @ AutoGPT
https://discord.gg/autogpt
220
LICENSE
Normal file
@@ -0,0 +1,220 @@
All portions of this repository are under one of two licenses.

- Everything inside the autogpt_platform folder is under the Polyform Shield License.
- Everything outside the autogpt_platform folder is under the MIT License.

More info:

**Polyform Shield License:**
Code and content within the `autogpt_platform` folder is licensed under the Polyform Shield License. This new project is our in-development platform for building, deploying and managing agents.
Read more about this effort here: https://agpt.co/blog/introducing-the-autogpt-platform

**MIT License:**
All other portions of the AutoGPT repository (i.e., everything outside the `autogpt_platform` folder) are licensed under the MIT License. This includes:
- The original, stand-alone AutoGPT Agent
- Forge: https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/forge
- AG Benchmark: https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/benchmark
- AutoGPT Classic GUI: https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/frontend

We also publish additional work under the MIT Licence in other repositories, such as GravitasML (https://github.com/Significant-Gravitas/gravitasml), which is developed for and used in the AutoGPT Platform, and our [Code Ability](https://github.com/Significant-Gravitas/AutoGPT-Code-Ability) project.

Both licences are available to read below:

=====================================================
-----------------------------------------------------
=====================================================

# PolyForm Shield License 1.0.0

<https://polyformproject.org/licenses/shield/1.0.0>

## Acceptance

In order to get any license under these terms, you must agree
to them as both strict obligations and conditions to all
your licenses.

## Copyright License

The licensor grants you a copyright license for the
software to do everything you might do with the software
that would otherwise infringe the licensor's copyright
in it for any permitted purpose. However, you may
only distribute the software according to [Distribution
License](#distribution-license) and make changes or new works
based on the software according to [Changes and New Works
License](#changes-and-new-works-license).

## Distribution License

The licensor grants you an additional copyright license
to distribute copies of the software. Your license
to distribute covers distributing the software with
changes and new works permitted by [Changes and New Works
License](#changes-and-new-works-license).

## Notices

You must ensure that anyone who gets a copy of any part of
the software from you also gets a copy of these terms or the
URL for them above, as well as copies of any plain-text lines
beginning with `Required Notice:` that the licensor provided
with the software. For example:

> Required Notice: Copyright Yoyodyne, Inc. (http://example.com)

## Changes and New Works License

The licensor grants you an additional copyright license to
make changes and new works based on the software for any
permitted purpose.

## Patent License

The licensor grants you a patent license for the software that
covers patent claims the licensor can license, or becomes able
to license, that you would infringe by using the software.

## Noncompete

Any purpose is a permitted purpose, except for providing any
product that competes with the software or any product the
licensor or any of its affiliates provides using the software.

## Competition

Goods and services compete even when they provide functionality
through different kinds of interfaces or for different technical
platforms. Applications can compete with services, libraries
with plugins, frameworks with development tools, and so on,
even if they're written in different programming languages
or for different computer architectures. Goods and services
compete even when provided free of charge. If you market a
product as a practical substitute for the software or another
product, it definitely competes.

## New Products

If you are using the software to provide a product that does
not compete, but the licensor or any of its affiliates brings
your product into competition by providing a new version of
the software or another product using the software, you may
continue using versions of the software available under these
terms beforehand to provide your competing product, but not
any later versions.

## Discontinued Products

You may begin using the software to compete with a product
or service that the licensor or any of its affiliates has
stopped providing, unless the licensor includes a plain-text
line beginning with `Licensor Line of Business:` with the
software that mentions that line of business. For example:

> Licensor Line of Business: YoyodyneCMS Content Management
System (http://example.com/cms)

## Sales of Business

If the licensor or any of its affiliates sells a line of
business developing the software or using the software
to provide a product, the buyer can also enforce
[Noncompete](#noncompete) for that product.

## Fair Use

You may have "fair use" rights for the software under the
law. These terms do not limit them.

## No Other Rights

These terms do not allow you to sublicense or transfer any of
your licenses to anyone else, or prevent the licensor from
granting licenses to anyone else. These terms do not imply
any other licenses.

## Patent Defense

If you make any written claim that the software infringes or
contributes to infringement of any patent, your patent license
for the software granted under these terms ends immediately. If
your company makes such a claim, your patent license ends
immediately for work on behalf of your company.

## Violations

The first time you are notified in writing that you have
violated any of these terms, or done anything with the software
not covered by your licenses, your licenses can nonetheless
continue if you come into full compliance with these terms,
and take practical steps to correct past violations, within
32 days of receiving notice. Otherwise, all your licenses
end immediately.

## No Liability

***As far as the law allows, the software comes as is, without
any warranty or condition, and the licensor will not be liable
to you for any damages arising out of these terms or the use
or nature of the software, under any kind of legal claim.***

## Definitions

The **licensor** is the individual or entity offering these
terms, and the **software** is the software the licensor makes
available under these terms.

A **product** can be a good or service, or a combination
of them.

**You** refers to the individual or entity agreeing to these
terms.

**Your company** is any legal entity, sole proprietorship,
or other kind of organization that you work for, plus all
its affiliates.

**Affiliates** means the other organizations than an
organization has control over, is under the control of, or is
under common control with.

**Control** means ownership of substantially all the assets of
an entity, or the power to direct its management and policies
by vote, contract, or otherwise. Control can be direct or
indirect.

**Your licenses** are all the licenses granted to you for the
software under these terms.

**Use** means anything you do with the software requiring one
of your licenses.

=====================================================
-----------------------------------------------------
=====================================================

MIT License


Copyright (c) 2023 Toran Bruce Richards


Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:


The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.


THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
238
README.md
Normal file
@@ -0,0 +1,238 @@
# AutoGPT: Build, Deploy, and Run AI Agents

[](https://discord.gg/autogpt)   
[](https://twitter.com/Auto_GPT)   

<!-- Keep these links. Translations will automatically update with the README. -->
[Deutsch](https://zdoc.app/de/Significant-Gravitas/AutoGPT) |
[Español](https://zdoc.app/es/Significant-Gravitas/AutoGPT) |
[français](https://zdoc.app/fr/Significant-Gravitas/AutoGPT) |
[日本語](https://zdoc.app/ja/Significant-Gravitas/AutoGPT) |
[한국어](https://zdoc.app/ko/Significant-Gravitas/AutoGPT) |
[Português](https://zdoc.app/pt/Significant-Gravitas/AutoGPT) |
[Русский](https://zdoc.app/ru/Significant-Gravitas/AutoGPT) |
[中文](https://zdoc.app/zh/Significant-Gravitas/AutoGPT)

**AutoGPT** is a powerful platform that allows you to create, deploy, and manage continuous AI agents that automate complex workflows.

## Hosting Options
- Download to self-host (Free!)
- [Join the Waitlist](https://bit.ly/3ZDijAI) for the cloud-hosted beta (Closed Beta - Public release Coming Soon!)

## How to Self-Host the AutoGPT Platform
> [!NOTE]
> Setting up and hosting the AutoGPT Platform yourself is a technical process.
> If you'd rather something that just works, we recommend [joining the waitlist](https://bit.ly/3ZDijAI) for the cloud-hosted beta.

### System Requirements

Before proceeding with the installation, ensure your system meets the following requirements:

#### Hardware Requirements
- CPU: 4+ cores recommended
- RAM: Minimum 8GB, 16GB recommended
- Storage: At least 10GB of free space

#### Software Requirements
- Operating Systems:
  - Linux (Ubuntu 20.04 or newer recommended)
  - macOS (10.15 or newer)
  - Windows 10/11 with WSL2
- Required Software (with minimum versions):
  - Docker Engine (20.10.0 or newer)
  - Docker Compose (2.0.0 or newer)
  - Git (2.30 or newer)
  - Node.js (16.x or newer)
  - npm (8.x or newer)
  - VSCode (1.60 or newer) or any modern code editor

#### Network Requirements
- Stable internet connection
- Access to required ports (will be configured in Docker)
- Ability to make outbound HTTPS connections

### Updated Setup Instructions:
We've moved to a fully maintained and regularly updated documentation site.

👉 [Follow the official self-hosting guide here](https://docs.agpt.co/platform/getting-started/)


This tutorial assumes you have Docker, VSCode, git and npm installed.

---

#### ⚡ Quick Setup with One-Line Script (Recommended for Local Hosting)

Skip the manual steps and get started in minutes using our automatic setup script.

For macOS/Linux:
```
curl -fsSL https://setup.agpt.co/install.sh -o install.sh && bash install.sh
```

For Windows (PowerShell):
```
powershell -c "iwr https://setup.agpt.co/install.bat -o install.bat; ./install.bat"
```

This will install dependencies, configure Docker, and launch your local instance — all in one go.

### 🧱 AutoGPT Frontend

The AutoGPT frontend is where users interact with our powerful AI automation platform. It offers multiple ways to engage with and leverage our AI agents. This is the interface where you'll bring your AI automation ideas to life:

**Agent Builder:** For those who want to customize, our intuitive, low-code interface allows you to design and configure your own AI agents.

**Workflow Management:** Build, modify, and optimize your automation workflows with ease. You build your agent by connecting blocks, where each block performs a single action.

**Deployment Controls:** Manage the lifecycle of your agents, from testing to production.

**Ready-to-Use Agents:** Don't want to build? Simply select from our library of pre-configured agents and put them to work immediately.

**Agent Interaction:** Whether you've built your own or are using pre-configured agents, easily run and interact with them through our user-friendly interface.

**Monitoring and Analytics:** Keep track of your agents' performance and gain insights to continually improve your automation processes.

[Read this guide](https://docs.agpt.co/platform/new_blocks/) to learn how to build your own custom blocks.

### 💽 AutoGPT Server

The AutoGPT Server is the powerhouse of our platform. This is where your agents run. Once deployed, agents can be triggered by external sources and can operate continuously. It contains all the essential components that make AutoGPT run smoothly.

**Source Code:** The core logic that drives our agents and automation processes.

**Infrastructure:** Robust systems that ensure reliable and scalable performance.

**Marketplace:** A comprehensive marketplace where you can find and deploy a wide range of pre-built agents.

### 🐙 Example Agents

Here are two examples of what you can do with AutoGPT:

1. **Generate Viral Videos from Trending Topics**
   - This agent reads topics on Reddit.
   - It identifies trending topics.
   - It then automatically creates a short-form video based on the content.

2. **Identify Top Quotes from Videos for Social Media**
   - This agent subscribes to your YouTube channel.
   - When you post a new video, it transcribes it.
   - It uses AI to identify the most impactful quotes to generate a summary.
   - Then, it writes a post to automatically publish to your social media.

These examples show just a glimpse of what you can achieve with AutoGPT! You can create customized workflows to build agents for any use case.

---

### **License Overview:**

🛡️ **Polyform Shield License:**
All code and content within the `autogpt_platform` folder is licensed under the Polyform Shield License. This new project is our in-development platform for building, deploying and managing agents.</br>_[Read more about this effort](https://agpt.co/blog/introducing-the-autogpt-platform)_

🦉 **MIT License:**
All other portions of the AutoGPT repository (i.e., everything outside the `autogpt_platform` folder) are licensed under the MIT License. This includes the original stand-alone AutoGPT Agent, along with projects such as [Forge](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/forge), [agbenchmark](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/benchmark) and the [AutoGPT Classic GUI](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/frontend).</br>We also publish additional work under the MIT Licence in other repositories, such as [GravitasML](https://github.com/Significant-Gravitas/gravitasml) which is developed for and used in the AutoGPT Platform. See also our MIT Licenced [Code Ability](https://github.com/Significant-Gravitas/AutoGPT-Code-Ability) project.

---
### Mission
Our mission is to provide the tools, so that you can focus on what matters:

- 🏗️ **Building** - Lay the foundation for something amazing.
- 🧪 **Testing** - Fine-tune your agent to perfection.
- 🤝 **Delegating** - Let AI work for you, and have your ideas come to life.

Be part of the revolution! **AutoGPT** is here to stay, at the forefront of AI innovation.

**📖 [Documentation](https://docs.agpt.co)** | **🚀 [Contributing](CONTRIBUTING.md)**

---
## 🤖 AutoGPT Classic
> Below is information about the classic version of AutoGPT.

**🛠️ [Build your own Agent - Quickstart](classic/FORGE-QUICKSTART.md)**

### 🏗️ Forge

**Forge your own agent!** – Forge is a ready-to-go toolkit to build your own agent application. It handles most of the boilerplate code, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from [`forge`](/classic/forge/) can also be used individually to speed up development and reduce boilerplate in your agent project.

🚀 [**Getting Started with Forge**](https://github.com/Significant-Gravitas/AutoGPT/blob/master/classic/forge/tutorials/001_getting_started.md) –
This guide will walk you through the process of creating your own agent and using the benchmark and user interface.

📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/forge) about Forge

### 🎯 Benchmark

**Measure your agent's performance!** The `agbenchmark` can be used with any agent that supports the agent protocol, and the integration with the project's [CLI] makes it even easier to use with AutoGPT and forge-based agents. The benchmark offers a stringent testing environment. Our framework allows for autonomous, objective performance evaluations, ensuring your agents are primed for real-world action.

<!-- TODO: insert visual demonstrating the benchmark -->

📦 [`agbenchmark`](https://pypi.org/project/agbenchmark/) on Pypi | 📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/benchmark) about the Benchmark

### 💻 UI

**Makes agents easy to use!** The `frontend` gives you a user-friendly interface to control and monitor your agents. It connects to agents through the [agent protocol](#-agent-protocol), ensuring compatibility with many agents from both inside and outside of our ecosystem.

<!-- TODO: insert screenshot of front end -->

The frontend works out-of-the-box with all agents in the repo. Just use the [CLI] to run your agent of choice!

📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/frontend) about the Frontend

### ⌨️ CLI

[CLI]: #-cli

To make it as easy as possible to use all of the tools offered by the repository, a CLI is included at the root of the repo:

```shell
$ ./run
Usage: cli.py [OPTIONS] COMMAND [ARGS]...

Options:
  --help  Show this message and exit.

Commands:
  agent      Commands to create, start and stop agents
  benchmark  Commands to start the benchmark and list tests and categories
  setup      Installs dependencies needed for your system.
```

Just clone the repo, install dependencies with `./run setup`, and you should be good to go!

## 🤔 Questions? Problems? Suggestions?

### Get help - [Discord 💬](https://discord.gg/autogpt)

[](https://discord.gg/autogpt)

To report a bug or request a feature, create a [GitHub Issue](https://github.com/Significant-Gravitas/AutoGPT/issues/new/choose). Please ensure someone else hasn't created an issue for the same topic.

## 🤝 Sister projects

### 🔄 Agent Protocol

To maintain a uniform standard and ensure seamless compatibility with many current and future applications, AutoGPT employs the [agent protocol](https://agentprotocol.ai/) standard by the AI Engineer Foundation. This standardizes the communication pathways from your agent to the frontend and benchmark.

---

## Stars stats

<p align="center">
  <a href="https://star-history.com/#Significant-Gravitas/AutoGPT">
    <picture>
      <source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=Significant-Gravitas/AutoGPT&type=Date&theme=dark" />
      <source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=Significant-Gravitas/AutoGPT&type=Date" />
      <img alt="Star History Chart" src="https://api.star-history.com/svg?repos=Significant-Gravitas/AutoGPT&type=Date" />
    </picture>
  </a>
</p>


## ⚡ Contributors

<a href="https://github.com/Significant-Gravitas/AutoGPT/graphs/contributors" alt="View Contributors">
  <img src="https://contrib.rocks/image?repo=Significant-Gravitas/AutoGPT&max=1000&columns=10" alt="Contributors" />
</a>
48
SECURITY.md
Normal file
@@ -0,0 +1,48 @@
# Security Policy

## Reporting Security Issues

We take the security of our project seriously. If you believe you have found a security vulnerability, please report it to us privately. **Please do not report security vulnerabilities through public GitHub issues, discussions, or pull requests.**

> **Important Note**: Any code within the `classic/` folder is considered legacy, unsupported, and out of scope for security reports. We will not address security vulnerabilities in this deprecated code.

Instead, please report them via:
- [GitHub Security Advisory](https://github.com/Significant-Gravitas/AutoGPT/security/advisories/new)
<!--- [Huntr.dev](https://huntr.com/repos/significant-gravitas/autogpt) - where you may be eligible for a bounty-->

### Reporting Process
1. **Submit Report**: Use one of the above channels to submit your report.
2. **Response Time**: Our team will acknowledge receipt of your report within 14 business days.
3. **Collaboration**: We will collaborate with you to understand and validate the issue.
4. **Resolution**: We will work on a fix and coordinate the release process.

### Disclosure Policy
- Please provide detailed reports with reproducible steps.
- Include the version/commit hash where you discovered the vulnerability.
- Allow us a 90-day security fix window before any public disclosure.
- After a patch is released, allow 30 days for users to update before public disclosure (for a total of at most 120 days between the initial report and public disclosure).
- Share any potential mitigations or workarounds if known.

## Supported Versions
Only the following versions are eligible for security updates:

| Version | Supported |
|---------|-----------|
| Latest release on master branch | ✅ |
| Development commits (pre-master) | ✅ |
| Classic folder (deprecated) | ❌ |
| All other versions | ❌ |

## Security Best Practices
When using this project:
1. Always use the latest stable version
2. Review security advisories before updating
3. Follow our security documentation and guidelines
4. Keep your dependencies up to date
5. Do not use code from the `classic/` folder as it is deprecated and unsupported

## Past Security Advisories
For a list of past security advisories, please visit our [Security Advisory Page](https://github.com/Significant-Gravitas/AutoGPT/security/advisories) and [Huntr Disclosures Page](https://huntr.com/repos/significant-gravitas/autogpt).

---
Last updated: November 2024
BIN
assets/gpt_dark_RGB.icns
Normal file
Binary file not shown.
BIN
assets/gpt_dark_RGB.ico
Normal file
Binary file not shown.
After Width: | Height: | Size: 1.1 MiB |
BIN
assets/gpt_dark_RGB.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 49 KiB |
123
autogpt_platform/.env.default
Normal file
@@ -0,0 +1,123 @@
|
||||
############
|
||||
# Secrets
|
||||
# YOU MUST CHANGE THESE BEFORE GOING INTO PRODUCTION
|
||||
############
|
||||
|
||||
POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password
|
||||
JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long
|
||||
ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE
|
||||
SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
|
||||
DASHBOARD_USERNAME=supabase
|
||||
DASHBOARD_PASSWORD=this_password_is_insecure_and_should_be_updated
|
||||
SECRET_KEY_BASE=UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
|
||||
VAULT_ENC_KEY=your-encryption-key-32-chars-min
|
||||
|
||||
|
||||
############
|
||||
# Database - You can change these to any PostgreSQL database that has logical replication enabled.
|
||||
############
|
||||
|
||||
POSTGRES_HOST=db
|
||||
POSTGRES_DB=postgres
|
||||
POSTGRES_PORT=5432
|
||||
# default user is postgres
|
||||
|
||||
|
||||
############
|
||||
# Supavisor -- Database pooler
|
||||
############
|
||||
POOLER_PROXY_PORT_TRANSACTION=6543
|
||||
POOLER_DEFAULT_POOL_SIZE=20
|
||||
POOLER_MAX_CLIENT_CONN=100
|
||||
POOLER_TENANT_ID=your-tenant-id
|
||||
|
||||
|
||||
############
|
||||
# API Proxy - Configuration for the Kong Reverse proxy.
|
||||
############
|
||||
|
||||
KONG_HTTP_PORT=8000
|
||||
KONG_HTTPS_PORT=8443
|
||||
|
||||
|
||||
############
|
||||
# API - Configuration for PostgREST.
|
||||
############
|
||||
|
||||
PGRST_DB_SCHEMAS=public,storage,graphql_public
|
||||
|
||||
|
||||
############
|
||||
# Auth - Configuration for the GoTrue authentication server.
|
||||
############
|
||||
|
||||
## General
|
||||
SITE_URL=http://localhost:3000
|
||||
ADDITIONAL_REDIRECT_URLS=
|
||||
JWT_EXPIRY=3600
|
||||
DISABLE_SIGNUP=false
|
||||
API_EXTERNAL_URL=http://localhost:8000
|
||||
|
||||
## Mailer Config
|
||||
MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify"
|
||||
MAILER_URLPATHS_INVITE="/auth/v1/verify"
|
||||
MAILER_URLPATHS_RECOVERY="/auth/v1/verify"
|
||||
MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify"
|
||||
|
||||
## Email auth
|
||||
ENABLE_EMAIL_SIGNUP=true
|
||||
ENABLE_EMAIL_AUTOCONFIRM=false
|
||||
SMTP_ADMIN_EMAIL=admin@example.com
|
||||
SMTP_HOST=supabase-mail
|
||||
SMTP_PORT=2500
|
||||
SMTP_USER=fake_mail_user
|
||||
SMTP_PASS=fake_mail_password
|
||||
SMTP_SENDER_NAME=fake_sender
|
||||
ENABLE_ANONYMOUS_USERS=false
|
||||
|
||||
## Phone auth
|
||||
ENABLE_PHONE_SIGNUP=true
|
||||
ENABLE_PHONE_AUTOCONFIRM=true
|
||||
|
||||
|
||||
############
|
||||
# Studio - Configuration for the Dashboard
|
||||
############
|
||||
|
||||
STUDIO_DEFAULT_ORGANIZATION=Default Organization
|
||||
STUDIO_DEFAULT_PROJECT=Default Project
|
||||
|
||||
STUDIO_PORT=3000
|
||||
# replace if you intend to use Studio outside of localhost
|
||||
SUPABASE_PUBLIC_URL=http://localhost:8000
|
||||
|
||||
# Enable webp support
|
||||
IMGPROXY_ENABLE_WEBP_DETECTION=true
|
||||
|
||||
# Add your OpenAI API key to enable SQL Editor Assistant
|
||||
OPENAI_API_KEY=
|
||||
|
||||
|
||||
############
|
||||
# Functions - Configuration for Functions
|
||||
############
|
||||
# NOTE: VERIFY_JWT applies to all functions. Per-function VERIFY_JWT is not supported yet.
|
||||
FUNCTIONS_VERIFY_JWT=false
|
||||
|
||||
|
||||
############
|
||||
# Logs - Configuration for Logflare
|
||||
# Please refer to https://supabase.com/docs/reference/self-hosting-analytics/introduction
|
||||
############
|
||||
|
||||
LOGFLARE_LOGGER_BACKEND_API_KEY=your-super-secret-and-long-logflare-key
|
||||
|
||||
# Change vector.toml sinks to reflect this change
|
||||
LOGFLARE_API_KEY=your-super-secret-and-long-logflare-key
|
||||
|
||||
# Docker socket location - this value will differ depending on your OS
|
||||
DOCKER_SOCKET_LOCATION=/var/run/docker.sock
|
||||
|
||||
# Google Cloud Project details
|
||||
GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID
|
||||
GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER
|
||||
2
autogpt_platform/.gitignore
vendored
Normal file
2
autogpt_platform/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
*.ignore.*
|
||||
*.ign.*
|
||||
275
autogpt_platform/CLAUDE.md
Normal file
275
autogpt_platform/CLAUDE.md
Normal file
@@ -0,0 +1,275 @@
|
||||
# CLAUDE.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
|
||||
## Repository Overview
|
||||
|
||||
AutoGPT Platform is a monorepo containing:
|
||||
|
||||
- **Backend** (`/backend`): Python FastAPI server with async support
|
||||
- **Frontend** (`/frontend`): Next.js React application
|
||||
- **Shared Libraries** (`/autogpt_libs`): Common Python utilities
|
||||
|
||||
## Essential Commands
|
||||
|
||||
### Backend Development
|
||||
|
||||
```bash
|
||||
# Install dependencies
|
||||
cd backend && poetry install
|
||||
|
||||
# Run database migrations
|
||||
poetry run prisma migrate dev
|
||||
|
||||
# Start all services (database, redis, rabbitmq, clamav)
|
||||
docker compose up -d
|
||||
|
||||
# Run the backend server
|
||||
poetry run serve
|
||||
|
||||
# Run tests
|
||||
poetry run test
|
||||
|
||||
# Run specific test
|
||||
poetry run pytest path/to/test_file.py::test_function_name
|
||||
|
||||
# Run block tests (tests that validate all blocks work correctly)
|
||||
poetry run pytest backend/blocks/test/test_block.py -xvs
|
||||
|
||||
# Run tests for a specific block (e.g., GetCurrentTimeBlock)
|
||||
poetry run pytest 'backend/blocks/test/test_block.py::test_available_blocks[GetCurrentTimeBlock]' -xvs
|
||||
|
||||
# Lint and format
|
||||
# Prefer `format` if you just want issues fixed automatically and only want to see the errors that can't be autofixed
|
||||
poetry run format # Black + isort
|
||||
poetry run lint # ruff
|
||||
```
|
||||
|
||||
More details can be found in TESTING.md.
|
||||
|
||||
#### Creating/Updating Snapshots
|
||||
|
||||
When you first write a test or when the expected output changes:
|
||||
|
||||
```bash
|
||||
poetry run pytest path/to/test.py --snapshot-update
|
||||
```
|
||||
|
||||
⚠️ **Important**: Always review snapshot changes before committing! Use `git diff` to verify the changes are expected.
|
||||
|
||||
### Frontend Development
|
||||
|
||||
```bash
|
||||
# Install dependencies
|
||||
cd frontend && pnpm i
|
||||
|
||||
# Generate API client from OpenAPI spec
|
||||
pnpm generate:api
|
||||
|
||||
# Start development server
|
||||
pnpm dev
|
||||
|
||||
# Run E2E tests
|
||||
pnpm test
|
||||
|
||||
# Run Storybook for component development
|
||||
pnpm storybook
|
||||
|
||||
# Build production
|
||||
pnpm build
|
||||
|
||||
# Format and lint
|
||||
pnpm format
|
||||
|
||||
# Type checking
|
||||
pnpm types
|
||||
```
|
||||
|
||||
**📖 Complete Guide**: See `/frontend/CONTRIBUTING.md` and `/frontend/.cursorrules` for comprehensive frontend patterns.
|
||||
|
||||
**Key Frontend Conventions:**
|
||||
|
||||
- Separate render logic from data/behavior in components
|
||||
- Use generated API hooks from `@/app/api/__generated__/endpoints/`
|
||||
- Use function declarations (not arrow functions) for components/handlers
|
||||
- Use design system components from `src/components/` (atoms, molecules, organisms)
|
||||
- Only use Phosphor Icons
|
||||
- Never use `src/components/__legacy__/*` or deprecated `BackendAPI`
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
### Backend Architecture
|
||||
|
||||
- **API Layer**: FastAPI with REST and WebSocket endpoints
|
||||
- **Database**: PostgreSQL with Prisma ORM, includes pgvector for embeddings
|
||||
- **Queue System**: RabbitMQ for async task processing
|
||||
- **Execution Engine**: Separate executor service processes agent workflows
|
||||
- **Authentication**: JWT-based with Supabase integration
|
||||
- **Security**: Cache protection middleware prevents sensitive data caching in browsers/proxies
|
||||
|
||||
### Frontend Architecture
|
||||
|
||||
- **Framework**: Next.js 15 App Router (client-first approach)
|
||||
- **Data Fetching**: Type-safe generated API hooks via Orval + React Query
|
||||
- **State Management**: React Query for server state, co-located UI state in components/hooks
|
||||
- **Component Structure**: Separate render logic (`.tsx`) from business logic (`use*.ts` hooks)
|
||||
- **Workflow Builder**: Visual graph editor using @xyflow/react
|
||||
- **UI Components**: shadcn/ui (Radix UI primitives) with Tailwind CSS styling
|
||||
- **Icons**: Phosphor Icons only
|
||||
- **Feature Flags**: LaunchDarkly integration
|
||||
- **Error Handling**: ErrorCard for render errors, toast for mutations, Sentry for exceptions
|
||||
- **Testing**: Playwright for E2E, Storybook for component development
|
||||
|
||||
### Key Concepts
|
||||
|
||||
1. **Agent Graphs**: Workflow definitions stored as JSON, executed by the backend
|
||||
2. **Blocks**: Reusable components in `/backend/blocks/` that perform specific tasks
|
||||
3. **Integrations**: OAuth and API connections stored per user
|
||||
4. **Store**: Marketplace for sharing agent templates
|
||||
5. **Virus Scanning**: ClamAV integration for file upload security
|
||||
|
||||
### Testing Approach
|
||||
|
||||
- Backend uses pytest with snapshot testing for API responses
|
||||
- Test files are colocated with source files (`*_test.py`)
|
||||
- Frontend uses Playwright for E2E tests
|
||||
- Component testing via Storybook
|
||||
|
||||
### Database Schema
|
||||
|
||||
Key models (defined in `/backend/schema.prisma`):
|
||||
|
||||
- `User`: Authentication and profile data
|
||||
- `AgentGraph`: Workflow definitions with version control
|
||||
- `AgentGraphExecution`: Execution history and results
|
||||
- `AgentNode`: Individual nodes in a workflow
|
||||
- `StoreListing`: Marketplace listings for sharing agents
|
||||
|
||||
### Environment Configuration
|
||||
|
||||
#### Configuration Files
|
||||
|
||||
- **Backend**: `/backend/.env.default` (defaults) → `/backend/.env` (user overrides)
|
||||
- **Frontend**: `/frontend/.env.default` (defaults) → `/frontend/.env` (user overrides)
|
||||
- **Platform**: `/.env.default` (Supabase/shared defaults) → `/.env` (user overrides)
|
||||
|
||||
#### Docker Environment Loading Order
|
||||
|
||||
1. `.env.default` files provide base configuration (tracked in git)
|
||||
2. `.env` files provide user-specific overrides (gitignored)
|
||||
3. Docker Compose `environment:` sections provide service-specific overrides
|
||||
4. Shell environment variables have highest precedence
|
||||
|
||||
#### Key Points
|
||||
|
||||
- All services use hardcoded defaults in docker-compose files (no `${VARIABLE}` substitutions)
|
||||
- The `env_file` directive loads variables INTO containers at runtime
|
||||
- Backend/Frontend services use YAML anchors for consistent configuration
|
||||
- Supabase services (`db/docker/docker-compose.yml`) follow the same pattern
|
||||
|
||||
### Common Development Tasks
|
||||
|
||||
**Adding a new block:**
|
||||
|
||||
Follow the comprehensive [Block SDK Guide](../../../docs/content/platform/block-sdk-guide.md), which covers:
|
||||
|
||||
- Provider configuration with `ProviderBuilder`
|
||||
- Block schema definition
|
||||
- Authentication (API keys, OAuth, webhooks)
|
||||
- Testing and validation
|
||||
- File organization
|
||||
|
||||
Quick steps (a minimal sketch follows this list):
|
||||
|
||||
1. Create new file in `/backend/backend/blocks/`
|
||||
2. Configure provider using `ProviderBuilder` in `_config.py`
|
||||
3. Inherit from `Block` base class
|
||||
4. Define input/output schemas using `BlockSchema`
|
||||
5. Implement async `run` method
|
||||
6. Generate unique block ID using `uuid.uuid4()`
|
||||
7. Test with `poetry run pytest backend/blocks/test/test_block.py`
|
||||
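To make the steps concrete, here is a minimal, hypothetical block sketch. The import path, schema fields, and `Block.__init__` parameters are assumptions inferred from the steps above and may differ from the current SDK — treat the Block SDK Guide as authoritative.

```python
# Hypothetical example only — names and parameters are assumptions,
# not the authoritative SDK surface (see the Block SDK Guide).
from backend.data.block import Block, BlockOutput, BlockSchema


class GreetBlock(Block):
    class Input(BlockSchema):
        name: str  # who to greet

    class Output(BlockSchema):
        greeting: str  # the rendered greeting

    def __init__(self):
        super().__init__(
            # Generate once with uuid.uuid4() and hard-code the result here
            id="00000000-0000-0000-0000-000000000000",
            input_schema=GreetBlock.Input,
            output_schema=GreetBlock.Output,
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        # Blocks yield (output_name, value) pairs, which is what lets
        # downstream nodes in the graph editor consume each output separately
        yield "greeting", f"Hello, {input_data.name}!"
```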
|
||||
Note: when creating many new blocks, analyze the interfaces of each block and consider whether they would work well together in a graph-based editor, or whether they would struggle to connect productively.
|
||||
For example: do the inputs and outputs tie well together?
|
||||
|
||||
If you get any pushback or hit complex block conditions, check the new_blocks guide in the docs.
|
||||
|
||||
**Modifying the API** (a minimal sketch follows these steps)**:**
|
||||
|
||||
1. Update route in `/backend/backend/server/routers/`
|
||||
2. Add/update Pydantic models in same directory
|
||||
3. Write tests alongside the route file
|
||||
4. Run `poetry run test` to verify
|
||||
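As a rough illustration of steps 1–2, a new route plus its Pydantic models might look like the following. The route, model names, and path are hypothetical; only the FastAPI/Pydantic pattern is the point.

```python
# Hypothetical sketch of a route file under /backend/backend/server/routers/.
from fastapi import APIRouter
from pydantic import BaseModel

router = APIRouter()


class EchoRequest(BaseModel):
    message: str


class EchoResponse(BaseModel):
    message: str


@router.post("/echo", response_model=EchoResponse)
async def echo(body: EchoRequest) -> EchoResponse:
    # Echo the validated payload back; FastAPI handles (de)serialization
    return EchoResponse(message=body.message)
```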
|
||||
**Frontend feature development:**
|
||||
|
||||
See `/frontend/CONTRIBUTING.md` for complete patterns. Quick reference:
|
||||
|
||||
1. **Pages**: Create in `src/app/(platform)/feature-name/page.tsx`
|
||||
- Add `usePageName.ts` hook for logic
|
||||
- Put sub-components in local `components/` folder
|
||||
2. **Components**: Structure as `ComponentName/ComponentName.tsx` + `useComponentName.ts` + `helpers.ts`
|
||||
- Use design system components from `src/components/` (atoms, molecules, organisms)
|
||||
- Never use `src/components/__legacy__/*`
|
||||
3. **Data fetching**: Use generated API hooks from `@/app/api/__generated__/endpoints/`
|
||||
- Regenerate with `pnpm generate:api`
|
||||
- Pattern: `use{Method}{Version}{OperationName}`
|
||||
4. **Styling**: Tailwind CSS only, use design tokens, Phosphor Icons only
|
||||
5. **Testing**: Add Storybook stories for new components, Playwright for E2E
|
||||
6. **Code conventions**: Function declarations (not arrow functions) for components/handlers
|
||||
|
||||
### Security Implementation
|
||||
|
||||
**Cache Protection Middleware** (sketched below)**:**
|
||||
|
||||
- Located in `/backend/backend/server/middleware/security.py`
|
||||
- Default behavior: Disables caching for ALL endpoints with `Cache-Control: no-store, no-cache, must-revalidate, private`
|
||||
- Uses an allow list approach - only explicitly permitted paths can be cached
|
||||
- Cacheable paths include: static assets (`/static/*`, `/_next/static/*`), health checks, public store pages, documentation
|
||||
- Prevents sensitive data (auth tokens, API keys, user data) from being cached by browsers/proxies
|
||||
- To allow caching for a new endpoint, add it to `CACHEABLE_PATHS` in the middleware
|
||||
- Applied to both main API server and external API applications
|
||||
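A minimal sketch of this allow-list pattern, assuming a Starlette/FastAPI middleware; the `CACHEABLE_PATHS` entries below are illustrative, not the real list.

```python
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request

# Illustrative allow list — the real CACHEABLE_PATHS lives in
# /backend/backend/server/middleware/security.py
CACHEABLE_PATHS = ("/static/", "/_next/static/", "/health")


class CacheControlMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        response = await call_next(request)
        if not any(request.url.path.startswith(p) for p in CACHEABLE_PATHS):
            # Default-deny: never let browsers or proxies cache responses
            # that may contain tokens, API keys, or user data.
            response.headers["Cache-Control"] = (
                "no-store, no-cache, must-revalidate, private"
            )
        return response
```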
|
||||
### Creating Pull Requests
|
||||
|
||||
- Create the PR against the `dev` branch of the repository.
|
||||
- Ensure the branch name is descriptive (e.g., `feature/add-new-block`).
|
||||
- Use conventional commit messages (see below).
|
||||
- Fill out the `.github/PULL_REQUEST_TEMPLATE.md` template as the PR description.
|
||||
- Run the repository's pre-commit hooks to ensure code quality.
|
||||
|
||||
### Reviewing/Revising Pull Requests
|
||||
|
||||
- When the user runs /pr-comments or tries to fetch PR comments, also run `gh api /repos/Significant-Gravitas/AutoGPT/pulls/[issuenum]/reviews` to get the reviews
|
||||
- Use `gh api /repos/Significant-Gravitas/AutoGPT/pulls/[issuenum]/reviews/[review_id]/comments` to get the review contents
|
||||
- Use `gh api /repos/Significant-Gravitas/AutoGPT/issues/[issuenum]/comments` to get the PR-specific comments
|
||||
|
||||
### Conventional Commits
|
||||
|
||||
Use this format for commit messages and Pull Request titles:
|
||||
|
||||
**Conventional Commit Types:**
|
||||
|
||||
- `feat`: Introduces a new feature to the codebase
|
||||
- `fix`: Patches a bug in the codebase
|
||||
- `refactor`: Code change that neither fixes a bug nor adds a feature; also applies to removing features
|
||||
- `ci`: Changes to CI configuration
|
||||
- `docs`: Documentation-only changes
|
||||
- `dx`: Improvements to the developer experience
|
||||
|
||||
**Recommended Base Scopes:**
|
||||
|
||||
- `platform`: Changes affecting both frontend and backend
|
||||
- `frontend`
|
||||
- `backend`
|
||||
- `infra`
|
||||
- `blocks`: Modifications/additions of individual blocks
|
||||
|
||||
**Subscope Examples:**
|
||||
|
||||
- `backend/executor`
|
||||
- `backend/db`
|
||||
- `frontend/builder` (includes changes to the block UI component)
|
||||
- `infra/prod`
|
||||
|
||||
Use these scopes and subscopes for clarity and consistency in commit messages, e.g. `feat(backend/executor): add retry logic for failed node runs`.
|
||||
21
autogpt_platform/Contributor License Agreement (CLA).md
Normal file
21
autogpt_platform/Contributor License Agreement (CLA).md
Normal file
@@ -0,0 +1,21 @@
|
||||
**Determinist Ltd**
|
||||
|
||||
**Contributor License Agreement (“Agreement”)**
|
||||
|
||||
Thank you for your interest in the AutoGPT open source project at [https://github.com/Significant-Gravitas/AutoGPT](https://github.com/Significant-Gravitas/AutoGPT) stewarded by Determinist Ltd (“**Determinist**”), with offices at 3rd Floor 1 Ashley Road, Altrincham, Cheshire, WA14 2DT, United Kingdom. The form of license below is a document that clarifies the terms under which You, the person listed below, may contribute software code described below (the “**Contribution**”) to the project. We appreciate your participation in our project, and your help in improving our products, so we want you to understand what will be done with the Contributions. This license is for your protection as well as the protection of Determinist and its licensees; it does not change your rights to use your own Contributions for any other purpose.
|
||||
|
||||
By submitting a Pull Request which modifies the content of the “autogpt\_platform” folder at [https://github.com/Significant-Gravitas/AutoGPT/tree/master/autogpt\_platform](https://github.com/Significant-Gravitas/AutoGPT/tree/master/autogpt_platform), You hereby agree:
|
||||
|
||||
1\. **You grant us the ability to use the Contributions in any way**. You hereby grant to Determinist a non-exclusive, irrevocable, worldwide, royalty-free, sublicenseable, transferable license under all of Your relevant intellectual property rights (including copyright, patent, and any other rights), to use, copy, prepare derivative works of, distribute and publicly perform and display the Contributions on any licensing terms, including without limitation: (a) open source licenses like the GNU General Public License (GPL), the GNU Lesser General Public License (LGPL), the Common Public License, or the Berkeley Science Division license (BSD); and (b) binary, proprietary, or commercial licenses.
|
||||
|
||||
2\. **Grant of Patent License**. You hereby grant to Determinist a worldwide, non-exclusive, royalty-free, irrevocable, license, under any rights you may have, now or in the future, in any patents or patent applications, to make, have made, use, offer to sell, sell, and import products containing the Contribution or portions of the Contribution. This license extends to patent claims that are infringed by the Contribution alone or by combination of the Contribution with other inventions.
|
||||
|
||||
3\. **Limitations on Licenses**. The licenses granted in this Agreement will continue for the duration of the applicable patent or intellectual property right under which such license is granted. The licenses granted in this Agreement will include the right to grant and authorize sublicenses, so long as the sublicenses are within the scope of the licenses granted in this Agreement. Except for the licenses granted herein, You reserve all right, title, and interest in and to the Contribution.
|
||||
|
||||
4\. **You are able to grant us these rights**. You represent that You are legally entitled to grant the above license. If Your employer has rights to intellectual property that You create, You represent that You are authorized to make the Contributions on behalf of that employer, or that Your employer has waived such rights for the Contributions.
|
||||
|
||||
5\. **The Contributions are your original work**. You represent that the Contributions are Your original works of authorship, and to Your knowledge, no other person claims, or has the right to claim, any right in any invention or patent related to the Contributions. You also represent that You are not legally obligated, whether by entering into an agreement or otherwise, in any way that conflicts with the terms of this license. For example, if you have signed an agreement requiring you to assign the intellectual property rights in the Contributions to an employer or customer, that would conflict with the terms of this license.
|
||||
|
||||
6\. **We determine the code that is in our products**. You understand that the decision to include the Contribution in any product or source repository is entirely that of Determinist, and this agreement does not guarantee that the Contributions will be included in any product.
|
||||
|
||||
7\. **No Implied Warranties.** Determinist acknowledges that, except as explicitly described in this Agreement, the Contribution is provided on an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
|
||||
164
autogpt_platform/LICENSE.md
Normal file
164
autogpt_platform/LICENSE.md
Normal file
@@ -0,0 +1,164 @@
|
||||
# PolyForm Shield License 1.0.0
|
||||
|
||||
<https://polyformproject.org/licenses/shield/1.0.0>
|
||||
|
||||
## Acceptance
|
||||
|
||||
In order to get any license under these terms, you must agree
|
||||
to them as both strict obligations and conditions to all
|
||||
your licenses.
|
||||
|
||||
## Copyright License
|
||||
|
||||
The licensor grants you a copyright license for the
|
||||
software to do everything you might do with the software
|
||||
that would otherwise infringe the licensor's copyright
|
||||
in it for any permitted purpose. However, you may
|
||||
only distribute the software according to [Distribution
|
||||
License](#distribution-license) and make changes or new works
|
||||
based on the software according to [Changes and New Works
|
||||
License](#changes-and-new-works-license).
|
||||
|
||||
## Distribution License
|
||||
|
||||
The licensor grants you an additional copyright license
|
||||
to distribute copies of the software. Your license
|
||||
to distribute covers distributing the software with
|
||||
changes and new works permitted by [Changes and New Works
|
||||
License](#changes-and-new-works-license).
|
||||
|
||||
## Notices
|
||||
|
||||
You must ensure that anyone who gets a copy of any part of
|
||||
the software from you also gets a copy of these terms or the
|
||||
URL for them above, as well as copies of any plain-text lines
|
||||
beginning with `Required Notice:` that the licensor provided
|
||||
with the software. For example:
|
||||
|
||||
> Required Notice: Copyright Yoyodyne, Inc. (http://example.com)
|
||||
|
||||
## Changes and New Works License
|
||||
|
||||
The licensor grants you an additional copyright license to
|
||||
make changes and new works based on the software for any
|
||||
permitted purpose.
|
||||
|
||||
## Patent License
|
||||
|
||||
The licensor grants you a patent license for the software that
|
||||
covers patent claims the licensor can license, or becomes able
|
||||
to license, that you would infringe by using the software.
|
||||
|
||||
## Noncompete
|
||||
|
||||
Any purpose is a permitted purpose, except for providing any
|
||||
product that competes with the software or any product the
|
||||
licensor or any of its affiliates provides using the software.
|
||||
|
||||
## Competition
|
||||
|
||||
Goods and services compete even when they provide functionality
|
||||
through different kinds of interfaces or for different technical
|
||||
platforms. Applications can compete with services, libraries
|
||||
with plugins, frameworks with development tools, and so on,
|
||||
even if they're written in different programming languages
|
||||
or for different computer architectures. Goods and services
|
||||
compete even when provided free of charge. If you market a
|
||||
product as a practical substitute for the software or another
|
||||
product, it definitely competes.
|
||||
|
||||
## New Products
|
||||
|
||||
If you are using the software to provide a product that does
|
||||
not compete, but the licensor or any of its affiliates brings
|
||||
your product into competition by providing a new version of
|
||||
the software or another product using the software, you may
|
||||
continue using versions of the software available under these
|
||||
terms beforehand to provide your competing product, but not
|
||||
any later versions.
|
||||
|
||||
## Discontinued Products
|
||||
|
||||
You may begin using the software to compete with a product
|
||||
or service that the licensor or any of its affiliates has
|
||||
stopped providing, unless the licensor includes a plain-text
|
||||
line beginning with `Licensor Line of Business:` with the
|
||||
software that mentions that line of business. For example:
|
||||
|
||||
> Licensor Line of Business: YoyodyneCMS Content Management
|
||||
System (http://example.com/cms)
|
||||
|
||||
## Sales of Business
|
||||
|
||||
If the licensor or any of its affiliates sells a line of
|
||||
business developing the software or using the software
|
||||
to provide a product, the buyer can also enforce
|
||||
[Noncompete](#noncompete) for that product.
|
||||
|
||||
## Fair Use
|
||||
|
||||
You may have "fair use" rights for the software under the
|
||||
law. These terms do not limit them.
|
||||
|
||||
## No Other Rights
|
||||
|
||||
These terms do not allow you to sublicense or transfer any of
|
||||
your licenses to anyone else, or prevent the licensor from
|
||||
granting licenses to anyone else. These terms do not imply
|
||||
any other licenses.
|
||||
|
||||
## Patent Defense
|
||||
|
||||
If you make any written claim that the software infringes or
|
||||
contributes to infringement of any patent, your patent license
|
||||
for the software granted under these terms ends immediately. If
|
||||
your company makes such a claim, your patent license ends
|
||||
immediately for work on behalf of your company.
|
||||
|
||||
## Violations
|
||||
|
||||
The first time you are notified in writing that you have
|
||||
violated any of these terms, or done anything with the software
|
||||
not covered by your licenses, your licenses can nonetheless
|
||||
continue if you come into full compliance with these terms,
|
||||
and take practical steps to correct past violations, within
|
||||
32 days of receiving notice. Otherwise, all your licenses
|
||||
end immediately.
|
||||
|
||||
## No Liability
|
||||
|
||||
***As far as the law allows, the software comes as is, without
|
||||
any warranty or condition, and the licensor will not be liable
|
||||
to you for any damages arising out of these terms or the use
|
||||
or nature of the software, under any kind of legal claim.***
|
||||
|
||||
## Definitions
|
||||
|
||||
The **licensor** is the individual or entity offering these
|
||||
terms, and the **software** is the software the licensor makes
|
||||
available under these terms.
|
||||
|
||||
A **product** can be a good or service, or a combination
|
||||
of them.
|
||||
|
||||
**You** refers to the individual or entity agreeing to these
|
||||
terms.
|
||||
|
||||
**Your company** is any legal entity, sole proprietorship,
|
||||
or other kind of organization that you work for, plus all
|
||||
its affiliates.
|
||||
|
||||
**Affiliates** means the other organizations than an
|
||||
organization has control over, is under the control of, or is
|
||||
under common control with.
|
||||
|
||||
**Control** means ownership of substantially all the assets of
|
||||
an entity, or the power to direct its management and policies
|
||||
by vote, contract, or otherwise. Control can be direct or
|
||||
indirect.
|
||||
|
||||
**Your licenses** are all the licenses granted to you for the
|
||||
software under these terms.
|
||||
|
||||
**Use** means anything you do with the software requiring one
|
||||
of your licenses.
|
||||
61
autogpt_platform/Makefile
Normal file
61
autogpt_platform/Makefile
Normal file
@@ -0,0 +1,61 @@
|
||||
.PHONY: start-core stop-core reset-db logs-core format migrate init-env run-backend run-frontend test-data load-store-agents help
|
||||
|
||||
# Run just Supabase + Redis + RabbitMQ
|
||||
start-core:
|
||||
docker compose up -d deps
|
||||
|
||||
# Stop core services
|
||||
stop-core:
|
||||
docker compose stop deps
|
||||
|
||||
reset-db:
|
||||
rm -rf db/docker/volumes/db/data
|
||||
cd backend && poetry run prisma migrate deploy
|
||||
cd backend && poetry run prisma generate
|
||||
|
||||
# View logs for core services
|
||||
logs-core:
|
||||
docker compose logs -f deps
|
||||
|
||||
# Run formatting and linting for backend and frontend
|
||||
format:
|
||||
cd backend && poetry run format
|
||||
cd frontend && pnpm format
|
||||
cd frontend && pnpm lint
|
||||
|
||||
init-env:
|
||||
cp -n .env.default .env || true
|
||||
cd backend && cp -n .env.default .env || true
|
||||
cd frontend && cp -n .env.default .env || true
|
||||
|
||||
|
||||
# Run migrations for backend
|
||||
migrate:
|
||||
cd backend && poetry run prisma migrate deploy
|
||||
cd backend && poetry run prisma generate
|
||||
|
||||
run-backend:
|
||||
cd backend && poetry run app
|
||||
|
||||
run-frontend:
|
||||
cd frontend && pnpm dev
|
||||
|
||||
test-data:
|
||||
cd backend && poetry run python test/test_data_creator.py
|
||||
|
||||
load-store-agents:
|
||||
cd backend && poetry run load-store-agents
|
||||
|
||||
help:
|
||||
@echo "Usage: make <target>"
|
||||
@echo "Targets:"
|
||||
@echo " start-core - Start just the core services (Supabase, Redis, RabbitMQ) in background"
|
||||
@echo " stop-core - Stop the core services"
|
||||
@echo " reset-db - Reset the database by deleting the volume"
|
||||
@echo " logs-core - Tail the logs for core services"
|
||||
@echo " format - Format & lint backend (Python) and frontend (TypeScript) code"
|
||||
@echo " migrate - Run backend database migrations"
|
||||
@echo " run-backend - Run the backend FastAPI server"
|
||||
@echo " run-frontend - Run the frontend Next.js development server"
|
||||
@echo " test-data - Run the test data creator"
|
||||
@echo " load-store-agents - Load store agents from agents/ folder into test database"
|
||||
190
autogpt_platform/README.md
Normal file
190
autogpt_platform/README.md
Normal file
@@ -0,0 +1,190 @@
|
||||
# AutoGPT Platform
|
||||
|
||||
Welcome to the AutoGPT Platform - a powerful system for creating and running AI agents to solve business problems. This platform enables you to harness the power of artificial intelligence to automate tasks, analyze data, and generate insights for your organization.
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- Docker
|
||||
- Docker Compose V2 (comes with Docker Desktop, or can be installed separately)
|
||||
|
||||
### Running the System
|
||||
|
||||
To run the AutoGPT Platform, follow these steps:
|
||||
|
||||
1. Clone this repository to your local machine and navigate to the `autogpt_platform` directory within the repository:
|
||||
|
||||
```
|
||||
git clone https://github.com/Significant-Gravitas/AutoGPT.git  # or via SSH: git clone git@github.com:Significant-Gravitas/AutoGPT.git
|
||||
cd AutoGPT/autogpt_platform
|
||||
```
|
||||
|
||||
2. Run the following command:
|
||||
|
||||
```
|
||||
cp .env.default .env
|
||||
```
|
||||
|
||||
This command will copy the `.env.default` file to `.env`. You can modify the `.env` file to add your own environment variables.
|
||||
|
||||
3. Run the following command:
|
||||
|
||||
```
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
This command will start all the necessary backend services defined in the `docker-compose.yml` file in detached mode.
|
||||
|
||||
4. After all the services are in a ready state, open your browser and navigate to `http://localhost:3000` to access the AutoGPT Platform frontend.
|
||||
|
||||
### Running Just Core Services
|
||||
|
||||
You can run the following commands to manage just the core services.
|
||||
|
||||
```
|
||||
# For help
|
||||
make help
|
||||
|
||||
# Run just Supabase + Redis + RabbitMQ
|
||||
make start-core
|
||||
|
||||
# Stop core services
|
||||
make stop-core
|
||||
|
||||
# View logs from core services
|
||||
make logs-core
|
||||
|
||||
# Run formatting and linting for backend and frontend
|
||||
make format
|
||||
|
||||
# Run migrations for backend database
|
||||
make migrate
|
||||
|
||||
# Run backend server
|
||||
make run-backend
|
||||
|
||||
# Run frontend development server
|
||||
make run-frontend
|
||||
|
||||
```
|
||||
|
||||
### Docker Compose Commands
|
||||
|
||||
Here are some useful Docker Compose commands for managing your AutoGPT Platform:
|
||||
|
||||
- `docker compose up -d`: Start the services in detached mode.
|
||||
- `docker compose stop`: Stop the running services without removing them.
|
||||
- `docker compose rm`: Remove stopped service containers.
|
||||
- `docker compose build`: Build or rebuild services.
|
||||
- `docker compose down`: Stop and remove containers, networks, and volumes.
|
||||
- `docker compose watch`: Watch for changes in your services and automatically update them.
|
||||
|
||||
### Sample Scenarios
|
||||
|
||||
Here are some common scenarios where you might use multiple Docker Compose commands:
|
||||
|
||||
1. Updating and restarting a specific service:
|
||||
|
||||
```
|
||||
docker compose build api_srv
|
||||
docker compose up -d --no-deps api_srv
|
||||
```
|
||||
|
||||
This rebuilds the `api_srv` service and restarts it without affecting other services.
|
||||
|
||||
2. Viewing logs for troubleshooting:
|
||||
|
||||
```
|
||||
docker compose logs -f api_srv ws_srv
|
||||
```
|
||||
|
||||
This shows and follows the logs for both `api_srv` and `ws_srv` services.
|
||||
|
||||
3. Scaling a service for increased load:
|
||||
|
||||
```
|
||||
docker compose up -d --scale executor=3
|
||||
```
|
||||
|
||||
This scales the `executor` service to 3 instances to handle increased load.
|
||||
|
||||
4. Stopping the entire system for maintenance:
|
||||
|
||||
```
|
||||
docker compose stop
|
||||
docker compose rm -f
|
||||
docker compose pull
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
This stops all services, removes containers, pulls the latest images, and restarts the system.
|
||||
|
||||
5. Developing with live updates:
|
||||
|
||||
```
|
||||
docker compose watch
|
||||
```
|
||||
|
||||
This watches for changes in your code and automatically updates the relevant services.
|
||||
|
||||
6. Checking the status of services:
|
||||
```
|
||||
docker compose ps
|
||||
```
|
||||
This shows the current status of all services defined in your docker-compose.yml file.
|
||||
|
||||
These scenarios demonstrate how to use Docker Compose commands in combination to manage your AutoGPT Platform effectively.
|
||||
|
||||
### Persisting Data
|
||||
|
||||
To persist data for PostgreSQL and Redis, you can modify the `docker-compose.yml` file to add volumes. Here's how:
|
||||
|
||||
1. Open the `docker-compose.yml` file in a text editor.
|
||||
2. Add volume configurations for PostgreSQL and Redis services:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
postgres:
|
||||
# ... other configurations ...
|
||||
volumes:
|
||||
- postgres_data:/var/lib/postgresql/data
|
||||
|
||||
redis:
|
||||
# ... other configurations ...
|
||||
volumes:
|
||||
- redis_data:/data
|
||||
|
||||
volumes:
|
||||
postgres_data:
|
||||
redis_data:
|
||||
```
|
||||
|
||||
3. Save the file and run `docker compose up -d` to apply the changes.
|
||||
|
||||
This configuration will create named volumes for PostgreSQL and Redis, ensuring that your data persists across container restarts.
|
||||
|
||||
### API Client Generation
|
||||
|
||||
The platform includes scripts for generating and managing the API client:
|
||||
|
||||
- `pnpm fetch:openapi`: Fetches the OpenAPI specification from the backend service (requires backend to be running on port 8006)
|
||||
- `pnpm generate:api-client`: Generates the TypeScript API client from the OpenAPI specification using Orval
|
||||
- `pnpm generate:api`: Runs both fetch and generate commands in sequence
|
||||
|
||||
#### Manual API Client Updates
|
||||
|
||||
If you need to update the API client after making changes to the backend API:
|
||||
|
||||
1. Ensure the backend services are running:
|
||||
|
||||
```
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
2. Generate the updated API client:
|
||||
```
|
||||
pnpm generate:api
|
||||
```
|
||||
|
||||
This will fetch the latest OpenAPI specification and regenerate the TypeScript client code.
|
||||
0
autogpt_platform/__init__.py
Normal file
0
autogpt_platform/__init__.py
Normal file
3
autogpt_platform/autogpt_libs/README.md
Normal file
3
autogpt_platform/autogpt_libs/README.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# AutoGPT Libs
|
||||
|
||||
This is a new project to store shared functionality across different services in the AutoGPT Platform (e.g. authentication).
|
||||
@@ -0,0 +1,78 @@
|
||||
import hashlib
|
||||
import secrets
|
||||
from typing import NamedTuple
|
||||
|
||||
from cryptography.hazmat.primitives.kdf.scrypt import Scrypt
|
||||
|
||||
|
||||
class APIKeyContainer(NamedTuple):
|
||||
"""Container for API key parts."""
|
||||
|
||||
key: str
|
||||
head: str
|
||||
tail: str
|
||||
hash: str
|
||||
salt: str
|
||||
|
||||
|
||||
class APIKeySmith:
|
||||
PREFIX: str = "agpt_"
|
||||
HEAD_LENGTH: int = 8
|
||||
TAIL_LENGTH: int = 8
|
||||
|
||||
def generate_key(self) -> APIKeyContainer:
|
||||
"""Generate a new API key with secure hashing."""
|
||||
raw_key = f"{self.PREFIX}{secrets.token_urlsafe(32)}"
|
||||
hash, salt = self.hash_key(raw_key)
|
||||
|
||||
return APIKeyContainer(
|
||||
key=raw_key,
|
||||
head=raw_key[: self.HEAD_LENGTH],
|
||||
tail=raw_key[-self.TAIL_LENGTH :],
|
||||
hash=hash,
|
||||
salt=salt,
|
||||
)
|
||||
|
||||
def verify_key(
|
||||
self, provided_key: str, known_hash: str, known_salt: str | None = None
|
||||
) -> bool:
|
||||
"""
|
||||
Verify an API key against a known hash (+ salt).
|
||||
Supports verifying both legacy SHA256 and secure Scrypt hashes.
|
||||
"""
|
||||
if not provided_key.startswith(self.PREFIX):
|
||||
return False
|
||||
|
||||
# Handle legacy SHA256 hashes (migration support)
|
||||
if known_salt is None:
|
||||
legacy_hash = hashlib.sha256(provided_key.encode()).hexdigest()
|
||||
return secrets.compare_digest(legacy_hash, known_hash)
|
||||
|
||||
try:
|
||||
salt_bytes = bytes.fromhex(known_salt)
|
||||
provided_hash = self._hash_key_with_salt(provided_key, salt_bytes)
|
||||
return secrets.compare_digest(provided_hash, known_hash)
|
||||
except (ValueError, TypeError):
|
||||
return False
|
||||
|
||||
def hash_key(self, raw_key: str) -> tuple[str, str]:
|
||||
"""Migrate a legacy hash to secure hash format."""
|
||||
salt = self._generate_salt()
|
||||
hash = self._hash_key_with_salt(raw_key, salt)
|
||||
return hash, salt.hex()
|
||||
|
||||
def _generate_salt(self) -> bytes:
|
||||
"""Generate a random salt for hashing."""
|
||||
return secrets.token_bytes(32)
|
||||
|
||||
def _hash_key_with_salt(self, raw_key: str, salt: bytes) -> str:
|
||||
"""Hash API key using Scrypt with salt."""
|
||||
kdf = Scrypt(
|
||||
length=32,
|
||||
salt=salt,
|
||||
n=2**14, # CPU/memory cost parameter
|
||||
r=8, # Block size parameter
|
||||
p=1, # Parallelization parameter
|
||||
)
|
||||
key_hash = kdf.derive(raw_key.encode())
|
||||
return key_hash.hex()
|
||||
@@ -0,0 +1,79 @@
|
||||
import hashlib
|
||||
|
||||
from autogpt_libs.api_key.keysmith import APIKeySmith
|
||||
|
||||
|
||||
def test_generate_api_key():
|
||||
keysmith = APIKeySmith()
|
||||
key = keysmith.generate_key()
|
||||
|
||||
assert key.key.startswith(keysmith.PREFIX)
|
||||
assert key.head == key.key[: keysmith.HEAD_LENGTH]
|
||||
assert key.tail == key.key[-keysmith.TAIL_LENGTH :]
|
||||
assert len(key.hash) == 64 # 32 bytes hex encoded
|
||||
assert len(key.salt) == 64 # 32 bytes hex encoded
|
||||
|
||||
|
||||
def test_verify_new_secure_key():
|
||||
keysmith = APIKeySmith()
|
||||
key = keysmith.generate_key()
|
||||
|
||||
# Test correct key validates
|
||||
assert keysmith.verify_key(key.key, key.hash, key.salt) is True
|
||||
|
||||
# Test wrong key fails
|
||||
wrong_key = f"{keysmith.PREFIX}wrongkey123"
|
||||
assert keysmith.verify_key(wrong_key, key.hash, key.salt) is False
|
||||
|
||||
|
||||
def test_verify_legacy_key():
|
||||
keysmith = APIKeySmith()
|
||||
legacy_key = f"{keysmith.PREFIX}legacykey123"
|
||||
legacy_hash = hashlib.sha256(legacy_key.encode()).hexdigest()
|
||||
|
||||
# Test legacy key validates without salt
|
||||
assert keysmith.verify_key(legacy_key, legacy_hash) is True
|
||||
|
||||
# Test wrong legacy key fails
|
||||
wrong_key = f"{keysmith.PREFIX}wronglegacy"
|
||||
assert keysmith.verify_key(wrong_key, legacy_hash) is False
|
||||
|
||||
|
||||
def test_rehash_existing_key():
|
||||
keysmith = APIKeySmith()
|
||||
legacy_key = f"{keysmith.PREFIX}migratekey123"
|
||||
|
||||
# Migrate the legacy key
|
||||
new_hash, new_salt = keysmith.hash_key(legacy_key)
|
||||
|
||||
# Verify migrated key works
|
||||
assert keysmith.verify_key(legacy_key, new_hash, new_salt) is True
|
||||
|
||||
# Verify different key fails with migrated hash
|
||||
wrong_key = f"{keysmith.PREFIX}wrongkey"
|
||||
assert keysmith.verify_key(wrong_key, new_hash, new_salt) is False
|
||||
|
||||
|
||||
def test_invalid_key_prefix():
|
||||
keysmith = APIKeySmith()
|
||||
key = keysmith.generate_key()
|
||||
|
||||
# Test key without proper prefix fails
|
||||
invalid_key = "invalid_prefix_key"
|
||||
assert keysmith.verify_key(invalid_key, key.hash, key.salt) is False
|
||||
|
||||
|
||||
def test_secure_hash_requires_salt():
|
||||
keysmith = APIKeySmith()
|
||||
key = keysmith.generate_key()
|
||||
|
||||
# Secure hash without salt should fail
|
||||
assert keysmith.verify_key(key.key, key.hash) is False
|
||||
|
||||
|
||||
def test_invalid_salt_format():
|
||||
keysmith = APIKeySmith()
|
||||
key = keysmith.generate_key()
|
||||
|
||||
# Invalid salt format should fail gracefully
|
||||
assert keysmith.verify_key(key.key, key.hash, "invalid_hex") is False
|
||||
19
autogpt_platform/autogpt_libs/autogpt_libs/auth/__init__.py
Normal file
19
autogpt_platform/autogpt_libs/autogpt_libs/auth/__init__.py
Normal file
@@ -0,0 +1,19 @@
|
||||
from .config import verify_settings
|
||||
from .dependencies import (
|
||||
get_optional_user_id,
|
||||
get_user_id,
|
||||
requires_admin_user,
|
||||
requires_user,
|
||||
)
|
||||
from .helpers import add_auth_responses_to_openapi
|
||||
from .models import User
|
||||
|
||||
__all__ = [
|
||||
"verify_settings",
|
||||
"get_user_id",
|
||||
"requires_admin_user",
|
||||
"requires_user",
|
||||
"get_optional_user_id",
|
||||
"add_auth_responses_to_openapi",
|
||||
"User",
|
||||
]
|
||||
90
autogpt_platform/autogpt_libs/autogpt_libs/auth/config.py
Normal file
90
autogpt_platform/autogpt_libs/autogpt_libs/auth/config.py
Normal file
@@ -0,0 +1,90 @@
|
||||
import logging
|
||||
import os
|
||||
|
||||
from jwt.algorithms import get_default_algorithms, has_crypto
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AuthConfigError(ValueError):
|
||||
"""Raised when authentication configuration is invalid."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
ALGO_RECOMMENDATION = (
|
||||
"We highly recommend using an asymmetric algorithm such as ES256, "
|
||||
"because when leaked, a shared secret would allow anyone to "
|
||||
"forge valid tokens and impersonate users. "
|
||||
"More info: https://supabase.com/docs/guides/auth/signing-keys#choosing-the-right-signing-algorithm" # noqa
|
||||
)
|
||||
|
||||
|
||||
class Settings:
|
||||
def __init__(self):
|
||||
self.JWT_VERIFY_KEY: str = os.getenv(
|
||||
"JWT_VERIFY_KEY", os.getenv("SUPABASE_JWT_SECRET", "")
|
||||
).strip()
|
||||
self.JWT_ALGORITHM: str = os.getenv("JWT_SIGN_ALGORITHM", "HS256").strip()
|
||||
|
||||
self.validate()
|
||||
|
||||
def validate(self):
|
||||
if not self.JWT_VERIFY_KEY:
|
||||
raise AuthConfigError(
|
||||
"JWT_VERIFY_KEY must be set. "
|
||||
"An empty JWT secret would allow anyone to forge valid tokens."
|
||||
)
|
||||
|
||||
if len(self.JWT_VERIFY_KEY) < 32:
|
||||
logger.warning(
|
||||
"⚠️ JWT_VERIFY_KEY appears weak (less than 32 characters). "
|
||||
"Consider using a longer, cryptographically secure secret."
|
||||
)
|
||||
|
||||
supported_algorithms = get_default_algorithms().keys()
|
||||
|
||||
if not has_crypto:
|
||||
logger.warning(
|
||||
"⚠️ Asymmetric JWT verification is not available "
|
||||
"because the 'cryptography' package is not installed. "
|
||||
+ ALGO_RECOMMENDATION
|
||||
)
|
||||
|
||||
if (
|
||||
self.JWT_ALGORITHM not in supported_algorithms
|
||||
or self.JWT_ALGORITHM == "none"
|
||||
):
|
||||
raise AuthConfigError(
|
||||
f"Invalid JWT_SIGN_ALGORITHM: '{self.JWT_ALGORITHM}'. "
|
||||
"Supported algorithms are listed on "
|
||||
"https://pyjwt.readthedocs.io/en/stable/algorithms.html"
|
||||
)
|
||||
|
||||
if self.JWT_ALGORITHM.startswith("HS"):
|
||||
logger.warning(
|
||||
f"⚠️ JWT_SIGN_ALGORITHM is set to '{self.JWT_ALGORITHM}', "
|
||||
"a symmetric shared-key signature algorithm. " + ALGO_RECOMMENDATION
|
||||
)
|
||||
|
||||
|
||||
_settings: Settings = None # type: ignore
|
||||
|
||||
|
||||
def get_settings() -> Settings:
|
||||
global _settings
|
||||
|
||||
if not _settings:
|
||||
_settings = Settings()
|
||||
|
||||
return _settings
|
||||
|
||||
|
||||
def verify_settings() -> None:
|
||||
global _settings
|
||||
|
||||
if not _settings:
|
||||
_settings = Settings()  # __init__ runs validate()
|
||||
return
|
||||
|
||||
_settings.validate()
|
||||
306
autogpt_platform/autogpt_libs/autogpt_libs/auth/config_test.py
Normal file
306
autogpt_platform/autogpt_libs/autogpt_libs/auth/config_test.py
Normal file
@@ -0,0 +1,306 @@
|
||||
"""
|
||||
Comprehensive tests for auth configuration to ensure 100% line and branch coverage.
|
||||
These tests verify critical security checks preventing JWT token forgery.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
|
||||
import pytest
|
||||
from pytest_mock import MockerFixture
|
||||
|
||||
from autogpt_libs.auth.config import AuthConfigError, Settings
|
||||
|
||||
|
||||
def test_environment_variable_precedence(mocker: MockerFixture):
|
||||
"""Test that environment variables take precedence over defaults."""
|
||||
secret = "environment-secret-key-with-proper-length-123456"
|
||||
mocker.patch.dict(os.environ, {"JWT_VERIFY_KEY": secret}, clear=True)
|
||||
|
||||
settings = Settings()
|
||||
assert settings.JWT_VERIFY_KEY == secret
|
||||
|
||||
|
||||
def test_environment_variable_backwards_compatible(mocker: MockerFixture):
|
||||
"""Test that SUPABASE_JWT_SECRET is read if JWT_VERIFY_KEY is not set."""
|
||||
secret = "environment-secret-key-with-proper-length-123456"
|
||||
mocker.patch.dict(os.environ, {"SUPABASE_JWT_SECRET": secret}, clear=True)
|
||||
|
||||
settings = Settings()
|
||||
assert settings.JWT_VERIFY_KEY == secret
|
||||
|
||||
|
||||
def test_auth_config_error_inheritance():
|
||||
"""Test that AuthConfigError is properly defined as an Exception."""
|
||||
assert issubclass(AuthConfigError, Exception)
|
||||
error = AuthConfigError("test message")
|
||||
assert str(error) == "test message"
|
||||
|
||||
|
||||
def test_settings_static_after_creation(mocker: MockerFixture):
|
||||
"""Test that settings maintain their values after creation."""
|
||||
secret = "immutable-secret-key-with-proper-length-12345"
|
||||
mocker.patch.dict(os.environ, {"JWT_VERIFY_KEY": secret}, clear=True)
|
||||
|
||||
settings = Settings()
|
||||
original_secret = settings.JWT_VERIFY_KEY
|
||||
|
||||
# Changing environment after creation shouldn't affect settings
|
||||
os.environ["JWT_VERIFY_KEY"] = "different-secret"
|
||||
|
||||
assert settings.JWT_VERIFY_KEY == original_secret
|
||||
|
||||
|
||||
def test_settings_load_with_valid_secret(mocker: MockerFixture):
|
||||
"""Test auth enabled with a valid JWT secret."""
|
||||
valid_secret = "a" * 32 # 32 character secret
|
||||
mocker.patch.dict(os.environ, {"JWT_VERIFY_KEY": valid_secret}, clear=True)
|
||||
|
||||
settings = Settings()
|
||||
assert settings.JWT_VERIFY_KEY == valid_secret
|
||||
|
||||
|
||||
def test_settings_load_with_strong_secret(mocker: MockerFixture):
|
||||
"""Test auth enabled with a cryptographically strong secret."""
|
||||
strong_secret = "super-secret-jwt-token-with-at-least-32-characters-long"
|
||||
mocker.patch.dict(os.environ, {"JWT_VERIFY_KEY": strong_secret}, clear=True)
|
||||
|
||||
settings = Settings()
|
||||
assert settings.JWT_VERIFY_KEY == strong_secret
|
||||
assert len(settings.JWT_VERIFY_KEY) >= 32
|
||||
|
||||
|
||||
def test_secret_empty_raises_error(mocker: MockerFixture):
|
||||
"""Test that auth enabled with empty secret raises AuthConfigError."""
|
||||
mocker.patch.dict(os.environ, {"JWT_VERIFY_KEY": ""}, clear=True)
|
||||
|
||||
with pytest.raises(Exception) as exc_info:
|
||||
Settings()
|
||||
assert "JWT_VERIFY_KEY" in str(exc_info.value)
|
||||
|
||||
|
||||
def test_secret_missing_raises_error(mocker: MockerFixture):
|
||||
"""Test that auth enabled without secret env var raises AuthConfigError."""
|
||||
mocker.patch.dict(os.environ, {}, clear=True)
|
||||
|
||||
with pytest.raises(Exception) as exc_info:
|
||||
Settings()
|
||||
assert "JWT_VERIFY_KEY" in str(exc_info.value)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("secret", [" ", " ", "\t", "\n", " \t\n "])
|
||||
def test_secret_only_whitespace_raises_error(mocker: MockerFixture, secret: str):
|
||||
"""Test that auth enabled with whitespace-only secret raises error."""
|
||||
mocker.patch.dict(os.environ, {"JWT_VERIFY_KEY": secret}, clear=True)
|
||||
|
||||
with pytest.raises(ValueError):
|
||||
Settings()
|
||||
|
||||
|
||||
def test_secret_weak_logs_warning(
|
||||
mocker: MockerFixture, caplog: pytest.LogCaptureFixture
|
||||
):
|
||||
"""Test that weak JWT secret triggers warning log."""
|
||||
weak_secret = "short" # Less than 32 characters
|
||||
mocker.patch.dict(os.environ, {"JWT_VERIFY_KEY": weak_secret}, clear=True)
|
||||
|
||||
with caplog.at_level(logging.WARNING):
|
||||
settings = Settings()
|
||||
assert settings.JWT_VERIFY_KEY == weak_secret
|
||||
assert "key appears weak" in caplog.text.lower()
|
||||
assert "less than 32 characters" in caplog.text
|
||||
|
||||
|
||||
def test_secret_31_char_logs_warning(
|
||||
mocker: MockerFixture, caplog: pytest.LogCaptureFixture
|
||||
):
|
||||
"""Test that 31-character secret triggers warning (boundary test)."""
|
||||
secret_31 = "a" * 31 # Exactly 31 characters
|
||||
mocker.patch.dict(os.environ, {"JWT_VERIFY_KEY": secret_31}, clear=True)
|
||||
|
||||
with caplog.at_level(logging.WARNING):
|
||||
settings = Settings()
|
||||
assert len(settings.JWT_VERIFY_KEY) == 31
|
||||
assert "key appears weak" in caplog.text.lower()
|
||||
|
||||
|
||||
def test_secret_32_char_no_warning(
|
||||
mocker: MockerFixture, caplog: pytest.LogCaptureFixture
|
||||
):
|
||||
"""Test that 32-character secret does not trigger warning (boundary test)."""
|
||||
secret_32 = "a" * 32 # Exactly 32 characters
|
||||
mocker.patch.dict(os.environ, {"JWT_VERIFY_KEY": secret_32}, clear=True)
|
||||
|
||||
with caplog.at_level(logging.WARNING):
|
||||
settings = Settings()
|
||||
assert len(settings.JWT_VERIFY_KEY) == 32
|
||||
assert "JWT secret appears weak" not in caplog.text
|
||||
|
||||
|
||||
def test_secret_whitespace_stripped(mocker: MockerFixture):
|
||||
"""Test that JWT secret whitespace is stripped."""
|
||||
secret = "a" * 32
|
||||
mocker.patch.dict(os.environ, {"JWT_VERIFY_KEY": f" {secret} "}, clear=True)
|
||||
|
||||
settings = Settings()
|
||||
assert settings.JWT_VERIFY_KEY == secret
|
||||
|
||||
|
||||
def test_secret_with_special_characters(mocker: MockerFixture):
|
||||
"""Test JWT secret with special characters."""
|
||||
special_secret = "!@#$%^&*()_+-=[]{}|;:,.<>?`~" + "a" * 10 # 40 chars total
|
||||
mocker.patch.dict(os.environ, {"JWT_VERIFY_KEY": special_secret}, clear=True)
|
||||
|
||||
settings = Settings()
|
||||
assert settings.JWT_VERIFY_KEY == special_secret
|
||||
|
||||
|
||||
def test_secret_with_unicode(mocker: MockerFixture):
|
||||
"""Test JWT secret with unicode characters."""
|
||||
unicode_secret = "秘密🔐キー" + "a" * 25  # multi-byte characters; len() counts characters, not bytes
|
||||
mocker.patch.dict(os.environ, {"JWT_VERIFY_KEY": unicode_secret}, clear=True)
|
||||
|
||||
settings = Settings()
|
||||
assert settings.JWT_VERIFY_KEY == unicode_secret
|
||||
|
||||
|
||||
def test_secret_very_long(mocker: MockerFixture):
|
||||
"""Test JWT secret with excessive length."""
|
||||
long_secret = "a" * 1000 # 1000 character secret
|
||||
    mocker.patch.dict(os.environ, {"JWT_VERIFY_KEY": long_secret}, clear=True)

    settings = Settings()
    assert settings.JWT_VERIFY_KEY == long_secret
    assert len(settings.JWT_VERIFY_KEY) == 1000


def test_secret_with_newline(mocker: MockerFixture):
    """Test JWT secret containing newlines."""
    multiline_secret = "secret\nwith\nnewlines" + "a" * 20
    mocker.patch.dict(os.environ, {"JWT_VERIFY_KEY": multiline_secret}, clear=True)

    settings = Settings()
    assert settings.JWT_VERIFY_KEY == multiline_secret


def test_secret_base64_encoded(mocker: MockerFixture):
    """Test JWT secret that looks like base64."""
    base64_secret = "dGhpc19pc19hX3NlY3JldF9rZXlfd2l0aF9wcm9wZXJfbGVuZ3Ro"
    mocker.patch.dict(os.environ, {"JWT_VERIFY_KEY": base64_secret}, clear=True)

    settings = Settings()
    assert settings.JWT_VERIFY_KEY == base64_secret


def test_secret_numeric_only(mocker: MockerFixture):
    """Test JWT secret with only numbers."""
    numeric_secret = "1234567890" * 4  # 40 character numeric secret
    mocker.patch.dict(os.environ, {"JWT_VERIFY_KEY": numeric_secret}, clear=True)

    settings = Settings()
    assert settings.JWT_VERIFY_KEY == numeric_secret


def test_algorithm_default_hs256(mocker: MockerFixture):
    """Test that JWT algorithm defaults to HS256."""
    mocker.patch.dict(os.environ, {"JWT_VERIFY_KEY": "a" * 32}, clear=True)

    settings = Settings()
    assert settings.JWT_ALGORITHM == "HS256"


def test_algorithm_whitespace_stripped(mocker: MockerFixture):
    """Test that JWT algorithm whitespace is stripped."""
    secret = "a" * 32
    mocker.patch.dict(
        os.environ,
        {"JWT_VERIFY_KEY": secret, "JWT_SIGN_ALGORITHM": " HS256 "},
        clear=True,
    )

    settings = Settings()
    assert settings.JWT_ALGORITHM == "HS256"


def test_no_crypto_warning(mocker: MockerFixture, caplog: pytest.LogCaptureFixture):
    """Test warning when crypto package is not available."""
    secret = "a" * 32
    mocker.patch.dict(os.environ, {"JWT_VERIFY_KEY": secret}, clear=True)

    # Mock has_crypto to return False
    mocker.patch("autogpt_libs.auth.config.has_crypto", False)

    with caplog.at_level(logging.WARNING):
        Settings()
    assert "Asymmetric JWT verification is not available" in caplog.text
    assert "cryptography" in caplog.text


def test_algorithm_invalid_raises_error(mocker: MockerFixture):
    """Test that invalid JWT algorithm raises AuthConfigError."""
    secret = "a" * 32
    mocker.patch.dict(
        os.environ,
        {"JWT_VERIFY_KEY": secret, "JWT_SIGN_ALGORITHM": "INVALID_ALG"},
        clear=True,
    )

    with pytest.raises(AuthConfigError) as exc_info:
        Settings()
    assert "Invalid JWT_SIGN_ALGORITHM" in str(exc_info.value)
    assert "INVALID_ALG" in str(exc_info.value)


def test_algorithm_none_raises_error(mocker: MockerFixture):
    """Test that 'none' algorithm raises AuthConfigError."""
    secret = "a" * 32
    mocker.patch.dict(
        os.environ,
        {"JWT_VERIFY_KEY": secret, "JWT_SIGN_ALGORITHM": "none"},
        clear=True,
    )

    with pytest.raises(AuthConfigError) as exc_info:
        Settings()
    assert "Invalid JWT_SIGN_ALGORITHM" in str(exc_info.value)


@pytest.mark.parametrize("algorithm", ["HS256", "HS384", "HS512"])
def test_algorithm_symmetric_warning(
    mocker: MockerFixture, caplog: pytest.LogCaptureFixture, algorithm: str
):
    """Test warning for symmetric algorithms (HS256, HS384, HS512)."""
    secret = "a" * 32
    mocker.patch.dict(
        os.environ,
        {"JWT_VERIFY_KEY": secret, "JWT_SIGN_ALGORITHM": algorithm},
        clear=True,
    )

    with caplog.at_level(logging.WARNING):
        settings = Settings()
    assert algorithm in caplog.text
    assert "symmetric shared-key signature algorithm" in caplog.text
    assert settings.JWT_ALGORITHM == algorithm


@pytest.mark.parametrize(
    "algorithm",
    ["ES256", "ES384", "ES512", "RS256", "RS384", "RS512", "PS256", "PS384", "PS512"],
)
def test_algorithm_asymmetric_no_warning(
    mocker: MockerFixture, caplog: pytest.LogCaptureFixture, algorithm: str
):
    """Test that asymmetric algorithms do not trigger warning."""
    secret = "a" * 32
    mocker.patch.dict(
        os.environ,
        {"JWT_VERIFY_KEY": secret, "JWT_SIGN_ALGORITHM": algorithm},
        clear=True,
    )

    with caplog.at_level(logging.WARNING):
        settings = Settings()
    # Should not contain the symmetric algorithm warning
    assert "symmetric shared-key signature algorithm" not in caplog.text
    assert settings.JWT_ALGORITHM == algorithm
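Taken together, these tests pin down the Settings contract: the verify key and signing algorithm come from the JWT_VERIFY_KEY and JWT_SIGN_ALGORITHM environment variables, invalid or "none" algorithms are rejected, and symmetric algorithms log a warning. A minimal configuration sketch under those same assumptions (the key value here is illustrative, not a real key):

import os

from autogpt_libs.auth.config import Settings

# Asymmetric setup: per the tests above, no shared-key warning for RS256.
os.environ["JWT_SIGN_ALGORITHM"] = "RS256"
os.environ["JWT_VERIFY_KEY"] = "-----BEGIN PUBLIC KEY-----\n...illustrative...\n-----END PUBLIC KEY-----"

settings = Settings()
assert settings.JWT_ALGORITHM == "RS256"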
117
autogpt_platform/autogpt_libs/autogpt_libs/auth/dependencies.py
Normal file
@@ -0,0 +1,117 @@
"""
FastAPI dependency functions for JWT-based authentication and authorization.

These are the high-level dependency functions used in route definitions.
"""

import logging

import fastapi
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer

from .jwt_utils import get_jwt_payload, verify_user
from .models import User

optional_bearer = HTTPBearer(auto_error=False)

# Header name for admin impersonation
IMPERSONATION_HEADER_NAME = "X-Act-As-User-Id"

logger = logging.getLogger(__name__)


def get_optional_user_id(
    credentials: HTTPAuthorizationCredentials | None = fastapi.Security(
        optional_bearer
    ),
) -> str | None:
    """
    Attempts to extract the user ID ("sub" claim) from a Bearer JWT if provided.

    This dependency allows for both authenticated and anonymous access. If a valid
    bearer token is supplied, it parses the JWT and extracts the user ID. If the
    token is missing or invalid, it returns None, treating the request as anonymous.

    Args:
        credentials: Optional HTTPAuthorizationCredentials object from FastAPI Security dependency.

    Returns:
        The user ID (str) extracted from the JWT "sub" claim, or None if no valid token is present.
    """
    if not credentials:
        return None

    try:
        # Parse JWT token to get user ID
        from autogpt_libs.auth.jwt_utils import parse_jwt_token

        payload = parse_jwt_token(credentials.credentials)
        return payload.get("sub")
    except Exception as e:
        logger.debug(f"Auth token validation failed (anonymous access): {e}")
        return None


async def requires_user(jwt_payload: dict = fastapi.Security(get_jwt_payload)) -> User:
    """
    FastAPI dependency that requires a valid authenticated user.

    Raises:
        HTTPException: 401 for authentication failures
    """
    return verify_user(jwt_payload, admin_only=False)


async def requires_admin_user(
    jwt_payload: dict = fastapi.Security(get_jwt_payload),
) -> User:
    """
    FastAPI dependency that requires a valid admin user.

    Raises:
        HTTPException: 401 for authentication failures, 403 for insufficient permissions
    """
    return verify_user(jwt_payload, admin_only=True)


async def get_user_id(
    request: fastapi.Request, jwt_payload: dict = fastapi.Security(get_jwt_payload)
) -> str:
    """
    FastAPI dependency that returns the ID of the authenticated user.

    Supports admin impersonation via the X-Act-As-User-Id header:
    - If the header is present and the user is an admin, returns the impersonated user ID
    - Otherwise returns the authenticated user's own ID
    - Logs all impersonation actions for the audit trail

    Raises:
        HTTPException: 401 for authentication failures or missing user ID
        HTTPException: 403 if a non-admin tries to use impersonation
    """
    # Get the authenticated user's ID from the JWT
    user_id = jwt_payload.get("sub")
    if not user_id:
        raise fastapi.HTTPException(
            status_code=401, detail="User ID not found in token"
        )

    # Check for the admin impersonation header
    impersonate_header = request.headers.get(IMPERSONATION_HEADER_NAME, "").strip()
    if impersonate_header:
        # Verify the authenticated user is an admin
        authenticated_user = verify_user(jwt_payload, admin_only=False)
        if authenticated_user.role != "admin":
            raise fastapi.HTTPException(
                status_code=403, detail="Only admin users can impersonate other users"
            )

        # Log the impersonation for the audit trail
        logger.info(
            f"Admin impersonation: {authenticated_user.user_id} ({authenticated_user.email}) "
            f"acting as user {impersonate_header} for request {request.method} {request.url}"
        )

        return impersonate_header

    return user_id
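A short usage sketch for these dependencies, assuming only the functions defined above; the routes are illustrative:

import fastapi

from autogpt_libs.auth.dependencies import get_user_id, requires_admin_user

app = fastapi.FastAPI()


@app.get("/me")
async def read_own_data(user_id: str = fastapi.Security(get_user_id)):
    # An admin may add "X-Act-As-User-Id: <target-id>" to act as another user;
    # a non-admin sending that header gets a 403.
    return {"user_id": user_id}


@app.get("/admin/stats")
async def admin_stats(admin=fastapi.Security(requires_admin_user)):
    return {"requested_by": admin.user_id}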
@@ -0,0 +1,554 @@
"""
Comprehensive integration tests for authentication dependencies.
Tests the full authentication flow from HTTP requests to user validation.
"""

import os
from unittest.mock import Mock

import pytest
from fastapi import FastAPI, HTTPException, Request, Security
from fastapi.testclient import TestClient
from pytest_mock import MockerFixture

from autogpt_libs.auth.dependencies import (
    get_user_id,
    requires_admin_user,
    requires_user,
)
from autogpt_libs.auth.models import User


class TestAuthDependencies:
    """Test suite for authentication dependency functions."""

    @pytest.fixture
    def app(self):
        """Create a test FastAPI application."""
        app = FastAPI()

        @app.get("/user")
        def get_user_endpoint(user: User = Security(requires_user)):
            return {"user_id": user.user_id, "role": user.role}

        @app.get("/admin")
        def get_admin_endpoint(user: User = Security(requires_admin_user)):
            return {"user_id": user.user_id, "role": user.role}

        @app.get("/user-id")
        def get_user_id_endpoint(user_id: str = Security(get_user_id)):
            return {"user_id": user_id}

        return app

    @pytest.fixture
    def client(self, app):
        """Create a test client."""
        return TestClient(app)

    @pytest.mark.asyncio
    async def test_requires_user_with_valid_jwt_payload(self, mocker: MockerFixture):
        """Test requires_user with valid JWT payload."""
        jwt_payload = {"sub": "user-123", "role": "user", "email": "user@example.com"}

        # Mock get_jwt_payload to return our test payload
        mocker.patch(
            "autogpt_libs.auth.dependencies.get_jwt_payload", return_value=jwt_payload
        )
        user = await requires_user(jwt_payload)
        assert isinstance(user, User)
        assert user.user_id == "user-123"
        assert user.role == "user"

    @pytest.mark.asyncio
    async def test_requires_user_with_admin_jwt_payload(self, mocker: MockerFixture):
        """Test requires_user accepts admin users."""
        jwt_payload = {
            "sub": "admin-456",
            "role": "admin",
            "email": "admin@example.com",
        }

        mocker.patch(
            "autogpt_libs.auth.dependencies.get_jwt_payload", return_value=jwt_payload
        )
        user = await requires_user(jwt_payload)
        assert user.user_id == "admin-456"
        assert user.role == "admin"

    @pytest.mark.asyncio
    async def test_requires_user_missing_sub(self):
        """Test requires_user with missing user ID."""
        jwt_payload = {"role": "user", "email": "user@example.com"}

        with pytest.raises(HTTPException) as exc_info:
            await requires_user(jwt_payload)
        assert exc_info.value.status_code == 401
        assert "User ID not found" in exc_info.value.detail

    @pytest.mark.asyncio
    async def test_requires_user_empty_sub(self):
        """Test requires_user with empty user ID."""
        jwt_payload = {"sub": "", "role": "user"}

        with pytest.raises(HTTPException) as exc_info:
            await requires_user(jwt_payload)
        assert exc_info.value.status_code == 401

    @pytest.mark.asyncio
    async def test_requires_admin_user_with_admin(self, mocker: MockerFixture):
        """Test requires_admin_user with admin role."""
        jwt_payload = {
            "sub": "admin-789",
            "role": "admin",
            "email": "admin@example.com",
        }

        mocker.patch(
            "autogpt_libs.auth.dependencies.get_jwt_payload", return_value=jwt_payload
        )
        user = await requires_admin_user(jwt_payload)
        assert user.user_id == "admin-789"
        assert user.role == "admin"

    @pytest.mark.asyncio
    async def test_requires_admin_user_with_regular_user(self):
        """Test requires_admin_user rejects regular users."""
        jwt_payload = {"sub": "user-123", "role": "user", "email": "user@example.com"}

        with pytest.raises(HTTPException) as exc_info:
            await requires_admin_user(jwt_payload)
        assert exc_info.value.status_code == 403
        assert "Admin access required" in exc_info.value.detail

    @pytest.mark.asyncio
    async def test_requires_admin_user_missing_role(self):
        """Test requires_admin_user with missing role."""
        jwt_payload = {"sub": "user-123", "email": "user@example.com"}

        with pytest.raises(KeyError):
            await requires_admin_user(jwt_payload)

    @pytest.mark.asyncio
    async def test_get_user_id_with_valid_payload(self, mocker: MockerFixture):
        """Test get_user_id extracts user ID correctly."""
        request = Mock(spec=Request)
        request.headers = {}
        jwt_payload = {"sub": "user-id-xyz", "role": "user"}

        mocker.patch(
            "autogpt_libs.auth.dependencies.get_jwt_payload", return_value=jwt_payload
        )
        user_id = await get_user_id(request, jwt_payload)
        assert user_id == "user-id-xyz"

    @pytest.mark.asyncio
    async def test_get_user_id_missing_sub(self):
        """Test get_user_id with missing user ID."""
        request = Mock(spec=Request)
        request.headers = {}
        jwt_payload = {"role": "user"}

        with pytest.raises(HTTPException) as exc_info:
            await get_user_id(request, jwt_payload)
        assert exc_info.value.status_code == 401
        assert "User ID not found" in exc_info.value.detail

    @pytest.mark.asyncio
    async def test_get_user_id_none_sub(self):
        """Test get_user_id with None user ID."""
        request = Mock(spec=Request)
        request.headers = {}
        jwt_payload = {"sub": None, "role": "user"}

        with pytest.raises(HTTPException) as exc_info:
            await get_user_id(request, jwt_payload)
        assert exc_info.value.status_code == 401


class TestAuthDependenciesIntegration:
    """Integration tests for auth dependencies with FastAPI."""

    acceptable_jwt_secret = "test-secret-with-proper-length-123456"

    @pytest.fixture
    def create_token(self, mocker: MockerFixture):
        """Helper to create JWT tokens."""
        import jwt

        mocker.patch.dict(
            os.environ,
            {"JWT_VERIFY_KEY": self.acceptable_jwt_secret},
            clear=True,
        )

        def _create_token(payload, secret=self.acceptable_jwt_secret):
            return jwt.encode(payload, secret, algorithm="HS256")

        return _create_token

    @pytest.mark.asyncio
    async def test_endpoint_auth_enabled_no_token(self):
        """Test that endpoints require a token when auth is enabled."""
        app = FastAPI()

        @app.get("/test")
        def test_endpoint(user: User = Security(requires_user)):
            return {"user_id": user.user_id}

        client = TestClient(app)

        # Should fail without an auth header
        response = client.get("/test")
        assert response.status_code == 401

    @pytest.mark.asyncio
    async def test_endpoint_with_valid_token(self, create_token):
        """Test endpoint with valid JWT token."""
        app = FastAPI()

        @app.get("/test")
        def test_endpoint(user: User = Security(requires_user)):
            return {"user_id": user.user_id, "role": user.role}

        client = TestClient(app)

        token = create_token(
            {"sub": "test-user", "role": "user", "aud": "authenticated"},
            secret=self.acceptable_jwt_secret,
        )

        response = client.get("/test", headers={"Authorization": f"Bearer {token}"})
        assert response.status_code == 200
        assert response.json()["user_id"] == "test-user"

    @pytest.mark.asyncio
    async def test_admin_endpoint_requires_admin_role(self, create_token):
        """Test admin endpoint rejects non-admin users."""
        app = FastAPI()

        @app.get("/admin")
        def admin_endpoint(user: User = Security(requires_admin_user)):
            return {"user_id": user.user_id}

        client = TestClient(app)

        # Regular user token
        user_token = create_token(
            {"sub": "regular-user", "role": "user", "aud": "authenticated"},
            secret=self.acceptable_jwt_secret,
        )

        response = client.get(
            "/admin", headers={"Authorization": f"Bearer {user_token}"}
        )
        assert response.status_code == 403

        # Admin token
        admin_token = create_token(
            {"sub": "admin-user", "role": "admin", "aud": "authenticated"},
            secret=self.acceptable_jwt_secret,
        )

        response = client.get(
            "/admin", headers={"Authorization": f"Bearer {admin_token}"}
        )
        assert response.status_code == 200
        assert response.json()["user_id"] == "admin-user"


class TestAuthDependenciesEdgeCases:
    """Edge case tests for authentication dependencies."""

    @pytest.mark.asyncio
    async def test_dependency_with_complex_payload(self):
        """Test dependencies handle complex JWT payloads."""
        complex_payload = {
            "sub": "user-123",
            "role": "admin",
            "email": "test@example.com",
            "app_metadata": {"provider": "email", "providers": ["email"]},
            "user_metadata": {
                "full_name": "Test User",
                "avatar_url": "https://example.com/avatar.jpg",
            },
            "aud": "authenticated",
            "iat": 1234567890,
            "exp": 9999999999,
        }

        user = await requires_user(complex_payload)
        assert user.user_id == "user-123"
        assert user.email == "test@example.com"

        admin = await requires_admin_user(complex_payload)
        assert admin.role == "admin"

    @pytest.mark.asyncio
    async def test_dependency_with_unicode_in_payload(self):
        """Test dependencies handle unicode in JWT payloads."""
        unicode_payload = {
            "sub": "user-😀-123",
            "role": "user",
            "email": "测试@example.com",
            "name": "日本語",
        }

        user = await requires_user(unicode_payload)
        assert "😀" in user.user_id
        assert user.email == "测试@example.com"

    @pytest.mark.asyncio
    async def test_dependency_with_null_values(self):
        """Test dependencies handle null values in payload."""
        null_payload = {
            "sub": "user-123",
            "role": "user",
            "email": None,
            "phone": None,
            "metadata": None,
        }

        user = await requires_user(null_payload)
        assert user.user_id == "user-123"
        assert user.email is None

    @pytest.mark.asyncio
    async def test_concurrent_requests_isolation(self):
        """Test that concurrent requests don't interfere with each other."""
        payload1 = {"sub": "user-1", "role": "user"}
        payload2 = {"sub": "user-2", "role": "admin"}

        # Simulate concurrent processing
        user1 = await requires_user(payload1)
        user2 = await requires_admin_user(payload2)

        assert user1.user_id == "user-1"
        assert user2.user_id == "user-2"
        assert user1.role == "user"
        assert user2.role == "admin"

    @pytest.mark.parametrize(
        "payload,expected_error,admin_only",
        [
            (None, "Authorization header is missing", False),
            ({}, "User ID not found", False),
            ({"sub": ""}, "User ID not found", False),
            ({"role": "user"}, "User ID not found", False),
            ({"sub": "user", "role": "user"}, "Admin access required", True),
        ],
    )
    @pytest.mark.asyncio
    async def test_dependency_error_cases(
        self, payload, expected_error: str, admin_only: bool
    ):
        """Test that errors propagate correctly through dependencies."""
        # Import verify_user to test it directly since dependencies use FastAPI Security
        from autogpt_libs.auth.jwt_utils import verify_user

        with pytest.raises(HTTPException) as exc_info:
            verify_user(payload, admin_only=admin_only)
        assert expected_error in exc_info.value.detail

    @pytest.mark.asyncio
    async def test_dependency_valid_user(self):
        """Test valid user case for dependency."""
        # Import verify_user to test it directly since dependencies use FastAPI Security
        from autogpt_libs.auth.jwt_utils import verify_user

        # Valid case
        user = verify_user({"sub": "user", "role": "user"}, admin_only=False)
        assert user.user_id == "user"


class TestAdminImpersonation:
    """Test suite for admin user impersonation functionality."""

    @pytest.mark.asyncio
    async def test_admin_impersonation_success(self, mocker: MockerFixture):
        """Test admin successfully impersonating another user."""
        request = Mock(spec=Request)
        request.headers = {"X-Act-As-User-Id": "target-user-123"}
        jwt_payload = {
            "sub": "admin-456",
            "role": "admin",
            "email": "admin@example.com",
        }

        # Mock verify_user to return admin user data
        mock_verify_user = mocker.patch("autogpt_libs.auth.dependencies.verify_user")
        mock_verify_user.return_value = Mock(
            user_id="admin-456", email="admin@example.com", role="admin"
        )

        # Mock logger to verify audit logging
        mock_logger = mocker.patch("autogpt_libs.auth.dependencies.logger")

        mocker.patch(
            "autogpt_libs.auth.dependencies.get_jwt_payload", return_value=jwt_payload
        )

        user_id = await get_user_id(request, jwt_payload)

        # Should return the impersonated user ID
        assert user_id == "target-user-123"

        # Should log the impersonation attempt
        mock_logger.info.assert_called_once()
        log_call = mock_logger.info.call_args[0][0]
        assert "Admin impersonation:" in log_call
        assert "admin@example.com" in log_call
        assert "target-user-123" in log_call

    @pytest.mark.asyncio
    async def test_non_admin_impersonation_attempt(self, mocker: MockerFixture):
        """Test non-admin user attempting impersonation returns 403."""
        request = Mock(spec=Request)
        request.headers = {"X-Act-As-User-Id": "target-user-123"}
        jwt_payload = {
            "sub": "regular-user",
            "role": "user",
            "email": "user@example.com",
        }

        # Mock verify_user to return regular user data
        mock_verify_user = mocker.patch("autogpt_libs.auth.dependencies.verify_user")
        mock_verify_user.return_value = Mock(
            user_id="regular-user", email="user@example.com", role="user"
        )

        mocker.patch(
            "autogpt_libs.auth.dependencies.get_jwt_payload", return_value=jwt_payload
        )

        with pytest.raises(HTTPException) as exc_info:
            await get_user_id(request, jwt_payload)

        assert exc_info.value.status_code == 403
        assert "Only admin users can impersonate other users" in exc_info.value.detail

    @pytest.mark.asyncio
    async def test_impersonation_empty_header(self, mocker: MockerFixture):
        """Test impersonation with empty header falls back to regular user ID."""
        request = Mock(spec=Request)
        request.headers = {"X-Act-As-User-Id": ""}
        jwt_payload = {
            "sub": "admin-456",
            "role": "admin",
            "email": "admin@example.com",
        }

        mocker.patch(
            "autogpt_libs.auth.dependencies.get_jwt_payload", return_value=jwt_payload
        )

        user_id = await get_user_id(request, jwt_payload)

        # Should fall back to the admin's own user ID
        assert user_id == "admin-456"

    @pytest.mark.asyncio
    async def test_impersonation_missing_header(self, mocker: MockerFixture):
        """Test normal behavior when impersonation header is missing."""
        request = Mock(spec=Request)
        request.headers = {}  # No impersonation header
        jwt_payload = {
            "sub": "admin-456",
            "role": "admin",
            "email": "admin@example.com",
        }

        mocker.patch(
            "autogpt_libs.auth.dependencies.get_jwt_payload", return_value=jwt_payload
        )

        user_id = await get_user_id(request, jwt_payload)

        # Should return the admin's own user ID
        assert user_id == "admin-456"

    @pytest.mark.asyncio
    async def test_impersonation_audit_logging_details(self, mocker: MockerFixture):
        """Test that impersonation audit logging includes all required details."""
        request = Mock(spec=Request)
        request.headers = {"X-Act-As-User-Id": "victim-user-789"}
        jwt_payload = {
            "sub": "admin-999",
            "role": "admin",
            "email": "superadmin@company.com",
        }

        # Mock verify_user to return admin user data
        mock_verify_user = mocker.patch("autogpt_libs.auth.dependencies.verify_user")
        mock_verify_user.return_value = Mock(
            user_id="admin-999", email="superadmin@company.com", role="admin"
        )

        # Mock logger to capture audit trail
        mock_logger = mocker.patch("autogpt_libs.auth.dependencies.logger")

        mocker.patch(
            "autogpt_libs.auth.dependencies.get_jwt_payload", return_value=jwt_payload
        )

        user_id = await get_user_id(request, jwt_payload)

        # Verify all audit details are logged
        assert user_id == "victim-user-789"
        mock_logger.info.assert_called_once()

        log_message = mock_logger.info.call_args[0][0]
        assert "Admin impersonation:" in log_message
        assert "superadmin@company.com" in log_message
        assert "victim-user-789" in log_message

    @pytest.mark.asyncio
    async def test_impersonation_header_case_sensitivity(self, mocker: MockerFixture):
        """Test that impersonation header is case-sensitive."""
        request = Mock(spec=Request)
        # Use wrong case - should not trigger impersonation
        # (a plain dict on the mock is case-sensitive, unlike real HTTP headers)
        request.headers = {"x-act-as-user-id": "target-user-123"}
        jwt_payload = {
            "sub": "admin-456",
            "role": "admin",
            "email": "admin@example.com",
        }

        mocker.patch(
            "autogpt_libs.auth.dependencies.get_jwt_payload", return_value=jwt_payload
        )

        user_id = await get_user_id(request, jwt_payload)

        # Should fall back to admin's own ID (header case mismatch)
        assert user_id == "admin-456"

    @pytest.mark.asyncio
    async def test_impersonation_with_whitespace_header(self, mocker: MockerFixture):
        """Test impersonation with whitespace in header value."""
        request = Mock(spec=Request)
        request.headers = {"X-Act-As-User-Id": " target-user-123 "}
        jwt_payload = {
            "sub": "admin-456",
            "role": "admin",
            "email": "admin@example.com",
        }

        # Mock verify_user to return admin user data
        mock_verify_user = mocker.patch("autogpt_libs.auth.dependencies.verify_user")
        mock_verify_user.return_value = Mock(
            user_id="admin-456", email="admin@example.com", role="admin"
        )

        # Mock logger
        mock_logger = mocker.patch("autogpt_libs.auth.dependencies.logger")

        mocker.patch(
            "autogpt_libs.auth.dependencies.get_jwt_payload", return_value=jwt_payload
        )

        user_id = await get_user_id(request, jwt_payload)

        # Should strip whitespace and impersonate successfully
        assert user_id == "target-user-123"
        mock_logger.info.assert_called_once()
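Read together, these tests document the full impersonation contract. A hedged end-to-end sketch of the same flow with TestClient, assuming an HS256 shared secret is placed in JWT_VERIFY_KEY before the settings are first read (all values illustrative):

import os

os.environ["JWT_VERIFY_KEY"] = "test-secret-with-proper-length-123456"

import jwt
from fastapi import FastAPI, Security
from fastapi.testclient import TestClient

from autogpt_libs.auth.dependencies import get_user_id

app = FastAPI()


@app.get("/whoami")
def whoami(user_id: str = Security(get_user_id)):
    return {"user_id": user_id}


admin_token = jwt.encode(
    {"sub": "admin-1", "role": "admin", "aud": "authenticated"},
    os.environ["JWT_VERIFY_KEY"],
    algorithm="HS256",
)
client = TestClient(app)
resp = client.get(
    "/whoami",
    headers={"Authorization": f"Bearer {admin_token}", "X-Act-As-User-Id": "user-42"},
)
assert resp.json() == {"user_id": "user-42"}  # admin acting as user-42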
68
autogpt_platform/autogpt_libs/autogpt_libs/auth/helpers.py
Normal file
@@ -0,0 +1,68 @@
from fastapi import FastAPI
from fastapi.openapi.utils import get_openapi

from .jwt_utils import bearer_jwt_auth


def add_auth_responses_to_openapi(app: FastAPI) -> None:
    """
    Set up custom OpenAPI schema generation that adds 401 responses
    to all authenticated endpoints.

    This is needed when using HTTPBearer with auto_error=False to get proper
    401 responses instead of 403, but FastAPI only automatically adds security
    responses when auto_error=True.
    """

    def custom_openapi():
        if app.openapi_schema:
            return app.openapi_schema

        openapi_schema = get_openapi(
            title=app.title,
            version=app.version,
            description=app.description,
            routes=app.routes,
        )

        # Add a 401 response to all endpoints that have security requirements
        for path, methods in openapi_schema["paths"].items():
            for method, details in methods.items():
                security_schemas = [
                    schema
                    for auth_option in details.get("security", [])
                    for schema in auth_option.keys()
                ]
                if bearer_jwt_auth.scheme_name not in security_schemas:
                    continue

                if "responses" not in details:
                    details["responses"] = {}

                details["responses"]["401"] = {
                    "$ref": "#/components/responses/HTTP401NotAuthenticatedError"
                }

        # Ensure #/components/responses exists
        if "components" not in openapi_schema:
            openapi_schema["components"] = {}
        if "responses" not in openapi_schema["components"]:
            openapi_schema["components"]["responses"] = {}

        # Define the 401 response
        openapi_schema["components"]["responses"]["HTTP401NotAuthenticatedError"] = {
            "description": "Authentication required",
            "content": {
                "application/json": {
                    "schema": {
                        "type": "object",
                        "properties": {"detail": {"type": "string"}},
                    }
                }
            },
        }

        app.openapi_schema = openapi_schema
        return app.openapi_schema

    app.openapi = custom_openapi
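A minimal sketch of wiring this helper into an application, using only the functions from this package; the route is illustrative:

from fastapi import FastAPI, Security

from autogpt_libs.auth.dependencies import requires_user
from autogpt_libs.auth.helpers import add_auth_responses_to_openapi

app = FastAPI()


@app.get("/items")
def list_items(user=Security(requires_user)):
    return []


# After this call, /openapi.json documents a 401 response for /items.
add_auth_responses_to_openapi(app)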
435
autogpt_platform/autogpt_libs/autogpt_libs/auth/helpers_test.py
Normal file
@@ -0,0 +1,435 @@
"""
Comprehensive tests for auth helpers module to achieve 100% coverage.
Tests OpenAPI schema generation and authentication response handling.
"""

from unittest import mock

from fastapi import FastAPI
from fastapi.openapi.utils import get_openapi

from autogpt_libs.auth.helpers import add_auth_responses_to_openapi
from autogpt_libs.auth.jwt_utils import bearer_jwt_auth


def test_add_auth_responses_to_openapi_basic():
    """Test adding 401 responses to OpenAPI schema."""
    app = FastAPI(title="Test App", version="1.0.0")

    # Add some test endpoints with authentication
    from fastapi import Depends

    from autogpt_libs.auth.dependencies import requires_user

    @app.get("/protected", dependencies=[Depends(requires_user)])
    def protected_endpoint():
        return {"message": "Protected"}

    @app.get("/public")
    def public_endpoint():
        return {"message": "Public"}

    # Apply the OpenAPI customization
    add_auth_responses_to_openapi(app)

    # Get the OpenAPI schema
    schema = app.openapi()

    # Verify basic schema properties
    assert schema["info"]["title"] == "Test App"
    assert schema["info"]["version"] == "1.0.0"

    # Verify 401 response component is added
    assert "components" in schema
    assert "responses" in schema["components"]
    assert "HTTP401NotAuthenticatedError" in schema["components"]["responses"]

    # Verify 401 response structure
    error_response = schema["components"]["responses"]["HTTP401NotAuthenticatedError"]
    assert error_response["description"] == "Authentication required"
    assert "application/json" in error_response["content"]
    assert "schema" in error_response["content"]["application/json"]

    # Verify schema properties
    response_schema = error_response["content"]["application/json"]["schema"]
    assert response_schema["type"] == "object"
    assert "detail" in response_schema["properties"]
    assert response_schema["properties"]["detail"]["type"] == "string"


def test_add_auth_responses_to_openapi_with_security():
    """Test that 401 responses are added only to secured endpoints."""
    app = FastAPI()

    # Mock endpoint with security
    from fastapi import Security

    from autogpt_libs.auth.dependencies import get_user_id

    @app.get("/secured")
    def secured_endpoint(user_id: str = Security(get_user_id)):
        return {"user_id": user_id}

    @app.post("/also-secured")
    def another_secured(user_id: str = Security(get_user_id)):
        return {"status": "ok"}

    @app.get("/unsecured")
    def unsecured_endpoint():
        return {"public": True}

    # Apply OpenAPI customization
    add_auth_responses_to_openapi(app)

    # Get schema
    schema = app.openapi()

    # Check that secured endpoints have 401 responses
    if "/secured" in schema["paths"]:
        if "get" in schema["paths"]["/secured"]:
            secured_get = schema["paths"]["/secured"]["get"]
            if "responses" in secured_get:
                assert "401" in secured_get["responses"]
                assert (
                    secured_get["responses"]["401"]["$ref"]
                    == "#/components/responses/HTTP401NotAuthenticatedError"
                )

    if "/also-secured" in schema["paths"]:
        if "post" in schema["paths"]["/also-secured"]:
            secured_post = schema["paths"]["/also-secured"]["post"]
            if "responses" in secured_post:
                assert "401" in secured_post["responses"]

    # Check that unsecured endpoint does not have 401 response
    if "/unsecured" in schema["paths"]:
        if "get" in schema["paths"]["/unsecured"]:
            unsecured_get = schema["paths"]["/unsecured"]["get"]
            if "responses" in unsecured_get:
                assert "401" not in unsecured_get.get("responses", {})


def test_add_auth_responses_to_openapi_cached_schema():
    """Test that OpenAPI schema is cached after first generation."""
    app = FastAPI()

    # Apply customization
    add_auth_responses_to_openapi(app)

    # Get schema twice
    schema1 = app.openapi()
    schema2 = app.openapi()

    # Should return the same cached object
    assert schema1 is schema2


def test_add_auth_responses_to_openapi_existing_responses():
    """Test handling endpoints that already have responses defined."""
    app = FastAPI()

    from fastapi import Security

    from autogpt_libs.auth.jwt_utils import get_jwt_payload

    @app.get(
        "/with-responses",
        responses={
            200: {"description": "Success"},
            404: {"description": "Not found"},
        },
    )
    def endpoint_with_responses(jwt: dict = Security(get_jwt_payload)):
        return {"data": "test"}

    # Apply customization
    add_auth_responses_to_openapi(app)

    schema = app.openapi()

    # Check that existing responses are preserved and 401 is added
    if "/with-responses" in schema["paths"]:
        if "get" in schema["paths"]["/with-responses"]:
            responses = schema["paths"]["/with-responses"]["get"].get("responses", {})
            # Original responses should be preserved
            if "200" in responses:
                assert responses["200"]["description"] == "Success"
            if "404" in responses:
                assert responses["404"]["description"] == "Not found"
            # 401 should be added
            if "401" in responses:
                assert (
                    responses["401"]["$ref"]
                    == "#/components/responses/HTTP401NotAuthenticatedError"
                )


def test_add_auth_responses_to_openapi_no_security_endpoints():
    """Test with app that has no secured endpoints."""
    app = FastAPI()

    @app.get("/public1")
    def public1():
        return {"message": "public1"}

    @app.post("/public2")
    def public2():
        return {"message": "public2"}

    # Apply customization
    add_auth_responses_to_openapi(app)

    schema = app.openapi()

    # Component should still be added for consistency
    assert "HTTP401NotAuthenticatedError" in schema["components"]["responses"]

    # But no endpoints should have 401 responses
    for path in schema["paths"].values():
        for method in path.values():
            if isinstance(method, dict) and "responses" in method:
                assert "401" not in method["responses"]


def test_add_auth_responses_to_openapi_multiple_security_schemes():
    """Test endpoints with multiple security requirements."""
    app = FastAPI()

    from fastapi import Security

    from autogpt_libs.auth.dependencies import requires_admin_user, requires_user
    from autogpt_libs.auth.models import User

    @app.get("/multi-auth")
    def multi_auth(
        user: User = Security(requires_user),
        admin: User = Security(requires_admin_user),
    ):
        return {"status": "super secure"}

    # Apply customization
    add_auth_responses_to_openapi(app)

    schema = app.openapi()

    # Should have 401 response
    if "/multi-auth" in schema["paths"]:
        if "get" in schema["paths"]["/multi-auth"]:
            responses = schema["paths"]["/multi-auth"]["get"].get("responses", {})
            if "401" in responses:
                assert (
                    responses["401"]["$ref"]
                    == "#/components/responses/HTTP401NotAuthenticatedError"
                )


def test_add_auth_responses_to_openapi_empty_components():
    """Test when OpenAPI schema has no components section initially."""
    app = FastAPI()

    # Mock get_openapi to return schema without components
    original_get_openapi = get_openapi

    def mock_get_openapi(*args, **kwargs):
        schema = original_get_openapi(*args, **kwargs)
        # Remove components if it exists
        if "components" in schema:
            del schema["components"]
        return schema

    with mock.patch("autogpt_libs.auth.helpers.get_openapi", mock_get_openapi):
        # Apply customization
        add_auth_responses_to_openapi(app)

        schema = app.openapi()

        # Components should be created
        assert "components" in schema
        assert "responses" in schema["components"]
        assert "HTTP401NotAuthenticatedError" in schema["components"]["responses"]


def test_add_auth_responses_to_openapi_all_http_methods():
    """Test that all HTTP methods are handled correctly."""
    app = FastAPI()

    from fastapi import Security

    from autogpt_libs.auth.jwt_utils import get_jwt_payload

    @app.get("/resource")
    def get_resource(jwt: dict = Security(get_jwt_payload)):
        return {"method": "GET"}

    @app.post("/resource")
    def post_resource(jwt: dict = Security(get_jwt_payload)):
        return {"method": "POST"}

    @app.put("/resource")
    def put_resource(jwt: dict = Security(get_jwt_payload)):
        return {"method": "PUT"}

    @app.patch("/resource")
    def patch_resource(jwt: dict = Security(get_jwt_payload)):
        return {"method": "PATCH"}

    @app.delete("/resource")
    def delete_resource(jwt: dict = Security(get_jwt_payload)):
        return {"method": "DELETE"}

    # Apply customization
    add_auth_responses_to_openapi(app)

    schema = app.openapi()

    # All methods should have 401 response
    if "/resource" in schema["paths"]:
        for method in ["get", "post", "put", "patch", "delete"]:
            if method in schema["paths"]["/resource"]:
                method_spec = schema["paths"]["/resource"][method]
                if "responses" in method_spec:
                    assert "401" in method_spec["responses"]


def test_bearer_jwt_auth_scheme_config():
    """Test that bearer_jwt_auth is configured correctly."""
    assert bearer_jwt_auth.scheme_name == "HTTPBearerJWT"
    assert bearer_jwt_auth.auto_error is False


def test_add_auth_responses_with_no_routes():
    """Test OpenAPI generation with app that has no routes."""
    app = FastAPI(title="Empty App")

    # Apply customization to empty app
    add_auth_responses_to_openapi(app)

    schema = app.openapi()

    # Should still have basic structure
    assert schema["info"]["title"] == "Empty App"
    assert "components" in schema
    assert "responses" in schema["components"]
    assert "HTTP401NotAuthenticatedError" in schema["components"]["responses"]


def test_custom_openapi_function_replacement():
    """Test that the custom openapi function properly replaces the default."""
    app = FastAPI()

    # Store original function
    original_openapi = app.openapi

    # Apply customization
    add_auth_responses_to_openapi(app)

    # Function should be replaced
    assert app.openapi != original_openapi
    assert callable(app.openapi)


def test_endpoint_without_responses_section():
    """Test endpoint that has security but no responses section initially."""
    app = FastAPI()

    from fastapi import Security
    from fastapi.openapi.utils import get_openapi as original_get_openapi

    from autogpt_libs.auth.jwt_utils import get_jwt_payload

    # Create endpoint
    @app.get("/no-responses")
    def endpoint_without_responses(jwt: dict = Security(get_jwt_payload)):
        return {"data": "test"}

    # Mock get_openapi to remove responses from the endpoint
    def mock_get_openapi(*args, **kwargs):
        schema = original_get_openapi(*args, **kwargs)
        # Remove responses from our endpoint to trigger line 40
        if "/no-responses" in schema.get("paths", {}):
            if "get" in schema["paths"]["/no-responses"]:
                # Delete responses to force the code to create it
                if "responses" in schema["paths"]["/no-responses"]["get"]:
                    del schema["paths"]["/no-responses"]["get"]["responses"]
        return schema

    with mock.patch("autogpt_libs.auth.helpers.get_openapi", mock_get_openapi):
        # Apply customization
        add_auth_responses_to_openapi(app)

        # Get schema and verify 401 was added
        schema = app.openapi()

        # The endpoint should now have 401 response
        if "/no-responses" in schema["paths"]:
            if "get" in schema["paths"]["/no-responses"]:
                responses = schema["paths"]["/no-responses"]["get"].get("responses", {})
                assert "401" in responses
                assert (
                    responses["401"]["$ref"]
                    == "#/components/responses/HTTP401NotAuthenticatedError"
                )


def test_components_with_existing_responses():
    """Test when components already has a responses section."""
    app = FastAPI()

    # Mock get_openapi to return schema with existing components/responses
    from fastapi.openapi.utils import get_openapi as original_get_openapi

    def mock_get_openapi(*args, **kwargs):
        schema = original_get_openapi(*args, **kwargs)
        # Add existing components/responses
        if "components" not in schema:
            schema["components"] = {}
        schema["components"]["responses"] = {
            "ExistingResponse": {"description": "An existing response"}
        }
        return schema

    with mock.patch("autogpt_libs.auth.helpers.get_openapi", mock_get_openapi):
        # Apply customization
        add_auth_responses_to_openapi(app)

        schema = app.openapi()

        # Both responses should exist
        assert "ExistingResponse" in schema["components"]["responses"]
        assert "HTTP401NotAuthenticatedError" in schema["components"]["responses"]

        # Verify our 401 response structure
        error_response = schema["components"]["responses"][
            "HTTP401NotAuthenticatedError"
        ]
        assert error_response["description"] == "Authentication required"


def test_openapi_schema_persistence():
    """Test that modifications to OpenAPI schema persist correctly."""
    app = FastAPI()

    from fastapi import Security

    from autogpt_libs.auth.jwt_utils import get_jwt_payload

    @app.get("/test")
    def test_endpoint(jwt: dict = Security(get_jwt_payload)):
        return {"test": True}

    # Apply customization
    add_auth_responses_to_openapi(app)

    # Get schema multiple times
    schema1 = app.openapi()

    # Modify the cached schema (shouldn't affect future calls)
    schema1["info"]["title"] = "Modified Title"

    # Clear cache and get again
    app.openapi_schema = None
    schema2 = app.openapi()

    # Should regenerate with original title
    assert schema2["info"]["title"] == app.title
    assert schema2["info"]["title"] != "Modified Title"
80
autogpt_platform/autogpt_libs/autogpt_libs/auth/jwt_utils.py
Normal file
@@ -0,0 +1,80 @@
import logging
from typing import Any

import jwt
from fastapi import HTTPException, Security
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer

from .config import get_settings
from .models import User

logger = logging.getLogger(__name__)

# Bearer token authentication scheme
bearer_jwt_auth = HTTPBearer(
    bearerFormat="jwt", scheme_name="HTTPBearerJWT", auto_error=False
)


async def get_jwt_payload(
    credentials: HTTPAuthorizationCredentials | None = Security(bearer_jwt_auth),
) -> dict[str, Any]:
    """
    Extract and validate the JWT payload from the HTTP Authorization header.

    This is the core authentication function that handles:
    - Reading the `Authorization` header to obtain the JWT token
    - Verifying the JWT token's signature
    - Decoding the JWT token's payload

    :param credentials: HTTP Authorization credentials from the bearer token
    :return: JWT payload dictionary
    :raises HTTPException: 401 if authentication fails
    """
    if not credentials:
        raise HTTPException(status_code=401, detail="Authorization header is missing")

    try:
        payload = parse_jwt_token(credentials.credentials)
        logger.debug("Token decoded successfully")
        return payload
    except ValueError as e:
        raise HTTPException(status_code=401, detail=str(e))


def parse_jwt_token(token: str) -> dict[str, Any]:
    """
    Parse and validate a JWT token.

    :param token: The token to parse
    :return: The decoded payload
    :raises ValueError: If the token is invalid or expired
    """
    settings = get_settings()
    try:
        payload = jwt.decode(
            token,
            settings.JWT_VERIFY_KEY,
            algorithms=[settings.JWT_ALGORITHM],
            audience="authenticated",
        )
        return payload
    except jwt.ExpiredSignatureError:
        raise ValueError("Token has expired")
    except jwt.InvalidTokenError as e:
        raise ValueError(f"Invalid token: {str(e)}")


def verify_user(jwt_payload: dict | None, admin_only: bool) -> User:
    """
    Validate a decoded JWT payload and return the corresponding User.

    :raises HTTPException: 401 if the payload or user ID is missing,
        403 if admin_only is set and the payload's role is not "admin"
    """
    if jwt_payload is None:
        raise HTTPException(status_code=401, detail="Authorization header is missing")

    user_id = jwt_payload.get("sub")

    if not user_id:
        raise HTTPException(status_code=401, detail="User ID not found in token")

    if admin_only and jwt_payload["role"] != "admin":
        raise HTTPException(status_code=403, detail="Admin access required")

    return User.from_payload(jwt_payload)
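A minimal round-trip sketch for these utilities, assuming JWT_VERIFY_KEY is set to the shared HS256 secret before the settings are first read (the secret value is illustrative):

import os

os.environ["JWT_VERIFY_KEY"] = "test-secret-key-with-at-least-32-characters"

import jwt

from autogpt_libs.auth.jwt_utils import parse_jwt_token

token = jwt.encode(
    {"sub": "user-123", "role": "user", "aud": "authenticated"},
    os.environ["JWT_VERIFY_KEY"],
    algorithm="HS256",
)
payload = parse_jwt_token(token)  # raises ValueError on expiry or bad signature
assert payload["sub"] == "user-123"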
@@ -0,0 +1,308 @@
|
||||
"""
|
||||
Comprehensive tests for JWT token parsing and validation.
|
||||
Ensures 100% line and branch coverage for JWT security functions.
|
||||
"""
|
||||
|
||||
import os
|
||||
from datetime import datetime, timedelta, timezone
|
||||
|
||||
import jwt
|
||||
import pytest
|
||||
from fastapi import HTTPException
|
||||
from fastapi.security import HTTPAuthorizationCredentials
|
||||
from pytest_mock import MockerFixture
|
||||
|
||||
from autogpt_libs.auth import config, jwt_utils
|
||||
from autogpt_libs.auth.config import Settings
|
||||
from autogpt_libs.auth.models import User
|
||||
|
||||
MOCK_JWT_SECRET = "test-secret-key-with-at-least-32-characters"
|
||||
TEST_USER_PAYLOAD = {
|
||||
"sub": "test-user-id",
|
||||
"role": "user",
|
||||
"aud": "authenticated",
|
||||
"email": "test@example.com",
|
||||
}
|
||||
TEST_ADMIN_PAYLOAD = {
|
||||
"sub": "admin-user-id",
|
||||
"role": "admin",
|
||||
"aud": "authenticated",
|
||||
"email": "admin@example.com",
|
||||
}
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def mock_config(mocker: MockerFixture):
|
||||
mocker.patch.dict(os.environ, {"JWT_VERIFY_KEY": MOCK_JWT_SECRET}, clear=True)
|
||||
mocker.patch.object(config, "_settings", Settings())
|
||||
yield
|
||||
|
||||
|
||||
def create_token(payload, secret=None, algorithm="HS256"):
|
||||
"""Helper to create JWT tokens."""
|
||||
if secret is None:
|
||||
secret = MOCK_JWT_SECRET
|
||||
return jwt.encode(payload, secret, algorithm=algorithm)
|
||||
|
||||
|
||||
def test_parse_jwt_token_valid():
|
||||
"""Test parsing a valid JWT token."""
|
||||
token = create_token(TEST_USER_PAYLOAD)
|
||||
result = jwt_utils.parse_jwt_token(token)
|
||||
|
||||
assert result["sub"] == "test-user-id"
|
||||
assert result["role"] == "user"
|
||||
assert result["aud"] == "authenticated"
|
||||
|
||||
|
||||
def test_parse_jwt_token_expired():
|
||||
"""Test parsing an expired JWT token."""
|
||||
expired_payload = {
|
||||
**TEST_USER_PAYLOAD,
|
||||
"exp": datetime.now(timezone.utc) - timedelta(hours=1),
|
||||
}
|
||||
token = create_token(expired_payload)
|
||||
|
||||
with pytest.raises(ValueError) as exc_info:
|
||||
jwt_utils.parse_jwt_token(token)
|
||||
assert "Token has expired" in str(exc_info.value)
|
||||
|
||||
|
||||
def test_parse_jwt_token_invalid_signature():
|
||||
"""Test parsing a token with invalid signature."""
|
||||
# Create token with different secret
|
||||
token = create_token(TEST_USER_PAYLOAD, secret="wrong-secret")
|
||||
|
||||
with pytest.raises(ValueError) as exc_info:
|
||||
jwt_utils.parse_jwt_token(token)
|
||||
assert "Invalid token" in str(exc_info.value)
|
||||
|
||||
|
||||
def test_parse_jwt_token_malformed():
|
||||
"""Test parsing a malformed token."""
|
||||
malformed_tokens = [
|
||||
"not.a.token",
|
||||
"invalid",
|
||||
"",
|
||||
# Header only
|
||||
"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9",
|
||||
# No signature
|
||||
"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ0ZXN0In0",
|
||||
]
|
||||
|
||||
for token in malformed_tokens:
|
||||
with pytest.raises(ValueError) as exc_info:
|
||||
jwt_utils.parse_jwt_token(token)
|
||||
assert "Invalid token" in str(exc_info.value)
|
||||
|
||||
|
||||
def test_parse_jwt_token_wrong_audience():
|
||||
"""Test parsing a token with wrong audience."""
|
||||
wrong_aud_payload = {**TEST_USER_PAYLOAD, "aud": "wrong-audience"}
|
||||
token = create_token(wrong_aud_payload)
|
||||
|
||||
with pytest.raises(ValueError) as exc_info:
|
||||
jwt_utils.parse_jwt_token(token)
|
||||
assert "Invalid token" in str(exc_info.value)
|
||||
|
||||
|
||||
def test_parse_jwt_token_missing_audience():
|
||||
"""Test parsing a token without audience claim."""
|
||||
no_aud_payload = {k: v for k, v in TEST_USER_PAYLOAD.items() if k != "aud"}
|
||||
token = create_token(no_aud_payload)
|
||||
|
||||
with pytest.raises(ValueError) as exc_info:
|
||||
jwt_utils.parse_jwt_token(token)
|
||||
assert "Invalid token" in str(exc_info.value)
|
||||
|
||||
|
||||
async def test_get_jwt_payload_with_valid_token():
|
||||
"""Test extracting JWT payload with valid bearer token."""
|
||||
token = create_token(TEST_USER_PAYLOAD)
|
||||
credentials = HTTPAuthorizationCredentials(scheme="Bearer", credentials=token)
|
||||
|
||||
result = await jwt_utils.get_jwt_payload(credentials)
|
||||
assert result["sub"] == "test-user-id"
|
||||
assert result["role"] == "user"
|
||||
|
||||
|
||||
async def test_get_jwt_payload_no_credentials():
|
||||
"""Test JWT payload when no credentials provided."""
|
||||
with pytest.raises(HTTPException) as exc_info:
|
||||
await jwt_utils.get_jwt_payload(None)
|
||||
assert exc_info.value.status_code == 401
|
||||
assert "Authorization header is missing" in exc_info.value.detail
|
||||
|
||||
|
||||
async def test_get_jwt_payload_invalid_token():
|
||||
"""Test JWT payload extraction with invalid token."""
|
||||
credentials = HTTPAuthorizationCredentials(
|
||||
scheme="Bearer", credentials="invalid.token.here"
|
||||
)
|
||||
|
||||
with pytest.raises(HTTPException) as exc_info:
|
||||
await jwt_utils.get_jwt_payload(credentials)
|
||||
assert exc_info.value.status_code == 401
|
||||
assert "Invalid token" in exc_info.value.detail
|
||||
|
||||
|
||||
def test_verify_user_with_valid_user():
|
||||
"""Test verifying a valid user."""
|
||||
user = jwt_utils.verify_user(TEST_USER_PAYLOAD, admin_only=False)
|
||||
assert isinstance(user, User)
|
||||
assert user.user_id == "test-user-id"
|
||||
assert user.role == "user"
|
||||
assert user.email == "test@example.com"
|
||||
|
||||
|
||||
def test_verify_user_with_admin():
|
||||
"""Test verifying an admin user."""
|
||||
user = jwt_utils.verify_user(TEST_ADMIN_PAYLOAD, admin_only=True)
|
||||
assert isinstance(user, User)
|
||||
assert user.user_id == "admin-user-id"
|
||||
assert user.role == "admin"
|
||||
|
||||
|
||||
def test_verify_user_admin_only_with_regular_user():
|
||||
"""Test verifying regular user when admin is required."""
|
||||
with pytest.raises(HTTPException) as exc_info:
|
||||
jwt_utils.verify_user(TEST_USER_PAYLOAD, admin_only=True)
|
||||
assert exc_info.value.status_code == 403
|
||||
assert "Admin access required" in exc_info.value.detail
|
||||
|
||||
|
||||
def test_verify_user_no_payload():
|
||||
"""Test verifying user with no payload."""
|
||||
with pytest.raises(HTTPException) as exc_info:
|
||||
jwt_utils.verify_user(None, admin_only=False)
|
||||
assert exc_info.value.status_code == 401
|
||||
assert "Authorization header is missing" in exc_info.value.detail
|
||||
|
||||
|
||||
def test_verify_user_missing_sub():
|
||||
"""Test verifying user with payload missing 'sub' field."""
|
    invalid_payload = {"role": "user", "email": "test@example.com"}
    with pytest.raises(HTTPException) as exc_info:
        jwt_utils.verify_user(invalid_payload, admin_only=False)
    assert exc_info.value.status_code == 401
    assert "User ID not found in token" in exc_info.value.detail


def test_verify_user_empty_sub():
    """Test verifying user with empty 'sub' field."""
    invalid_payload = {"sub": "", "role": "user"}
    with pytest.raises(HTTPException) as exc_info:
        jwt_utils.verify_user(invalid_payload, admin_only=False)
    assert exc_info.value.status_code == 401
    assert "User ID not found in token" in exc_info.value.detail


def test_verify_user_none_sub():
    """Test verifying user with None 'sub' field."""
    invalid_payload = {"sub": None, "role": "user"}
    with pytest.raises(HTTPException) as exc_info:
        jwt_utils.verify_user(invalid_payload, admin_only=False)
    assert exc_info.value.status_code == 401
    assert "User ID not found in token" in exc_info.value.detail


def test_verify_user_missing_role_admin_check():
    """Test verifying admin when role field is missing."""
    no_role_payload = {"sub": "user-id"}
    with pytest.raises(KeyError):
        # This raises KeyError when checking payload["role"]
        jwt_utils.verify_user(no_role_payload, admin_only=True)


# ======================== EDGE CASES ======================== #


def test_jwt_with_additional_claims():
    """Test JWT token with additional custom claims."""
    extra_claims_payload = {
        "sub": "user-id",
        "role": "user",
        "aud": "authenticated",
        "custom_claim": "custom_value",
        "permissions": ["read", "write"],
        "metadata": {"key": "value"},
    }
    token = create_token(extra_claims_payload)

    result = jwt_utils.parse_jwt_token(token)
    assert result["sub"] == "user-id"
    assert result["custom_claim"] == "custom_value"
    assert result["permissions"] == ["read", "write"]


def test_jwt_with_numeric_sub():
    """Test JWT token with numeric user ID."""
    payload = {
        "sub": 12345,  # Numeric ID
        "role": "user",
        "aud": "authenticated",
    }
    # The 'sub' value is passed through as-is; verify_user does not coerce it to str
    user = jwt_utils.verify_user(payload, admin_only=False)
    assert user.user_id == 12345


def test_jwt_with_very_long_sub():
    """Test JWT token with a very long user ID."""
    long_id = "a" * 1000
    payload = {
        "sub": long_id,
        "role": "user",
        "aud": "authenticated",
    }
    user = jwt_utils.verify_user(payload, admin_only=False)
    assert user.user_id == long_id


def test_jwt_with_special_characters_in_claims():
    """Test JWT token with special characters in claims."""
    payload = {
        "sub": "user@example.com/special-chars!@#$%",
        "role": "admin",
        "aud": "authenticated",
        "email": "test+special@example.com",
    }
    user = jwt_utils.verify_user(payload, admin_only=True)
    assert "special-chars!@#$%" in user.user_id


def test_jwt_with_future_iat():
    """Test JWT token with issued-at time in the future."""
    future_payload = {
        "sub": "user-id",
        "role": "user",
        "aud": "authenticated",
        "iat": datetime.now(timezone.utc) + timedelta(hours=1),
    }
    token = create_token(future_payload)

    # PyJWT validates the iat claim and should reject future-dated tokens
    with pytest.raises(ValueError, match="not yet valid"):
        jwt_utils.parse_jwt_token(token)


def test_jwt_with_different_algorithms():
    """Test that only the HS256 algorithm is accepted."""
    payload = {
        "sub": "user-id",
        "role": "user",
        "aud": "authenticated",
    }

    # Try different algorithms
    algorithms = ["HS384", "HS512", "none"]
    for algo in algorithms:
        if algo == "none":
            # Special case for 'none' algorithm (security vulnerability if accepted)
            token = create_token(payload, "", algorithm="none")
        else:
            token = create_token(payload, algorithm=algo)

        with pytest.raises(ValueError) as exc_info:
            jwt_utils.parse_jwt_token(token)
        assert "Invalid token" in str(exc_info.value)
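For reference, a hedged sketch of what the create_token helper used throughout these tests might look like (the real helper and its signing key are defined earlier in this test module; TEST_JWT_KEY is an assumed name):

# Illustrative only: a PyJWT-based helper matching how the tests call create_token
import jwt  # PyJWT

TEST_JWT_KEY = "test-secret"  # assumed; the real key lives earlier in the module


def create_token(payload: dict, key: str = TEST_JWT_KEY, algorithm: str = "HS256") -> str:
    # PyJWT accepts an empty key only for the (insecure) "none" algorithm
    return jwt.encode(payload, key, algorithm=algorithm)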
22
autogpt_platform/autogpt_libs/autogpt_libs/auth/models.py
Normal file
@@ -0,0 +1,22 @@
from dataclasses import dataclass

DEFAULT_USER_ID = "3e53486c-cf57-477e-ba2a-cb02dc828e1a"
DEFAULT_EMAIL = "default@example.com"


# Using a dataclass here to avoid adding a dependency on pydantic
@dataclass(frozen=True)
class User:
    user_id: str
    email: str
    phone_number: str
    role: str

    @classmethod
    def from_payload(cls, payload):
        return cls(
            user_id=payload["sub"],
            email=payload.get("email", ""),
            phone_number=payload.get("phone", ""),
            role=payload["role"],
        )
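A quick usage sketch (payload values are illustrative; the shape mirrors the JWT claims exercised in the tests above):

# Illustrative only: building a User from a decoded JWT payload
payload = {"sub": "user-id", "role": "admin", "email": "admin@example.com"}
user = User.from_payload(payload)
assert user.user_id == "user-id" and user.role == "admin"
assert user.phone_number == ""  # a missing "phone" claim falls back to ""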
@@ -0,0 +1,9 @@
from .config import configure_logging
from .filters import BelowLevelFilter
from .formatters import FancyConsoleFormatter

__all__ = [
    "configure_logging",
    "BelowLevelFilter",
    "FancyConsoleFormatter",
]
191
autogpt_platform/autogpt_libs/autogpt_libs/logging/config.py
Normal file
@@ -0,0 +1,191 @@
"""Logging module for Auto-GPT."""

import logging
import os
import socket
import sys
from logging.handlers import RotatingFileHandler
from pathlib import Path

from pydantic import Field, field_validator
from pydantic_settings import BaseSettings, SettingsConfigDict

from .filters import BelowLevelFilter
from .formatters import AGPTFormatter

# Configure global socket timeout and gRPC keepalive to prevent deadlocks.
# This must be done at import time, before any gRPC connections are established.
socket.setdefaulttimeout(30)  # 30-second socket timeout

# Enable gRPC keepalive to detect dead connections faster
os.environ.setdefault("GRPC_KEEPALIVE_TIME_MS", "30000")  # 30 seconds
os.environ.setdefault("GRPC_KEEPALIVE_TIMEOUT_MS", "5000")  # 5 seconds
os.environ.setdefault("GRPC_KEEPALIVE_PERMIT_WITHOUT_CALLS", "true")

LOG_DIR = Path(__file__).parent.parent.parent.parent / "logs"
LOG_FILE = "activity.log"
DEBUG_LOG_FILE = "debug.log"
ERROR_LOG_FILE = "error.log"

SIMPLE_LOG_FORMAT = "%(asctime)s %(levelname)s %(title)s%(message)s"

DEBUG_LOG_FORMAT = (
    "%(asctime)s %(levelname)s %(filename)s:%(lineno)d %(title)s%(message)s"
)


class LoggingConfig(BaseSettings):
    level: str = Field(
        default="INFO",
        description="Logging level",
        validation_alias="LOG_LEVEL",
    )

    enable_cloud_logging: bool = Field(
        default=False,
        description="Enable logging to Google Cloud Logging",
    )

    enable_file_logging: bool = Field(
        default=False,
        description="Enable logging to file",
    )
    # File output
    log_dir: Path = Field(
        default=LOG_DIR,
        description="Log directory",
    )

    model_config = SettingsConfigDict(
        env_prefix="",
        env_file=".env",
        env_file_encoding="utf-8",
        extra="ignore",
    )

    @field_validator("level", mode="before")
    @classmethod
    def parse_log_level(cls, v):
        if isinstance(v, str):
            v = v.upper()
            if v not in ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]:
                raise ValueError(f"Invalid log level: {v}")
            return v
        return v


def configure_logging(force_cloud_logging: bool = False) -> None:
    """Configure the native logging module based on the LoggingConfig settings.

    This function sets up logging handlers and formatters according to the
    configuration specified in the LoggingConfig object. It supports various
    logging outputs including console, file, cloud, and JSON logging.

    The function uses the LoggingConfig object to determine which logging
    features to enable and how to configure them. This includes setting
    log levels, log formats, and output destinations.

    No arguments are required as the function creates its own LoggingConfig
    instance internally.

    Note: This function is typically called at the start of the application
    to set up the logging infrastructure.
    """
    config = LoggingConfig()
    log_handlers: list[logging.Handler] = []

    structured_logging = config.enable_cloud_logging or force_cloud_logging

    # Console output handlers
    if not structured_logging:
        stdout = logging.StreamHandler(stream=sys.stdout)
        stdout.setLevel(config.level)
        stdout.addFilter(BelowLevelFilter(logging.WARNING))
        # config.level is a level *name* (str), so compare against "DEBUG",
        # not the int constant logging.DEBUG
        if config.level == "DEBUG":
            stdout.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT))
        else:
            stdout.setFormatter(AGPTFormatter(SIMPLE_LOG_FORMAT))

        stderr = logging.StreamHandler()
        stderr.setLevel(logging.WARNING)
        if config.level == "DEBUG":
            stderr.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT))
        else:
            stderr.setFormatter(AGPTFormatter(SIMPLE_LOG_FORMAT))

        log_handlers += [stdout, stderr]

    # Cloud logging setup
    else:
        # Use the Google Cloud Structured Log Handler. Log entries are printed
        # to stdout in a JSON format which is automatically picked up by
        # Google Cloud Logging.
        from google.cloud.logging.handlers import StructuredLogHandler

        structured_log_handler = StructuredLogHandler(stream=sys.stdout)
        structured_log_handler.setLevel(config.level)
        log_handlers.append(structured_log_handler)

    # File logging setup
    if config.enable_file_logging:
        # Create the log directory if it doesn't exist
        if not config.log_dir.exists():
            config.log_dir.mkdir(parents=True, exist_ok=True)

        print(f"Log directory: {config.log_dir}")

        # Activity log handler (INFO and above)
        # Security fix: use RotatingFileHandler with size limits to prevent disk exhaustion
        activity_log_handler = RotatingFileHandler(
            config.log_dir / LOG_FILE,
            mode="a",
            encoding="utf-8",
            maxBytes=10 * 1024 * 1024,  # 10MB per file
            backupCount=3,  # Keep 3 backup files (40MB total)
        )
        activity_log_handler.setLevel(config.level)
        activity_log_handler.setFormatter(
            AGPTFormatter(SIMPLE_LOG_FORMAT, no_color=True)
        )
        log_handlers.append(activity_log_handler)

        if config.level == "DEBUG":
            # Debug log handler (all levels)
            # Security fix: use RotatingFileHandler with size limits
            debug_log_handler = RotatingFileHandler(
                config.log_dir / DEBUG_LOG_FILE,
                mode="a",
                encoding="utf-8",
                maxBytes=10 * 1024 * 1024,  # 10MB per file
                backupCount=3,  # Keep 3 backup files (40MB total)
            )
            debug_log_handler.setLevel(logging.DEBUG)
            debug_log_handler.setFormatter(
                AGPTFormatter(DEBUG_LOG_FORMAT, no_color=True)
            )
            log_handlers.append(debug_log_handler)

        # Error log handler (ERROR and above)
        # Security fix: use RotatingFileHandler with size limits
        error_log_handler = RotatingFileHandler(
            config.log_dir / ERROR_LOG_FILE,
            mode="a",
            encoding="utf-8",
            maxBytes=10 * 1024 * 1024,  # 10MB per file
            backupCount=3,  # Keep 3 backup files (40MB total)
        )
        error_log_handler.setLevel(logging.ERROR)
        error_log_handler.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT, no_color=True))
        log_handlers.append(error_log_handler)

    # Configure the root logger
    logging.basicConfig(
        format=(
            "%(levelname)s %(message)s"
            if structured_logging
            else (
                DEBUG_LOG_FORMAT if config.level == "DEBUG" else SIMPLE_LOG_FORMAT
            )
        ),
        level=config.level,
        handlers=log_handlers,
    )
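A minimal setup sketch (assuming this module is on the import path; LOG_LEVEL is read from the environment or a .env file via LoggingConfig):

# Illustrative only: configure logging at process start
import logging
import os

os.environ.setdefault("LOG_LEVEL", "INFO")  # consumed via the validation_alias above
configure_logging()
logging.getLogger(__name__).warning("logging is ready", extra={"title": "SETUP:"})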
@@ -0,0 +1,12 @@
import logging


class BelowLevelFilter(logging.Filter):
    """Filter for logging levels below a certain threshold."""

    def __init__(self, below_level: int):
        super().__init__()
        self.below_level = below_level

    def filter(self, record: logging.LogRecord):
        return record.levelno < self.below_level
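A small sketch of the stdout/stderr split this filter enables (handler wiring is illustrative; config.py above does the same thing):

# Illustrative only: sub-WARNING records to stdout, WARNING+ to stderr
import logging
import sys

stdout = logging.StreamHandler(sys.stdout)
stdout.addFilter(BelowLevelFilter(logging.WARNING))  # passes DEBUG/INFO only
stderr = logging.StreamHandler(sys.stderr)
stderr.setLevel(logging.WARNING)
logging.basicConfig(level=logging.DEBUG, handlers=[stdout, stderr])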
@@ -0,0 +1,81 @@
import logging

from colorama import Fore, Style

from .utils import remove_color_codes


class FancyConsoleFormatter(logging.Formatter):
    """
    A custom logging formatter designed for console output.

    This formatter enhances the standard logging output with color coding. The color
    coding is based on the level of the log message, making it easier to distinguish
    between different types of messages in the console output.

    The color for each level is defined in the LEVEL_COLOR_MAP class attribute.
    """

    # level -> level & text color
    LEVEL_COLOR_MAP = {
        logging.DEBUG: Fore.LIGHTBLACK_EX,
        logging.INFO: Fore.BLUE,
        logging.WARNING: Fore.YELLOW,
        logging.ERROR: Fore.RED,
        logging.CRITICAL: Fore.RED + Style.BRIGHT,
    }

    def format(self, record: logging.LogRecord) -> str:
        # Make sure `msg` is a string
        if not hasattr(record, "msg"):
            record.msg = ""
        elif type(record.msg) is not str:
            record.msg = str(record.msg)

        # Determine default color based on error level
        level_color = ""
        if record.levelno in self.LEVEL_COLOR_MAP:
            level_color = self.LEVEL_COLOR_MAP[record.levelno]
            record.levelname = f"{level_color}{record.levelname}{Style.RESET_ALL}"

        # Determine color for message
        color = getattr(record, "color", level_color)
        color_is_specified = hasattr(record, "color")

        # Don't color INFO messages unless the color is explicitly specified.
        if color and (record.levelno != logging.INFO or color_is_specified):
            record.msg = f"{color}{record.msg}{Style.RESET_ALL}"

        return super().format(record)


class AGPTFormatter(FancyConsoleFormatter):
    def __init__(self, *args, no_color: bool = False, **kwargs):
        super().__init__(*args, **kwargs)
        self.no_color = no_color

    def format(self, record: logging.LogRecord) -> str:
        # Make sure `msg` is a string
        if not hasattr(record, "msg"):
            record.msg = ""
        elif type(record.msg) is not str:
            record.msg = str(record.msg)

        # Strip color from the message to prevent color spoofing
        if record.msg and not getattr(record, "preserve_color", False):
            record.msg = remove_color_codes(record.msg)

        # Determine color for title
        title = getattr(record, "title", "")
        title_color = getattr(record, "title_color", "") or self.LEVEL_COLOR_MAP.get(
            record.levelno, ""
        )
        if title and title_color:
            title = f"{title_color + Style.BRIGHT}{title}{Style.RESET_ALL}"
        # Make sure record.title is set, and padded with a space if not empty
        record.title = f"{title} " if title else ""

        if self.no_color:
            return remove_color_codes(super().format(record))
        else:
            return super().format(record)
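A sketch of the record extras these formatters understand (title, title_color, color, and preserve_color are read via getattr above; the values here are illustrative):

# Illustrative only: emit a titled, colored record through AGPTFormatter
import logging
import sys

from colorama import Fore

handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(AGPTFormatter("%(levelname)s %(title)s%(message)s"))
logger = logging.getLogger("fmt_demo")
logger.addHandler(handler)
logger.warning("disk almost full", extra={"title": "STORAGE:", "title_color": Fore.CYAN})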
@@ -0,0 +1,14 @@
from __future__ import annotations

import json
import logging


class JsonFileHandler(logging.FileHandler):
    def format(self, record: logging.LogRecord) -> str:
        record.json_data = json.loads(record.getMessage())
        return json.dumps(getattr(record, "json_data"), ensure_ascii=False, indent=4)

    def emit(self, record: logging.LogRecord) -> None:
        # Note: opens in "w" mode, so each emit overwrites the file with the
        # latest record rather than appending
        with open(self.baseFilename, "w", encoding="utf-8") as f:
            f.write(self.format(record))
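A usage sketch (the file name is illustrative; note the handler keeps only the most recent record):

# Illustrative only: each emit() rewrites the file with the latest JSON payload
import logging

handler = JsonFileHandler("latest_state.json")  # hypothetical target path
logger = logging.getLogger("json_demo")
logger.setLevel(logging.INFO)
logger.addHandler(handler)
logger.info('{"step": 1, "status": "ok"}')  # the message must itself be valid JSON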
@@ -0,0 +1,36 @@
import pytest

from .utils import remove_color_codes


@pytest.mark.parametrize(
    "raw_text, clean_text",
    [
        (
            "COMMAND = \x1b[36mbrowse_website\x1b[0m "
            "ARGUMENTS = \x1b[36m{'url': 'https://www.google.com',"
            " 'question': 'What is the capital of France?'}\x1b[0m",
            "COMMAND = browse_website "
            "ARGUMENTS = {'url': 'https://www.google.com',"
            " 'question': 'What is the capital of France?'}",
        ),
        (
            "{'Schaue dir meine Projekte auf github () an, als auch meine Webseiten': "
            "'https://github.com/Significant-Gravitas/AutoGPT,"
            " https://discord.gg/autogpt und https://twitter.com/Auto_GPT'}",
            "{'Schaue dir meine Projekte auf github () an, als auch meine Webseiten': "
            "'https://github.com/Significant-Gravitas/AutoGPT,"
            " https://discord.gg/autogpt und https://twitter.com/Auto_GPT'}",
        ),
        ("", ""),
        ("hello", "hello"),
        ("hello\x1b[31m world", "hello world"),
        ("\x1b[36mHello,\x1b[32m World!", "Hello, World!"),
        (
            "\x1b[1m\x1b[31mError:\x1b[0m\x1b[31m file not found",
            "Error: file not found",
        ),
    ],
)
def test_remove_color_codes(raw_text, clean_text):
    assert remove_color_codes(raw_text) == clean_text
@@ -0,0 +1,5 @@
import re


def remove_color_codes(s: str) -> str:
    return re.sub(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])", "", s)
@@ -0,0 +1,33 @@
from typing import Optional

from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict


class RateLimitSettings(BaseSettings):
    # Host name only (not a URL): the limiter passes this to Redis(host=...)
    redis_host: str = Field(
        default="localhost",
        description="Redis host",
        validation_alias="REDIS_HOST",
    )

    redis_port: str = Field(
        default="6379", description="Redis port", validation_alias="REDIS_PORT"
    )

    redis_password: Optional[str] = Field(
        default=None,
        description="Redis password",
        validation_alias="REDIS_PASSWORD",
    )

    requests_per_minute: int = Field(
        default=60,
        description="Maximum number of requests allowed per minute per API key",
        validation_alias="RATE_LIMIT_REQUESTS_PER_MINUTE",
    )

    model_config = SettingsConfigDict(case_sensitive=True, extra="ignore")


RATE_LIMIT_SETTINGS = RateLimitSettings()
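A short sketch of how these settings resolve (the environment variable names come from the validation_alias fields above; values are illustrative):

# Illustrative only: environment overrides win over the defaults
import os

os.environ["RATE_LIMIT_REQUESTS_PER_MINUTE"] = "120"
print(RateLimitSettings().requests_per_minute)  # 120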
@@ -0,0 +1,51 @@
import time
from typing import Tuple

from redis import Redis

from .config import RATE_LIMIT_SETTINGS


class RateLimiter:
    def __init__(
        self,
        redis_host: str = RATE_LIMIT_SETTINGS.redis_host,
        redis_port: str = RATE_LIMIT_SETTINGS.redis_port,
        redis_password: str | None = RATE_LIMIT_SETTINGS.redis_password,
        requests_per_minute: int = RATE_LIMIT_SETTINGS.requests_per_minute,
    ):
        self.redis = Redis(
            host=redis_host,
            port=int(redis_port),
            password=redis_password,
            decode_responses=True,
        )
        self.window = 60
        self.max_requests = requests_per_minute

    async def check_rate_limit(self, api_key_id: str) -> Tuple[bool, int, int]:
        """
        Check if a request is within rate limits.

        Args:
            api_key_id: The API key identifier to check

        Returns:
            Tuple of (is_allowed, remaining_requests, reset_time)
        """
        now = time.time()
        window_start = now - self.window
        key = f"ratelimit:{api_key_id}:1min"

        # Sliding-window counter backed by a Redis sorted set: drop entries
        # older than the window, record this request, then count what remains.
        pipe = self.redis.pipeline()
        pipe.zremrangebyscore(key, 0, window_start)
        pipe.zadd(key, {str(now): now})
        pipe.zcount(key, window_start, now)
        pipe.expire(key, self.window)

        _, _, request_count, _ = pipe.execute()

        remaining = max(0, self.max_requests - request_count)
        reset_time = int(now + self.window)

        return request_count <= self.max_requests, remaining, reset_time
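A hedged usage sketch (requires a reachable Redis; the key name is illustrative):

# Illustrative only: check one key against the sliding window
import asyncio


async def demo():
    limiter = RateLimiter(requests_per_minute=60)
    allowed, remaining, reset = await limiter.check_rate_limit("api-key-123")
    print(allowed, remaining, reset)  # e.g. True 59 <unix timestamp>


asyncio.run(demo())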
@@ -0,0 +1,32 @@
from fastapi import HTTPException, Request
from starlette.middleware.base import RequestResponseEndpoint

from .limiter import RateLimiter


async def rate_limit_middleware(request: Request, call_next: RequestResponseEndpoint):
    """FastAPI middleware for rate limiting API requests."""
    limiter = RateLimiter()

    if not request.url.path.startswith("/api"):
        return await call_next(request)

    api_key = request.headers.get("Authorization")
    if not api_key:
        return await call_next(request)

    api_key = api_key.replace("Bearer ", "")

    is_allowed, remaining, reset_time = await limiter.check_rate_limit(api_key)

    if not is_allowed:
        raise HTTPException(
            status_code=429, detail="Rate limit exceeded. Please try again later."
        )

    response = await call_next(request)
    response.headers["X-RateLimit-Limit"] = str(limiter.max_requests)
    response.headers["X-RateLimit-Remaining"] = str(remaining)
    response.headers["X-RateLimit-Reset"] = str(reset_time)

    return response
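A registration sketch (assuming a FastAPI app object; the decorator is one standard way to attach an HTTP middleware function like this):

# Illustrative only: attach the rate limiter to an application
from fastapi import FastAPI

app = FastAPI()


@app.middleware("http")
async def _rate_limit(request, call_next):
    return await rate_limit_middleware(request, call_next)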
@@ -0,0 +1,76 @@
from typing import Annotated, Any, Literal, Optional, TypedDict
from uuid import uuid4

from pydantic import BaseModel, Field, SecretStr, field_serializer


class _BaseCredentials(BaseModel):
    id: str = Field(default_factory=lambda: str(uuid4()))
    provider: str
    title: Optional[str]

    @field_serializer("*")
    def dump_secret_strings(value: Any, _info):
        if isinstance(value, SecretStr):
            return value.get_secret_value()
        return value


class OAuth2Credentials(_BaseCredentials):
    type: Literal["oauth2"] = "oauth2"
    username: Optional[str]
    """Username of the third-party service user that these credentials belong to"""
    access_token: SecretStr
    access_token_expires_at: Optional[int]
    """Unix timestamp (seconds) indicating when the access token expires (if at all)"""
    refresh_token: Optional[SecretStr]
    refresh_token_expires_at: Optional[int]
    """Unix timestamp (seconds) indicating when the refresh token expires (if at all)"""
    scopes: list[str]
    metadata: dict[str, Any] = Field(default_factory=dict)

    def bearer(self) -> str:
        return f"Bearer {self.access_token.get_secret_value()}"


class APIKeyCredentials(_BaseCredentials):
    type: Literal["api_key"] = "api_key"
    api_key: SecretStr
    expires_at: Optional[int]
    """Unix timestamp (seconds) indicating when the API key expires (if at all)"""

    def bearer(self) -> str:
        return f"Bearer {self.api_key.get_secret_value()}"


Credentials = Annotated[
    OAuth2Credentials | APIKeyCredentials,
    Field(discriminator="type"),
]


CredentialsType = Literal["api_key", "oauth2"]


class OAuthState(BaseModel):
    token: str
    provider: str
    expires_at: int
    """Unix timestamp (seconds) indicating when this OAuth state expires"""
    code_verifier: Optional[str] = None
    scopes: list[str]


class UserMetadata(BaseModel):
    integration_credentials: list[Credentials] = Field(default_factory=list)
    integration_oauth_states: list[OAuthState] = Field(default_factory=list)


class UserMetadataRaw(TypedDict, total=False):
    integration_credentials: list[dict]
    integration_oauth_states: list[dict]


class UserIntegrations(BaseModel):
    credentials: list[Credentials] = Field(default_factory=list)
    oauth_states: list[OAuthState] = Field(default_factory=list)
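A sketch of what the discriminated union buys (the TypeAdapter usage and field values are illustrative): pydantic dispatches on the "type" field when validating raw data.

# Illustrative only: a raw dict validates into the matching credentials class
from pydantic import TypeAdapter

raw = {
    "provider": "github",
    "title": None,
    "type": "api_key",
    "api_key": "sk-test",
    "expires_at": None,
}
creds = TypeAdapter(Credentials).validate_python(raw)
assert isinstance(creds, APIKeyCredentials)
print(creds.bearer())  # "Bearer sk-test"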
@@ -0,0 +1,61 @@
import asyncio
from contextlib import asynccontextmanager
from typing import TYPE_CHECKING, Any

from expiringdict import ExpiringDict

if TYPE_CHECKING:
    from redis.asyncio import Redis as AsyncRedis
    from redis.asyncio.lock import Lock as AsyncRedisLock


class AsyncRedisKeyedMutex:
    """
    This class provides a mutex that can be locked and unlocked by a specific key,
    using Redis as a distributed locking provider.
    It uses an ExpiringDict to automatically clear the mutex after a specified timeout,
    in case the key is not unlocked for a specified duration, to prevent memory leaks.
    """

    def __init__(self, redis: "AsyncRedis", timeout: int | None = 60):
        self.redis = redis
        self.timeout = timeout
        self.locks: dict[Any, "AsyncRedisLock"] = ExpiringDict(
            max_len=6000, max_age_seconds=self.timeout
        )
        self.locks_lock = asyncio.Lock()

    @asynccontextmanager
    async def locked(self, key: Any):
        lock = await self.acquire(key)
        try:
            yield
        finally:
            if (await lock.locked()) and (await lock.owned()):
                await lock.release()

    async def acquire(self, key: Any) -> "AsyncRedisLock":
        """Acquires and returns a lock with the given key"""
        async with self.locks_lock:
            if key not in self.locks:
                self.locks[key] = self.redis.lock(
                    str(key), self.timeout, thread_local=False
                )
            lock = self.locks[key]
        # Wait for the distributed lock outside locks_lock, so acquiring one
        # key doesn't block acquisitions for every other key
        await lock.acquire()
        return lock

    async def release(self, key: Any):
        if (
            (lock := self.locks.get(key))
            and (await lock.locked())
            and (await lock.owned())
        ):
            await lock.release()

    async def release_all_locks(self):
        """Call this on process termination to ensure all locks are released"""
        async with self.locks_lock:
            for lock in self.locks.values():
                if (await lock.locked()) and (await lock.owned()):
                    await lock.release()
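A usage sketch (the Redis URL and key are illustrative; requires redis-py's asyncio client):

# Illustrative only: serialize work on a per-user key across processes
import asyncio

from redis.asyncio import Redis


async def demo():
    mutex = AsyncRedisKeyedMutex(Redis.from_url("redis://localhost:6379"))
    async with mutex.locked("user-123"):
        ...  # critical section: one holder per key at a time
    await mutex.release_all_locks()


asyncio.run(demo())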
1932
autogpt_platform/autogpt_libs/poetry.lock
generated
Normal file
File diff suppressed because it is too large
40
autogpt_platform/autogpt_libs/pyproject.toml
Normal file
@@ -0,0 +1,40 @@
[tool.poetry]
name = "autogpt-libs"
version = "0.2.0"
description = "Shared libraries across AutoGPT Platform"
authors = ["AutoGPT team <info@agpt.co>"]
readme = "README.md"
packages = [{ include = "autogpt_libs" }]

[tool.poetry.dependencies]
python = ">=3.10,<4.0"
colorama = "^0.4.6"
cryptography = "^45.0"
expiringdict = "^1.2.2"
fastapi = "^0.116.1"
google-cloud-logging = "^3.12.1"
launchdarkly-server-sdk = "^9.12.0"
pydantic = "^2.11.7"
pydantic-settings = "^2.10.1"
pyjwt = { version = "^2.10.1", extras = ["crypto"] }
redis = "^6.2.0"
supabase = "^2.16.0"
uvicorn = "^0.35.0"

[tool.poetry.group.dev.dependencies]
pyright = "^1.1.404"
pytest = "^8.4.1"
pytest-asyncio = "^1.1.0"
pytest-mock = "^3.14.1"
pytest-cov = "^6.2.1"
ruff = "^0.12.11"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

[tool.ruff]
line-length = 88

[tool.ruff.lint]
extend-select = ["I"] # sort imports (isort rules)
52
autogpt_platform/backend/.dockerignore
Normal file
@@ -0,0 +1,52 @@
# Development and testing files
**/__pycache__
**/*.pyc
**/*.pyo
**/*.pyd
**/.Python
**/env/
**/venv/
**/.venv/
**/pip-log.txt
**/.pytest_cache/
**/test-results/
**/snapshots/
**/test/

# IDE and editor files
**/.vscode/
**/.idea/
**/*.swp
**/*.swo
*~

# OS files
.DS_Store
Thumbs.db

# Logs
**/*.log
**/logs/

# Git
.git/
.gitignore

# Documentation
**/*.md
!README.md

# Local development files
.env
.env.local
**/.env.test

# Build artifacts
**/dist/
**/build/
**/target/

# Docker files (avoid recursion)
Dockerfile*
docker-compose*
.dockerignore
175
autogpt_platform/backend/.env.default
Normal file
@@ -0,0 +1,175 @@
# Backend Configuration
# This file contains environment variables that MUST be set for the AutoGPT platform
# Variables with working defaults in settings.py are not included here

## ===== REQUIRED DATABASE CONFIGURATION ===== ##
# PostgreSQL Database Connection
DB_USER=postgres
DB_PASS=your-super-secret-and-long-postgres-password
DB_NAME=postgres
DB_PORT=5432
DB_HOST=localhost
DB_CONNECTION_LIMIT=12
DB_CONNECT_TIMEOUT=60
DB_POOL_TIMEOUT=300
DB_SCHEMA=platform
DATABASE_URL="postgresql://${DB_USER}:${DB_PASS}@${DB_HOST}:${DB_PORT}/${DB_NAME}?schema=${DB_SCHEMA}&connect_timeout=${DB_CONNECT_TIMEOUT}"
DIRECT_URL="postgresql://${DB_USER}:${DB_PASS}@${DB_HOST}:${DB_PORT}/${DB_NAME}?schema=${DB_SCHEMA}&connect_timeout=${DB_CONNECT_TIMEOUT}"
PRISMA_SCHEMA="postgres/schema.prisma"

## ===== REQUIRED SERVICE CREDENTIALS ===== ##
# Redis Configuration
REDIS_HOST=localhost
REDIS_PORT=6379
# REDIS_PASSWORD=

# RabbitMQ Credentials
RABBITMQ_DEFAULT_USER=rabbitmq_user_default
RABBITMQ_DEFAULT_PASS=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7

# Supabase Authentication
SUPABASE_URL=http://localhost:8000
SUPABASE_SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
JWT_VERIFY_KEY=your-super-secret-jwt-token-with-at-least-32-characters-long

## ===== REQUIRED SECURITY KEYS ===== ##
# Generate using: from cryptography.fernet import Fernet;Fernet.generate_key().decode()
ENCRYPTION_KEY=dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw=
UNSUBSCRIBE_SECRET_KEY=HlP8ivStJjmbf6NKi78m_3FnOogut0t5ckzjsIqeaio=

## ===== IMPORTANT OPTIONAL CONFIGURATION ===== ##
# Platform URLs (set these for webhooks and OAuth to work)
PLATFORM_BASE_URL=http://localhost:8000
FRONTEND_BASE_URL=http://localhost:3000

# Media Storage (required for marketplace and library functionality)
MEDIA_GCS_BUCKET_NAME=

## ===== API KEYS AND OAUTH CREDENTIALS ===== ##
# All API keys below are optional - only add what you need

# AI/LLM Services
OPENAI_API_KEY=
ANTHROPIC_API_KEY=
GROQ_API_KEY=
LLAMA_API_KEY=
AIML_API_KEY=
V0_API_KEY=
OPEN_ROUTER_API_KEY=
NVIDIA_API_KEY=

# OAuth Credentials
# For the OAuth callback URL, use <your_frontend_url>/auth/integrations/oauth_callback,
# e.g. http://localhost:3000/auth/integrations/oauth_callback

# GitHub OAuth App server credentials - https://github.com/settings/developers
GITHUB_CLIENT_ID=
GITHUB_CLIENT_SECRET=

# Notion OAuth App server credentials - https://developers.notion.com/docs/authorization
# Configure a public integration
NOTION_CLIENT_ID=
NOTION_CLIENT_SECRET=

# Google OAuth App server credentials - https://console.cloud.google.com/apis/credentials, and enable gmail api and set scopes
# https://console.cloud.google.com/apis/credentials/consent ?project=<your_project_id>
# You'll need to add/enable the following scopes (minimum):
# https://console.developers.google.com/apis/api/gmail.googleapis.com/overview ?project=<your_project_id>
# https://console.cloud.google.com/apis/library/sheets.googleapis.com/ ?project=<your_project_id>
GOOGLE_CLIENT_ID=
GOOGLE_CLIENT_SECRET=

# Twitter (X) OAuth 2.0 with PKCE Configuration
# 1. Create a Twitter Developer Account:
#    - Visit https://developer.x.com/en and sign up
# 2. Set up your application:
#    - Navigate to Developer Portal > Projects > Create Project
#    - Add a new app to your project
# 3. Configure app settings:
#    - App Permissions: Read + Write + Direct Messages
#    - App Type: Web App, Automated App or Bot
#    - OAuth 2.0 Callback URL: http://localhost:3000/auth/integrations/oauth_callback
#    - Save your Client ID and Client Secret below
TWITTER_CLIENT_ID=
TWITTER_CLIENT_SECRET=

# Linear App
# Make a new workspace for your OAuth APP -- trust me
# https://linear.app/settings/api/applications/new
# Callback URL: http://localhost:3000/auth/integrations/oauth_callback
LINEAR_CLIENT_ID=
LINEAR_CLIENT_SECRET=

# To obtain Todoist API credentials:
# 1. Create a Todoist account at todoist.com
# 2. Visit the Developer Console: https://developer.todoist.com/appconsole.html
# 3. Click "Create new app"
# 4. Once created, copy your Client ID and Client Secret below
TODOIST_CLIENT_ID=
TODOIST_CLIENT_SECRET=

# Discord OAuth App credentials
# 1. Go to https://discord.com/developers/applications
# 2. Create a new application
# 3. Go to OAuth2 section and add redirect URI: http://localhost:3000/auth/integrations/oauth_callback
# 4. Copy Client ID and Client Secret below
DISCORD_CLIENT_ID=
DISCORD_CLIENT_SECRET=

REDDIT_CLIENT_ID=
REDDIT_CLIENT_SECRET=

# Payment Processing
STRIPE_API_KEY=
STRIPE_WEBHOOK_SECRET=

# Email Service (for sending notifications and confirmations)
POSTMARK_SERVER_API_TOKEN=
POSTMARK_SENDER_EMAIL=invalid@invalid.com
POSTMARK_WEBHOOK_TOKEN=

# Error Tracking
SENTRY_DSN=

# Feature Flags
LAUNCH_DARKLY_SDK_KEY=

# Content Generation & Media
DID_API_KEY=
FAL_API_KEY=
IDEOGRAM_API_KEY=
REPLICATE_API_KEY=
REVID_API_KEY=
SCREENSHOTONE_API_KEY=
UNREAL_SPEECH_API_KEY=

# Data & Search Services
E2B_API_KEY=
EXA_API_KEY=
JINA_API_KEY=
MEM0_API_KEY=
OPENWEATHERMAP_API_KEY=
GOOGLE_MAPS_API_KEY=

# Communication Services
DISCORD_BOT_TOKEN=
MEDIUM_API_KEY=
MEDIUM_AUTHOR_ID=
SMTP_SERVER=
SMTP_PORT=
SMTP_USERNAME=
SMTP_PASSWORD=

# Business & Marketing Tools
APOLLO_API_KEY=
ENRICHLAYER_API_KEY=
AYRSHARE_API_KEY=
AYRSHARE_JWT_KEY=
SMARTLEAD_API_KEY=
ZEROBOUNCE_API_KEY=

# Other Services
AUTOMOD_API_KEY=
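Per the generation hint in this file, fresh values for the two security keys can be produced like so (the one-liner from the comment, expanded for readability):

# Illustrative only: generate values for ENCRYPTION_KEY / UNSUBSCRIBE_SECRET_KEY
from cryptography.fernet import Fernet

print(Fernet.generate_key().decode())  # paste the output into your .env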
20
autogpt_platform/backend/.gitignore
vendored
Normal file
@@ -0,0 +1,20 @@
.env
database.db
database.db-journal
dev.db
dev.db-journal
build/
config.json
secrets/*
!secrets/.gitkeep

*.ignore.*
*.ign.*

# Load test results and reports
load-tests/*_RESULTS.md
load-tests/*_REPORT.md
load-tests/results/
load-tests/*.json
load-tests/*.log
load-tests/node_modules/*
106
autogpt_platform/backend/Dockerfile
Normal file
@@ -0,0 +1,106 @@
FROM debian:13-slim AS builder

# Set environment variables
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1
ENV DEBIAN_FRONTEND=noninteractive

WORKDIR /app

RUN echo 'Acquire::http::Pipeline-Depth 0;\nAcquire::http::No-Cache true;\nAcquire::BrokenProxy true;\n' > /etc/apt/apt.conf.d/99fixbadproxy

# Install Node.js repository key and setup
RUN apt-get update --allow-releaseinfo-change --fix-missing \
    && apt-get install -y curl ca-certificates gnupg \
    && mkdir -p /etc/apt/keyrings \
    && curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg \
    && echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_20.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list

# Update package list and install Python, Node.js, and build dependencies
RUN apt-get update \
    && apt-get install -y \
    python3.13 \
    python3.13-dev \
    python3.13-venv \
    python3-pip \
    build-essential \
    libpq5 \
    libz-dev \
    libssl-dev \
    postgresql-client \
    nodejs \
    && rm -rf /var/lib/apt/lists/*

ENV POETRY_HOME=/opt/poetry
ENV POETRY_NO_INTERACTION=1
ENV POETRY_VIRTUALENVS_CREATE=true
ENV POETRY_VIRTUALENVS_IN_PROJECT=true
ENV PATH=/opt/poetry/bin:$PATH

RUN pip3 install poetry --break-system-packages

# Copy and install dependencies
COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs
COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml /app/autogpt_platform/backend/
WORKDIR /app/autogpt_platform/backend
RUN poetry install --no-ansi --no-root

# Generate Prisma client
COPY autogpt_platform/backend/schema.prisma ./
COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/partial_types.py
RUN poetry run prisma generate

FROM debian:13-slim AS server_dependencies

WORKDIR /app

ENV POETRY_HOME=/opt/poetry \
    POETRY_NO_INTERACTION=1 \
    POETRY_VIRTUALENVS_CREATE=true \
    POETRY_VIRTUALENVS_IN_PROJECT=true \
    DEBIAN_FRONTEND=noninteractive
ENV PATH=/opt/poetry/bin:$PATH

# Install Python without upgrading system-managed packages
RUN apt-get update && apt-get install -y \
    python3.13 \
    python3-pip \
    && rm -rf /var/lib/apt/lists/*

# Copy only necessary files from builder
COPY --from=builder /app /app
COPY --from=builder /usr/local/lib/python3* /usr/local/lib/python3*
COPY --from=builder /usr/local/bin/poetry /usr/local/bin/poetry
# Copy Node.js installation for Prisma
COPY --from=builder /usr/bin/node /usr/bin/node
COPY --from=builder /usr/lib/node_modules /usr/lib/node_modules
COPY --from=builder /usr/bin/npm /usr/bin/npm
COPY --from=builder /usr/bin/npx /usr/bin/npx
COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries

ENV PATH="/app/autogpt_platform/backend/.venv/bin:$PATH"

RUN mkdir -p /app/autogpt_platform/autogpt_libs
RUN mkdir -p /app/autogpt_platform/backend

COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs

COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml /app/autogpt_platform/backend/

WORKDIR /app/autogpt_platform/backend

FROM server_dependencies AS migrate

# Migration stage only needs schema and migrations - much lighter than full backend
COPY autogpt_platform/backend/schema.prisma /app/autogpt_platform/backend/
COPY autogpt_platform/backend/backend/data/partial_types.py /app/autogpt_platform/backend/backend/data/partial_types.py
COPY autogpt_platform/backend/migrations /app/autogpt_platform/backend/migrations

FROM server_dependencies AS server

COPY autogpt_platform/backend /app/autogpt_platform/backend
RUN poetry install --no-ansi --only-root

ENV PORT=8000

CMD ["poetry", "run", "rest"]
1
autogpt_platform/backend/README.advanced.md
Normal file
@@ -0,0 +1 @@
[Advanced Setup (Dev Branch)](https://dev-docs.agpt.co/platform/advanced_setup/#autogpt_agent_server_advanced_set_up)
1
autogpt_platform/backend/README.md
Normal file
@@ -0,0 +1 @@
[Getting Started (Released)](https://docs.agpt.co/platform/getting-started/#autogpt_agent_server)
Some files were not shown because too many files have changed in this diff