Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-01-12 08:38:09 -05:00)

Compare commits: autogpt-rs...fix-path (44 commits)
| SHA1 |
|---|
| 6301f8c34e |
| f711c057da |
| 0c6b08d882 |
| 5b08913ab9 |
| 815cf8b4ac |
| 3abe821533 |
| 6ae6c711b7 |
| c8f55bc518 |
| df2126c1a8 |
| dfcfd003df |
| 4e33399d31 |
| 369b1d9023 |
| 241f21ab5f |
| 7551782cd1 |
| 430835e539 |
| f5040fa3ab |
| 6ced85d203 |
| 5e1a3d5717 |
| d35b91cde4 |
| aeab5aac67 |
| 31cd6dc652 |
| 13b82c86f5 |
| ff11d00f74 |
| 9d7dfb0a6d |
| f1bf7f269b |
| 46cc8ae3ea |
| 43bf6f2349 |
| 2582eb1ee8 |
| 10cefc149f |
| d62fe001b8 |
| f583a15fd0 |
| 2cad2093eb |
| 4e569f4562 |
| 7f514c10cf |
| d7aba4f6c0 |
| ba30aa2fce |
| efeba4400e |
| ba206e3bec |
| be16fd90d4 |
| d10167ceab |
| d593f76437 |
| bda938422e |
| 8397b78ec2 |
| 0d7342826b |
@@ -1,18 +0,0 @@
version = 1

test_patterns = ["**/*.spec.ts","**/*_test.py","**/*_tests.py","**/test_*.py"]

exclude_patterns = ["classic/**"]

[[analyzers]]
name = "javascript"

[analyzers.meta]
plugins = ["react"]
environment = ["nodejs"]

[[analyzers]]
name = "python"

[analyzers.meta]
runtime_version = "3.x.x"
@@ -1,62 +0,0 @@
# Ignore everything by default, selectively add things to context
*

# Platform - Libs
!autogpt_platform/autogpt_libs/autogpt_libs/
!autogpt_platform/autogpt_libs/pyproject.toml
!autogpt_platform/autogpt_libs/poetry.lock
!autogpt_platform/autogpt_libs/README.md

# Platform - Backend
!autogpt_platform/backend/backend/
!autogpt_platform/backend/test/e2e_test_data.py
!autogpt_platform/backend/migrations/
!autogpt_platform/backend/schema.prisma
!autogpt_platform/backend/pyproject.toml
!autogpt_platform/backend/poetry.lock
!autogpt_platform/backend/README.md

# Platform - Market
!autogpt_platform/market/market/
!autogpt_platform/market/scripts.py
!autogpt_platform/market/schema.prisma
!autogpt_platform/market/pyproject.toml
!autogpt_platform/market/poetry.lock
!autogpt_platform/market/README.md

# Platform - Frontend
!autogpt_platform/frontend/src/
!autogpt_platform/frontend/public/
!autogpt_platform/frontend/package.json
!autogpt_platform/frontend/pnpm-lock.yaml
!autogpt_platform/frontend/tsconfig.json
!autogpt_platform/frontend/README.md
## config
!autogpt_platform/frontend/*.config.*
!autogpt_platform/frontend/.env.*

# Classic - AutoGPT
!classic/original_autogpt/autogpt/
!classic/original_autogpt/pyproject.toml
!classic/original_autogpt/poetry.lock
!classic/original_autogpt/README.md
!classic/original_autogpt/tests/

# Classic - Benchmark
!classic/benchmark/agbenchmark/
!classic/benchmark/pyproject.toml
!classic/benchmark/poetry.lock
!classic/benchmark/README.md

# Classic - Forge
!classic/forge/
!classic/forge/pyproject.toml
!classic/forge/poetry.lock
!classic/forge/README.md

# Classic - Frontend
!classic/frontend/build/web/

# Explicitly re-ignore some folders
.*
**/__pycache__
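The deleted ignore file above follows an allow-list pattern: everything is excluded from the build context by default, and individual paths are re-included with `!` rules. One way to check what such a pattern actually lets through is to build a throwaway image that copies the whole context and then list its contents; a minimal sketch (the image name and probe Dockerfile are arbitrary, not part of the repository):

```bash
# Probe the effective Docker build context from the directory that holds
# the ignore file shown above. The Dockerfile is fed on stdin so the
# context (and its ignore rules) come from the current directory.
docker build -t context-probe -f - . <<'EOF'
FROM busybox
COPY . /context
CMD ["find", "/context", "-type", "f"]
EOF

# List every file that survived the ignore rules.
docker run --rm context-probe
```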
.github/PULL_REQUEST_TEMPLATE.md (vendored, 43 lines changed)
@@ -1,38 +1,23 @@
### Background

<!-- Clearly explain the need for these changes: -->

### Changes 🏗️

<!-- Concisely describe all of the changes made in this pull request: -->

### Checklist 📋

#### For code changes:
- [ ] I have clearly listed my changes in the PR description
- [ ] I have made a test plan
- [ ] I have tested my changes according to the test plan:
<!-- Put your test plan here: -->
- [ ] ...
### Testing 🔍
> [!NOTE]
Only for the new autogpt platform, currently in autogpt_platform/

<details>
<summary>Example test plan</summary>

- [ ] Create from scratch and execute an agent with at least 3 blocks
- [ ] Import an agent from file upload, and confirm it executes correctly
- [ ] Upload agent to marketplace
- [ ] Import an agent from marketplace and confirm it executes correctly
- [ ] Edit an agent from monitor, and confirm it executes correctly
</details>
<!--
Please make sure your changes have been tested and are in good working condition.
Here is a list of our critical paths, if you need some inspiration on what and how to test:
-->

#### For configuration changes:
- [ ] `.env.example` is updated or already compatible with my changes
- [ ] `docker-compose.yml` is updated or already compatible with my changes
- [ ] I have included a list of my configuration changes in the PR description (under **Changes**)

<details>
<summary>Examples of configuration changes</summary>

- Changing ports
- Adding new services that need to communicate with each other
- Secrets or environment variable changes
- New or infrastructure changes such as databases
</details>
- Create from scratch and execute an agent with at least 3 blocks
- Import an agent from file upload, and confirm it executes correctly
- Upload agent to marketplace
- Import an agent from marketplace and confirm it executes correctly
- Edit an agent from monitor, and confirm it executes correctly
.github/dependabot.yml (vendored, 153 lines changed)
@@ -1,153 +0,0 @@
|
||||
version: 2
|
||||
updates:
|
||||
# autogpt_libs (Poetry project)
|
||||
- package-ecosystem: "pip"
|
||||
directory: "autogpt_platform/autogpt_libs"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
open-pull-requests-limit: 10
|
||||
target-branch: "dev"
|
||||
commit-message:
|
||||
prefix: "chore(libs/deps)"
|
||||
prefix-development: "chore(libs/deps-dev)"
|
||||
ignore:
|
||||
- dependency-name: "poetry"
|
||||
groups:
|
||||
production-dependencies:
|
||||
dependency-type: "production"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
development-dependencies:
|
||||
dependency-type: "development"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
|
||||
# backend (Poetry project)
|
||||
- package-ecosystem: "pip"
|
||||
directory: "autogpt_platform/backend"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
open-pull-requests-limit: 10
|
||||
target-branch: "dev"
|
||||
commit-message:
|
||||
prefix: "chore(backend/deps)"
|
||||
prefix-development: "chore(backend/deps-dev)"
|
||||
ignore:
|
||||
- dependency-name: "poetry"
|
||||
groups:
|
||||
production-dependencies:
|
||||
dependency-type: "production"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
development-dependencies:
|
||||
dependency-type: "development"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
|
||||
# frontend (Next.js project)
|
||||
- package-ecosystem: "npm"
|
||||
directory: "autogpt_platform/frontend"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
open-pull-requests-limit: 10
|
||||
target-branch: "dev"
|
||||
commit-message:
|
||||
prefix: "chore(frontend/deps)"
|
||||
prefix-development: "chore(frontend/deps-dev)"
|
||||
groups:
|
||||
production-dependencies:
|
||||
dependency-type: "production"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
development-dependencies:
|
||||
dependency-type: "development"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
|
||||
# infra (Terraform)
|
||||
- package-ecosystem: "terraform"
|
||||
directory: "autogpt_platform/infra"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
open-pull-requests-limit: 5
|
||||
target-branch: "dev"
|
||||
commit-message:
|
||||
prefix: "chore(infra/deps)"
|
||||
prefix-development: "chore(infra/deps-dev)"
|
||||
|
||||
groups:
|
||||
production-dependencies:
|
||||
dependency-type: "production"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
development-dependencies:
|
||||
dependency-type: "development"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
|
||||
# GitHub Actions
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
open-pull-requests-limit: 5
|
||||
target-branch: "dev"
|
||||
groups:
|
||||
production-dependencies:
|
||||
dependency-type: "production"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
development-dependencies:
|
||||
dependency-type: "development"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
|
||||
# Docker
|
||||
- package-ecosystem: "docker"
|
||||
directory: "autogpt_platform/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
open-pull-requests-limit: 5
|
||||
target-branch: "dev"
|
||||
groups:
|
||||
production-dependencies:
|
||||
dependency-type: "production"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
development-dependencies:
|
||||
dependency-type: "development"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
|
||||
# Docs
|
||||
- package-ecosystem: "pip"
|
||||
directory: "docs/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
open-pull-requests-limit: 1
|
||||
target-branch: "dev"
|
||||
commit-message:
|
||||
prefix: "chore(docs/deps)"
|
||||
groups:
|
||||
production-dependencies:
|
||||
dependency-type: "production"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
development-dependencies:
|
||||
dependency-type: "development"
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
.github/labeler.yml (vendored, 32 lines changed)
@@ -1,33 +1,27 @@
Classic AutoGPT Agent:
AutoGPT Agent:
- changed-files:
  - any-glob-to-any-file: classic/original_autogpt/**

Classic Benchmark:
- changed-files:
  - any-glob-to-any-file: classic/benchmark/**

Classic Frontend:
- changed-files:
  - any-glob-to-any-file: classic/frontend/**

Forge:
- changed-files:
  - any-glob-to-any-file: classic/forge/**

Benchmark:
- changed-files:
  - any-glob-to-any-file: classic/benchmark/**

Frontend:
- changed-files:
  - any-glob-to-any-file: classic/frontend/**

documentation:
- changed-files:
  - any-glob-to-any-file: docs/**

platform/frontend:
Builder:
- changed-files:
  - any-glob-to-any-file: autogpt_platform/frontend/**
  - any-glob-to-any-file: autogpt_platform/autogpt_builder/**

platform/backend:
Server:
- changed-files:
  - all-globs-to-any-file:
    - autogpt_platform/backend/**
    - '!autogpt_platform/backend/backend/blocks/**'

platform/blocks:
- changed-files:
  - any-glob-to-any-file: autogpt_platform/backend/backend/blocks/**
  - any-glob-to-any-file: autogpt_platform/autogpt_server/**
.github/workflows/classic-autogpt-ci.yml (vendored, 13 lines changed)
@@ -2,12 +2,12 @@ name: Classic - AutoGPT CI

on:
  push:
    branches: [ master, dev, ci-test* ]
    branches: [ master, development, ci-test* ]
    paths:
      - '.github/workflows/classic-autogpt-ci.yml'
      - 'classic/original_autogpt/**'
  pull_request:
    branches: [ master, dev, release-* ]
    branches: [ master, development, release-* ]
    paths:
      - '.github/workflows/classic-autogpt-ci.yml'
      - 'classic/original_autogpt/**'
@@ -115,7 +115,6 @@ jobs:
          poetry run pytest -vv \
            --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
            --numprocesses=logical --durations=10 \
            --junitxml=junit.xml -o junit_family=legacy \
            tests/unit tests/integration
        env:
          CI: true
@@ -125,14 +124,8 @@ jobs:
          AWS_ACCESS_KEY_ID: minioadmin
          AWS_SECRET_ACCESS_KEY: minioadmin

      - name: Upload test results to Codecov
        if: ${{ !cancelled() }} # Run even if tests fail
        uses: codecov/test-results-action@v1
        with:
          token: ${{ secrets.CODECOV_TOKEN }}

      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v5
        uses: codecov/codecov-action@v4
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          flags: autogpt-agent,${{ runner.os }}
@@ -5,7 +5,7 @@ on:
|
||||
- cron: 20 4 * * 1,4
|
||||
|
||||
env:
|
||||
BASE_BRANCH: dev
|
||||
BASE_BRANCH: development
|
||||
IMAGE_NAME: auto-gpt
|
||||
|
||||
jobs:
|
||||
@@ -15,46 +15,46 @@ jobs:
|
||||
matrix:
|
||||
build-type: [release, dev]
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- id: build
|
||||
name: Build image
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: classic/
|
||||
file: classic/Dockerfile.autogpt
|
||||
build-args: BUILD_TYPE=${{ matrix.build-type }}
|
||||
load: true # save to docker images
|
||||
# use GHA cache as read-only
|
||||
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
|
||||
- id: build
|
||||
name: Build image
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: classic/
|
||||
file: classic/Dockerfile.autogpt
|
||||
build-args: BUILD_TYPE=${{ matrix.build-type }}
|
||||
load: true # save to docker images
|
||||
# use GHA cache as read-only
|
||||
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
|
||||
|
||||
- name: Generate build report
|
||||
env:
|
||||
event_name: ${{ github.event_name }}
|
||||
event_ref: ${{ github.event.schedule }}
|
||||
- name: Generate build report
|
||||
env:
|
||||
event_name: ${{ github.event_name }}
|
||||
event_ref: ${{ github.event.schedule }}
|
||||
|
||||
build_type: ${{ matrix.build-type }}
|
||||
build_type: ${{ matrix.build-type }}
|
||||
|
||||
prod_branch: master
|
||||
dev_branch: dev
|
||||
repository: ${{ github.repository }}
|
||||
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}
|
||||
prod_branch: master
|
||||
dev_branch: development
|
||||
repository: ${{ github.repository }}
|
||||
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
|
||||
|
||||
current_ref: ${{ github.ref_name }}
|
||||
commit_hash: ${{ github.sha }}
|
||||
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }}
|
||||
push_forced_label:
|
||||
current_ref: ${{ github.ref_name }}
|
||||
commit_hash: ${{ github.sha }}
|
||||
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }}
|
||||
push_forced_label:
|
||||
|
||||
new_commits_json: ${{ null }}
|
||||
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
|
||||
new_commits_json: ${{ null }}
|
||||
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
|
||||
|
||||
github_context_json: ${{ toJSON(github) }}
|
||||
job_env_json: ${{ toJSON(env) }}
|
||||
vars_json: ${{ toJSON(vars) }}
|
||||
github_context_json: ${{ toJSON(github) }}
|
||||
job_env_json: ${{ toJSON(env) }}
|
||||
vars_json: ${{ toJSON(vars) }}
|
||||
|
||||
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
|
||||
continue-on-error: true
|
||||
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
|
||||
continue-on-error: true
|
||||
|
||||
.github/workflows/classic-autogpt-docker-ci.yml (vendored, 94 lines changed)
@@ -2,13 +2,13 @@ name: Classic - AutoGPT Docker CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [master, dev]
|
||||
branches: [ master, development ]
|
||||
paths:
|
||||
- '.github/workflows/classic-autogpt-docker-ci.yml'
|
||||
- 'classic/original_autogpt/**'
|
||||
- 'classic/forge/**'
|
||||
pull_request:
|
||||
branches: [ master, dev, release-* ]
|
||||
branches: [ master, development, release-* ]
|
||||
paths:
|
||||
- '.github/workflows/classic-autogpt-docker-ci.yml'
|
||||
- 'classic/original_autogpt/**'
|
||||
@@ -34,58 +34,58 @@ jobs:
|
||||
matrix:
|
||||
build-type: [release, dev]
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- if: runner.debug
|
||||
run: |
|
||||
ls -al
|
||||
du -hs *
|
||||
- if: runner.debug
|
||||
run: |
|
||||
ls -al
|
||||
du -hs *
|
||||
|
||||
- id: build
|
||||
name: Build image
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: classic/
|
||||
file: classic/Dockerfile.autogpt
|
||||
build-args: BUILD_TYPE=${{ matrix.build-type }}
|
||||
tags: ${{ env.IMAGE_NAME }}
|
||||
labels: GIT_REVISION=${{ github.sha }}
|
||||
load: true # save to docker images
|
||||
# cache layers in GitHub Actions cache to speed up builds
|
||||
cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }}
|
||||
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
|
||||
- id: build
|
||||
name: Build image
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: classic/
|
||||
file: classic/Dockerfile.autogpt
|
||||
build-args: BUILD_TYPE=${{ matrix.build-type }}
|
||||
tags: ${{ env.IMAGE_NAME }}
|
||||
labels: GIT_REVISION=${{ github.sha }}
|
||||
load: true # save to docker images
|
||||
# cache layers in GitHub Actions cache to speed up builds
|
||||
cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }}
|
||||
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
|
||||
|
||||
- name: Generate build report
|
||||
env:
|
||||
event_name: ${{ github.event_name }}
|
||||
event_ref: ${{ github.event.ref }}
|
||||
event_ref_type: ${{ github.event.ref}}
|
||||
- name: Generate build report
|
||||
env:
|
||||
event_name: ${{ github.event_name }}
|
||||
event_ref: ${{ github.event.ref }}
|
||||
event_ref_type: ${{ github.event.ref}}
|
||||
|
||||
build_type: ${{ matrix.build-type }}
|
||||
build_type: ${{ matrix.build-type }}
|
||||
|
||||
prod_branch: master
|
||||
dev_branch: dev
|
||||
repository: ${{ github.repository }}
|
||||
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}
|
||||
prod_branch: master
|
||||
dev_branch: development
|
||||
repository: ${{ github.repository }}
|
||||
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
|
||||
|
||||
current_ref: ${{ github.ref_name }}
|
||||
commit_hash: ${{ github.event.after }}
|
||||
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
|
||||
push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}
|
||||
current_ref: ${{ github.ref_name }}
|
||||
commit_hash: ${{ github.event.after }}
|
||||
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
|
||||
push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}
|
||||
|
||||
new_commits_json: ${{ toJSON(github.event.commits) }}
|
||||
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
|
||||
new_commits_json: ${{ toJSON(github.event.commits) }}
|
||||
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
|
||||
|
||||
github_context_json: ${{ toJSON(github) }}
|
||||
job_env_json: ${{ toJSON(env) }}
|
||||
vars_json: ${{ toJSON(vars) }}
|
||||
github_context_json: ${{ toJSON(github) }}
|
||||
job_env_json: ${{ toJSON(env) }}
|
||||
vars_json: ${{ toJSON(vars) }}
|
||||
|
||||
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
|
||||
continue-on-error: true
|
||||
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
|
||||
continue-on-error: true
|
||||
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -117,16 +117,16 @@ jobs:
|
||||
|
||||
- id: build
|
||||
name: Build image
|
||||
uses: docker/build-push-action@v6
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: classic/
|
||||
file: classic/Dockerfile.autogpt
|
||||
build-args: BUILD_TYPE=dev # include pytest
|
||||
build-args: BUILD_TYPE=dev # include pytest
|
||||
tags: >
|
||||
${{ env.IMAGE_NAME }},
|
||||
${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}
|
||||
labels: GIT_REVISION=${{ github.sha }}
|
||||
load: true # save to docker images
|
||||
load: true # save to docker images
|
||||
# cache layers in GitHub Actions cache to speed up builds
|
||||
cache-from: type=gha,scope=autogpt-docker-dev
|
||||
cache-to: type=gha,scope=autogpt-docker-dev,mode=max
|
||||
|
||||
.github/workflows/classic-autogpt-docker-release.yml (vendored, 112 lines changed)
@@ -2,7 +2,7 @@ name: Classic - AutoGPT Docker Release
|
||||
|
||||
on:
|
||||
release:
|
||||
types: [published, edited]
|
||||
types: [ published, edited ]
|
||||
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
@@ -19,69 +19,69 @@ jobs:
|
||||
if: startsWith(github.ref, 'refs/tags/autogpt-')
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Log in to Docker hub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USER }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
- name: Log in to Docker hub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USER }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
# slashes are not allowed in image tags, but can appear in git branch or tag names
|
||||
- id: sanitize_tag
|
||||
name: Sanitize image tag
|
||||
run: |
|
||||
tag=${raw_tag//\//-}
|
||||
echo tag=${tag#autogpt-} >> $GITHUB_OUTPUT
|
||||
env:
|
||||
raw_tag: ${{ github.ref_name }}
|
||||
# slashes are not allowed in image tags, but can appear in git branch or tag names
|
||||
- id: sanitize_tag
|
||||
name: Sanitize image tag
|
||||
run: |
|
||||
tag=${raw_tag//\//-}
|
||||
echo tag=${tag#autogpt-} >> $GITHUB_OUTPUT
|
||||
env:
|
||||
raw_tag: ${{ github.ref_name }}
|
||||
|
||||
- id: build
|
||||
name: Build image
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: classic/
|
||||
file: Dockerfile.autogpt
|
||||
build-args: BUILD_TYPE=release
|
||||
load: true # save to docker images
|
||||
# push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
|
||||
tags: >
|
||||
${{ env.IMAGE_NAME }},
|
||||
${{ env.DEPLOY_IMAGE_NAME }}:latest,
|
||||
${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
|
||||
labels: GIT_REVISION=${{ github.sha }}
|
||||
- id: build
|
||||
name: Build image
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: classic/
|
||||
file: Dockerfile.autogpt
|
||||
build-args: BUILD_TYPE=release
|
||||
load: true # save to docker images
|
||||
# push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
|
||||
tags: >
|
||||
${{ env.IMAGE_NAME }},
|
||||
${{ env.DEPLOY_IMAGE_NAME }}:latest,
|
||||
${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
|
||||
labels: GIT_REVISION=${{ github.sha }}
|
||||
|
||||
# cache layers in GitHub Actions cache to speed up builds
|
||||
cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release
|
||||
cache-to: type=gha,scope=autogpt-docker-release,mode=max
|
||||
# cache layers in GitHub Actions cache to speed up builds
|
||||
cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release
|
||||
cache-to: type=gha,scope=autogpt-docker-release,mode=max
|
||||
|
||||
- name: Push image to Docker Hub
|
||||
run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}
|
||||
- name: Push image to Docker Hub
|
||||
run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}
|
||||
|
||||
- name: Generate build report
|
||||
env:
|
||||
event_name: ${{ github.event_name }}
|
||||
event_ref: ${{ github.event.ref }}
|
||||
event_ref_type: ${{ github.event.ref}}
|
||||
inputs_no_cache: ${{ inputs.no_cache }}
|
||||
- name: Generate build report
|
||||
env:
|
||||
event_name: ${{ github.event_name }}
|
||||
event_ref: ${{ github.event.ref }}
|
||||
event_ref_type: ${{ github.event.ref}}
|
||||
inputs_no_cache: ${{ inputs.no_cache }}
|
||||
|
||||
prod_branch: master
|
||||
dev_branch: dev
|
||||
repository: ${{ github.repository }}
|
||||
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}
|
||||
prod_branch: master
|
||||
dev_branch: development
|
||||
repository: ${{ github.repository }}
|
||||
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
|
||||
|
||||
ref_type: ${{ github.ref_type }}
|
||||
current_ref: ${{ github.ref_name }}
|
||||
commit_hash: ${{ github.sha }}
|
||||
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
|
||||
ref_type: ${{ github.ref_type }}
|
||||
current_ref: ${{ github.ref_name }}
|
||||
commit_hash: ${{ github.sha }}
|
||||
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
|
||||
|
||||
github_context_json: ${{ toJSON(github) }}
|
||||
job_env_json: ${{ toJSON(env) }}
|
||||
vars_json: ${{ toJSON(vars) }}
|
||||
github_context_json: ${{ toJSON(github) }}
|
||||
job_env_json: ${{ toJSON(env) }}
|
||||
vars_json: ${{ toJSON(vars) }}
|
||||
|
||||
run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY
|
||||
continue-on-error: true
|
||||
run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY
|
||||
continue-on-error: true
|
||||
|
||||
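The release workflow above sanitizes the git ref before using it as a Docker image tag: slashes are replaced and the `autogpt-` prefix is stripped, using plain Bash parameter expansion. A standalone sketch of that step follows; the sample ref name is hypothetical, and in the workflow `raw_tag` comes from `github.ref_name` with the result written to `$GITHUB_OUTPUT`:

```bash
#!/usr/bin/env bash
# Standalone sketch of the sanitize_tag step above.
raw_tag="autogpt-v0.5.0/rc1"   # hypothetical ref name; the workflow uses ${{ github.ref_name }}

# Replace every "/" with "-": slashes are not allowed in Docker image tags.
tag=${raw_tag//\//-}

# Strip the leading "autogpt-" release-tag prefix, if present.
tag=${tag#autogpt-}

echo "tag=${tag}"   # prints: tag=v0.5.0-rc1
```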
.github/workflows/classic-autogpts-ci.yml (vendored, 4 lines changed)
@@ -5,7 +5,7 @@ on:
  schedule:
    - cron: '0 8 * * *'
  push:
    branches: [ master, dev, ci-test* ]
    branches: [ master, development, ci-test* ]
    paths:
      - '.github/workflows/classic-autogpts-ci.yml'
      - 'classic/original_autogpt/**'
@@ -16,7 +16,7 @@ on:
      - 'classic/setup.py'
      - '!**/*.md'
  pull_request:
    branches: [ master, dev, release-* ]
    branches: [ master, development, release-* ]
    paths:
      - '.github/workflows/classic-autogpts-ci.yml'
      - 'classic/original_autogpt/**'
.github/workflows/classic-benchmark-ci.yml (vendored, 37 lines changed)
@@ -2,13 +2,13 @@ name: Classic - AGBenchmark CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master, dev, ci-test* ]
|
||||
branches: [ master, development, ci-test* ]
|
||||
paths:
|
||||
- 'classic/benchmark/**'
|
||||
- '!classic/benchmark/reports/**'
|
||||
- .github/workflows/classic-benchmark-ci.yml
|
||||
pull_request:
|
||||
branches: [ master, dev, release-* ]
|
||||
branches: [ master, development, release-* ]
|
||||
paths:
|
||||
- 'classic/benchmark/**'
|
||||
- '!classic/benchmark/reports/**'
|
||||
@@ -87,20 +87,13 @@ jobs:
|
||||
poetry run pytest -vv \
|
||||
--cov=agbenchmark --cov-branch --cov-report term-missing --cov-report xml \
|
||||
--durations=10 \
|
||||
--junitxml=junit.xml -o junit_family=legacy \
|
||||
tests
|
||||
env:
|
||||
CI: true
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
|
||||
- name: Upload test results to Codecov
|
||||
if: ${{ !cancelled() }} # Run even if tests fail
|
||||
uses: codecov/test-results-action@v1
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
|
||||
- name: Upload coverage reports to Codecov
|
||||
uses: codecov/codecov-action@v5
|
||||
uses: codecov/codecov-action@v4
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
flags: agbenchmark,${{ runner.os }}
|
||||
@@ -109,7 +102,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
agent-name: [forge]
|
||||
agent-name: [ forge ]
|
||||
fail-fast: false
|
||||
timeout-minutes: 20
|
||||
steps:
|
||||
@@ -153,23 +146,23 @@ jobs:
|
||||
echo "Running the following command: poetry run agbenchmark --mock --category=coding"
|
||||
poetry run agbenchmark --mock --category=coding
|
||||
|
||||
# echo "Running the following command: poetry run agbenchmark --test=WriteFile"
|
||||
# poetry run agbenchmark --test=WriteFile
|
||||
echo "Running the following command: poetry run agbenchmark --test=WriteFile"
|
||||
poetry run agbenchmark --test=WriteFile
|
||||
cd ../benchmark
|
||||
poetry install
|
||||
echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed"
|
||||
export BUILD_SKILL_TREE=true
|
||||
|
||||
# poetry run agbenchmark --mock
|
||||
poetry run agbenchmark --mock
|
||||
|
||||
# CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
|
||||
# if [ ! -z "$CHANGED" ]; then
|
||||
# echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
|
||||
# echo "$CHANGED"
|
||||
# exit 1
|
||||
# else
|
||||
# echo "No unstaged changes."
|
||||
# fi
|
||||
CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
|
||||
if [ ! -z "$CHANGED" ]; then
|
||||
echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
|
||||
echo "$CHANGED"
|
||||
exit 1
|
||||
else
|
||||
echo "No unstaged changes."
|
||||
fi
|
||||
env:
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
|
||||
|
||||
.github/workflows/classic-forge-ci.yml (vendored, 13 lines changed)
@@ -2,13 +2,13 @@ name: Classic - Forge CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master, dev, ci-test* ]
|
||||
branches: [ master, development, ci-test* ]
|
||||
paths:
|
||||
- '.github/workflows/classic-forge-ci.yml'
|
||||
- 'classic/forge/**'
|
||||
- '!classic/forge/tests/vcr_cassettes'
|
||||
pull_request:
|
||||
branches: [ master, dev, release-* ]
|
||||
branches: [ master, development, release-* ]
|
||||
paths:
|
||||
- '.github/workflows/classic-forge-ci.yml'
|
||||
- 'classic/forge/**'
|
||||
@@ -139,7 +139,6 @@ jobs:
|
||||
poetry run pytest -vv \
|
||||
--cov=forge --cov-branch --cov-report term-missing --cov-report xml \
|
||||
--durations=10 \
|
||||
--junitxml=junit.xml -o junit_family=legacy \
|
||||
forge
|
||||
env:
|
||||
CI: true
|
||||
@@ -149,14 +148,8 @@ jobs:
|
||||
AWS_ACCESS_KEY_ID: minioadmin
|
||||
AWS_SECRET_ACCESS_KEY: minioadmin
|
||||
|
||||
- name: Upload test results to Codecov
|
||||
if: ${{ !cancelled() }} # Run even if tests fail
|
||||
uses: codecov/test-results-action@v1
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
|
||||
- name: Upload coverage reports to Codecov
|
||||
uses: codecov/codecov-action@v5
|
||||
uses: codecov/codecov-action@v4
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
flags: forge,${{ runner.os }}
|
||||
|
||||
.github/workflows/classic-frontend-ci.yml (vendored, 68 lines changed)
@@ -4,15 +4,15 @@ on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- dev
|
||||
- development
|
||||
- 'ci-test*' # This will match any branch that starts with "ci-test"
|
||||
paths:
|
||||
- 'classic/frontend/**'
|
||||
- '.github/workflows/classic-frontend-ci.yml'
|
||||
- '.github/workflows/frontend-ci.yml'
|
||||
pull_request:
|
||||
paths:
|
||||
- 'classic/frontend/**'
|
||||
- '.github/workflows/classic-frontend-ci.yml'
|
||||
- '.github/workflows/frontend-ci.yml'
|
||||
|
||||
jobs:
|
||||
build:
|
||||
@@ -21,40 +21,40 @@ jobs:
|
||||
pull-requests: write
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
BUILD_BRANCH: ${{ format('classic-frontend-build/{0}', github.ref_name) }}
|
||||
BUILD_BRANCH: ${{ format('frontend-build/{0}', github.ref_name) }}
|
||||
|
||||
steps:
|
||||
- name: Checkout Repo
|
||||
uses: actions/checkout@v4
|
||||
- name: Checkout Repo
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Flutter
|
||||
uses: subosito/flutter-action@v2
|
||||
with:
|
||||
flutter-version: '3.13.2'
|
||||
- name: Setup Flutter
|
||||
uses: subosito/flutter-action@v2
|
||||
with:
|
||||
flutter-version: '3.13.2'
|
||||
|
||||
- name: Build Flutter to Web
|
||||
run: |
|
||||
cd classic/frontend
|
||||
flutter build web --base-href /app/
|
||||
- name: Build Flutter to Web
|
||||
run: |
|
||||
cd classic/frontend
|
||||
flutter build web --base-href /app/
|
||||
|
||||
# - name: Commit and Push to ${{ env.BUILD_BRANCH }}
|
||||
# if: github.event_name == 'push'
|
||||
# run: |
|
||||
# git config --local user.email "action@github.com"
|
||||
# git config --local user.name "GitHub Action"
|
||||
# git add classic/frontend/build/web
|
||||
# git checkout -B ${{ env.BUILD_BRANCH }}
|
||||
# git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
|
||||
# git push -f origin ${{ env.BUILD_BRANCH }}
|
||||
# - name: Commit and Push to ${{ env.BUILD_BRANCH }}
|
||||
# if: github.event_name == 'push'
|
||||
# run: |
|
||||
# git config --local user.email "action@github.com"
|
||||
# git config --local user.name "GitHub Action"
|
||||
# git add classic/frontend/build/web
|
||||
# git checkout -B ${{ env.BUILD_BRANCH }}
|
||||
# git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
|
||||
# git push -f origin ${{ env.BUILD_BRANCH }}
|
||||
|
||||
- name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
|
||||
if: github.event_name == 'push'
|
||||
uses: peter-evans/create-pull-request@v7
|
||||
with:
|
||||
add-paths: classic/frontend/build/web
|
||||
base: ${{ github.ref_name }}
|
||||
branch: ${{ env.BUILD_BRANCH }}
|
||||
delete-branch: true
|
||||
title: "Update frontend build in `${{ github.ref_name }}`"
|
||||
body: "This PR updates the frontend build based on commit ${{ github.sha }}."
|
||||
commit-message: "Update frontend build based on commit ${{ github.sha }}"
|
||||
- name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
|
||||
if: github.event_name == 'push'
|
||||
uses: peter-evans/create-pull-request@v6
|
||||
with:
|
||||
add-paths: classic/frontend/build/web
|
||||
base: ${{ github.ref_name }}
|
||||
branch: ${{ env.BUILD_BRANCH }}
|
||||
delete-branch: true
|
||||
title: "Update frontend build in `${{ github.ref_name }}`"
|
||||
body: "This PR updates the frontend build based on commit ${{ github.sha }}."
|
||||
commit-message: "Update frontend build based on commit ${{ github.sha }}"
|
||||
|
||||
.github/workflows/classic-python-checks.yml (vendored, 10 lines changed)
@@ -2,18 +2,18 @@ name: Classic - Python checks

on:
  push:
    branches: [ master, dev, ci-test* ]
    branches: [ master, development, ci-test* ]
    paths:
      - '.github/workflows/classic-python-checks-ci.yml'
      - '.github/workflows/lint-ci.yml'
      - 'classic/original_autogpt/**'
      - 'classic/forge/**'
      - 'classic/benchmark/**'
      - '**.py'
      - '!classic/forge/tests/vcr_cassettes'
  pull_request:
    branches: [ master, dev, release-* ]
    branches: [ master, development, release-* ]
    paths:
      - '.github/workflows/classic-python-checks-ci.yml'
      - '.github/workflows/lint-ci.yml'
      - 'classic/original_autogpt/**'
      - 'classic/forge/**'
      - 'classic/benchmark/**'
@@ -21,7 +21,7 @@ on:
      - '!classic/forge/tests/vcr_cassettes'

concurrency:
  group: ${{ format('classic-python-checks-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
  group: ${{ format('lint-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

defaults:
.github/workflows/claude.yml (vendored, 47 lines changed)
@@ -1,47 +0,0 @@
|
||||
name: Claude Code
|
||||
|
||||
on:
|
||||
issue_comment:
|
||||
types: [created]
|
||||
pull_request_review_comment:
|
||||
types: [created]
|
||||
issues:
|
||||
types: [opened, assigned]
|
||||
pull_request_review:
|
||||
types: [submitted]
|
||||
|
||||
jobs:
|
||||
claude:
|
||||
if: |
|
||||
(
|
||||
(github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
|
||||
(github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
|
||||
(github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
|
||||
(github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
|
||||
) && (
|
||||
github.event.comment.author_association == 'OWNER' ||
|
||||
github.event.comment.author_association == 'MEMBER' ||
|
||||
github.event.comment.author_association == 'COLLABORATOR' ||
|
||||
github.event.review.author_association == 'OWNER' ||
|
||||
github.event.review.author_association == 'MEMBER' ||
|
||||
github.event.review.author_association == 'COLLABORATOR' ||
|
||||
github.event.issue.author_association == 'OWNER' ||
|
||||
github.event.issue.author_association == 'MEMBER' ||
|
||||
github.event.issue.author_association == 'COLLABORATOR'
|
||||
)
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: read
|
||||
issues: read
|
||||
id-token: write
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 1
|
||||
- name: Run Claude Code
|
||||
id: claude
|
||||
uses: anthropics/claude-code-action@beta
|
||||
with:
|
||||
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
.github/workflows/codeql.yml (vendored, 98 lines changed)
@@ -1,98 +0,0 @@
|
||||
# For most projects, this workflow file will not need changing; you simply need
|
||||
# to commit it to your repository.
|
||||
#
|
||||
# You may wish to alter this file to override the set of languages analyzed,
|
||||
# or to provide custom queries or build logic.
|
||||
#
|
||||
# ******** NOTE ********
|
||||
# We have attempted to detect the languages in your repository. Please check
|
||||
# the `language` matrix defined below to confirm you have the correct set of
|
||||
# supported CodeQL languages.
|
||||
#
|
||||
name: "CodeQL"
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "master", "release-*", "dev" ]
|
||||
pull_request:
|
||||
branches: [ "master", "release-*", "dev" ]
|
||||
merge_group:
|
||||
schedule:
|
||||
- cron: '15 4 * * 0'
|
||||
|
||||
jobs:
|
||||
analyze:
|
||||
name: Analyze (${{ matrix.language }})
|
||||
# Runner size impacts CodeQL analysis time. To learn more, please see:
|
||||
# - https://gh.io/recommended-hardware-resources-for-running-codeql
|
||||
# - https://gh.io/supported-runners-and-hardware-resources
|
||||
# - https://gh.io/using-larger-runners (GitHub.com only)
|
||||
# Consider using larger runners or machines with greater resources for possible analysis time improvements.
|
||||
runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
|
||||
permissions:
|
||||
# required for all workflows
|
||||
security-events: write
|
||||
|
||||
# required to fetch internal or private CodeQL packs
|
||||
packages: read
|
||||
|
||||
# only required for workflows in private repositories
|
||||
actions: read
|
||||
contents: read
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- language: typescript
|
||||
build-mode: none
|
||||
- language: python
|
||||
build-mode: none
|
||||
# CodeQL supports the following values keywords for 'language': 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift'
|
||||
# Use `c-cpp` to analyze code written in C, C++ or both
|
||||
# Use 'java-kotlin' to analyze code written in Java, Kotlin or both
|
||||
# Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
|
||||
# To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
|
||||
# see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
|
||||
# If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
|
||||
# your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
# Initializes the CodeQL tools for scanning.
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@v3
|
||||
with:
|
||||
languages: ${{ matrix.language }}
|
||||
build-mode: ${{ matrix.build-mode }}
|
||||
# If you wish to specify custom queries, you can do so here or in a config file.
|
||||
# By default, queries listed here will override any specified in a config file.
|
||||
# Prefix the list here with "+" to use these queries and those in the config file.
|
||||
config: |
|
||||
paths-ignore:
|
||||
- classic/frontend/build/**
|
||||
|
||||
# For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
|
||||
# queries: security-extended,security-and-quality
|
||||
|
||||
# If the analyze step fails for one of the languages you are analyzing with
|
||||
# "We were unable to automatically build your code", modify the matrix above
|
||||
# to set the build mode to "manual" for that language. Then modify this step
|
||||
# to build your code.
|
||||
# ℹ️ Command-line programs to run using the OS shell.
|
||||
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
|
||||
- if: matrix.build-mode == 'manual'
|
||||
shell: bash
|
||||
run: |
|
||||
echo 'If you are using a "manual" build mode for one or more of the' \
|
||||
'languages you are analyzing, replace this with the commands to build' \
|
||||
'your code, for example:'
|
||||
echo ' make bootstrap'
|
||||
echo ' make release'
|
||||
exit 1
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@v3
|
||||
with:
|
||||
category: "/language:${{matrix.language}}"
|
||||
@@ -1,50 +0,0 @@
|
||||
name: AutoGPT Platform - Deploy Prod Environment
|
||||
|
||||
on:
|
||||
release:
|
||||
types: [published]
|
||||
|
||||
permissions:
|
||||
contents: 'read'
|
||||
id-token: 'write'
|
||||
|
||||
jobs:
|
||||
migrate:
|
||||
environment: production
|
||||
name: Run migrations for AutoGPT Platform
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.11'
|
||||
|
||||
- name: Install Python dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install prisma
|
||||
|
||||
- name: Run Backend Migrations
|
||||
working-directory: ./autogpt_platform/backend
|
||||
run: |
|
||||
python -m prisma migrate deploy
|
||||
env:
|
||||
DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}
|
||||
DIRECT_URL: ${{ secrets.BACKEND_DATABASE_URL }}
|
||||
|
||||
|
||||
trigger:
|
||||
needs: migrate
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Trigger deploy workflow
|
||||
uses: peter-evans/repository-dispatch@v3
|
||||
with:
|
||||
token: ${{ secrets.DEPLOY_TOKEN }}
|
||||
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
|
||||
event-type: build_deploy_prod
|
||||
client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "repository": "${{ github.repository }}"}'
|
||||
.github/workflows/platform-autogpt-builder-ci.yml (vendored, new file, 41 lines)
@@ -0,0 +1,41 @@
name: Platform - AutoGPT Builder CI

on:
  push:
    branches: [ master ]
    paths:
      - '.github/workflows/autogpt-builder-ci.yml'
      - 'autogpt_platform/autogpt_builder/**'
  pull_request:
    paths:
      - '.github/workflows/autogpt-builder-ci.yml'
      - 'autogpt_platform/autogpt_builder/**'

defaults:
  run:
    shell: bash
    working-directory: autogpt_platform/autogpt_builder

jobs:

  lint:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '21'

      - name: Install dependencies
        run: |
          npm install

      - name: Check formatting with Prettier
        run: |
          npx prettier --check .

      - name: Run lint
        run: |
          npm run lint
@@ -1,51 +0,0 @@
|
||||
name: AutoGPT Platform - Deploy Dev Environment
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ dev ]
|
||||
paths:
|
||||
- 'autogpt_platform/**'
|
||||
|
||||
permissions:
|
||||
contents: 'read'
|
||||
id-token: 'write'
|
||||
|
||||
jobs:
|
||||
migrate:
|
||||
environment: develop
|
||||
name: Run migrations for AutoGPT Platform
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.11'
|
||||
|
||||
- name: Install Python dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install prisma
|
||||
|
||||
- name: Run Backend Migrations
|
||||
working-directory: ./autogpt_platform/backend
|
||||
run: |
|
||||
python -m prisma migrate deploy
|
||||
env:
|
||||
DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}
|
||||
DIRECT_URL: ${{ secrets.BACKEND_DATABASE_URL }}
|
||||
|
||||
trigger:
|
||||
needs: migrate
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Trigger deploy workflow
|
||||
uses: peter-evans/repository-dispatch@v3
|
||||
with:
|
||||
token: ${{ secrets.DEPLOY_TOKEN }}
|
||||
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
|
||||
event-type: build_deploy_dev
|
||||
client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "repository": "${{ github.repository }}"}'
|
||||
.github/workflows/platform-autogpt-infra-ci.yml (vendored, new file, 56 lines)
@@ -0,0 +1,56 @@
name: Platform - AutoGPT Builder Infra

on:
  push:
    branches: [ master ]
    paths:
      - '.github/workflows/autogpt-infra-ci.yml'
      - 'autogpt_platform/infra/**'
  pull_request:
    paths:
      - '.github/workflows/autogpt-infra-ci.yml'
      - 'autogpt_platform/infra/**'

defaults:
  run:
    shell: bash
    working-directory: autogpt_platform/infra

jobs:
  lint:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: TFLint
        uses: pauloconnor/tflint-action@v0.0.2
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tflint_path: terraform/
          tflint_recurse: true
          tflint_changed_only: false

      - name: Set up Helm
        uses: azure/setup-helm@v4.2.0
        with:
          version: v3.14.4

      - name: Set up chart-testing
        uses: helm/chart-testing-action@v2.6.0

      - name: Run chart-testing (list-changed)
        id: list-changed
        run: |
          changed=$(ct list-changed --target-branch ${{ github.event.repository.default_branch }})
          if [[ -n "$changed" ]]; then
            echo "changed=true" >> "$GITHUB_OUTPUT"
          fi

      - name: Run chart-testing (lint)
        if: steps.list-changed.outputs.changed == 'true'
        run: ct lint --target-branch ${{ github.event.repository.default_branch }}
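The last two steps above use a common Actions pattern: one step records `changed=true` in `$GITHUB_OUTPUT` only when `ct list-changed` finds modified charts, and the lint step's `if:` condition keys off that output. A rough standalone sketch of the same gating logic, with a temp file standing in for `$GITHUB_OUTPUT` and `git diff` against an assumed `master` branch standing in for `ct list-changed`:

```bash
#!/usr/bin/env bash
# Sketch of the "record an output, gate a later step on it" pattern above.
set -euo pipefail

GITHUB_OUTPUT=$(mktemp)   # Actions provides this file; faked here
target_branch="master"    # assumption; the workflow uses the repository's default branch

# Stand-in for: changed=$(ct list-changed --target-branch "$target_branch")
changed=$(git diff --name-only "$target_branch" -- autogpt_platform/infra/ || true)

if [[ -n "$changed" ]]; then
  echo "changed=true" >> "$GITHUB_OUTPUT"
fi

# Equivalent of the lint step's `if: steps.list-changed.outputs.changed == 'true'`
if grep -qx 'changed=true' "$GITHUB_OUTPUT"; then
  echo "charts changed; ct lint --target-branch ${target_branch} would run here"
fi
```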
.github/workflows/platform-autogpt-server-ci.yml (vendored, new file, 155 lines)
@@ -0,0 +1,155 @@
|
||||
name: Platform - AutoGPT Server CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [master, development, ci-test*]
|
||||
paths:
|
||||
- ".github/workflows/autogpt-server-ci.yml"
|
||||
- "autogpt_platform/autogpt_server/**"
|
||||
pull_request:
|
||||
branches: [master, development, release-*]
|
||||
paths:
|
||||
- ".github/workflows/autogpt-server-ci.yml"
|
||||
- "autogpt_platform/autogpt_server/**"
|
||||
|
||||
concurrency:
|
||||
group: ${{ format('autogpt-server-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
|
||||
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
working-directory: autogpt_platform/autogpt_server
|
||||
|
||||
jobs:
|
||||
test:
|
||||
permissions:
|
||||
contents: read
|
||||
timeout-minutes: 30
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: ["3.10"]
|
||||
platform-os: [ubuntu, macos, macos-arm64, windows]
|
||||
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
|
||||
|
||||
steps:
|
||||
- name: Setup PostgreSQL
|
||||
uses: ikalnytskyi/action-setup-postgres@v6
|
||||
with:
|
||||
username: ${{ secrets.DB_USER || 'postgres' }}
|
||||
password: ${{ secrets.DB_PASS || 'postgres' }}
|
||||
database: postgres
|
||||
port: 5432
|
||||
id: postgres
|
||||
|
||||
# Quite slow on macOS (2~4 minutes to set up Docker)
|
||||
# - name: Set up Docker (macOS)
|
||||
# if: runner.os == 'macOS'
|
||||
# uses: crazy-max/ghaction-setup-docker@v3
|
||||
|
||||
- name: Start MinIO service (Linux)
|
||||
if: runner.os == 'Linux'
|
||||
working-directory: "."
|
||||
run: |
|
||||
docker pull minio/minio:edge-cicd
|
||||
docker run -d -p 9000:9000 minio/minio:edge-cicd
|
||||
|
||||
- name: Start MinIO service (macOS)
|
||||
if: runner.os == 'macOS'
|
||||
working-directory: ${{ runner.temp }}
|
||||
run: |
|
||||
brew install minio/stable/minio
|
||||
mkdir data
|
||||
minio server ./data &
|
||||
|
||||
# No MinIO on Windows:
|
||||
# - Windows doesn't support running Linux Docker containers
|
||||
# - It doesn't seem possible to start background processes on Windows. They are
|
||||
# killed after the step returns.
|
||||
# See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
submodules: true
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- id: get_date
|
||||
name: Get date
|
||||
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set up Python dependency cache
|
||||
# On Windows, unpacking cached dependencies takes longer than just installing them
|
||||
if: runner.os != 'Windows'
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
|
||||
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/autogpt_server/poetry.lock') }}
|
||||
|
||||
- name: Install Poetry (Unix)
|
||||
if: runner.os != 'Windows'
|
||||
run: |
|
||||
curl -sSL https://install.python-poetry.org | python3 -
|
||||
|
||||
if [ "${{ runner.os }}" = "macOS" ]; then
|
||||
PATH="$HOME/.local/bin:$PATH"
|
||||
echo "$HOME/.local/bin" >> $GITHUB_PATH
|
||||
fi
|
||||
|
||||
- name: Install Poetry (Windows)
|
||||
if: runner.os == 'Windows'
|
||||
shell: pwsh
|
||||
run: |
|
||||
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
|
||||
|
||||
$env:PATH += ";$env:APPDATA\Python\Scripts"
|
||||
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
|
||||
|
||||
- name: Install Python dependencies
|
||||
run: poetry install
|
||||
|
||||
- name: Generate Prisma Client
|
||||
run: poetry run prisma generate
|
||||
|
||||
- name: Run Database Migrations
|
||||
run: poetry run prisma migrate dev --name updates
|
||||
env:
|
||||
CONNECTION_STR: ${{ steps.postgres.outputs.connection-uri }}
|
||||
|
||||
- id: lint
|
||||
name: Run Linter
|
||||
run: poetry run lint
|
||||
|
||||
- name: Run pytest with coverage
|
||||
run: |
|
||||
if [[ "${{ runner.debug }}" == "1" ]]; then
|
||||
poetry run pytest -vv -o log_cli=true -o log_cli_level=DEBUG test
|
||||
else
|
||||
poetry run pytest -vv test
|
||||
fi
|
||||
if: success() || (failure() && steps.lint.outcome == 'failure')
|
||||
env:
|
||||
LOG_LEVEL: ${{ runner.debug && 'DEBUG' || 'INFO' }}
|
||||
env:
|
||||
CI: true
|
||||
PLAIN_OUTPUT: True
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
DB_USER: ${{ secrets.DB_USER || 'postgres' }}
|
||||
DB_PASS: ${{ secrets.DB_PASS || 'postgres' }}
|
||||
DB_NAME: postgres
|
||||
DB_PORT: 5432
|
||||
RUN_ENV: local
|
||||
PORT: 8080
|
||||
DATABASE_URL: postgresql://${{ secrets.DB_USER || 'postgres' }}:${{ secrets.DB_PASS || 'postgres' }}@localhost:5432/${{ secrets.DB_NAME || 'postgres'}}
|
||||
|
||||
# - name: Upload coverage reports to Codecov
|
||||
# uses: codecov/codecov-action@v4
|
||||
# with:
|
||||
# token: ${{ secrets.CODECOV_TOKEN }}
|
||||
# flags: autogpt-server,${{ runner.os }}
|
||||
.github/workflows/platform-backend-ci.yml (vendored, 228 lines changed)
@@ -1,228 +0,0 @@
|
||||
name: AutoGPT Platform - Backend CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [master, dev, ci-test*]
|
||||
paths:
|
||||
- ".github/workflows/platform-backend-ci.yml"
|
||||
- "autogpt_platform/backend/**"
|
||||
- "autogpt_platform/autogpt_libs/**"
|
||||
pull_request:
|
||||
branches: [master, dev, release-*]
|
||||
paths:
|
||||
- ".github/workflows/platform-backend-ci.yml"
|
||||
- "autogpt_platform/backend/**"
|
||||
- "autogpt_platform/autogpt_libs/**"
|
||||
merge_group:
|
||||
|
||||
concurrency:
|
||||
group: ${{ format('backend-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
|
||||
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
working-directory: autogpt_platform/backend
|
||||
|
||||
jobs:
|
||||
test:
|
||||
permissions:
|
||||
contents: read
|
||||
timeout-minutes: 30
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: ["3.11"]
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
services:
|
||||
redis:
|
||||
image: bitnami/redis:6.2
|
||||
env:
|
||||
REDIS_PASSWORD: testpassword
|
||||
ports:
|
||||
- 6379:6379
|
||||
rabbitmq:
|
||||
image: rabbitmq:3.12-management
|
||||
ports:
|
||||
- 5672:5672
|
||||
- 15672:15672
|
||||
env:
|
||||
RABBITMQ_DEFAULT_USER: ${{ env.RABBITMQ_DEFAULT_USER }}
|
||||
RABBITMQ_DEFAULT_PASS: ${{ env.RABBITMQ_DEFAULT_PASS }}
|
||||
clamav:
|
||||
image: clamav/clamav-debian:latest
|
||||
ports:
|
||||
- 3310:3310
|
||||
env:
|
||||
CLAMAV_NO_FRESHCLAMD: false
|
||||
CLAMD_CONF_StreamMaxLength: 50M
|
||||
CLAMD_CONF_MaxFileSize: 100M
|
||||
CLAMD_CONF_MaxScanSize: 100M
|
||||
CLAMD_CONF_MaxThreads: 4
|
||||
CLAMD_CONF_ReadTimeout: 300
|
||||
options: >-
|
||||
--health-cmd "clamdscan --version || exit 1"
|
||||
--health-interval 30s
|
||||
--health-timeout 10s
|
||||
--health-retries 5
|
||||
--health-start-period 180s
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
submodules: true
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- name: Setup Supabase
|
||||
uses: supabase/setup-cli@v1
|
||||
with:
|
||||
version: 1.178.1
|
||||
|
||||
- id: get_date
|
||||
name: Get date
|
||||
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set up Python dependency cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/.cache/pypoetry
|
||||
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
|
||||
|
||||
- name: Install Poetry (Unix)
|
||||
run: |
|
||||
# Extract Poetry version from backend/poetry.lock
|
||||
HEAD_POETRY_VERSION=$(python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
|
||||
echo "Found Poetry version ${HEAD_POETRY_VERSION} in backend/poetry.lock"
|
||||
|
||||
if [ -n "$BASE_REF" ]; then
|
||||
BASE_BRANCH=${BASE_REF/refs\/heads\//}
|
||||
BASE_POETRY_VERSION=$((git show "origin/$BASE_BRANCH":./poetry.lock; true) | python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry -)
|
||||
echo "Found Poetry version ${BASE_POETRY_VERSION} in backend/poetry.lock on ${BASE_REF}"
|
||||
POETRY_VERSION=$(printf '%s\n' "$HEAD_POETRY_VERSION" "$BASE_POETRY_VERSION" | sort -V | tail -n1)
|
||||
else
|
||||
POETRY_VERSION=$HEAD_POETRY_VERSION
|
||||
fi
|
||||
echo "Using Poetry version ${POETRY_VERSION}"
|
||||
|
||||
# Install Poetry
|
||||
curl -sSL https://install.python-poetry.org | POETRY_VERSION=$POETRY_VERSION python3 -
|
||||
|
||||
if [ "${{ runner.os }}" = "macOS" ]; then
|
||||
PATH="$HOME/.local/bin:$PATH"
|
||||
echo "$HOME/.local/bin" >> $GITHUB_PATH
|
||||
fi
|
||||
env:
|
||||
BASE_REF: ${{ github.base_ref || github.event.merge_group.base_ref }}
|
||||
|
||||
- name: Check poetry.lock
|
||||
run: |
|
||||
poetry lock
|
||||
|
||||
if ! git diff --quiet --ignore-matching-lines="^# " poetry.lock; then
|
||||
echo "Error: poetry.lock not up to date."
|
||||
echo
|
||||
git diff poetry.lock
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Install Python dependencies
|
||||
run: poetry install
|
||||
|
||||
- name: Generate Prisma Client
|
||||
run: poetry run prisma generate
|
||||
|
||||
- id: supabase
|
||||
name: Start Supabase
|
||||
working-directory: .
|
||||
run: |
|
||||
supabase init
|
||||
supabase start --exclude postgres-meta,realtime,storage-api,imgproxy,inbucket,studio,edge-runtime,logflare,vector,supavisor
|
||||
supabase status -o env | sed 's/="/=/; s/"$//' >> $GITHUB_OUTPUT
|
||||
# outputs:
|
||||
# DB_URL, API_URL, GRAPHQL_URL, ANON_KEY, SERVICE_ROLE_KEY, JWT_SECRET
|
||||
|
||||
- name: Wait for ClamAV to be ready
|
||||
run: |
|
||||
echo "Waiting for ClamAV daemon to start..."
|
||||
max_attempts=60
|
||||
attempt=0
|
||||
|
||||
until nc -z localhost 3310 || [ $attempt -eq $max_attempts ]; do
|
||||
echo "ClamAV is unavailable - sleeping (attempt $((attempt+1))/$max_attempts)"
|
||||
sleep 5
|
||||
attempt=$((attempt+1))
|
||||
done
|
||||
|
||||
if [ $attempt -eq $max_attempts ]; then
|
||||
echo "ClamAV failed to start after $((max_attempts*5)) seconds"
|
||||
echo "Checking ClamAV service logs..."
|
||||
docker logs $(docker ps -q --filter "ancestor=clamav/clamav-debian:latest") 2>&1 | tail -50 || echo "No ClamAV container found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "ClamAV is ready!"
|
||||
|
||||
# Verify ClamAV is responsive
|
||||
echo "Testing ClamAV connection..."
|
||||
timeout 10 bash -c 'echo "PING" | nc localhost 3310' || {
|
||||
echo "ClamAV is not responding to PING"
|
||||
docker logs $(docker ps -q --filter "ancestor=clamav/clamav-debian:latest") 2>&1 | tail -50 || echo "No ClamAV container found"
|
||||
exit 1
|
||||
}
|
||||
|
||||
- name: Run Database Migrations
|
||||
run: poetry run prisma migrate dev --name updates
|
||||
env:
|
||||
DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
|
||||
DIRECT_URL: ${{ steps.supabase.outputs.DB_URL }}
|
||||
|
||||
- id: lint
|
||||
name: Run Linter
|
||||
run: poetry run lint
|
||||
|
||||
- name: Run pytest with coverage
|
||||
run: |
|
||||
if [[ "${{ runner.debug }}" == "1" ]]; then
|
||||
poetry run pytest -s -vv -o log_cli=true -o log_cli_level=DEBUG
|
||||
else
|
||||
poetry run pytest -s -vv
|
||||
fi
|
||||
if: success() || (failure() && steps.lint.outcome == 'failure')
|
||||
env:
|
||||
LOG_LEVEL: ${{ runner.debug && 'DEBUG' || 'INFO' }}
|
||||
DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
|
||||
DIRECT_URL: ${{ steps.supabase.outputs.DB_URL }}
|
||||
SUPABASE_URL: ${{ steps.supabase.outputs.API_URL }}
|
||||
SUPABASE_SERVICE_ROLE_KEY: ${{ steps.supabase.outputs.SERVICE_ROLE_KEY }}
|
||||
SUPABASE_JWT_SECRET: ${{ steps.supabase.outputs.JWT_SECRET }}
|
||||
REDIS_HOST: "localhost"
|
||||
REDIS_PORT: "6379"
|
||||
REDIS_PASSWORD: "testpassword"
|
||||
ENCRYPTION_KEY: "dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw=" # DO NOT USE IN PRODUCTION!!
|
||||
|
||||
env:
|
||||
CI: true
|
||||
PLAIN_OUTPUT: True
|
||||
RUN_ENV: local
|
||||
PORT: 8080
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
# We know these are here, don't report this as a security vulnerability
|
||||
# This is used as the default credential for the entire system's RabbitMQ instance
|
||||
# If you want to replace this, you can do so by making our entire system generate
|
||||
# new credentials for each local user and update the environment variables in
|
||||
# the backend service, docker composes, and examples
|
||||
RABBITMQ_DEFAULT_USER: "rabbitmq_user_default"
|
||||
RABBITMQ_DEFAULT_PASS: "k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7"
|
||||
|
||||
# - name: Upload coverage reports to Codecov
|
||||
# uses: codecov/codecov-action@v4
|
||||
# with:
|
||||
# token: ${{ secrets.CODECOV_TOKEN }}
|
||||
# flags: backend,${{ runner.os }}
|
||||
@@ -1,198 +0,0 @@
|
||||
name: AutoGPT Platform - Dev Deploy PR Event Dispatcher
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [closed]
|
||||
issue_comment:
|
||||
types: [created]
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
pull-requests: write
|
||||
|
||||
jobs:
|
||||
dispatch:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check comment permissions and deployment status
|
||||
id: check_status
|
||||
if: github.event_name == 'issue_comment' && github.event.issue.pull_request
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
const commentBody = context.payload.comment.body.trim();
|
||||
const commentUser = context.payload.comment.user.login;
|
||||
const prAuthor = context.payload.issue.user.login;
|
||||
const authorAssociation = context.payload.comment.author_association;
|
||||
|
||||
// Check permissions
|
||||
const hasPermission = (
|
||||
authorAssociation === 'OWNER' ||
|
||||
authorAssociation === 'MEMBER' ||
|
||||
authorAssociation === 'COLLABORATOR'
|
||||
);
|
||||
|
||||
core.setOutput('comment_body', commentBody);
|
||||
core.setOutput('has_permission', hasPermission);
|
||||
|
||||
if (!hasPermission && (commentBody === '!deploy' || commentBody === '!undeploy')) {
|
||||
core.setOutput('permission_denied', 'true');
|
||||
return;
|
||||
}
|
||||
|
||||
if (commentBody !== '!deploy' && commentBody !== '!undeploy') {
|
||||
return;
|
||||
}
|
||||
|
||||
// Process deploy command
|
||||
if (commentBody === '!deploy') {
|
||||
core.setOutput('should_deploy', 'true');
|
||||
}
|
||||
// Process undeploy command
|
||||
else if (commentBody === '!undeploy') {
|
||||
core.setOutput('should_undeploy', 'true');
|
||||
}
|
||||
|
||||
- name: Post permission denied comment
|
||||
if: steps.check_status.outputs.permission_denied == 'true'
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
body: `❌ **Permission denied**: Only the repository owners, members, or collaborators can use deployment commands.`
|
||||
});
|
||||
|
||||
- name: Get PR details for deployment
|
||||
id: pr_details
|
||||
if: steps.check_status.outputs.should_deploy == 'true' || steps.check_status.outputs.should_undeploy == 'true'
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
const pr = await github.rest.pulls.get({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
pull_number: context.issue.number
|
||||
});
|
||||
core.setOutput('pr_number', pr.data.number);
|
||||
core.setOutput('pr_title', pr.data.title);
|
||||
core.setOutput('pr_state', pr.data.state);
|
||||
|
||||
- name: Dispatch Deploy Event
|
||||
if: steps.check_status.outputs.should_deploy == 'true'
|
||||
uses: peter-evans/repository-dispatch@v3
|
||||
with:
|
||||
token: ${{ secrets.DISPATCH_TOKEN }}
|
||||
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
|
||||
event-type: pr-event
|
||||
client-payload: |
|
||||
{
|
||||
"action": "deploy",
|
||||
"pr_number": "${{ steps.pr_details.outputs.pr_number }}",
|
||||
"pr_title": "${{ steps.pr_details.outputs.pr_title }}",
|
||||
"pr_state": "${{ steps.pr_details.outputs.pr_state }}",
|
||||
"repo": "${{ github.repository }}"
|
||||
}
|
||||
|
||||
- name: Post deploy success comment
|
||||
if: steps.check_status.outputs.should_deploy == 'true'
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
body: `🚀 **Deploying PR #${{ steps.pr_details.outputs.pr_number }}** to development environment...`
|
||||
});
|
||||
|
||||
- name: Dispatch Undeploy Event (from comment)
|
||||
if: steps.check_status.outputs.should_undeploy == 'true'
|
||||
uses: peter-evans/repository-dispatch@v3
|
||||
with:
|
||||
token: ${{ secrets.DISPATCH_TOKEN }}
|
||||
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
|
||||
event-type: pr-event
|
||||
client-payload: |
|
||||
{
|
||||
"action": "undeploy",
|
||||
"pr_number": "${{ steps.pr_details.outputs.pr_number }}",
|
||||
"pr_title": "${{ steps.pr_details.outputs.pr_title }}",
|
||||
"pr_state": "${{ steps.pr_details.outputs.pr_state }}",
|
||||
"repo": "${{ github.repository }}"
|
||||
}
|
||||
|
||||
- name: Post undeploy success comment
|
||||
if: steps.check_status.outputs.should_undeploy == 'true'
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
body: `🗑️ **Undeploying PR #${{ steps.pr_details.outputs.pr_number }}** from development environment...`
|
||||
});
|
||||
|
||||
- name: Check deployment status on PR close
|
||||
id: check_pr_close
|
||||
if: github.event_name == 'pull_request' && github.event.action == 'closed'
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
const comments = await github.rest.issues.listComments({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number
|
||||
});
|
||||
|
||||
let lastDeployIndex = -1;
|
||||
let lastUndeployIndex = -1;
|
||||
|
||||
comments.data.forEach((comment, index) => {
|
||||
if (comment.body.trim() === '!deploy') {
|
||||
lastDeployIndex = index;
|
||||
} else if (comment.body.trim() === '!undeploy') {
|
||||
lastUndeployIndex = index;
|
||||
}
|
||||
});
|
||||
|
||||
// Should undeploy if there's a !deploy without a subsequent !undeploy
|
||||
const shouldUndeploy = lastDeployIndex !== -1 && lastDeployIndex > lastUndeployIndex;
|
||||
core.setOutput('should_undeploy', shouldUndeploy);
|
||||
|
||||
- name: Dispatch Undeploy Event (PR closed with active deployment)
|
||||
if: >-
|
||||
github.event_name == 'pull_request' &&
|
||||
github.event.action == 'closed' &&
|
||||
steps.check_pr_close.outputs.should_undeploy == 'true'
|
||||
uses: peter-evans/repository-dispatch@v3
|
||||
with:
|
||||
token: ${{ secrets.DISPATCH_TOKEN }}
|
||||
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
|
||||
event-type: pr-event
|
||||
client-payload: |
|
||||
{
|
||||
"action": "undeploy",
|
||||
"pr_number": "${{ github.event.pull_request.number }}",
|
||||
"pr_title": "${{ github.event.pull_request.title }}",
|
||||
"pr_state": "${{ github.event.pull_request.state }}",
|
||||
"repo": "${{ github.repository }}"
|
||||
}
|
||||
|
||||
- name: Post PR close undeploy comment
|
||||
if: >-
|
||||
github.event_name == 'pull_request' &&
|
||||
github.event.action == 'closed' &&
|
||||
steps.check_pr_close.outputs.should_undeploy == 'true'
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
body: `🧹 **Auto-undeploying**: PR closed with active deployment. Cleaning up development environment for PR #${{ github.event.pull_request.number }}.`
|
||||
});
|
||||
279 .github/workflows/platform-frontend-ci.yml (vendored)
@@ -1,279 +0,0 @@
|
||||
name: AutoGPT Platform - Frontend CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [master, dev]
|
||||
paths:
|
||||
- ".github/workflows/platform-frontend-ci.yml"
|
||||
- "autogpt_platform/frontend/**"
|
||||
pull_request:
|
||||
paths:
|
||||
- ".github/workflows/platform-frontend-ci.yml"
|
||||
- "autogpt_platform/frontend/**"
|
||||
merge_group:
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
working-directory: autogpt_platform/frontend
|
||||
|
||||
jobs:
|
||||
setup:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
cache-key: ${{ steps.cache-key.outputs.key }}
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: "21"
|
||||
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Generate cache key
|
||||
id: cache-key
|
||||
run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Cache dependencies
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/.pnpm-store
|
||||
key: ${{ steps.cache-key.outputs.key }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
|
||||
${{ runner.os }}-pnpm-
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
lint:
|
||||
runs-on: ubuntu-latest
|
||||
needs: setup
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: "21"
|
||||
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Restore dependencies cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/.pnpm-store
|
||||
key: ${{ needs.setup.outputs.cache-key }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
|
||||
${{ runner.os }}-pnpm-
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Run lint
|
||||
run: pnpm lint
|
||||
|
||||
type-check:
|
||||
runs-on: ubuntu-latest
|
||||
needs: setup
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: "21"
|
||||
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Restore dependencies cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/.pnpm-store
|
||||
key: ${{ needs.setup.outputs.cache-key }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
|
||||
${{ runner.os }}-pnpm-
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Run tsc check
|
||||
run: pnpm type-check
|
||||
|
||||
chromatic:
|
||||
runs-on: ubuntu-latest
|
||||
needs: setup
|
||||
# Only run on dev branch pushes or PRs targeting dev
|
||||
if: github.ref == 'refs/heads/dev' || github.base_ref == 'dev'
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: "21"
|
||||
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Restore dependencies cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/.pnpm-store
|
||||
key: ${{ needs.setup.outputs.cache-key }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
|
||||
${{ runner.os }}-pnpm-
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Run Chromatic
|
||||
uses: chromaui/action@latest
|
||||
with:
|
||||
projectToken: chpt_9e7c1a76478c9c8
|
||||
onlyChanged: true
|
||||
workingDir: autogpt_platform/frontend
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
exitOnceUploaded: true
|
||||
|
||||
test:
|
||||
runs-on: big-boi
|
||||
needs: setup
|
||||
strategy:
|
||||
fail-fast: false
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: "21"
|
||||
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Copy default supabase .env
|
||||
run: |
|
||||
cp ../.env.example ../.env
|
||||
|
||||
- name: Copy backend .env
|
||||
run: |
|
||||
cp ../backend/.env.example ../backend/.env
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Cache Docker layers
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: /tmp/.buildx-cache
|
||||
key: ${{ runner.os }}-buildx-frontend-test-${{ hashFiles('autogpt_platform/docker-compose.yml', 'autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/pyproject.toml', 'autogpt_platform/backend/poetry.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-buildx-frontend-test-
|
||||
|
||||
- name: Run docker compose
|
||||
run: |
|
||||
docker compose -f ../docker-compose.yml up -d
|
||||
env:
|
||||
DOCKER_BUILDKIT: 1
|
||||
BUILDX_CACHE_FROM: type=local,src=/tmp/.buildx-cache
|
||||
BUILDX_CACHE_TO: type=local,dest=/tmp/.buildx-cache-new,mode=max
|
||||
|
||||
- name: Move cache
|
||||
run: |
|
||||
rm -rf /tmp/.buildx-cache
|
||||
if [ -d "/tmp/.buildx-cache-new" ]; then
|
||||
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
|
||||
fi
|
||||
|
||||
- name: Wait for services to be ready
|
||||
run: |
|
||||
echo "Waiting for rest_server to be ready..."
|
||||
timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
|
||||
echo "Waiting for database to be ready..."
|
||||
timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."
|
||||
|
||||
- name: Create E2E test data
|
||||
run: |
|
||||
echo "Creating E2E test data..."
|
||||
# First try to run the script from inside the container
|
||||
if docker compose -f ../docker-compose.yml exec -T rest_server test -f /app/autogpt_platform/backend/test/e2e_test_data.py; then
|
||||
echo "✅ Found e2e_test_data.py in container, running it..."
|
||||
docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python backend/test/e2e_test_data.py" || {
|
||||
echo "❌ E2E test data creation failed!"
|
||||
docker compose -f ../docker-compose.yml logs --tail=50 rest_server
|
||||
exit 1
|
||||
}
|
||||
else
|
||||
echo "⚠️ e2e_test_data.py not found in container, copying and running..."
|
||||
# Copy the script into the container and run it
|
||||
docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.yml ps -q rest_server):/tmp/e2e_test_data.py || {
|
||||
echo "❌ Failed to copy script to container"
|
||||
exit 1
|
||||
}
|
||||
docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
|
||||
echo "❌ E2E test data creation failed!"
|
||||
docker compose -f ../docker-compose.yml logs --tail=50 rest_server
|
||||
exit 1
|
||||
}
|
||||
fi
|
||||
|
||||
- name: Restore dependencies cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/.pnpm-store
|
||||
key: ${{ needs.setup.outputs.cache-key }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
|
||||
${{ runner.os }}-pnpm-
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Setup .env
|
||||
run: cp .env.example .env
|
||||
|
||||
- name: Build frontend
|
||||
run: pnpm build --turbo
|
||||
# uses Turbopack, much faster and safe enough for a test pipeline
|
||||
env:
|
||||
NEXT_PUBLIC_PW_TEST: true
|
||||
|
||||
- name: Install Browser 'chromium'
|
||||
run: pnpm playwright install --with-deps chromium
|
||||
|
||||
- name: Run Playwright tests
|
||||
run: pnpm test:no-build
|
||||
|
||||
- name: Upload Playwright artifacts
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: playwright-report
|
||||
path: playwright-report
|
||||
|
||||
- name: Print Final Docker Compose logs
|
||||
if: always()
|
||||
run: docker compose -f ../docker-compose.yml logs
|
||||
@@ -16,7 +16,7 @@ jobs:
# operations-per-run: 5000
stale-issue-message: >
This issue has automatically been marked as _stale_ because it has not had
any activity in the last 170 days. You can _unstale_ it by commenting or
any activity in the last 50 days. You can _unstale_ it by commenting or
removing the label. Otherwise, this issue will be closed in 10 days.
stale-pr-message: >
This pull request has automatically been marked as _stale_ because it has
@@ -25,7 +25,7 @@ jobs:
close-issue-message: >
This issue was closed automatically because it has been stale for 10 days
with no activity.
days-before-stale: 170
days-before-stale: 50
days-before-close: 10
# Do not touch meta issues:
exempt-issue-labels: meta,fridge,project management

@@ -1,21 +0,0 @@
name: Repo - Enforce dev as base branch
on:
pull_request_target:
branches: [ master ]
types: [ opened ]

jobs:
check_pr_target:
runs-on: ubuntu-latest
permissions:
pull-requests: write
steps:
- name: Check if PR is from dev or hotfix
if: ${{ !(startsWith(github.event.pull_request.head.ref, 'hotfix/') || github.event.pull_request.head.ref == 'dev') }}
run: |
gh pr comment ${{ github.event.number }} --repo "$REPO" \
--body $'This PR targets the `master` branch but does not come from `dev` or a `hotfix/*` branch.\n\nAutomatically setting the base branch to `dev`.'
gh pr edit ${{ github.event.number }} --base dev --repo "$REPO"
env:
GITHUB_TOKEN: ${{ github.token }}
REPO: ${{ github.repository }}
2 .github/workflows/repo-pr-label.yml (vendored)
@@ -3,7 +3,7 @@ name: Repo - Pull Request auto-label
on:
# So that PRs touching the same files as the push are updated
push:
branches: [ master, dev, release-* ]
branches: [ master, development, release-* ]
paths-ignore:
- 'classic/forge/tests/vcr_cassettes'
- 'classic/benchmark/reports/**'

1 .github/workflows/repo-workflow-checker.yml (vendored)
@@ -2,7 +2,6 @@ name: Repo - PR Status Checker
on:
pull_request:
types: [opened, synchronize, reopened]
merge_group:

jobs:
status-check:

@@ -5,8 +5,6 @@ import sys
import time
from typing import Dict, List, Tuple

CHECK_INTERVAL = 30


def get_environment_variables() -> Tuple[str, str, str, str, str]:
"""Retrieve and return necessary environment variables."""
@@ -14,11 +12,7 @@ def get_environment_variables() -> Tuple[str, str, str, str, str]:
with open(os.environ["GITHUB_EVENT_PATH"]) as f:
event = json.load(f)

# Handle both PR and merge group events
if "pull_request" in event:
sha = event["pull_request"]["head"]["sha"]
else:
sha = os.environ["GITHUB_SHA"]
sha = event["pull_request"]["head"]["sha"]

return (
os.environ["GITHUB_API_URL"],
@@ -99,10 +93,9 @@ def main():
break

print(
"Some check runs are still in progress. "
f"Waiting {CHECK_INTERVAL} seconds before checking again..."
"Some check runs are still in progress. Waiting 3 minutes before checking again..."
)
time.sleep(CHECK_INTERVAL)
time.sleep(180)

if all_others_passed:
print("All other completed check runs have passed. This check passes.")

@@ -1,60 +0,0 @@
#!/usr/bin/env python3
import sys

if sys.version_info < (3, 11):
print("Python version 3.11 or higher required")
sys.exit(1)

import tomllib


def get_package_version(package_name: str, lockfile_path: str) -> str | None:
"""Extract package version from poetry.lock file."""
try:
if lockfile_path == "-":
data = tomllib.load(sys.stdin.buffer)
else:
with open(lockfile_path, "rb") as f:
data = tomllib.load(f)
except FileNotFoundError:
print(f"Error: File '{lockfile_path}' not found", file=sys.stderr)
sys.exit(1)
except tomllib.TOMLDecodeError as e:
print(f"Error parsing TOML file: {e}", file=sys.stderr)
sys.exit(1)
except Exception as e:
print(f"Error reading file: {e}", file=sys.stderr)
sys.exit(1)

# Look for the package in the packages list
packages = data.get("package", [])
for package in packages:
if package.get("name", "").lower() == package_name.lower():
return package.get("version")

return None


def main():
if len(sys.argv) not in (2, 3):
print(
"Usages: python get_package_version_from_lockfile.py <package name> [poetry.lock path]\n"
" cat poetry.lock | python get_package_version_from_lockfile.py <package name> -",
file=sys.stderr,
)
sys.exit(1)

package_name = sys.argv[1]
lockfile_path = sys.argv[2] if len(sys.argv) == 3 else "poetry.lock"

version = get_package_version(package_name, lockfile_path)

if version:
print(version)
else:
print(f"Package '{package_name}' not found in {lockfile_path}", file=sys.stderr)
sys.exit(1)


if __name__ == "__main__":
main()
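For reference, a minimal usage sketch of this helper, mirroring how the backend CI workflow above calls it from `autogpt_platform/backend` (the relative script path is taken from that workflow; `origin/dev` is only an illustrative branch):

```bash
# Print the Poetry version pinned in the local poetry.lock
python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry

# Read another branch's poetry.lock from stdin by passing "-" as the path
git show "origin/dev":./poetry.lock \
  | python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry -
```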
10 .gitignore (vendored)
@@ -165,15 +165,9 @@ package-lock.json

# Allow for locally private items
# private
pri*
pri*
# ignore
ig*
.github_access_token
LICENSE.rtf
autogpt_platform/backend/settings.py
/.auth
/autogpt_platform/frontend/.auth

*.ign.*
.test-contents
.claude/settings.local.json
autogpt_platform/autogpt_server/settings.py

3 .gitmodules (vendored)
@@ -1,3 +1,6 @@
[submodule "classic/forge/tests/vcr_cassettes"]
path = classic/forge/tests/vcr_cassettes
url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
[submodule "autogpt_platform/supabase"]
path = autogpt_platform/supabase
url = https://github.com/supabase/supabase.git

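This hunk adds a second submodule alongside the existing test-cassettes one. As a side note (standard Git commands, not taken from the repo's docs), a fresh checkout would typically pull both in with:

```bash
# Clone and fetch submodules in one step
git clone --recurse-submodules https://github.com/Significant-Gravitas/AutoGPT.git

# Or initialize them in an existing clone
git submodule update --init --recursive
```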
@@ -10,142 +10,39 @@ repos:
|
||||
- id: check-symlinks
|
||||
- id: debug-statements
|
||||
|
||||
- repo: https://github.com/Yelp/detect-secrets
|
||||
rev: v1.5.0
|
||||
hooks:
|
||||
- id: detect-secrets
|
||||
name: Detect secrets
|
||||
description: Detects high entropy strings that are likely to be passwords.
|
||||
files: ^autogpt_platform/
|
||||
stages: [pre-push]
|
||||
|
||||
- repo: local
|
||||
# For proper type checking, all dependencies need to be up-to-date.
|
||||
# It's also a good idea to check that poetry.lock is consistent with pyproject.toml.
|
||||
hooks:
|
||||
- id: poetry-install
|
||||
name: Check & Install dependencies - AutoGPT Platform - Backend
|
||||
alias: poetry-install-platform-backend
|
||||
entry: poetry -C autogpt_platform/backend install
|
||||
# include autogpt_libs source (since it's a path dependency)
|
||||
files: ^autogpt_platform/(backend|autogpt_libs)/poetry\.lock$
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: poetry-install
|
||||
name: Check & Install dependencies - AutoGPT Platform - Libs
|
||||
alias: poetry-install-platform-libs
|
||||
entry: poetry -C autogpt_platform/autogpt_libs install
|
||||
files: ^autogpt_platform/autogpt_libs/poetry\.lock$
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: poetry-install
|
||||
name: Check & Install dependencies - Classic - AutoGPT
|
||||
alias: poetry-install-classic-autogpt
|
||||
entry: poetry -C classic/original_autogpt install
|
||||
# include forge source (since it's a path dependency)
|
||||
files: ^classic/(original_autogpt|forge)/poetry\.lock$
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: poetry-install
|
||||
name: Check & Install dependencies - Classic - Forge
|
||||
alias: poetry-install-classic-forge
|
||||
entry: poetry -C classic/forge install
|
||||
files: ^classic/forge/poetry\.lock$
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: poetry-install
|
||||
name: Check & Install dependencies - Classic - Benchmark
|
||||
alias: poetry-install-classic-benchmark
|
||||
entry: poetry -C classic/benchmark install
|
||||
files: ^classic/benchmark/poetry\.lock$
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- repo: local
|
||||
# For proper type checking, Prisma client must be up-to-date.
|
||||
hooks:
|
||||
- id: prisma-generate
|
||||
name: Prisma Generate - AutoGPT Platform - Backend
|
||||
alias: prisma-generate-platform-backend
|
||||
entry: bash -c 'cd autogpt_platform/backend && poetry run prisma generate'
|
||||
# include everything that triggers poetry install + the prisma schema
|
||||
files: ^autogpt_platform/((backend|autogpt_libs)/poetry\.lock|backend/schema.prisma)$
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- repo: https://github.com/astral-sh/ruff-pre-commit
|
||||
rev: v0.7.2
|
||||
hooks:
|
||||
- id: ruff
|
||||
name: Lint (Ruff) - AutoGPT Platform - Backend
|
||||
alias: ruff-lint-platform-backend
|
||||
files: ^autogpt_platform/backend/
|
||||
args: [--fix]
|
||||
|
||||
- id: ruff
|
||||
name: Lint (Ruff) - AutoGPT Platform - Libs
|
||||
alias: ruff-lint-platform-libs
|
||||
files: ^autogpt_platform/autogpt_libs/
|
||||
args: [--fix]
|
||||
|
||||
- id: ruff-format
|
||||
name: Format (Ruff) - AutoGPT Platform - Libs
|
||||
alias: ruff-lint-platform-libs
|
||||
files: ^autogpt_platform/autogpt_libs/
|
||||
|
||||
- repo: local
|
||||
# isort needs the context of which packages are installed to function, so we
|
||||
# can't use a vendored isort pre-commit hook (which runs in its own isolated venv).
|
||||
hooks:
|
||||
- id: isort
|
||||
name: Lint (isort) - AutoGPT Platform - Backend
|
||||
alias: isort-platform-backend
|
||||
entry: poetry -P autogpt_platform/backend run isort -p backend
|
||||
files: ^autogpt_platform/backend/
|
||||
types: [file, python]
|
||||
language: system
|
||||
|
||||
- id: isort
|
||||
name: Lint (isort) - Classic - AutoGPT
|
||||
alias: isort-classic-autogpt
|
||||
entry: poetry -P classic/original_autogpt run isort -p autogpt
|
||||
- id: isort-autogpt
|
||||
name: Lint (isort) - AutoGPT
|
||||
entry: poetry -C classic/original_autogpt run isort
|
||||
files: ^classic/original_autogpt/
|
||||
types: [file, python]
|
||||
language: system
|
||||
|
||||
- id: isort
|
||||
name: Lint (isort) - Classic - Forge
|
||||
alias: isort-classic-forge
|
||||
entry: poetry -P classic/forge run isort -p forge
|
||||
- id: isort-forge
|
||||
name: Lint (isort) - Forge
|
||||
entry: poetry -C classic/forge run isort
|
||||
files: ^classic/forge/
|
||||
types: [file, python]
|
||||
language: system
|
||||
|
||||
- id: isort
|
||||
name: Lint (isort) - Classic - Benchmark
|
||||
alias: isort-classic-benchmark
|
||||
entry: poetry -P classic/benchmark run isort -p agbenchmark
|
||||
- id: isort-benchmark
|
||||
name: Lint (isort) - Benchmark
|
||||
entry: poetry -C classic/benchmark run isort
|
||||
files: ^classic/benchmark/
|
||||
types: [file, python]
|
||||
language: system
|
||||
|
||||
- repo: https://github.com/psf/black
|
||||
rev: 24.10.0
|
||||
rev: 23.12.1
|
||||
# Black has sensible defaults, doesn't need package context, and ignores
|
||||
# everything in .gitignore, so it works fine without any config or arguments.
|
||||
hooks:
|
||||
- id: black
|
||||
name: Format (Black)
|
||||
name: Lint (Black)
|
||||
language_version: python3.10
|
||||
|
||||
- repo: https://github.com/PyCQA/flake8
|
||||
rev: 7.0.0
|
||||
@@ -153,79 +50,53 @@ repos:
|
||||
# them separately.
|
||||
hooks:
|
||||
- id: flake8
|
||||
name: Lint (Flake8) - Classic - AutoGPT
|
||||
alias: flake8-classic-autogpt
|
||||
name: Lint (Flake8) - AutoGPT
|
||||
alias: flake8-autogpt
|
||||
files: ^classic/original_autogpt/(autogpt|scripts|tests)/
|
||||
args: [--config=classic/original_autogpt/.flake8]
|
||||
|
||||
- id: flake8
|
||||
name: Lint (Flake8) - Classic - Forge
|
||||
alias: flake8-classic-forge
|
||||
name: Lint (Flake8) - Forge
|
||||
alias: flake8-forge
|
||||
files: ^classic/forge/(forge|tests)/
|
||||
args: [--config=classic/forge/.flake8]
|
||||
|
||||
- id: flake8
|
||||
name: Lint (Flake8) - Classic - Benchmark
|
||||
alias: flake8-classic-benchmark
|
||||
name: Lint (Flake8) - Benchmark
|
||||
alias: flake8-benchmark
|
||||
files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
|
||||
args: [--config=classic/benchmark/.flake8]
|
||||
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: prettier
|
||||
name: Format (Prettier) - AutoGPT Platform - Frontend
|
||||
alias: format-platform-frontend
|
||||
entry: bash -c 'cd autogpt_platform/frontend && npx prettier --write $(echo "$@" | sed "s|autogpt_platform/frontend/||g")' --
|
||||
files: ^autogpt_platform/frontend/
|
||||
types: [file]
|
||||
language: system
|
||||
|
||||
- repo: local
|
||||
# To have watertight type checking, we check *all* the files in an affected
|
||||
# project. To trigger on poetry.lock we also reset the file `types` filter.
|
||||
hooks:
|
||||
- id: pyright
|
||||
name: Typecheck - AutoGPT Platform - Backend
|
||||
alias: pyright-platform-backend
|
||||
entry: poetry -C autogpt_platform/backend run pyright
|
||||
# include forge source (since it's a path dependency) but exclude *_test.py files:
|
||||
files: ^autogpt_platform/(backend/((backend|test)/|(\w+\.py|poetry\.lock)$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: pyright
|
||||
name: Typecheck - AutoGPT Platform - Libs
|
||||
alias: pyright-platform-libs
|
||||
entry: poetry -C autogpt_platform/autogpt_libs run pyright
|
||||
files: ^autogpt_platform/autogpt_libs/(autogpt_libs/|poetry\.lock$)
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: pyright
|
||||
name: Typecheck - Classic - AutoGPT
|
||||
alias: pyright-classic-autogpt
|
||||
name: Typecheck - AutoGPT
|
||||
alias: pyright-autogpt
|
||||
entry: poetry -C classic/original_autogpt run pyright
|
||||
args: [-p, autogpt, autogpt]
|
||||
# include forge source (since it's a path dependency) but exclude *_test.py files:
|
||||
files: ^(classic/original_autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
|
||||
files: ^(classic/original_autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|classic/forge/(classic/forge/.*(?<!_test)\.py|poetry\.lock)$)
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: pyright
|
||||
name: Typecheck - Classic - Forge
|
||||
alias: pyright-classic-forge
|
||||
name: Typecheck - Forge
|
||||
alias: pyright-forge
|
||||
entry: poetry -C classic/forge run pyright
|
||||
files: ^classic/forge/(forge/|poetry\.lock$)
|
||||
args: [-p, forge, forge]
|
||||
files: ^classic/forge/(classic/forge/|poetry\.lock$)
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: pyright
|
||||
name: Typecheck - Classic - Benchmark
|
||||
alias: pyright-classic-benchmark
|
||||
name: Typecheck - Benchmark
|
||||
alias: pyright-benchmark
|
||||
entry: poetry -C classic/benchmark run pyright
|
||||
args: [-p, benchmark, benchmark]
|
||||
files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
|
||||
types: [file]
|
||||
language: system
|
||||
@@ -233,46 +104,24 @@ repos:
|
||||
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: tsc
|
||||
name: Typecheck - AutoGPT Platform - Frontend
|
||||
entry: bash -c 'cd autogpt_platform/frontend && pnpm type-check'
|
||||
files: ^autogpt_platform/frontend/
|
||||
types: [file]
|
||||
- id: pytest-autogpt
|
||||
name: Run tests - AutoGPT (excl. slow tests)
|
||||
entry: bash -c 'cd classic/original_autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
|
||||
# include forge source (since it's a path dependency) but exclude *_test.py files:
|
||||
files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(classic/forge/.*(?<!_test)\.py|poetry\.lock)$)
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
# - repo: local
|
||||
# hooks:
|
||||
# - id: pytest
|
||||
# name: Run tests - AutoGPT Platform - Backend
|
||||
# alias: pytest-platform-backend
|
||||
# entry: bash -c 'cd autogpt_platform/backend && poetry run pytest'
|
||||
# # include autogpt_libs source (since it's a path dependency) but exclude *_test.py files:
|
||||
# files: ^autogpt_platform/(backend/((backend|test)/|poetry\.lock$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
|
||||
# language: system
|
||||
# pass_filenames: false
|
||||
- id: pytest-forge
|
||||
name: Run tests - Forge (excl. slow tests)
|
||||
entry: bash -c 'cd classic/forge && poetry run pytest --cov=forge -m "not slow"'
|
||||
files: ^classic/forge/(classic/forge/|tests/|poetry\.lock$)
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
# - id: pytest
|
||||
# name: Run tests - Classic - AutoGPT (excl. slow tests)
|
||||
# alias: pytest-classic-autogpt
|
||||
# entry: bash -c 'cd classic/original_autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
|
||||
# # include forge source (since it's a path dependency) but exclude *_test.py files:
|
||||
# files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
|
||||
# language: system
|
||||
# pass_filenames: false
|
||||
|
||||
# - id: pytest
|
||||
# name: Run tests - Classic - Forge (excl. slow tests)
|
||||
# alias: pytest-classic-forge
|
||||
# entry: bash -c 'cd classic/forge && poetry run pytest --cov=forge -m "not slow"'
|
||||
# files: ^classic/forge/(forge/|tests/|poetry\.lock$)
|
||||
# language: system
|
||||
# pass_filenames: false
|
||||
|
||||
# - id: pytest
|
||||
# name: Run tests - Classic - Benchmark
|
||||
# alias: pytest-classic-benchmark
|
||||
# entry: bash -c 'cd classic/benchmark && poetry run pytest --cov=benchmark'
|
||||
# files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
|
||||
# language: system
|
||||
# pass_filenames: false
|
||||
- id: pytest-benchmark
|
||||
name: Run tests - Benchmark
|
||||
entry: bash -c 'cd classic/benchmark && poetry run pytest --cov=benchmark'
|
||||
files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
17 .vscode/all-projects.code-workspace (vendored)
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"folders": [
|
||||
{
|
||||
"name": "frontend",
|
||||
"path": "../autogpt_platform/frontend"
|
||||
"name": "autogpt_server",
|
||||
"path": "../autogpt_platform/autogpt_server"
|
||||
},
|
||||
{
|
||||
"name": "backend",
|
||||
"path": "../autogpt_platform/backend"
|
||||
"name": "autogpt_builder",
|
||||
"path": "../autogpt_platform/autogpt_builder"
|
||||
},
|
||||
{
|
||||
"name": "market",
|
||||
@@ -24,7 +24,10 @@
|
||||
"name": "docs",
|
||||
"path": "../docs"
|
||||
},
|
||||
|
||||
{
|
||||
"name": "[root]",
|
||||
"path": ".."
|
||||
},
|
||||
{
|
||||
"name": "classic - autogpt",
|
||||
"path": "../classic/original_autogpt"
|
||||
@@ -41,10 +44,6 @@
|
||||
"name": "classic - frontend",
|
||||
"path": "../classic/frontend"
|
||||
},
|
||||
{
|
||||
"name": "[root]",
|
||||
"path": ".."
|
||||
}
|
||||
],
|
||||
"settings": {
|
||||
"python.analysis.typeCheckingMode": "basic"
|
||||
|
||||
67 .vscode/launch.json (vendored)
@@ -1,67 +0,0 @@
|
||||
{
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"name": "Frontend: Server Side",
|
||||
"type": "node-terminal",
|
||||
"request": "launch",
|
||||
"cwd": "${workspaceFolder}/autogpt_platform/frontend",
|
||||
"command": "pnpm dev"
|
||||
},
|
||||
{
|
||||
"name": "Frontend: Client Side",
|
||||
"type": "msedge",
|
||||
"request": "launch",
|
||||
"url": "http://localhost:3000"
|
||||
},
|
||||
{
|
||||
"name": "Frontend: Full Stack",
|
||||
"type": "node-terminal",
|
||||
|
||||
"request": "launch",
|
||||
"command": "pnpm dev",
|
||||
"cwd": "${workspaceFolder}/autogpt_platform/frontend",
|
||||
"serverReadyAction": {
|
||||
"pattern": "- Local:.+(https?://.+)",
|
||||
"uriFormat": "%s",
|
||||
"action": "debugWithChrome"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "Backend",
|
||||
"type": "debugpy",
|
||||
"request": "launch",
|
||||
"module": "backend.app",
|
||||
"env": {
|
||||
"OBJC_DISABLE_INITIALIZE_FORK_SAFETY": "YES"
|
||||
},
|
||||
"envFile": "${workspaceFolder}/backend/.env",
|
||||
"justMyCode": false,
|
||||
"cwd": "${workspaceFolder}/autogpt_platform/backend"
|
||||
},
|
||||
{
|
||||
"name": "Marketplace",
|
||||
"type": "debugpy",
|
||||
"request": "launch",
|
||||
"module": "autogpt_platform.market.main",
|
||||
"env": {
|
||||
"ENV": "dev"
|
||||
},
|
||||
"envFile": "${workspaceFolder}/market/.env",
|
||||
"justMyCode": false,
|
||||
"cwd": "${workspaceFolder}/market"
|
||||
}
|
||||
],
|
||||
"compounds": [
|
||||
{
|
||||
"name": "Everything",
|
||||
"configurations": ["Backend", "Frontend: Full Stack"],
|
||||
// "preLaunchTask": "${defaultBuildTask}",
|
||||
"stopAll": true,
|
||||
"presentation": {
|
||||
"hidden": false,
|
||||
"order": 0
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
53 AGENTS.md
@@ -1,53 +0,0 @@
# AutoGPT Platform Contribution Guide

This guide provides context for Codex when updating the **autogpt_platform** folder.

## Directory overview

- `autogpt_platform/backend` – FastAPI based backend service.
- `autogpt_platform/autogpt_libs` – Shared Python libraries.
- `autogpt_platform/frontend` – Next.js + Typescript frontend.
- `autogpt_platform/docker-compose.yml` – development stack.

See `docs/content/platform/getting-started.md` for setup instructions.

## Code style

- Format Python code with `poetry run format`.
- Format frontend code using `pnpm format`.

## Testing

- Backend: `poetry run test` (runs pytest with a docker based postgres + prisma).
- Frontend: `pnpm test` or `pnpm test-ui` for Playwright tests. See `docs/content/platform/contributing/tests.md` for tips.

Always run the relevant linters and tests before committing.
Use conventional commit messages for all commits (e.g. `feat(backend): add API`).
Types:
- feat
- fix
- refactor
- ci
- dx (developer experience)
Scopes:
- platform
- platform/library
- platform/marketplace
- backend
- backend/executor
- frontend
- frontend/library
- frontend/marketplace
- blocks
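For example, a commit message that follows the types and scopes listed above could look like this (the change it describes is purely illustrative):

```bash
# Illustrative only: a conventional commit using a type and scope from the lists above
git commit -m "feat(frontend/library): add pagination to the agent list"
```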

## Pull requests

- Use the template in `.github/PULL_REQUEST_TEMPLATE.md`.
- Rely on the pre-commit checks for linting and formatting.
- Fill out the **Changes** section and the checklist.
- Use conventional commit titles with a scope (e.g. `feat(frontend): add feature`).
- Keep out-of-scope changes under 20% of the PR.
- Ensure PR descriptions are complete.
- For changes touching `data/*.py`, validate user ID checks or explain why not needed.
- If adding protected frontend routes, update `frontend/lib/supabase/middleware.ts`.
- Use the linear ticket branch structure if given (e.g. `codex/open-1668-resume-dropped-runs`).
@@ -2,14 +2,14 @@
If you are reading this, you are probably looking for the full **[contribution guide]**,
which is part of our [wiki].

Also check out our [🚀 Roadmap][roadmap] for information about our priorities and associated tasks.
<!-- You can find our immediate priorities and their progress on our public [kanban board]. -->

[contribution guide]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing
[wiki]: https://github.com/Significant-Gravitas/AutoGPT/wiki
[roadmap]: https://github.com/Significant-Gravitas/AutoGPT/discussions/6971
[kanban board]: https://github.com/orgs/Significant-Gravitas/projects/1

## Contributing to the AutoGPT Platform Folder
All contributions to [the autogpt_platform folder](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform) will be under our [Contribution License Agreement](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform/Contributor%20License%20Agreement%20(CLA).md). By making a pull request contributing to this folder, you agree to the terms of our CLA for your contribution. All contributions to other folders will be under the MIT license.

## In short
1. Avoid duplicate work, issues, PRs etc.
2. We encourage you to collaborate with fellow community members on some of our bigger

199 LICENSE
@@ -1,204 +1,7 @@
|
||||
All portions of this repository are under one of two licenses.
|
||||
|
||||
- Everything inside the autogpt_platform folder is under the Polyform Shield License.
|
||||
- Everything outside the autogpt_platform folder is under the MIT License.
|
||||
|
||||
More info:
|
||||
|
||||
**Polyform Shield License:**
|
||||
Code and content within the `autogpt_platform` folder is licensed under the Polyform Shield License. This new project is our in-development platform for building, deploying and managing agents.
|
||||
Read more about this effort here: https://agpt.co/blog/introducing-the-autogpt-platform
|
||||
|
||||
**MIT License:**
|
||||
All other portions of the AutoGPT repository (i.e., everything outside the `autogpt_platform` folder) are licensed under the MIT License. This includes:
|
||||
- The Original, stand-alone AutoGPT Agent
|
||||
- Forge: https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/forge
|
||||
- AG Benchmark: https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/benchmark
|
||||
- AutoGPT Classic GUI: https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/frontend.
|
||||
|
||||
We also publish additional work under the MIT Licence in other repositories, such as GravitasML (https://github.com/Significant-Gravitas/gravitasml) which is developed for and used in the AutoGPT Platform, and our [Code Ability](https://github.com/Significant-Gravitas/AutoGPT-Code-Ability) project.
|
||||
|
||||
Both licences are available to read below:
|
||||
|
||||
=====================================================
|
||||
-----------------------------------------------------
|
||||
=====================================================
|
||||
|
||||
# PolyForm Shield License 1.0.0
|
||||
|
||||
<https://polyformproject.org/licenses/shield/1.0.0>
|
||||
|
||||
## Acceptance
|
||||
|
||||
In order to get any license under these terms, you must agree
|
||||
to them as both strict obligations and conditions to all
|
||||
your licenses.
|
||||
|
||||
## Copyright License
|
||||
|
||||
The licensor grants you a copyright license for the
|
||||
software to do everything you might do with the software
|
||||
that would otherwise infringe the licensor's copyright
|
||||
in it for any permitted purpose. However, you may
|
||||
only distribute the software according to [Distribution
|
||||
License](#distribution-license) and make changes or new works
|
||||
based on the software according to [Changes and New Works
|
||||
License](#changes-and-new-works-license).
|
||||
|
||||
## Distribution License
|
||||
|
||||
The licensor grants you an additional copyright license
|
||||
to distribute copies of the software. Your license
|
||||
to distribute covers distributing the software with
|
||||
changes and new works permitted by [Changes and New Works
|
||||
License](#changes-and-new-works-license).
|
||||
|
||||
## Notices
|
||||
|
||||
You must ensure that anyone who gets a copy of any part of
|
||||
the software from you also gets a copy of these terms or the
|
||||
URL for them above, as well as copies of any plain-text lines
|
||||
beginning with `Required Notice:` that the licensor provided
|
||||
with the software. For example:
|
||||
|
||||
> Required Notice: Copyright Yoyodyne, Inc. (http://example.com)
|
||||
|
||||
## Changes and New Works License
|
||||
|
||||
The licensor grants you an additional copyright license to
|
||||
make changes and new works based on the software for any
|
||||
permitted purpose.
|
||||
|
||||
## Patent License
|
||||
|
||||
The licensor grants you a patent license for the software that
|
||||
covers patent claims the licensor can license, or becomes able
|
||||
to license, that you would infringe by using the software.
|
||||
|
||||
## Noncompete
|
||||
|
||||
Any purpose is a permitted purpose, except for providing any
|
||||
product that competes with the software or any product the
|
||||
licensor or any of its affiliates provides using the software.
|
||||
|
||||
## Competition
|
||||
|
||||
Goods and services compete even when they provide functionality
|
||||
through different kinds of interfaces or for different technical
|
||||
platforms. Applications can compete with services, libraries
|
||||
with plugins, frameworks with development tools, and so on,
|
||||
even if they're written in different programming languages
|
||||
or for different computer architectures. Goods and services
|
||||
compete even when provided free of charge. If you market a
|
||||
product as a practical substitute for the software or another
|
||||
product, it definitely competes.
|
||||
|
||||
## New Products

If you are using the software to provide a product that does
not compete, but the licensor or any of its affiliates brings
your product into competition by providing a new version of
the software or another product using the software, you may
continue using versions of the software available under these
terms beforehand to provide your competing product, but not
any later versions.

## Discontinued Products

You may begin using the software to compete with a product
or service that the licensor or any of its affiliates has
stopped providing, unless the licensor includes a plain-text
line beginning with `Licensor Line of Business:` with the
software that mentions that line of business. For example:

> Licensor Line of Business: YoyodyneCMS Content Management
> System (http://example.com/cms)

## Sales of Business

If the licensor or any of its affiliates sells a line of
business developing the software or using the software
to provide a product, the buyer can also enforce
[Noncompete](#noncompete) for that product.

## Fair Use

You may have "fair use" rights for the software under the
law. These terms do not limit them.

## No Other Rights

These terms do not allow you to sublicense or transfer any of
your licenses to anyone else, or prevent the licensor from
granting licenses to anyone else. These terms do not imply
any other licenses.

## Patent Defense

If you make any written claim that the software infringes or
contributes to infringement of any patent, your patent license
for the software granted under these terms ends immediately. If
your company makes such a claim, your patent license ends
immediately for work on behalf of your company.

## Violations

The first time you are notified in writing that you have
violated any of these terms, or done anything with the software
not covered by your licenses, your licenses can nonetheless
continue if you come into full compliance with these terms,
and take practical steps to correct past violations, within
32 days of receiving notice. Otherwise, all your licenses
end immediately.

## No Liability

***As far as the law allows, the software comes as is, without
any warranty or condition, and the licensor will not be liable
to you for any damages arising out of these terms or the use
or nature of the software, under any kind of legal claim.***

## Definitions

The **licensor** is the individual or entity offering these
terms, and the **software** is the software the licensor makes
available under these terms.

A **product** can be a good or service, or a combination
of them.

**You** refers to the individual or entity agreeing to these
terms.

**Your company** is any legal entity, sole proprietorship,
or other kind of organization that you work for, plus all
its affiliates.

**Affiliates** means the other organizations than an
organization has control over, is under the control of, or is
under common control with.

**Control** means ownership of substantially all the assets of
an entity, or the power to direct its management and policies
by vote, contract, or otherwise. Control can be direct or
indirect.

**Your licenses** are all the licenses granted to you for the
software under these terms.

**Use** means anything you do with the software requiring one
of your licenses.

=====================================================
-----------------------------------------------------
=====================================================

MIT License

Copyright (c) 2023 Toran Bruce Richards

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
@@ -206,11 +9,9 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE

136  README.md
@@ -1,129 +1,43 @@
|
||||
# AutoGPT: Build, Deploy, and Run AI Agents
|
||||
# AutoGPT: Build & Use AI Agents
|
||||
|
||||
[](https://discord.gg/autogpt)  
|
||||
[](https://discord.gg/autogpt)  
|
||||
[](https://twitter.com/Auto_GPT)  
|
||||
[](https://opensource.org/licenses/MIT)
|
||||
|
||||
**AutoGPT** is a powerful platform that allows you to create, deploy, and manage continuous AI agents that automate complex workflows.
|
||||
**AutoGPT** is a powerful tool that lets you create and run intelligent agents. These agents can perform various tasks automatically, making your life easier.
|
||||
|
||||
## Hosting Options
|
||||
- Download to self-host (Free!)
|
||||
- [Join the Waitlist](https://bit.ly/3ZDijAI) for the cloud-hosted beta (Closed Beta - Public release Coming Soon!)
|
||||
## How to Get Started
|
||||
|
||||
## How to Self-Host the AutoGPT Platform
|
||||
> [!NOTE]
|
||||
> Setting up and hosting the AutoGPT Platform yourself is a technical process.
|
||||
> If you'd rather something that just works, we recommend [joining the waitlist](https://bit.ly/3ZDijAI) for the cloud-hosted beta.
|
||||
https://github.com/user-attachments/assets/8508f4dc-b362-4cab-900f-644964a96cdf
|
||||
|
||||
### System Requirements
|
||||
### 🧱 AutoGPT Builder
|
||||
|
||||
Before proceeding with the installation, ensure your system meets the following requirements:
|
||||
The AutoGPT Builder is the frontend. It allows you to design agents using an easy flowchart style. You build your agent by connecting blocks, where each block performs a single action. It's simple and intuitive!
|
||||
|
||||
#### Hardware Requirements
|
||||
- CPU: 4+ cores recommended
|
||||
- RAM: Minimum 8GB, 16GB recommended
|
||||
- Storage: At least 10GB of free space
|
||||
|
||||
#### Software Requirements
|
||||
- Operating Systems:
|
||||
- Linux (Ubuntu 20.04 or newer recommended)
|
||||
- macOS (10.15 or newer)
|
||||
- Windows 10/11 with WSL2
|
||||
- Required Software (with minimum versions):
|
||||
- Docker Engine (20.10.0 or newer)
|
||||
- Docker Compose (2.0.0 or newer)
|
||||
- Git (2.30 or newer)
|
||||
- Node.js (16.x or newer)
|
||||
- npm (8.x or newer)
|
||||
- VSCode (1.60 or newer) or any modern code editor
|
||||
|
||||
#### Network Requirements
|
||||
- Stable internet connection
|
||||
- Access to required ports (will be configured in Docker)
|
||||
- Ability to make outbound HTTPS connections
|
||||
|
||||
### Updated Setup Instructions:
|
||||
We've moved to a fully maintained and regularly updated documentation site.
|
||||
|
||||
👉 [Follow the official self-hosting guide here](https://docs.agpt.co/platform/getting-started/)
|
||||
|
||||
|
||||
This tutorial assumes you have Docker, VSCode, git and npm installed.
|
||||
|
||||
---
|
||||
|
||||
#### ⚡ Quick Setup with One-Line Script (Recommended for Local Hosting)
|
||||
|
||||
Skip the manual steps and get started in minutes using our automatic setup script.
|
||||
|
||||
For macOS/Linux:
|
||||
```
|
||||
curl -fsSL https://setup.agpt.co/install.sh -o install.sh && bash install.sh
|
||||
```
|
||||
|
||||
For Windows (PowerShell):
|
||||
```
|
||||
powershell -c "iwr https://setup.agpt.co/install.bat -o install.bat; ./install.bat"
|
||||
```
|
||||
|
||||
This will install dependencies, configure Docker, and launch your local instance — all in one go.
|
||||
|
||||
### 🧱 AutoGPT Frontend
|
||||
|
||||
The AutoGPT frontend is where users interact with our powerful AI automation platform. It offers multiple ways to engage with and leverage our AI agents. This is the interface where you'll bring your AI automation ideas to life:
|
||||
|
||||
**Agent Builder:** For those who want to customize, our intuitive, low-code interface allows you to design and configure your own AI agents.
|
||||
|
||||
**Workflow Management:** Build, modify, and optimize your automation workflows with ease. You build your agent by connecting blocks, where each block performs a single action.
|
||||
|
||||
**Deployment Controls:** Manage the lifecycle of your agents, from testing to production.
|
||||
|
||||
**Ready-to-Use Agents:** Don't want to build? Simply select from our library of pre-configured agents and put them to work immediately.
|
||||
|
||||
**Agent Interaction:** Whether you've built your own or are using pre-configured agents, easily run and interact with them through our user-friendly interface.
|
||||
|
||||
**Monitoring and Analytics:** Keep track of your agents' performance and gain insights to continually improve your automation processes.
|
||||
|
||||
[Read this guide](https://docs.agpt.co/platform/new_blocks/) to learn how to build your own custom blocks.
|
||||
[Read this guide](https://docs.agpt.co/server/new_blocks/) to learn how to build your own custom blocks.
|
||||
|
||||
### 💽 AutoGPT Server
|
||||
|
||||
The AutoGPT Server is the powerhouse of our platform. This is where your agents run. Once deployed, agents can be triggered by external sources and can operate continuously. It contains all the essential components that make AutoGPT run smoothly.
|
||||
|
||||
**Source Code:** The core logic that drives our agents and automation processes.
|
||||
|
||||
**Infrastructure:** Robust systems that ensure reliable and scalable performance.
|
||||
|
||||
**Marketplace:** A comprehensive marketplace where you can find and deploy a wide range of pre-built agents.
|
||||
The AutoGPT Server is the backend. This is where your agents run. Once deployed, agents can be triggered by external sources and can operate continuously.
|
||||
|
||||
### 🐙 Example Agents
|
||||
|
||||
Here are two examples of what you can do with AutoGPT:
|
||||
|
||||
1. **Generate Viral Videos from Trending Topics**
|
||||
- This agent reads topics on Reddit.
|
||||
- It identifies trending topics.
|
||||
- It then automatically creates a short-form video based on the content.
|
||||
1. **Reddit Marketing Agent**
|
||||
- This agent reads comments on Reddit.
|
||||
- It looks for people asking about your product.
|
||||
- It then automatically responds to them.
|
||||
|
||||
2. **Identify Top Quotes from Videos for Social Media**
|
||||
2. **YouTube Content Repurposing Agent**
|
||||
- This agent subscribes to your YouTube channel.
|
||||
- When you post a new video, it transcribes it.
|
||||
- It uses AI to identify the most impactful quotes to generate a summary.
|
||||
- Then, it writes a post to automatically publish to your social media.
|
||||
- It uses AI to write a search engine optimized blog post.
|
||||
- Then, it publishes this blog post to your Medium account.
|
||||
|
||||
These examples show just a glimpse of what you can achieve with AutoGPT! You can create customized workflows to build agents for any use case.
|
||||
These examples show just a glimpse of what you can achieve with AutoGPT!
|
||||
|
||||
---
|
||||
|
||||
### **License Overview:**
|
||||
|
||||
🛡️ **Polyform Shield License:**
|
||||
All code and content within the `autogpt_platform` folder is licensed under the Polyform Shield License. This new project is our in-development platform for building, deploying, and managing agents.</br>_[Read more about this effort](https://agpt.co/blog/introducing-the-autogpt-platform)_
|
||||
|
||||
🦉 **MIT License:**
|
||||
All other portions of the AutoGPT repository (i.e., everything outside the `autogpt_platform` folder) are licensed under the MIT License. This includes the original stand-alone AutoGPT Agent, along with projects such as [Forge](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/forge), [agbenchmark](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/benchmark), and the [AutoGPT Classic GUI](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/frontend).</br>We also publish additional work under the MIT License in other repositories, such as [GravitasML](https://github.com/Significant-Gravitas/gravitasml), which is developed for and used in the AutoGPT Platform. See also our MIT-licensed [Code Ability](https://github.com/Significant-Gravitas/AutoGPT-Code-Ability) project.
|
||||
|
||||
---
|
||||
### Mission
|
||||
Our mission is to provide the tools, so that you can focus on what matters:
|
||||
|
||||
- 🏗️ **Building** - Lay the foundation for something amazing.
|
||||
@@ -136,6 +50,7 @@ Be part of the revolution! **AutoGPT** is here to stay, at the forefront of AI i
|
||||
 | 
|
||||
**🚀 [Contributing](CONTRIBUTING.md)**
|
||||
|
||||
|
||||
---
|
||||
## 🤖 AutoGPT Classic
|
||||
> Below is information about the classic version of AutoGPT.
|
||||
@@ -159,7 +74,7 @@ This guide will walk you through the process of creating your own agent and usin
|
||||
|
||||
📦 [`agbenchmark`](https://pypi.org/project/agbenchmark/) on Pypi
|
||||
 | 
|
||||
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/benchmark) about the Benchmark
|
||||
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/blob/master/benchmark) about the Benchmark
|
||||
|
||||
### 💻 UI
|
||||
|
||||
@@ -198,7 +113,7 @@ Just clone the repo, install dependencies with `./run setup`, and you should be
|
||||
|
||||
[](https://discord.gg/autogpt)
|
||||
|
||||
To report a bug or request a feature, create a [GitHub Issue](https://github.com/Significant-Gravitas/AutoGPT/issues/new/choose). Please ensure someone else hasn't created an issue for the same topic.
|
||||
To report a bug or request a feature, create a [GitHub Issue](https://github.com/Significant-Gravitas/AutoGPT/issues/new/choose). Please ensure someone else hasn’t created an issue for the same topic.
|
||||
|
||||
## 🤝 Sister projects
|
||||
|
||||
@@ -208,8 +123,6 @@ To maintain a uniform standard and ensure seamless compatibility with many curre
|
||||
|
||||
---
|
||||
|
||||
## Stars stats
|
||||
|
||||
<p align="center">
|
||||
<a href="https://star-history.com/#Significant-Gravitas/AutoGPT">
|
||||
<picture>
|
||||
@@ -219,10 +132,3 @@ To maintain a uniform standard and ensure seamless compatibility with many curre
|
||||
</picture>
|
||||
</a>
|
||||
</p>
|
||||
|
||||
|
||||
## ⚡ Contributors
|
||||
|
||||
<a href="https://github.com/Significant-Gravitas/AutoGPT/graphs/contributors" alt="View Contributors">
|
||||
<img src="https://contrib.rocks/image?repo=Significant-Gravitas/AutoGPT&max=1000&columns=10" alt="Contributors" />
|
||||
</a>
|
||||
|
||||
48  SECURITY.md
@@ -1,48 +0,0 @@

# Security Policy

## Reporting Security Issues

We take the security of our project seriously. If you believe you have found a security vulnerability, please report it to us privately. **Please do not report security vulnerabilities through public GitHub issues, discussions, or pull requests.**

> **Important Note**: Any code within the `classic/` folder is considered legacy, unsupported, and out of scope for security reports. We will not address security vulnerabilities in this deprecated code.

Instead, please report them via:
- [GitHub Security Advisory](https://github.com/Significant-Gravitas/AutoGPT/security/advisories/new)
<!--- [Huntr.dev](https://huntr.com/repos/significant-gravitas/autogpt) - where you may be eligible for a bounty-->

### Reporting Process
1. **Submit Report**: Use one of the above channels to submit your report
2. **Response Time**: Our team will acknowledge receipt of your report within 14 business days.
3. **Collaboration**: We will collaborate with you to understand and validate the issue
4. **Resolution**: We will work on a fix and coordinate the release process

### Disclosure Policy
- Please provide detailed reports with reproducible steps
- Include the version/commit hash where you discovered the vulnerability
- Allow us a 90-day security fix window before any public disclosure
- After a patch is released, allow 30 days for users to update before public disclosure (at most 120 days in total between the initial report and public disclosure)
- Share any potential mitigations or workarounds if known

## Supported Versions
Only the following versions are eligible for security updates:

| Version | Supported |
|---------|-----------|
| Latest release on master branch | ✅ |
| Development commits (pre-master) | ✅ |
| Classic folder (deprecated) | ❌ |
| All other versions | ❌ |

## Security Best Practices
When using this project:
1. Always use the latest stable version
2. Review security advisories before updating
3. Follow our security documentation and guidelines
4. Keep your dependencies up to date
5. Do not use code from the `classic/` folder as it is deprecated and unsupported

## Past Security Advisories
For a list of past security advisories, please visit our [Security Advisory Page](https://github.com/Significant-Gravitas/AutoGPT/security/advisories) and [Huntr Disclosures Page](https://huntr.com/repos/significant-gravitas/autogpt).

---
Last updated: November 2024
@@ -1,123 +0,0 @@
|
||||
############
|
||||
# Secrets
|
||||
# YOU MUST CHANGE THESE BEFORE GOING INTO PRODUCTION
|
||||
############
|
||||
|
||||
POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password
|
||||
JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long
|
||||
ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE
|
||||
SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
|
||||
DASHBOARD_USERNAME=supabase
|
||||
DASHBOARD_PASSWORD=this_password_is_insecure_and_should_be_updated
|
||||
SECRET_KEY_BASE=UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
|
||||
VAULT_ENC_KEY=your-encryption-key-32-chars-min
|
||||
|
||||
|
||||
############
|
||||
# Database - You can change these to any PostgreSQL database that has logical replication enabled.
|
||||
############
|
||||
|
||||
POSTGRES_HOST=db
|
||||
POSTGRES_DB=postgres
|
||||
POSTGRES_PORT=5432
|
||||
# default user is postgres
|
||||
|
||||
|
||||
############
|
||||
# Supavisor -- Database pooler
|
||||
############
|
||||
POOLER_PROXY_PORT_TRANSACTION=6543
|
||||
POOLER_DEFAULT_POOL_SIZE=20
|
||||
POOLER_MAX_CLIENT_CONN=100
|
||||
POOLER_TENANT_ID=your-tenant-id
|
||||
|
||||
|
||||
############
|
||||
# API Proxy - Configuration for the Kong Reverse proxy.
|
||||
############
|
||||
|
||||
KONG_HTTP_PORT=8000
|
||||
KONG_HTTPS_PORT=8443
|
||||
|
||||
|
||||
############
|
||||
# API - Configuration for PostgREST.
|
||||
############
|
||||
|
||||
PGRST_DB_SCHEMAS=public,storage,graphql_public
|
||||
|
||||
|
||||
############
|
||||
# Auth - Configuration for the GoTrue authentication server.
|
||||
############
|
||||
|
||||
## General
|
||||
SITE_URL=http://localhost:3000
|
||||
ADDITIONAL_REDIRECT_URLS=
|
||||
JWT_EXPIRY=3600
|
||||
DISABLE_SIGNUP=false
|
||||
API_EXTERNAL_URL=http://localhost:8000
|
||||
|
||||
## Mailer Config
|
||||
MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify"
|
||||
MAILER_URLPATHS_INVITE="/auth/v1/verify"
|
||||
MAILER_URLPATHS_RECOVERY="/auth/v1/verify"
|
||||
MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify"
|
||||
|
||||
## Email auth
|
||||
ENABLE_EMAIL_SIGNUP=true
|
||||
ENABLE_EMAIL_AUTOCONFIRM=false
|
||||
SMTP_ADMIN_EMAIL=admin@example.com
|
||||
SMTP_HOST=supabase-mail
|
||||
SMTP_PORT=2500
|
||||
SMTP_USER=fake_mail_user
|
||||
SMTP_PASS=fake_mail_password
|
||||
SMTP_SENDER_NAME=fake_sender
|
||||
ENABLE_ANONYMOUS_USERS=false
|
||||
|
||||
## Phone auth
|
||||
ENABLE_PHONE_SIGNUP=true
|
||||
ENABLE_PHONE_AUTOCONFIRM=true
|
||||
|
||||
|
||||
############
|
||||
# Studio - Configuration for the Dashboard
|
||||
############
|
||||
|
||||
STUDIO_DEFAULT_ORGANIZATION=Default Organization
|
||||
STUDIO_DEFAULT_PROJECT=Default Project
|
||||
|
||||
STUDIO_PORT=3000
|
||||
# replace if you intend to use Studio outside of localhost
|
||||
SUPABASE_PUBLIC_URL=http://localhost:8000
|
||||
|
||||
# Enable webp support
|
||||
IMGPROXY_ENABLE_WEBP_DETECTION=true
|
||||
|
||||
# Add your OpenAI API key to enable SQL Editor Assistant
|
||||
OPENAI_API_KEY=
|
||||
|
||||
|
||||
############
|
||||
# Functions - Configuration for Functions
|
||||
############
|
||||
# NOTE: VERIFY_JWT applies to all functions. Per-function VERIFY_JWT is not supported yet.
|
||||
FUNCTIONS_VERIFY_JWT=false
|
||||
|
||||
|
||||
############
|
||||
# Logs - Configuration for Logflare
|
||||
# Please refer to https://supabase.com/docs/reference/self-hosting-analytics/introduction
|
||||
############
|
||||
|
||||
LOGFLARE_LOGGER_BACKEND_API_KEY=your-super-secret-and-long-logflare-key
|
||||
|
||||
# Change vector.toml sinks to reflect this change
|
||||
LOGFLARE_API_KEY=your-super-secret-and-long-logflare-key
|
||||
|
||||
# Docker socket location - this value will differ depending on your OS
|
||||
DOCKER_SOCKET_LOCATION=/var/run/docker.sock
|
||||
|
||||
# Google Cloud Project details
|
||||
GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID
|
||||
GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER
|
||||
2  autogpt_platform/.gitignore (vendored)
@@ -1,2 +0,0 @@
*.ignore.*
*.ign.*
@@ -1,146 +0,0 @@

# CLAUDE.md

This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.

## Repository Overview

AutoGPT Platform is a monorepo containing:
- **Backend** (`/backend`): Python FastAPI server with async support
- **Frontend** (`/frontend`): Next.js React application
- **Shared Libraries** (`/autogpt_libs`): Common Python utilities

## Essential Commands

### Backend Development
```bash
# Install dependencies
cd backend && poetry install

# Run database migrations
poetry run prisma migrate dev

# Start all services (database, redis, rabbitmq, clamav)
docker compose up -d

# Run the backend server
poetry run serve

# Run tests
poetry run test

# Run a specific test
poetry run pytest path/to/test_file.py::test_function_name

# Lint and format
# Prefer `format` to auto-fix what it can and report only the errors that can't be autofixed
poetry run format  # Black + isort
poetry run lint    # ruff
```

More details can be found in `TESTING.md`.

#### Creating/Updating Snapshots

When you first write a test or when the expected output changes:

```bash
poetry run pytest path/to/test.py --snapshot-update
```

⚠️ **Important**: Always review snapshot changes before committing! Use `git diff` to verify the changes are expected.

### Frontend Development
```bash
# Install dependencies
cd frontend && npm install

# Start development server
npm run dev

# Run E2E tests
npm run test

# Run Storybook for component development
npm run storybook

# Build production
npm run build

# Type checking
npm run type-check
```

## Architecture Overview

### Backend Architecture
- **API Layer**: FastAPI with REST and WebSocket endpoints
- **Database**: PostgreSQL with Prisma ORM, includes pgvector for embeddings
- **Queue System**: RabbitMQ for async task processing
- **Execution Engine**: Separate executor service processes agent workflows
- **Authentication**: JWT-based with Supabase integration
- **Security**: Cache protection middleware prevents sensitive data caching in browsers/proxies

### Frontend Architecture
- **Framework**: Next.js App Router with React Server Components
- **State Management**: React hooks + Supabase client for real-time updates
- **Workflow Builder**: Visual graph editor using @xyflow/react
- **UI Components**: Radix UI primitives with Tailwind CSS styling
- **Feature Flags**: LaunchDarkly integration

### Key Concepts
1. **Agent Graphs**: Workflow definitions stored as JSON, executed by the backend
2. **Blocks**: Reusable components in `/backend/blocks/` that perform specific tasks
3. **Integrations**: OAuth and API connections stored per user
4. **Store**: Marketplace for sharing agent templates
5. **Virus Scanning**: ClamAV integration for file upload security

### Testing Approach
- Backend uses pytest with snapshot testing for API responses
- Test files are colocated with source files (`*_test.py`)
- Frontend uses Playwright for E2E tests
- Component testing via Storybook

### Database Schema
Key models (defined in `/backend/schema.prisma`):
- `User`: Authentication and profile data
- `AgentGraph`: Workflow definitions with version control
- `AgentGraphExecution`: Execution history and results
- `AgentNode`: Individual nodes in a workflow
- `StoreListing`: Marketplace listings for sharing agents

### Environment Configuration
- Backend: `.env` file in `/backend`
- Frontend: `.env.local` file in `/frontend`
- Both require Supabase credentials and API keys for various services

### Common Development Tasks

**Adding a new block** (a minimal sketch follows the steps):
1. Create a new file in `/backend/backend/blocks/`
2. Inherit from the `Block` base class
3. Define input/output schemas
4. Implement the `run` method
5. Register in the block registry
6. Generate the block uuid using `uuid.uuid4()`
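
For orientation, here is a minimal, hypothetical sketch of what such a block might look like. The `Block`, `BlockSchema`, and `BlockOutput` names and the import path are assumptions for illustration; treat the existing files in `/backend/backend/blocks/` as the authoritative pattern.

```python
# Hypothetical sketch only — names and import path are assumptions, not a
# verbatim copy of this repository's block interface.
from backend.data.block import Block, BlockOutput, BlockSchema  # assumed path


class WordCountBlock(Block):
    """Toy block that counts the words in a piece of text."""

    class Input(BlockSchema):
        text: str

    class Output(BlockSchema):
        word_count: int

    def __init__(self):
        super().__init__(
            id="11111111-2222-4333-8444-555555555555",  # generate with uuid.uuid4()
            input_schema=WordCountBlock.Input,
            output_schema=WordCountBlock.Output,
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        # Yield one (output_name, value) pair per declared output field.
        yield "word_count", len(input_data.text.split())
```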

**Modifying the API:**
1. Update the route in `/backend/backend/server/routers/`
2. Add/update Pydantic models in the same directory
3. Write tests alongside the route file
4. Run `poetry run test` to verify

**Frontend feature development:**
1. Components go in `/frontend/src/components/`
2. Use existing UI components from `/frontend/src/components/ui/`
3. Add Storybook stories for new components
4. Test with Playwright if user-facing

### Security Implementation

**Cache Protection Middleware** (a sketch of the idea follows this list):
- Located in `/backend/backend/server/middleware/security.py`
- Default behavior: disables caching for ALL endpoints with `Cache-Control: no-store, no-cache, must-revalidate, private`
- Uses an allow-list approach: only explicitly permitted paths can be cached
- Cacheable paths include: static assets (`/static/*`, `/_next/static/*`), health checks, public store pages, documentation
- Prevents sensitive data (auth tokens, API keys, user data) from being cached by browsers/proxies
- To allow caching for a new endpoint, add it to `CACHEABLE_PATHS` in the middleware
- Applied to both the main API server and external API applications
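
The following is a minimal sketch of the allow-list pattern described above, not the actual middleware: only the `CACHEABLE_PATHS` name comes from these notes, and the Starlette-style `BaseHTTPMiddleware` structure and example paths are assumptions for illustration.

```python
# Illustrative sketch of the allow-list caching pattern; not the real middleware.
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request

# Assumed example values; the real allow list lives in the middleware module.
CACHEABLE_PATHS = ("/static/", "/_next/static/", "/health", "/docs")


class CacheProtectionMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        response = await call_next(request)
        if not any(request.url.path.startswith(path) for path in CACHEABLE_PATHS):
            # Default: forbid caching so tokens and user data never land in
            # browser or proxy caches.
            response.headers["Cache-Control"] = (
                "no-store, no-cache, must-revalidate, private"
            )
        return response
```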
@@ -1,21 +0,0 @@
|
||||
**Determinist Ltd**
|
||||
|
||||
**Contributor License Agreement (“Agreement”)**
|
||||
|
||||
Thank you for your interest in the AutoGPT open source project at [https://github.com/Significant-Gravitas/AutoGPT](https://github.com/Significant-Gravitas/AutoGPT) stewarded by Determinist Ltd (“**Determinist**”), with offices at 3rd Floor 1 Ashley Road, Altrincham, Cheshire, WA14 2DT, United Kingdom. The form of license below is a document that clarifies the terms under which You, the person listed below, may contribute software code described below (the “**Contribution**”) to the project. We appreciate your participation in our project, and your help in improving our products, so we want you to understand what will be done with the Contributions. This license is for your protection as well as the protection of Determinist and its licensees; it does not change your rights to use your own Contributions for any other purpose.
|
||||
|
||||
By submitting a Pull Request which modifies the content of the “autogpt\_platform” folder at [https://github.com/Significant-Gravitas/AutoGPT/tree/master/autogpt\_platform](https://github.com/Significant-Gravitas/AutoGPT/tree/master/autogpt_platform), You hereby agree:
|
||||
|
||||
1\. **You grant us the ability to use the Contributions in any way**. You hereby grant to Determinist a non-exclusive, irrevocable, worldwide, royalty-free, sublicenseable, transferable license under all of Your relevant intellectual property rights (including copyright, patent, and any other rights), to use, copy, prepare derivative works of, distribute and publicly perform and display the Contributions on any licensing terms, including without limitation: (a) open source licenses like the GNU General Public License (GPL), the GNU Lesser General Public License (LGPL), the Common Public License, or the Berkeley Science Division license (BSD); and (b) binary, proprietary, or commercial licenses.
|
||||
|
||||
2\. **Grant of Patent License**. You hereby grant to Determinist a worldwide, non-exclusive, royalty-free, irrevocable, license, under any rights you may have, now or in the future, in any patents or patent applications, to make, have made, use, offer to sell, sell, and import products containing the Contribution or portions of the Contribution. This license extends to patent claims that are infringed by the Contribution alone or by combination of the Contribution with other inventions.
|
||||
|
||||
4\. **Limitations on Licenses**. The licenses granted in this Agreement will continue for the duration of the applicable patent or intellectual property right under which such license is granted. The licenses granted in this Agreement will include the right to grant and authorize sublicenses, so long as the sublicenses are within the scope of the licenses granted in this Agreement. Except for the licenses granted herein, You reserve all right, title, and interest in and to the Contribution.
|
||||
|
||||
5\. **You are able to grant us these rights**. You represent that You are legally entitled to grant the above license. If Your employer has rights to intellectual property that You create, You represent that You are authorized to make the Contributions on behalf of that employer, or that Your employer has waived such rights for the Contributions.
|
||||
|
||||
3\. **The Contributions are your original work**. You represent that the Contributions are Your original works of authorship, and to Your knowledge, no other person claims, or has the right to claim, any right in any invention or patent related to the Contributions. You also represent that You are not legally obligated, whether by entering into an agreement or otherwise, in any way that conflicts with the terms of this license. For example, if you have signed an agreement requiring you to assign the intellectual property rights in the Contributions to an employer or customer, that would conflict with the terms of this license.
|
||||
|
||||
6\. **We determine the code that is in our products**. You understand that the decision to include the Contribution in any product or source repository is entirely that of Determinist, and this agreement does not guarantee that the Contributions will be included in any product.
|
||||
|
||||
7\. **No Implied Warranties.** Determinist acknowledges that, except as explicitly described in this Agreement, the Contribution is provided on an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
|
||||
@@ -1,164 +0,0 @@
|
||||
# PolyForm Shield License 1.0.0
|
||||
|
||||
<https://polyformproject.org/licenses/shield/1.0.0>
|
||||
|
||||
## Acceptance
|
||||
|
||||
In order to get any license under these terms, you must agree
|
||||
to them as both strict obligations and conditions to all
|
||||
your licenses.
|
||||
|
||||
## Copyright License
|
||||
|
||||
The licensor grants you a copyright license for the
|
||||
software to do everything you might do with the software
|
||||
that would otherwise infringe the licensor's copyright
|
||||
in it for any permitted purpose. However, you may
|
||||
only distribute the software according to [Distribution
|
||||
License](#distribution-license) and make changes or new works
|
||||
based on the software according to [Changes and New Works
|
||||
License](#changes-and-new-works-license).
|
||||
|
||||
## Distribution License
|
||||
|
||||
The licensor grants you an additional copyright license
|
||||
to distribute copies of the software. Your license
|
||||
to distribute covers distributing the software with
|
||||
changes and new works permitted by [Changes and New Works
|
||||
License](#changes-and-new-works-license).
|
||||
|
||||
## Notices
|
||||
|
||||
You must ensure that anyone who gets a copy of any part of
|
||||
the software from you also gets a copy of these terms or the
|
||||
URL for them above, as well as copies of any plain-text lines
|
||||
beginning with `Required Notice:` that the licensor provided
|
||||
with the software. For example:
|
||||
|
||||
> Required Notice: Copyright Yoyodyne, Inc. (http://example.com)
|
||||
|
||||
## Changes and New Works License
|
||||
|
||||
The licensor grants you an additional copyright license to
|
||||
make changes and new works based on the software for any
|
||||
permitted purpose.
|
||||
|
||||
## Patent License
|
||||
|
||||
The licensor grants you a patent license for the software that
|
||||
covers patent claims the licensor can license, or becomes able
|
||||
to license, that you would infringe by using the software.
|
||||
|
||||
## Noncompete
|
||||
|
||||
Any purpose is a permitted purpose, except for providing any
|
||||
product that competes with the software or any product the
|
||||
licensor or any of its affiliates provides using the software.
|
||||
|
||||
## Competition
|
||||
|
||||
Goods and services compete even when they provide functionality
|
||||
through different kinds of interfaces or for different technical
|
||||
platforms. Applications can compete with services, libraries
|
||||
with plugins, frameworks with development tools, and so on,
|
||||
even if they're written in different programming languages
|
||||
or for different computer architectures. Goods and services
|
||||
compete even when provided free of charge. If you market a
|
||||
product as a practical substitute for the software or another
|
||||
product, it definitely competes.
|
||||
|
||||
## New Products
|
||||
|
||||
If you are using the software to provide a product that does
|
||||
not compete, but the licensor or any of its affiliates brings
|
||||
your product into competition by providing a new version of
|
||||
the software or another product using the software, you may
|
||||
continue using versions of the software available under these
|
||||
terms beforehand to provide your competing product, but not
|
||||
any later versions.
|
||||
|
||||
## Discontinued Products
|
||||
|
||||
You may begin using the software to compete with a product
|
||||
or service that the licensor or any of its affiliates has
|
||||
stopped providing, unless the licensor includes a plain-text
|
||||
line beginning with `Licensor Line of Business:` with the
|
||||
software that mentions that line of business. For example:
|
||||
|
||||
> Licensor Line of Business: YoyodyneCMS Content Management
|
||||
System (http://example.com/cms)
|
||||
|
||||
## Sales of Business
|
||||
|
||||
If the licensor or any of its affiliates sells a line of
|
||||
business developing the software or using the software
|
||||
to provide a product, the buyer can also enforce
|
||||
[Noncompete](#noncompete) for that product.
|
||||
|
||||
## Fair Use
|
||||
|
||||
You may have "fair use" rights for the software under the
|
||||
law. These terms do not limit them.
|
||||
|
||||
## No Other Rights
|
||||
|
||||
These terms do not allow you to sublicense or transfer any of
|
||||
your licenses to anyone else, or prevent the licensor from
|
||||
granting licenses to anyone else. These terms do not imply
|
||||
any other licenses.
|
||||
|
||||
## Patent Defense
|
||||
|
||||
If you make any written claim that the software infringes or
|
||||
contributes to infringement of any patent, your patent license
|
||||
for the software granted under these terms ends immediately. If
|
||||
your company makes such a claim, your patent license ends
|
||||
immediately for work on behalf of your company.
|
||||
|
||||
## Violations
|
||||
|
||||
The first time you are notified in writing that you have
|
||||
violated any of these terms, or done anything with the software
|
||||
not covered by your licenses, your licenses can nonetheless
|
||||
continue if you come into full compliance with these terms,
|
||||
and take practical steps to correct past violations, within
|
||||
32 days of receiving notice. Otherwise, all your licenses
|
||||
end immediately.
|
||||
|
||||
## No Liability
|
||||
|
||||
***As far as the law allows, the software comes as is, without
|
||||
any warranty or condition, and the licensor will not be liable
|
||||
to you for any damages arising out of these terms or the use
|
||||
or nature of the software, under any kind of legal claim.***
|
||||
|
||||
## Definitions
|
||||
|
||||
The **licensor** is the individual or entity offering these
|
||||
terms, and the **software** is the software the licensor makes
|
||||
available under these terms.
|
||||
|
||||
A **product** can be a good or service, or a combination
|
||||
of them.
|
||||
|
||||
**You** refers to the individual or entity agreeing to these
|
||||
terms.
|
||||
|
||||
**Your company** is any legal entity, sole proprietorship,
|
||||
or other kind of organization that you work for, plus all
|
||||
its affiliates.
|
||||
|
||||
**Affiliates** means the other organizations than an
|
||||
organization has control over, is under the control of, or is
|
||||
under common control with.
|
||||
|
||||
**Control** means ownership of substantially all the assets of
|
||||
an entity, or the power to direct its management and policies
|
||||
by vote, contract, or otherwise. Control can be direct or
|
||||
indirect.
|
||||
|
||||
**Your licenses** are all the licenses granted to you for the
|
||||
software under these terms.
|
||||
|
||||
**Use** means anything you do with the software requiring one
|
||||
of your licenses.
|
||||
@@ -8,131 +8,88 @@ Welcome to the AutoGPT Platform - a powerful system for creating and running AI
|
||||
|
||||
- Docker
|
||||
- Docker Compose V2 (comes with Docker Desktop, or can be installed separately)
|
||||
- Node.js & NPM (for running the frontend application)
|
||||
|
||||
### Running the System
|
||||
|
||||
To run the AutoGPT Platform, follow these steps:
|
||||
|
||||
1. Clone this repository to your local machine and navigate to the `autogpt_platform` directory within the repository:
|
||||
|
||||
```
|
||||
git clone <https://github.com/Significant-Gravitas/AutoGPT.git | git@github.com:Significant-Gravitas/AutoGPT.git>
|
||||
cd AutoGPT/autogpt_platform
|
||||
```
|
||||
|
||||
2. Run the following command:
|
||||
|
||||
```
|
||||
cp .env.example .env
|
||||
```
|
||||
|
||||
This command will copy the `.env.example` file to `.env`. You can modify the `.env` file to add your own environment variables.
|
||||
|
||||
1. Clone this repository to your local machine.
|
||||
2. Navigate to autogpt_platform/supabase
|
||||
3. Run the following command:
|
||||
|
||||
```
|
||||
docker compose up -d
|
||||
git submodule update --init --recursive
|
||||
```
|
||||
|
||||
This command will start all the necessary backend services defined in the `docker-compose.yml` file in detached mode.
|
||||
|
||||
4. Navigate to `frontend` within the `autogpt_platform` directory:
|
||||
|
||||
```
|
||||
cd frontend
|
||||
```
|
||||
|
||||
You will need to run your frontend application separately on your local machine.
|
||||
|
||||
4. Navigate back to rnd (cd ..)
|
||||
5. Run the following command:
|
||||
|
||||
```
|
||||
cp .env.example .env.local
|
||||
cp supabase/docker/.env.example .env
|
||||
```
|
||||
|
||||
This command will copy the `.env.example` file to `.env.local` in the `frontend` directory. You can modify the `.env.local` within this folder to add your own environment variables for the frontend application.
|
||||
|
||||
6. Run the following command:
|
||||
|
||||
Enable corepack and install dependencies by running:
|
||||
```
|
||||
docker compose -f docker-compose.combined.yml up -d
|
||||
|
||||
```
|
||||
corepack enable
|
||||
pnpm i
|
||||
```
|
||||
|
||||
Generate the API client (this step is required before running the frontend):
|
||||
|
||||
This command will start all the necessary backend services defined in the `docker-compose.combined.yml` file in detached mode.
|
||||
7. Navigate to autogpt_platform/autogpt_builder.
|
||||
8. Run the following command:
|
||||
```
|
||||
pnpm generate:api-client
|
||||
cp .env.example .env.local
|
||||
```
|
||||
|
||||
Then start the frontend application in development mode:
|
||||
|
||||
9. Run the following command:
|
||||
```
|
||||
pnpm dev
|
||||
yarn dev
|
||||
```
|
||||
|
||||
7. Open your browser and navigate to `http://localhost:3000` to access the AutoGPT Platform frontend.
|
||||
|
||||
### Docker Compose Commands
|
||||
|
||||
Here are some useful Docker Compose commands for managing your AutoGPT Platform:
|
||||
|
||||
- `docker compose up -d`: Start the services in detached mode.
|
||||
- `docker compose stop`: Stop the running services without removing them.
|
||||
- `docker compose -f docker-compose.combined.yml up -d`: Start the services in detached mode.
|
||||
- `docker compose -f docker-compose.combined.yml stop`: Stop the running services without removing them.
|
||||
- `docker compose rm`: Remove stopped service containers.
|
||||
- `docker compose build`: Build or rebuild services.
|
||||
- `docker compose down`: Stop and remove containers, networks, and volumes.
|
||||
- `docker compose watch`: Watch for changes in your services and automatically update them.
|
||||
|
||||
|
||||
### Sample Scenarios
|
||||
|
||||
Here are some common scenarios where you might use multiple Docker Compose commands:
|
||||
|
||||
1. Updating and restarting a specific service:
|
||||
|
||||
```
|
||||
docker compose build api_srv
|
||||
docker compose up -d --no-deps api_srv
|
||||
```
|
||||
|
||||
This rebuilds the `api_srv` service and restarts it without affecting other services.
|
||||
|
||||
2. Viewing logs for troubleshooting:
|
||||
|
||||
```
|
||||
docker compose logs -f api_srv ws_srv
|
||||
```
|
||||
|
||||
This shows and follows the logs for both `api_srv` and `ws_srv` services.
|
||||
|
||||
3. Scaling a service for increased load:
|
||||
|
||||
```
|
||||
docker compose up -d --scale executor=3
|
||||
```
|
||||
|
||||
This scales the `executor` service to 3 instances to handle increased load.
|
||||
|
||||
4. Stopping the entire system for maintenance:
|
||||
|
||||
```
|
||||
docker compose stop
|
||||
docker compose rm -f
|
||||
docker compose pull
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
This stops all services, removes containers, pulls the latest images, and restarts the system.
|
||||
|
||||
5. Developing with live updates:
|
||||
|
||||
```
|
||||
docker compose watch
|
||||
```
|
||||
|
||||
This watches for changes in your code and automatically updates the relevant services.
|
||||
|
||||
6. Checking the status of services:
|
||||
@@ -143,6 +100,7 @@ Here are some common scenarios where you might use multiple Docker Compose comma
|
||||
|
||||
These scenarios demonstrate how to use Docker Compose commands in combination to manage your AutoGPT Platform effectively.
|
||||
|
||||
|
||||
### Persisting Data
|
||||
|
||||
To persist data for PostgreSQL and Redis, you can modify the `docker-compose.yml` file to add volumes. Here's how:
|
||||
@@ -171,26 +129,5 @@ To persist data for PostgreSQL and Redis, you can modify the `docker-compose.yml
|
||||
|
||||
This configuration will create named volumes for PostgreSQL and Redis, ensuring that your data persists across container restarts.
|
||||
|
||||
### API Client Generation
|
||||
|
||||
The platform includes scripts for generating and managing the API client:
|
||||
|
||||
- `pnpm fetch:openapi`: Fetches the OpenAPI specification from the backend service (requires backend to be running on port 8006)
|
||||
- `pnpm generate:api-client`: Generates the TypeScript API client from the OpenAPI specification using Orval
|
||||
- `pnpm generate:api-all`: Runs both fetch and generate commands in sequence
|
||||
|
||||
#### Manual API Client Updates
|
||||
|
||||
If you need to update the API client after making changes to the backend API:
|
||||
|
||||
1. Ensure the backend services are running:
|
||||
```
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
2. Generate the updated API client:
|
||||
```
|
||||
pnpm generate:api-all
|
||||
```
|
||||
|
||||
This will fetch the latest OpenAPI specification and regenerate the TypeScript client code.
|
||||
|
||||
@@ -1,802 +0,0 @@

# DatabaseManager Technical Specification

## Executive Summary

This document provides a complete technical specification for implementing a drop-in replacement for the AutoGPT Platform's DatabaseManager service. The replacement must maintain 100% API compatibility while preserving all functional behaviors, security requirements, and performance characteristics.

## 1. System Overview

### 1.1 Purpose
The DatabaseManager is a centralized service that provides database access for the AutoGPT Platform's executor system. It encapsulates all database operations behind a service interface, enabling distributed execution while maintaining data consistency and security.

### 1.2 Architecture Pattern
- **Service Type**: HTTP-based microservice using FastAPI
- **Communication**: RPC-style over HTTP with JSON serialization
- **Base Class**: Inherits from `AppService` (backend.util.service)
- **Client Classes**: `DatabaseManagerClient` (sync) and `DatabaseManagerAsyncClient` (async)
- **Port**: Configurable via `config.database_api_port`

### 1.3 Critical Requirements
1. **API Compatibility**: All 40+ exposed methods must maintain exact signatures
2. **Type Safety**: Full type preservation across service boundaries
3. **User Isolation**: All operations must respect user_id boundaries
4. **Transaction Support**: Maintain ACID properties for critical operations
5. **Event Publishing**: Maintain Redis event bus integration for real-time updates

## 2. Service Implementation Requirements

### 2.1 Base Service Class

```python
from backend.util.service import AppService, expose
from backend.util.settings import Config
from backend.data import db
import logging

# Module-level helpers assumed by the snippets below (not shown in the original excerpt)
logger = logging.getLogger(__name__)
config = Config()


class DatabaseManager(AppService):
    """
    REQUIRED: Inherit from AppService to get:
    - Automatic endpoint generation via @expose decorator
    - Built-in health checks at /health
    - Request/response serialization
    - Error handling and logging
    """

    def run_service(self) -> None:
        """REQUIRED: Initialize database connection before starting service"""
        logger.info(f"[{self.service_name}] ⏳ Connecting to Database...")
        self.run_and_wait(db.connect())  # CRITICAL: Must connect to database
        super().run_service()  # Start HTTP server

    def cleanup(self):
        """REQUIRED: Clean disconnect on shutdown"""
        super().cleanup()
        logger.info(f"[{self.service_name}] ⏳ Disconnecting Database...")
        self.run_and_wait(db.disconnect())  # CRITICAL: Must disconnect cleanly

    @classmethod
    def get_port(cls) -> int:
        """REQUIRED: Return configured port"""
        return config.database_api_port
```

### 2.2 Method Exposure Pattern

```python
# Class-body fragment: assumes `from typing import Callable, Concatenate, ParamSpec,
# TypeVar, cast` with P = ParamSpec("P") and R = TypeVar("R") defined at module level.
@staticmethod
def _(f: Callable[P, R], name: str | None = None) -> Callable[Concatenate[object, P], R]:
    """
    REQUIRED: Helper to expose methods with proper signatures
    - Preserves function name for endpoint generation
    - Maintains type information
    - Adds 'self' parameter for instance binding
    """
    if name is not None:
        f.__name__ = name
    return cast(Callable[Concatenate[object, P], R], expose(f))
```
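
To make the name-preserving behavior concrete, here is a hedged, self-contained toy (not from the spec): the `expose` stand-in, the `get_credits` placeholder, and the route-derivation comment are all assumptions for illustration, but they show why the helper keeps `__name__` intact when wrapping a data-layer function as a class attribute.

```python
# Self-contained toy demonstrating the exposure pattern; `expose` here is a
# stand-in, not the real backend.util.service.expose.
from typing import Callable, Concatenate, ParamSpec, TypeVar, cast

P = ParamSpec("P")
R = TypeVar("R")


def expose(f):  # stand-in: the real decorator registers an HTTP endpoint
    f.exposed = True
    return f


def _(f: Callable[P, R], name: str | None = None) -> Callable[Concatenate[object, P], R]:
    if name is not None:
        f.__name__ = name
    return cast(Callable[Concatenate[object, P], R], expose(f))


async def get_credits(user_id: str) -> int:
    return 0  # placeholder body for illustration


class DatabaseManager:
    # The wrapped function keeps its name, so the generated endpoint would be
    # derived from "get_credits", and the original signature stays intact.
    get_credits = _(get_credits)


print(DatabaseManager.get_credits.__name__)  # -> "get_credits"
```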

### 2.3 Database Connection Management

**REQUIRED: Use Prisma ORM with these exact configurations:**

```python
from prisma import Prisma

# HTTP_TIMEOUT and DATABASE_URL come from service settings
prisma = Prisma(
    auto_register=True,
    http={"timeout": HTTP_TIMEOUT},  # Default: 120 seconds
    datasource={"url": DATABASE_URL}
)

# Connection lifecycle
async def connect():
    await prisma.connect()

async def disconnect():
    await prisma.disconnect()
```

### 2.4 Transaction Support

**REQUIRED: Implement both regular and locked transactions:**

```python
# Imports and @asynccontextmanager added so these generators can actually be
# used with `async with`, as the spec does below.
import zlib
from contextlib import asynccontextmanager


@asynccontextmanager
async def transaction(timeout: float | None = None):
    """Regular database transaction"""
    async with prisma.tx(timeout=timeout) as tx:
        yield tx


@asynccontextmanager
async def locked_transaction(key: str, timeout: float | None = None):
    """Transaction with PostgreSQL advisory lock"""
    lock_key = zlib.crc32(key.encode("utf-8"))
    async with transaction(timeout=timeout) as tx:
        await tx.execute_raw("SELECT pg_advisory_xact_lock($1)", lock_key)
        yield tx
```
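
To make the locking behavior concrete, here is a hedged usage sketch; the function name, key format, and placeholder statement are illustrative assumptions, not part of the spec. Any two callers that pass the same key serialize on the advisory lock until the first transaction commits or rolls back.

```python
# Hedged usage sketch — the function name and key format are illustrative only.
async def enqueue_execution_exclusively(graph_id: str) -> None:
    # Workers calling this with the same graph_id contend on the same
    # crc32-derived advisory lock, so the second one blocks until the first
    # transaction ends.
    async with locked_transaction(f"graph-exec-{graph_id}", timeout=30) as tx:
        await tx.execute_raw("SELECT 1")  # placeholder for the real statements
```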

## 3. Complete API Specification

### 3.1 Execution Management APIs

#### get_graph_execution
```python
async def get_graph_execution(
    user_id: str,
    execution_id: str,
    *,
    include_node_executions: bool = False
) -> GraphExecution | GraphExecutionWithNodes | None
```
**Behavior**:
- Returns execution only if user_id matches
- Optionally includes all node executions
- Returns None if not found or unauthorized

#### get_graph_executions
```python
async def get_graph_executions(
    user_id: str,
    graph_id: str | None = None,
    *,
    limit: int = 50,
    graph_version: int | None = None,
    cursor: str | None = None,
    preset_id: str | None = None
) -> tuple[list[GraphExecution], str | None]
```
**Behavior**:
- Paginated results with cursor
- Filter by graph_id, version, or preset_id
- Returns (executions, next_cursor)

#### create_graph_execution
```python
async def create_graph_execution(
    graph_id: str,
    graph_version: int,
    starting_nodes_input: dict[str, dict[str, Any]],
    user_id: str,
    preset_id: str | None = None
) -> GraphExecutionWithNodes
```
**Behavior**:
- Creates execution with status "QUEUED"
- Initializes all nodes with "PENDING" status
- Publishes creation event to Redis
- Uses locked transaction on graph_id
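
For context, here is a hedged sketch of how an executor-side caller might drive these two methods through the synchronous RPC client from section 1.2. The `get_service_client` helper and the import paths are assumptions; only `DatabaseManagerClient` and the method signatures come from this spec.

```python
# Hedged sketch — helper name and import paths are assumptions, not from the spec.
from backend.executor.database import DatabaseManagerClient  # assumed path
from backend.util.service import get_service_client          # assumed helper

db_client = get_service_client(DatabaseManagerClient)

# Queue a new run of version 1 of a graph, seeding one starting node's input.
graph_exec = db_client.create_graph_execution(
    graph_id="graph-123",
    graph_version=1,
    starting_nodes_input={"node-1": {"text": "hello"}},
    user_id="user-abc",
)

# Re-read it (including node executions) under the same user's scope.
execution = db_client.get_graph_execution(
    user_id="user-abc",
    execution_id=graph_exec.id,
    include_node_executions=True,
)
```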
#### update_graph_execution_start_time
|
||||
```python
|
||||
async def update_graph_execution_start_time(
|
||||
graph_exec_id: str
|
||||
) -> None
|
||||
```
|
||||
**Behavior**:
|
||||
- Sets start_time to current timestamp
|
||||
- Only updates if currently NULL
|
||||
|
||||
#### update_graph_execution_stats
|
||||
```python
|
||||
async def update_graph_execution_stats(
|
||||
graph_exec_id: str,
|
||||
status: AgentExecutionStatus | None = None,
|
||||
stats: dict[str, Any] | None = None
|
||||
) -> GraphExecution | None
|
||||
```
|
||||
**Behavior**:
|
||||
- Updates status and/or stats atomically
|
||||
- Sets end_time if status is terminal (COMPLETED/FAILED)
|
||||
- Publishes update event to Redis
|
||||
- Returns updated execution
|
||||
|
||||
#### get_node_execution
|
||||
```python
|
||||
async def get_node_execution(
|
||||
node_exec_id: str
|
||||
) -> NodeExecutionResult | None
|
||||
```
|
||||
**Behavior**:
|
||||
- No user_id check (relies on graph execution security)
|
||||
- Includes all input/output data
|
||||
|
||||
#### get_node_executions
|
||||
```python
|
||||
async def get_node_executions(
|
||||
graph_exec_id: str
|
||||
) -> list[NodeExecutionResult]
|
||||
```
|
||||
**Behavior**:
|
||||
- Returns all node executions for graph
|
||||
- Ordered by creation time
|
||||
|
||||
#### get_latest_node_execution
|
||||
```python
|
||||
async def get_latest_node_execution(
|
||||
graph_exec_id: str,
|
||||
node_id: str
|
||||
) -> NodeExecutionResult | None
|
||||
```
|
||||
**Behavior**:
|
||||
- Returns most recent execution of specific node
|
||||
- Used for retry/rerun scenarios
|
||||
|
||||
#### update_node_execution_status
|
||||
```python
|
||||
async def update_node_execution_status(
|
||||
node_exec_id: str,
|
||||
status: AgentExecutionStatus,
|
||||
execution_data: dict[str, Any] | None = None,
|
||||
stats: dict[str, Any] | None = None
|
||||
) -> NodeExecutionResult
|
||||
```
|
||||
**Behavior**:
|
||||
- Updates status atomically
|
||||
- Sets end_time for terminal states
|
||||
- Optionally updates stats/data
|
||||
- Publishes event to Redis
|
||||
- Returns updated execution
|
||||
|
||||
#### update_node_execution_status_batch
|
||||
```python
|
||||
async def update_node_execution_status_batch(
|
||||
execution_updates: list[NodeExecutionUpdate]
|
||||
) -> list[NodeExecutionResult]
|
||||
```
|
||||
**Behavior**:
|
||||
- Batch update multiple nodes in single transaction
|
||||
- Each update can have different status/stats
|
||||
- Publishes events for all updates
|
||||
- Returns all updated executions
|
||||
|
||||
#### update_node_execution_stats
|
||||
```python
|
||||
async def update_node_execution_stats(
|
||||
node_exec_id: str,
|
||||
stats: dict[str, Any]
|
||||
) -> NodeExecutionResult
|
||||
```
|
||||
**Behavior**:
|
||||
- Updates only stats field
|
||||
- Merges with existing stats
|
||||
- Does not affect status
|
||||
|
||||
#### upsert_execution_input
|
||||
```python
|
||||
async def upsert_execution_input(
|
||||
node_id: str,
|
||||
graph_exec_id: str,
|
||||
input_name: str,
|
||||
input_data: Any,
|
||||
node_exec_id: str | None = None
|
||||
) -> tuple[str, BlockInput]
|
||||
```
|
||||
**Behavior**:
|
||||
- Creates or updates input data
|
||||
- If node_exec_id not provided, creates node execution
|
||||
- Serializes input_data to JSON
|
||||
- Returns (node_exec_id, input_object)
|
||||
|
||||
#### upsert_execution_output
|
||||
```python
|
||||
async def upsert_execution_output(
|
||||
node_exec_id: str,
|
||||
output_name: str,
|
||||
output_data: Any
|
||||
) -> None
|
||||
```
|
||||
**Behavior**:
|
||||
- Creates or updates output data
|
||||
- Serializes output_data to JSON
|
||||
- No return value
|
||||
|
||||
#### get_execution_kv_data
|
||||
```python
|
||||
async def get_execution_kv_data(
|
||||
user_id: str,
|
||||
key: str
|
||||
) -> Any | None
|
||||
```
|
||||
**Behavior**:
|
||||
- User-scoped key-value storage
|
||||
- Returns deserialized JSON data
|
||||
- Returns None if key not found
|
||||
|
||||
#### set_execution_kv_data
|
||||
```python
|
||||
async def set_execution_kv_data(
|
||||
user_id: str,
|
||||
node_exec_id: str,
|
||||
key: str,
|
||||
data: Any
|
||||
) -> Any | None
|
||||
```
|
||||
**Behavior**:
|
||||
- Sets user-scoped key-value data
|
||||
- Associates with node execution
|
||||
- Serializes data to JSON
|
||||
- Returns previous value or None
|
||||
|
||||
#### get_block_error_stats
|
||||
```python
|
||||
async def get_block_error_stats() -> list[BlockErrorStats]
|
||||
```
|
||||
**Behavior**:
|
||||
- Aggregates error counts by block_id
|
||||
- Last 7 days of data
|
||||
- Groups by error type
|
||||
|
||||
### 3.2 Graph Management APIs
|
||||
|
||||
#### get_node
|
||||
```python
|
||||
async def get_node(
|
||||
node_id: str
|
||||
) -> AgentNode | None
|
||||
```
|
||||
**Behavior**:
|
||||
- Returns node with block data
|
||||
- No user_id check (public blocks)
|
||||
|
||||
#### get_graph
|
||||
```python
|
||||
async def get_graph(
|
||||
graph_id: str,
|
||||
version: int | None = None,
|
||||
user_id: str | None = None,
|
||||
for_export: bool = False,
|
||||
include_subgraphs: bool = False
|
||||
) -> GraphModel | None
|
||||
```
|
||||
**Behavior**:
|
||||
- Returns latest version if version=None
|
||||
- Checks user_id for private graphs
|
||||
- for_export=True excludes internal fields
|
||||
- include_subgraphs=True loads nested graphs
|
||||
|
||||
#### get_connected_output_nodes
|
||||
```python
|
||||
async def get_connected_output_nodes(
|
||||
node_id: str,
|
||||
output_name: str
|
||||
) -> list[tuple[AgentNode, AgentNodeLink]]
|
||||
```
|
||||
**Behavior**:
|
||||
- Returns downstream nodes connected to output
|
||||
- Includes link metadata
|
||||
- Used for execution flow
|
||||
|
||||
#### get_graph_metadata
|
||||
```python
|
||||
async def get_graph_metadata(
|
||||
graph_id: str,
|
||||
user_id: str
|
||||
) -> GraphMetadata | None
|
||||
```
|
||||
**Behavior**:
|
||||
- Returns graph metadata without full definition
|
||||
- User must own or have access to graph
|
||||
|
||||
### 3.3 Credit System APIs
|
||||
|
||||
#### get_credits
|
||||
```python
|
||||
async def get_credits(
|
||||
user_id: str
|
||||
) -> int
|
||||
```
|
||||
**Behavior**:
|
||||
- Returns current credit balance
|
||||
- Always non-negative
|
||||
|
||||
#### spend_credits
|
||||
```python
|
||||
async def spend_credits(
|
||||
user_id: str,
|
||||
cost: int,
|
||||
metadata: UsageTransactionMetadata
|
||||
) -> int
|
||||
```
|
||||
**Behavior**:
|
||||
- Deducts credits atomically
|
||||
- Creates transaction record
|
||||
- Throws InsufficientCredits if balance too low
|
||||
- Returns new balance
|
||||
- metadata includes: block_id, node_exec_id, context
|
||||
|
||||
### 3.4 User Management APIs
|
||||
|
||||
#### get_user_metadata
|
||||
```python
|
||||
async def get_user_metadata(
|
||||
user_id: str
|
||||
) -> UserMetadata
|
||||
```
|
||||
**Behavior**:
|
||||
- Returns user preferences and settings
|
||||
- Creates default if not exists
|
||||
|
||||
#### update_user_metadata
|
||||
```python
|
||||
async def update_user_metadata(
|
||||
user_id: str,
|
||||
data: UserMetadataDTO
|
||||
) -> UserMetadata
|
||||
```
|
||||
**Behavior**:
|
||||
- Partial update of metadata
|
||||
- Validates against schema
|
||||
- Returns updated metadata
|
||||
|
||||
#### get_user_integrations
|
||||
```python
|
||||
async def get_user_integrations(
|
||||
user_id: str
|
||||
) -> UserIntegrations
|
||||
```
|
||||
**Behavior**:
|
||||
- Returns OAuth credentials
|
||||
- Decrypts sensitive data
|
||||
- Creates empty if not exists
|
||||
|
||||
#### update_user_integrations
|
||||
```python
|
||||
async def update_user_integrations(
|
||||
user_id: str,
|
||||
data: UserIntegrations
|
||||
) -> None
|
||||
```
|
||||
**Behavior**:
|
||||
- Updates integration credentials
|
||||
- Encrypts sensitive data
|
||||
- No return value
|
||||
|
||||
### 3.5 User Communication APIs
|
||||
|
||||
#### get_active_user_ids_in_timerange
|
||||
```python
|
||||
async def get_active_user_ids_in_timerange(
|
||||
start_time: datetime,
|
||||
end_time: datetime
|
||||
) -> list[str]
|
||||
```
|
||||
**Behavior**:
|
||||
- Returns users with graph executions in range
|
||||
- Used for analytics/notifications
|
||||
|
||||
#### get_user_email_by_id
|
||||
```python
|
||||
async def get_user_email_by_id(
|
||||
user_id: str
|
||||
) -> str | None
|
||||
```
|
||||
**Behavior**:
|
||||
- Returns user's email address
|
||||
- None if user not found
|
||||
|
||||
#### get_user_email_verification
|
||||
```python
|
||||
async def get_user_email_verification(
|
||||
user_id: str
|
||||
) -> UserEmailVerification
|
||||
```
|
||||
**Behavior**:
|
||||
- Returns email and verification status
|
||||
- Used for notification filtering
|
||||
|
||||
#### get_user_notification_preference
|
||||
```python
|
||||
async def get_user_notification_preference(
|
||||
user_id: str
|
||||
) -> NotificationPreference
|
||||
```
|
||||
**Behavior**:
|
||||
- Returns notification settings
|
||||
- Creates default if not exists
|
||||
|
||||
### 3.6 Notification APIs
|
||||
|
||||
#### create_or_add_to_user_notification_batch
|
||||
```python
|
||||
async def create_or_add_to_user_notification_batch(
|
||||
user_id: str,
|
||||
notification_type: NotificationType,
|
||||
notification_data: NotificationEvent
|
||||
) -> UserNotificationBatchDTO
|
||||
```
|
||||
**Behavior**:
|
||||
- Adds to existing batch or creates new
|
||||
- Batches by type for efficiency
|
||||
- Returns updated batch
|
||||
|
||||
#### empty_user_notification_batch
|
||||
```python
|
||||
async def empty_user_notification_batch(
|
||||
user_id: str,
|
||||
notification_type: NotificationType
|
||||
) -> None
|
||||
```
|
||||
**Behavior**:
|
||||
- Clears all notifications of type
|
||||
- Used after sending batch
|
||||
|
||||
#### get_all_batches_by_type
|
||||
```python
|
||||
async def get_all_batches_by_type(
|
||||
notification_type: NotificationType
|
||||
) -> list[UserNotificationBatchDTO]
|
||||
```
|
||||
**Behavior**:
|
||||
- Returns all user batches of type
|
||||
- Used by notification service
|
||||
|
||||
#### get_user_notification_batch
|
||||
```python
|
||||
async def get_user_notification_batch(
|
||||
user_id: str,
|
||||
notification_type: NotificationType
|
||||
) -> UserNotificationBatchDTO | None
|
||||
```
|
||||
**Behavior**:
|
||||
- Returns user's batch for type
|
||||
- None if no batch exists
|
||||
|
||||
#### get_user_notification_oldest_message_in_batch
|
||||
```python
|
||||
async def get_user_notification_oldest_message_in_batch(
|
||||
user_id: str,
|
||||
notification_type: NotificationType
|
||||
) -> NotificationEvent | None
|
||||
```
|
||||
**Behavior**:
|
||||
- Returns oldest notification in batch
|
||||
- Used for batch timing decisions
|
||||
|
||||
## 4. Client Implementation Requirements
|
||||
|
||||
### 4.1 Synchronous Client
|
||||
|
||||
```python
|
||||
class DatabaseManagerClient(AppServiceClient):
|
||||
"""
|
||||
REQUIRED: Synchronous client that:
|
||||
- Converts async methods to sync using endpoint_to_sync
|
||||
- Maintains exact method signatures
|
||||
- Handles connection pooling
|
||||
- Implements retry logic
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def get_service_type(cls):
|
||||
return DatabaseManager
|
||||
|
||||
# Example method mapping
|
||||
get_graph_execution = endpoint_to_sync(DatabaseManager.get_graph_execution)
|
||||
```
|
||||
|
||||
### 4.2 Asynchronous Client
|
||||
|
||||
```python
|
||||
class DatabaseManagerAsyncClient(AppServiceClient):
|
||||
"""
|
||||
REQUIRED: Async client that:
|
||||
- Directly references async methods
|
||||
- No conversion needed
|
||||
- Shares connection pool
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def get_service_type(cls):
|
||||
return DatabaseManager
|
||||
|
||||
# Direct method reference
|
||||
get_graph_execution = DatabaseManager.get_graph_execution
|
||||
```
|
||||
|
||||
## 5. Data Models
|
||||
|
||||
### 5.1 Core Enums
|
||||
|
||||
```python
|
||||
class AgentExecutionStatus(str, Enum):
|
||||
PENDING = "PENDING"
|
||||
QUEUED = "QUEUED"
|
||||
RUNNING = "RUNNING"
|
||||
COMPLETED = "COMPLETED"
|
||||
FAILED = "FAILED"
|
||||
CANCELED = "CANCELED"
|
||||
|
||||
class NotificationType(str, Enum):
|
||||
SYSTEM = "SYSTEM"
|
||||
REVIEW = "REVIEW"
|
||||
EXECUTION = "EXECUTION"
|
||||
MARKETING = "MARKETING"
|
||||
```
|
||||
|
||||
### 5.2 Key Data Models
|
||||
|
||||
All models must exactly match the Prisma schema definitions. Key models include:
|
||||
|
||||
- `GraphExecution`: Execution metadata with stats
|
||||
- `GraphExecutionWithNodes`: Includes all node executions
|
||||
- `NodeExecutionResult`: Node execution with I/O data
|
||||
- `GraphModel`: Complete graph definition
|
||||
- `UserIntegrations`: OAuth credentials
|
||||
- `UsageTransactionMetadata`: Credit usage context
|
||||
- `NotificationEvent`: Individual notification data
|
||||
|
||||
## 6. Security Requirements
|
||||
|
||||
### 6.1 User Isolation
|
||||
- **CRITICAL**: All user-scoped operations MUST filter by user_id
|
||||
- Never expose data across user boundaries
|
||||
- Use database-level row security where possible
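
A minimal sketch of the user-isolation rule above, assuming a Prisma-style query layer; the model name `AgentGraphExecution` and the field names are illustrative, not the real schema.

```python
# Illustrative user-scoped query: every WHERE clause filters on user_id.
from prisma.models import AgentGraphExecution  # hypothetical model name

async def list_graph_executions(user_id: str, graph_id: str):
    return await AgentGraphExecution.prisma().find_many(
        where={
            "userId": user_id,        # never omit the user_id filter
            "agentGraphId": graph_id,
        },
        take=50,                      # default result-set limit (Section 7.2)
        order={"createdAt": "desc"},
    )
```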
|
||||
|
||||
### 6.2 Authentication
|
||||
- Service assumes authentication handled by API gateway
|
||||
- user_id parameter is trusted after authentication
|
||||
- No additional auth checks within service
|
||||
|
||||
### 6.3 Data Protection
|
||||
- Encrypt sensitive integration credentials
|
||||
- Use HMAC for unsubscribe tokens
|
||||
- Never log sensitive data
|
||||
|
||||
## 7. Performance Requirements
|
||||
|
||||
### 7.1 Connection Management
|
||||
- Maintain persistent database connection
|
||||
- Use connection pooling (default: 10 connections)
|
||||
- Implement exponential backoff for retries
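
A sketch of the retry policy described above using `tenacity` (already a platform dependency); the attempt count, wait bounds, and the body of `connect_db()` are illustrative.

```python
# Exponential backoff for (re)connecting the persistent DB connection.
from tenacity import retry, stop_after_attempt, wait_exponential

@retry(
    stop=stop_after_attempt(5),
    wait=wait_exponential(multiplier=0.5, max=30),
    reraise=True,
)
async def connect_db(prisma_client):
    # Re-establish the connection on transient failures.
    await prisma_client.connect()
```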
|
||||
|
||||
### 7.2 Query Optimization
|
||||
- Use indexes for all WHERE clauses
|
||||
- Batch operations where possible
|
||||
- Limit default result sets (50 items)
|
||||
|
||||
### 7.3 Event Publishing
|
||||
- Publish events asynchronously
|
||||
- Don't block on event delivery
|
||||
- Use fire-and-forget pattern
|
||||
|
||||
## 8. Error Handling
|
||||
|
||||
### 8.1 Standard Exceptions
|
||||
```python
|
||||
class InsufficientCredits(Exception):
|
||||
"""Raised when user lacks credits"""
|
||||
|
||||
class NotFoundError(Exception):
|
||||
"""Raised when entity not found"""
|
||||
|
||||
class AuthorizationError(Exception):
|
||||
"""Raised when user lacks access"""
|
||||
```
|
||||
|
||||
### 8.2 Error Response Format
|
||||
```json
|
||||
{
|
||||
"error": "error_type",
|
||||
"message": "Human readable message",
|
||||
"details": {} // Optional additional context
|
||||
}
|
||||
```
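
One possible mapping from the exceptions in 8.1 onto the JSON shape above; this helper is illustrative and not part of the specified API surface.

```python
# Converts a service exception into the documented error response shape.
def to_error_response(exc: Exception) -> dict:
    return {
        "error": type(exc).__name__,            # e.g. "InsufficientCredits"
        "message": str(exc) or "Unexpected error",
        "details": getattr(exc, "details", None) or {},
    }
```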
|
||||
|
||||
## 9. Testing Requirements
|
||||
|
||||
### 9.1 Unit Tests
|
||||
- Test each method in isolation
|
||||
- Mock database calls
|
||||
- Verify user_id filtering
|
||||
|
||||
### 9.2 Integration Tests
|
||||
- Test with real database
|
||||
- Verify transaction boundaries
|
||||
- Test concurrent operations
|
||||
|
||||
### 9.3 Service Tests
|
||||
- Test HTTP endpoint generation
|
||||
- Verify serialization/deserialization
|
||||
- Test error handling
|
||||
|
||||
## 10. Implementation Checklist
|
||||
|
||||
### Phase 1: Core Service Setup
|
||||
- [ ] Create DatabaseManager class inheriting from AppService
|
||||
- [ ] Implement run_service() with database connection
|
||||
- [ ] Implement cleanup() with proper disconnect
|
||||
- [ ] Configure port from settings
|
||||
- [ ] Set up method exposure helper
|
||||
|
||||
### Phase 2: Execution APIs (17 methods)
|
||||
- [ ] get_graph_execution
|
||||
- [ ] get_graph_executions
|
||||
- [ ] get_graph_execution_meta
|
||||
- [ ] create_graph_execution
|
||||
- [ ] update_graph_execution_start_time
|
||||
- [ ] update_graph_execution_stats
|
||||
- [ ] get_node_execution
|
||||
- [ ] get_node_executions
|
||||
- [ ] get_latest_node_execution
|
||||
- [ ] update_node_execution_status
|
||||
- [ ] update_node_execution_status_batch
|
||||
- [ ] update_node_execution_stats
|
||||
- [ ] upsert_execution_input
|
||||
- [ ] upsert_execution_output
|
||||
- [ ] get_execution_kv_data
|
||||
- [ ] set_execution_kv_data
|
||||
- [ ] get_block_error_stats
|
||||
|
||||
### Phase 3: Graph APIs (4 methods)
|
||||
- [ ] get_node
|
||||
- [ ] get_graph
|
||||
- [ ] get_connected_output_nodes
|
||||
- [ ] get_graph_metadata
|
||||
|
||||
### Phase 4: Credit APIs (2 methods)
|
||||
- [ ] get_credits
|
||||
- [ ] spend_credits
|
||||
|
||||
### Phase 5: User APIs (4 methods)
|
||||
- [ ] get_user_metadata
|
||||
- [ ] update_user_metadata
|
||||
- [ ] get_user_integrations
|
||||
- [ ] update_user_integrations
|
||||
|
||||
### Phase 6: Communication APIs (4 methods)
|
||||
- [ ] get_active_user_ids_in_timerange
|
||||
- [ ] get_user_email_by_id
|
||||
- [ ] get_user_email_verification
|
||||
- [ ] get_user_notification_preference
|
||||
|
||||
### Phase 7: Notification APIs (5 methods)
|
||||
- [ ] create_or_add_to_user_notification_batch
|
||||
- [ ] empty_user_notification_batch
|
||||
- [ ] get_all_batches_by_type
|
||||
- [ ] get_user_notification_batch
|
||||
- [ ] get_user_notification_oldest_message_in_batch
|
||||
|
||||
### Phase 8: Client Implementation
|
||||
- [ ] Create DatabaseManagerClient with sync methods
|
||||
- [ ] Create DatabaseManagerAsyncClient with async methods
|
||||
- [ ] Test client method generation
|
||||
- [ ] Verify type preservation
|
||||
|
||||
### Phase 9: Integration Testing
|
||||
- [ ] Test all methods with real database
|
||||
- [ ] Verify user isolation
|
||||
- [ ] Test error scenarios
|
||||
- [ ] Performance testing
|
||||
- [ ] Event publishing verification
|
||||
|
||||
### Phase 10: Deployment Validation
|
||||
- [ ] Deploy to test environment
|
||||
- [ ] Run integration test suite
|
||||
- [ ] Verify backward compatibility
|
||||
- [ ] Performance benchmarking
|
||||
- [ ] Production deployment
|
||||
|
||||
## 11. Success Criteria
|
||||
|
||||
The implementation is successful when:
|
||||
|
||||
1. **All 40+ methods** produce identical outputs to the original
|
||||
2. **Performance** is within 10% of original implementation
|
||||
3. **All tests** pass without modification
|
||||
4. **No breaking changes** to any client code
|
||||
5. **Security boundaries** are maintained
|
||||
6. **Event publishing** works identically
|
||||
7. **Error handling** matches original behavior
|
||||
|
||||
## 12. Critical Implementation Notes
|
||||
|
||||
1. **DO NOT** modify any function signatures
|
||||
2. **DO NOT** change any return types
|
||||
3. **DO NOT** add new required parameters
|
||||
4. **DO NOT** remove any functionality
|
||||
5. **ALWAYS** maintain user_id isolation
|
||||
6. **ALWAYS** publish events for state changes
|
||||
7. **ALWAYS** use transactions for multi-step operations
|
||||
8. **ALWAYS** handle errors exactly as original
|
||||
|
||||
This specification, when implemented correctly, will produce a drop-in replacement for the DatabaseManager that maintains 100% compatibility with the existing system.
|
||||
@@ -1,765 +0,0 @@
|
||||
# Notification Service Technical Specification
|
||||
|
||||
## Overview
|
||||
|
||||
The AutoGPT Platform Notification Service is a RabbitMQ-based asynchronous notification system that handles various types of user notifications including real-time alerts, batched notifications, and scheduled summaries. The service supports email delivery via Postmark and system alerts via Discord.
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
### Core Components
|
||||
|
||||
1. **NotificationManager Service** (`notifications.py`)
|
||||
- AppService implementation with RabbitMQ integration
|
||||
- Processes notification queues asynchronously
|
||||
- Manages batching strategies and delivery timing
|
||||
- Handles email templating and sending
|
||||
|
||||
2. **RabbitMQ Message Broker**
|
||||
- Multiple queues for different notification strategies
|
||||
- Dead letter exchange for failed messages
|
||||
- Topic-based routing for message distribution
|
||||
|
||||
3. **Email Sender** (`email.py`)
|
||||
- Postmark integration for email delivery
|
||||
- Jinja2 template rendering
|
||||
- HTML email composition with unsubscribe headers
|
||||
|
||||
4. **Database Storage**
|
||||
- Notification batching tables
|
||||
- User preference storage
|
||||
- Email verification tracking
|
||||
|
||||
## Service Exposure Mechanism
|
||||
|
||||
### AppService Framework
|
||||
|
||||
The NotificationManager extends `AppService` which automatically exposes methods decorated with `@expose` as HTTP endpoints:
|
||||
|
||||
```python
|
||||
class NotificationManager(AppService):
|
||||
@expose
|
||||
def queue_weekly_summary(self):
|
||||
# Implementation
|
||||
|
||||
@expose
|
||||
def process_existing_batches(self, notification_types: list[NotificationType]):
|
||||
# Implementation
|
||||
|
||||
@expose
|
||||
async def discord_system_alert(self, content: str):
|
||||
# Implementation
|
||||
```
|
||||
|
||||
### Automatic HTTP Endpoint Creation
|
||||
|
||||
When the service starts, the AppService base class:
|
||||
1. Scans for methods with `@expose` decorator
|
||||
2. Creates FastAPI routes for each exposed method:
|
||||
- Route path: `/{method_name}`
|
||||
- HTTP method: POST
|
||||
- Endpoint handler: Generated via `_create_fastapi_endpoint()`
|
||||
|
||||
### Service Client Access
|
||||
|
||||
#### NotificationManagerClient
|
||||
```python
|
||||
class NotificationManagerClient(AppServiceClient):
|
||||
@classmethod
|
||||
def get_service_type(cls):
|
||||
return NotificationManager
|
||||
|
||||
# Direct method references (sync)
|
||||
process_existing_batches = NotificationManager.process_existing_batches
|
||||
queue_weekly_summary = NotificationManager.queue_weekly_summary
|
||||
|
||||
# Async-to-sync conversion
|
||||
discord_system_alert = endpoint_to_sync(NotificationManager.discord_system_alert)
|
||||
```
|
||||
|
||||
#### Client Usage Pattern
|
||||
```python
|
||||
# Get client instance
|
||||
client = get_service_client(NotificationManagerClient)
|
||||
|
||||
# Call exposed methods via HTTP
|
||||
client.process_existing_batches([NotificationType.AGENT_RUN])
|
||||
client.queue_weekly_summary()
|
||||
client.discord_system_alert("System alert message")
|
||||
```
|
||||
|
||||
### HTTP Communication Details
|
||||
|
||||
1. **Service URL**: `http://{host}:{notification_service_port}`
|
||||
- Default port: 8007
|
||||
- Host: Configurable via settings
|
||||
|
||||
2. **Request Format**:
|
||||
- Method: POST
|
||||
- Path: `/{method_name}`
|
||||
- Body: JSON with method parameters
|
||||
|
||||
3. **Client Implementation**:
|
||||
- Uses `httpx` for HTTP requests
|
||||
- Automatic retry on connection failures
|
||||
- Configurable timeout (default from api_call_timeout)
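
For illustration, the raw-HTTP equivalent of a client call is sketched below; in practice the AppServiceClient machinery issues this request, so the helper name and defaults are assumptions.

```python
# Raw HTTP call matching the request format above (POST /{method_name}, JSON body).
import httpx

def call_notification_service(host: str, port: int, method: str,
                              payload: dict, timeout: float = 30.0) -> dict:
    url = f"http://{host}:{port}/{method}"
    response = httpx.post(url, json=payload, timeout=timeout)
    response.raise_for_status()
    return response.json()

# e.g. call_notification_service("localhost", 8007, "queue_weekly_summary", {})
```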
|
||||
|
||||
### Direct Function Calls
|
||||
|
||||
The service also exposes two functions that can be called directly without going through the service client:
|
||||
|
||||
```python
|
||||
# Sync version - used by ExecutionManager
|
||||
def queue_notification(event: NotificationEventModel) -> NotificationResult
|
||||
|
||||
# Async version - used by credit system
|
||||
async def queue_notification_async(event: NotificationEventModel) -> NotificationResult
|
||||
```
|
||||
|
||||
These functions:
|
||||
- Connect directly to RabbitMQ
|
||||
- Publish messages to appropriate queues
|
||||
- Return success/failure status
|
||||
- Are NOT exposed via HTTP
|
||||
|
||||
## Message Queuing Architecture
|
||||
|
||||
### RabbitMQ Configuration
|
||||
|
||||
#### Exchanges
|
||||
```python
|
||||
NOTIFICATION_EXCHANGE = Exchange(name="notifications", type=ExchangeType.TOPIC)
|
||||
DEAD_LETTER_EXCHANGE = Exchange(name="dead_letter", type=ExchangeType.TOPIC)
|
||||
```
|
||||
|
||||
#### Queues
|
||||
1. **immediate_notifications**
|
||||
- Routing Key: `notification.immediate.#`
|
||||
- Dead Letter: `failed.immediate`
|
||||
- For: Critical alerts, errors
|
||||
|
||||
2. **admin_notifications**
|
||||
- Routing Key: `notification.admin.#`
|
||||
- Dead Letter: `failed.admin`
|
||||
- For: Refund requests, system alerts
|
||||
|
||||
3. **summary_notifications**
|
||||
- Routing Key: `notification.summary.#`
|
||||
- Dead Letter: `failed.summary`
|
||||
- For: Daily/weekly summaries
|
||||
|
||||
4. **batch_notifications**
|
||||
- Routing Key: `notification.batch.#`
|
||||
- Dead Letter: `failed.batch`
|
||||
- For: Agent runs, batched events
|
||||
|
||||
5. **failed_notifications**
|
||||
- Routing Key: `failed.#`
|
||||
- For: All failed messages
|
||||
|
||||
### Queue Strategies (QueueType enum)
|
||||
|
||||
1. **IMMEDIATE**: Send right away (errors, critical notifications)
|
||||
2. **BATCH**: Batch for configured delay (agent runs)
|
||||
3. **SUMMARY**: Scheduled digest (daily/weekly summaries)
|
||||
4. **BACKOFF**: Exponential backoff strategy (defined but not fully implemented)
|
||||
5. **ADMIN**: Admin-only notifications
|
||||
|
||||
## Notification Types
|
||||
|
||||
### Enum Values (NotificationType)
|
||||
```python
|
||||
AGENT_RUN # Batch strategy, 1 day delay
|
||||
ZERO_BALANCE # Backoff strategy, 60 min delay
|
||||
LOW_BALANCE # Immediate strategy
|
||||
BLOCK_EXECUTION_FAILED # Backoff strategy, 60 min delay
|
||||
CONTINUOUS_AGENT_ERROR # Backoff strategy, 60 min delay
|
||||
DAILY_SUMMARY # Summary strategy
|
||||
WEEKLY_SUMMARY # Summary strategy
|
||||
MONTHLY_SUMMARY # Summary strategy
|
||||
REFUND_REQUEST # Admin strategy
|
||||
REFUND_PROCESSED # Admin strategy
|
||||
```
|
||||
|
||||
## Integration Points
|
||||
|
||||
### 1. Scheduler Integration
|
||||
The scheduler service (`backend.executor.scheduler`) imports monitoring functions that call the NotificationManagerClient:
|
||||
|
||||
```python
|
||||
from backend.monitoring import (
|
||||
process_existing_batches,
|
||||
process_weekly_summary,
|
||||
)
|
||||
|
||||
# These are scheduled as cron jobs
|
||||
```
|
||||
|
||||
### 2. Execution Manager Integration
|
||||
The ExecutionManager directly calls `queue_notification()` for:
|
||||
- Agent run completions
|
||||
- Low balance alerts
|
||||
|
||||
```python
|
||||
from backend.notifications.notifications import queue_notification
|
||||
|
||||
# Called after graph execution completes
|
||||
queue_notification(NotificationEventModel(
|
||||
user_id=graph_exec.user_id,
|
||||
type=NotificationType.AGENT_RUN,
|
||||
data=AgentRunData(...)
|
||||
))
|
||||
```
|
||||
|
||||
### 3. Credit System Integration
|
||||
The credit system uses `queue_notification_async()` for:
|
||||
- Refund requests
|
||||
- Refund processed notifications
|
||||
|
||||
```python
|
||||
from backend.notifications.notifications import queue_notification_async
|
||||
|
||||
await queue_notification_async(NotificationEventModel(
|
||||
user_id=user_id,
|
||||
type=NotificationType.REFUND_REQUEST,
|
||||
data=RefundRequestData(...)
|
||||
))
|
||||
```
|
||||
|
||||
### 4. Monitoring Module Wrappers
|
||||
The monitoring module provides wrapper functions that are used by the scheduler:
|
||||
|
||||
```python
|
||||
# backend/monitoring/notification_monitor.py
|
||||
def process_existing_batches(**kwargs):
|
||||
args = NotificationJobArgs(**kwargs)
|
||||
get_notification_manager_client().process_existing_batches(
|
||||
args.notification_types
|
||||
)
|
||||
|
||||
def process_weekly_summary(**kwargs):
|
||||
get_notification_manager_client().queue_weekly_summary()
|
||||
```
|
||||
|
||||
## Data Models
|
||||
|
||||
### Base Event Model
|
||||
```typescript
|
||||
interface BaseEventModel {
|
||||
type: NotificationType;
|
||||
user_id: string;
|
||||
created_at: string; // ISO datetime with timezone
|
||||
}
|
||||
```
|
||||
|
||||
### Notification Event Model
|
||||
```typescript
|
||||
interface NotificationEventModel<T> extends BaseEventModel {
|
||||
data: T;
|
||||
}
|
||||
```
|
||||
|
||||
### Notification Data Types
|
||||
|
||||
#### AgentRunData
|
||||
```typescript
|
||||
interface AgentRunData {
|
||||
agent_name: string;
|
||||
credits_used: number;
|
||||
execution_time: number;
|
||||
node_count: number;
|
||||
graph_id: string;
|
||||
outputs: Array<Record<string, any>>;
|
||||
}
|
||||
```
|
||||
|
||||
#### ZeroBalanceData
|
||||
```typescript
|
||||
interface ZeroBalanceData {
|
||||
last_transaction: number;
|
||||
last_transaction_time: string; // ISO datetime with timezone
|
||||
top_up_link: string;
|
||||
}
|
||||
```
|
||||
|
||||
#### LowBalanceData
|
||||
```typescript
|
||||
interface LowBalanceData {
|
||||
agent_name: string;
|
||||
current_balance: number; // credits (100 = $1)
|
||||
billing_page_link: string;
|
||||
shortfall: number;
|
||||
}
|
||||
```
|
||||
|
||||
#### BlockExecutionFailedData
|
||||
```typescript
|
||||
interface BlockExecutionFailedData {
|
||||
block_name: string;
|
||||
block_id: string;
|
||||
error_message: string;
|
||||
graph_id: string;
|
||||
node_id: string;
|
||||
execution_id: string;
|
||||
}
|
||||
```
|
||||
|
||||
#### ContinuousAgentErrorData
|
||||
```typescript
|
||||
interface ContinuousAgentErrorData {
|
||||
agent_name: string;
|
||||
error_message: string;
|
||||
graph_id: string;
|
||||
execution_id: string;
|
||||
start_time: string; // ISO datetime with timezone
|
||||
error_time: string; // ISO datetime with timezone
|
||||
attempts: number;
|
||||
}
|
||||
```
|
||||
|
||||
#### Summary Data Types
|
||||
```typescript
|
||||
interface BaseSummaryData {
|
||||
total_credits_used: number;
|
||||
total_executions: number;
|
||||
most_used_agent: string;
|
||||
total_execution_time: number;
|
||||
successful_runs: number;
|
||||
failed_runs: number;
|
||||
average_execution_time: number;
|
||||
cost_breakdown: Record<string, number>;
|
||||
}
|
||||
|
||||
interface DailySummaryData extends BaseSummaryData {
|
||||
date: string; // ISO datetime with timezone
|
||||
}
|
||||
|
||||
interface WeeklySummaryData extends BaseSummaryData {
|
||||
start_date: string; // ISO datetime with timezone
|
||||
end_date: string; // ISO datetime with timezone
|
||||
}
|
||||
```
|
||||
|
||||
#### RefundRequestData
|
||||
```typescript
|
||||
interface RefundRequestData {
|
||||
user_id: string;
|
||||
user_name: string;
|
||||
user_email: string;
|
||||
transaction_id: string;
|
||||
refund_request_id: string;
|
||||
reason: string;
|
||||
amount: number;
|
||||
balance: number;
|
||||
}
|
||||
```
|
||||
|
||||
### Summary Parameters
|
||||
```typescript
|
||||
interface BaseSummaryParams {
|
||||
start_date: string; // ISO datetime with timezone
|
||||
end_date: string; // ISO datetime with timezone
|
||||
}
|
||||
|
||||
interface DailySummaryParams extends BaseSummaryParams {
|
||||
date: string; // ISO datetime with timezone
|
||||
}
|
||||
|
||||
interface WeeklySummaryParams extends BaseSummaryParams {
|
||||
start_date: string; // ISO datetime with timezone
|
||||
end_date: string; // ISO datetime with timezone
|
||||
}
|
||||
```
|
||||
|
||||
## Database Schema
|
||||
|
||||
### NotificationEvent Table
|
||||
```prisma
|
||||
model NotificationEvent {
|
||||
id String @id @default(uuid())
|
||||
createdAt DateTime @default(now())
|
||||
updatedAt DateTime @default(now()) @updatedAt
|
||||
UserNotificationBatch UserNotificationBatch? @relation
|
||||
userNotificationBatchId String?
|
||||
type NotificationType
|
||||
data Json
|
||||
@@index([userNotificationBatchId])
|
||||
}
|
||||
```
|
||||
|
||||
### UserNotificationBatch Table
|
||||
```prisma
|
||||
model UserNotificationBatch {
|
||||
id String @id @default(uuid())
|
||||
createdAt DateTime @default(now())
|
||||
updatedAt DateTime @default(now()) @updatedAt
|
||||
userId String
|
||||
User User @relation
|
||||
type NotificationType
|
||||
Notifications NotificationEvent[]
|
||||
@@unique([userId, type])
|
||||
}
|
||||
```
|
||||
|
||||
## API Methods
|
||||
|
||||
### Exposed Service Methods (via HTTP)
|
||||
|
||||
#### queue_weekly_summary()
|
||||
- **HTTP Endpoint**: `POST /queue_weekly_summary`
|
||||
- **Purpose**: Triggers weekly summary generation for all active users
|
||||
- **Process**:
|
||||
1. Runs in background executor
|
||||
2. Queries users active in last 7 days
|
||||
3. Queues summary notification for each user
|
||||
- **Used by**: Scheduler service (via cron)
|
||||
|
||||
#### process_existing_batches(notification_types: list[NotificationType])
|
||||
- **HTTP Endpoint**: `POST /process_existing_batches`
|
||||
- **Purpose**: Processes aged-out batches for specified notification types
|
||||
- **Process**:
|
||||
1. Runs in background executor
|
||||
2. Retrieves all batches for given types
|
||||
3. Checks if oldest message exceeds max delay
|
||||
4. Sends batched email if aged out
|
||||
5. Clears processed batches
|
||||
- **Used by**: Scheduler service (via cron)
|
||||
|
||||
#### discord_system_alert(content: str)
|
||||
- **HTTP Endpoint**: `POST /discord_system_alert`
|
||||
- **Purpose**: Sends system alerts to Discord channel
|
||||
- **Async**: Yes (converted to sync by client)
|
||||
- **Used by**: Monitoring services
|
||||
|
||||
### Direct Queue Functions (not via HTTP)
|
||||
|
||||
#### queue_notification(event: NotificationEventModel) -> NotificationResult
|
||||
- **Purpose**: Queue a notification (sync version)
|
||||
- **Used by**: ExecutionManager (same process)
|
||||
- **Direct RabbitMQ**: Yes
|
||||
|
||||
#### queue_notification_async(event: NotificationEventModel) -> NotificationResult
|
||||
- **Purpose**: Queue a notification (async version)
|
||||
- **Used by**: Credit system (async context)
|
||||
- **Direct RabbitMQ**: Yes
|
||||
|
||||
## Message Processing Flow
|
||||
|
||||
### 1. Message Routing
|
||||
```python
|
||||
def get_routing_key(event_type: NotificationType) -> str:
|
||||
strategy = NotificationTypeOverride(event_type).strategy
|
||||
if strategy == QueueType.IMMEDIATE:
|
||||
return f"notification.immediate.{event_type.value}"
|
||||
elif strategy == QueueType.BATCH:
|
||||
return f"notification.batch.{event_type.value}"
|
||||
# ... etc
|
||||
```
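
A possible completion of the elided branches above, using only the routing keys documented in the queue list and the BACKOFF note in "Implementation Status Notes" (backoff messages go to the immediate queue); this is a sketch, not the actual implementation.

```python
# Sketch: map each strategy to its documented routing-key prefix.
def get_routing_key_full(event_type: NotificationType) -> str:
    strategy = NotificationTypeOverride(event_type).strategy
    prefix = {
        QueueType.IMMEDIATE: "notification.immediate",
        QueueType.BACKOFF: "notification.immediate",  # backoff not implemented
        QueueType.BATCH: "notification.batch",
        QueueType.SUMMARY: "notification.summary",
        QueueType.ADMIN: "notification.admin",
    }[strategy]
    return f"{prefix}.{event_type.value}"
```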
|
||||
|
||||
### 2. Queue Processing Methods
|
||||
|
||||
#### _process_immediate(message: str) -> bool
|
||||
1. Parse message to NotificationEventModel
|
||||
2. Retrieve user email
|
||||
3. Check user preferences and email verification
|
||||
4. Send email immediately via EmailSender
|
||||
5. Return True if successful
|
||||
|
||||
#### _process_batch(message: str) -> bool
|
||||
1. Parse message to NotificationEventModel
|
||||
2. Add to user's notification batch
|
||||
3. Check if batch is old enough (based on delay)
|
||||
4. If aged out:
|
||||
- Retrieve all batch messages
|
||||
- Send combined email
|
||||
- Clear batch
|
||||
5. Return True if processed or batched
|
||||
|
||||
#### _process_summary(message: str) -> bool
|
||||
1. Parse message to SummaryParamsEventModel
|
||||
2. Gather summary data (credits, executions, etc.)
|
||||
- **Note**: Currently returns hardcoded placeholder data
|
||||
3. Format and send summary email
|
||||
4. Return True if successful
|
||||
|
||||
#### _process_admin_message(message: str) -> bool
|
||||
1. Parse message
|
||||
2. Send to configured admin email
|
||||
3. No user preference checks
|
||||
4. Return True if successful
|
||||
|
||||
## Email Delivery
|
||||
|
||||
### EmailSender Class
|
||||
|
||||
#### Template Loading
|
||||
- Base template: `templates/base.html.jinja2`
|
||||
- Notification templates: `templates/{notification_type}.html.jinja2`
|
||||
- Subject templates from NotificationTypeOverride
|
||||
- **Note**: Templates use `.html.jinja2` extension, not just `.html`
|
||||
|
||||
#### Email Composition
|
||||
```python
|
||||
def send_templated(
|
||||
notification: NotificationType,
|
||||
user_email: str,
|
||||
data: NotificationEventModel | list[NotificationEventModel],
|
||||
user_unsub_link: str | None = None
|
||||
)
|
||||
```
|
||||
|
||||
#### Postmark Integration
|
||||
- API Token: `settings.secrets.postmark_server_api_token`
|
||||
- Sender Email: `settings.config.postmark_sender_email`
|
||||
- Headers:
|
||||
- `List-Unsubscribe-Post: List-Unsubscribe=One-Click`
|
||||
- `List-Unsubscribe: <{unsubscribe_link}>`
|
||||
|
||||
## User Preferences and Permissions
|
||||
|
||||
### Email Verification Check
|
||||
```python
|
||||
validated_email = get_db().get_user_email_verification(user_id)
|
||||
```
|
||||
|
||||
### Notification Preferences
|
||||
```python
|
||||
preferences = get_db().get_user_notification_preference(user_id).preferences
|
||||
# Returns dict[NotificationType, bool]
|
||||
```
|
||||
|
||||
### Preference Fields in User Model
|
||||
- `notifyOnAgentRun`
|
||||
- `notifyOnZeroBalance`
|
||||
- `notifyOnLowBalance`
|
||||
- `notifyOnBlockExecutionFailed`
|
||||
- `notifyOnContinuousAgentError`
|
||||
- `notifyOnDailySummary`
|
||||
- `notifyOnWeeklySummary`
|
||||
- `notifyOnMonthlySummary`
|
||||
|
||||
### Unsubscribe Link Generation
|
||||
```python
|
||||
def generate_unsubscribe_link(user_id: str) -> str:
|
||||
# HMAC-SHA256 signed token
|
||||
# Format: base64(user_id:signature_hex)
|
||||
# URL: {platform_base_url}/api/email/unsubscribe?token={token}
|
||||
```
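
One concrete realization of the token format sketched above, using only the Python standard library; the helper signature and the url-safe base64 choice are assumptions.

```python
# HMAC-SHA256 token: base64(user_id:signature_hex) appended to the unsubscribe URL.
import base64
import hashlib
import hmac

def build_unsubscribe_link(user_id: str, secret_key: str, platform_base_url: str) -> str:
    signature = hmac.new(secret_key.encode(), user_id.encode(), hashlib.sha256).hexdigest()
    token = base64.urlsafe_b64encode(f"{user_id}:{signature}".encode()).decode()
    return f"{platform_base_url}/api/email/unsubscribe?token={token}"
```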
|
||||
|
||||
## Batching Logic
|
||||
|
||||
### Batch Delays (get_batch_delay)
|
||||
|
||||
**Note**: The delay configuration exists for multiple notification types, but only notifications with `QueueType.BATCH` strategy actually use batching. Others use different strategies:
|
||||
|
||||
- `AGENT_RUN`: 1 day (Strategy: BATCH - actually uses batching)
|
||||
- `ZERO_BALANCE`: 60 minutes configured (Strategy: BACKOFF - not batched)
|
||||
- `LOW_BALANCE`: 60 minutes configured (Strategy: IMMEDIATE - sent immediately)
|
||||
- `BLOCK_EXECUTION_FAILED`: 60 minutes configured (Strategy: BACKOFF - not batched)
|
||||
- `CONTINUOUS_AGENT_ERROR`: 60 minutes configured (Strategy: BACKOFF - not batched)
|
||||
|
||||
### Batch Processing
|
||||
1. Messages added to UserNotificationBatch
|
||||
2. Oldest message timestamp tracked
|
||||
3. When `oldest_timestamp + delay < now()`:
|
||||
- Batch is processed
|
||||
- All messages sent in single email
|
||||
- Batch cleared
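
A sketch of the age-out check described above; `get_batch_delay` is the function named in this spec, while the `db` handle and the datetime handling are illustrative.

```python
# Returns True when the oldest batched message has exceeded its configured delay.
from datetime import datetime, timezone

async def batch_is_aged_out(db, user_id: str, ntype: NotificationType) -> bool:
    oldest = await db.get_user_notification_oldest_message_in_batch(user_id, ntype)
    if oldest is None:
        return False
    # Assumes created_at is (or parses to) a timezone-aware datetime
    # and get_batch_delay returns a timedelta (e.g. 1 day for AGENT_RUN).
    return oldest.created_at + get_batch_delay(ntype) < datetime.now(timezone.utc)
```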
|
||||
|
||||
## Service Lifecycle
|
||||
|
||||
### Startup
|
||||
1. Initialize FastAPI app with exposed endpoints
|
||||
2. Start HTTP server on port 8007
|
||||
3. Initialize RabbitMQ connection
|
||||
4. Create/verify exchanges and queues
|
||||
5. Set up queue consumers
|
||||
6. Start processing loop
|
||||
|
||||
### Main Loop
|
||||
```python
|
||||
while self.running:
|
||||
await self._run_queue(immediate_queue, self._process_immediate, ...)
|
||||
await self._run_queue(admin_queue, self._process_admin_message, ...)
|
||||
await self._run_queue(batch_queue, self._process_batch, ...)
|
||||
await self._run_queue(summary_queue, self._process_summary, ...)
|
||||
await asyncio.sleep(0.1)
|
||||
```
|
||||
|
||||
### Shutdown
|
||||
1. Set `running = False`
|
||||
2. Disconnect RabbitMQ
|
||||
3. Cleanup resources
|
||||
|
||||
## Configuration
|
||||
|
||||
### Environment Variables
|
||||
```python
|
||||
# Service Configuration
|
||||
notification_service_port: int = 8007
|
||||
|
||||
# Email Configuration
|
||||
postmark_sender_email: str = "invalid@invalid.com"
|
||||
refund_notification_email: str = "refund@agpt.co"
|
||||
|
||||
# Security
|
||||
unsubscribe_secret_key: str = ""
|
||||
|
||||
# Secrets
|
||||
postmark_server_api_token: str = ""
|
||||
postmark_webhook_token: str = ""
|
||||
discord_bot_token: str = ""
|
||||
|
||||
# Platform URLs
|
||||
platform_base_url: str
|
||||
frontend_base_url: str
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Message Processing Errors
|
||||
- Failed messages sent to dead letter queue
|
||||
- Validation errors logged but don't crash service
|
||||
- Connection errors trigger retry with `@continuous_retry()`
|
||||
|
||||
### RabbitMQ ACK/NACK Protocol
|
||||
- Success: `message.ack()`
|
||||
- Failure: `message.reject(requeue=False)`
|
||||
- Timeout/Queue empty: Continue loop
|
||||
|
||||
### HTTP Endpoint Errors
|
||||
- Wrapped in RemoteCallError for client
|
||||
- Automatic retry available via client configuration
|
||||
- Connection failures tracked and logged
|
||||
|
||||
## System Integrations
|
||||
|
||||
### DatabaseManagerClient
|
||||
- User email retrieval
|
||||
- Email verification status
|
||||
- Notification preferences
|
||||
- Batch management
|
||||
- Active user queries
|
||||
|
||||
### Discord Integration
|
||||
- Uses SendDiscordMessageBlock
|
||||
- Configured via discord_bot_token
|
||||
- For system alerts only
|
||||
|
||||
## Implementation Checklist
|
||||
|
||||
1. **Core Service**
|
||||
- [ ] AppService implementation with @expose decorators
|
||||
- [ ] FastAPI endpoint generation
|
||||
- [ ] RabbitMQ connection management
|
||||
- [ ] Queue consumer setup
|
||||
- [ ] Message routing logic
|
||||
|
||||
2. **Service Client**
|
||||
- [ ] NotificationManagerClient implementation
|
||||
- [ ] HTTP client configuration
|
||||
- [ ] Method mapping to service endpoints
|
||||
- [ ] Async-to-sync conversions
|
||||
|
||||
3. **Message Processing**
|
||||
- [ ] Parse and validate all notification types
|
||||
- [ ] Implement all queue strategies
|
||||
- [ ] Batch management with delays
|
||||
- [ ] Summary data gathering
|
||||
|
||||
4. **Email Delivery**
|
||||
- [ ] Postmark integration
|
||||
- [ ] Template loading and rendering
|
||||
- [ ] Unsubscribe header support
|
||||
- [ ] HTML email composition
|
||||
|
||||
5. **User Management**
|
||||
- [ ] Preference checking
|
||||
- [ ] Email verification
|
||||
- [ ] Unsubscribe link generation
|
||||
- [ ] Daily limit tracking
|
||||
|
||||
6. **Batching System**
|
||||
- [ ] Database batch operations
|
||||
- [ ] Age-out checking
|
||||
- [ ] Batch clearing after send
|
||||
- [ ] Oldest message tracking
|
||||
|
||||
7. **Error Handling**
|
||||
- [ ] Dead letter queue routing
|
||||
- [ ] Message rejection on failure
|
||||
- [ ] Continuous retry wrapper
|
||||
- [ ] Validation error logging
|
||||
|
||||
8. **Scheduled Operations**
|
||||
- [ ] Weekly summary generation
|
||||
- [ ] Batch processing triggers
|
||||
- [ ] Background executor usage
|
||||
|
||||
## Security Considerations
|
||||
|
||||
1. **Service-to-Service Communication**:
|
||||
- HTTP endpoints only accessible internally
|
||||
- No authentication on service endpoints (internal network only)
|
||||
- Service discovery via host/port configuration
|
||||
|
||||
2. **User Security**:
|
||||
- Email verification required for all user notifications
|
||||
- Unsubscribe tokens HMAC-signed
|
||||
- User preferences enforced
|
||||
|
||||
3. **Admin Notifications**:
|
||||
- Separate queue, no user preference checks
|
||||
- Fixed admin email configuration
|
||||
|
||||
## Testing Considerations
|
||||
|
||||
1. **Unit Tests**
|
||||
- Message parsing and validation
|
||||
- Routing key generation
|
||||
- Batch delay calculations
|
||||
- Template rendering
|
||||
|
||||
2. **Integration Tests**
|
||||
- HTTP endpoint accessibility
|
||||
- Service client method calls
|
||||
- RabbitMQ message flow
|
||||
- Database batch operations
|
||||
- Email sending (mock Postmark)
|
||||
|
||||
3. **Load Tests**
|
||||
- High volume message processing
|
||||
- Concurrent HTTP requests
|
||||
- Batch accumulation limits
|
||||
- Memory usage under load
|
||||
|
||||
## Implementation Status Notes
|
||||
|
||||
1. **Backoff Strategy**: While `QueueType.BACKOFF` is defined and used by several notification types (ZERO_BALANCE, BLOCK_EXECUTION_FAILED, CONTINUOUS_AGENT_ERROR), the actual exponential backoff processing logic is not implemented; these messages are currently routed to the immediate queue instead.
|
||||
|
||||
2. **Summary Data**: The `_gather_summary_data()` method currently returns hardcoded placeholder values rather than querying actual execution data from the database.
|
||||
|
||||
3. **Batch Processing**: Only `AGENT_RUN` notifications actually use batch processing. Other notification types with configured delays use different strategies (IMMEDIATE or BACKOFF).
|
||||
|
||||
## Future Enhancements
|
||||
|
||||
1. **Additional Channels**
|
||||
- SMS notifications (not implemented)
|
||||
- Webhook notifications (not implemented)
|
||||
- In-app notifications
|
||||
|
||||
2. **Advanced Batching**
|
||||
- Dynamic batch sizes
|
||||
- Priority-based processing
|
||||
- Custom delay configurations
|
||||
|
||||
3. **Analytics**
|
||||
- Delivery tracking
|
||||
- Open/click rates
|
||||
- Notification effectiveness metrics
|
||||
|
||||
4. **Service Improvements**
|
||||
- Authentication for HTTP endpoints
|
||||
- Rate limiting per user
|
||||
- Circuit breaker patterns
|
||||
- Implement actual backoff processing for BACKOFF strategy
|
||||
- Implement real summary data gathering
|
||||
@@ -1,474 +0,0 @@
|
||||
# AutoGPT Platform Scheduler Technical Specification
|
||||
|
||||
## Executive Summary
|
||||
|
||||
This document provides a comprehensive technical specification for the AutoGPT Platform Scheduler service. The scheduler is responsible for managing scheduled graph executions, system monitoring tasks, and periodic maintenance operations. This specification is designed to enable a complete reimplementation that maintains 100% compatibility with the existing system.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
1. [System Architecture](#system-architecture)
|
||||
2. [Service Implementation](#service-implementation)
|
||||
3. [Data Models](#data-models)
|
||||
4. [API Endpoints](#api-endpoints)
|
||||
5. [Database Schema](#database-schema)
|
||||
6. [External Dependencies](#external-dependencies)
|
||||
7. [Authentication & Authorization](#authentication--authorization)
|
||||
8. [Process Management](#process-management)
|
||||
9. [Error Handling](#error-handling)
|
||||
10. [Configuration](#configuration)
|
||||
11. [Testing Strategy](#testing-strategy)
|
||||
|
||||
## System Architecture
|
||||
|
||||
### Overview
|
||||
|
||||
The scheduler operates as an independent microservice within the AutoGPT platform, implementing the `AppService` base class pattern. It runs on a dedicated port (default: 8003) and exposes HTTP/JSON-RPC endpoints for communication with other services.
|
||||
|
||||
### Core Components
|
||||
|
||||
1. **Scheduler Service** (`backend/executor/scheduler.py:156`)
|
||||
- Extends `AppService` base class
|
||||
- Manages APScheduler instance with multiple jobstores
|
||||
- Handles lifecycle management and graceful shutdown
|
||||
|
||||
2. **Scheduler Client** (`backend/executor/scheduler.py:354`)
|
||||
- Extends `AppServiceClient` base class
|
||||
- Provides async/sync method wrappers for RPC calls
|
||||
- Implements automatic retry and connection pooling
|
||||
|
||||
3. **Entry Points**
|
||||
- Main executable: `backend/scheduler.py`
|
||||
- Service launcher: `backend/app.py`
|
||||
|
||||
## Service Implementation
|
||||
|
||||
### Base Service Pattern
|
||||
|
||||
```python
|
||||
class Scheduler(AppService):
|
||||
scheduler: BlockingScheduler
|
||||
|
||||
def __init__(self, register_system_tasks: bool = True):
|
||||
self.register_system_tasks = register_system_tasks
|
||||
|
||||
@classmethod
|
||||
def get_port(cls) -> int:
|
||||
return config.execution_scheduler_port # Default: 8003
|
||||
|
||||
@classmethod
|
||||
def db_pool_size(cls) -> int:
|
||||
return config.scheduler_db_pool_size # Default: 3
|
||||
|
||||
def run_service(self):
|
||||
# Initialize scheduler with jobstores
|
||||
# Register system tasks if enabled
|
||||
# Start scheduler blocking loop
|
||||
|
||||
def cleanup(self):
|
||||
# Graceful shutdown of scheduler
|
||||
# Wait=False for immediate termination
|
||||
```
|
||||
|
||||
### Jobstore Configuration
|
||||
|
||||
The scheduler uses three distinct jobstores:
|
||||
|
||||
1. **EXECUTION** (`Jobstores.EXECUTION.value`)
|
||||
- Type: SQLAlchemyJobStore
|
||||
- Table: `apscheduler_jobs`
|
||||
- Purpose: Graph execution schedules
|
||||
- Persistence: Required
|
||||
|
||||
2. **BATCHED_NOTIFICATIONS** (`Jobstores.BATCHED_NOTIFICATIONS.value`)
|
||||
- Type: SQLAlchemyJobStore
|
||||
- Table: `apscheduler_jobs_batched_notifications`
|
||||
- Purpose: Batched notification processing
|
||||
- Persistence: Required
|
||||
|
||||
3. **WEEKLY_NOTIFICATIONS** (`Jobstores.WEEKLY_NOTIFICATIONS.value`)
|
||||
- Type: MemoryJobStore
|
||||
- Purpose: Weekly summary notifications
|
||||
- Persistence: Not required
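
A sketch of the jobstore wiring described above using standard APScheduler classes; `db_url` stands in for the connection string derived from `DIRECT_URL`, and the alias strings mirror the `Jobstores` enum values.

```python
# Three jobstores: two SQLAlchemy-backed (persistent) and one in-memory.
from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.schedulers.blocking import BlockingScheduler

scheduler = BlockingScheduler(jobstores={
    "execution": SQLAlchemyJobStore(url=db_url, tablename="apscheduler_jobs"),
    "batched_notifications": SQLAlchemyJobStore(
        url=db_url, tablename="apscheduler_jobs_batched_notifications"
    ),
    "weekly_notifications": MemoryJobStore(),
})
```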
|
||||
|
||||
### System Tasks
|
||||
|
||||
When `register_system_tasks=True`, the following monitoring tasks are registered:
|
||||
|
||||
1. **Weekly Summary Processing**
|
||||
- Job ID: `process_weekly_summary`
|
||||
- Schedule: `0 * * * *` (hourly)
|
||||
- Function: `monitoring.process_weekly_summary`
|
||||
- Jobstore: WEEKLY_NOTIFICATIONS
|
||||
|
||||
2. **Late Execution Monitoring**
|
||||
- Job ID: `report_late_executions`
|
||||
- Schedule: Interval (config.execution_late_notification_threshold_secs)
|
||||
- Function: `monitoring.report_late_executions`
|
||||
- Jobstore: EXECUTION
|
||||
|
||||
3. **Block Error Rate Monitoring**
|
||||
- Job ID: `report_block_error_rates`
|
||||
- Schedule: Interval (config.block_error_rate_check_interval_secs)
|
||||
- Function: `monitoring.report_block_error_rates`
|
||||
- Jobstore: EXECUTION
|
||||
|
||||
4. **Cloud Storage Cleanup**
|
||||
- Job ID: `cleanup_expired_files`
|
||||
- Schedule: Interval (config.cloud_storage_cleanup_interval_hours * 3600)
|
||||
- Function: `cleanup_expired_files`
|
||||
- Jobstore: EXECUTION
|
||||
|
||||
## Data Models
|
||||
|
||||
### GraphExecutionJobArgs
|
||||
|
||||
```python
|
||||
class GraphExecutionJobArgs(BaseModel):
|
||||
user_id: str
|
||||
graph_id: str
|
||||
graph_version: int
|
||||
cron: str
|
||||
input_data: BlockInput
|
||||
input_credentials: dict[str, CredentialsMetaInput] = Field(default_factory=dict)
|
||||
```
|
||||
|
||||
### GraphExecutionJobInfo
|
||||
|
||||
```python
|
||||
class GraphExecutionJobInfo(GraphExecutionJobArgs):
|
||||
id: str
|
||||
name: str
|
||||
next_run_time: str
|
||||
|
||||
@staticmethod
|
||||
def from_db(job_args: GraphExecutionJobArgs, job_obj: JobObj) -> "GraphExecutionJobInfo":
|
||||
return GraphExecutionJobInfo(
|
||||
id=job_obj.id,
|
||||
name=job_obj.name,
|
||||
next_run_time=job_obj.next_run_time.isoformat(),
|
||||
**job_args.model_dump(),
|
||||
)
|
||||
```
|
||||
|
||||
### NotificationJobArgs
|
||||
|
||||
```python
|
||||
class NotificationJobArgs(BaseModel):
|
||||
notification_types: list[NotificationType]
|
||||
cron: str
|
||||
```
|
||||
|
||||
### CredentialsMetaInput
|
||||
|
||||
```python
|
||||
class CredentialsMetaInput(BaseModel, Generic[CP, CT]):
|
||||
id: str
|
||||
title: Optional[str] = None
|
||||
provider: CP
|
||||
type: CT
|
||||
```
|
||||
|
||||
## API Endpoints
|
||||
|
||||
All endpoints are exposed via the `@expose` decorator and follow HTTP POST JSON-RPC pattern.
|
||||
|
||||
### 1. Add Graph Execution Schedule
|
||||
|
||||
**Endpoint**: `/add_graph_execution_schedule`
|
||||
|
||||
**Request Body**:
|
||||
```json
|
||||
{
|
||||
"user_id": "string",
|
||||
"graph_id": "string",
|
||||
"graph_version": "integer",
|
||||
"cron": "string (crontab format)",
|
||||
"input_data": {},
|
||||
"input_credentials": {},
|
||||
"name": "string (optional)"
|
||||
}
|
||||
```
|
||||
|
||||
**Response**: `GraphExecutionJobInfo`
|
||||
|
||||
**Behavior**:
|
||||
- Creates APScheduler job with CronTrigger
|
||||
- Uses job kwargs to store GraphExecutionJobArgs
|
||||
- Sets `replace_existing=True` to allow updates
|
||||
- Returns job info with generated ID and next run time
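
A sketch of the `add_job` call implied by the behavior above; `execute_graph` is the job function referenced in the Appendix, while the wrapper function and its defaults are illustrative.

```python
# Schedules a graph execution with a crontab trigger in the EXECUTION jobstore.
from apscheduler.triggers.cron import CronTrigger

def add_schedule(scheduler, job_args: GraphExecutionJobArgs,
                 name: str | None = None) -> GraphExecutionJobInfo:
    job = scheduler.add_job(
        execute_graph,
        trigger=CronTrigger.from_crontab(job_args.cron),
        kwargs=job_args.model_dump(),          # all arguments live in job kwargs
        jobstore=Jobstores.EXECUTION.value,
        replace_existing=True,                 # allow schedule updates
        name=name,
    )
    return GraphExecutionJobInfo.from_db(job_args, job)
```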
|
||||
|
||||
### 2. Delete Graph Execution Schedule
|
||||
|
||||
**Endpoint**: `/delete_graph_execution_schedule`
|
||||
|
||||
**Request Body**:
|
||||
```json
|
||||
{
|
||||
"schedule_id": "string",
|
||||
"user_id": "string"
|
||||
}
|
||||
```
|
||||
|
||||
**Response**: `GraphExecutionJobInfo`
|
||||
|
||||
**Behavior**:
|
||||
- Validates schedule exists in EXECUTION jobstore
|
||||
- Verifies user_id matches job's user_id
|
||||
- Removes job from scheduler
|
||||
- Returns deleted job info
|
||||
|
||||
**Errors**:
|
||||
- `NotFoundError`: If job doesn't exist
|
||||
- `NotAuthorizedError`: If user_id doesn't match
|
||||
|
||||
### 3. Get Graph Execution Schedules
|
||||
|
||||
**Endpoint**: `/get_graph_execution_schedules`
|
||||
|
||||
**Request Body**:
|
||||
```json
|
||||
{
|
||||
"graph_id": "string (optional)",
|
||||
"user_id": "string (optional)"
|
||||
}
|
||||
```
|
||||
|
||||
**Response**: `list[GraphExecutionJobInfo]`
|
||||
|
||||
**Behavior**:
|
||||
- Retrieves all jobs from EXECUTION jobstore
|
||||
- Filters by graph_id and/or user_id if provided
|
||||
- Validates job kwargs as GraphExecutionJobArgs
|
||||
- Skips invalid jobs (ValidationError)
|
||||
- Only returns jobs with next_run_time set
|
||||
|
||||
### 4. System Task Endpoints
|
||||
|
||||
- `/execute_process_existing_batches` - Trigger batch processing
|
||||
- `/execute_process_weekly_summary` - Trigger weekly summary
|
||||
- `/execute_report_late_executions` - Trigger late execution report
|
||||
- `/execute_report_block_error_rates` - Trigger error rate report
|
||||
- `/execute_cleanup_expired_files` - Trigger file cleanup
|
||||
|
||||
### 5. Health Check
|
||||
|
||||
**Endpoints**: `/health_check`, `/health_check_async`
|
||||
**Methods**: POST, GET
|
||||
**Response**: "OK"
|
||||
|
||||
## Database Schema
|
||||
|
||||
### APScheduler Tables
|
||||
|
||||
The scheduler relies on APScheduler's SQLAlchemy jobstore schema:
|
||||
|
||||
1. **apscheduler_jobs**
|
||||
- id: VARCHAR (PRIMARY KEY)
|
||||
- next_run_time: FLOAT
|
||||
- job_state: BLOB/BYTEA (pickled job data)
|
||||
|
||||
2. **apscheduler_jobs_batched_notifications**
|
||||
- Same schema as above
|
||||
- Separate table for notification jobs
|
||||
|
||||
### Database Configuration
|
||||
|
||||
- URL extraction from `DIRECT_URL` environment variable
|
||||
- Schema extraction from URL query parameter
|
||||
- Connection pooling: `pool_size=db_pool_size()`, `max_overflow=0`
|
||||
- Metadata schema binding for multi-schema support
|
||||
|
||||
## External Dependencies
|
||||
|
||||
### Required Services
|
||||
|
||||
1. **PostgreSQL Database**
|
||||
- Connection via `DIRECT_URL` environment variable
|
||||
- Schema support via URL parameter
|
||||
- APScheduler job persistence
|
||||
|
||||
2. **ExecutionManager** (via execution_utils)
|
||||
- Function: `add_graph_execution`
|
||||
- Called by: `execute_graph` job function
|
||||
- Purpose: Create graph execution entries
|
||||
|
||||
3. **NotificationManager** (via monitoring module)
|
||||
- Functions: `process_existing_batches`, `queue_weekly_summary`
|
||||
- Purpose: Notification processing
|
||||
|
||||
4. **Cloud Storage** (via util.cloud_storage)
|
||||
- Function: `cleanup_expired_files_async`
|
||||
- Purpose: File expiration management
|
||||
|
||||
### Python Dependencies
|
||||
|
||||
```
|
||||
apscheduler>=3.10.0
|
||||
sqlalchemy
|
||||
pydantic>=2.0
|
||||
httpx
|
||||
uvicorn
|
||||
fastapi
|
||||
python-dotenv
|
||||
tenacity
|
||||
```
|
||||
|
||||
## Authentication & Authorization
|
||||
|
||||
### Service-Level Authentication
|
||||
|
||||
- No authentication required between internal services
|
||||
- Services communicate via trusted internal network
|
||||
- Host/port configuration via environment variables
|
||||
|
||||
### User-Level Authorization
|
||||
|
||||
- Authorization check in `delete_graph_execution_schedule`:
|
||||
- Validates `user_id` matches job's `user_id`
|
||||
- Raises `NotAuthorizedError` on mismatch
|
||||
- No authorization for read operations (security consideration)
|
||||
|
||||
## Process Management
|
||||
|
||||
### Startup Sequence
|
||||
|
||||
1. Load environment variables via `dotenv.load_dotenv()`
|
||||
2. Extract database URL and schema
|
||||
3. Initialize BlockingScheduler with configured jobstores
|
||||
4. Register system tasks (if enabled)
|
||||
5. Add job execution listener
|
||||
6. Start scheduler (blocking)
|
||||
|
||||
### Shutdown Sequence
|
||||
|
||||
1. Receive SIGTERM/SIGINT signal
|
||||
2. Call `cleanup()` method
|
||||
3. Shutdown scheduler with `wait=False`
|
||||
4. Terminate process
|
||||
|
||||
### Multi-Process Architecture
|
||||
|
||||
- Runs as independent process via `AppProcess`
|
||||
- Started by `run_processes()` in app.py
|
||||
- Can run in foreground or background mode
|
||||
- Automatic signal handling for graceful shutdown
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Job Execution Errors
|
||||
|
||||
- Listener on `EVENT_JOB_ERROR` logs failures
|
||||
- Errors in job functions are caught and logged
|
||||
- Jobs continue to run on schedule despite failures
|
||||
|
||||
### RPC Communication Errors
|
||||
|
||||
- Automatic retry via `@conn_retry` decorator
|
||||
- Configurable retry count and timeout
|
||||
- Connection pooling with self-healing
|
||||
|
||||
### Database Connection Errors
|
||||
|
||||
- APScheduler handles reconnection automatically
|
||||
- Pool exhaustion prevented by `max_overflow=0`
|
||||
- Connection errors logged but don't crash service
|
||||
|
||||
## Configuration
|
||||
|
||||
### Environment Variables
|
||||
|
||||
- `DIRECT_URL`: PostgreSQL connection string (required)
|
||||
- `{SERVICE_NAME}_HOST`: Override service host
|
||||
- Standard logging configuration
|
||||
|
||||
### Config Settings (via Config class)
|
||||
|
||||
```python
|
||||
execution_scheduler_port: int = 8003
|
||||
scheduler_db_pool_size: int = 3
|
||||
execution_late_notification_threshold_secs: int
|
||||
block_error_rate_check_interval_secs: int
|
||||
cloud_storage_cleanup_interval_hours: int
|
||||
pyro_host: str = "localhost"
|
||||
pyro_client_comm_timeout: float = 15
|
||||
pyro_client_comm_retry: int = 3
|
||||
rpc_client_call_timeout: int = 300
|
||||
```
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
### Unit Tests
|
||||
|
||||
1. Mock APScheduler for job management tests
|
||||
2. Mock database connections
|
||||
3. Test each RPC endpoint independently
|
||||
4. Verify job serialization/deserialization
|
||||
|
||||
### Integration Tests
|
||||
|
||||
1. Test with real PostgreSQL instance
|
||||
2. Verify job persistence across restarts
|
||||
3. Test concurrent job execution
|
||||
4. Validate cron expression parsing
|
||||
|
||||
### Critical Test Cases
|
||||
|
||||
1. **Job Persistence**: Jobs survive scheduler restart
|
||||
2. **User Isolation**: Users can only delete their own jobs
|
||||
3. **Concurrent Access**: Multiple clients can add/remove jobs
|
||||
4. **Error Recovery**: Service recovers from database outages
|
||||
5. **Resource Cleanup**: No memory/connection leaks
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
### Key Design Decisions
|
||||
|
||||
1. **BlockingScheduler vs AsyncIOScheduler**: Uses BlockingScheduler for simplicity and compatibility with multiprocessing architecture
|
||||
|
||||
2. **Job Storage**: All job arguments stored in kwargs, not in job name/id
|
||||
|
||||
3. **Separate Jobstores**: Isolation between execution and notification jobs
|
||||
|
||||
4. **No Authentication**: Relies on network isolation for security
|
||||
|
||||
### Migration Considerations
|
||||
|
||||
1. APScheduler job format must be preserved exactly
|
||||
2. Database schema cannot change without migration
|
||||
3. RPC protocol must maintain compatibility
|
||||
4. Environment variables must match existing deployment
|
||||
|
||||
### Performance Considerations
|
||||
|
||||
1. Database pool size limited to prevent exhaustion
|
||||
2. No job result storage (fire-and-forget pattern)
|
||||
3. Minimal logging in hot paths
|
||||
4. Connection reuse via pooling
|
||||
|
||||
## Appendix: Critical Implementation Details
|
||||
|
||||
### Event Loop Management
|
||||
|
||||
```python
|
||||
@thread_cached
|
||||
def get_event_loop():
|
||||
return asyncio.new_event_loop()
|
||||
|
||||
def execute_graph(**kwargs):
|
||||
get_event_loop().run_until_complete(_execute_graph(**kwargs))
|
||||
```
|
||||
|
||||
### Job Function Execution Context
|
||||
|
||||
- Jobs run in scheduler's process space
|
||||
- Each job gets fresh event loop
|
||||
- No shared state between job executions
|
||||
- Exceptions logged but don't affect scheduler
|
||||
|
||||
### Cron Expression Format
|
||||
|
||||
- Uses standard crontab format via `CronTrigger.from_crontab()`
|
||||
- Supports: minute hour day month day_of_week
|
||||
- Special strings: @yearly, @monthly, @weekly, @daily, @hourly
|
||||
|
||||
This specification provides all necessary details to reimplement the scheduler service while maintaining 100% compatibility with the existing system. Any deviation from these specifications may result in system incompatibility.
|
||||
@@ -1,85 +0,0 @@
|
||||
name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ main, master ]
|
||||
pull_request:
|
||||
branches: [ main, master ]
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
RUSTFLAGS: "-D warnings"
|
||||
|
||||
jobs:
|
||||
test:
|
||||
name: Test
|
||||
runs-on: ubuntu-latest
|
||||
services:
|
||||
redis:
|
||||
image: redis:7
|
||||
options: >-
|
||||
--health-cmd "redis-cli ping"
|
||||
--health-interval 10s
|
||||
--health-timeout 5s
|
||||
--health-retries 5
|
||||
ports:
|
||||
- 6379:6379
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- name: Run tests
|
||||
run: cargo test
|
||||
env:
|
||||
REDIS_URL: redis://localhost:6379
|
||||
|
||||
clippy:
|
||||
name: Clippy
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
components: clippy
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- name: Run clippy
|
||||
run: |
|
||||
cargo clippy -- \
|
||||
-D warnings \
|
||||
-D clippy::unwrap_used \
|
||||
-D clippy::panic \
|
||||
-D clippy::unimplemented \
|
||||
-D clippy::todo
|
||||
|
||||
fmt:
|
||||
name: Format
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
components: rustfmt
|
||||
- name: Check formatting
|
||||
run: cargo fmt -- --check
|
||||
|
||||
bench:
|
||||
name: Benchmarks
|
||||
runs-on: ubuntu-latest
|
||||
services:
|
||||
redis:
|
||||
image: redis:7
|
||||
options: >-
|
||||
--health-cmd "redis-cli ping"
|
||||
--health-interval 10s
|
||||
--health-timeout 5s
|
||||
--health-retries 5
|
||||
ports:
|
||||
- 6379:6379
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- name: Build benchmarks
|
||||
run: cargo bench --no-run
|
||||
env:
|
||||
REDIS_URL: redis://localhost:6379
|
||||
autogpt_platform/autogpt-rs/websocket/Cargo.lock (3382 lines, generated): file diff suppressed because it is too large
@@ -1,60 +0,0 @@
|
||||
[package]
|
||||
name = "websocket"
|
||||
authors = ["AutoGPT Team"]
|
||||
description = "WebSocket server for AutoGPT Platform"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[lib]
|
||||
name = "websocket"
|
||||
path = "src/lib.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "websocket"
|
||||
path = "src/main.rs"
|
||||
|
||||
[dependencies]
|
||||
axum = { version = "0.7.5", features = ["ws"] }
|
||||
jsonwebtoken = "9.3.0"
|
||||
redis = { version = "0.25.4", features = ["aio", "tokio-comp"] }
|
||||
serde = { version = "1.0.204", features = ["derive"] }
|
||||
serde_json = "1.0.120"
|
||||
tokio = { version = "1.38.1", features = ["rt-multi-thread", "macros", "net", "sync", "time", "io-util"] }
|
||||
tower-http = { version = "0.5.2", features = ["cors"] }
|
||||
tracing = "0.1"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
futures = "0.3"
|
||||
dotenvy = "0.15"
|
||||
clap = { version = "4.5.4", features = ["derive"] }
|
||||
toml = "0.8"
|
||||
|
||||
[dev-dependencies]
|
||||
# Load testing and profiling
|
||||
tokio-console = "0.1"
|
||||
criterion = { version = "0.5", features = ["async_tokio"] }
|
||||
pprof = { version = "0.13", features = ["flamegraph", "criterion"] }
|
||||
# Dependencies for benchmarks
|
||||
tokio-tungstenite = "0.24"
|
||||
futures-util = "0.3"
|
||||
chrono = "0.4"
|
||||
|
||||
[[bench]]
|
||||
name = "websocket_bench"
|
||||
harness = false
|
||||
|
||||
[[example]]
|
||||
name = "ws_client_example"
|
||||
required-features = []
|
||||
|
||||
[profile.release]
|
||||
opt-level = 3 # Maximum optimization
|
||||
lto = true # Enable link-time optimization
|
||||
codegen-units = 1 # Reduce parallel code generation units to increase optimization
|
||||
panic = "abort" # Remove panic unwinding to reduce binary size
|
||||
strip = true # Strip symbols from binary
|
||||
|
||||
[profile.bench]
|
||||
opt-level = 3 # Maximum optimization
|
||||
lto = true # Enable link-time optimization
|
||||
codegen-units = 1 # Reduce parallel code generation units to increase optimization
|
||||
debug = true # Keep debug symbols for profiling
|
||||
@@ -1,412 +0,0 @@
|
||||
# WebSocket API Technical Specification
|
||||
|
||||
## Overview
|
||||
|
||||
This document provides a complete technical specification for the AutoGPT Platform WebSocket API (`ws_api.py`). The WebSocket API provides real-time updates for graph and node execution events, enabling clients to monitor workflow execution progress.
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
### Core Components
|
||||
|
||||
1. **WebSocket Server** (`ws_api.py`)
|
||||
- FastAPI application with WebSocket endpoint
|
||||
- Handles client connections and message routing
|
||||
- Authenticates clients via JWT tokens
|
||||
- Manages subscriptions to execution events
|
||||
|
||||
2. **Connection Manager** (`conn_manager.py`)
|
||||
- Maintains active WebSocket connections
|
||||
- Manages channel subscriptions
|
||||
- Routes execution events to subscribed clients
|
||||
- Handles connection lifecycle
|
||||
|
||||
3. **Event Broadcasting System**
|
||||
- Redis Pub/Sub based event bus
|
||||
- Asynchronous event broadcaster
|
||||
- Execution event propagation from backend services
|
||||
|
||||
## API Endpoint
|
||||
|
||||
### WebSocket Endpoint
|
||||
- **URL**: `/ws`
|
||||
- **Protocol**: WebSocket (ws:// or wss://)
|
||||
- **Query Parameters**:
|
||||
- `token` (required when auth enabled): JWT authentication token
|
||||
|
||||
## Authentication
|
||||
|
||||
### JWT Token Authentication
|
||||
- **When Required**: When `settings.config.enable_auth` is `True`
|
||||
- **Token Location**: Query parameter `?token=<JWT_TOKEN>`
|
||||
- **Token Validation**:
|
||||
```python
|
||||
payload = parse_jwt_token(token)
|
||||
user_id = payload.get("sub")
|
||||
```
|
||||
- **JWT Requirements**:
|
||||
- Algorithm: Configured via `settings.JWT_ALGORITHM`
|
||||
- Secret Key: Configured via `settings.JWT_SECRET_KEY`
|
||||
- Audience: Must be "authenticated"
|
||||
- Claims: Must contain `sub` (user ID)
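For reference, a minimal Rust sketch of this validation using the `jsonwebtoken` crate (the same crate used by the server in this branch). The `Claims` struct and `authenticate` helper are illustrative, and HS256 is hard-coded for brevity where the real service reads the algorithm from configuration:

```rust
use jsonwebtoken::{decode, Algorithm, DecodingKey, Validation};
use serde::Deserialize;

#[derive(Deserialize)]
struct Claims {
    sub: String, // user ID
}

/// Validate the `?token=` value and return the user ID on success.
fn authenticate(token: &str, secret: &str) -> Result<String, jsonwebtoken::errors::Error> {
    let mut validation = Validation::new(Algorithm::HS256);
    validation.set_audience(&["authenticated"]); // audience must be "authenticated"
    let key = DecodingKey::from_secret(secret.as_bytes());
    let data = decode::<Claims>(token, &key, &validation)?;
    Ok(data.claims.sub)
}
```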
|
||||
|
||||
### Authentication Failures
|
||||
- **4001**: Missing authentication token
|
||||
- **4002**: Invalid token (missing user ID)
|
||||
- **4003**: Invalid token (parsing error or expired)
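In the Rust handler these codes are not HTTP errors: the upgrade is accepted and the socket is immediately closed with the corresponding close code. A reduced sketch of that rejection path (the full handler appears later in this diff):

```rust
use axum::extract::ws::{CloseFrame, Message, WebSocket};

/// Accept the upgrade, then immediately close the socket with the
/// protocol-level auth error code (4001, 4002 or 4003).
async fn reject(mut socket: WebSocket, code: u16) {
    let frame = CloseFrame {
        code,
        reason: "Authentication failed".into(),
    };
    // Ignore send errors: the client may already be gone.
    let _ = socket.send(Message::Close(Some(frame))).await;
}
```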
|
||||
|
||||
### No-Auth Mode
|
||||
- When `settings.config.enable_auth` is `False`
|
||||
- Uses `DEFAULT_USER_ID` from `backend.data.user`
|
||||
|
||||
## Message Protocol
|
||||
|
||||
### Message Format
|
||||
All messages use JSON format with the following structure:
|
||||
|
||||
```typescript
|
||||
interface WSMessage {
|
||||
method: WSMethod;
|
||||
data?: Record<string, any> | any[] | string;
|
||||
success?: boolean;
|
||||
channel?: string;
|
||||
error?: string;
|
||||
}
|
||||
```
|
||||
|
||||
### Message Methods (WSMethod enum)
|
||||
|
||||
1. **Client-to-Server Methods**:
|
||||
- `SUBSCRIBE_GRAPH_EXEC`: Subscribe to specific graph execution
|
||||
- `SUBSCRIBE_GRAPH_EXECS`: Subscribe to all executions of a graph
|
||||
- `UNSUBSCRIBE`: Unsubscribe from a channel
|
||||
- `HEARTBEAT`: Keep-alive ping
|
||||
|
||||
2. **Server-to-Client Methods**:
|
||||
- `GRAPH_EXECUTION_EVENT`: Graph execution status update
|
||||
- `NODE_EXECUTION_EVENT`: Node execution status update
|
||||
- `ERROR`: Error message
|
||||
- `HEARTBEAT`: Keep-alive pong
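On the Rust side this envelope is a single serde struct rather than a discriminated union, with `method` kept as a plain string and matched in the handler. A minimal sketch with an illustrative name; the branch's actual `WSMessage` model may differ in detail:

```rust
use serde::{Deserialize, Serialize};
use serde_json::Value;

/// Wire-level envelope shared by client and server messages.
#[derive(Debug, Default, Serialize, Deserialize)]
struct WsMessage {
    method: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    data: Option<Value>,
    #[serde(skip_serializing_if = "Option::is_none")]
    success: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    channel: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    error: Option<String>,
}
```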
|
||||
|
||||
## Subscription Models
|
||||
|
||||
### Subscribe to Specific Graph Execution
|
||||
```typescript
|
||||
interface WSSubscribeGraphExecutionRequest {
|
||||
graph_exec_id: string;
|
||||
}
|
||||
```
|
||||
**Channel Key Format**: `{user_id}|graph_exec#{graph_exec_id}`
|
||||
|
||||
### Subscribe to All Graph Executions
|
||||
```typescript
|
||||
interface WSSubscribeGraphExecutionsRequest {
|
||||
graph_id: string;
|
||||
}
|
||||
```
|
||||
**Channel Key Format**: `{user_id}|graph#{graph_id}|executions`
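Both channel keys are plain string concatenations. A minimal sketch of the two helpers; the function names are illustrative, but the format strings match the handler code later in this diff:

```rust
/// Channel for a single graph execution.
fn graph_exec_channel(user_id: &str, graph_exec_id: &str) -> String {
    format!("{user_id}|graph_exec#{graph_exec_id}")
}

/// Channel for all executions of a graph.
fn graph_executions_channel(user_id: &str, graph_id: &str) -> String {
    format!("{user_id}|graph#{graph_id}|executions")
}
```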
|
||||
|
||||
## Event Models
|
||||
|
||||
### Graph Execution Event
|
||||
```typescript
|
||||
interface GraphExecutionEvent {
|
||||
event_type: "graph_execution_update";
|
||||
id: string; // graph_exec_id
|
||||
user_id: string;
|
||||
graph_id: string;
|
||||
graph_version: number;
|
||||
preset_id?: string;
|
||||
status: ExecutionStatus;
|
||||
started_at: string; // ISO datetime
|
||||
ended_at: string; // ISO datetime
|
||||
inputs: Record<string, any>;
|
||||
outputs: Record<string, any>;
|
||||
stats?: {
|
||||
cost: number; // cents
|
||||
duration: number; // seconds
|
||||
duration_cpu_only: number;
|
||||
node_exec_time: number;
|
||||
node_exec_time_cpu_only: number;
|
||||
node_exec_count: number;
|
||||
node_error_count: number;
|
||||
error?: string;
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### Node Execution Event
|
||||
```typescript
|
||||
interface NodeExecutionEvent {
|
||||
event_type: "node_execution_update";
|
||||
user_id: string;
|
||||
graph_id: string;
|
||||
graph_version: number;
|
||||
graph_exec_id: string;
|
||||
node_exec_id: string;
|
||||
node_id: string;
|
||||
block_id: string;
|
||||
status: ExecutionStatus;
|
||||
input_data: Record<string, any>;
|
||||
output_data: Record<string, any>;
|
||||
add_time: string; // ISO datetime
|
||||
queue_time?: string; // ISO datetime
|
||||
start_time?: string; // ISO datetime
|
||||
end_time?: string; // ISO datetime
|
||||
}
|
||||
```
|
||||
|
||||
### Execution Status Enum
|
||||
```typescript
|
||||
enum ExecutionStatus {
|
||||
INCOMPLETE = "INCOMPLETE",
|
||||
QUEUED = "QUEUED",
|
||||
RUNNING = "RUNNING",
|
||||
COMPLETED = "COMPLETED",
|
||||
FAILED = "FAILED"
|
||||
}
|
||||
```
|
||||
|
||||
## Message Flow Examples
|
||||
|
||||
### 1. Subscribe to Graph Execution
|
||||
```json
|
||||
// Client → Server
|
||||
{
|
||||
"method": "subscribe_graph_execution",
|
||||
"data": {
|
||||
"graph_exec_id": "exec-123"
|
||||
}
|
||||
}
|
||||
|
||||
// Server → Client (Success)
|
||||
{
|
||||
"method": "subscribe_graph_execution",
|
||||
"success": true,
|
||||
"channel": "user-456|graph_exec#exec-123"
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Receive Execution Updates
|
||||
```json
|
||||
// Server → Client (Graph Update)
|
||||
{
|
||||
"method": "graph_execution_event",
|
||||
"channel": "user-456|graph_exec#exec-123",
|
||||
"data": {
|
||||
"event_type": "graph_execution_update",
|
||||
"id": "exec-123",
|
||||
"user_id": "user-456",
|
||||
"graph_id": "graph-789",
|
||||
"status": "RUNNING",
|
||||
// ... other fields
|
||||
}
|
||||
}
|
||||
|
||||
// Server → Client (Node Update)
|
||||
{
|
||||
"method": "node_execution_event",
|
||||
"channel": "user-456|graph_exec#exec-123",
|
||||
"data": {
|
||||
"event_type": "node_execution_update",
|
||||
"node_exec_id": "node-exec-111",
|
||||
"status": "COMPLETED",
|
||||
// ... other fields
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Heartbeat
|
||||
```json
|
||||
// Client → Server
|
||||
{
|
||||
"method": "heartbeat",
|
||||
"data": "ping"
|
||||
}
|
||||
|
||||
// Server → Client
|
||||
{
|
||||
"method": "heartbeat",
|
||||
"data": "pong",
|
||||
"success": true
|
||||
}
|
||||
```
|
||||
|
||||
### 4. Error Handling
|
||||
```json
|
||||
// Server → Client (Invalid Message)
|
||||
{
|
||||
"method": "error",
|
||||
"success": false,
|
||||
"error": "Invalid message format. Review the schema and retry"
|
||||
}
|
||||
```
|
||||
|
||||
## Event Broadcasting Architecture
|
||||
|
||||
### Redis Pub/Sub Integration
|
||||
1. **Event Bus Name**: Configured via `config.execution_event_bus_name`
|
||||
2. **Channel Pattern**: `{event_bus_name}/{channel_key}`
|
||||
3. **Event Flow**:
|
||||
- Execution services publish events to Redis
|
||||
- Event broadcaster listens to Redis pattern `*`
|
||||
- Events are routed to WebSocket connections based on subscriptions
|
||||
|
||||
### Event Broadcaster
|
||||
- Runs as a continuous async task using the `@continuous_retry()` decorator
|
||||
- Listens to all execution events via `AsyncRedisExecutionEventBus`
|
||||
- Calls `ConnectionManager.send_execution_update()` for each event
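The Rust port gets the same always-on behaviour from a supervision loop instead of a decorator: run the listener, log any failure, back off, and restart. A generic sketch of that pattern (the concrete broadcaster and its five-second back-off appear later in this diff):

```rust
use std::{future::Future, time::Duration};

/// Restart-on-failure wrapper: run `task` forever, logging errors and
/// backing off between attempts.
async fn supervise<F, Fut, E>(mut task: F)
where
    F: FnMut() -> Fut,
    Fut: Future<Output = Result<(), E>>,
    E: std::fmt::Display,
{
    loop {
        match task().await {
            Ok(_) => tracing::warn!("event broadcaster stopped unexpectedly, restarting in 5 seconds"),
            Err(e) => tracing::error!("event broadcaster error: {}, restarting in 5 seconds", e),
        }
        tokio::time::sleep(Duration::from_secs(5)).await;
    }
}
```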
|
||||
|
||||
## Connection Lifecycle
|
||||
|
||||
### Connection Establishment
|
||||
1. Client connects to `/ws` endpoint
|
||||
2. Authentication performed (JWT validation)
|
||||
3. WebSocket accepted via `manager.connect_socket()`
|
||||
4. Connection added to active connections set
|
||||
|
||||
### Message Processing Loop
|
||||
1. Receive text message from client
|
||||
2. Parse and validate as `WSMessage`
|
||||
3. Route to appropriate handler based on `method`
|
||||
4. Send response or error back to client
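In the Rust handler this loop is a `tokio::select!` over two sources: outbound messages queued for the client by the broadcaster, and inbound frames from the client itself. A reduced sketch of the skeleton, with dispatch details omitted (the full handler appears later in this diff):

```rust
use axum::extract::ws::{Message, WebSocket};
use tokio::sync::mpsc;

/// Per-client loop: forward queued broadcasts and process incoming frames
/// until either side goes away.
async fn client_loop(mut socket: WebSocket, mut rx: mpsc::Receiver<String>) {
    loop {
        tokio::select! {
            // Outbound: events routed to this client by the broadcaster.
            msg = rx.recv() => {
                match msg {
                    Some(msg) => {
                        if socket.send(Message::Text(msg)).await.is_err() {
                            break;
                        }
                    }
                    None => break, // broadcaster dropped the sender
                }
            }
            // Inbound: client requests (subscribe, unsubscribe, heartbeat, ...).
            incoming = socket.recv() => {
                match incoming {
                    Some(Ok(Message::Text(_text))) => { /* parse the envelope, dispatch, reply */ }
                    Some(Ok(Message::Close(_))) | Some(Err(_)) | None => break,
                    Some(Ok(_)) => {} // pings, pongs, binary frames
                }
            }
        }
    }
}
```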
|
||||
|
||||
### Connection Termination
|
||||
1. `WebSocketDisconnect` exception caught
|
||||
2. `manager.disconnect_socket()` called
|
||||
3. Connection removed from active connections
|
||||
4. All subscriptions for that connection removed
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Validation Errors
|
||||
- **Invalid Message Format**: Returns error with method "error"
|
||||
- **Invalid Message Data**: Returns error with specific validation message
|
||||
- **Unknown Message Type**: Returns error indicating unsupported method
|
||||
|
||||
### Connection Errors
|
||||
- WebSocket disconnections handled gracefully
|
||||
- Failed event parsing logged but doesn't crash connection
|
||||
- Handler exceptions logged and connection continues
|
||||
|
||||
## Configuration
|
||||
|
||||
### Environment Variables
|
||||
```python
|
||||
# WebSocket Server Configuration
|
||||
websocket_server_host: str = "0.0.0.0"
|
||||
websocket_server_port: int = 8001
|
||||
|
||||
# Authentication
|
||||
enable_auth: bool = True
|
||||
|
||||
# CORS
|
||||
backend_cors_allow_origins: List[str] = []
|
||||
|
||||
# Redis Event Bus
|
||||
execution_event_bus_name: str = "autogpt:execution_event_bus"
|
||||
|
||||
# Message Size Limits
|
||||
max_message_size_limit: int = 512000 # 512KB
|
||||
```
|
||||
|
||||
### Security Headers
|
||||
- CORS middleware applied with configured origins
|
||||
- Credentials allowed for authenticated requests
|
||||
- All methods and headers allowed (configurable)
|
||||
|
||||
## Deployment Requirements
|
||||
|
||||
### Dependencies
|
||||
1. **FastAPI**: Web framework with WebSocket support
|
||||
2. **Redis**: For pub/sub event broadcasting
|
||||
3. **JWT Libraries**: For token validation
|
||||
4. **Prisma**: Database ORM (for future graph access validation)
|
||||
|
||||
### Process Management
|
||||
- Implements `AppProcess` interface for service lifecycle
|
||||
- Runs via `uvicorn` ASGI server
|
||||
- Graceful shutdown handling in `cleanup()` method
|
||||
|
||||
### Concurrent Connections
|
||||
- No hard limit on WebSocket connections
|
||||
- Memory usage scales with active connections
|
||||
- Each connection maintains subscription set
|
||||
|
||||
## Implementation Checklist
|
||||
|
||||
To implement a compatible WebSocket API:
|
||||
|
||||
1. **Authentication**
|
||||
- [ ] JWT token validation from query parameters
|
||||
- [ ] Support for no-auth mode with default user ID
|
||||
- [ ] Proper error codes for auth failures
|
||||
|
||||
2. **Message Handling**
|
||||
- [ ] Parse and validate WSMessage format
|
||||
- [ ] Implement all client-to-server methods
|
||||
- [ ] Support all server-to-client event types
|
||||
- [ ] Proper error responses for invalid messages
|
||||
|
||||
3. **Subscription Management**
|
||||
- [ ] Channel key generation matching exact format
|
||||
- [ ] Support for both execution and graph-level subscriptions
|
||||
- [ ] Unsubscribe functionality
|
||||
- [ ] Clean up subscriptions on disconnect
|
||||
|
||||
4. **Event Broadcasting**
|
||||
- [ ] Listen to Redis pub/sub for execution events
|
||||
- [ ] Route events to correct subscribed connections
|
||||
- [ ] Handle both graph and node execution events
|
||||
- [ ] Maintain event order and completeness
|
||||
|
||||
5. **Connection Management**
|
||||
- [ ] Track active WebSocket connections
|
||||
- [ ] Handle graceful disconnections
|
||||
- [ ] Implement heartbeat/keepalive
|
||||
- [ ] Memory-efficient subscription storage
|
||||
|
||||
6. **Configuration**
|
||||
- [ ] Support all environment variables
|
||||
- [ ] CORS configuration for allowed origins
|
||||
- [ ] Configurable host/port binding
|
||||
- [ ] Redis connection configuration
|
||||
|
||||
7. **Error Handling**
|
||||
- [ ] Graceful handling of malformed messages
|
||||
- [ ] Logging of errors without dropping connections
|
||||
- [ ] Specific error messages for debugging
|
||||
- [ ] Recovery from Redis connection issues
|
||||
|
||||
## Testing Considerations
|
||||
|
||||
1. **Unit Tests**
|
||||
- Message parsing and validation
|
||||
- Channel key generation
|
||||
- Subscription management logic
|
||||
|
||||
2. **Integration Tests**
|
||||
- Full WebSocket connection flow
|
||||
- Event broadcasting from Redis
|
||||
- Multi-client subscription scenarios
|
||||
- Authentication success/failure cases
|
||||
|
||||
3. **Load Tests**
|
||||
- Many concurrent connections
|
||||
- High-frequency event broadcasting
|
||||
- Memory usage under load
|
||||
- Connection/disconnection cycles
|
||||
|
||||
## Security Considerations
|
||||
|
||||
1. **Authentication**: JWT tokens transmitted via query parameters (consider upgrading to headers)
|
||||
2. **Authorization**: Currently no graph-level access validation (commented out in code)
|
||||
3. **Rate Limiting**: No rate limiting implemented
|
||||
4. **Message Size**: Limited by `max_message_size_limit` configuration
|
||||
5. **Input Validation**: All inputs validated via Pydantic models
|
||||
|
||||
## Future Enhancements (Currently Commented Out)
|
||||
|
||||
1. **Graph Access Validation**: Verify user has read access to subscribed graphs
|
||||
2. **Message Compression**: For large execution payloads
|
||||
3. **Batch Updates**: Aggregate multiple events in single message
|
||||
4. **Selective Field Subscription**: Subscribe to specific fields only
|
||||
@@ -1,93 +0,0 @@
|
||||
# WebSocket Server Benchmarks
|
||||
|
||||
This directory contains performance benchmarks for the AutoGPT WebSocket server.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. Redis must be running locally, or set the `REDIS_URL` environment variable to point at an existing instance:
|
||||
```bash
|
||||
docker run -d -p 6379:6379 redis:latest
|
||||
```
|
||||
|
||||
2. Build the project in release mode:
|
||||
```bash
|
||||
cargo build --release
|
||||
```
|
||||
|
||||
## Running Benchmarks
|
||||
|
||||
Run all benchmarks:
|
||||
```bash
|
||||
cargo bench
|
||||
```
|
||||
|
||||
Run specific benchmark group:
|
||||
```bash
|
||||
cargo bench connection_establishment
|
||||
cargo bench subscriptions
|
||||
cargo bench message_throughput
|
||||
cargo bench concurrent_connections
|
||||
cargo bench message_parsing
|
||||
cargo bench redis_event_processing
|
||||
```
|
||||
|
||||
## Benchmark Categories
|
||||
|
||||
### Connection Establishment
|
||||
Tests the performance of establishing WebSocket connections with different authentication scenarios:
|
||||
- No authentication
|
||||
- Valid JWT authentication
|
||||
- Invalid JWT authentication (connection rejection)
|
||||
|
||||
### Subscriptions
|
||||
Measures the performance of subscription operations:
|
||||
- Subscribing to graph execution events
|
||||
- Unsubscribing from channels
|
||||
|
||||
### Message Throughput
|
||||
Tests how many messages the server can process per second with varying message counts (10, 100, 1000).
|
||||
|
||||
### Concurrent Connections
|
||||
Benchmarks the server's ability to handle multiple simultaneous connections (100, 500, and 1000 clients).
|
||||
|
||||
### Message Parsing
|
||||
Tests JSON parsing performance with different message sizes (100 B to 10 KB).
|
||||
|
||||
### Redis Event Processing
|
||||
Benchmarks the parsing of execution events received from Redis.
|
||||
|
||||
## Profiling
|
||||
|
||||
To generate flamegraphs for CPU profiling:
|
||||
|
||||
1. Install flamegraph tools:
|
||||
```bash
|
||||
cargo install flamegraph
|
||||
```
|
||||
|
||||
2. Run benchmarks with profiling:
|
||||
```bash
|
||||
cargo bench --bench websocket_bench -- --profile-time=10
|
||||
```
|
||||
|
||||
## Interpreting Results
|
||||
|
||||
- **Throughput**: Higher is better (operations/second or elements/second)
|
||||
- **Time**: Lower is better (nanoseconds per operation)
|
||||
- **Error margins**: Look for stable results with low standard deviation
|
||||
|
||||
## Optimizing Performance
|
||||
|
||||
Based on benchmark results, consider:
|
||||
|
||||
1. **Connection pooling** for Redis connections
|
||||
2. **Message batching** for high-throughput scenarios
|
||||
3. **Async task tuning** for concurrent connection handling
|
||||
4. **JSON parsing optimization** using simd-json or other fast parsers
|
||||
5. **Memory allocation** optimization using arena allocators
|
||||
|
||||
## Notes
|
||||
|
||||
- Benchmarks create actual WebSocket servers on random ports
|
||||
- Each benchmark iteration properly cleans up resources
|
||||
- Results may vary based on system resources and Redis performance
|
||||
@@ -1,406 +0,0 @@
|
||||
#![allow(clippy::unwrap_used)] // Benchmarks can panic on setup errors
|
||||
|
||||
use axum::{routing::get, Router};
|
||||
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
|
||||
use futures_util::{SinkExt, StreamExt};
|
||||
use serde_json::json;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::net::TcpListener;
|
||||
use tokio::runtime::Runtime;
|
||||
use tokio_tungstenite::{connect_async, tungstenite::Message};
|
||||
|
||||
// Import the actual websocket server components
|
||||
use websocket::{models, ws_handler, AppState, Config, ConnectionManager, Stats};
|
||||
|
||||
// Helper to create a test server
|
||||
async fn create_test_server(enable_auth: bool) -> (String, tokio::task::JoinHandle<()>) {
|
||||
// Set environment variables for test config
|
||||
std::env::set_var("WEBSOCKET_SERVER_HOST", "127.0.0.1");
|
||||
std::env::set_var("WEBSOCKET_SERVER_PORT", "0");
|
||||
std::env::set_var("ENABLE_AUTH", enable_auth.to_string());
|
||||
std::env::set_var("SUPABASE_JWT_SECRET", "test_secret");
|
||||
std::env::set_var("DEFAULT_USER_ID", "test_user");
|
||||
if std::env::var("REDIS_URL").is_err() {
|
||||
std::env::set_var("REDIS_URL", "redis://localhost:6379");
|
||||
}
|
||||
|
||||
let mut config = Config::load(None);
|
||||
config.port = 0; // Force OS to assign port
|
||||
|
||||
let redis_client =
|
||||
redis::Client::open(config.redis_url.clone()).expect("Failed to connect to Redis");
|
||||
let stats = Arc::new(Stats::default());
|
||||
let mgr = Arc::new(ConnectionManager::new(
|
||||
redis_client,
|
||||
config.execution_event_bus_name.clone(),
|
||||
stats.clone(),
|
||||
));
|
||||
|
||||
// Start broadcaster
|
||||
let mgr_clone = mgr.clone();
|
||||
tokio::spawn(async move {
|
||||
mgr_clone.run_broadcaster().await;
|
||||
});
|
||||
|
||||
let state = AppState {
|
||||
mgr,
|
||||
config: Arc::new(config),
|
||||
stats,
|
||||
};
|
||||
|
||||
let app = Router::new()
|
||||
.route("/ws", get(ws_handler))
|
||||
.layer(axum::Extension(state));
|
||||
|
||||
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
||||
let addr = listener.local_addr().unwrap();
|
||||
let server_url = format!("ws://{addr}");
|
||||
|
||||
let server_handle = tokio::spawn(async move {
|
||||
axum::serve(listener, app.into_make_service())
|
||||
.await
|
||||
.unwrap();
|
||||
});
|
||||
|
||||
// Give server time to start
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
|
||||
(server_url, server_handle)
|
||||
}
|
||||
|
||||
// Helper to create a valid JWT token
|
||||
fn create_jwt_token(user_id: &str) -> String {
|
||||
use jsonwebtoken::{encode, Algorithm, EncodingKey, Header};
|
||||
use serde::Serialize;
|
||||
|
||||
#[derive(Serialize)]
|
||||
struct Claims {
|
||||
sub: String,
|
||||
aud: Vec<String>,
|
||||
exp: usize,
|
||||
}
|
||||
|
||||
let claims = Claims {
|
||||
sub: user_id.to_string(),
|
||||
aud: vec!["authenticated".to_string()],
|
||||
exp: (chrono::Utc::now() + chrono::Duration::hours(1)).timestamp() as usize,
|
||||
};
|
||||
|
||||
encode(
|
||||
&Header::new(Algorithm::HS256),
|
||||
&claims,
|
||||
&EncodingKey::from_secret(b"test_secret"),
|
||||
)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
// Benchmark connection establishment
|
||||
fn benchmark_connection_establishment(c: &mut Criterion) {
|
||||
let rt = Runtime::new().unwrap();
|
||||
|
||||
let mut group = c.benchmark_group("connection_establishment");
|
||||
group.measurement_time(Duration::from_secs(30));
|
||||
|
||||
// Test without auth
|
||||
group.bench_function("no_auth", |b| {
|
||||
b.to_async(&rt).iter_with_large_drop(|| async {
|
||||
let (server_url, server_handle) = create_test_server(false).await;
|
||||
let url = format!("{server_url}/ws");
|
||||
let (ws_stream, _) = connect_async(&url).await.unwrap();
|
||||
drop(ws_stream);
|
||||
server_handle.abort();
|
||||
});
|
||||
});
|
||||
|
||||
// Test with valid auth
|
||||
group.bench_function("valid_auth", |b| {
|
||||
b.to_async(&rt).iter_with_large_drop(|| async {
|
||||
let (server_url, server_handle) = create_test_server(true).await;
|
||||
let token = create_jwt_token("test_user");
|
||||
let url = format!("{server_url}/ws?token={token}");
|
||||
let (ws_stream, _) = connect_async(&url).await.unwrap();
|
||||
drop(ws_stream);
|
||||
server_handle.abort();
|
||||
});
|
||||
});
|
||||
|
||||
// Test with invalid auth
|
||||
group.bench_function("invalid_auth", |b| {
|
||||
b.to_async(&rt).iter_with_large_drop(|| async {
|
||||
let (server_url, server_handle) = create_test_server(true).await;
|
||||
let url = format!("{server_url}/ws?token=invalid");
|
||||
let result = connect_async(&url).await;
|
||||
assert!(
|
||||
result.is_err() || {
|
||||
if let Ok((mut ws_stream, _)) = result {
|
||||
// Should receive close frame
|
||||
matches!(ws_stream.next().await, Some(Ok(Message::Close(_))))
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
);
|
||||
server_handle.abort();
|
||||
});
|
||||
});
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
// Benchmark subscription operations
|
||||
fn benchmark_subscriptions(c: &mut Criterion) {
|
||||
let rt = Runtime::new().unwrap();
|
||||
|
||||
let mut group = c.benchmark_group("subscriptions");
|
||||
group.measurement_time(Duration::from_secs(20));
|
||||
|
||||
group.bench_function("subscribe_graph_execution", |b| {
|
||||
b.to_async(&rt).iter_with_large_drop(|| async {
|
||||
let (server_url, server_handle) = create_test_server(false).await;
|
||||
let url = format!("{server_url}/ws");
|
||||
let (mut ws_stream, _) = connect_async(&url).await.unwrap();
|
||||
let msg = json!({
|
||||
"method": "subscribe_graph_execution",
|
||||
"data": {
|
||||
"graph_exec_id": "test_exec_123"
|
||||
}
|
||||
});
|
||||
|
||||
ws_stream
|
||||
.send(Message::Text(msg.to_string()))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Wait for response
|
||||
if let Some(Ok(Message::Text(response))) = ws_stream.next().await {
|
||||
let resp: serde_json::Value = serde_json::from_str(&response).unwrap();
|
||||
assert_eq!(resp["success"], true);
|
||||
}
|
||||
|
||||
server_handle.abort();
|
||||
});
|
||||
});
|
||||
|
||||
group.bench_function("unsubscribe", |b| {
|
||||
b.to_async(&rt).iter_with_large_drop(|| async {
|
||||
let (server_url, server_handle) = create_test_server(false).await;
|
||||
let url = format!("{server_url}/ws");
|
||||
let (mut ws_stream, _) = connect_async(&url).await.unwrap();
|
||||
|
||||
// First subscribe
|
||||
let msg = json!({
|
||||
"method": "subscribe_graph_execution",
|
||||
"data": {
|
||||
"graph_exec_id": "test_exec_123"
|
||||
}
|
||||
});
|
||||
ws_stream
|
||||
.send(Message::Text(msg.to_string()))
|
||||
.await
|
||||
.unwrap();
|
||||
ws_stream.next().await; // Consume response
|
||||
let msg = json!({
|
||||
"method": "unsubscribe",
|
||||
"data": {
|
||||
"channel": "test_user|graph_exec#test_exec_123"
|
||||
}
|
||||
});
|
||||
|
||||
ws_stream
|
||||
.send(Message::Text(msg.to_string()))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Wait for response
|
||||
if let Some(Ok(Message::Text(response))) = ws_stream.next().await {
|
||||
let resp: serde_json::Value = serde_json::from_str(&response).unwrap();
|
||||
assert_eq!(resp["success"], true);
|
||||
}
|
||||
|
||||
server_handle.abort();
|
||||
});
|
||||
});
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
// Benchmark message throughput
|
||||
fn benchmark_message_throughput(c: &mut Criterion) {
|
||||
let rt = Runtime::new().unwrap();
|
||||
|
||||
let mut group = c.benchmark_group("message_throughput");
|
||||
group.measurement_time(Duration::from_secs(30));
|
||||
|
||||
for msg_count in [10, 100, 1000].iter() {
|
||||
group.throughput(Throughput::Elements(*msg_count as u64));
|
||||
group.bench_with_input(
|
||||
BenchmarkId::from_parameter(msg_count),
|
||||
msg_count,
|
||||
|b, &msg_count| {
|
||||
b.to_async(&rt).iter_with_large_drop(|| async {
|
||||
let (server_url, server_handle) = create_test_server(false).await;
|
||||
let url = format!("{server_url}/ws");
|
||||
let (mut ws_stream, _) = connect_async(&url).await.unwrap();
|
||||
// Send multiple heartbeat messages
|
||||
for _ in 0..msg_count {
|
||||
let msg = json!({
|
||||
"method": "heartbeat",
|
||||
"data": "ping"
|
||||
});
|
||||
ws_stream
|
||||
.send(Message::Text(msg.to_string()))
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
// Receive all responses
|
||||
for _ in 0..msg_count {
|
||||
ws_stream.next().await;
|
||||
}
|
||||
|
||||
server_handle.abort();
|
||||
});
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
// Benchmark concurrent connections
|
||||
fn benchmark_concurrent_connections(c: &mut Criterion) {
|
||||
let rt = Runtime::new().unwrap();
|
||||
|
||||
let mut group = c.benchmark_group("concurrent_connections");
|
||||
group.measurement_time(Duration::from_secs(60));
|
||||
group.sample_size(10);
|
||||
|
||||
for num_clients in [100, 500, 1000].iter() {
|
||||
group.throughput(Throughput::Elements(*num_clients as u64));
|
||||
group.bench_with_input(
|
||||
BenchmarkId::from_parameter(num_clients),
|
||||
num_clients,
|
||||
|b, &num_clients| {
|
||||
b.to_async(&rt).iter_with_large_drop(|| async {
|
||||
let (server_url, server_handle) = create_test_server(false).await;
|
||||
let url = format!("{server_url}/ws");
|
||||
|
||||
// Create multiple concurrent connections
|
||||
let mut handles = vec![];
|
||||
for i in 0..num_clients {
|
||||
let url = url.clone();
|
||||
let handle = tokio::spawn(async move {
|
||||
let (mut ws_stream, _) = connect_async(&url).await.unwrap();
|
||||
|
||||
// Subscribe to a unique channel
|
||||
let msg = json!({
|
||||
"method": "subscribe_graph_execution",
|
||||
"data": {
|
||||
"graph_exec_id": format!("exec_{}", i)
|
||||
}
|
||||
});
|
||||
ws_stream
|
||||
.send(Message::Text(msg.to_string()))
|
||||
.await
|
||||
.unwrap();
|
||||
ws_stream.next().await; // Wait for response
|
||||
|
||||
// Send a heartbeat
|
||||
let msg = json!({
|
||||
"method": "heartbeat",
|
||||
"data": "ping"
|
||||
});
|
||||
ws_stream
|
||||
.send(Message::Text(msg.to_string()))
|
||||
.await
|
||||
.unwrap();
|
||||
ws_stream.next().await; // Wait for response
|
||||
|
||||
ws_stream
|
||||
});
|
||||
handles.push(handle);
|
||||
}
|
||||
|
||||
// Wait for all connections to complete
|
||||
for handle in handles {
|
||||
let _ = handle.await;
|
||||
}
|
||||
|
||||
server_handle.abort();
|
||||
});
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
// Benchmark message parsing
|
||||
fn benchmark_message_parsing(c: &mut Criterion) {
|
||||
let mut group = c.benchmark_group("message_parsing");
|
||||
|
||||
// Test different message sizes
|
||||
for msg_size in [100, 1000, 10000].iter() {
|
||||
group.throughput(Throughput::Bytes(*msg_size as u64));
|
||||
group.bench_with_input(
|
||||
BenchmarkId::new("parse_json", msg_size),
|
||||
msg_size,
|
||||
|b, &msg_size| {
|
||||
let data_str = "x".repeat(msg_size);
|
||||
let json_msg = json!({
|
||||
"method": "subscribe_graph_execution",
|
||||
"data": {
|
||||
"graph_exec_id": data_str
|
||||
}
|
||||
});
|
||||
let json_str = json_msg.to_string();
|
||||
|
||||
b.iter(|| {
|
||||
let _: models::WSMessage = serde_json::from_str(&json_str).unwrap();
|
||||
});
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
// Benchmark Redis event processing
|
||||
fn benchmark_redis_event_processing(c: &mut Criterion) {
|
||||
let mut group = c.benchmark_group("redis_event_processing");
|
||||
|
||||
group.bench_function("parse_execution_event", |b| {
|
||||
let event = json!({
|
||||
"payload": {
|
||||
"event_type": "graph_execution_update",
|
||||
"id": "exec_123",
|
||||
"graph_id": "graph_456",
|
||||
"graph_version": 1,
|
||||
"user_id": "user_789",
|
||||
"status": "RUNNING",
|
||||
"started_at": "2024-01-01T00:00:00Z",
|
||||
"inputs": {"test": "data"},
|
||||
"outputs": {}
|
||||
}
|
||||
});
|
||||
let event_str = event.to_string();
|
||||
|
||||
b.iter(|| {
|
||||
let _: models::RedisEventWrapper = serde_json::from_str(&event_str).unwrap();
|
||||
});
|
||||
});
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
criterion_group!(
|
||||
benches,
|
||||
benchmark_connection_establishment,
|
||||
benchmark_subscriptions,
|
||||
benchmark_message_throughput,
|
||||
benchmark_concurrent_connections,
|
||||
benchmark_message_parsing,
|
||||
benchmark_redis_event_processing
|
||||
);
|
||||
criterion_main!(benches);
|
||||
@@ -1,10 +0,0 @@
|
||||
# Clippy configuration for robust error handling
|
||||
|
||||
# Set the maximum cognitive complexity allowed
|
||||
cognitive-complexity-threshold = 30
|
||||
|
||||
# Disallow dbg! usage in tests
|
||||
allow-dbg-in-tests = false
|
||||
|
||||
# Enforce documentation
|
||||
missing-docs-in-crate-items = true
|
||||
@@ -1,23 +0,0 @@
|
||||
# WebSocket API Configuration
|
||||
|
||||
# Server settings
|
||||
host = "0.0.0.0"
|
||||
port = 8001
|
||||
|
||||
# Authentication
|
||||
enable_auth = true
|
||||
jwt_secret = "your-super-secret-jwt-token-with-at-least-32-characters-long"
|
||||
jwt_algorithm = "HS256"
|
||||
default_user_id = "default"
|
||||
|
||||
# Redis configuration
|
||||
redis_url = "redis://:password@localhost:6379/"
|
||||
|
||||
# Event bus
|
||||
execution_event_bus_name = "execution_event"
|
||||
|
||||
# Message size limit (in bytes)
|
||||
max_message_size_limit = 512000
|
||||
|
||||
# CORS allowed origins
|
||||
backend_cors_allow_origins = ["http://localhost:3000", "https://559f69c159ef.ngrok.app"]
|
||||
@@ -1,75 +0,0 @@
|
||||
use futures_util::{SinkExt, StreamExt};
|
||||
use serde_json::json;
|
||||
use tokio_tungstenite::{connect_async, tungstenite::Message};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let url = "ws://localhost:8001/ws";
|
||||
|
||||
println!("Connecting to {url}");
|
||||
let (mut ws_stream, _) = connect_async(url).await?;
|
||||
println!("Connected!");
|
||||
|
||||
// Subscribe to a graph execution
|
||||
let subscribe_msg = json!({
|
||||
"method": "subscribe_graph_execution",
|
||||
"data": {
|
||||
"graph_exec_id": "test_exec_123"
|
||||
}
|
||||
});
|
||||
|
||||
println!("Sending subscription request...");
|
||||
ws_stream
|
||||
.send(Message::Text(subscribe_msg.to_string()))
|
||||
.await?;
|
||||
|
||||
// Wait for response
|
||||
if let Some(msg) = ws_stream.next().await {
|
||||
if let Message::Text(text) = msg? {
|
||||
println!("Received: {text}");
|
||||
}
|
||||
}
|
||||
|
||||
// Send heartbeat
|
||||
let heartbeat_msg = json!({
|
||||
"method": "heartbeat",
|
||||
"data": "ping"
|
||||
});
|
||||
|
||||
println!("Sending heartbeat...");
|
||||
ws_stream
|
||||
.send(Message::Text(heartbeat_msg.to_string()))
|
||||
.await?;
|
||||
|
||||
// Wait for pong
|
||||
if let Some(msg) = ws_stream.next().await {
|
||||
if let Message::Text(text) = msg? {
|
||||
println!("Received: {text}");
|
||||
}
|
||||
}
|
||||
|
||||
// Unsubscribe
|
||||
let unsubscribe_msg = json!({
|
||||
"method": "unsubscribe",
|
||||
"data": {
|
||||
"channel": "default|graph_exec#test_exec_123"
|
||||
}
|
||||
});
|
||||
|
||||
println!("Sending unsubscribe request...");
|
||||
ws_stream
|
||||
.send(Message::Text(unsubscribe_msg.to_string()))
|
||||
.await?;
|
||||
|
||||
// Wait for response
|
||||
if let Some(msg) = ws_stream.next().await {
|
||||
if let Message::Text(text) = msg? {
|
||||
println!("Received: {text}");
|
||||
}
|
||||
}
|
||||
|
||||
println!("Closing connection...");
|
||||
ws_stream.close(None).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,99 +0,0 @@
|
||||
use jsonwebtoken::Algorithm;
|
||||
use serde::Deserialize;
|
||||
use std::env;
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
use std::str::FromStr;
|
||||
|
||||
#[derive(Clone, Debug, Deserialize)]
|
||||
pub struct Config {
|
||||
pub host: String,
|
||||
pub port: u16,
|
||||
pub enable_auth: bool,
|
||||
pub jwt_secret: String,
|
||||
pub jwt_algorithm: Algorithm,
|
||||
pub execution_event_bus_name: String,
|
||||
pub redis_url: String,
|
||||
pub default_user_id: String,
|
||||
pub max_message_size_limit: usize,
|
||||
pub backend_cors_allow_origins: Vec<String>,
|
||||
}
|
||||
|
||||
impl Config {
|
||||
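/// Load configuration: read `config.toml` if present, otherwise fall back to
/// environment variables with hard-coded defaults; environment variables are
/// then applied again on top so they always win over file values.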
pub fn load(config_path: Option<&Path>) -> Self {
|
||||
let path = config_path.unwrap_or(Path::new("config.toml"));
|
||||
let toml_result = fs::read_to_string(path)
|
||||
.ok()
|
||||
.and_then(|s| toml::from_str::<Config>(&s).ok());
|
||||
|
||||
let mut config = match toml_result {
|
||||
Some(config) => config,
|
||||
None => Config {
|
||||
host: env::var("WEBSOCKET_SERVER_HOST").unwrap_or_else(|_| "0.0.0.0".to_string()),
|
||||
port: env::var("WEBSOCKET_SERVER_PORT")
|
||||
.ok()
|
||||
.and_then(|s| s.parse().ok())
|
||||
.unwrap_or(8001),
|
||||
enable_auth: env::var("ENABLE_AUTH")
|
||||
.ok()
|
||||
.and_then(|s| s.parse().ok())
|
||||
.unwrap_or(true),
|
||||
jwt_secret: env::var("SUPABASE_JWT_SECRET")
|
||||
.unwrap_or_else(|_| "dummy_secret_for_no_auth".to_string()),
|
||||
jwt_algorithm: Algorithm::HS256,
|
||||
execution_event_bus_name: env::var("EXECUTION_EVENT_BUS_NAME")
|
||||
.unwrap_or_else(|_| "execution_event".to_string()),
|
||||
redis_url: env::var("REDIS_URL")
|
||||
.unwrap_or_else(|_| "redis://localhost/".to_string()),
|
||||
default_user_id: "default".to_string(),
|
||||
max_message_size_limit: env::var("MAX_MESSAGE_SIZE_LIMIT")
|
||||
.ok()
|
||||
.and_then(|s| s.parse().ok())
|
||||
.unwrap_or(512000),
|
||||
backend_cors_allow_origins: env::var("BACKEND_CORS_ALLOW_ORIGINS")
|
||||
.unwrap_or_default()
|
||||
.split(',')
|
||||
.map(|s| s.trim().to_string())
|
||||
.filter(|s| !s.is_empty())
|
||||
.collect(),
|
||||
},
|
||||
};
|
||||
|
||||
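// Environment variables always override values loaded from config.toml.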
if let Ok(v) = env::var("WEBSOCKET_SERVER_HOST") {
|
||||
config.host = v;
|
||||
}
|
||||
if let Ok(v) = env::var("WEBSOCKET_SERVER_PORT") {
|
||||
config.port = v.parse().unwrap_or(8001);
|
||||
}
|
||||
if let Ok(v) = env::var("ENABLE_AUTH") {
|
||||
config.enable_auth = v.parse().unwrap_or(true);
|
||||
}
|
||||
if let Ok(v) = env::var("SUPABASE_JWT_SECRET") {
|
||||
config.jwt_secret = v;
|
||||
}
|
||||
if let Ok(v) = env::var("JWT_ALGORITHM") {
|
||||
config.jwt_algorithm = Algorithm::from_str(&v).unwrap_or(Algorithm::HS256);
|
||||
}
|
||||
if let Ok(v) = env::var("EXECUTION_EVENT_BUS_NAME") {
|
||||
config.execution_event_bus_name = v;
|
||||
}
|
||||
if let Ok(v) = env::var("REDIS_URL") {
|
||||
config.redis_url = v;
|
||||
}
|
||||
if let Ok(v) = env::var("DEFAULT_USER_ID") {
|
||||
config.default_user_id = v;
|
||||
}
|
||||
if let Ok(v) = env::var("MAX_MESSAGE_SIZE_LIMIT") {
|
||||
config.max_message_size_limit = v.parse().unwrap_or(512000);
|
||||
}
|
||||
if let Ok(v) = env::var("BACKEND_CORS_ALLOW_ORIGINS") {
|
||||
config.backend_cors_allow_origins = v
|
||||
.split(',')
|
||||
.map(|s| s.trim().to_string())
|
||||
.filter(|s| !s.is_empty())
|
||||
.collect();
|
||||
}
|
||||
|
||||
config
|
||||
}
|
||||
}
|
||||
@@ -1,277 +0,0 @@
|
||||
use futures::StreamExt;
|
||||
use redis::Client as RedisClient;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::atomic::AtomicU64;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::{mpsc, RwLock};
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
use crate::models::{ExecutionEvent, RedisEventWrapper, WSMessage};
|
||||
use crate::stats::Stats;
|
||||
|
||||
pub struct ConnectionManager {
|
||||
pub subscribers: RwLock<HashMap<String, HashSet<u64>>>,
|
||||
pub clients: RwLock<HashMap<u64, (String, mpsc::Sender<String>)>>,
|
||||
pub client_channels: RwLock<HashMap<u64, HashSet<String>>>,
|
||||
pub next_id: AtomicU64,
|
||||
pub redis_client: RedisClient,
|
||||
pub bus_name: String,
|
||||
pub stats: Arc<Stats>,
|
||||
}
|
||||
|
||||
impl ConnectionManager {
|
||||
pub fn new(redis_client: RedisClient, bus_name: String, stats: Arc<Stats>) -> Self {
|
||||
Self {
|
||||
subscribers: RwLock::new(HashMap::new()),
|
||||
clients: RwLock::new(HashMap::new()),
|
||||
client_channels: RwLock::new(HashMap::new()),
|
||||
next_id: AtomicU64::new(0),
|
||||
redis_client,
|
||||
bus_name,
|
||||
stats,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn run_broadcaster(self: Arc<Self>) {
|
||||
info!("🚀 Starting Redis event broadcaster");
|
||||
|
||||
loop {
|
||||
match self.run_broadcaster_inner().await {
|
||||
Ok(_) => {
|
||||
warn!("⚠️ Event broadcaster stopped unexpectedly, restarting in 5 seconds");
|
||||
tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
|
||||
}
|
||||
Err(e) => {
|
||||
error!("❌ Event broadcaster error: {}, restarting in 5 seconds", e);
|
||||
tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn run_broadcaster_inner(
|
||||
self: &Arc<Self>,
|
||||
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
let mut pubsub = self.redis_client.get_async_pubsub().await?;
|
||||
pubsub.psubscribe("*").await?;
|
||||
info!(
|
||||
"📡 Listening to all Redis events, filtering for bus: {}",
|
||||
self.bus_name
|
||||
);
|
||||
|
||||
let mut pubsub_stream = pubsub.on_message();
|
||||
|
||||
loop {
|
||||
let msg = pubsub_stream.next().await;
|
||||
match msg {
|
||||
Some(msg) => {
|
||||
let channel: String = msg.get_channel_name().to_string();
|
||||
debug!("📨 Received message on Redis channel: {}", channel);
|
||||
self.stats
|
||||
.redis_messages_received
|
||||
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
|
||||
let payload: String = match msg.get_payload() {
|
||||
Ok(p) => p,
|
||||
Err(e) => {
|
||||
warn!("⚠️ Failed to get payload from Redis message: {}", e);
|
||||
self.stats
|
||||
.errors_total
|
||||
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
// Parse the channel format: execution_event/{user_id}/{graph_id}/{graph_exec_id}
|
||||
let parts: Vec<&str> = channel.split('/').collect();
|
||||
|
||||
// Check if this is an execution event channel
|
||||
if parts.len() != 4 || parts[0] != self.bus_name {
|
||||
debug!(
|
||||
"🚫 Ignoring non-execution event channel: {} (parts: {:?}, bus_name: {})",
|
||||
channel, parts, self.bus_name
|
||||
);
|
||||
self.stats
|
||||
.redis_messages_ignored
|
||||
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
continue;
|
||||
}
|
||||
|
||||
let user_id = parts[1];
|
||||
let graph_id = parts[2];
|
||||
let graph_exec_id = parts[3];
|
||||
|
||||
debug!(
|
||||
"📥 Received event - user: {}, graph: {}, exec: {}",
|
||||
user_id, graph_id, graph_exec_id
|
||||
);
|
||||
|
||||
// Parse the wrapped event
|
||||
let wrapped_event = match RedisEventWrapper::parse(&payload) {
|
||||
Ok(e) => e,
|
||||
Err(e) => {
|
||||
warn!("⚠️ Failed to parse event JSON: {}, payload: {}", e, payload);
|
||||
self.stats
|
||||
.errors_json_parse
|
||||
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
self.stats
|
||||
.errors_total
|
||||
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let event = wrapped_event.payload;
|
||||
debug!("📦 Event received: {:?}", event);
|
||||
|
||||
let (method, event_json) = match &event {
|
||||
ExecutionEvent::GraphExecutionUpdate(graph_event) => {
|
||||
self.stats
|
||||
.graph_execution_events
|
||||
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
self.stats
|
||||
.events_received_total
|
||||
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
(
|
||||
"graph_execution_event",
|
||||
match serde_json::to_value(graph_event) {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
error!("❌ Failed to serialize graph event: {}", e);
|
||||
continue;
|
||||
}
|
||||
},
|
||||
)
|
||||
}
|
||||
ExecutionEvent::NodeExecutionUpdate(node_event) => {
|
||||
self.stats
|
||||
.node_execution_events
|
||||
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
self.stats
|
||||
.events_received_total
|
||||
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
(
|
||||
"node_execution_event",
|
||||
match serde_json::to_value(node_event) {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
error!("❌ Failed to serialize node event: {}", e);
|
||||
continue;
|
||||
}
|
||||
},
|
||||
)
|
||||
}
|
||||
};
|
||||
|
||||
// Create the channel keys in the format expected by WebSocket clients
|
||||
let mut channels_to_notify = Vec::new();
|
||||
|
||||
// For both event types, notify the specific execution channel
|
||||
let exec_channel = format!("{user_id}|graph_exec#{graph_exec_id}");
|
||||
channels_to_notify.push(exec_channel.clone());
|
||||
|
||||
// For graph execution events, also notify the graph executions channel
|
||||
if matches!(&event, ExecutionEvent::GraphExecutionUpdate(_)) {
|
||||
let graph_channel = format!("{user_id}|graph#{graph_id}|executions");
|
||||
channels_to_notify.push(graph_channel);
|
||||
}
|
||||
|
||||
debug!(
|
||||
"📢 Broadcasting {} event to channels: {:?}",
|
||||
method, channels_to_notify
|
||||
);
|
||||
|
||||
let subs = self.subscribers.read().await;
|
||||
|
||||
// Log current subscriber state
|
||||
debug!("📊 Current subscribers count: {}", subs.len());
|
||||
|
||||
for channel_key in channels_to_notify {
|
||||
let ws_msg = WSMessage {
|
||||
method: method.to_string(),
|
||||
channel: Some(channel_key.clone()),
|
||||
data: Some(event_json.clone()),
|
||||
..Default::default()
|
||||
};
|
||||
let json_msg = match serde_json::to_string(&ws_msg) {
|
||||
Ok(j) => {
|
||||
debug!("📤 Sending WebSocket message: {}", j);
|
||||
j
|
||||
}
|
||||
Err(e) => {
|
||||
error!("❌ Failed to serialize WebSocket message: {}", e);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
if let Some(client_ids) = subs.get(&channel_key) {
|
||||
let clients = self.clients.read().await;
|
||||
let client_count = client_ids.len();
|
||||
debug!(
|
||||
"📣 Broadcasting to {} clients on channel: {}",
|
||||
client_count, channel_key
|
||||
);
|
||||
|
||||
for &cid in client_ids {
|
||||
if let Some((user_id, tx)) = clients.get(&cid) {
|
||||
match tx.try_send(json_msg.clone()) {
|
||||
Ok(_) => {
|
||||
debug!(
|
||||
"✅ Message sent immediately to client {} (user: {})",
|
||||
cid, user_id
|
||||
);
|
||||
self.stats
|
||||
.messages_sent_total
|
||||
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
}
|
||||
Err(mpsc::error::TrySendError::Full(_)) => {
|
||||
// Channel is full, try with a small timeout
|
||||
let tx_clone = tx.clone();
|
||||
let msg_clone = json_msg.clone();
|
||||
let stats_clone = self.stats.clone();
|
||||
tokio::spawn(async move {
|
||||
match tokio::time::timeout(
|
||||
std::time::Duration::from_millis(100),
|
||||
tx_clone.send(msg_clone),
|
||||
)
|
||||
.await {
|
||||
Ok(Ok(_)) => {
|
||||
stats_clone
|
||||
.messages_sent_total
|
||||
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
}
|
||||
_ => {
|
||||
stats_clone
|
||||
.messages_failed_total
|
||||
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
});
|
||||
warn!("⚠️ Channel full for client {} (user: {}), sending async", cid, user_id);
|
||||
}
|
||||
Err(mpsc::error::TrySendError::Closed(_)) => {
|
||||
warn!(
|
||||
"⚠️ Channel closed for client {} (user: {})",
|
||||
cid, user_id
|
||||
);
|
||||
self.stats
|
||||
.messages_failed_total
|
||||
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
warn!("⚠️ Client {} not found in clients map", cid);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
info!("📭 No subscribers for channel: {}", channel_key);
|
||||
}
|
||||
}
|
||||
}
|
||||
None => {
|
||||
return Err("❌ Redis pubsub stream ended".into());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,442 +0,0 @@
|
||||
use axum::extract::ws::{CloseFrame, Message, WebSocket};
|
||||
use axum::{
|
||||
extract::{Query, WebSocketUpgrade},
|
||||
http::HeaderMap,
|
||||
response::IntoResponse,
|
||||
Extension,
|
||||
};
|
||||
use jsonwebtoken::{decode, DecodingKey, Validation};
|
||||
use serde_json::{json, Value};
|
||||
use std::collections::HashMap;
|
||||
use tokio::sync::mpsc;
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
use crate::connection_manager::ConnectionManager;
|
||||
use crate::models::{Claims, WSMessage};
|
||||
use crate::AppState;
|
||||
|
||||
// Helper function to safely serialize messages
|
||||
fn serialize_message(msg: &WSMessage) -> String {
|
||||
serde_json::to_string(msg).unwrap_or_else(|e| {
|
||||
error!("❌ Failed to serialize WebSocket message: {}", e);
|
||||
json!({"method": "error", "success": false, "error": "Internal serialization error"})
|
||||
.to_string()
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn ws_handler(
|
||||
ws: WebSocketUpgrade,
|
||||
query: Query<HashMap<String, String>>,
|
||||
_headers: HeaderMap,
|
||||
Extension(state): Extension<AppState>,
|
||||
) -> impl IntoResponse {
|
||||
let token = query.0.get("token").cloned();
|
||||
let mut user_id = state.config.default_user_id.clone();
|
||||
let mut auth_error_code: Option<u16> = None;
|
||||
|
||||
if state.config.enable_auth {
|
||||
match token {
|
||||
Some(token_str) => {
|
||||
debug!("🔐 Authenticating WebSocket connection");
|
||||
let mut validation = Validation::new(state.config.jwt_algorithm);
|
||||
validation.set_audience(&["authenticated"]);
|
||||
|
||||
let key = DecodingKey::from_secret(state.config.jwt_secret.as_bytes());
|
||||
|
||||
match decode::<Claims>(&token_str, &key, &validation) {
|
||||
Ok(token_data) => {
|
||||
user_id = token_data.claims.sub.clone();
|
||||
debug!("✅ WebSocket authenticated for user: {}", user_id);
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("⚠️ JWT validation failed: {}", e);
|
||||
auth_error_code = Some(4003);
|
||||
}
|
||||
}
|
||||
}
|
||||
None => {
|
||||
warn!("⚠️ Missing authentication token in WebSocket connection");
|
||||
auth_error_code = Some(4001);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
debug!("🔓 WebSocket connection without auth (auth disabled)");
|
||||
}
|
||||
|
||||
if let Some(code) = auth_error_code {
|
||||
error!("❌ WebSocket authentication failed with code: {}", code);
|
||||
state
|
||||
.mgr
|
||||
.stats
|
||||
.connections_failed_auth
|
||||
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
state
|
||||
.mgr
|
||||
.stats
|
||||
.connections_total
|
||||
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
return ws
|
||||
.on_upgrade(move |mut socket: WebSocket| async move {
|
||||
let close_frame = Some(CloseFrame {
|
||||
code,
|
||||
reason: "Authentication failed".into(),
|
||||
});
|
||||
let _ = socket.send(Message::Close(close_frame)).await;
|
||||
let _ = socket.close().await;
|
||||
})
|
||||
.into_response();
|
||||
}
|
||||
|
||||
debug!("✅ WebSocket connection established for user: {}", user_id);
|
||||
ws.on_upgrade(move |socket| {
|
||||
handle_socket(
|
||||
socket,
|
||||
user_id,
|
||||
state.mgr.clone(),
|
||||
state.config.max_message_size_limit,
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
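/// Update subscription counters: `add == true` on subscribe, `false` on
/// unsubscribe; per-channel counts are pruned once they drop to zero.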
async fn update_subscription_stats(mgr: &ConnectionManager, channel: &str, add: bool) {
|
||||
if add {
|
||||
mgr.stats
|
||||
.subscriptions_total
|
||||
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
mgr.stats
|
||||
.subscriptions_active
|
||||
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
|
||||
let mut channel_stats = mgr.stats.channels_active.write().await;
|
||||
let count = channel_stats.entry(channel.to_string()).or_insert(0);
|
||||
*count += 1;
|
||||
} else {
|
||||
mgr.stats
|
||||
.unsubscriptions_total
|
||||
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
mgr.stats
|
||||
.subscriptions_active
|
||||
.fetch_sub(1, std::sync::atomic::Ordering::Relaxed);
|
||||
|
||||
let mut channel_stats = mgr.stats.channels_active.write().await;
|
||||
if let Some(count) = channel_stats.get_mut(channel) {
|
||||
*count = count.saturating_sub(1);
|
||||
if *count == 0 {
|
||||
channel_stats.remove(channel);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn handle_socket(
|
||||
mut socket: WebSocket,
|
||||
user_id: String,
|
||||
mgr: std::sync::Arc<ConnectionManager>,
|
||||
max_size: usize,
|
||||
) {
|
||||
let client_id = mgr
|
||||
.next_id
|
||||
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
let (tx, mut rx) = mpsc::channel::<String>(10);
|
||||
info!("👋 New WebSocket client {} for user: {}", client_id, user_id);
|
||||
|
||||
// Update connection stats
|
||||
mgr.stats
|
||||
.connections_total
|
||||
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
mgr.stats
|
||||
.connections_active
|
||||
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
|
||||
// Update active users
|
||||
{
|
||||
let mut active_users = mgr.stats.active_users.write().await;
|
||||
let count = active_users.entry(user_id.clone()).or_insert(0);
|
||||
*count += 1;
|
||||
}
|
||||
|
||||
{
|
||||
let mut clients = mgr.clients.write().await;
|
||||
clients.insert(client_id, (user_id.clone(), tx));
|
||||
}
|
||||
|
||||
{
|
||||
let mut client_channels = mgr.client_channels.write().await;
|
||||
client_channels.insert(client_id, std::collections::HashSet::new());
|
||||
}
|
||||
|
||||
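// Per-client event loop: forward broadcast messages queued on `rx` and
// handle incoming WebSocket frames; exit on any send failure or close.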
loop {
|
||||
tokio::select! {
|
||||
msg = rx.recv() => {
|
||||
if let Some(msg) = msg {
|
||||
if socket.send(Message::Text(msg)).await.is_err() {
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
incoming = socket.recv() => {
|
||||
let msg = match incoming {
|
||||
Some(Ok(msg)) => msg,
|
||||
_ => break,
|
||||
};
|
||||
match msg {
|
||||
Message::Text(text) => {
|
||||
if text.len() > max_size {
|
||||
warn!("⚠️ Message from client {} exceeds size limit: {} > {}", client_id, text.len(), max_size);
|
||||
let err_resp = serialize_message(&WSMessage {
|
||||
method: "error".to_string(),
|
||||
success: Some(false),
|
||||
error: Some("Message exceeds size limit".to_string()),
|
||||
..Default::default()
|
||||
});
|
||||
if socket.send(Message::Text(err_resp)).await.is_err() {
|
||||
break;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
mgr.stats.messages_received_total.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
|
||||
let ws_msg: WSMessage = match serde_json::from_str(&text) {
|
||||
Ok(m) => m,
|
||||
Err(e) => {
|
||||
warn!("⚠️ Invalid message format from client {}: {}", client_id, e);
|
||||
mgr.stats.errors_json_parse.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
mgr.stats.errors_total.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
let err_resp = serialize_message(&WSMessage {
|
||||
method: "error".to_string(),
|
||||
success: Some(false),
|
||||
error: Some("Invalid message format. Review the schema and retry".to_string()),
|
||||
..Default::default()
|
||||
});
|
||||
if socket.send(Message::Text(err_resp)).await.is_err() {
|
||||
break;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
debug!("📥 Received {} message from client {}", ws_msg.method, client_id);
|
||||
|
||||
match ws_msg.method.as_str() {
|
||||
"subscribe_graph_execution" => {
|
||||
let graph_exec_id = match &ws_msg.data {
|
||||
Some(Value::Object(map)) => map.get("graph_exec_id").and_then(|v| v.as_str()),
|
||||
_ => None,
|
||||
};
|
||||
let Some(graph_exec_id) = graph_exec_id else {
|
||||
warn!("⚠️ Missing graph_exec_id in subscribe_graph_execution from client {}", client_id);
|
||||
let err_resp = json!({"method": "error", "success": false, "error": "Missing graph_exec_id"});
|
||||
if socket.send(Message::Text(err_resp.to_string())).await.is_err() {
|
||||
break;
|
||||
}
|
||||
continue;
|
||||
};
|
||||
let channel = format!("{user_id}|graph_exec#{graph_exec_id}");
|
||||
debug!("📌 Client {} subscribing to channel: {}", client_id, channel);
|
||||
|
||||
{
|
||||
let mut subs = mgr.subscribers.write().await;
|
||||
subs.entry(channel.clone()).or_default().insert(client_id);
|
||||
}
|
||||
{
|
||||
let mut chs = mgr.client_channels.write().await;
|
||||
if let Some(set) = chs.get_mut(&client_id) {
|
||||
set.insert(channel.clone());
|
||||
}
|
||||
}
|
||||
|
||||
// Update subscription stats
|
||||
update_subscription_stats(&mgr, &channel, true).await;
|
||||
|
||||
let resp = WSMessage {
|
||||
method: "subscribe_graph_execution".to_string(),
|
||||
success: Some(true),
|
||||
channel: Some(channel),
|
||||
..Default::default()
|
||||
};
|
||||
if socket.send(Message::Text(serialize_message(&resp))).await.is_err() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
"subscribe_graph_executions" => {
|
||||
let graph_id = match &ws_msg.data {
|
||||
Some(Value::Object(map)) => map.get("graph_id").and_then(|v| v.as_str()),
|
||||
_ => None,
|
||||
};
|
||||
let Some(graph_id) = graph_id else {
|
||||
let err_resp = json!({"method": "error", "success": false, "error": "Missing graph_id"});
|
||||
if socket.send(Message::Text(err_resp.to_string())).await.is_err() {
|
||||
break;
|
||||
}
|
||||
continue;
|
||||
};
|
||||
let channel = format!("{user_id}|graph#{graph_id}|executions");
|
||||
|
||||
{
|
||||
let mut subs = mgr.subscribers.write().await;
|
||||
subs.entry(channel.clone()).or_default().insert(client_id);
|
||||
}
|
||||
{
|
||||
let mut chs = mgr.client_channels.write().await;
|
||||
if let Some(set) = chs.get_mut(&client_id) {
|
||||
set.insert(channel.clone());
|
||||
}
|
||||
}
|
||||
debug!("📌 Client {} subscribing to channel: {}", client_id, channel);
|
||||
// Update subscription stats
|
||||
update_subscription_stats(&mgr, &channel, true).await;
|
||||
|
||||
let resp = WSMessage {
|
||||
method: "subscribe_graph_executions".to_string(),
|
||||
success: Some(true),
|
||||
channel: Some(channel),
|
||||
..Default::default()
|
||||
};
|
||||
if socket.send(Message::Text(serialize_message(&resp))).await.is_err() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
"unsubscribe" => {
|
||||
let channel = match &ws_msg.data {
|
||||
Some(Value::String(s)) => Some(s.as_str()),
|
||||
Some(Value::Object(map)) => map.get("channel").and_then(|v| v.as_str()),
|
||||
_ => None,
|
||||
};
|
||||
let Some(channel) = channel else {
|
||||
let err_resp = json!({"method": "error", "success": false, "error": "Missing channel"});
|
||||
if socket.send(Message::Text(err_resp.to_string())).await.is_err() {
|
||||
break;
|
||||
}
|
||||
continue;
|
||||
};
|
||||
let channel = channel.to_string();
|
||||
|
||||
if !channel.starts_with(&format!("{user_id}|")) {
|
||||
let err_resp = json!({"method": "error", "success": false, "error": "Unauthorized channel"});
|
||||
if socket.send(Message::Text(err_resp.to_string())).await.is_err() {
|
||||
break;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
{
|
||||
let mut subs = mgr.subscribers.write().await;
|
||||
if let Some(set) = subs.get_mut(&channel) {
|
||||
set.remove(&client_id);
|
||||
if set.is_empty() {
|
||||
subs.remove(&channel);
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
let mut chs = mgr.client_channels.write().await;
|
||||
if let Some(set) = chs.get_mut(&client_id) {
|
||||
set.remove(&channel);
|
||||
}
|
||||
}
|
||||
|
||||
// Update subscription stats
|
||||
update_subscription_stats(&mgr, &channel, false).await;
|
||||
|
||||
let resp = WSMessage {
|
||||
method: "unsubscribe".to_string(),
|
||||
success: Some(true),
|
||||
channel: Some(channel),
|
||||
..Default::default()
|
||||
};
|
||||
if socket.send(Message::Text(serialize_message(&resp))).await.is_err() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
"heartbeat" => {
|
||||
if ws_msg.data == Some(Value::String("ping".to_string())) {
|
||||
let resp = WSMessage {
|
||||
method: "heartbeat".to_string(),
|
||||
data: Some(Value::String("pong".to_string())),
|
||||
success: Some(true),
|
||||
..Default::default()
|
||||
};
|
||||
if socket.send(Message::Text(serialize_message(&resp))).await.is_err() {
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
let err_resp = json!({"method": "error", "success": false, "error": "Invalid heartbeat"});
|
||||
if socket.send(Message::Text(err_resp.to_string())).await.is_err() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
warn!("❓ Unknown method '{}' from client {}", ws_msg.method, client_id);
|
||||
let err_resp = json!({"method": "error", "success": false, "error": "Unknown method"});
|
||||
if socket.send(Message::Text(err_resp.to_string())).await.is_err() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Message::Close(_) => break,
|
||||
Message::Ping(_) => {
|
||||
if socket.send(Message::Pong(vec![])).await.is_err() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Message::Pong(_) => {}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
else => break,
|
||||
}
|
||||
}
|
||||
|
||||
// Cleanup
debug!("👋 WebSocket client {} disconnected, cleaning up", client_id);

// Update connection stats
mgr.stats
.connections_active
.fetch_sub(1, std::sync::atomic::Ordering::Relaxed);

// Update active users
{
let mut active_users = mgr.stats.active_users.write().await;
if let Some(count) = active_users.get_mut(&user_id) {
*count = count.saturating_sub(1);
if *count == 0 {
active_users.remove(&user_id);
}
}
}

let channels = {
let mut client_channels = mgr.client_channels.write().await;
client_channels.remove(&client_id).unwrap_or_default()
};

{
let mut subs = mgr.subscribers.write().await;
for channel in &channels {
if let Some(set) = subs.get_mut(channel) {
set.remove(&client_id);
if set.is_empty() {
subs.remove(channel);
}
}
}
}

// Update subscription stats for all channels the client was subscribed to
for channel in &channels {
update_subscription_stats(&mgr, channel, false).await;
}

{
let mut clients = mgr.clients.write().await;
clients.remove(&client_id);
}

debug!("✨ Cleanup completed for client {}", client_id);
}

@@ -1,26 +0,0 @@
#![deny(warnings)]
#![deny(clippy::unwrap_used)]
#![deny(clippy::panic)]
#![deny(clippy::unimplemented)]
#![deny(clippy::todo)]


pub mod config;
pub mod connection_manager;
pub mod handlers;
pub mod models;
pub mod stats;

pub use config::Config;
pub use connection_manager::ConnectionManager;
pub use handlers::ws_handler;
pub use stats::Stats;

use std::sync::Arc;

#[derive(Clone)]
pub struct AppState {
    pub mgr: Arc<ConnectionManager>,
    pub config: Arc<Config>,
    pub stats: Arc<Stats>,
}
@@ -1,172 +0,0 @@
|
||||
use axum::{
|
||||
body::Body,
|
||||
http::{header, StatusCode},
|
||||
response::Response,
|
||||
routing::get,
|
||||
Router,
|
||||
};
|
||||
use clap::Parser;
|
||||
use std::sync::Arc;
|
||||
use tokio::net::TcpListener;
|
||||
use tower_http::cors::{Any, CorsLayer};
|
||||
use tracing::{debug, error, info};
|
||||
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
|
||||
|
||||
use crate::config::Config;
|
||||
use crate::connection_manager::ConnectionManager;
|
||||
use crate::handlers::ws_handler;
|
||||
|
||||
async fn stats_handler(
|
||||
axum::Extension(state): axum::Extension<AppState>,
|
||||
) -> Result<axum::response::Json<stats::StatsSnapshot>, StatusCode> {
|
||||
let snapshot = state.stats.snapshot().await;
|
||||
Ok(axum::response::Json(snapshot))
|
||||
}
|
||||
|
||||
async fn prometheus_handler(
|
||||
axum::Extension(state): axum::Extension<AppState>,
|
||||
) -> Result<Response, StatusCode> {
|
||||
let snapshot = state.stats.snapshot().await;
|
||||
let prometheus_text = state.stats.to_prometheus_format(&snapshot);
|
||||
|
||||
Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header(header::CONTENT_TYPE, "text/plain; version=0.0.4")
|
||||
.body(Body::from(prometheus_text))
|
||||
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)
|
||||
}
|
||||
|
||||
mod config;
|
||||
mod connection_manager;
|
||||
mod handlers;
|
||||
mod models;
|
||||
mod stats;
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(author, version, about)]
|
||||
struct Cli {
|
||||
/// Path to a TOML configuration file
|
||||
#[arg(short = 'c', long = "config", value_name = "FILE")]
|
||||
config: Option<std::path::PathBuf>,
|
||||
}
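// Illustrative only (not part of the original source): Config::load is not shown here, so the
// exact TOML key names are assumptions inferred from the fields referenced later in this file
// (host, port, enable_auth, redis_url, execution_event_bus_name, backend_cors_allow_origins).
// A file passed via `--config ws.toml` might then look roughly like:
//
//   host = "0.0.0.0"
//   port = 8001
//   enable_auth = true
//   redis_url = "redis://localhost:6379"
//   execution_event_bus_name = "execution_event_bus"
//   backend_cors_allow_origins = ["http://localhost:3000"]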
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct AppState {
|
||||
mgr: Arc<ConnectionManager>,
|
||||
config: Arc<Config>,
|
||||
stats: Arc<stats::Stats>,
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
// Initialize tracing
|
||||
tracing_subscriber::registry()
|
||||
.with(
|
||||
tracing_subscriber::EnvFilter::try_from_default_env()
|
||||
.unwrap_or_else(|_| "websocket=info,tower_http=debug".into()),
|
||||
)
|
||||
.with(tracing_subscriber::fmt::layer())
|
||||
.init();
|
||||
|
||||
info!("🚀 Starting WebSocket API server");
|
||||
|
||||
let cli = Cli::parse();
|
||||
let config = Arc::new(Config::load(cli.config.as_deref()));
|
||||
info!(
|
||||
"⚙️ Configuration loaded - host: {}, port: {}, auth: {}",
|
||||
config.host, config.port, config.enable_auth
|
||||
);
|
||||
|
||||
let redis_client = match redis::Client::open(config.redis_url.clone()) {
|
||||
Ok(client) => {
|
||||
debug!("✅ Redis client created successfully");
|
||||
client
|
||||
}
|
||||
Err(e) => {
|
||||
error!(
|
||||
"❌ Failed to create Redis client: {}. Please check REDIS_URL environment variable",
|
||||
e
|
||||
);
|
||||
std::process::exit(1);
|
||||
}
|
||||
};
|
||||
|
||||
let stats = Arc::new(stats::Stats::default());
|
||||
let mgr = Arc::new(ConnectionManager::new(
|
||||
redis_client,
|
||||
config.execution_event_bus_name.clone(),
|
||||
stats.clone(),
|
||||
));
|
||||
|
||||
let mgr_clone = mgr.clone();
|
||||
tokio::spawn(async move {
|
||||
debug!("📡 Starting event broadcaster task");
|
||||
mgr_clone.run_broadcaster().await;
|
||||
});
|
||||
|
||||
let state = AppState {
|
||||
mgr,
|
||||
config: config.clone(),
|
||||
stats,
|
||||
};
|
||||
|
||||
let app = Router::new()
|
||||
.route("/ws", get(ws_handler))
|
||||
.route("/stats", get(stats_handler))
|
||||
.route("/metrics", get(prometheus_handler))
|
||||
.layer(axum::Extension(state));
|
||||
|
||||
let cors = if config.backend_cors_allow_origins.is_empty() {
|
||||
// If no specific origins configured, allow any origin but without credentials
|
||||
CorsLayer::new()
|
||||
.allow_methods(Any)
|
||||
.allow_headers(Any)
|
||||
.allow_origin(Any)
|
||||
} else {
|
||||
// If specific origins configured, allow credentials
|
||||
CorsLayer::new()
|
||||
.allow_methods([
|
||||
axum::http::Method::GET,
|
||||
axum::http::Method::POST,
|
||||
axum::http::Method::PUT,
|
||||
axum::http::Method::DELETE,
|
||||
axum::http::Method::OPTIONS,
|
||||
])
|
||||
.allow_headers(vec![
|
||||
axum::http::header::CONTENT_TYPE,
|
||||
axum::http::header::AUTHORIZATION,
|
||||
])
|
||||
.allow_credentials(true)
|
||||
.allow_origin(
|
||||
config
|
||||
.backend_cors_allow_origins
|
||||
.iter()
|
||||
.filter_map(|o| o.parse::<axum::http::HeaderValue>().ok())
|
||||
.collect::<Vec<_>>(),
|
||||
)
|
||||
};
|
||||
|
||||
let app = app.layer(cors);
|
||||
|
||||
let addr = format!("{}:{}", config.host, config.port);
|
||||
let listener = match TcpListener::bind(&addr).await {
|
||||
Ok(listener) => {
|
||||
info!("🎧 WebSocket server listening on: {}", addr);
|
||||
listener
|
||||
}
|
||||
Err(e) => {
|
||||
error!(
|
||||
"❌ Failed to bind to {}: {}. Please check if the port is already in use",
|
||||
addr, e
|
||||
);
|
||||
std::process::exit(1);
|
||||
}
|
||||
};
|
||||
|
||||
info!("✨ WebSocket API server ready to accept connections");
|
||||
|
||||
if let Err(e) = axum::serve(listener, app.into_make_service()).await {
|
||||
error!("💥 Server error: {}", e);
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
@@ -1,103 +0,0 @@
use serde::{Deserialize, Serialize};
use serde_json::Value;

#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct WSMessage {
    pub method: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub data: Option<Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub success: Option<bool>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub channel: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
}
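// Illustrative only (not part of the original source): based on the handler code above, a
// client request and the corresponding server reply serialize roughly as:
//
//   -> {"method": "subscribe_graph_execution", "data": {"graph_exec_id": "<id>"}}
//   <- {"method": "subscribe_graph_execution", "success": true, "channel": "<user_id>|graph_exec#<id>"}
//
// Optional fields are omitted on the wire thanks to `skip_serializing_if = "Option::is_none"`.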

#[derive(Deserialize)]
pub struct Claims {
    pub sub: String,
}

// Event models moved from events.rs

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "event_type")]
pub enum ExecutionEvent {
    #[serde(rename = "graph_execution_update")]
    GraphExecutionUpdate(GraphExecutionEvent),
    #[serde(rename = "node_execution_update")]
    NodeExecutionUpdate(NodeExecutionEvent),
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GraphExecutionEvent {
    pub id: String,
    pub graph_id: String,
    pub graph_version: u32,
    pub user_id: String,
    pub status: ExecutionStatus,
    pub started_at: Option<String>,
    pub ended_at: Option<String>,
    pub preset_id: Option<String>,
    pub stats: Option<ExecutionStats>,

    // Keep these as JSON since they vary by graph
    pub inputs: Value,
    pub outputs: Value,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeExecutionEvent {
    pub node_exec_id: String,
    pub node_id: String,
    pub graph_exec_id: String,
    pub graph_id: String,
    pub graph_version: u32,
    pub user_id: String,
    pub block_id: String,
    pub status: ExecutionStatus,
    pub add_time: String,
    pub queue_time: Option<String>,
    pub start_time: Option<String>,
    pub end_time: Option<String>,

    // Keep these as JSON since they vary by node type
    pub input_data: Value,
    pub output_data: Value,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExecutionStats {
    pub cost: f64,
    pub duration: f64,
    pub duration_cpu_only: f64,
    pub error: Option<String>,
    pub node_error_count: u32,
    pub node_exec_count: u32,
    pub node_exec_time: f64,
    pub node_exec_time_cpu_only: f64,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum ExecutionStatus {
    Queued,
    Running,
    Completed,
    Failed,
    Incomplete,
    Terminated,
}

// Wrapper for the Redis event that includes the payload
#[derive(Debug, Deserialize)]
pub struct RedisEventWrapper {
    pub payload: ExecutionEvent,
}

impl RedisEventWrapper {
    pub fn parse(json_str: &str) -> Result<Self, serde_json::Error> {
        serde_json::from_str(json_str)
    }
}
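// Illustrative sketch (not part of the original source): given the `event_type` tag above, a
// Redis message is expected to deserialize along these lines. The field values are invented
// for illustration; only the shape follows from the serde attributes.
//
//   let raw = r#"{"payload": {"event_type": "graph_execution_update", "id": "...", ...}}"#;
//   match RedisEventWrapper::parse(raw) {
//       Ok(evt) => match evt.payload {
//           ExecutionEvent::GraphExecutionUpdate(e) => debug!("graph {} is {:?}", e.graph_id, e.status),
//           ExecutionEvent::NodeExecutionUpdate(e) => debug!("node {} is {:?}", e.node_id, e.status),
//       },
//       Err(e) => warn!("failed to parse event: {}", e),
//   }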
@@ -1,238 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct Stats {
|
||||
// Connection metrics
|
||||
pub connections_total: AtomicU64,
|
||||
pub connections_active: AtomicU64,
|
||||
pub connections_failed_auth: AtomicU64,
|
||||
|
||||
// Message metrics
|
||||
pub messages_received_total: AtomicU64,
|
||||
pub messages_sent_total: AtomicU64,
|
||||
pub messages_failed_total: AtomicU64,
|
||||
|
||||
// Subscription metrics
|
||||
pub subscriptions_total: AtomicU64,
|
||||
pub subscriptions_active: AtomicU64,
|
||||
pub unsubscriptions_total: AtomicU64,
|
||||
|
||||
// Event metrics by type
|
||||
pub events_received_total: AtomicU64,
|
||||
pub graph_execution_events: AtomicU64,
|
||||
pub node_execution_events: AtomicU64,
|
||||
|
||||
// Redis metrics
|
||||
pub redis_messages_received: AtomicU64,
|
||||
pub redis_messages_ignored: AtomicU64,
|
||||
|
||||
// Channel metrics
|
||||
pub channels_active: RwLock<HashMap<String, usize>>, // channel -> subscriber count
|
||||
|
||||
// User metrics
|
||||
pub active_users: RwLock<HashMap<String, usize>>, // user_id -> connection count
|
||||
|
||||
// Error metrics
|
||||
pub errors_total: AtomicU64,
|
||||
pub errors_json_parse: AtomicU64,
|
||||
pub errors_message_size: AtomicU64,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct StatsSnapshot {
|
||||
// Connection metrics
|
||||
pub connections_total: u64,
|
||||
pub connections_active: u64,
|
||||
pub connections_failed_auth: u64,
|
||||
|
||||
// Message metrics
|
||||
pub messages_received_total: u64,
|
||||
pub messages_sent_total: u64,
|
||||
pub messages_failed_total: u64,
|
||||
|
||||
// Subscription metrics
|
||||
pub subscriptions_total: u64,
|
||||
pub subscriptions_active: u64,
|
||||
pub unsubscriptions_total: u64,
|
||||
|
||||
// Event metrics
|
||||
pub events_received_total: u64,
|
||||
pub graph_execution_events: u64,
|
||||
pub node_execution_events: u64,
|
||||
|
||||
// Redis metrics
|
||||
pub redis_messages_received: u64,
|
||||
pub redis_messages_ignored: u64,
|
||||
|
||||
// Channel metrics
|
||||
pub channels_active_count: usize,
|
||||
pub total_subscribers: usize,
|
||||
|
||||
// User metrics
|
||||
pub active_users_count: usize,
|
||||
|
||||
// Error metrics
|
||||
pub errors_total: u64,
|
||||
pub errors_json_parse: u64,
|
||||
pub errors_message_size: u64,
|
||||
}
|
||||
|
||||
impl Stats {
|
||||
pub async fn snapshot(&self) -> StatsSnapshot {
|
||||
// Take read locks for HashMap data - it's ok if this is slightly stale
|
||||
let channels = self.channels_active.read().await;
|
||||
let total_subscribers: usize = channels.values().sum();
|
||||
let channels_active_count = channels.len();
|
||||
drop(channels); // Release lock early
|
||||
|
||||
let users = self.active_users.read().await;
|
||||
let active_users_count = users.len();
|
||||
drop(users); // Release lock early
|
||||
|
||||
StatsSnapshot {
|
||||
connections_total: self.connections_total.load(Ordering::Relaxed),
|
||||
connections_active: self.connections_active.load(Ordering::Relaxed),
|
||||
connections_failed_auth: self.connections_failed_auth.load(Ordering::Relaxed),
|
||||
|
||||
messages_received_total: self.messages_received_total.load(Ordering::Relaxed),
|
||||
messages_sent_total: self.messages_sent_total.load(Ordering::Relaxed),
|
||||
messages_failed_total: self.messages_failed_total.load(Ordering::Relaxed),
|
||||
|
||||
subscriptions_total: self.subscriptions_total.load(Ordering::Relaxed),
|
||||
subscriptions_active: self.subscriptions_active.load(Ordering::Relaxed),
|
||||
unsubscriptions_total: self.unsubscriptions_total.load(Ordering::Relaxed),
|
||||
|
||||
events_received_total: self.events_received_total.load(Ordering::Relaxed),
|
||||
graph_execution_events: self.graph_execution_events.load(Ordering::Relaxed),
|
||||
node_execution_events: self.node_execution_events.load(Ordering::Relaxed),
|
||||
|
||||
redis_messages_received: self.redis_messages_received.load(Ordering::Relaxed),
|
||||
redis_messages_ignored: self.redis_messages_ignored.load(Ordering::Relaxed),
|
||||
|
||||
channels_active_count,
|
||||
total_subscribers,
|
||||
active_users_count,
|
||||
|
||||
errors_total: self.errors_total.load(Ordering::Relaxed),
|
||||
errors_json_parse: self.errors_json_parse.load(Ordering::Relaxed),
|
||||
errors_message_size: self.errors_message_size.load(Ordering::Relaxed),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_prometheus_format(&self, snapshot: &StatsSnapshot) -> String {
|
||||
let mut output = String::new();
|
||||
|
||||
// Connection metrics
|
||||
output.push_str("# HELP ws_connections_total Total number of WebSocket connections\n");
|
||||
output.push_str("# TYPE ws_connections_total counter\n");
|
||||
output.push_str(&format!(
|
||||
"ws_connections_total {}\n\n",
|
||||
snapshot.connections_total
|
||||
));
|
||||
|
||||
output.push_str(
|
||||
"# HELP ws_connections_active Current number of active WebSocket connections\n",
|
||||
);
|
||||
output.push_str("# TYPE ws_connections_active gauge\n");
|
||||
output.push_str(&format!(
|
||||
"ws_connections_active {}\n\n",
|
||||
snapshot.connections_active
|
||||
));
|
||||
|
||||
output
|
||||
.push_str("# HELP ws_connections_failed_auth Total number of failed authentications\n");
|
||||
output.push_str("# TYPE ws_connections_failed_auth counter\n");
|
||||
output.push_str(&format!(
|
||||
"ws_connections_failed_auth {}\n\n",
|
||||
snapshot.connections_failed_auth
|
||||
));
|
||||
|
||||
// Message metrics
|
||||
output.push_str(
|
||||
"# HELP ws_messages_received_total Total number of messages received from clients\n",
|
||||
);
|
||||
output.push_str("# TYPE ws_messages_received_total counter\n");
|
||||
output.push_str(&format!(
|
||||
"ws_messages_received_total {}\n\n",
|
||||
snapshot.messages_received_total
|
||||
));
|
||||
|
||||
output.push_str("# HELP ws_messages_sent_total Total number of messages sent to clients\n");
|
||||
output.push_str("# TYPE ws_messages_sent_total counter\n");
|
||||
output.push_str(&format!(
|
||||
"ws_messages_sent_total {}\n\n",
|
||||
snapshot.messages_sent_total
|
||||
));
|
||||
|
||||
// Subscription metrics
|
||||
output.push_str("# HELP ws_subscriptions_active Current number of active subscriptions\n");
|
||||
output.push_str("# TYPE ws_subscriptions_active gauge\n");
|
||||
output.push_str(&format!(
|
||||
"ws_subscriptions_active {}\n\n",
|
||||
snapshot.subscriptions_active
|
||||
));
|
||||
|
||||
// Event metrics
|
||||
output.push_str(
|
||||
"# HELP ws_events_received_total Total number of events received from Redis\n",
|
||||
);
|
||||
output.push_str("# TYPE ws_events_received_total counter\n");
|
||||
output.push_str(&format!(
|
||||
"ws_events_received_total {}\n\n",
|
||||
snapshot.events_received_total
|
||||
));
|
||||
|
||||
output.push_str(
|
||||
"# HELP ws_graph_execution_events_total Total number of graph execution events\n",
|
||||
);
|
||||
output.push_str("# TYPE ws_graph_execution_events_total counter\n");
|
||||
output.push_str(&format!(
|
||||
"ws_graph_execution_events_total {}\n\n",
|
||||
snapshot.graph_execution_events
|
||||
));
|
||||
|
||||
output.push_str(
|
||||
"# HELP ws_node_execution_events_total Total number of node execution events\n",
|
||||
);
|
||||
output.push_str("# TYPE ws_node_execution_events_total counter\n");
|
||||
output.push_str(&format!(
|
||||
"ws_node_execution_events_total {}\n\n",
|
||||
snapshot.node_execution_events
|
||||
));
|
||||
|
||||
// Channel metrics
|
||||
output.push_str("# HELP ws_channels_active Number of active channels\n");
|
||||
output.push_str("# TYPE ws_channels_active gauge\n");
|
||||
output.push_str(&format!(
|
||||
"ws_channels_active {}\n\n",
|
||||
snapshot.channels_active_count
|
||||
));
|
||||
|
||||
output.push_str(
|
||||
"# HELP ws_total_subscribers Total number of subscribers across all channels\n",
|
||||
);
|
||||
output.push_str("# TYPE ws_total_subscribers gauge\n");
|
||||
output.push_str(&format!(
|
||||
"ws_total_subscribers {}\n\n",
|
||||
snapshot.total_subscribers
|
||||
));
|
||||
|
||||
// User metrics
|
||||
output.push_str("# HELP ws_active_users Number of unique users with active connections\n");
|
||||
output.push_str("# TYPE ws_active_users gauge\n");
|
||||
output.push_str(&format!(
|
||||
"ws_active_users {}\n\n",
|
||||
snapshot.active_users_count
|
||||
));
|
||||
|
||||
// Error metrics
|
||||
output.push_str("# HELP ws_errors_total Total number of errors\n");
|
||||
output.push_str("# TYPE ws_errors_total counter\n");
|
||||
output.push_str(&format!("ws_errors_total {}\n", snapshot.errors_total));
|
||||
|
||||
output
|
||||
}
|
||||
}
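// For reference (not part of the original source): with the format strings above, the /metrics
// endpoint emits plain-text Prometheus exposition such as:
//
//   # HELP ws_connections_active Current number of active WebSocket connections
//   # TYPE ws_connections_active gauge
//   ws_connections_active 12
//
//   ws_errors_total 0
//
// The sample values are invented; the metric names come directly from to_prometheus_format.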
15
autogpt_platform/autogpt_builder/.env.example
Normal file
@@ -0,0 +1,15 @@
NEXT_PUBLIC_AUTH_CALLBACK_URL=http://localhost:8006/auth/callback
NEXT_PUBLIC_AGPT_SERVER_URL=http://localhost:8006/api
NEXT_PUBLIC_AGPT_WS_SERVER_URL=ws://localhost:8001/ws
NEXT_PUBLIC_AGPT_MARKETPLACE_URL=http://localhost:8015/api/v1/market

## Supabase credentials

NEXT_PUBLIC_SUPABASE_URL=http://localhost:8000
NEXT_PUBLIC_SUPABASE_ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE

## OAuth Callback URL
## This should be {domain}/auth/callback
## Only used if you're using Supabase and OAuth
AUTH_CALLBACK_URL=http://localhost:3000/auth/callback
GA_MEASUREMENT_ID=G-FH2XK2W4GN
3
autogpt_platform/autogpt_builder/.eslintrc.json
Normal file
@@ -0,0 +1,3 @@
{
  "extends": "next/core-web-vitals"
}
@@ -22,14 +22,9 @@
|
||||
|
||||
# debug
|
||||
npm-debug.log*
|
||||
pnpm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
|
||||
# lock files (from yarn1 or npm)
|
||||
yarn.lock
|
||||
package-lock.json
|
||||
|
||||
# local env files
|
||||
.env*.local
|
||||
|
||||
@@ -42,15 +37,3 @@ next-env.d.ts
|
||||
|
||||
# Sentry Config File
|
||||
.env.sentry-build-plugin
|
||||
node_modules/
|
||||
/test-results/
|
||||
/playwright-report/
|
||||
/blob-report/
|
||||
/playwright/.cache/
|
||||
|
||||
*storybook.log
|
||||
storybook-static
|
||||
*.ignore.*
|
||||
*.ign.*
|
||||
!.npmrc
|
||||
.cursorrules
|
||||
@@ -1,6 +1,4 @@
|
||||
node_modules
|
||||
pnpm-lock.yaml
|
||||
.next
|
||||
.auth
|
||||
build
|
||||
public
|
||||
32
autogpt_platform/autogpt_builder/Dockerfile
Normal file
@@ -0,0 +1,32 @@
# Base stage for both dev and prod
FROM node:21-alpine AS base
WORKDIR /app
COPY autogpt_platform/autogpt_builder/package.json autogpt_platform/autogpt_builder/yarn.lock ./
RUN yarn install --frozen-lockfile

# Dev stage
FROM base AS dev
ENV NODE_ENV=development
COPY autogpt_platform/autogpt_builder/ .
EXPOSE 3000
CMD ["yarn", "run", "dev"]

# Build stage for prod
FROM base AS build
COPY autogpt_platform/autogpt_builder/ .
RUN npm run build

# Prod stage
FROM node:21-alpine AS prod
ENV NODE_ENV=production
WORKDIR /app

COPY --from=build /app/package.json /app/yarn.lock ./
RUN yarn install --frozen-lockfile

COPY --from=build /app/.next ./.next
COPY --from=build /app/public ./public
COPY --from=build /app/next.config.mjs ./next.config.mjs

EXPOSE 3000
CMD ["npm", "start"]
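# Illustrative usage (not part of the original file): since the COPY paths above are prefixed
# with autogpt_platform/autogpt_builder/, the image is presumably built from the repository
# root, with the target stage selected via --target:
#
#   docker build -f autogpt_platform/autogpt_builder/Dockerfile --target dev -t autogpt_builder:dev .
#   docker build -f autogpt_platform/autogpt_builder/Dockerfile --target prod -t autogpt_builder:prod .
#   docker run -p 3000:3000 autogpt_builder:prod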
41
autogpt_platform/autogpt_builder/README.md
Normal file
@@ -0,0 +1,41 @@
This is the frontend for AutoGPT's next-generation platform.

## Getting Started

Run the following installation once.

```bash
npm install
# or
yarn install
# or
pnpm install
# or
bun install
```

Next, run the development server:

```bash
npm run dev
# or
yarn dev
# or
pnpm dev
# or
bun dev
```

Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.

You can start editing the page by modifying `app/page.tsx`. The page auto-updates as you edit the file.

For subsequent runs, you do not have to `npm install` again. Simply do `npm run dev`.

If the project is updated via git, you will need to `npm install` after each update.

This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font.

## Deploy

TODO
84
autogpt_platform/autogpt_builder/next.config.mjs
Normal file
@@ -0,0 +1,84 @@
|
||||
import { withSentryConfig } from "@sentry/nextjs";
|
||||
import dotenv from "dotenv";
|
||||
|
||||
// Load environment variables
|
||||
dotenv.config();
|
||||
|
||||
/** @type {import('next').NextConfig} */
|
||||
const nextConfig = {
|
||||
env: {
|
||||
NEXT_PUBLIC_AGPT_SERVER_URL: process.env.NEXT_PUBLIC_AGPT_SERVER_URL,
|
||||
NEXT_PUBLIC_AGPT_MARKETPLACE_URL:
|
||||
process.env.NEXT_PUBLIC_AGPT_MARKETPLACE_URL,
|
||||
},
|
||||
images: {
|
||||
domains: ["images.unsplash.com"],
|
||||
},
|
||||
async redirects() {
|
||||
return [
|
||||
{
|
||||
source: "/monitor", // FIXME: Remove after 2024-09-01
|
||||
destination: "/",
|
||||
permanent: false,
|
||||
},
|
||||
];
|
||||
},
|
||||
// TODO: Re-enable TypeScript checks once current issues are resolved
|
||||
typescript: {
|
||||
ignoreBuildErrors: true,
|
||||
},
|
||||
};
|
||||
|
||||
export default withSentryConfig(nextConfig, {
|
||||
// For all available options, see:
|
||||
// https://github.com/getsentry/sentry-webpack-plugin#options
|
||||
|
||||
org: "significant-gravitas",
|
||||
project: "builder",
|
||||
|
||||
// Only print logs for uploading source maps in CI
|
||||
silent: !process.env.CI,
|
||||
|
||||
// For all available options, see:
|
||||
// https://docs.sentry.io/platforms/javascript/guides/nextjs/manual-setup/
|
||||
|
||||
// Upload a larger set of source maps for prettier stack traces (increases build time)
|
||||
widenClientFileUpload: true,
|
||||
|
||||
// Automatically annotate React components to show their full name in breadcrumbs and session replay
|
||||
reactComponentAnnotation: {
|
||||
enabled: true,
|
||||
},
|
||||
|
||||
// Route browser requests to Sentry through a Next.js rewrite to circumvent ad-blockers.
|
||||
// This can increase your server load as well as your hosting bill.
|
||||
// Note: Check that the configured route will not match with your Next.js middleware, otherwise reporting of client-
|
||||
// side errors will fail.
|
||||
tunnelRoute: "/monitoring",
|
||||
|
||||
// Hides source maps from generated client bundles
|
||||
hideSourceMaps: true,
|
||||
|
||||
// Automatically tree-shake Sentry logger statements to reduce bundle size
|
||||
disableLogger: true,
|
||||
|
||||
// Enables automatic instrumentation of Vercel Cron Monitors. (Does not yet work with App Router route handlers.)
|
||||
// See the following for more information:
|
||||
// https://docs.sentry.io/product/crons/
|
||||
// https://vercel.com/docs/cron-jobs
|
||||
automaticVercelMonitors: true,
|
||||
|
||||
async headers() {
|
||||
return [
|
||||
{
|
||||
source: "/:path*",
|
||||
headers: [
|
||||
{
|
||||
key: "Document-Policy",
|
||||
value: "js-profiling",
|
||||
},
|
||||
],
|
||||
},
|
||||
];
|
||||
},
|
||||
});
|
||||
72
autogpt_platform/autogpt_builder/package.json
Normal file
@@ -0,0 +1,72 @@
|
||||
{
|
||||
"name": "autogpt_builder",
|
||||
"version": "0.1.0",
|
||||
"private": true,
|
||||
"scripts": {
|
||||
"dev": "next dev",
|
||||
"build": "next build",
|
||||
"start": "next start",
|
||||
"lint": "next lint",
|
||||
"format": "prettier --write ."
|
||||
},
|
||||
"dependencies": {
|
||||
"@hookform/resolvers": "^3.9.0",
|
||||
"@next/third-parties": "^14.2.5",
|
||||
"@radix-ui/react-avatar": "^1.1.0",
|
||||
"@radix-ui/react-checkbox": "^1.1.1",
|
||||
"@radix-ui/react-collapsible": "^1.1.0",
|
||||
"@radix-ui/react-dialog": "^1.1.1",
|
||||
"@radix-ui/react-dropdown-menu": "^2.1.1",
|
||||
"@radix-ui/react-icons": "^1.3.0",
|
||||
"@radix-ui/react-label": "^2.1.0",
|
||||
"@radix-ui/react-popover": "^1.1.1",
|
||||
"@radix-ui/react-scroll-area": "^1.1.0",
|
||||
"@radix-ui/react-select": "^2.1.1",
|
||||
"@radix-ui/react-separator": "^1.1.0",
|
||||
"@radix-ui/react-slot": "^1.1.0",
|
||||
"@radix-ui/react-switch": "^1.1.0",
|
||||
"@radix-ui/react-toast": "^1.2.1",
|
||||
"@radix-ui/react-tooltip": "^1.1.2",
|
||||
"@sentry/nextjs": "^8",
|
||||
"@supabase/ssr": "^0.4.0",
|
||||
"@supabase/supabase-js": "^2.45.0",
|
||||
"@tanstack/react-table": "^8.20.5",
|
||||
"@xyflow/react": "^12.1.0",
|
||||
"ajv": "^8.17.1",
|
||||
"class-variance-authority": "^0.7.0",
|
||||
"clsx": "^2.1.1",
|
||||
"cmdk": "1.0.0",
|
||||
"date-fns": "^3.6.0",
|
||||
"dotenv": "^16.4.5",
|
||||
"lucide-react": "^0.407.0",
|
||||
"moment": "^2.30.1",
|
||||
"next": "14.2.4",
|
||||
"next-themes": "^0.3.0",
|
||||
"react": "^18",
|
||||
"react-day-picker": "^8.10.1",
|
||||
"react-dom": "^18",
|
||||
"react-hook-form": "^7.52.1",
|
||||
"react-icons": "^5.2.1",
|
||||
"react-markdown": "^9.0.1",
|
||||
"react-modal": "^3.16.1",
|
||||
"react-shepherd": "^6.1.1",
|
||||
"recharts": "^2.12.7",
|
||||
"tailwind-merge": "^2.3.0",
|
||||
"tailwindcss-animate": "^1.0.7",
|
||||
"uuid": "^10.0.0",
|
||||
"zod": "^3.23.8"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^20",
|
||||
"@types/react": "^18",
|
||||
"@types/react-dom": "^18",
|
||||
"@types/react-modal": "^3.16.3",
|
||||
"eslint": "^8",
|
||||
"eslint-config-next": "14.2.4",
|
||||
"postcss": "^8",
|
||||
"prettier": "^3.3.3",
|
||||
"prettier-plugin-tailwindcss": "^0.6.6",
|
||||
"tailwindcss": "^3.4.1",
|
||||
"typescript": "^5"
|
||||
}
|
||||
}
|
||||
BIN
autogpt_platform/autogpt_builder/public/AUTOgpt_Logo_dark.png
Normal file (binary file not shown; 29 KiB)
BIN
autogpt_platform/autogpt_builder/public/AUTOgpt_Logo_light.png
Normal file (binary file not shown; 28 KiB)
(binary image modified; 15 KiB before and after)
@@ -2,19 +2,11 @@
|
||||
// The config you add here will be used whenever a users loads a page in their browser.
|
||||
// https://docs.sentry.io/platforms/javascript/guides/nextjs/
|
||||
|
||||
import { BehaveAs, getBehaveAs, getEnvironmentStr } from "@/lib/utils";
|
||||
import * as Sentry from "@sentry/nextjs";
|
||||
|
||||
const isProductionCloud =
|
||||
process.env.NODE_ENV === "production" && getBehaveAs() === BehaveAs.CLOUD;
|
||||
|
||||
Sentry.init({
|
||||
dsn: "https://fe4e4aa4a283391808a5da396da20159@o4505260022104064.ingest.us.sentry.io/4507946746380288",
|
||||
|
||||
environment: getEnvironmentStr(),
|
||||
|
||||
enabled: isProductionCloud,
|
||||
|
||||
// Add optional integrations for additional features
|
||||
integrations: [
|
||||
Sentry.replayIntegration(),
|
||||
@@ -34,11 +26,17 @@ Sentry.init({
|
||||
// Set `tracePropagationTargets` to control for which URLs trace propagation should be enabled
|
||||
tracePropagationTargets: [
|
||||
"localhost",
|
||||
"localhost:8006",
|
||||
/^https:\/\/dev\-builder\.agpt\.co\/api/,
|
||||
/^https:\/\/.*\.agpt\.co\/api/,
|
||||
],
|
||||
|
||||
beforeSend(event, hint) {
|
||||
// Check if it is an exception, and if so, show the report dialog
|
||||
if (event.exception && event.event_id) {
|
||||
Sentry.showReportDialog({ eventId: event.event_id });
|
||||
}
|
||||
return event;
|
||||
},
|
||||
|
||||
// Define how likely Replay events are sampled.
|
||||
// This sets the sample rate to be 10%. You may want this to be 100% while
|
||||
// in development and sample at a lower rate in production
|
||||
@@ -56,10 +54,4 @@ Sentry.init({
|
||||
// For example, a tracesSampleRate of 0.5 and profilesSampleRate of 0.5 would
|
||||
// result in 25% of transactions being profiled (0.5*0.5=0.25)
|
||||
profilesSampleRate: 1.0,
|
||||
_experiments: {
|
||||
// Enable logs to be sent to Sentry.
|
||||
enableLogs: true,
|
||||
},
|
||||
});
|
||||
|
||||
export const onRouterTransitionStart = Sentry.captureRouterTransitionStart;
|
||||
@@ -4,32 +4,13 @@
|
||||
// https://docs.sentry.io/platforms/javascript/guides/nextjs/
|
||||
|
||||
import * as Sentry from "@sentry/nextjs";
|
||||
import { BehaveAs, getBehaveAs, getEnvironmentStr } from "./src/lib/utils";
|
||||
|
||||
const isProductionCloud =
|
||||
process.env.NODE_ENV === "production" && getBehaveAs() === BehaveAs.CLOUD;
|
||||
|
||||
Sentry.init({
|
||||
dsn: "https://fe4e4aa4a283391808a5da396da20159@o4505260022104064.ingest.us.sentry.io/4507946746380288",
|
||||
|
||||
environment: getEnvironmentStr(),
|
||||
|
||||
enabled: isProductionCloud,
|
||||
|
||||
// Define how likely traces are sampled. Adjust this value in production, or use tracesSampler for greater control.
|
||||
tracesSampleRate: 1,
|
||||
tracePropagationTargets: [
|
||||
"localhost",
|
||||
"localhost:8006",
|
||||
/^https:\/\/dev\-builder\.agpt\.co\/api/,
|
||||
/^https:\/\/.*\.agpt\.co\/api/,
|
||||
],
|
||||
|
||||
// Setting this option to true will print useful information to the console while you're setting up Sentry.
|
||||
debug: false,
|
||||
|
||||
_experiments: {
|
||||
// Enable logs to be sent to Sentry.
|
||||
enableLogs: true,
|
||||
},
|
||||
});
|
||||
@@ -2,28 +2,14 @@
|
||||
// The config you add here will be used whenever the server handles a request.
|
||||
// https://docs.sentry.io/platforms/javascript/guides/nextjs/
|
||||
|
||||
import { BehaveAs, getBehaveAs, getEnvironmentStr } from "@/lib/utils";
|
||||
import * as Sentry from "@sentry/nextjs";
|
||||
// import { NodeProfilingIntegration } from "@sentry/profiling-node";
|
||||
|
||||
const isProductionCloud =
|
||||
process.env.NODE_ENV === "production" && getBehaveAs() === BehaveAs.CLOUD;
|
||||
|
||||
Sentry.init({
|
||||
dsn: "https://fe4e4aa4a283391808a5da396da20159@o4505260022104064.ingest.us.sentry.io/4507946746380288",
|
||||
|
||||
environment: getEnvironmentStr(),
|
||||
|
||||
enabled: isProductionCloud,
|
||||
|
||||
// Define how likely traces are sampled. Adjust this value in production, or use tracesSampler for greater control.
|
||||
tracesSampleRate: 1,
|
||||
tracePropagationTargets: [
|
||||
"localhost",
|
||||
"localhost:8006",
|
||||
/^https:\/\/dev\-builder\.agpt\.co\/api/,
|
||||
/^https:\/\/.*\.agpt\.co\/api/,
|
||||
],
|
||||
|
||||
// Setting this option to true will print useful information to the console while you're setting up Sentry.
|
||||
debug: false,
|
||||
@@ -34,9 +20,4 @@ Sentry.init({
|
||||
// NodeProfilingIntegration,
|
||||
// Sentry.fsIntegration(),
|
||||
],
|
||||
|
||||
_experiments: {
|
||||
// Enable logs to be sent to Sentry.
|
||||
enableLogs: true,
|
||||
},
|
||||
});
|
||||
100
autogpt_platform/autogpt_builder/src/app/admin/layout.tsx
Normal file
@@ -0,0 +1,100 @@
|
||||
"use client";
|
||||
|
||||
import { useState } from "react";
|
||||
import Link from "next/link";
|
||||
import { BinaryIcon, XIcon } from "lucide-react";
|
||||
import { usePathname } from "next/navigation"; // Add this import
|
||||
|
||||
const tabs = [
|
||||
{ name: "Dashboard", href: "/admin/dashboard" },
|
||||
{ name: "Marketplace", href: "/admin/marketplace" },
|
||||
{ name: "Users", href: "/admin/users" },
|
||||
{ name: "Settings", href: "/admin/settings" },
|
||||
];
|
||||
|
||||
export default function AdminLayout({
|
||||
children,
|
||||
}: {
|
||||
children: React.ReactNode;
|
||||
}) {
|
||||
const pathname = usePathname(); // Get the current pathname
|
||||
const [activeTab, setActiveTab] = useState(() => {
|
||||
// Set active tab based on the current route
|
||||
return tabs.find((tab) => tab.href === pathname)?.name || tabs[0].name;
|
||||
});
|
||||
const [mobileMenuOpen, setMobileMenuOpen] = useState(false);
|
||||
|
||||
return (
|
||||
<div className="min-h-screen bg-gray-100">
|
||||
<nav className="bg-white shadow-sm">
|
||||
<div className="max-w-10xl mx-auto px-4 sm:px-6 lg:px-8">
|
||||
<div className="flex h-16 items-center justify-between">
|
||||
<div className="flex items-center">
|
||||
<div className="flex-shrink-0">
|
||||
<h1 className="text-xl font-bold">Admin Panel</h1>
|
||||
</div>
|
||||
<div className="hidden sm:ml-6 sm:flex sm:space-x-8">
|
||||
{tabs.map((tab) => (
|
||||
<Link
|
||||
key={tab.name}
|
||||
href={tab.href}
|
||||
className={`${
|
||||
activeTab === tab.name
|
||||
? "border-indigo-500 text-indigo-600"
|
||||
: "border-transparent text-gray-500 hover:border-gray-300 hover:text-gray-700"
|
||||
} inline-flex items-center border-b-2 px-1 pt-1 text-sm font-medium`}
|
||||
onClick={() => setActiveTab(tab.name)}
|
||||
>
|
||||
{tab.name}
|
||||
</Link>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
<div className="sm:hidden">
|
||||
<button
|
||||
type="button"
|
||||
className="inline-flex items-center justify-center rounded-md p-2 text-gray-400 hover:bg-gray-100 hover:text-gray-500 focus:outline-none focus:ring-2 focus:ring-inset focus:ring-indigo-500"
|
||||
onClick={() => setMobileMenuOpen(!mobileMenuOpen)}
|
||||
>
|
||||
<span className="sr-only">Open main menu</span>
|
||||
{mobileMenuOpen ? (
|
||||
<XIcon className="block h-6 w-6" aria-hidden="true" />
|
||||
) : (
|
||||
<BinaryIcon className="block h-6 w-6" aria-hidden="true" />
|
||||
)}
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{mobileMenuOpen && (
|
||||
<div className="sm:hidden">
|
||||
<div className="space-y-1 pb-3 pt-2">
|
||||
{tabs.map((tab) => (
|
||||
<Link
|
||||
key={tab.name}
|
||||
href={tab.href}
|
||||
className={`${
|
||||
activeTab === tab.name
|
||||
? "border-indigo-500 bg-indigo-50 text-indigo-700"
|
||||
: "border-transparent text-gray-600 hover:border-gray-300 hover:bg-gray-50 hover:text-gray-800"
|
||||
} block border-l-4 py-2 pl-3 pr-4 text-base font-medium`}
|
||||
onClick={() => {
|
||||
setActiveTab(tab.name);
|
||||
setMobileMenuOpen(false);
|
||||
}}
|
||||
>
|
||||
{tab.name}
|
||||
</Link>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</nav>
|
||||
|
||||
<main className="py-10">
|
||||
<div className="mx-auto max-w-7xl px-4 sm:px-6 lg:px-8">{children}</div>
|
||||
</main>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -0,0 +1,25 @@
|
||||
import { withRoleAccess } from "@/lib/withRoleAccess";
|
||||
|
||||
import React from "react";
|
||||
import { getReviewableAgents } from "@/components/admin/marketplace/actions";
|
||||
import AdminMarketplaceAgentList from "@/components/admin/marketplace/AdminMarketplaceAgentList";
|
||||
import AdminFeaturedAgentsControl from "@/components/admin/marketplace/AdminFeaturedAgentsControl";
|
||||
import { Separator } from "@/components/ui/separator";
|
||||
async function AdminMarketplace() {
|
||||
const reviewableAgents = await getReviewableAgents();
|
||||
|
||||
return (
|
||||
<>
|
||||
<AdminMarketplaceAgentList agents={reviewableAgents.agents} />
|
||||
<Separator className="my-4" />
|
||||
<AdminFeaturedAgentsControl className="mt-4" />
|
||||
</>
|
||||
);
|
||||
}
|
||||
|
||||
export default async function AdminDashboardPage() {
|
||||
"use server";
|
||||
const withAdminAccess = await withRoleAccess(["admin"]);
|
||||
const ProtectedAdminMarketplace = await withAdminAccess(AdminMarketplace);
|
||||
return <ProtectedAdminMarketplace />;
|
||||
}
|
||||
@@ -0,0 +1,36 @@
|
||||
import { NextResponse } from "next/server";
|
||||
import { createServerClient } from "@/lib/supabase/server";
|
||||
|
||||
// Handle the callback to complete the user session login
|
||||
export async function GET(request: Request) {
|
||||
const { searchParams, origin } = new URL(request.url);
|
||||
const code = searchParams.get("code");
|
||||
// if "next" is in param, use it as the redirect URL
|
||||
const next = searchParams.get("next") ?? "/profile";
|
||||
|
||||
if (code) {
|
||||
const supabase = createServerClient();
|
||||
|
||||
if (!supabase) {
|
||||
return NextResponse.redirect(`${origin}/error`);
|
||||
}
|
||||
|
||||
const { data, error } = await supabase.auth.exchangeCodeForSession(code);
|
||||
// data.session?.refresh_token is available if you need to store it for later use
|
||||
if (!error) {
|
||||
const forwardedHost = request.headers.get("x-forwarded-host"); // original origin before load balancer
|
||||
const isLocalEnv = process.env.NODE_ENV === "development";
|
||||
if (isLocalEnv) {
|
||||
// we can be sure that there is no load balancer in between, so no need to watch for X-Forwarded-Host
|
||||
return NextResponse.redirect(`${origin}${next}`);
|
||||
} else if (forwardedHost) {
|
||||
return NextResponse.redirect(`https://${forwardedHost}${next}`);
|
||||
} else {
|
||||
return NextResponse.redirect(`${origin}${next}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// return the user to an error page with instructions
|
||||
return NextResponse.redirect(`${origin}/auth/auth-code-error`);
|
||||
}
|
||||
@@ -2,7 +2,7 @@ import { type EmailOtpType } from "@supabase/supabase-js";
|
||||
import { type NextRequest } from "next/server";
|
||||
|
||||
import { redirect } from "next/navigation";
|
||||
import { getServerSupabase } from "@/lib/supabase/server/getServerSupabase";
|
||||
import { createServerClient } from "@/lib/supabase/server";
|
||||
|
||||
// Email confirmation route
|
||||
export async function GET(request: NextRequest) {
|
||||
@@ -12,7 +12,7 @@ export async function GET(request: NextRequest) {
|
||||
const next = searchParams.get("next") ?? "/";
|
||||
|
||||
if (token_hash && type) {
|
||||
const supabase = await getServerSupabase();
|
||||
const supabase = createServerClient();
|
||||
|
||||
if (!supabase) {
|
||||
redirect("/error");
|
||||
16
autogpt_platform/autogpt_builder/src/app/build/page.tsx
Normal file
@@ -0,0 +1,16 @@
|
||||
"use client";
|
||||
|
||||
import { useSearchParams } from "next/navigation";
|
||||
import FlowEditor from '@/components/Flow';
|
||||
|
||||
export default function Home() {
|
||||
const query = useSearchParams();
|
||||
|
||||
return (
|
||||
<FlowEditor
|
||||
className="flow-container w-full min-h-[86vh] border border-gray-300 dark:border-gray-700 rounded-lg"
|
||||
flowID={query.get("flowID") ?? query.get("templateID") ?? undefined}
|
||||
template={!!query.get("templateID")}
|
||||
/>
|
||||
);
|
||||
}
|
||||
75
autogpt_platform/autogpt_builder/src/app/globals.css
Normal file
@@ -0,0 +1,75 @@
|
||||
@tailwind base;
|
||||
@tailwind components;
|
||||
@tailwind utilities;
|
||||
|
||||
@layer utilities {
|
||||
.text-balance {
|
||||
text-wrap: balance;
|
||||
}
|
||||
}
|
||||
|
||||
@layer base {
|
||||
:root {
|
||||
--background: 0 0% 100%;
|
||||
--foreground: 240 10% 3.9%;
|
||||
--card: 0 0% 100%;
|
||||
--card-foreground: 240 10% 3.9%;
|
||||
--popover: 0 0% 100%;
|
||||
--popover-foreground: 240 10% 3.9%;
|
||||
--primary: 240 5.9% 10%;
|
||||
--primary-foreground: 0 0% 98%;
|
||||
--secondary: 240 4.8% 95.9%;
|
||||
--secondary-foreground: 240 5.9% 10%;
|
||||
--muted: 240 4.8% 95.9%;
|
||||
--muted-foreground: 240 3.8% 46.1%;
|
||||
--accent: 240 4.8% 95.9%;
|
||||
--accent-foreground: 240 5.9% 10%;
|
||||
--destructive: 0 84.2% 60.2%;
|
||||
--destructive-foreground: 0 0% 98%;
|
||||
--border: 240 5.9% 90%;
|
||||
--input: 240 5.9% 90%;
|
||||
--ring: 240 5.9% 10%;
|
||||
--radius: 0.5rem;
|
||||
--chart-1: 12 76% 61%;
|
||||
--chart-2: 173 58% 39%;
|
||||
--chart-3: 197 37% 24%;
|
||||
--chart-4: 43 74% 66%;
|
||||
--chart-5: 27 87% 67%;
|
||||
}
|
||||
|
||||
.dark {
|
||||
--background: 240 10% 3.9%;
|
||||
--foreground: 0 0% 98%;
|
||||
--card: 240 10% 3.9%;
|
||||
--card-foreground: 0 0% 98%;
|
||||
--popover: 240 10% 3.9%;
|
||||
--popover-foreground: 0 0% 98%;
|
||||
--primary: 0 0% 98%;
|
||||
--primary-foreground: 240 5.9% 10%;
|
||||
--secondary: 240 3.7% 15.9%;
|
||||
--secondary-foreground: 0 0% 98%;
|
||||
--muted: 240 3.7% 15.9%;
|
||||
--muted-foreground: 240 5% 64.9%;
|
||||
--accent: 240 3.7% 15.9%;
|
||||
--accent-foreground: 0 0% 98%;
|
||||
--destructive: 0 62.8% 30.6%;
|
||||
--destructive-foreground: 0 0% 98%;
|
||||
--border: 240 3.7% 15.9%;
|
||||
--input: 240 3.7% 15.9%;
|
||||
--ring: 240 4.9% 83.9%;
|
||||
--chart-1: 220 70% 50%;
|
||||
--chart-2: 160 60% 45%;
|
||||
--chart-3: 30 80% 55%;
|
||||
--chart-4: 280 65% 60%;
|
||||
--chart-5: 340 75% 55%;
|
||||
}
|
||||
}
|
||||
|
||||
@layer base {
|
||||
* {
|
||||
@apply border-border;
|
||||
}
|
||||
body {
|
||||
@apply bg-background text-foreground;
|
||||
}
|
||||
}
|
||||
49
autogpt_platform/autogpt_builder/src/app/layout.tsx
Normal file
@@ -0,0 +1,49 @@
|
||||
import React from "react";
|
||||
import type { Metadata } from "next";
|
||||
import { Inter } from "next/font/google";
|
||||
import { Providers } from "@/app/providers";
|
||||
import { NavBar } from "@/components/NavBar";
|
||||
import { cn } from "@/lib/utils";
|
||||
|
||||
import "./globals.css";
|
||||
import TallyPopupSimple from "@/components/TallyPopup";
|
||||
import { GoogleAnalytics } from "@next/third-parties/google";
|
||||
import { Toaster } from "@/components/ui/toaster";
|
||||
|
||||
const inter = Inter({ subsets: ["latin"] });
|
||||
|
||||
export const metadata: Metadata = {
|
||||
title: "NextGen AutoGPT",
|
||||
description: "Your one stop shop to creating AI Agents",
|
||||
};
|
||||
|
||||
export default function RootLayout({
|
||||
children,
|
||||
}: Readonly<{
|
||||
children: React.ReactNode;
|
||||
}>) {
|
||||
return (
|
||||
<html lang="en">
|
||||
<body className={cn("antialiased transition-colors", inter.className)}>
|
||||
<Providers
|
||||
attribute="class"
|
||||
defaultTheme="light"
|
||||
// Feel free to remove this line if you want to use the system theme by default
|
||||
// enableSystem
|
||||
disableTransitionOnChange
|
||||
>
|
||||
<div className="flex min-h-screen flex-col">
|
||||
<NavBar />
|
||||
<main className="flex-1 overflow-hidden p-4">{children}</main>
|
||||
<TallyPopupSimple />
|
||||
</div>
|
||||
<Toaster />
|
||||
</Providers>
|
||||
</body>
|
||||
|
||||
<GoogleAnalytics
|
||||
gaId={process.env.GA_MEASUREMENT_ID || "G-FH2XK2W4GN"} // This is the measurement Id for the Google Analytics dev project
|
||||
/>
|
||||
</html>
|
||||
);
|
||||
}
|
||||
64
autogpt_platform/autogpt_builder/src/app/login/actions.ts
Normal file
@@ -0,0 +1,64 @@
|
||||
"use server";
|
||||
import { revalidatePath } from "next/cache";
|
||||
import { redirect } from "next/navigation";
|
||||
import { createServerClient } from "@/lib/supabase/server";
|
||||
import { z } from "zod";
|
||||
import * as Sentry from "@sentry/nextjs";
|
||||
|
||||
const loginFormSchema = z.object({
|
||||
email: z.string().email().min(2).max(64),
|
||||
password: z.string().min(6).max(64),
|
||||
});
|
||||
|
||||
export async function login(values: z.infer<typeof loginFormSchema>) {
|
||||
return await Sentry.withServerActionInstrumentation("login", {}, async () => {
|
||||
const supabase = createServerClient();
|
||||
|
||||
if (!supabase) {
|
||||
redirect("/error");
|
||||
}
|
||||
|
||||
// We are sure that the values are of the correct type because zod validates the form
|
||||
const { data, error } = await supabase.auth.signInWithPassword(values);
|
||||
|
||||
if (error) {
|
||||
return error.message;
|
||||
}
|
||||
|
||||
if (data.session) {
|
||||
await supabase.auth.setSession(data.session);
|
||||
}
|
||||
|
||||
revalidatePath("/", "layout");
|
||||
redirect("/profile");
|
||||
});
|
||||
}
|
||||
|
||||
export async function signup(values: z.infer<typeof loginFormSchema>) {
|
||||
"use server";
|
||||
return await Sentry.withServerActionInstrumentation(
|
||||
"signup",
|
||||
{},
|
||||
async () => {
|
||||
const supabase = createServerClient();
|
||||
|
||||
if (!supabase) {
|
||||
redirect("/error");
|
||||
}
|
||||
|
||||
// We are sure that the values are of the correct type because zod validates the form
|
||||
const { data, error } = await supabase.auth.signUp(values);
|
||||
|
||||
if (error) {
|
||||
return error.message;
|
||||
}
|
||||
|
||||
if (data.session) {
|
||||
await supabase.auth.setSession(data.session);
|
||||
}
|
||||
|
||||
revalidatePath("/", "layout");
|
||||
redirect("/profile");
|
||||
},
|
||||
);
|
||||
}
|
||||
234
autogpt_platform/autogpt_builder/src/app/login/page.tsx
Normal file
@@ -0,0 +1,234 @@
|
||||
"use client";
|
||||
import useUser from "@/hooks/useUser";
|
||||
import { login, signup } from "./actions";
|
||||
import { Button } from "@/components/ui/button";
|
||||
import {
|
||||
Form,
|
||||
FormControl,
|
||||
FormDescription,
|
||||
FormField,
|
||||
FormItem,
|
||||
FormLabel,
|
||||
FormMessage,
|
||||
} from "@/components/ui/form";
|
||||
import { useForm } from "react-hook-form";
|
||||
import { Input } from "@/components/ui/input";
|
||||
import { z } from "zod";
|
||||
import { zodResolver } from "@hookform/resolvers/zod";
|
||||
import { PasswordInput } from "@/components/PasswordInput";
|
||||
import { FaGoogle, FaGithub, FaDiscord, FaSpinner } from "react-icons/fa";
|
||||
import { useState } from "react";
|
||||
import { useSupabase } from "@/components/SupabaseProvider";
|
||||
import { useRouter } from "next/navigation";
|
||||
import Link from "next/link";
|
||||
import { Checkbox } from "@/components/ui/checkbox";
|
||||
|
||||
const loginFormSchema = z.object({
|
||||
email: z.string().email().min(2).max(64),
|
||||
password: z.string().min(6).max(64),
|
||||
agreeToTerms: z.boolean().refine((value) => value === true, {
|
||||
message: "You must agree to the Terms of Service and Privacy Policy",
|
||||
}),
|
||||
});
|
||||
|
||||
export default function LoginPage() {
|
||||
const { supabase, isLoading: isSupabaseLoading } = useSupabase();
|
||||
const { user, isLoading: isUserLoading } = useUser();
|
||||
const [feedback, setFeedback] = useState<string | null>(null);
|
||||
const router = useRouter();
|
||||
const [isLoading, setIsLoading] = useState(false);
|
||||
|
||||
const form = useForm<z.infer<typeof loginFormSchema>>({
|
||||
resolver: zodResolver(loginFormSchema),
|
||||
defaultValues: {
|
||||
email: "",
|
||||
password: "",
|
||||
agreeToTerms: false,
|
||||
},
|
||||
});
|
||||
|
||||
if (user) {
|
||||
console.log("User exists, redirecting to profile");
|
||||
router.push("/profile");
|
||||
}
|
||||
|
||||
if (isUserLoading || isSupabaseLoading || user) {
|
||||
return (
|
||||
<div className="flex h-[80vh] items-center justify-center">
|
||||
<FaSpinner className="mr-2 h-16 w-16 animate-spin" />
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
if (!supabase) {
|
||||
return (
|
||||
<div>
|
||||
User accounts are disabled because Supabase client is unavailable
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
async function handleSignInWithProvider(
|
    provider: "google" | "github" | "discord",
  ) {
    const { data, error } = await supabase!.auth.signInWithOAuth({
      provider: provider,
      options: {
        redirectTo:
          process.env.AUTH_CALLBACK_URL ??
          `http://localhost:3000/auth/callback`,
      },
    });

    if (!error) {
      setFeedback(null);
      return;
    }
    setFeedback(error.message);
  }

  const onLogin = async (data: z.infer<typeof loginFormSchema>) => {
    setIsLoading(true);
    const error = await login(data);
    setIsLoading(false);
    if (error) {
      setFeedback(error);
      return;
    }
    setFeedback(null);
  };

  const onSignup = async (data: z.infer<typeof loginFormSchema>) => {
    if (await form.trigger()) {
      setIsLoading(true);
      const error = await signup(data);
      setIsLoading(false);
      if (error) {
        setFeedback(error);
        return;
      }
      setFeedback(null);
    }
  };

  return (
    <div className="flex h-[80vh] items-center justify-center">
      <div className="w-full max-w-md space-y-6 rounded-lg p-8 shadow-md">
        <div className="mb-6 space-y-2">
          <Button
            className="w-full"
            onClick={() => handleSignInWithProvider("google")}
            variant="outline"
            type="button"
            disabled={isLoading}
          >
            <FaGoogle className="mr-2 h-4 w-4" />
            Sign in with Google
          </Button>
          <Button
            className="w-full"
            onClick={() => handleSignInWithProvider("github")}
            variant="outline"
            type="button"
            disabled={isLoading}
          >
            <FaGithub className="mr-2 h-4 w-4" />
            Sign in with GitHub
          </Button>
          <Button
            className="w-full"
            onClick={() => handleSignInWithProvider("discord")}
            variant="outline"
            type="button"
            disabled={isLoading}
          >
            <FaDiscord className="mr-2 h-4 w-4" />
            Sign in with Discord
          </Button>
        </div>
        <Form {...form}>
          <form onSubmit={form.handleSubmit(onLogin)}>
            <FormField
              control={form.control}
              name="email"
              render={({ field }) => (
                <FormItem className="mb-4">
                  <FormLabel>Email</FormLabel>
                  <FormControl>
                    <Input placeholder="user@email.com" {...field} />
                  </FormControl>
                  <FormMessage />
                </FormItem>
              )}
            />
            <FormField
              control={form.control}
              name="password"
              render={({ field }) => (
                <FormItem>
                  <FormLabel>Password</FormLabel>
                  <FormControl>
                    <PasswordInput placeholder="password" {...field} />
                  </FormControl>
                  <FormDescription>
                    Password needs to be at least 6 characters long
                  </FormDescription>
                  <FormMessage />
                </FormItem>
              )}
            />
            <FormField
              control={form.control}
              name="agreeToTerms"
              render={({ field }) => (
                <FormItem className="mt-4 flex flex-row items-start space-x-3 space-y-0">
                  <FormControl>
                    <Checkbox
                      checked={field.value}
                      onCheckedChange={field.onChange}
                    />
                  </FormControl>
                  <div className="space-y-1 leading-none">
                    <FormLabel>
                      I agree to the{" "}
                      <Link href="/terms-of-service" className="underline">
                        Terms of Service
                      </Link>{" "}
                      and{" "}
                      <Link
                        href="https://www.notion.so/auto-gpt/Privacy-Policy-ab11c9c20dbd4de1a15dcffe84d77984"
                        className="underline"
                      >
                        Privacy Policy
                      </Link>
                    </FormLabel>
                    <FormMessage />
                  </div>
                </FormItem>
              )}
            />
            <div className="mb-6 mt-6 flex w-full space-x-4">
              <Button
                className="flex w-1/2 justify-center"
                type="submit"
                disabled={isLoading}
              >
                Log in
              </Button>
              <Button
                className="flex w-1/2 justify-center"
                variant="outline"
                type="button"
                onClick={form.handleSubmit(onSignup)}
                disabled={isLoading}
              >
                Sign up
              </Button>
            </div>
          </form>
          <p className="text-sm text-red-500">{feedback}</p>
        </Form>
      </div>
    </div>
  );
}
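The loginFormSchema, login(), and signup() helpers used above are defined elsewhere in this changeset and are not shown here. A minimal Zod sketch that is merely consistent with the fields this form renders (email, a password of at least 6 characters, and the agreeToTerms checkbox) — not the actual definition — could look like:

import { z } from "zod";

// Illustrative sketch only; the real schema ships elsewhere in this PR.
const loginFormSchema = z.object({
  email: z.string().email(),
  // The form copy above requires at least 6 characters.
  password: z.string().min(6),
  // Bound to the "agreeToTerms" checkbox; must be checked before submitting.
  agreeToTerms: z.boolean().refine((checked) => checked, {
    message: "You must agree to the Terms of Service and Privacy Policy",
  }),
});

type LoginFormValues = z.infer<typeof loginFormSchema>;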
@@ -0,0 +1,41 @@
import { Suspense } from "react";
import { notFound } from "next/navigation";
import MarketplaceAPI from "@/lib/marketplace-api";
import { AgentDetailResponse } from "@/lib/marketplace-api";
import AgentDetailContent from "@/components/marketplace/AgentDetailContent";

async function getAgentDetails(id: string): Promise<AgentDetailResponse> {
  const apiUrl =
    process.env.NEXT_PUBLIC_AGPT_MARKETPLACE_URL ||
    "http://localhost:8015/api/v1/market";
  const api = new MarketplaceAPI(apiUrl);
  try {
    console.log(`Fetching agent details for id: ${id}`);
    const agent = await api.getAgentDetails(id);
    console.log(`Agent details fetched:`, agent);
    return agent;
  } catch (error) {
    console.error(`Error fetching agent details:`, error);
    throw error;
  }
}

export default async function AgentDetailPage({
  params,
}: {
  params: { id: string };
}) {
  let agent: AgentDetailResponse;

  try {
    agent = await getAgentDetails(params.id);
  } catch (error) {
    return notFound();
  }

  return (
    <Suspense fallback={<div>Loading...</div>}>
      <AgentDetailContent agent={agent} />
    </Suspense>
  );
}
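The MarketplaceAPI client imported from "@/lib/marketplace-api" is not part of the lines shown in this diff. A minimal sketch that matches how the detail page calls it — the endpoint path and the exact fields of AgentDetailResponse are assumptions — might be:

// Sketch only; the real client and types live in "@/lib/marketplace-api".
export interface AgentDetailResponse {
  id: string;
  name: string;
  description: string;
  categories: string[];
  // ...further fields are defined by the real API module.
}

export default class MarketplaceAPI {
  constructor(private baseUrl: string) {}

  async getAgentDetails(id: string): Promise<AgentDetailResponse> {
    // Hypothetical endpoint path; only the method name and return type are
    // taken from the page above.
    const res = await fetch(`${this.baseUrl}/agents/${id}`);
    if (!res.ok) throw new Error(`Agent ${id} request failed: ${res.status}`);
    return res.json();
  }
}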
317  autogpt_platform/autogpt_builder/src/app/marketplace/page.tsx  Normal file
@@ -0,0 +1,317 @@
"use client";
import React, { useEffect, useMemo, useState, useCallback } from "react";
import { useRouter } from "next/navigation";
import Image from "next/image";
import { Input } from "@/components/ui/input";
import { Button } from "@/components/ui/button";
import MarketplaceAPI, {
  AgentResponse,
  AgentListResponse,
  AgentWithRank,
} from "@/lib/marketplace-api";
import {
  ChevronLeft,
  ChevronRight,
  PlusCircle,
  Search,
  Star,
} from "lucide-react";

// Utility Functions
function debounce<T extends (...args: any[]) => any>(
  func: T,
  wait: number,
): (...args: Parameters<T>) => void {
  let timeout: NodeJS.Timeout | null = null;
  return (...args: Parameters<T>) => {
    if (timeout) clearTimeout(timeout);
    timeout = setTimeout(() => func(...args), wait);
  };
}
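// Note (illustrative comment, not part of the original changeset): debounce
// returns a void wrapper around `func`, so callers must reuse a single
// instance for the timer to be effective — the Marketplace component below
// does this by memoizing `debounce(searchAgents, 300)` with useMemo. For
// example:
//   const log = debounce((q: string) => console.log("search:", q), 300);
//   log("a"); log("ab"); log("abc"); // only "abc" is logged, ~300 ms later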

// Types
type Agent = AgentResponse | AgentWithRank;

// Components
const HeroSection: React.FC = () => {
  const router = useRouter();

  return (
    <div className="relative bg-indigo-600 py-6">
      <div className="absolute inset-0 z-0">
        <Image
          src="https://images.unsplash.com/photo-1562408590-e32931084e23?auto=format&fit=crop&w=2070&q=80"
          alt="Marketplace background"
          layout="fill"
          objectFit="cover"
          quality={75}
          priority
          className="opacity-20"
        />
        <div
          className="absolute inset-0 bg-indigo-600 mix-blend-multiply"
          aria-hidden="true"
        ></div>
      </div>
      <div className="relative mx-auto flex max-w-7xl items-center justify-between px-4 py-4 sm:px-6 lg:px-8">
        <div>
          <h1 className="text-2xl font-extrabold tracking-tight text-white sm:text-3xl lg:text-4xl">
            AutoGPT Marketplace
          </h1>
          <p className="mt-2 max-w-3xl text-sm text-indigo-100 sm:text-base">
            Discover and share proven AI Agents to supercharge your business.
          </p>
        </div>
        <Button
          onClick={() => router.push("/marketplace/submit")}
          className="flex items-center bg-white text-indigo-600 hover:bg-indigo-50"
        >
          <PlusCircle className="mr-2 h-4 w-4" />
          Submit Agent
        </Button>
      </div>
    </div>
  );
};

const SearchInput: React.FC<{
  value: string;
  onChange: (e: React.ChangeEvent<HTMLInputElement>) => void;
}> = ({ value, onChange }) => (
  <div className="relative mb-8">
    <Input
      placeholder="Search agents..."
      type="text"
      className="w-full rounded-full border-gray-300 py-2 pl-10 pr-4 focus:border-indigo-500 focus:ring-indigo-500"
      value={value}
      onChange={onChange}
    />
    <Search
      className="absolute left-3 top-1/2 -translate-y-1/2 transform text-gray-400"
      size={20}
    />
  </div>
);

const AgentCard: React.FC<{ agent: Agent; featured?: boolean }> = ({
  agent,
  featured = false,
}) => {
  const router = useRouter();

  const handleClick = () => {
    router.push(`/marketplace/${agent.id}`);
  };

  return (
    <div
      className={`flex cursor-pointer flex-col justify-between rounded-lg border p-6 transition-colors duration-200 hover:bg-gray-50 ${featured ? "border-indigo-500 shadow-md" : "border-gray-200"}`}
      onClick={handleClick}
    >
      <div>
        <div className="mb-2 flex items-center justify-between">
          <h3 className="truncate text-lg font-semibold text-gray-900">
            {agent.name}
          </h3>
          {featured && <Star className="text-indigo-500" size={20} />}
        </div>
        <p className="mb-4 line-clamp-2 text-sm text-gray-500">
          {agent.description}
        </p>
        <div className="mb-2 text-xs text-gray-400">
          Categories: {agent.categories.join(", ")}
        </div>
      </div>
      <div className="flex items-end justify-between">
        <div className="text-xs text-gray-400">
          Updated {new Date(agent.updatedAt).toLocaleDateString()}
        </div>
        <div className="text-xs text-gray-400">Downloads {agent.downloads}</div>
        {"rank" in agent && (
          <div className="text-xs text-indigo-600">
            Rank: {agent.rank.toFixed(2)}
          </div>
        )}
      </div>
    </div>
  );
};

const AgentGrid: React.FC<{
  agents: Agent[];
  title: string;
  featured?: boolean;
}> = ({ agents, title, featured = false }) => (
  <div className="mb-12">
    <h2 className="mb-4 text-2xl font-bold text-gray-900">{title}</h2>
    <div className="grid grid-cols-1 gap-6 md:grid-cols-2 lg:grid-cols-3">
      {agents.map((agent) => (
        <AgentCard agent={agent} key={agent.id} featured={featured} />
      ))}
    </div>
  </div>
);

const Pagination: React.FC<{
  page: number;
  totalPages: number;
  onPrevPage: () => void;
  onNextPage: () => void;
}> = ({ page, totalPages, onPrevPage, onNextPage }) => (
  <div className="mt-8 flex items-center justify-between">
    <Button
      onClick={onPrevPage}
      disabled={page === 1}
      className="flex items-center space-x-2 rounded-md border border-gray-300 bg-white px-4 py-2 text-sm font-medium text-gray-700 shadow-sm hover:bg-gray-50"
    >
      <ChevronLeft size={16} />
      <span>Previous</span>
    </Button>
    <span className="text-sm text-gray-700">
      Page {page} of {totalPages}
    </span>
    <Button
      onClick={onNextPage}
      disabled={page === totalPages}
      className="flex items-center space-x-2 rounded-md border border-gray-300 bg-white px-4 py-2 text-sm font-medium text-gray-700 shadow-sm hover:bg-gray-50"
    >
      <span>Next</span>
      <ChevronRight size={16} />
    </Button>
  </div>
);

// Main Component
const Marketplace: React.FC = () => {
  const apiUrl =
    process.env.NEXT_PUBLIC_AGPT_MARKETPLACE_URL ||
    "http://localhost:8015/api/v1/market";
  const api = useMemo(() => new MarketplaceAPI(apiUrl), [apiUrl]);

  const [searchValue, setSearchValue] = useState("");
  const [searchResults, setSearchResults] = useState<Agent[]>([]);
  const [featuredAgents, setFeaturedAgents] = useState<Agent[]>([]);
  const [topAgents, setTopAgents] = useState<Agent[]>([]);
  const [page, setPage] = useState(1);
  const [totalPages, setTotalPages] = useState(1);
  const [isLoading, setIsLoading] = useState(false);

  const fetchTopAgents = useCallback(
    async (currentPage: number) => {
      setIsLoading(true);
      try {
        const response = await api.getTopDownloadedAgents(currentPage, 9);
        setTopAgents(response.agents);
        setTotalPages(response.total_pages);
      } catch (error) {
        console.error("Error fetching top agents:", error);
      } finally {
        setIsLoading(false);
      }
    },
    [api],
  );

  const fetchFeaturedAgents = useCallback(async () => {
    try {
      const featured = await api.getFeaturedAgents();
      setFeaturedAgents(featured.agents);
    } catch (error) {
      console.error("Error fetching featured agents:", error);
    }
  }, [api]);

  const searchAgents = useCallback(
    async (searchTerm: string) => {
      setIsLoading(true);
      try {
        const response = await api.searchAgents(searchTerm, 1, 30);
        const filteredAgents = response.filter((agent) => agent.rank > 0);
        setSearchResults(filteredAgents);
      } catch (error) {
        console.error("Error searching agents:", error);
      } finally {
        setIsLoading(false);
      }
    },
    [api],
  );

  const debouncedSearch = useMemo(
    () => debounce(searchAgents, 300),
    [searchAgents],
  );

  useEffect(() => {
    if (searchValue) {
      debouncedSearch(searchValue);
    } else {
      fetchTopAgents(page);
    }
  }, [searchValue, page, debouncedSearch, fetchTopAgents]);

  useEffect(() => {
    fetchFeaturedAgents();
  }, [fetchFeaturedAgents]);

  const handleInputChange = (e: React.ChangeEvent<HTMLInputElement>) => {
    setSearchValue(e.target.value);
    setPage(1);
  };

  const handleNextPage = () => {
    if (page < totalPages) {
      setPage(page + 1);
    }
  };

  const handlePrevPage = () => {
    if (page > 1) {
      setPage(page - 1);
    }
  };

  return (
    <div className="min-h-screen bg-gray-50">
      <HeroSection />
      <div className="mx-auto max-w-7xl px-4 py-12 sm:px-6 lg:px-8">
        <SearchInput value={searchValue} onChange={handleInputChange} />
        {isLoading ? (
          <div className="py-12 text-center">
            <div className="inline-block h-8 w-8 animate-spin rounded-full border-b-2 border-gray-900"></div>
            <p className="mt-2 text-gray-600">Loading agents...</p>
          </div>
        ) : searchValue ? (
          searchResults.length > 0 ? (
            <AgentGrid agents={searchResults} title="Search Results" />
          ) : (
            <div className="py-12 text-center">
              <p className="text-gray-600">
                No agents found matching your search criteria.
              </p>
            </div>
          )
        ) : (
          <>
            {featuredAgents.length > 0 && (
              <AgentGrid
                agents={featuredAgents}
                title="Featured Agents"
                featured={true}
              />
            )}
            <AgentGrid agents={topAgents} title="Top Downloaded Agents" />
            <Pagination
              page={page}
              totalPages={totalPages}
              onPrevPage={handlePrevPage}
              onNextPage={handleNextPage}
            />
          </>
        )}
      </div>
    </div>
  );
};

export default Marketplace;
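As with the detail page, the listing and search calls above (getTopDownloadedAgents, getFeaturedAgents, searchAgents) are provided by the "@/lib/marketplace-api" client rather than shown in this diff. A sketch of the surface the Marketplace page relies on — parameter names and any response fields beyond agents, total_pages, and rank are assumptions:

// Sketch only; shapes inferred from how the Marketplace component uses them.
export interface AgentResponse {
  id: string;
  name: string;
  description: string;
  categories: string[];
  downloads: number;
  updatedAt: string;
}

export interface AgentWithRank extends AgentResponse {
  rank: number;
}

export interface AgentListResponse {
  agents: AgentResponse[];
  total_pages: number;
}

// Hypothetical interface name; the real module exports a default class.
export interface MarketplaceListingClient {
  getTopDownloadedAgents(
    page: number,
    pageSize: number,
  ): Promise<AgentListResponse>;
  getFeaturedAgents(): Promise<{ agents: AgentResponse[] }>;
  searchAgents(
    query: string,
    page: number,
    pageSize: number,
  ): Promise<AgentWithRank[]>;
}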
Some files were not shown because too many files have changed in this diff.