Compare commits

54 Commits
detached ... go

Author  SHA1  Message  Date
SwiftyOS  5e0dad5a96  passing logger context  2024-09-13 20:04:54 +02:00
SwiftyOS  f598ba27b6  renamed folder  2024-09-13 19:57:39 +02:00
SwiftyOS  286202cc66  get agents working  2024-09-13 19:55:02 +02:00
SwiftyOS  55eb917162  formatting  2024-09-13 19:04:36 +02:00
SwiftyOS  c2843eecfa  moved categories to api level  2024-09-13 19:03:11 +02:00
SwiftyOS  3971fbd800  Added test data  2024-09-13 19:02:12 +02:00
SwiftyOS  e04838feb5  added swagger instructions to the docs  2024-09-13 18:38:46 +02:00
SwiftyOS  3ef1f6e380  Added swagger docs  2024-09-13 18:25:38 +02:00
SwiftyOS  705c63b801  Merge branch 'master' into go  2024-09-13 17:52:32 +02:00
SwiftyOS  da007e3a22  add config from env  2024-09-12 19:02:50 +02:00
SwiftyOS  9e18c26e65  fixing db queries  2024-09-12 19:02:16 +02:00
SwiftyOS  30e3d65711  fixed cors added prometheus  2024-09-12 18:35:11 +02:00
SwiftyOS  b9c26b1a6b  Added readme  2024-09-12 18:19:26 +02:00
SwiftyOS  220a127e51  Added cors  2024-09-12 18:19:18 +02:00
SwiftyOS  004e49edb1  added metrics  2024-09-12 18:07:24 +02:00
SwiftyOS  bf21bb1fa5  allow changing of port  2024-09-12 18:05:28 +02:00
SwiftyOS  530ddf2c34  Added migrations and docker-compose  2024-09-12 17:55:02 +02:00
SwiftyOS  33ee2f2ee5  adding tests  2024-09-12 17:11:42 +02:00
SwiftyOS  ae1d410b65  adding tests  2024-09-12 16:50:17 +02:00
SwiftyOS  196a5d6b59  formatting  2024-09-12 16:30:53 +02:00
SwiftyOS  82c1249d33  added rest of routes  2024-09-12 16:30:45 +02:00
SwiftyOS  9389a30298  fixes  2024-09-12 15:50:49 +02:00
SwiftyOS  9b58faeeb6  added analytics route  2024-09-12 15:48:11 +02:00
SwiftyOS  9cb55c5ac0  added all agent routes  2024-09-12 15:38:46 +02:00
SwiftyOS  eb1df12ce8  adding all the routes  2024-09-12 15:12:23 +02:00
SwiftyOS  12f40596b3  added handler stubs  2024-09-12 13:59:52 +02:00
SwiftyOS  3af26a9379  added models  2024-09-12 13:49:28 +02:00
SwiftyOS  4041ed3e33  deleted setup script  2024-09-12 13:45:27 +02:00
SwiftyOS  ee78653425  updated auth  2024-09-12 13:45:11 +02:00
SwiftyOS  76c5a27044  remove dead layer  2024-09-12 12:55:19 +02:00
SwiftyOS  c6aba70dd4  add a go server  2024-09-12 12:54:58 +02:00
SwiftyOS  470e7036b9  remove try  2024-09-12 12:45:14 +02:00
SwiftyOS  f6608754aa  remove dead layer  2024-09-12 12:05:41 +02:00
SwiftyOS  bd70ab00e0  fixed missing prisma binaries  2024-09-12 12:02:00 +02:00
SwiftyOS  2c940b381a  updated depends checks  2024-09-12 11:48:19 +02:00
SwiftyOS  03b30ebf5b  updated default config  2024-09-12 11:43:29 +02:00
SwiftyOS  0d8c2a820e  Sorting alphabetical  2024-09-12 11:39:21 +02:00
SwiftyOS  5ce562e11f  updated config based on review comments  2024-09-12 11:20:05 +02:00
Swifty  fcf2247c20  Merge branch 'master' into remove-forge-and-autogpt  2024-09-12 11:00:46 +02:00
SwiftyOS  7326ee1221  add exp backoff on connection issues  2024-09-12 10:59:40 +02:00
SwiftyOS  70c7a3b1f3  sleep for prisma issues  2024-09-12 10:58:27 +02:00
SwiftyOS  6bdab5b777  updated version  2024-09-12 10:20:23 +02:00
SwiftyOS  5f5e31ac19  trying to get around github action linting issue  2024-09-12 10:20:09 +02:00
SwiftyOS  9e71b658d6  linting not working in github actions..  2024-09-12 10:14:52 +02:00
SwiftyOS  b7b23d68b4  linting issue  2024-09-12 10:10:45 +02:00
SwiftyOS  630f401cee  formatting  2024-09-12 10:05:44 +02:00
SwiftyOS  94fbcfb501  updated env.example  2024-09-12 09:47:56 +02:00
SwiftyOS  8102f78030  Updated logging  2024-09-12 09:43:11 +02:00
SwiftyOS  b1eb259bb3  fixed docker compose watch  2024-09-11 16:37:23 +02:00
SwiftyOS  c738eb3bc6  Reduced built image size  2024-09-11 16:27:57 +02:00
SwiftyOS  60fca5c5f0  Update build process to reduce image size  2024-09-11 16:10:06 +02:00
SwiftyOS  bba9836735  Merge remote-tracking branch 'origin/master' into remove-forge-and-autogpt  2024-09-11 15:47:40 +02:00
SwiftyOS  ad76bd1300  update lock files  2024-09-11 15:46:22 +02:00
SwiftyOS  fa16c207e0  Remove forge and autogpt  2024-09-10 14:10:59 +02:00
3,142 changed files with 34,431 additions and 121,963 deletions

.dockerignore

@@ -1,40 +1,40 @@
# Ignore everything by default, selectively add things to context
classic/run
*
# AutoGPT
!classic/original_autogpt/autogpt/
!classic/original_autogpt/pyproject.toml
!classic/original_autogpt/poetry.lock
!classic/original_autogpt/README.md
!classic/original_autogpt/tests/
!autogpt/autogpt/
!autogpt/pyproject.toml
!autogpt/poetry.lock
!autogpt/README.md
!autogpt/tests/
# Benchmark
!classic/benchmark/agbenchmark/
!classic/benchmark/pyproject.toml
!classic/benchmark/poetry.lock
!classic/benchmark/README.md
!benchmark/agbenchmark/
!benchmark/pyproject.toml
!benchmark/poetry.lock
!benchmark/README.md
# Forge
!classic/forge/
!classic/forge/pyproject.toml
!classic/forge/poetry.lock
!classic/forge/README.md
!forge/forge/
!forge/pyproject.toml
!forge/poetry.lock
!forge/README.md
# Frontend
!classic/frontend/build/web/
!frontend/build/web/
# Platform
!autogpt_platform/
# rnd
!rnd/
# Explicitly re-ignore some folders
.*
**/__pycache__
# rnd
rnd/autogpt_builder/.next/
rnd/autogpt_builder/node_modules
rnd/autogpt_builder/.env.example
rnd/autogpt_builder/.env.local
rnd/autogpt_server/.env
rnd/autogpt_server/.venv/
autogpt_platform/frontend/.next/
autogpt_platform/frontend/node_modules
autogpt_platform/frontend/.env.example
autogpt_platform/frontend/.env.local
autogpt_platform/backend/.env
autogpt_platform/backend/.venv/
autogpt_platform/market/.env
rnd/market/.env

.gitattributes (4 changes)

@@ -1,10 +1,10 @@
classic/frontend/build/** linguist-generated
frontend/build/** linguist-generated
**/poetry.lock linguist-generated
docs/_javascript/** linguist-vendored
# Exclude VCR cassettes from stats
classic/forge/tests/vcr_cassettes/**/**.y*ml linguist-generated
forge/tests/vcr_cassettes/**/**.y*ml linguist-generated
* text=auto

.github/CODEOWNERS (8 changes)

@@ -1,7 +1,7 @@
* @Significant-Gravitas/maintainers
.github/workflows/ @Significant-Gravitas/devops
classic/forge/ @Significant-Gravitas/forge-maintainers
classic/benchmark/ @Significant-Gravitas/benchmark-maintainers
classic/frontend/ @Significant-Gravitas/frontend-maintainers
autogpt_platform/infra @Significant-Gravitas/devops
forge/ @Significant-Gravitas/forge-maintainers
benchmark/ @Significant-Gravitas/benchmark-maintainers
frontend/ @Significant-Gravitas/frontend-maintainers
rnd/infra @Significant-Gravitas/devops
.github/CODEOWNERS @Significant-Gravitas/admins

.github/PULL_REQUEST_TEMPLATE.md

@@ -1,38 +1,31 @@
### Background
<!-- Clearly explain the need for these changes: -->
### Changes 🏗️
<!-- Concisely describe all of the changes made in this pull request: -->
### Checklist 📋
### PR Quality Scorecard ✨
#### For code changes:
- [ ] I have clearly listed my changes in the PR description
- [ ] I have made a test plan
- [ ] I have tested my changes according to the test plan:
<!-- Put your test plan here: -->
- [ ] ...
<!--
Check out our contribution guide:
https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing
<details>
<summary>Example test plan</summary>
- [ ] Create from scratch and execute an agent with at least 3 blocks
- [ ] Import an agent from file upload, and confirm it executes correctly
- [ ] Upload agent to marketplace
- [ ] Import an agent from marketplace and confirm it executes correctly
- [ ] Edit an agent from monitor, and confirm it executes correctly
</details>
1. Avoid duplicate work, issues, PRs etc.
2. Also consider contributing something other than code; see the [contribution guide]
for options.
3. Clearly explain your changes.
4. Avoid making unnecessary changes, especially if they're purely based on personal
preferences. Doing so is the maintainers' job. ;-)
-->
#### For configuration changes:
- [ ] `.env.example` is updated or already compatible with my changes
- [ ] `docker-compose.yml` is updated or already compatible with my changes
- [ ] I have included a list of my configuration changes in the PR description (under **Changes**)
<details>
<summary>Examples of configuration changes</summary>
- Changing ports
- Adding new services that need to communicate with each other
- Secrets or environment variable changes
- New or infrastructure changes such as databases
</details>
- [x] Have you used the PR description template? &ensp; `+2 pts`
- [ ] Is your pull request atomic, focusing on a single change? &ensp; `+5 pts`
- [ ] Have you linked the GitHub issue(s) that this PR addresses? &ensp; `+5 pts`
- [ ] Have you documented your changes clearly and comprehensively? &ensp; `+5 pts`
- [ ] Have you changed or added a feature? &ensp; `-4 pts`
- [ ] Have you added/updated corresponding documentation? &ensp; `+4 pts`
- [ ] Have you added/updated corresponding integration tests? &ensp; `+5 pts`
- [ ] Have you changed the behavior of AutoGPT? &ensp; `-5 pts`
- [ ] Have you also run `agbenchmark` to verify that these changes do not regress performance? &ensp; `+10 pts`

.github/dependabot.yml (deleted file, 179 lines)

@@ -1,179 +0,0 @@
version: 2
updates:
# autogpt_libs (Poetry project)
- package-ecosystem: "pip"
directory: "autogpt_platform/autogpt_libs"
schedule:
interval: "weekly"
open-pull-requests-limit: 10
target-branch: "dev"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
# backend (Poetry project)
- package-ecosystem: "pip"
directory: "autogpt_platform/backend"
schedule:
interval: "weekly"
open-pull-requests-limit: 10
target-branch: "dev"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
# frontend (Next.js project)
- package-ecosystem: "npm"
directory: "autogpt_platform/frontend"
schedule:
interval: "weekly"
open-pull-requests-limit: 10
target-branch: "dev"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
# infra (Terraform)
- package-ecosystem: "terraform"
directory: "autogpt_platform/infra"
schedule:
interval: "weekly"
open-pull-requests-limit: 5
target-branch: "dev"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
# market (Poetry project)
- package-ecosystem: "pip"
directory: "autogpt_platform/market"
schedule:
interval: "weekly"
open-pull-requests-limit: 10
target-branch: "dev"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
# GitHub Actions
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
open-pull-requests-limit: 5
target-branch: "dev"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
# Docker
- package-ecosystem: "docker"
directory: "autogpt_platform/"
schedule:
interval: "weekly"
open-pull-requests-limit: 5
target-branch: "dev"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
# Submodules
- package-ecosystem: "gitsubmodule"
directory: "autogpt_platform/supabase"
schedule:
interval: "weekly"
open-pull-requests-limit: 1
target-branch: "dev"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
# Docs
- package-ecosystem: 'pip'
directory: "docs/"
schedule:
interval: "weekly"
open-pull-requests-limit: 1
target-branch: "dev"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"

.github/labeler.yml (35 changes)

@@ -1,32 +1,27 @@
Classic AutoGPT Agent:
AutoGPT Agent:
- changed-files:
- any-glob-to-any-file: classic/original_autogpt/**
Classic Benchmark:
- changed-files:
- any-glob-to-any-file: classic/benchmark/**
Classic Frontend:
- changed-files:
- any-glob-to-any-file: classic/frontend/**
- any-glob-to-any-file: autogpt/**
Forge:
- changed-files:
- any-glob-to-any-file: classic/forge/**
- any-glob-to-any-file: forge/**
Benchmark:
- changed-files:
- any-glob-to-any-file: benchmark/**
Frontend:
- changed-files:
- any-glob-to-any-file: frontend/**
documentation:
- changed-files:
- any-glob-to-any-file: docs/**
platform/frontend:
Builder:
- changed-files:
- any-glob-to-any-file: autogpt_platform/frontend/**
- any-glob-to-any-file: rnd/autogpt_builder/**
platform/backend:
Server:
- changed-files:
- any-glob-to-any-file: autogpt_platform/backend/**
- all-globs-to-all-files: '!autogpt_platform/backend/backend/blocks/**'
platform/blocks:
- changed-files:
- any-glob-to-any-file: autogpt_platform/backend/backend/blocks/**
- any-glob-to-any-file: rnd/autogpt_server/**

.github/workflows/autogpt-builder-ci.yml (new file, 41 lines)

@@ -0,0 +1,41 @@
name: AutoGPT Builder CI
on:
push:
branches: [ master ]
paths:
- '.github/workflows/autogpt-builder-ci.yml'
- 'rnd/autogpt_builder/**'
pull_request:
paths:
- '.github/workflows/autogpt-builder-ci.yml'
- 'rnd/autogpt_builder/**'
defaults:
run:
shell: bash
working-directory: rnd/autogpt_builder
jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '21'
- name: Install dependencies
run: |
npm install
- name: Check formatting with Prettier
run: |
npx prettier --check .
- name: Run lint
run: |
npm run lint

.github/workflows/classic-autogpt-ci.yml → .github/workflows/autogpt-ci.yml

@@ -1,25 +1,25 @@
name: Classic - AutoGPT CI
name: AutoGPT CI
on:
push:
branches: [ master, dev, ci-test* ]
branches: [ master, development, ci-test* ]
paths:
- '.github/workflows/classic-autogpt-ci.yml'
- 'classic/original_autogpt/**'
- '.github/workflows/autogpt-ci.yml'
- 'autogpt/**'
pull_request:
branches: [ master, dev, release-* ]
branches: [ master, development, release-* ]
paths:
- '.github/workflows/classic-autogpt-ci.yml'
- 'classic/original_autogpt/**'
- '.github/workflows/autogpt-ci.yml'
- 'autogpt/**'
concurrency:
group: ${{ format('classic-autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
group: ${{ format('autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
defaults:
run:
shell: bash
working-directory: classic/original_autogpt
working-directory: autogpt
jobs:
test:
@@ -86,7 +86,7 @@ jobs:
uses: actions/cache@v4
with:
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('classic/original_autogpt/poetry.lock') }}
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt/poetry.lock') }}
- name: Install Poetry (Unix)
if: runner.os != 'Windows'
@@ -135,4 +135,4 @@ jobs:
uses: actions/upload-artifact@v4
with:
name: test-logs
path: classic/original_autogpt/logs/
path: autogpt/logs/


@@ -0,0 +1,59 @@
name: Purge Auto-GPT Docker CI cache
on:
schedule:
- cron: 20 4 * * 1,4
env:
BASE_BRANCH: development
IMAGE_NAME: auto-gpt
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
build-type: [release, dev]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- id: build
name: Build image
uses: docker/build-push-action@v5
with:
file: Dockerfile.autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
load: true # save to docker images
# use GHA cache as read-only
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.schedule }}
build_type: ${{ matrix.build-type }}
prod_branch: master
dev_branch: development
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.sha }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }}
push_forced_label:
new_commits_json: ${{ null }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true

.github/workflows/classic-autogpt-docker-ci.yml → .github/workflows/autogpt-docker-ci.yml

@@ -1,26 +1,24 @@
name: Classic - AutoGPT Docker CI
name: AutoGPT Docker CI
on:
push:
branches: [master, dev]
branches: [ master, development ]
paths:
- '.github/workflows/classic-autogpt-docker-ci.yml'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
- '.github/workflows/autogpt-docker-ci.yml'
- 'autogpt/**'
pull_request:
branches: [ master, dev, release-* ]
branches: [ master, development, release-* ]
paths:
- '.github/workflows/classic-autogpt-docker-ci.yml'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
- '.github/workflows/autogpt-docker-ci.yml'
- 'autogpt/**'
concurrency:
group: ${{ format('classic-autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
group: ${{ format('autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
defaults:
run:
working-directory: classic/original_autogpt
working-directory: autogpt
env:
IMAGE_NAME: auto-gpt
@@ -34,58 +32,57 @@ jobs:
matrix:
build-type: [release, dev]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- if: runner.debug
run: |
ls -al
du -hs *
- if: runner.debug
run: |
ls -al
du -hs *
- id: build
name: Build image
uses: docker/build-push-action@v6
with:
context: classic/
file: classic/Dockerfile.autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
tags: ${{ env.IMAGE_NAME }}
labels: GIT_REVISION=${{ github.sha }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }}
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
- id: build
name: Build image
uses: docker/build-push-action@v5
with:
file: Dockerfile.autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
tags: ${{ env.IMAGE_NAME }}
labels: GIT_REVISION=${{ github.sha }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }}
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref}}
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref}}
build_type: ${{ matrix.build-type }}
build_type: ${{ matrix.build-type }}
prod_branch: master
dev_branch: dev
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}
prod_branch: master
dev_branch: development
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.event.after }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.event.after }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}
new_commits_json: ${{ toJSON(github.event.commits) }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
new_commits_json: ${{ toJSON(github.event.commits) }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true
test:
runs-on: ubuntu-latest
@@ -117,16 +114,15 @@ jobs:
- id: build
name: Build image
uses: docker/build-push-action@v6
uses: docker/build-push-action@v5
with:
context: classic/
file: classic/Dockerfile.autogpt
build-args: BUILD_TYPE=dev # include pytest
file: Dockerfile.autogpt
build-args: BUILD_TYPE=dev # include pytest
tags: >
${{ env.IMAGE_NAME }},
${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}
labels: GIT_REVISION=${{ github.sha }}
load: true # save to docker images
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=autogpt-docker-dev
cache-to: type=gha,scope=autogpt-docker-dev,mode=max


@@ -0,0 +1,86 @@
name: AutoGPT Docker Release
on:
release:
types: [ published, edited ]
workflow_dispatch:
inputs:
no_cache:
type: boolean
description: 'Build from scratch, without using cached layers'
env:
IMAGE_NAME: auto-gpt
DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER }}/auto-gpt
jobs:
build:
if: startsWith(github.ref, 'refs/tags/autogpt-')
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Log in to Docker hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
# slashes are not allowed in image tags, but can appear in git branch or tag names
- id: sanitize_tag
name: Sanitize image tag
run: |
tag=${raw_tag//\//-}
echo tag=${tag#autogpt-} >> $GITHUB_OUTPUT
env:
raw_tag: ${{ github.ref_name }}
- id: build
name: Build image
uses: docker/build-push-action@v5
with:
file: Dockerfile.autogpt
build-args: BUILD_TYPE=release
load: true # save to docker images
# push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
tags: >
${{ env.IMAGE_NAME }},
${{ env.DEPLOY_IMAGE_NAME }}:latest,
${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
labels: GIT_REVISION=${{ github.sha }}
# cache layers in GitHub Actions cache to speed up builds
cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release
cache-to: type=gha,scope=autogpt-docker-release,mode=max
- name: Push image to Docker Hub
run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref}}
inputs_no_cache: ${{ inputs.no_cache }}
prod_branch: master
dev_branch: development
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
ref_type: ${{ github.ref_type }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.sha }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true

.github/workflows/autogpt-infra-ci.yml (new file, 56 lines)

@@ -0,0 +1,56 @@
name: AutoGPT Builder Infra
on:
push:
branches: [ master ]
paths:
- '.github/workflows/autogpt-infra-ci.yml'
- 'rnd/infra/**'
pull_request:
paths:
- '.github/workflows/autogpt-infra-ci.yml'
- 'rnd/infra/**'
defaults:
run:
shell: bash
working-directory: rnd/infra
jobs:
lint:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: TFLint
uses: pauloconnor/tflint-action@v0.0.2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tflint_path: terraform/
tflint_recurse: true
tflint_changed_only: false
- name: Set up Helm
uses: azure/setup-helm@v4.2.0
with:
version: v3.14.4
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.0
- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --target-branch ${{ github.event.repository.default_branch }})
if [[ -n "$changed" ]]; then
echo "changed=true" >> "$GITHUB_OUTPUT"
fi
- name: Run chart-testing (lint)
if: steps.list-changed.outputs.changed == 'true'
run: ct lint --target-branch ${{ github.event.repository.default_branch }}

.github/workflows/autogpt-server-ci.yml (new file, 155 lines)

@@ -0,0 +1,155 @@
name: AutoGPT Server CI
on:
push:
branches: [master, development, ci-test*]
paths:
- ".github/workflows/autogpt-server-ci.yml"
- "rnd/autogpt_server/**"
pull_request:
branches: [master, development, release-*]
paths:
- ".github/workflows/autogpt-server-ci.yml"
- "rnd/autogpt_server/**"
concurrency:
group: ${{ format('autogpt-server-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
defaults:
run:
shell: bash
working-directory: rnd/autogpt_server
jobs:
test:
permissions:
contents: read
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
platform-os: [ubuntu, macos, macos-arm64, windows]
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
steps:
- name: Setup PostgreSQL
uses: ikalnytskyi/action-setup-postgres@v6
with:
username: ${{ secrets.DB_USER || 'postgres' }}
password: ${{ secrets.DB_PASS || 'postgres' }}
database: postgres
port: 5432
id: postgres
# Quite slow on macOS (2~4 minutes to set up Docker)
# - name: Set up Docker (macOS)
# if: runner.os == 'macOS'
# uses: crazy-max/ghaction-setup-docker@v3
- name: Start MinIO service (Linux)
if: runner.os == 'Linux'
working-directory: "."
run: |
docker pull minio/minio:edge-cicd
docker run -d -p 9000:9000 minio/minio:edge-cicd
- name: Start MinIO service (macOS)
if: runner.os == 'macOS'
working-directory: ${{ runner.temp }}
run: |
brew install minio/stable/minio
mkdir data
minio server ./data &
# No MinIO on Windows:
# - Windows doesn't support running Linux Docker containers
# - It doesn't seem possible to start background processes on Windows. They are
# killed after the step returns.
# See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Set up Python dependency cache
# On Windows, unpacking cached dependencies takes longer than just installing them
if: runner.os != 'Windows'
uses: actions/cache@v4
with:
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('rnd/autogpt_server/poetry.lock') }}
- name: Install Poetry (Unix)
if: runner.os != 'Windows'
run: |
curl -sSL https://install.python-poetry.org | python3 -
if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi
- name: Install Poetry (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: |
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
$env:PATH += ";$env:APPDATA\Python\Scripts"
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
- name: Install Python dependencies
run: poetry install
- name: Generate Prisma Client
run: poetry run prisma generate
- name: Run Database Migrations
run: poetry run prisma migrate dev --name updates
env:
CONNECTION_STR: ${{ steps.postgres.outputs.connection-uri }}
- id: lint
name: Run Linter
run: poetry run lint
- name: Run pytest with coverage
run: |
if [[ "${{ runner.debug }}" == "1" ]]; then
poetry run pytest -vv -o log_cli=true -o log_cli_level=DEBUG test
else
poetry run pytest -vv test
fi
if: success() || (failure() && steps.lint.outcome == 'failure')
env:
LOG_LEVEL: ${{ runner.debug && 'DEBUG' || 'INFO' }}
env:
CI: true
PLAIN_OUTPUT: True
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
DB_USER: ${{ secrets.DB_USER || 'postgres' }}
DB_PASS: ${{ secrets.DB_PASS || 'postgres' }}
DB_NAME: postgres
DB_PORT: 5432
RUN_ENV: local
PORT: 8080
DATABASE_URL: postgresql://${{ secrets.DB_USER || 'postgres' }}:${{ secrets.DB_PASS || 'postgres' }}@localhost:5432/${{ secrets.DB_NAME || 'postgres'}}
# - name: Upload coverage reports to Codecov
# uses: codecov/codecov-action@v4
# with:
# token: ${{ secrets.CODECOV_TOKEN }}
# flags: autogpt-server,${{ runner.os }}


@@ -0,0 +1,97 @@
name: AutoGPTs Nightly Benchmark
on:
workflow_dispatch:
schedule:
- cron: '0 2 * * *'
jobs:
benchmark:
permissions:
contents: write
runs-on: ubuntu-latest
strategy:
matrix:
agent-name: [ autogpt ]
fail-fast: false
timeout-minutes: 120
env:
min-python-version: '3.10'
REPORTS_BRANCH: data/benchmark-reports
REPORTS_FOLDER: ${{ format('benchmark/reports/{0}', matrix.agent-name) }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}
- name: Install Poetry
run: curl -sSL https://install.python-poetry.org | python -
- name: Prepare reports folder
run: mkdir -p ${{ env.REPORTS_FOLDER }}
- run: poetry -C benchmark install
- name: Benchmark ${{ matrix.agent-name }}
run: |
./run agent start ${{ matrix.agent-name }}
cd ${{ matrix.agent-name }}
set +e # Do not quit on non-zero exit codes
poetry run agbenchmark run -N 3 \
--test=ReadFile \
--test=BasicRetrieval --test=RevenueRetrieval2 \
--test=CombineCsv --test=LabelCsv --test=AnswerQuestionCombineCsv \
--test=UrlShortener --test=TicTacToe --test=Battleship \
--test=WebArenaTask_0 --test=WebArenaTask_21 --test=WebArenaTask_124 \
--test=WebArenaTask_134 --test=WebArenaTask_163
# Convert exit code 1 (some challenges failed) to exit code 0
if [ $? -eq 0 ] || [ $? -eq 1 ]; then
exit 0
else
exit $?
fi
env:
AGENT_NAME: ${{ matrix.agent-name }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
REPORTS_FOLDER: ${{ format('../../{0}', env.REPORTS_FOLDER) }} # account for changed workdir
TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
- name: Push reports to data branch
run: |
# BODGE: Remove success_rate.json and regression_tests.json to avoid conflicts on checkout
rm ${{ env.REPORTS_FOLDER }}/*.json
# Find folder with newest (untracked) report in it
report_subfolder=$(find ${{ env.REPORTS_FOLDER }} -type f -name 'report.json' \
| xargs -I {} dirname {} \
| xargs -I {} git ls-files --others --exclude-standard {} \
| xargs -I {} dirname {} \
| sort -u)
json_report_file="$report_subfolder/report.json"
# Convert JSON report to Markdown
markdown_report_file="$report_subfolder/report.md"
poetry -C benchmark run benchmark/reports/format.py "$json_report_file" > "$markdown_report_file"
cat "$markdown_report_file" >> $GITHUB_STEP_SUMMARY
git config --global user.name 'GitHub Actions'
git config --global user.email 'github-actions@agpt.co'
git fetch origin ${{ env.REPORTS_BRANCH }}:${{ env.REPORTS_BRANCH }} \
&& git checkout ${{ env.REPORTS_BRANCH }} \
|| git checkout --orphan ${{ env.REPORTS_BRANCH }}
git reset --hard
git add ${{ env.REPORTS_FOLDER }}
git commit -m "Benchmark report for ${{ matrix.agent-name }} @ $(date +'%Y-%m-%d')" \
&& git push origin ${{ env.REPORTS_BRANCH }}

.github/workflows/classic-autogpts-ci.yml → .github/workflows/autogpts-ci.yml

@@ -1,43 +1,38 @@
name: Classic - Agent smoke tests
name: Agent smoke tests
on:
workflow_dispatch:
schedule:
- cron: '0 8 * * *'
push:
branches: [ master, dev, ci-test* ]
branches: [ master, development, ci-test* ]
paths:
- '.github/workflows/classic-autogpts-ci.yml'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
- 'classic/benchmark/**'
- 'classic/run'
- 'classic/cli.py'
- 'classic/setup.py'
- '.github/workflows/autogpts-ci.yml'
- 'autogpt/**'
- 'forge/**'
- 'benchmark/**'
- 'run'
- 'cli.py'
- 'setup.py'
- '!**/*.md'
pull_request:
branches: [ master, dev, release-* ]
branches: [ master, development, release-* ]
paths:
- '.github/workflows/classic-autogpts-ci.yml'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
- 'classic/benchmark/**'
- 'classic/run'
- 'classic/cli.py'
- 'classic/setup.py'
- '.github/workflows/autogpts-ci.yml'
- 'autogpt/**'
- 'forge/**'
- 'benchmark/**'
- 'run'
- 'cli.py'
- 'setup.py'
- '!**/*.md'
defaults:
run:
shell: bash
working-directory: classic
jobs:
serve-agent-protocol:
runs-on: ubuntu-latest
strategy:
matrix:
agent-name: [ original_autogpt ]
agent-name: [ autogpt ]
fail-fast: false
timeout-minutes: 20
env:
@@ -55,7 +50,7 @@ jobs:
python-version: ${{ env.min-python-version }}
- name: Install Poetry
working-directory: ./classic/${{ matrix.agent-name }}/
working-directory: ./${{ matrix.agent-name }}/
run: |
curl -sSL https://install.python-poetry.org | python -

.github/workflows/classic-benchmark-ci.yml → .github/workflows/benchmark-ci.yml

@@ -1,18 +1,18 @@
name: Classic - AGBenchmark CI
name: AGBenchmark CI
on:
push:
branches: [ master, dev, ci-test* ]
branches: [ master, development, ci-test* ]
paths:
- 'classic/benchmark/**'
- '!classic/benchmark/reports/**'
- .github/workflows/classic-benchmark-ci.yml
- 'benchmark/**'
- .github/workflows/benchmark-ci.yml
- '!benchmark/reports/**'
pull_request:
branches: [ master, dev, release-* ]
branches: [ master, development, release-* ]
paths:
- 'classic/benchmark/**'
- '!classic/benchmark/reports/**'
- .github/workflows/classic-benchmark-ci.yml
- 'benchmark/**'
- '!benchmark/reports/**'
- .github/workflows/benchmark-ci.yml
concurrency:
group: ${{ format('benchmark-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
@@ -39,7 +39,7 @@ jobs:
defaults:
run:
shell: bash
working-directory: classic/benchmark
working-directory: benchmark
steps:
- name: Checkout repository
uses: actions/checkout@v4
@@ -58,7 +58,7 @@ jobs:
uses: actions/cache@v4
with:
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('classic/benchmark/poetry.lock') }}
key: poetry-${{ runner.os }}-${{ hashFiles('benchmark/poetry.lock') }}
- name: Install Poetry (Unix)
if: runner.os != 'Windows'
@@ -102,7 +102,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
agent-name: [forge]
agent-name: [ forge ]
fail-fast: false
timeout-minutes: 20
steps:
@@ -122,7 +122,7 @@ jobs:
curl -sSL https://install.python-poetry.org | python -
- name: Run regression tests
working-directory: classic
working-directory: .
run: |
./run agent start ${{ matrix.agent-name }}
cd ${{ matrix.agent-name }}
@@ -146,23 +146,23 @@ jobs:
echo "Running the following command: poetry run agbenchmark --mock --category=coding"
poetry run agbenchmark --mock --category=coding
# echo "Running the following command: poetry run agbenchmark --test=WriteFile"
# poetry run agbenchmark --test=WriteFile
echo "Running the following command: poetry run agbenchmark --test=WriteFile"
poetry run agbenchmark --test=WriteFile
cd ../benchmark
poetry install
echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed"
export BUILD_SKILL_TREE=true
# poetry run agbenchmark --mock
poetry run agbenchmark --mock
# CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
# if [ ! -z "$CHANGED" ]; then
# echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
# echo "$CHANGED"
# exit 1
# else
# echo "No unstaged changes."
# fi
CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../frontend/assets)') || echo "No diffs"
if [ ! -z "$CHANGED" ]; then
echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
echo "$CHANGED"
exit 1
else
echo "No unstaged changes."
fi
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci


@@ -1,4 +1,4 @@
name: Classic - Publish to PyPI
name: Publish to PyPI
on:
workflow_dispatch:
@@ -21,21 +21,21 @@ jobs:
python-version: 3.8
- name: Install Poetry
working-directory: ./classic/benchmark/
working-directory: ./benchmark/
run: |
curl -sSL https://install.python-poetry.org | python3 -
echo "$HOME/.poetry/bin" >> $GITHUB_PATH
- name: Build project for distribution
working-directory: ./classic/benchmark/
working-directory: ./benchmark/
run: poetry build
- name: Install dependencies
working-directory: ./classic/benchmark/
working-directory: ./benchmark/
run: poetry install
- name: Check Version
working-directory: ./classic/benchmark/
working-directory: ./benchmark/
id: check-version
run: |
echo version=$(poetry version --short) >> $GITHUB_OUTPUT
@@ -43,7 +43,7 @@ jobs:
- name: Create Release
uses: ncipollo/release-action@v1
with:
artifacts: "classic/benchmark/dist/*"
artifacts: "benchmark/dist/*"
token: ${{ secrets.GITHUB_TOKEN }}
draft: false
generateReleaseNotes: false
@@ -51,5 +51,5 @@ jobs:
commit: master
- name: Build and publish
working-directory: ./classic/benchmark/
working-directory: ./benchmark/
run: poetry publish -u __token__ -p ${{ secrets.PYPI_API_TOKEN }}


@@ -1,60 +0,0 @@
name: Classic - Purge Auto-GPT Docker CI cache
on:
schedule:
- cron: 20 4 * * 1,4
env:
BASE_BRANCH: dev
IMAGE_NAME: auto-gpt
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
build-type: [release, dev]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- id: build
name: Build image
uses: docker/build-push-action@v6
with:
context: classic/
file: classic/Dockerfile.autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
load: true # save to docker images
# use GHA cache as read-only
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.schedule }}
build_type: ${{ matrix.build-type }}
prod_branch: master
dev_branch: dev
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.sha }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }}
push_forced_label:
new_commits_json: ${{ null }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true


@@ -1,87 +0,0 @@
name: Classic - AutoGPT Docker Release
on:
release:
types: [published, edited]
workflow_dispatch:
inputs:
no_cache:
type: boolean
description: 'Build from scratch, without using cached layers'
env:
IMAGE_NAME: auto-gpt
DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER }}/auto-gpt
jobs:
build:
if: startsWith(github.ref, 'refs/tags/autogpt-')
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Log in to Docker hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
# slashes are not allowed in image tags, but can appear in git branch or tag names
- id: sanitize_tag
name: Sanitize image tag
run: |
tag=${raw_tag//\//-}
echo tag=${tag#autogpt-} >> $GITHUB_OUTPUT
env:
raw_tag: ${{ github.ref_name }}
- id: build
name: Build image
uses: docker/build-push-action@v6
with:
context: classic/
file: Dockerfile.autogpt
build-args: BUILD_TYPE=release
load: true # save to docker images
# push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
tags: >
${{ env.IMAGE_NAME }},
${{ env.DEPLOY_IMAGE_NAME }}:latest,
${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
labels: GIT_REVISION=${{ github.sha }}
# cache layers in GitHub Actions cache to speed up builds
cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release
cache-to: type=gha,scope=autogpt-docker-release,mode=max
- name: Push image to Docker Hub
run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref}}
inputs_no_cache: ${{ inputs.no_cache }}
prod_branch: master
dev_branch: dev
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}
ref_type: ${{ github.ref_type }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.sha }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true

.github/workflows/classic-frontend-ci.yml (deleted file, 60 lines)

@@ -1,60 +0,0 @@
name: Classic - Frontend CI/CD
on:
push:
branches:
- master
- dev
- 'ci-test*' # This will match any branch that starts with "ci-test"
paths:
- 'classic/frontend/**'
- '.github/workflows/classic-frontend-ci.yml'
pull_request:
paths:
- 'classic/frontend/**'
- '.github/workflows/classic-frontend-ci.yml'
jobs:
build:
permissions:
contents: write
pull-requests: write
runs-on: ubuntu-latest
env:
BUILD_BRANCH: ${{ format('classic-frontend-build/{0}', github.ref_name) }}
steps:
- name: Checkout Repo
uses: actions/checkout@v4
- name: Setup Flutter
uses: subosito/flutter-action@v2
with:
flutter-version: '3.13.2'
- name: Build Flutter to Web
run: |
cd classic/frontend
flutter build web --base-href /app/
# - name: Commit and Push to ${{ env.BUILD_BRANCH }}
# if: github.event_name == 'push'
# run: |
# git config --local user.email "action@github.com"
# git config --local user.name "GitHub Action"
# git add classic/frontend/build/web
# git checkout -B ${{ env.BUILD_BRANCH }}
# git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
# git push -f origin ${{ env.BUILD_BRANCH }}
- name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
if: github.event_name == 'push'
uses: peter-evans/create-pull-request@v7
with:
add-paths: classic/frontend/build/web
base: ${{ github.ref_name }}
branch: ${{ env.BUILD_BRANCH }}
delete-branch: true
title: "Update frontend build in `${{ github.ref_name }}`"
body: "This PR updates the frontend build based on commit ${{ github.sha }}."
commit-message: "Update frontend build based on commit ${{ github.sha }}"


@@ -1,4 +1,4 @@
name: Repo - Close stale issues
name: 'Close stale issues'
on:
schedule:
- cron: '30 1 * * *'


@@ -1,98 +0,0 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ "master", "release-*", "dev" ]
pull_request:
branches: [ "master", "release-*", "dev" ]
merge_group:
schedule:
- cron: '15 4 * * 0'
jobs:
analyze:
name: Analyze (${{ matrix.language }})
# Runner size impacts CodeQL analysis time. To learn more, please see:
# - https://gh.io/recommended-hardware-resources-for-running-codeql
# - https://gh.io/supported-runners-and-hardware-resources
# - https://gh.io/using-larger-runners (GitHub.com only)
# Consider using larger runners or machines with greater resources for possible analysis time improvements.
runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
permissions:
# required for all workflows
security-events: write
# required to fetch internal or private CodeQL packs
packages: read
# only required for workflows in private repositories
actions: read
contents: read
strategy:
fail-fast: false
matrix:
include:
- language: typescript
build-mode: none
- language: python
build-mode: none
# CodeQL supports the following values keywords for 'language': 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift'
# Use `c-cpp` to analyze code written in C, C++ or both
# Use 'java-kotlin' to analyze code written in Java, Kotlin or both
# Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
# To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
# see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
# If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
# your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
steps:
- name: Checkout repository
uses: actions/checkout@v4
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
build-mode: ${{ matrix.build-mode }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
config: |
paths-ignore:
- classic/frontend/build/**
# For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality
# If the analyze step fails for one of the languages you are analyzing with
# "We were unable to automatically build your code", modify the matrix above
# to set the build mode to "manual" for that language. Then modify this step
# to build your code.
# Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
- if: matrix.build-mode == 'manual'
shell: bash
run: |
echo 'If you are using a "manual" build mode for one or more of the' \
'languages you are analyzing, replace this with the commands to build' \
'your code, for example:'
echo ' make bootstrap'
echo ' make release'
exit 1
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
with:
category: "/language:${{matrix.language}}"

.github/workflows/classic-forge-ci.yml → .github/workflows/forge-ci.yml

@@ -1,18 +1,18 @@
name: Classic - Forge CI
name: Forge CI
on:
push:
branches: [ master, dev, ci-test* ]
branches: [ master, development, ci-test* ]
paths:
- '.github/workflows/classic-forge-ci.yml'
- 'classic/forge/**'
- '!classic/forge/tests/vcr_cassettes'
- '.github/workflows/forge-ci.yml'
- 'forge/**'
- '!forge/tests/vcr_cassettes'
pull_request:
branches: [ master, dev, release-* ]
branches: [ master, development, release-* ]
paths:
- '.github/workflows/classic-forge-ci.yml'
- 'classic/forge/**'
- '!classic/forge/tests/vcr_cassettes'
- '.github/workflows/forge-ci.yml'
- 'forge/**'
- '!forge/tests/vcr_cassettes'
concurrency:
group: ${{ format('forge-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
@@ -21,7 +21,7 @@ concurrency:
defaults:
run:
shell: bash
working-directory: classic/forge
working-directory: forge
jobs:
test:
@@ -110,7 +110,7 @@ jobs:
uses: actions/cache@v4
with:
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('classic/forge/poetry.lock') }}
key: poetry-${{ runner.os }}-${{ hashFiles('forge/poetry.lock') }}
- name: Install Poetry (Unix)
if: runner.os != 'Windows'
@@ -233,4 +233,4 @@ jobs:
uses: actions/upload-artifact@v4
with:
name: test-logs
path: classic/forge/logs/
path: forge/logs/

.github/workflows/frontend-ci.yml (new file, 60 lines)

@@ -0,0 +1,60 @@
name: Frontend CI/CD
on:
push:
branches:
- master
- development
- 'ci-test*' # This will match any branch that starts with "ci-test"
paths:
- 'frontend/**'
- '.github/workflows/frontend-ci.yml'
pull_request:
paths:
- 'frontend/**'
- '.github/workflows/frontend-ci.yml'
jobs:
build:
permissions:
contents: write
pull-requests: write
runs-on: ubuntu-latest
env:
BUILD_BRANCH: ${{ format('frontend-build/{0}', github.ref_name) }}
steps:
- name: Checkout Repo
uses: actions/checkout@v4
- name: Setup Flutter
uses: subosito/flutter-action@v2
with:
flutter-version: '3.13.2'
- name: Build Flutter to Web
run: |
cd frontend
flutter build web --base-href /app/
# - name: Commit and Push to ${{ env.BUILD_BRANCH }}
# if: github.event_name == 'push'
# run: |
# git config --local user.email "action@github.com"
# git config --local user.name "GitHub Action"
# git add frontend/build/web
# git checkout -B ${{ env.BUILD_BRANCH }}
# git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
# git push -f origin ${{ env.BUILD_BRANCH }}
- name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
if: github.event_name == 'push'
uses: peter-evans/create-pull-request@v6
with:
add-paths: frontend/build/web
base: ${{ github.ref_name }}
branch: ${{ env.BUILD_BRANCH }}
delete-branch: true
title: "Update frontend build in `${{ github.ref_name }}`"
body: "This PR updates the frontend build based on commit ${{ github.sha }}."
commit-message: "Update frontend build based on commit ${{ github.sha }}"

.github/workflows/hackathon.yml (new file, 133 lines)

@@ -0,0 +1,133 @@
name: Hackathon
on:
workflow_dispatch:
inputs:
agents:
description: "Agents to run (comma-separated)"
required: false
default: "autogpt" # Default agents if none are specified
jobs:
matrix-setup:
runs-on: ubuntu-latest
# Service containers to run with `matrix-setup`
services:
# Label used to access the service container
postgres:
# Docker Hub image
image: postgres
# Provide the password for postgres
env:
POSTGRES_PASSWORD: postgres
# Set health checks to wait until postgres has started
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
ports:
# Maps tcp port 5432 on service container to the host
- 5432:5432
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
env-name: ${{ steps.set-matrix.outputs.env-name }}
steps:
- id: set-matrix
run: |
if [ "${{ github.event_name }}" == "schedule" ]; then
echo "::set-output name=env-name::production"
echo "::set-output name=matrix::[ 'irrelevant']"
elif [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
IFS=',' read -ra matrix_array <<< "${{ github.event.inputs.agents }}"
matrix_string="[ \"$(echo "${matrix_array[@]}" | sed 's/ /", "/g')\" ]"
echo "::set-output name=env-name::production"
echo "::set-output name=matrix::$matrix_string"
else
echo "::set-output name=env-name::testing"
echo "::set-output name=matrix::[ 'irrelevant' ]"
fi
tests:
environment:
name: "${{ needs.matrix-setup.outputs.env-name }}"
needs: matrix-setup
env:
min-python-version: "3.10"
name: "${{ matrix.agent-name }}"
runs-on: ubuntu-latest
services:
# Label used to access the service container
postgres:
# Docker Hub image
image: postgres
# Provide the password for postgres
env:
POSTGRES_PASSWORD: postgres
# Set health checks to wait until postgres has started
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
ports:
# Maps tcp port 5432 on service container to the host
- 5432:5432
timeout-minutes: 50
strategy:
fail-fast: false
matrix:
agent-name: ${{fromJson(needs.matrix-setup.outputs.matrix)}}
steps:
- name: Print Environment Name
run: |
echo "Matrix Setup Environment Name: ${{ needs.matrix-setup.outputs.env-name }}"
- name: Check Docker Container
id: check
run: docker ps
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}
- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Install Poetry
run: |
curl -sSL https://install.python-poetry.org | python -
- name: Install Node.js
uses: actions/setup-node@v4
with:
node-version: v18.15
- name: Run benchmark
run: |
link=$(jq -r '.["github_repo_url"]' arena/$AGENT_NAME.json)
branch=$(jq -r '.["branch_to_benchmark"]' arena/$AGENT_NAME.json)
git clone "$link" -b "$branch" "$AGENT_NAME"
cd $AGENT_NAME
cp ./$AGENT_NAME/.env.example ./$AGENT_NAME/.env || echo "file not found"
./run agent start $AGENT_NAME
cd ../benchmark
poetry install
poetry run agbenchmark --no-dep
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
SERP_API_KEY: ${{ secrets.SERP_API_KEY }}
SERPAPI_API_KEY: ${{ secrets.SERP_API_KEY }}
WEAVIATE_API_KEY: ${{ secrets.WEAVIATE_API_KEY }}
WEAVIATE_URL: ${{ secrets.WEAVIATE_URL }}
GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
GOOGLE_CUSTOM_SEARCH_ENGINE_ID: ${{ secrets.GOOGLE_CUSTOM_SEARCH_ENGINE_ID }}
AGENT_NAME: ${{ matrix.agent-name }}


@@ -1,55 +0,0 @@
name: AutoGPT Platform - Deploy Prod Environment
on:
release:
types: [published]
permissions:
contents: 'read'
id-token: 'write'
jobs:
migrate:
environment: production
name: Run migrations for AutoGPT Platform
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Install Python dependencies
run: |
python -m pip install --upgrade pip
pip install prisma
- name: Run Backend Migrations
working-directory: ./autogpt_platform/backend
run: |
python -m prisma migrate deploy
env:
DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}
- name: Run Market Migrations
working-directory: ./autogpt_platform/market
run: |
python -m prisma migrate deploy
env:
DATABASE_URL: ${{ secrets.MARKET_DATABASE_URL }}
trigger:
needs: migrate
runs-on: ubuntu-latest
steps:
- name: Trigger deploy workflow
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DEPLOY_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: build_deploy_prod
client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "repository": "${{ github.repository }}"}'


@@ -1,57 +0,0 @@
name: AutoGPT Platform - Deploy Dev Environment
on:
push:
branches: [ dev ]
paths:
- 'autogpt_platform/**'
permissions:
contents: 'read'
id-token: 'write'
jobs:
migrate:
environment: develop
name: Run migrations for AutoGPT Platform
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Install Python dependencies
run: |
python -m pip install --upgrade pip
pip install prisma
- name: Run Backend Migrations
working-directory: ./autogpt_platform/backend
run: |
python -m prisma migrate deploy
env:
DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}
- name: Run Market Migrations
working-directory: ./autogpt_platform/market
run: |
python -m prisma migrate deploy
env:
DATABASE_URL: ${{ secrets.MARKET_DATABASE_URL }}
trigger:
needs: migrate
runs-on: ubuntu-latest
steps:
- name: Trigger deploy workflow
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DEPLOY_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: build_deploy_dev
client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "repository": "${{ github.repository }}"}'


@@ -1,134 +0,0 @@
name: AutoGPT Platform - Backend CI
on:
push:
branches: [master, dev, ci-test*]
paths:
- ".github/workflows/platform-backend-ci.yml"
- "autogpt_platform/backend/**"
pull_request:
branches: [master, dev, release-*]
paths:
- ".github/workflows/platform-backend-ci.yml"
- "autogpt_platform/backend/**"
merge_group:
concurrency:
group: ${{ format('backend-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
defaults:
run:
shell: bash
working-directory: autogpt_platform/backend
jobs:
test:
permissions:
contents: read
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
runs-on: ubuntu-latest
services:
redis:
image: bitnami/redis:6.2
env:
REDIS_PASSWORD: testpassword
ports:
- 6379:6379
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Setup Supabase
uses: supabase/setup-cli@v1
with:
version: latest
- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Set up Python dependency cache
uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
- name: Install Poetry (Unix)
run: |
curl -sSL https://install.python-poetry.org | python3 -
if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi
- name: Install Python dependencies
run: poetry install
- name: Generate Prisma Client
run: poetry run prisma generate
- id: supabase
name: Start Supabase
working-directory: .
run: |
supabase init
supabase start --exclude postgres-meta,realtime,storage-api,imgproxy,inbucket,studio,edge-runtime,logflare,vector,supavisor
supabase status -o env | sed 's/="/=/; s/"$//' >> $GITHUB_OUTPUT
# outputs:
# DB_URL, API_URL, GRAPHQL_URL, ANON_KEY, SERVICE_ROLE_KEY, JWT_SECRET
- name: Run Database Migrations
run: poetry run prisma migrate dev --name updates
env:
DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
- id: lint
name: Run Linter
run: poetry run lint
- name: Run pytest with coverage
run: |
if [[ "${{ runner.debug }}" == "1" ]]; then
poetry run pytest -s -vv -o log_cli=true -o log_cli_level=DEBUG test
else
poetry run pytest -s -vv test
fi
if: success() || (failure() && steps.lint.outcome == 'failure')
env:
LOG_LEVEL: ${{ runner.debug && 'DEBUG' || 'INFO' }}
DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
SUPABASE_URL: ${{ steps.supabase.outputs.API_URL }}
SUPABASE_SERVICE_ROLE_KEY: ${{ steps.supabase.outputs.SERVICE_ROLE_KEY }}
SUPABASE_JWT_SECRET: ${{ steps.supabase.outputs.JWT_SECRET }}
REDIS_HOST: 'localhost'
REDIS_PORT: '6379'
REDIS_PASSWORD: 'testpassword'
env:
CI: true
PLAIN_OUTPUT: True
RUN_ENV: local
PORT: 8080
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
# - name: Upload coverage reports to Codecov
# uses: codecov/codecov-action@v4
# with:
# token: ${{ secrets.CODECOV_TOKEN }}
# flags: backend,${{ runner.os }}
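
A note on the `Start Supabase` step above: `supabase status -o env` prints `KEY="value"` lines, and the `sed` expression strips the surrounding quotes so each line is a valid `key=value` entry for `$GITHUB_OUTPUT`. For example:

```
$ echo 'DB_URL="postgresql://127.0.0.1:54322/postgres"' | sed 's/="/=/; s/"$//'
DB_URL=postgresql://127.0.0.1:54322/postgres
```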


@@ -1,101 +0,0 @@
name: AutoGPT Platform - Frontend CI
on:
push:
branches: [master, dev]
paths:
- ".github/workflows/platform-frontend-ci.yml"
- "autogpt_platform/frontend/**"
pull_request:
paths:
- ".github/workflows/platform-frontend-ci.yml"
- "autogpt_platform/frontend/**"
merge_group:
defaults:
run:
shell: bash
working-directory: autogpt_platform/frontend
jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "21"
- name: Install dependencies
run: |
yarn install --frozen-lockfile
- name: Run lint
run: |
yarn lint
test:
runs-on: ubuntu-latest
steps:
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
# setting this to "true" frees about 6 GB,
# but it might remove tools that are actually needed
tool-cache: false
# all of these default to true, but feel free to set to
# "false" if necessary for your workflow
android: false
dotnet: false
haskell: false
large-packages: true
docker-images: true
swap-storage: true
- name: Checkout repository
uses: actions/checkout@v4
with:
submodules: recursive
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "21"
- name: Copy default supabase .env
run: |
cp ../supabase/docker/.env.example ../.env
- name: Copy backend .env
run: |
cp ../backend/.env.example ../backend/.env
- name: Run docker compose
run: |
docker compose -f ../docker-compose.yml up -d
- name: Install dependencies
run: |
yarn install --frozen-lockfile
- name: Setup Builder .env
run: |
cp .env.example .env
- name: Install Playwright Browsers
run: yarn playwright install --with-deps
- name: Run tests
run: |
yarn test
- uses: actions/upload-artifact@v4
if: ${{ !cancelled() }}
with:
name: playwright-report
path: playwright-report/
retention-days: 30
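
The same flow can be reproduced locally; condensed from the steps above (run from `autogpt_platform/frontend`, the workflow's working directory):

```
cp ../supabase/docker/.env.example ../.env
cp ../backend/.env.example ../backend/.env
docker compose -f ../docker-compose.yml up -d
yarn install --frozen-lockfile
cp .env.example .env
yarn playwright install --with-deps
yarn test
```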


@@ -1,126 +0,0 @@
name: AutoGPT Platform - Backend CI
on:
push:
branches: [master, dev, ci-test*]
paths:
- ".github/workflows/platform-market-ci.yml"
- "autogpt_platform/market/**"
pull_request:
branches: [master, dev, release-*]
paths:
- ".github/workflows/platform-market-ci.yml"
- "autogpt_platform/market/**"
merge_group:
concurrency:
group: ${{ format('backend-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
defaults:
run:
shell: bash
working-directory: autogpt_platform/market
jobs:
test:
permissions:
contents: read
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Setup Supabase
uses: supabase/setup-cli@v1
with:
version: latest
- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Set up Python dependency cache
uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/market/poetry.lock') }}
- name: Install Poetry (Unix)
run: |
curl -sSL https://install.python-poetry.org | python3 -
if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi
- name: Install Python dependencies
run: poetry install
- name: Generate Prisma Client
run: poetry run prisma generate
- id: supabase
name: Start Supabase
working-directory: .
run: |
supabase init
supabase start --exclude postgres-meta,realtime,storage-api,imgproxy,inbucket,studio,edge-runtime,logflare,vector,supavisor
supabase status -o env | sed 's/="/=/; s/"$//' >> $GITHUB_OUTPUT
# outputs:
# DB_URL, API_URL, GRAPHQL_URL, ANON_KEY, SERVICE_ROLE_KEY, JWT_SECRET
- name: Run Database Migrations
run: poetry run prisma migrate dev --name updates
env:
DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
- id: lint
name: Run Linter
run: poetry run lint
# Tests commented out because they do not work with the Prisma mock, nor have they been updated since they were created
# - name: Run pytest with coverage
# run: |
# if [[ "${{ runner.debug }}" == "1" ]]; then
# poetry run pytest -s -vv -o log_cli=true -o log_cli_level=DEBUG test
# else
# poetry run pytest -s -vv test
# fi
# if: success() || (failure() && steps.lint.outcome == 'failure')
# env:
# LOG_LEVEL: ${{ runner.debug && 'DEBUG' || 'INFO' }}
# DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
# SUPABASE_URL: ${{ steps.supabase.outputs.API_URL }}
# SUPABASE_SERVICE_ROLE_KEY: ${{ steps.supabase.outputs.SERVICE_ROLE_KEY }}
# SUPABASE_JWT_SECRET: ${{ steps.supabase.outputs.JWT_SECRET }}
# REDIS_HOST: 'localhost'
# REDIS_PORT: '6379'
# REDIS_PASSWORD: 'testpassword'
env:
CI: true
PLAIN_OUTPUT: True
RUN_ENV: local
PORT: 8080
# - name: Upload coverage reports to Codecov
# uses: codecov/codecov-action@v4
# with:
# token: ${{ secrets.CODECOV_TOKEN }}
# flags: backend,${{ runner.os }}


@@ -1,12 +1,12 @@
name: Repo - Pull Request auto-label
name: "Pull Request auto-label"
on:
# So that PRs touching the same files as the push are updated
push:
branches: [ master, dev, release-* ]
branches: [ master, development, release-* ]
paths-ignore:
- 'classic/forge/tests/vcr_cassettes'
- 'classic/benchmark/reports/**'
- 'forge/tests/vcr_cassettes'
- 'benchmark/reports/**'
# So that the `dirtyLabel` is removed if conflicts are resolved
# We recommend `pull_request_target` so that github secrets are available.
# In `pull_request` we wouldn't be able to change labels of fork PRs


@@ -1,27 +1,27 @@
name: Classic - Python checks
name: Python checks
on:
push:
branches: [ master, dev, ci-test* ]
branches: [ master, development, ci-test* ]
paths:
- '.github/workflows/classic-python-checks-ci.yml'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
- 'classic/benchmark/**'
- '.github/workflows/lint-ci.yml'
- 'autogpt/**'
- 'forge/**'
- 'benchmark/**'
- '**.py'
- '!classic/forge/tests/vcr_cassettes'
- '!forge/tests/vcr_cassettes'
pull_request:
branches: [ master, dev, release-* ]
branches: [ master, development, release-* ]
paths:
- '.github/workflows/classic-python-checks-ci.yml'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
- 'classic/benchmark/**'
- '.github/workflows/lint-ci.yml'
- 'autogpt/**'
- 'forge/**'
- 'benchmark/**'
- '**.py'
- '!classic/forge/tests/vcr_cassettes'
- '!forge/tests/vcr_cassettes'
concurrency:
group: ${{ format('classic-python-checks-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
group: ${{ format('lint-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
defaults:
@@ -40,18 +40,18 @@ jobs:
uses: dorny/paths-filter@v3
with:
filters: |
original_autogpt:
- classic/original_autogpt/autogpt/**
- classic/original_autogpt/tests/**
- classic/original_autogpt/poetry.lock
autogpt:
- autogpt/autogpt/**
- autogpt/tests/**
- autogpt/poetry.lock
forge:
- classic/forge/forge/**
- classic/forge/tests/**
- classic/forge/poetry.lock
- forge/forge/**
- forge/tests/**
- forge/poetry.lock
benchmark:
- classic/benchmark/agbenchmark/**
- classic/benchmark/tests/**
- classic/benchmark/poetry.lock
- benchmark/agbenchmark/**
- benchmark/tests/**
- benchmark/poetry.lock
outputs:
changed-parts: ${{ steps.changes-in.outputs.changes }}
@@ -89,23 +89,23 @@ jobs:
# Install dependencies
- name: Install Python dependencies
run: poetry -C classic/${{ matrix.sub-package }} install
run: poetry -C ${{ matrix.sub-package }} install
# Lint
- name: Lint (isort)
run: poetry run isort --check .
working-directory: classic/${{ matrix.sub-package }}
working-directory: ${{ matrix.sub-package }}
- name: Lint (Black)
if: success() || failure()
run: poetry run black --check .
working-directory: classic/${{ matrix.sub-package }}
working-directory: ${{ matrix.sub-package }}
- name: Lint (Flake8)
if: success() || failure()
run: poetry run flake8 .
working-directory: classic/${{ matrix.sub-package }}
working-directory: ${{ matrix.sub-package }}
types:
needs: get-changed-parts
@@ -141,11 +141,11 @@ jobs:
# Install dependencies
- name: Install Python dependencies
run: poetry -C classic/${{ matrix.sub-package }} install
run: poetry -C ${{ matrix.sub-package }} install
# Typecheck
- name: Typecheck
if: success() || failure()
run: poetry run pyright
working-directory: classic/${{ matrix.sub-package }}
working-directory: ${{ matrix.sub-package }}


@@ -1,21 +0,0 @@
name: Repo - Enforce dev as base branch
on:
pull_request_target:
branches: [ master ]
types: [ opened ]
jobs:
check_pr_target:
runs-on: ubuntu-latest
permissions:
pull-requests: write
steps:
- name: Check if PR is from dev or hotfix
if: ${{ !(startsWith(github.event.pull_request.head.ref, 'hotfix/') || github.event.pull_request.head.ref == 'dev') }}
run: |
gh pr comment ${{ github.event.number }} --repo "$REPO" \
--body $'This PR targets the `master` branch but does not come from `dev` or a `hotfix/*` branch.\n\nAutomatically setting the base branch to `dev`.'
gh pr edit ${{ github.event.number }} --base dev --repo "$REPO"
env:
GITHUB_TOKEN: ${{ github.token }}
REPO: ${{ github.repository }}


@@ -1,4 +1,4 @@
name: Repo - Github Stats
name: github-repo-stats
on:
schedule:


@@ -1,8 +1,7 @@
name: Repo - PR Status Checker
name: PR Status Checker
on:
pull_request:
types: [opened, synchronize, reopened]
merge_group:
jobs:
status-check:
@@ -27,6 +26,6 @@ jobs:
echo "Current directory before running Python script:"
pwd
echo "Attempting to run Python script:"
python .github/workflows/scripts/check_actions_status.py
python check_actions_status.py
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.gitignore

@@ -1,7 +1,7 @@
## Original ignores
.github_access_token
classic/original_autogpt/keys.py
classic/original_autogpt/*.json
autogpt/keys.py
autogpt/*.json
auto_gpt_workspace/*
*.mpeg
.env
@@ -157,7 +157,7 @@ openai/
CURRENT_BULLETIN.md
# AgBenchmark
classic/benchmark/agbenchmark/reports/
agbenchmark/reports/
# Nodejs
package-lock.json
@@ -170,6 +170,4 @@ pri*
ig*
.github_access_token
LICENSE.rtf
autogpt_platform/backend/settings.py
/.auth
/autogpt_platform/frontend/.auth
rnd/autogpt_server/settings.py

.gitmodules

@@ -1,6 +1,3 @@
[submodule "classic/forge/tests/vcr_cassettes"]
path = classic/forge/tests/vcr_cassettes
[submodule "forge/tests/vcr_cassettes"]
path = forge/tests/vcr_cassettes
url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
[submodule "autogpt_platform/supabase"]
path = autogpt_platform/supabase
url = https://github.com/supabase/supabase.git
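
Since CI checks these submodules out with `submodules: true` on `actions/checkout`, a local clone needs the equivalent step; assuming a standard git setup:

```
git submodule update --init --recursive
```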


@@ -10,127 +10,28 @@ repos:
- id: check-symlinks
- id: debug-statements
- repo: https://github.com/Yelp/detect-secrets
rev: v1.5.0
hooks:
- id: detect-secrets
name: Detect secrets
description: Detects high entropy strings that are likely to be passwords.
files: ^autogpt_platform/
stages: [push]
- repo: local
# For proper type checking, all dependencies need to be up-to-date.
# It's also a good idea to check that poetry.lock is consistent with pyproject.toml.
hooks:
- id: poetry-install
name: Check & Install dependencies - AutoGPT Platform - Backend
alias: poetry-install-platform-backend
entry: poetry -C autogpt_platform/backend install
# include autogpt_libs source (since it's a path dependency)
files: ^autogpt_platform/(backend|autogpt_libs)/poetry\.lock$
types: [file]
language: system
pass_filenames: false
- id: poetry-install
name: Check & Install dependencies - AutoGPT Platform - Libs
alias: poetry-install-platform-libs
entry: poetry -C autogpt_platform/autogpt_libs install
files: ^autogpt_platform/autogpt_libs/poetry\.lock$
types: [file]
language: system
pass_filenames: false
- id: poetry-install
name: Check & Install dependencies - Classic - AutoGPT
alias: poetry-install-classic-autogpt
entry: poetry -C classic/original_autogpt install
# include forge source (since it's a path dependency)
files: ^classic/(original_autogpt|forge)/poetry\.lock$
types: [file]
language: system
pass_filenames: false
- id: poetry-install
name: Check & Install dependencies - Classic - Forge
alias: poetry-install-classic-forge
entry: poetry -C classic/forge install
files: ^classic/forge/poetry\.lock$
types: [file]
language: system
pass_filenames: false
- id: poetry-install
name: Check & Install dependencies - Classic - Benchmark
alias: poetry-install-classic-benchmark
entry: poetry -C classic/benchmark install
files: ^classic/benchmark/poetry\.lock$
types: [file]
language: system
pass_filenames: false
- repo: local
# For proper type checking, Prisma client must be up-to-date.
hooks:
- id: prisma-generate
name: Prisma Generate - AutoGPT Platform - Backend
alias: prisma-generate-platform-backend
entry: bash -c 'cd autogpt_platform/backend && poetry run prisma generate'
# include everything that triggers poetry install + the prisma schema
files: ^autogpt_platform/((backend|autogpt_libs)/poetry\.lock|backend/schema.prisma)$
types: [file]
language: system
pass_filenames: false
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.7.2
hooks:
- id: ruff
name: Lint (Ruff) - AutoGPT Platform - Backend
alias: ruff-lint-platform-backend
files: ^autogpt_platform/backend/
args: [--fix]
- id: ruff
name: Lint (Ruff) - AutoGPT Platform - Libs
alias: ruff-lint-platform-libs
files: ^autogpt_platform/autogpt_libs/
args: [--fix]
- repo: local
# isort needs the context of which packages are installed to function, so we
# can't use a vendored isort pre-commit hook (which runs in its own isolated venv).
hooks:
- id: isort
name: Lint (isort) - AutoGPT Platform - Backend
alias: isort-platform-backend
entry: poetry -C autogpt_platform/backend run isort -p backend
files: ^autogpt_platform/backend/
- id: isort-autogpt
name: Lint (isort) - AutoGPT
entry: poetry -C autogpt run isort
files: ^autogpt/
types: [file, python]
language: system
- id: isort
name: Lint (isort) - Classic - AutoGPT
alias: isort-classic-autogpt
entry: poetry -C classic/original_autogpt run isort -p autogpt
files: ^classic/original_autogpt/
- id: isort-forge
name: Lint (isort) - Forge
entry: poetry -C forge run isort
files: ^forge/
types: [file, python]
language: system
- id: isort
name: Lint (isort) - Classic - Forge
alias: isort-classic-forge
entry: poetry -C classic/forge run isort -p forge
files: ^classic/forge/
types: [file, python]
language: system
- id: isort
name: Lint (isort) - Classic - Benchmark
alias: isort-classic-benchmark
entry: poetry -C classic/benchmark run isort -p agbenchmark
files: ^classic/benchmark/
- id: isort-benchmark
name: Lint (isort) - Benchmark
entry: poetry -C benchmark run isort
files: ^benchmark/
types: [file, python]
language: system
@@ -141,6 +42,7 @@ repos:
hooks:
- id: black
name: Lint (Black)
language_version: python3.10
- repo: https://github.com/PyCQA/flake8
rev: 7.0.0
@@ -148,111 +50,78 @@ repos:
# them separately.
hooks:
- id: flake8
name: Lint (Flake8) - Classic - AutoGPT
alias: flake8-classic-autogpt
files: ^classic/original_autogpt/(autogpt|scripts|tests)/
args: [--config=classic/original_autogpt/.flake8]
name: Lint (Flake8) - AutoGPT
alias: flake8-autogpt
files: ^autogpt/(autogpt|scripts|tests)/
args: [--config=autogpt/.flake8]
- id: flake8
name: Lint (Flake8) - Classic - Forge
alias: flake8-classic-forge
files: ^classic/forge/(forge|tests)/
args: [--config=classic/forge/.flake8]
name: Lint (Flake8) - Forge
alias: flake8-forge
files: ^forge/(forge|tests)/
args: [--config=forge/.flake8]
- id: flake8
name: Lint (Flake8) - Classic - Benchmark
alias: flake8-classic-benchmark
files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
args: [--config=classic/benchmark/.flake8]
name: Lint (Flake8) - Benchmark
alias: flake8-benchmark
files: ^benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
args: [--config=benchmark/.flake8]
- repo: local
# To have watertight type checking, we check *all* the files in an affected
# project. To trigger on poetry.lock we also reset the file `types` filter.
hooks:
- id: pyright
name: Typecheck - AutoGPT Platform - Backend
alias: pyright-platform-backend
entry: poetry -C autogpt_platform/backend run pyright
args: [-p, autogpt_platform/backend, autogpt_platform/backend]
name: Typecheck - AutoGPT
alias: pyright-autogpt
entry: poetry -C autogpt run pyright
args: [-p, autogpt, autogpt]
# include forge source (since it's a path dependency) but exclude *_test.py files:
files: ^autogpt_platform/(backend/((backend|test)/|(\w+\.py|poetry\.lock)$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
files: ^(autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
types: [file]
language: system
pass_filenames: false
- id: pyright
name: Typecheck - AutoGPT Platform - Libs
alias: pyright-platform-libs
entry: poetry -C autogpt_platform/autogpt_libs run pyright
args: [-p, autogpt_platform/autogpt_libs, autogpt_platform/autogpt_libs]
files: ^autogpt_platform/autogpt_libs/(autogpt_libs/|poetry\.lock$)
name: Typecheck - Forge
alias: pyright-forge
entry: poetry -C forge run pyright
args: [-p, forge, forge]
files: ^forge/(forge/|poetry\.lock$)
types: [file]
language: system
pass_filenames: false
- id: pyright
name: Typecheck - Classic - AutoGPT
alias: pyright-classic-autogpt
entry: poetry -C classic/original_autogpt run pyright
args: [-p, classic/original_autogpt, classic/original_autogpt]
# include forge source (since it's a path dependency) but exclude *_test.py files:
files: ^(classic/original_autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
types: [file]
language: system
pass_filenames: false
- id: pyright
name: Typecheck - Classic - Forge
alias: pyright-classic-forge
entry: poetry -C classic/forge run pyright
args: [-p, classic/forge, classic/forge]
files: ^classic/forge/(forge/|poetry\.lock$)
types: [file]
language: system
pass_filenames: false
- id: pyright
name: Typecheck - Classic - Benchmark
alias: pyright-classic-benchmark
entry: poetry -C classic/benchmark run pyright
args: [-p, classic/benchmark, classic/benchmark]
files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
name: Typecheck - Benchmark
alias: pyright-benchmark
entry: poetry -C benchmark run pyright
args: [-p, benchmark, benchmark]
files: ^benchmark/(agbenchmark/|tests/|poetry\.lock$)
types: [file]
language: system
pass_filenames: false
- repo: local
hooks:
- id: pytest
name: Run tests - AutoGPT Platform - Backend
alias: pytest-platform-backend
entry: bash -c 'cd autogpt_platform/backend && poetry run pytest'
# include autogpt_libs source (since it's a path dependency) but exclude *_test.py files:
files: ^autogpt_platform/(backend/((backend|test)/|poetry\.lock$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
language: system
pass_filenames: false
- id: pytest
name: Run tests - Classic - AutoGPT (excl. slow tests)
alias: pytest-classic-autogpt
entry: bash -c 'cd classic/original_autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
- id: pytest-autogpt
name: Run tests - AutoGPT (excl. slow tests)
entry: bash -c 'cd autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
# include forge source (since it's a path dependency) but exclude *_test.py files:
files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
files: ^(autogpt/((autogpt|tests)/|poetry\.lock$)|forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
language: system
pass_filenames: false
- id: pytest
name: Run tests - Classic - Forge (excl. slow tests)
alias: pytest-classic-forge
entry: bash -c 'cd classic/forge && poetry run pytest --cov=forge -m "not slow"'
files: ^classic/forge/(forge/|tests/|poetry\.lock$)
- id: pytest-forge
name: Run tests - Forge (excl. slow tests)
entry: bash -c 'cd forge && poetry run pytest --cov=forge -m "not slow"'
files: ^forge/(forge/|tests/|poetry\.lock$)
language: system
pass_filenames: false
- id: pytest
name: Run tests - Classic - Benchmark
alias: pytest-classic-benchmark
entry: bash -c 'cd classic/benchmark && poetry run pytest --cov=benchmark'
files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
- id: pytest-benchmark
name: Run tests - Benchmark
entry: bash -c 'cd benchmark && poetry run pytest --cov=benchmark'
files: ^benchmark/(agbenchmark/|tests/|poetry\.lock$)
language: system
pass_filenames: false
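
To exercise these hooks locally — assuming `pre-commit` is installed — the usual invocations are:

```
pre-commit install                        # run hooks on every commit
pre-commit install --hook-type pre-push   # needed for hooks with `stages: [push]`, e.g. detect-secrets
pre-commit run --all-files                # one-off run across the whole repo
```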


@@ -1,45 +1,44 @@
{
"folders": [
{
"name": "frontend",
"path": "../autogpt_platform/frontend"
"name": "autogpt",
"path": "../autogpt"
},
{
"name": "backend",
"path": "../autogpt_platform/backend"
},
{
"name": "market",
"path": "../autogpt_platform/market"
},
{
"name": "lib",
"path": "../autogpt_platform/autogpt_libs"
},
{
"name": "infra",
"path": "../autogpt_platform/infra"
"name": "benchmark",
"path": "../benchmark"
},
{
"name": "docs",
"path": "../docs"
},
{
"name": "classic - autogpt",
"path": "../classic/original_autogpt"
"name": "forge",
"path": "../forge"
},
{
"name": "classic - benchmark",
"path": "../classic/benchmark"
"name": "frontend",
"path": "../frontend"
},
{
"name": "classic - forge",
"path": "../classic/forge"
"name": "autogpt_server",
"path": "../rnd/autogpt_server"
},
{
"name": "classic - frontend",
"path": "../classic/frontend"
"name": "autogpt_builder",
"path": "../rnd/autogpt_builder"
},
{
"name": "market",
"path": "../rnd/market"
},
{
"name": "lib",
"path": "../rnd/autogpt_libs"
},
{
"name": "infra",
"path": "../rnd/infra"
},
{
"name": "[root]",

.vscode/launch.json

@@ -1,67 +0,0 @@
{
"version": "0.2.0",
"configurations": [
{
"name": "Frontend: Server Side",
"type": "node-terminal",
"request": "launch",
"cwd": "${workspaceFolder}/autogpt_platform/frontend",
"command": "yarn dev"
},
{
"name": "Frontend: Client Side",
"type": "msedge",
"request": "launch",
"url": "http://localhost:3000"
},
{
"name": "Frontend: Full Stack",
"type": "node-terminal",
"request": "launch",
"command": "yarn dev",
"cwd": "${workspaceFolder}/autogpt_platform/frontend",
"serverReadyAction": {
"pattern": "- Local:.+(https?://.+)",
"uriFormat": "%s",
"action": "debugWithEdge"
}
},
{
"name": "Backend",
"type": "debugpy",
"request": "launch",
"module": "backend.app",
// "env": {
// "ENV": "dev"
// },
"envFile": "${workspaceFolder}/backend/.env",
"justMyCode": false,
"cwd": "${workspaceFolder}/autogpt_platform/backend"
},
{
"name": "Marketplace",
"type": "debugpy",
"request": "launch",
"module": "autogpt_platform.market.main",
"env": {
"ENV": "dev"
},
"envFile": "${workspaceFolder}/market/.env",
"justMyCode": false,
"cwd": "${workspaceFolder}/market"
}
],
"compounds": [
{
"name": "Everything",
"configurations": ["Backend", "Frontend: Full Stack"],
// "preLaunchTask": "${defaultBuildTask}",
"stopAll": true,
"presentation": {
"hidden": false,
"order": 0
}
}
]
}


@@ -10,9 +10,6 @@ Also check out our [🚀 Roadmap][roadmap] for information about our priorities
[roadmap]: https://github.com/Significant-Gravitas/AutoGPT/discussions/6971
[kanban board]: https://github.com/orgs/Significant-Gravitas/projects/1
## Contributing to the AutoGPT Platform Folder
All contributions to [the autogpt_platform folder](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform) will be under our [Contribution License Agreement](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform/Contributor%20License%20Agreement%20(CLA).md). By making a pull request contributing to this folder, you agree to the terms of our CLA for your contribution. All contributions to other folders will be under the MIT license.
## In short
1. Avoid duplicate work, issues, PRs etc.
2. We encourage you to collaborate with fellow community members on some of our bigger


@@ -29,7 +29,7 @@ ENV PATH="$POETRY_HOME/bin:$PATH"
RUN poetry config installer.max-workers 10
WORKDIR /app/autogpt
COPY original_autogpt/pyproject.toml original_autogpt/poetry.lock ./
COPY autogpt/pyproject.toml autogpt/poetry.lock ./
# Include forge so it can be used as a path dependency
COPY forge/ ../forge
@@ -42,19 +42,20 @@ ENTRYPOINT ["poetry", "run", "autogpt"]
CMD []
# dev build -> include everything
FROM autogpt-base AS autogpt-dev
FROM autogpt-base as autogpt-dev
RUN poetry install --no-cache --no-root \
&& rm -rf $(poetry env info --path)/src
ONBUILD COPY original_autogpt/ ./
ONBUILD RUN mkdir -p ./data
ONBUILD COPY autogpt/ ./
# release build -> include bare minimum
FROM autogpt-base AS autogpt-release
FROM autogpt-base as autogpt-release
RUN poetry install --no-cache --no-root --without dev \
&& rm -rf $(poetry env info --path)/src
ONBUILD COPY original_autogpt/ ./autogpt
ONBUILD COPY original_autogpt/README.md ./README.md
ONBUILD RUN mkdir -p ./data
ONBUILD COPY autogpt/autogpt/ ./autogpt
ONBUILD COPY autogpt/scripts/ ./scripts
ONBUILD COPY autogpt/plugins/ ./plugins
ONBUILD COPY autogpt/README.md ./README.md
ONBUILD RUN mkdir ./data
FROM autogpt-${BUILD_TYPE} AS autogpt
RUN poetry install --only-root
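
Either stage of the Dockerfile above can be selected at build time via the `BUILD_TYPE` build argument; a sketch, assuming the Dockerfile declares `ARG BUILD_TYPE` and the build context is the classic folder:

```
docker build --build-arg BUILD_TYPE=dev     -t autogpt:dev .
docker build --build-arg BUILD_TYPE=release -t autogpt:release .
```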


@@ -14,10 +14,10 @@ This project supports Linux (Debian-based), Mac, and Windows Subsystem for Linux
To fork the repository, follow these steps:
- Navigate to the main page of the repository.
![Repository](../docs/content/imgs/quickstart/001_repo.png)
![Repository](docs/content/imgs/quickstart/001_repo.png)
- In the top-right corner of the page, click Fork.
![Create Fork UI](../docs/content/imgs/quickstart/002_fork.png)
![Create Fork UI](docs/content/imgs/quickstart/002_fork.png)
- On the next page, select your GitHub account to create the fork.
- Wait for the forking process to complete. You now have a copy of the repository in your GitHub account.
@@ -27,11 +27,11 @@ This project supports Linux (Debian-based), Mac, and Windows Subsystem for Linux
- Navigate to the directory where you want to clone the repository.
- Run the git clone command for the fork you just created.
![Clone the Repository](../docs/content/imgs/quickstart/003_clone.png)
![Clone the Repository](docs/content/imgs/quickstart/003_clone.png)
- Then open the project in your IDE.
![Open the Project in your IDE](../docs/content/imgs/quickstart/004_ide.png)
![Open the Project in your IDE](docs/content/imgs/quickstart/004_ide.png)
4. **Setup the Project**
Next, we need to set up the required dependencies. We have a tool to help you perform all the tasks on the repo.
@@ -40,7 +40,7 @@ This project supports Linux (Debian-based), Mac, and Windows Subsystem for Linux
The first command you need to use is `./run setup`. This will guide you through setting up your system.
Initially, you will get instructions for installing Flutter and Chrome and setting up your GitHub access token like the following image:
![Setup the Project](../docs/content/imgs/quickstart/005_setup.png)
![Setup the Project](docs/content/imgs/quickstart/005_setup.png)
### For Windows Users
@@ -78,7 +78,7 @@ If you continue to experience issues, consider storing your project files within
You can keep running the command to get feedback on where you are up to with your setup.
When setup has been completed, the command will return an output like this:
![Setup Complete](../docs/content/imgs/quickstart/006_setup_complete.png)
![Setup Complete](docs/content/imgs/quickstart/006_setup_complete.png)
## Creating Your Agent
@@ -91,7 +91,7 @@ Tips for naming your agent:
Examples: `SwiftyosAssistant`, `PwutsPRAgent`, `MySuperAgent`
![Create an Agent](../docs/content/imgs/quickstart/007_create_agent.png)
![Create an Agent](docs/content/imgs/quickstart/007_create_agent.png)
## Running your Agent
@@ -99,15 +99,15 @@ Your agent can be started using the command: `./run agent start YOUR_AGENT_NAME`
This starts the agent on the URL: `http://localhost:8000/`
![Start the Agent](../docs/content/imgs/quickstart/009_start_agent.png)
![Start the Agent](docs/content/imgs/quickstart/009_start_agent.png)
The front end can be accessed from `http://localhost:8000/`; first, you must log in using either a Google account or your GitHub account.
![Login](../docs/content/imgs/quickstart/010_login.png)
![Login](docs/content/imgs/quickstart/010_login.png)
Upon logging in, you will see a page like this, with your task history down the left-hand side and the 'chat' window for sending tasks to your agent.
![Login](../docs/content/imgs/quickstart/011_home.png)
![Login](docs/content/imgs/quickstart/011_home.png)
When you have finished with your agent or just need to restart it, use Ctrl-C to end the session. Then, you can re-run the start command.
@@ -160,7 +160,7 @@ The benchmark has been split into different categories of skills you can test yo
./run benchmark tests list
```
![Login](../docs/content/imgs/quickstart/012_tests.png)
![Login](docs/content/imgs/quickstart/012_tests.png)
Finally, you can run the benchmark with


@@ -1,13 +1,7 @@
All portions of this repository are under one of two licenses. The majority of the AutoGPT repository is under the MIT License below. The autogpt_platform folder is under the
Polyform Shield License.
MIT License
Copyright (c) 2023 Toran Bruce Richards
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
@@ -15,11 +9,9 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE


@@ -1,71 +1,43 @@
# AutoGPT: Build, Deploy, and Run AI Agents
# AutoGPT: Build & Use AI Agents
[![Discord Follow](https://dcbadge.vercel.app/api/server/autogpt?style=flat)](https://discord.gg/autogpt) &ensp;
[![Twitter Follow](https://img.shields.io/twitter/follow/Auto_GPT?style=social)](https://twitter.com/Auto_GPT) &ensp;
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
**AutoGPT** is a powerful platform that allows you to create, deploy, and manage continuous AI agents that automate complex workflows.
**AutoGPT** is a powerful tool that lets you create and run intelligent agents. These agents can perform various tasks automatically, making your life easier.
## Hosting Options
- Download to self-host
- [Join the Waitlist](https://bit.ly/3ZDijAI) for the cloud-hosted beta
## How to Get Started
## How to Set Up for Self-Hosting
> [!NOTE]
> Setting up and hosting the AutoGPT Platform yourself is a technical process.
> If you'd rather something that just works, we recommend [joining the waitlist](https://bit.ly/3ZDijAI) for the cloud-hosted beta.
https://github.com/user-attachments/assets/8508f4dc-b362-4cab-900f-644964a96cdf
https://github.com/user-attachments/assets/d04273a5-b36a-4a37-818e-f631ce72d603
### 🧱 AutoGPT Builder
This tutorial assumes you have Docker, VSCode, git and npm installed.
The AutoGPT Builder is the frontend. It allows you to design agents using an easy flowchart style. You build your agent by connecting blocks, where each block performs a single action. It's simple and intuitive!
### 🧱 AutoGPT Frontend
The AutoGPT frontend is where users interact with our powerful AI automation platform. It offers multiple ways to engage with and leverage our AI agents. This is the interface where you'll bring your AI automation ideas to life:
**Agent Builder:** For those who want to customize, our intuitive, low-code interface allows you to design and configure your own AI agents.
**Workflow Management:** Build, modify, and optimize your automation workflows with ease. You build your agent by connecting blocks, where each block performs a single action.
**Deployment Controls:** Manage the lifecycle of your agents, from testing to production.
**Ready-to-Use Agents:** Don't want to build? Simply select from our library of pre-configured agents and put them to work immediately.
**Agent Interaction:** Whether you've built your own or are using pre-configured agents, easily run and interact with them through our user-friendly interface.
**Monitoring and Analytics:** Keep track of your agents' performance and gain insights to continually improve your automation processes.
[Read this guide](https://docs.agpt.co/platform/new_blocks/) to learn how to build your own custom blocks.
[Read this guide](https://docs.agpt.co/server/new_blocks/) to learn how to build your own custom blocks.
### 💽 AutoGPT Server
The AutoGPT Server is the powerhouse of our platform. This is where your agents run. Once deployed, agents can be triggered by external sources and can operate continuously. It contains all the essential components that make AutoGPT run smoothly.
**Source Code:** The core logic that drives our agents and automation processes.
**Infrastructure:** Robust systems that ensure reliable and scalable performance.
**Marketplace:** A comprehensive marketplace where you can find and deploy a wide range of pre-built agents.
The AutoGPT Server is the backend. This is where your agents run. Once deployed, agents can be triggered by external sources and can operate continuously.
### 🐙 Example Agents
Here are two examples of what you can do with AutoGPT:
1. **Generate Viral Videos from Trending Topics**
- This agent reads topics on Reddit.
- It identifies trending topics.
- It then automatically creates a short-form video based on the content.
1. **Reddit Marketing Agent**
- This agent reads comments on Reddit.
- It looks for people asking about your product.
- It then automatically responds to them.
2. **Identify Top Quotes from Videos for Social Media**
2. **YouTube Content Repurposing Agent**
- This agent subscribes to your YouTube channel.
- When you post a new video, it transcribes it.
- It uses AI to identify the most impactful quotes to generate a summary.
- Then, it writes a post to automatically publish to your social media.
- It uses AI to write a search engine optimized blog post.
- Then, it publishes this blog post to your Medium account.
These examples show just a glimpse of what you can achieve with AutoGPT! You can create customized workflows to build agents for any use case.
These examples show just a glimpse of what you can achieve with AutoGPT!
---
### Mission and Licensing
Our mission is to provide the tools so that you can focus on what matters:
- 🏗️ **Building** - Lay the foundation for something amazing.
@@ -78,28 +50,20 @@ Be part of the revolution! **AutoGPT** is here to stay, at the forefront of AI i
&ensp;|&ensp;
**🚀 [Contributing](CONTRIBUTING.md)**
**Licensing:**
MIT License: The majority of the AutoGPT repository is under the MIT License.
Polyform Shield License: This license applies to the autogpt_platform folder.
For more information, see https://agpt.co/blog/introducing-the-autogpt-platform
---
## 🤖 AutoGPT Classic
> Below is information about the classic version of AutoGPT.
**🛠️ [Build your own Agent - Quickstart](classic/FORGE-QUICKSTART.md)**
**🛠️ [Build your own Agent - Quickstart](FORGE-QUICKSTART.md)**
### 🏗️ Forge
**Forge your own agent!** &ndash; Forge is a ready-to-go toolkit to build your own agent application. It handles most of the boilerplate code, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from [`forge`](/classic/forge/) can also be used individually to speed up development and reduce boilerplate in your agent project.
**Forge your own agent!** &ndash; Forge is a ready-to-go template for your agent application. All the boilerplate code is already handled, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from the [`forge.sdk`](/forge/forge/sdk) can also be used individually to speed up development and reduce boilerplate in your agent project.
🚀 [**Getting Started with Forge**](https://github.com/Significant-Gravitas/AutoGPT/blob/master/classic/forge/tutorials/001_getting_started.md) &ndash;
🚀 [**Getting Started with Forge**](https://github.com/Significant-Gravitas/AutoGPT/blob/master/forge/tutorials/001_getting_started.md) &ndash;
This guide will walk you through the process of creating your own agent and using the benchmark and user interface.
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/forge) about Forge
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/forge) about Forge
### 🎯 Benchmark
@@ -109,7 +73,7 @@ This guide will walk you through the process of creating your own agent and usin
📦 [`agbenchmark`](https://pypi.org/project/agbenchmark/) on Pypi
&ensp;|&ensp;
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/benchmark) about the Benchmark
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/blob/master/benchmark) about the Benchmark
### 💻 UI
@@ -119,7 +83,7 @@ This guide will walk you through the process of creating your own agent and usin
The frontend works out-of-the-box with all agents in the repo. Just use the [CLI] to run your agent of choice!
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/frontend) about the Frontend
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/frontend) about the Frontend
### ⌨️ CLI
@@ -158,8 +122,6 @@ To maintain a uniform standard and ensure seamless compatibility with many curre
---
## Stars stats
<p align="center">
<a href="https://star-history.com/#Significant-Gravitas/AutoGPT">
<picture>
@@ -169,10 +131,3 @@ To maintain a uniform standard and ensure seamless compatibility with many curre
</picture>
</a>
</p>
## ⚡ Contributors
<a href="https://github.com/Significant-Gravitas/AutoGPT/graphs/contributors" alt="View Contributors">
<img src="https://contrib.rocks/image?repo=Significant-Gravitas/AutoGPT&max=1000&columns=10" alt="Contributors" />
</a>


@@ -1,47 +1,66 @@
# Security Policy
## Reporting Security Issues
- [**Using AutoGPT Securely**](#using-AutoGPT-securely)
- [Restrict Workspace](#restrict-workspace)
- [Untrusted inputs](#untrusted-inputs)
- [Data privacy](#data-privacy)
- [Untrusted environments or networks](#untrusted-environments-or-networks)
- [Multi-Tenant environments](#multi-tenant-environments)
- [**Reporting a Vulnerability**](#reporting-a-vulnerability)
We take the security of our project seriously. If you believe you have found a security vulnerability, please report it to us privately. **Please do not report security vulnerabilities through public GitHub issues, discussions, or pull requests.**
## Using AutoGPT Securely
> **Important Note**: Any code within the `classic/` folder is considered legacy, unsupported, and out of scope for security reports. We will not address security vulnerabilities in this deprecated code.
### Restrict Workspace
Instead, please report them via:
- [GitHub Security Advisory](https://github.com/Significant-Gravitas/AutoGPT/security/advisories/new)
- [Huntr.dev](https://huntr.com/repos/significant-gravitas/autogpt) - where you may be eligible for a bounty
Since agents can read and write files, it is important to keep them restricted to a specific workspace. This happens by default *unless* RESTRICT_TO_WORKSPACE is set to False.
### Reporting Process
1. **Submit Report**: Use one of the above channels to submit your report
2. **Response Time**: Our team will acknowledge receipt of your report within 14 business days.
3. **Collaboration**: We will collaborate with you to understand and validate the issue
4. **Resolution**: We will work on a fix and coordinate the release process
Disabling RESTRICT_TO_WORKSPACE can increase security risks. However, if you still need to disable it, consider running AutoGPT inside a [sandbox](https://developers.google.com/code-sandboxing) to mitigate some of these risks.
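
For reference, the workspace restriction is just an environment flag; a minimal `.env` sketch — the variable name comes from the text above, and the default-to-true behavior is as described there:

```
# keep agents confined to their workspace directory (the safe default)
RESTRICT_TO_WORKSPACE=True
```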
### Disclosure Policy
- Please provide detailed reports with reproducible steps
- Include the version/commit hash where you discovered the vulnerability
- Allow us a 90-day security fix window before any public disclosure
- Share any potential mitigations or workarounds if known
### Untrusted inputs
## Supported Versions
Only the following versions are eligible for security updates:
When handling untrusted inputs, it's crucial to isolate the execution and carefully pre-process inputs to mitigate script injection risks.
| Version | Supported |
|---------|-----------|
| Latest release on master branch | ✅ |
| Development commits (pre-master) | ✅ |
| Classic folder (deprecated) | ❌ |
| All other versions | ❌ |
For maximum security when handling untrusted inputs, you may need to employ the following:
## Security Best Practices
When using this project:
1. Always use the latest stable version
2. Review security advisories before updating
3. Follow our security documentation and guidelines
4. Keep your dependencies up to date
5. Do not use code from the `classic/` folder as it is deprecated and unsupported
* Sandboxing: Isolate the process.
* Updates: Keep your libraries (including AutoGPT) updated with the latest security patches.
* Input Sanitization: Before feeding data to the model, sanitize inputs rigorously. This involves techniques such as:
* Validation: Enforce strict rules on allowed characters and data types.
* Filtering: Remove potentially malicious scripts or code fragments.
* Encoding: Convert special characters into safe representations.
* Verification: Run tooling that identifies potential script injections (e.g. [models that detect prompt injection attempts](https://python.langchain.com/docs/guides/safety/hugging_face_prompt_injection)).
## Past Security Advisories
For a list of past security advisories, please visit our [Security Advisory Page](https://github.com/Significant-Gravitas/AutoGPT/security/advisories) and [Huntr Disclosures Page](https://huntr.com/repos/significant-gravitas/autogpt).
### Data privacy
---
Last updated: November 2024
To protect sensitive data from potential leaks or unauthorized access, it is crucial to sandbox the agent execution. This means running it in a secure, isolated environment, which helps mitigate many attack vectors.
### Untrusted environments or networks
Since AutoGPT performs network calls to the OpenAI API, it is important to always run it within trusted environments and networks. Running it in untrusted environments can expose your API key to attackers.
Additionally, running it on an untrusted network can expose your data to potential network attacks.
However, even when running on trusted networks, it is important to always encrypt sensitive data while sending it over the network.
### Multi-Tenant environments
If you intend to run multiple AutoGPT brains in parallel, it is your responsibility to ensure the models do not interact or access each other's data.
The primary areas of concern are tenant isolation, resource allocation, model sharing and hardware attacks.
- Tenant Isolation: you must make sure that the tenants run separately to prevent unwanted access to the data from other tenants. Keeping model network traffic separate is also important because you not only prevent unauthorized access to data, but also prevent malicious users or tenants sending prompts to execute under another tenant's identity.
- Resource Allocation: a denial of service caused by one tenant can affect the overall system health. Implement safeguards like rate limits, access controls, and health monitoring.
- Data Sharing: in a multi-tenant design with data sharing, ensure tenants and users understand the security risks and sandbox agent execution to mitigate risks.
- Hardware Attacks: the hardware (GPUs or TPUs) can also be attacked. [Research](https://scholar.google.com/scholar?q=gpu+side+channel) has shown that side channel attacks on GPUs are possible, which can leak data from other brains or processes running on the same system at the same time.
## Reporting a Vulnerability
Beware that none of the topics under [Using AutoGPT Securely](#using-AutoGPT-securely) are considered vulnerabilities on AutoGPT.
However, if you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
Please disclose it as a private [security advisory](https://github.com/Significant-Gravitas/AutoGPT/security/advisories/new).
This project is maintained by a team of volunteers on a reasonable-effort basis, so please give us at least 90 days to work on a fix before public exposure.


@@ -1,2 +1,2 @@
[run]
[run]
relative_files = true


@@ -1,5 +1,5 @@
# To boot the app run the following:
# docker compose run auto-gpt
# docker-compose run auto-gpt
version: '3.9'
services:


@@ -1,6 +1,6 @@
## Original ignores
classic/original_autogpt/keys.py
classic/original_autogpt/*.json
autogpt/keys.py
autogpt/*.json
*.mpeg
.env
azure.yaml

autogpt/.vscode/settings.json (new file)

@@ -0,0 +1,3 @@
{
"python.analysis.typeCheckingMode": "basic",
}


@@ -120,9 +120,9 @@ by default on `http://localhost:8000`.
For more comprehensive instructions, see the [user guide][docs/usage].
[docs]: https://docs.agpt.co/autogpt
[docs/setup]: https://docs.agpt.co/classic/original_autogpt/setup
[docs/usage]: https://docs.agpt.co/classic/original_autogpt/usage
[docs/plugins]: https://docs.agpt.co/classic/original_autogpt/plugins
[docs/setup]: https://docs.agpt.co/autogpt/setup
[docs/usage]: https://docs.agpt.co/autogpt/usage
[docs/plugins]: https://docs.agpt.co/autogpt/plugins
## 📚 Resources
* 📔 AutoGPT [project wiki](https://github.com/Significant-Gravitas/AutoGPT/wiki)


@@ -34,4 +34,4 @@ class MyAgent(Agent):
self.my_component = MyComponent()
```
For more customization, you can override the `propose_action` and `execute` or even subclass `BaseAgent` directly. This way you can have full control over the agent's components and behavior. Have a look at the [implementation of Agent](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/original_autogpt/agents/agent.py) for more details.
For more customization, you can override the `propose_action` and `execute` or even subclass `BaseAgent` directly. This way you can have full control over the agent's components and behavior. Have a look at the [implementation of Agent](https://github.com/Significant-Gravitas/AutoGPT/tree/master/autogpt/autogpt/agents/agent.py) for more details.


@@ -97,9 +97,7 @@ class AgentProtocolServer:
app.include_router(router, prefix="/ap/v1")
script_dir = os.path.dirname(os.path.realpath(__file__))
frontend_path = (
pathlib.Path(script_dir)
.joinpath("../../../classic/frontend/build/web")
.resolve()
pathlib.Path(script_dir).joinpath("../../../frontend/build/web").resolve()
)
if os.path.exists(frontend_path):


@@ -148,7 +148,7 @@ async def assert_config_has_required_llm_api_keys(config: AppConfig) -> None:
)
logger.info(
"For further instructions: "
"https://docs.agpt.co/classic/original_autogpt/setup/#anthropic"
"https://docs.agpt.co/autogpt/setup/#anthropic"
)
raise ValueError("Anthropic is unavailable: can't load credentials") from e
@@ -176,15 +176,14 @@ async def assert_config_has_required_llm_api_keys(config: AppConfig) -> None:
logger.error("Set your Groq API key in .env or as an environment variable")
logger.info(
"For further instructions: "
+ "https://docs.agpt.co/classic/original_autogpt/setup/#groq"
"For further instructions: https://docs.agpt.co/autogpt/setup/#groq"
)
raise ValueError("Groq is unavailable: can't load credentials")
except AuthenticationError as e:
logger.error("The Groq API key is invalid!")
logger.info(
"For instructions to get and set a new API key: "
"https://docs.agpt.co/classic/original_autogpt/setup/#groq"
"https://docs.agpt.co/autogpt/setup/#groq"
)
raise ValueError("Groq is unavailable: invalid API key") from e
@@ -203,15 +202,14 @@ async def assert_config_has_required_llm_api_keys(config: AppConfig) -> None:
"Set your OpenAI API key in .env or as an environment variable"
)
logger.info(
"For further instructions: "
+ "https://docs.agpt.co/classic/original_autogpt/setup/#openai"
"For further instructions: https://docs.agpt.co/autogpt/setup/#openai"
)
raise ValueError("OpenAI is unavailable: can't load credentials")
except AuthenticationError as e:
logger.error("The OpenAI API key is invalid!")
logger.info(
"For instructions to get and set a new API key: "
"https://docs.agpt.co/classic/original_autogpt/setup/#openai"
"https://docs.agpt.co/autogpt/setup/#openai"
)
raise ValueError("OpenAI is unavailable: invalid API key") from e


@@ -22,7 +22,7 @@ logger = logging.getLogger(__name__)
def get_bulletin_from_web():
try:
response = requests.get(
"https://raw.githubusercontent.com/Significant-Gravitas/AutoGPT/master/classic/original_autogpt/BULLETIN.md" # noqa: E501
"https://raw.githubusercontent.com/Significant-Gravitas/AutoGPT/master/autogpt/BULLETIN.md" # noqa: E501
)
if response.status_code == 200:
return response.text
@@ -45,7 +45,7 @@ def vcs_state_diverges_from_master() -> bool:
"""
Returns whether a git repo is present and contains changes that are not in `master`.
"""
paths_we_care_about = "classic/original_autogpt/classic/original_autogpt/**/*.py"
paths_we_care_about = "autogpt/autogpt/**/*.py"
try:
repo = Repo(search_parent_directories=True)


@@ -14,7 +14,7 @@ services:
ports:
- "8000:8000"
volumes:
- ./:/app/classic/original_autogpt/
- ./:/app/autogpt/
- ./docker-compose.yml:/app/docker-compose.yml:ro
# - ./Dockerfile:/app/Dockerfile:ro
profiles: ["exclude-from-up"]
@@ -33,8 +33,8 @@ services:
entrypoint: ["poetry", "run"]
command: ["pytest", "-v"]
volumes:
- ./autogpt:/app/classic/original_autogpt/autogpt
- ./tests:/app/classic/original_autogpt/tests
- ./autogpt:/app/autogpt/autogpt
- ./tests:/app/autogpt/tests
depends_on:
- minio
profiles: ["exclude-from-up"]


(binary image file changed; 33 KiB before and after)


@@ -348,10 +348,10 @@ tiktoken = ">=0.7.0,<1.0.0"
toml = "^0.10.2"
uvicorn = {version = ">=0.23.2,<1", extras = ["standard"]}
watchdog = "4.0.0"
webdriver-manager = "^4.0.2"
webdriver-manager = "^4.0.1"
[package.extras]
benchmark = ["agbenchmark @ file:///home/reinier/code/agpt/AutoGPT/classic/benchmark"]
benchmark = ["agbenchmark @ file:///Users/czerwinski/Projects/AutoGPT/benchmark"]
[package.source]
type = "directory"
@@ -6416,13 +6416,13 @@ wasabi = ">=0.9.1,<1.2.0"
[[package]]
name = "webdriver-manager"
version = "4.0.2"
version = "4.0.1"
description = "Library provides the way to automatically manage drivers for different browsers"
optional = false
python-versions = ">=3.7"
files = [
{file = "webdriver_manager-4.0.2-py2.py3-none-any.whl", hash = "sha256:75908d92ecc45ff2b9953614459c633db8f9aa1ff30181cefe8696e312908129"},
{file = "webdriver_manager-4.0.2.tar.gz", hash = "sha256:efedf428f92fd6d5c924a0d054e6d1322dd77aab790e834ee767af392b35590f"},
{file = "webdriver_manager-4.0.1-py2.py3-none-any.whl", hash = "sha256:d7970052295bb9cda2c1a24cf0b872dd2c41ababcc78f7b6b8dc37a41e979a7e"},
{file = "webdriver_manager-4.0.1.tar.gz", hash = "sha256:25ec177c6a2ce9c02fb8046f1b2732701a9418d6a977967bb065d840a3175d87"},
]
[package.dependencies]


@@ -88,4 +88,4 @@ skip_glob = ["data"]
[tool.pyright]
pythonVersion = "3.10"
exclude = ["data/**", "**/node_modules", "**/__pycache__", "**/.*"]
ignore = ["../classic/forge/**"]
ignore = ["../forge/**"]

Some files were not shown because too many files have changed in this diff.