mirror of https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-01-12 16:48:06 -05:00

Compare commits: 5 commits, twitter-in... → security/a...

Commits:
- 57985eb8ed
- 575be818ca
- 58762c7977
- be69ad2a9f
- 3b9a467deb
.dockerignore

@@ -1,60 +1,27 @@
 # Ignore everything by default, selectively add things to context
 *

-# Platform - Libs
-!autogpt_platform/autogpt_libs/autogpt_libs/
-!autogpt_platform/autogpt_libs/pyproject.toml
-!autogpt_platform/autogpt_libs/poetry.lock
-!autogpt_platform/autogpt_libs/README.md
+# AutoGPT
+!autogpt/autogpt/
+!autogpt/pyproject.toml
+!autogpt/poetry.lock
+!autogpt/README.md
+!autogpt/tests/

-# Platform - Backend
-!autogpt_platform/backend/backend/
-!autogpt_platform/backend/migrations/
-!autogpt_platform/backend/schema.prisma
-!autogpt_platform/backend/pyproject.toml
-!autogpt_platform/backend/poetry.lock
-!autogpt_platform/backend/README.md
+# Benchmark
+!benchmark/agbenchmark/
+!benchmark/pyproject.toml
+!benchmark/poetry.lock
+!benchmark/README.md

-# Platform - Market
-!autogpt_platform/market/market/
-!autogpt_platform/market/scripts.py
-!autogpt_platform/market/schema.prisma
-!autogpt_platform/market/pyproject.toml
-!autogpt_platform/market/poetry.lock
-!autogpt_platform/market/README.md
+# Forge
+!forge/forge/
+!forge/pyproject.toml
+!forge/poetry.lock
+!forge/README.md

-# Platform - Frontend
-!autogpt_platform/frontend/src/
-!autogpt_platform/frontend/public/
-!autogpt_platform/frontend/package.json
-!autogpt_platform/frontend/yarn.lock
-!autogpt_platform/frontend/tsconfig.json
-!autogpt_platform/frontend/README.md
-## config
-!autogpt_platform/frontend/*.config.*
-!autogpt_platform/frontend/.env.*
-
-# Classic - AutoGPT
-!classic/original_autogpt/autogpt/
-!classic/original_autogpt/pyproject.toml
-!classic/original_autogpt/poetry.lock
-!classic/original_autogpt/README.md
-!classic/original_autogpt/tests/
-
-# Classic - Benchmark
-!classic/benchmark/agbenchmark/
-!classic/benchmark/pyproject.toml
-!classic/benchmark/poetry.lock
-!classic/benchmark/README.md
-
-# Classic - Forge
-!classic/forge/
-!classic/forge/pyproject.toml
-!classic/forge/poetry.lock
-!classic/forge/README.md
-
-# Classic - Frontend
-!classic/frontend/build/web/
+# Frontend
+!frontend/build/web/

 # Explicitly re-ignore some folders
 .*
.gitattributes (vendored, 7 changes)

@@ -1,10 +1,5 @@
-classic/frontend/build/** linguist-generated
+frontend/build/** linguist-generated

 **/poetry.lock linguist-generated

-docs/_javascript/** linguist-vendored
-
-# Exclude VCR cassettes from stats
-classic/forge/tests/vcr_cassettes/**/**.y*ml linguist-generated
-
 * text=auto
.github/CODEOWNERS (vendored, 12 changes)

@@ -1,7 +1,5 @@
-* @Significant-Gravitas/maintainers
-.github/workflows/ @Significant-Gravitas/devops
-classic/forge/ @Significant-Gravitas/forge-maintainers
-classic/benchmark/ @Significant-Gravitas/benchmark-maintainers
-classic/frontend/ @Significant-Gravitas/frontend-maintainers
-autogpt_platform/infra @Significant-Gravitas/devops
-.github/CODEOWNERS @Significant-Gravitas/admins
+.github/workflows/ @Significant-Gravitas/devops
+autogpt/ @Significant-Gravitas/maintainers
+forge/ @Significant-Gravitas/forge-maintainers
+benchmark/ @Significant-Gravitas/benchmark-maintainers
+frontend/ @Significant-Gravitas/frontend-maintainers
.github/ISSUE_TEMPLATE/1.bug.yml (vendored, 17 changes)

@@ -88,16 +88,14 @@ body:

   - type: dropdown
     attributes:
-      label: What LLM Provider do you use?
+      label: Do you use OpenAI GPT-3 or GPT-4?
       description: >
         If you are using AutoGPT with `SMART_LLM=gpt-3.5-turbo`, your problems may be caused by
         the [limitations](https://github.com/Significant-Gravitas/AutoGPT/issues?q=is%3Aissue+label%3A%22AI+model+limitation%22) of GPT-3.5.
       options:
-        - Azure
-        - Groq
-        - Anthropic
-        - Llamafile
-        - Other (detail in issue)
+        - GPT-3.5
+        - GPT-4
+        - GPT-4(32k)
     validations:
       required: true

@@ -128,13 +126,6 @@ body:
       label: Specify the area
       description: Please specify the area you think is best related to the issue.

-  - type: input
-    attributes:
-      label: What commit or version are you using?
-      description: It is helpful for us to reproduce to know what version of the software you were using when this happened. Please run `git log -n 1 --pretty=format:"%H"` to output the full commit hash.
-    validations:
-      required: true
-
   - type: textarea
     attributes:
       label: Describe your issue.
.github/PULL_REQUEST_TEMPLATE.md (vendored, 51 changes)

@@ -1,38 +1,31 @@
 ### Background

 <!-- Clearly explain the need for these changes: -->

 ### Changes 🏗️

 <!-- Concisely describe all of the changes made in this pull request: -->

-### Checklist 📋
+### PR Quality Scorecard ✨

-#### For code changes:
-- [ ] I have clearly listed my changes in the PR description
-- [ ] I have made a test plan
-- [ ] I have tested my changes according to the test plan:
-  <!-- Put your test plan here: -->
-  - [ ] ...
+<!--
+Check out our contribution guide:
+https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing

-<details>
-  <summary>Example test plan</summary>
-
-  - [ ] Create from scratch and execute an agent with at least 3 blocks
-  - [ ] Import an agent from file upload, and confirm it executes correctly
-  - [ ] Upload agent to marketplace
-  - [ ] Import an agent from marketplace and confirm it executes correctly
-  - [ ] Edit an agent from monitor, and confirm it executes correctly
-</details>
+1. Avoid duplicate work, issues, PRs etc.
+2. Also consider contributing something other than code; see the [contribution guide]
+   for options.
+3. Clearly explain your changes.
+4. Avoid making unnecessary changes, especially if they're purely based on personal
+   preferences. Doing so is the maintainers' job. ;-)
+-->

-#### For configuration changes:
-- [ ] `.env.example` is updated or already compatible with my changes
-- [ ] `docker-compose.yml` is updated or already compatible with my changes
-- [ ] I have included a list of my configuration changes in the PR description (under **Changes**)
-
-<details>
-  <summary>Examples of configuration changes</summary>
-
-  - Changing ports
-  - Adding new services that need to communicate with each other
-  - Secrets or environment variable changes
-  - New or infrastructure changes such as databases
-</details>
+- [x] Have you used the PR description template?   `+2 pts`
+- [ ] Is your pull request atomic, focusing on a single change?   `+5 pts`
+- [ ] Have you linked the GitHub issue(s) that this PR addresses?   `+5 pts`
+- [ ] Have you documented your changes clearly and comprehensively?   `+5 pts`
+- [ ] Have you changed or added a feature?   `-4 pts`
+- [ ] Have you added/updated corresponding documentation?   `+4 pts`
+- [ ] Have you added/updated corresponding integration tests?   `+5 pts`
+- [ ] Have you changed the behavior of AutoGPT?   `-5 pts`
+- [ ] Have you also run `agbenchmark` to verify that these changes do not regress performance?   `+10 pts`
.github/dependabot.yml (vendored, deleted, 175 lines)

@@ -1,175 +0,0 @@
-version: 2
-updates:
-  # autogpt_libs (Poetry project)
-  - package-ecosystem: "pip"
-    directory: "autogpt_platform/autogpt_libs"
-    schedule:
-      interval: "weekly"
-    open-pull-requests-limit: 10
-    target-branch: "dev"
-    commit-message:
-      prefix: "chore(libs/deps)"
-      prefix-development: "chore(libs/deps-dev)"
-    groups:
-      production-dependencies:
-        dependency-type: "production"
-        update-types:
-          - "minor"
-          - "patch"
-      development-dependencies:
-        dependency-type: "development"
-        update-types:
-          - "minor"
-          - "patch"
-
-  # backend (Poetry project)
-  - package-ecosystem: "pip"
-    directory: "autogpt_platform/backend"
-    schedule:
-      interval: "weekly"
-    open-pull-requests-limit: 10
-    target-branch: "dev"
-    commit-message:
-      prefix: "chore(backend/deps)"
-      prefix-development: "chore(backend/deps-dev)"
-    groups:
-      production-dependencies:
-        dependency-type: "production"
-        update-types:
-          - "minor"
-          - "patch"
-      development-dependencies:
-        dependency-type: "development"
-        update-types:
-          - "minor"
-          - "patch"
-
-  # frontend (Next.js project)
-  - package-ecosystem: "npm"
-    directory: "autogpt_platform/frontend"
-    schedule:
-      interval: "weekly"
-    open-pull-requests-limit: 10
-    target-branch: "dev"
-    commit-message:
-      prefix: "chore(frontend/deps)"
-      prefix-development: "chore(frontend/deps-dev)"
-    groups:
-      production-dependencies:
-        dependency-type: "production"
-        update-types:
-          - "minor"
-          - "patch"
-      development-dependencies:
-        dependency-type: "development"
-        update-types:
-          - "minor"
-          - "patch"
-
-  # infra (Terraform)
-  - package-ecosystem: "terraform"
-    directory: "autogpt_platform/infra"
-    schedule:
-      interval: "weekly"
-    open-pull-requests-limit: 5
-    target-branch: "dev"
-    commit-message:
-      prefix: "chore(infra/deps)"
-      prefix-development: "chore(infra/deps-dev)"
-
-    groups:
-      production-dependencies:
-        dependency-type: "production"
-        update-types:
-          - "minor"
-          - "patch"
-      development-dependencies:
-        dependency-type: "development"
-        update-types:
-          - "minor"
-          - "patch"
-
-
-  # GitHub Actions
-  - package-ecosystem: "github-actions"
-    directory: "/"
-    schedule:
-      interval: "weekly"
-    open-pull-requests-limit: 5
-    target-branch: "dev"
-    groups:
-      production-dependencies:
-        dependency-type: "production"
-        update-types:
-          - "minor"
-          - "patch"
-      development-dependencies:
-        dependency-type: "development"
-        update-types:
-          - "minor"
-          - "patch"
-
-
-  # Docker
-  - package-ecosystem: "docker"
-    directory: "autogpt_platform/"
-    schedule:
-      interval: "weekly"
-    open-pull-requests-limit: 5
-    target-branch: "dev"
-    groups:
-      production-dependencies:
-        dependency-type: "production"
-        update-types:
-          - "minor"
-          - "patch"
-      development-dependencies:
-        dependency-type: "development"
-        update-types:
-          - "minor"
-          - "patch"
-
-
-  # Submodules
-  - package-ecosystem: "gitsubmodule"
-    directory: "autogpt_platform/supabase"
-    schedule:
-      interval: "weekly"
-    open-pull-requests-limit: 1
-    target-branch: "dev"
-    commit-message:
-      prefix: "chore(platform/deps)"
-      prefix-development: "chore(platform/deps-dev)"
-    groups:
-      production-dependencies:
-        dependency-type: "production"
-        update-types:
-          - "minor"
-          - "patch"
-      development-dependencies:
-        dependency-type: "development"
-        update-types:
-          - "minor"
-          - "patch"
-
-
-  # Docs
-  - package-ecosystem: 'pip'
-    directory: "docs/"
-    schedule:
-      interval: "weekly"
-    open-pull-requests-limit: 1
-    target-branch: "dev"
-    commit-message:
-      prefix: "chore(docs/deps)"
-    groups:
-      production-dependencies:
-        dependency-type: "production"
-        update-types:
-          - "minor"
-          - "patch"
-      development-dependencies:
-        dependency-type: "development"
-        update-types:
-          - "minor"
-          - "patch"
.github/labeler.yml (vendored, 35 changes)

@@ -1,32 +1,19 @@
-Classic AutoGPT Agent:
+AutoGPT Agent:
 - changed-files:
-  - any-glob-to-any-file: classic/original_autogpt/**
-
-Classic Benchmark:
-- changed-files:
-  - any-glob-to-any-file: classic/benchmark/**
-
-Classic Frontend:
-- changed-files:
-  - any-glob-to-any-file: classic/frontend/**
+  - any-glob-to-any-file: autogpt/**

 Forge:
 - changed-files:
-  - any-glob-to-any-file: classic/forge/**
+  - any-glob-to-any-file: forge/**
+
+Benchmark:
+- changed-files:
+  - any-glob-to-any-file: benchmark/**

+Frontend:
+- changed-files:
+  - any-glob-to-any-file: frontend/**
+
 documentation:
 - changed-files:
   - any-glob-to-any-file: docs/**
-
-platform/frontend:
-- changed-files:
-  - any-glob-to-any-file: autogpt_platform/frontend/**
-
-platform/backend:
-- changed-files:
-  - any-glob-to-any-file: autogpt_platform/backend/**
-  - all-globs-to-all-files: '!autogpt_platform/backend/backend/blocks/**'
-
-platform/blocks:
-- changed-files:
-  - any-glob-to-any-file: autogpt_platform/backend/backend/blocks/**
.github/workflows/classic-forge-ci.yml → .github/workflows/autogpt-ci.yml

@@ -1,27 +1,27 @@
-name: Classic - Forge CI
+name: AutoGPT CI

 on:
   push:
-    branches: [ master, dev, ci-test* ]
+    branches: [ master, development, ci-test* ]
     paths:
-      - '.github/workflows/classic-forge-ci.yml'
-      - 'classic/forge/**'
-      - '!classic/forge/tests/vcr_cassettes'
+      - '.github/workflows/autogpt-ci.yml'
+      - 'autogpt/**'
+      - '!autogpt/tests/vcr_cassettes'
   pull_request:
-    branches: [ master, dev, release-* ]
+    branches: [ master, development, release-* ]
     paths:
-      - '.github/workflows/classic-forge-ci.yml'
-      - 'classic/forge/**'
-      - '!classic/forge/tests/vcr_cassettes'
+      - '.github/workflows/autogpt-ci.yml'
+      - 'autogpt/**'
+      - '!autogpt/tests/vcr_cassettes'

 concurrency:
-  group: ${{ format('forge-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
+  group: ${{ format('autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
   cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

 defaults:
   run:
     shell: bash
-    working-directory: classic/forge
+    working-directory: autogpt

 jobs:
   test:

@@ -68,6 +68,11 @@ jobs:
           fetch-depth: 0
           submodules: true

+      - name: Configure git user Auto-GPT-Bot
+        run: |
+          git config --global user.name "Auto-GPT-Bot"
+          git config --global user.email "github-bot@agpt.co"
+
       - name: Checkout cassettes
         if: ${{ startsWith(github.event_name, 'pull_request') }}
         env:

@@ -104,13 +109,17 @@ jobs:
         with:
          python-version: ${{ matrix.python-version }}

+      - id: get_date
+        name: Get date
+        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
+
       - name: Set up Python dependency cache
         # On Windows, unpacking cached dependencies takes longer than just installing them
         if: runner.os != 'Windows'
         uses: actions/cache@v4
         with:
           path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
-          key: poetry-${{ runner.os }}-${{ hashFiles('classic/forge/poetry.lock') }}
+          key: poetry-${{ runner.os }}-${{ hashFiles('autogpt/poetry.lock') }}

       - name: Install Poetry (Unix)
         if: runner.os != 'Windows'

@@ -137,9 +146,9 @@ jobs:
       - name: Run pytest with coverage
         run: |
           poetry run pytest -vv \
-            --cov=forge --cov-branch --cov-report term-missing --cov-report xml \
-            --durations=10 \
-            forge
+            --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
+            --numprocesses=logical --durations=10 \
+            tests/unit tests/integration
         env:
           CI: true
           PLAIN_OUTPUT: True

@@ -152,7 +161,7 @@ jobs:
         uses: codecov/codecov-action@v4
         with:
           token: ${{ secrets.CODECOV_TOKEN }}
-          flags: forge,${{ runner.os }}
+          flags: autogpt-agent,${{ runner.os }}

       - id: setup_git_auth
         name: Set up git token authentication

@@ -233,4 +242,4 @@ jobs:
         uses: actions/upload-artifact@v4
         with:
           name: test-logs
-          path: classic/forge/logs/
+          path: autogpt/logs/
.github/workflows/autogpt-docker-cache-clean.yml (vendored, new file, 59 lines)

@@ -0,0 +1,59 @@
+name: Purge Auto-GPT Docker CI cache
+
+on:
+  schedule:
+    - cron: 20 4 * * 1,4
+
+env:
+  BASE_BRANCH: development
+  IMAGE_NAME: auto-gpt
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        build-type: [release, dev]
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - id: build
+        name: Build image
+        uses: docker/build-push-action@v5
+        with:
+          file: Dockerfile.autogpt
+          build-args: BUILD_TYPE=${{ matrix.build-type }}
+          load: true # save to docker images
+          # use GHA cache as read-only
+          cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
+
+      - name: Generate build report
+        env:
+          event_name: ${{ github.event_name }}
+          event_ref: ${{ github.event.schedule }}
+
+          build_type: ${{ matrix.build-type }}
+
+          prod_branch: master
+          dev_branch: development
+          repository: ${{ github.repository }}
+          base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
+
+          current_ref: ${{ github.ref_name }}
+          commit_hash: ${{ github.sha }}
+          source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }}
+          push_forced_label:
+
+          new_commits_json: ${{ null }}
+          compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
+
+          github_context_json: ${{ toJSON(github) }}
+          job_env_json: ${{ toJSON(env) }}
+          vars_json: ${{ toJSON(vars) }}
+
+        run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
+        continue-on-error: true
.github/workflows/classic-autogpt-docker-ci.yml → .github/workflows/autogpt-docker-ci.yml

@@ -1,26 +1,26 @@
-name: Classic - AutoGPT Docker CI
+name: AutoGPT Docker CI

 on:
   push:
-    branches: [master, dev]
+    branches: [ master, development ]
     paths:
-      - '.github/workflows/classic-autogpt-docker-ci.yml'
-      - 'classic/original_autogpt/**'
-      - 'classic/forge/**'
+      - '.github/workflows/autogpt-docker-ci.yml'
+      - 'autogpt/**'
+      - '!autogpt/tests/vcr_cassettes'
   pull_request:
-    branches: [ master, dev, release-* ]
+    branches: [ master, development, release-* ]
     paths:
-      - '.github/workflows/classic-autogpt-docker-ci.yml'
-      - 'classic/original_autogpt/**'
-      - 'classic/forge/**'
+      - '.github/workflows/autogpt-docker-ci.yml'
+      - 'autogpt/**'
+      - '!autogpt/tests/vcr_cassettes'

 concurrency:
-  group: ${{ format('classic-autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
+  group: ${{ format('autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
   cancel-in-progress: ${{ github.event_name == 'pull_request' }}

 defaults:
   run:
-    working-directory: classic/original_autogpt
+    working-directory: autogpt

 env:
   IMAGE_NAME: auto-gpt

@@ -34,58 +34,57 @@ jobs:
       matrix:
         build-type: [release, dev]
     steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
+      - name: Checkout repository
+        uses: actions/checkout@v4

-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3

-      - if: runner.debug
-        run: |
-          ls -al
-          du -hs *
+      - if: runner.debug
+        run: |
+          ls -al
+          du -hs *

-      - id: build
-        name: Build image
-        uses: docker/build-push-action@v6
-        with:
-          context: classic/
-          file: classic/Dockerfile.autogpt
-          build-args: BUILD_TYPE=${{ matrix.build-type }}
-          tags: ${{ env.IMAGE_NAME }}
-          labels: GIT_REVISION=${{ github.sha }}
-          load: true # save to docker images
-          # cache layers in GitHub Actions cache to speed up builds
-          cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }}
-          cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
+      - id: build
+        name: Build image
+        uses: docker/build-push-action@v5
+        with:
+          file: Dockerfile.autogpt
+          build-args: BUILD_TYPE=${{ matrix.build-type }}
+          tags: ${{ env.IMAGE_NAME }}
+          labels: GIT_REVISION=${{ github.sha }}
+          load: true # save to docker images
+          # cache layers in GitHub Actions cache to speed up builds
+          cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }}
+          cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max

-      - name: Generate build report
-        env:
-          event_name: ${{ github.event_name }}
-          event_ref: ${{ github.event.ref }}
-          event_ref_type: ${{ github.event.ref}}
+      - name: Generate build report
+        env:
+          event_name: ${{ github.event_name }}
+          event_ref: ${{ github.event.ref }}
+          event_ref_type: ${{ github.event.ref}}

-          build_type: ${{ matrix.build-type }}
+          build_type: ${{ matrix.build-type }}

-          prod_branch: master
-          dev_branch: dev
-          repository: ${{ github.repository }}
-          base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}
+          prod_branch: master
+          dev_branch: development
+          repository: ${{ github.repository }}
+          base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}

-          current_ref: ${{ github.ref_name }}
-          commit_hash: ${{ github.event.after }}
-          source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
-          push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}
+          current_ref: ${{ github.ref_name }}
+          commit_hash: ${{ github.event.after }}
+          source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
+          push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}

-          new_commits_json: ${{ toJSON(github.event.commits) }}
-          compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
+          new_commits_json: ${{ toJSON(github.event.commits) }}
+          compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}

-          github_context_json: ${{ toJSON(github) }}
-          job_env_json: ${{ toJSON(env) }}
-          vars_json: ${{ toJSON(vars) }}
+          github_context_json: ${{ toJSON(github) }}
+          job_env_json: ${{ toJSON(env) }}
+          vars_json: ${{ toJSON(vars) }}

-        run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
-        continue-on-error: true
+        run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
+        continue-on-error: true

   test:
     runs-on: ubuntu-latest

@@ -117,16 +116,15 @@ jobs:
       - id: build
        name: Build image
-        uses: docker/build-push-action@v6
+        uses: docker/build-push-action@v5
        with:
-          context: classic/
-          file: classic/Dockerfile.autogpt
-          build-args: BUILD_TYPE=dev # include pytest
+          file: Dockerfile.autogpt
+          build-args: BUILD_TYPE=dev # include pytest
          tags: >
            ${{ env.IMAGE_NAME }},
            ${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}
          labels: GIT_REVISION=${{ github.sha }}
-          load: true # save to docker images
+          load: true # save to docker images
          # cache layers in GitHub Actions cache to speed up builds
          cache-from: type=gha,scope=autogpt-docker-dev
          cache-to: type=gha,scope=autogpt-docker-dev,mode=max
.github/workflows/autogpt-docker-release.yml (vendored, new file, 86 lines)

@@ -0,0 +1,86 @@
+name: AutoGPT Docker Release
+
+on:
+  release:
+    types: [ published, edited ]
+
+  workflow_dispatch:
+    inputs:
+      no_cache:
+        type: boolean
+        description: 'Build from scratch, without using cached layers'
+
+env:
+  IMAGE_NAME: auto-gpt
+  DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER }}/auto-gpt
+
+jobs:
+  build:
+    if: startsWith(github.ref, 'refs/tags/autogpt-')
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Log in to Docker hub
+        uses: docker/login-action@v3
+        with:
+          username: ${{ secrets.DOCKER_USER }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      # slashes are not allowed in image tags, but can appear in git branch or tag names
+      - id: sanitize_tag
+        name: Sanitize image tag
+        run: |
+          tag=${raw_tag//\//-}
+          echo tag=${tag#autogpt-} >> $GITHUB_OUTPUT
+        env:
+          raw_tag: ${{ github.ref_name }}
+
+      - id: build
+        name: Build image
+        uses: docker/build-push-action@v5
+        with:
+          file: Dockerfile.autogpt
+          build-args: BUILD_TYPE=release
+          load: true # save to docker images
+          # push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
+          tags: >
+            ${{ env.IMAGE_NAME }},
+            ${{ env.DEPLOY_IMAGE_NAME }}:latest,
+            ${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
+          labels: GIT_REVISION=${{ github.sha }}
+
+          # cache layers in GitHub Actions cache to speed up builds
+          cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release
+          cache-to: type=gha,scope=autogpt-docker-release,mode=max
+
+      - name: Push image to Docker Hub
+        run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}
+
+      - name: Generate build report
+        env:
+          event_name: ${{ github.event_name }}
+          event_ref: ${{ github.event.ref }}
+          event_ref_type: ${{ github.event.ref}}
+          inputs_no_cache: ${{ inputs.no_cache }}
+
+          prod_branch: master
+          dev_branch: development
+          repository: ${{ github.repository }}
+          base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
+
+          ref_type: ${{ github.ref_type }}
+          current_ref: ${{ github.ref_name }}
+          commit_hash: ${{ github.sha }}
+          source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
+
+          github_context_json: ${{ toJSON(github) }}
+          job_env_json: ${{ toJSON(env) }}
+          vars_json: ${{ toJSON(vars) }}
+
+        run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY
+        continue-on-error: true
.github/workflows/autogpt-server-ci.yml (vendored, new file, 268 lines)

@@ -0,0 +1,268 @@
+name: AutoGPT Server CI
+
+on:
+  push:
+    branches: [master, development, ci-test*]
+    paths:
+      - ".github/workflows/autogpt-server-ci.yml"
+      - "rnd/autogpt_server/**"
+      - "!autogpt/tests/vcr_cassettes"
+  pull_request:
+    branches: [master, development, release-*]
+    paths:
+      - ".github/workflows/autogpt-server-ci.yml"
+      - "rnd/autogpt_server/**"
+      - "!autogpt/tests/vcr_cassettes"
+
+concurrency:
+  group: ${{ format('autogpt-server-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
+  cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
+
+defaults:
+  run:
+    shell: bash
+    working-directory: rnd/autogpt_server
+
+jobs:
+  test:
+    permissions:
+      contents: read
+    timeout-minutes: 30
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.10"]
+        platform-os: [ubuntu, macos, macos-arm64, windows]
+    runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
+
+    steps:
+      # Quite slow on macOS (2~4 minutes to set up Docker)
+      # - name: Set up Docker (macOS)
+      #   if: runner.os == 'macOS'
+      #   uses: crazy-max/ghaction-setup-docker@v3
+
+      - name: Start MinIO service (Linux)
+        if: runner.os == 'Linux'
+        working-directory: "."
+        run: |
+          docker pull minio/minio:edge-cicd
+          docker run -d -p 9000:9000 minio/minio:edge-cicd
+
+      - name: Start MinIO service (macOS)
+        if: runner.os == 'macOS'
+        working-directory: ${{ runner.temp }}
+        run: |
+          brew install minio/stable/minio
+          mkdir data
+          minio server ./data &
+
+      # No MinIO on Windows:
+      # - Windows doesn't support running Linux Docker containers
+      # - It doesn't seem possible to start background processes on Windows. They are
+      #   killed after the step returns.
+      #   See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
+
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          submodules: true
+
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - id: get_date
+        name: Get date
+        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
+
+      - name: Set up Python dependency cache
+        # On Windows, unpacking cached dependencies takes longer than just installing them
+        if: runner.os != 'Windows'
+        uses: actions/cache@v4
+        with:
+          path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
+          key: poetry-${{ runner.os }}-${{ hashFiles('rnd/autogpt_server/poetry.lock') }}
+
+      - name: Install Poetry (Unix)
+        if: runner.os != 'Windows'
+        run: |
+          curl -sSL https://install.python-poetry.org | python3 -
+
+          if [ "${{ runner.os }}" = "macOS" ]; then
+            PATH="$HOME/.local/bin:$PATH"
+            echo "$HOME/.local/bin" >> $GITHUB_PATH
+          fi
+
+      - name: Install Poetry (Windows)
+        if: runner.os == 'Windows'
+        shell: pwsh
+        run: |
+          (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
+
+          $env:PATH += ";$env:APPDATA\Python\Scripts"
+          echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
+
+      - name: Install Python dependencies
+        run: poetry install
+
+      - name: Generate Prisma Client
+        run: poetry run prisma generate
+
+      - name: Run Database Migrations
+        run: poetry run prisma migrate dev --name updates
+
+      - name: Run pytest with coverage
+        run: |
+          poetry run pytest -vv \
+            test
+        env:
+          CI: true
+          PLAIN_OUTPUT: True
+
+      # - name: Upload coverage reports to Codecov
+      #   uses: codecov/codecov-action@v4
+      #   with:
+      #     token: ${{ secrets.CODECOV_TOKEN }}
+      #     flags: autogpt-server,${{ runner.os }}
+
+  build:
+    permissions:
+      contents: read
+    timeout-minutes: 30
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.10"]
+        platform-os: [ubuntu, macos, macos-arm64, windows]
+    runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          submodules: true
+
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - id: get_date
+        name: Get date
+        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
+
+      - name: Set up Python dependency cache
+        # On Windows, unpacking cached dependencies takes longer than just installing them
+        if: runner.os != 'Windows'
+        uses: actions/cache@v4
+        with:
+          path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
+          key: poetry-${{ runner.os }}-${{ hashFiles('rnd/autogpt_server/poetry.lock') }}
+
+      - name: Install Poetry (Unix)
+        if: runner.os != 'Windows'
+        run: |
+          curl -sSL https://install.python-poetry.org | python3 -
+
+          if [ "${{ runner.os }}" = "macOS" ]; then
+            PATH="$HOME/.local/bin:$PATH"
+            echo "$HOME/.local/bin" >> $GITHUB_PATH
+          fi
+
+      - name: Install Poetry (Windows)
+        if: runner.os == 'Windows'
+        shell: pwsh
+        run: |
+          (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
+
+          $env:PATH += ";$env:APPDATA\Python\Scripts"
+          echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
+
+      - name: Install Python dependencies
+        run: poetry install
+
+      - name: Generate Prisma Client
+        run: poetry run prisma generate
+
+      - name: Run Database Migrations
+        run: poetry run prisma migrate dev --name updates
+
+      - name: install rpm
+        if: matrix.platform-os == 'ubuntu'
+        run: sudo apt-get install -y alien fakeroot rpm
+
+      - name: Build distribution
+        run: |
+          case "${{ matrix.platform-os }}" in
+            "macos" | "macos-arm64")
+              ${MAC_COMMAND}
+              ;;
+            "windows")
+              ${WINDOWS_COMMAND}
+              ;;
+            *)
+              ${LINUX_COMMAND}
+              ;;
+          esac
+        env:
+          MAC_COMMAND: "poetry run poe dist_dmg"
+          WINDOWS_COMMAND: "poetry run poe dist_msi"
+          LINUX_COMMAND: "poetry run poe dist_appimage"
+
+      # break this into seperate steps each with their own name that matches the file
+      - name: Upload App artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: autogptserver-app-${{ matrix.platform-os }}
+          path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.app
+
+      - name: Upload dmg artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: autogptserver-dmg-${{ matrix.platform-os }}
+          path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/AutoGPTServer.dmg
+
+      - name: Upload msi artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: autogptserver-msi-${{ matrix.platform-os }}
+          path: D:\a\AutoGPT\AutoGPT\rnd\autogpt_server\dist\*.msi
+
+      - name: Upload deb artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: autogptserver-deb-${{ matrix.platform-os }}
+          path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.deb
+
+      - name: Upload rpm artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: autogptserver-rpm-${{ matrix.platform-os }}
+          path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.rpm
+
+      - name: Upload tar.gz artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: autogptserver-tar.gz-${{ matrix.platform-os }}
+          path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.tar.gz
+
+      - name: Upload zip artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: autogptserver-zip-${{ matrix.platform-os }}
+          path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.zip
+
+      - name: Upload pkg artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: autogptserver-pkg-${{ matrix.platform-os }}
+          path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.pkg
+
+      - name: Upload AppImage artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: autogptserver-AppImage-${{ matrix.platform-os }}
+          path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.AppImage
.github/workflows/autogpts-benchmark.yml (vendored, new file, 97 lines)

@@ -0,0 +1,97 @@
+name: AutoGPTs Nightly Benchmark
+
+on:
+  workflow_dispatch:
+  schedule:
+    - cron: '0 2 * * *'
+
+jobs:
+  benchmark:
+    permissions:
+      contents: write
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        agent-name: [ autogpt ]
+      fail-fast: false
+    timeout-minutes: 120
+    env:
+      min-python-version: '3.10'
+      REPORTS_BRANCH: data/benchmark-reports
+      REPORTS_FOLDER: ${{ format('benchmark/reports/{0}', matrix.agent-name) }}
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          submodules: true
+
+      - name: Set up Python ${{ env.min-python-version }}
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ env.min-python-version }}
+
+      - name: Install Poetry
+        run: curl -sSL https://install.python-poetry.org | python -
+
+      - name: Prepare reports folder
+        run: mkdir -p ${{ env.REPORTS_FOLDER }}
+
+      - run: poetry -C benchmark install
+
+      - name: Benchmark ${{ matrix.agent-name }}
+        run: |
+          ./run agent start ${{ matrix.agent-name }}
+          cd ${{ matrix.agent-name }}
+
+          set +e # Do not quit on non-zero exit codes
+          poetry run agbenchmark run -N 3 \
+            --test=ReadFile \
+            --test=BasicRetrieval --test=RevenueRetrieval2 \
+            --test=CombineCsv --test=LabelCsv --test=AnswerQuestionCombineCsv \
+            --test=UrlShortener --test=TicTacToe --test=Battleship \
+            --test=WebArenaTask_0 --test=WebArenaTask_21 --test=WebArenaTask_124 \
+            --test=WebArenaTask_134 --test=WebArenaTask_163
+
+          # Convert exit code 1 (some challenges failed) to exit code 0
+          if [ $? -eq 0 ] || [ $? -eq 1 ]; then
+            exit 0
+          else
+            exit $?
+          fi
+        env:
+          AGENT_NAME: ${{ matrix.agent-name }}
+          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+          REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
+          REPORTS_FOLDER: ${{ format('../../{0}', env.REPORTS_FOLDER) }} # account for changed workdir
+
+          TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
+          TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
+
+      - name: Push reports to data branch
+        run: |
+          # BODGE: Remove success_rate.json and regression_tests.json to avoid conflicts on checkout
+          rm ${{ env.REPORTS_FOLDER }}/*.json
+
+          # Find folder with newest (untracked) report in it
+          report_subfolder=$(find ${{ env.REPORTS_FOLDER }} -type f -name 'report.json' \
+            | xargs -I {} dirname {} \
+            | xargs -I {} git ls-files --others --exclude-standard {} \
+            | xargs -I {} dirname {} \
+            | sort -u)
+          json_report_file="$report_subfolder/report.json"
+
+          # Convert JSON report to Markdown
+          markdown_report_file="$report_subfolder/report.md"
+          poetry -C benchmark run benchmark/reports/format.py "$json_report_file" > "$markdown_report_file"
+          cat "$markdown_report_file" >> $GITHUB_STEP_SUMMARY
+
+          git config --global user.name 'GitHub Actions'
+          git config --global user.email 'github-actions@agpt.co'
+          git fetch origin ${{ env.REPORTS_BRANCH }}:${{ env.REPORTS_BRANCH }} \
+            && git checkout ${{ env.REPORTS_BRANCH }} \
+            || git checkout --orphan ${{ env.REPORTS_BRANCH }}
+          git reset --hard
+          git add ${{ env.REPORTS_FOLDER }}
+          git commit -m "Benchmark report for ${{ matrix.agent-name }} @ $(date +'%Y-%m-%d')" \
+            && git push origin ${{ env.REPORTS_BRANCH }}
.github/workflows/classic-autogpts-ci.yml → .github/workflows/autogpts-ci.yml

@@ -1,43 +1,38 @@
-name: Classic - Agent smoke tests
+name: Agent smoke tests

 on:
   workflow_dispatch:
   schedule:
     - cron: '0 8 * * *'
   push:
-    branches: [ master, dev, ci-test* ]
+    branches: [ master, development, ci-test* ]
     paths:
-      - '.github/workflows/classic-autogpts-ci.yml'
-      - 'classic/original_autogpt/**'
-      - 'classic/forge/**'
-      - 'classic/benchmark/**'
-      - 'classic/run'
-      - 'classic/cli.py'
-      - 'classic/setup.py'
+      - '.github/workflows/autogpts-ci.yml'
+      - 'autogpt/**'
+      - 'forge/**'
+      - 'benchmark/**'
+      - 'run'
+      - 'cli.py'
+      - 'setup.py'
       - '!**/*.md'
   pull_request:
-    branches: [ master, dev, release-* ]
+    branches: [ master, development, release-* ]
     paths:
-      - '.github/workflows/classic-autogpts-ci.yml'
-      - 'classic/original_autogpt/**'
-      - 'classic/forge/**'
-      - 'classic/benchmark/**'
-      - 'classic/run'
-      - 'classic/cli.py'
-      - 'classic/setup.py'
+      - '.github/workflows/autogpts-ci.yml'
+      - 'autogpt/**'
+      - 'forge/**'
+      - 'benchmark/**'
+      - 'run'
+      - 'cli.py'
+      - 'setup.py'
       - '!**/*.md'

 defaults:
   run:
     shell: bash
-    working-directory: classic

 jobs:
   serve-agent-protocol:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        agent-name: [ original_autogpt ]
+        agent-name: [ autogpt ]
       fail-fast: false
     timeout-minutes: 20
     env:

@@ -55,7 +50,7 @@ jobs:
           python-version: ${{ env.min-python-version }}

       - name: Install Poetry
-        working-directory: ./classic/${{ matrix.agent-name }}/
+        working-directory: ./${{ matrix.agent-name }}/
        run: |
          curl -sSL https://install.python-poetry.org | python -
.github/workflows/classic-benchmark-ci.yml → .github/workflows/benchmark-ci.yml

@@ -1,18 +1,18 @@
-name: Classic - AGBenchmark CI
+name: AGBenchmark CI

 on:
   push:
-    branches: [ master, dev, ci-test* ]
+    branches: [ master, development, ci-test* ]
     paths:
-      - 'classic/benchmark/**'
-      - '!classic/benchmark/reports/**'
-      - .github/workflows/classic-benchmark-ci.yml
+      - 'benchmark/**'
+      - .github/workflows/benchmark-ci.yml
+      - '!benchmark/reports/**'
   pull_request:
-    branches: [ master, dev, release-* ]
+    branches: [ master, development, release-* ]
     paths:
-      - 'classic/benchmark/**'
-      - '!classic/benchmark/reports/**'
-      - .github/workflows/classic-benchmark-ci.yml
+      - 'benchmark/**'
+      - '!benchmark/reports/**'
+      - .github/workflows/benchmark-ci.yml

 concurrency:
   group: ${{ format('benchmark-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}

@@ -39,7 +39,7 @@ jobs:
     defaults:
       run:
         shell: bash
-        working-directory: classic/benchmark
+        working-directory: benchmark
     steps:
       - name: Checkout repository
        uses: actions/checkout@v4

@@ -58,7 +58,7 @@ jobs:
        uses: actions/cache@v4
        with:
          path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
-          key: poetry-${{ runner.os }}-${{ hashFiles('classic/benchmark/poetry.lock') }}
+          key: poetry-${{ runner.os }}-${{ hashFiles('benchmark/poetry.lock') }}

      - name: Install Poetry (Unix)
        if: runner.os != 'Windows'

@@ -102,7 +102,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        agent-name: [forge]
+        agent-name: [ forge ]
       fail-fast: false
     timeout-minutes: 20
     steps:

@@ -122,7 +122,7 @@ jobs:
          curl -sSL https://install.python-poetry.org | python -

      - name: Run regression tests
-        working-directory: classic
+        working-directory: .
        run: |
          ./run agent start ${{ matrix.agent-name }}
          cd ${{ matrix.agent-name }}

@@ -146,23 +146,23 @@ jobs:
          echo "Running the following command: poetry run agbenchmark --mock --category=coding"
          poetry run agbenchmark --mock --category=coding

-          # echo "Running the following command: poetry run agbenchmark --test=WriteFile"
-          # poetry run agbenchmark --test=WriteFile
+          echo "Running the following command: poetry run agbenchmark --test=WriteFile"
+          poetry run agbenchmark --test=WriteFile
          cd ../benchmark
          poetry install
          echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed"
          export BUILD_SKILL_TREE=true

-          # poetry run agbenchmark --mock
+          poetry run agbenchmark --mock

-          # CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
-          # if [ ! -z "$CHANGED" ]; then
-          #   echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
-          #   echo "$CHANGED"
-          #   exit 1
-          # else
-          #   echo "No unstaged changes."
-          # fi
+          CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../frontend/assets)') || echo "No diffs"
+          if [ ! -z "$CHANGED" ]; then
+            echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
+            echo "$CHANGED"
+            exit 1
+          else
+            echo "No unstaged changes."
+          fi
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
@@ -1,4 +1,4 @@
-name: Classic - Publish to PyPI
+name: Publish to PyPI

 on:
   workflow_dispatch:

@@ -21,21 +21,21 @@ jobs:
          python-version: 3.8

      - name: Install Poetry
-        working-directory: ./classic/benchmark/
+        working-directory: ./benchmark/
        run: |
          curl -sSL https://install.python-poetry.org | python3 -
          echo "$HOME/.poetry/bin" >> $GITHUB_PATH

      - name: Build project for distribution
-        working-directory: ./classic/benchmark/
+        working-directory: ./benchmark/
        run: poetry build

      - name: Install dependencies
-        working-directory: ./classic/benchmark/
+        working-directory: ./benchmark/
        run: poetry install

      - name: Check Version
-        working-directory: ./classic/benchmark/
+        working-directory: ./benchmark/
        id: check-version
        run: |
          echo version=$(poetry version --short) >> $GITHUB_OUTPUT

@@ -43,7 +43,7 @@ jobs:
      - name: Create Release
        uses: ncipollo/release-action@v1
        with:
-          artifacts: "classic/benchmark/dist/*"
+          artifacts: "benchmark/dist/*"
          token: ${{ secrets.GITHUB_TOKEN }}
          draft: false
          generateReleaseNotes: false

@@ -51,5 +51,5 @@ jobs:
          commit: master

      - name: Build and publish
-        working-directory: ./classic/benchmark/
+        working-directory: ./benchmark/
        run: poetry publish -u __token__ -p ${{ secrets.PYPI_API_TOKEN }}
@@ -1,60 +0,0 @@
-name: Classic - Purge Auto-GPT Docker CI cache
-
-on:
-  schedule:
-    - cron: 20 4 * * 1,4
-
-env:
-  BASE_BRANCH: dev
-  IMAGE_NAME: auto-gpt
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        build-type: [release, dev]
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-
-      - id: build
-        name: Build image
-        uses: docker/build-push-action@v6
-        with:
-          context: classic/
-          file: classic/Dockerfile.autogpt
-          build-args: BUILD_TYPE=${{ matrix.build-type }}
-          load: true # save to docker images
-          # use GHA cache as read-only
-          cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
-
-      - name: Generate build report
-        env:
-          event_name: ${{ github.event_name }}
-          event_ref: ${{ github.event.schedule }}
-
-          build_type: ${{ matrix.build-type }}
-
-          prod_branch: master
-          dev_branch: dev
-          repository: ${{ github.repository }}
-          base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}
-
-          current_ref: ${{ github.ref_name }}
-          commit_hash: ${{ github.sha }}
-          source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }}
-          push_forced_label:
-
-          new_commits_json: ${{ null }}
-          compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
-
-          github_context_json: ${{ toJSON(github) }}
-          job_env_json: ${{ toJSON(env) }}
-          vars_json: ${{ toJSON(vars) }}
-
-        run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
-        continue-on-error: true
@@ -1,87 +0,0 @@
-name: Classic - AutoGPT Docker Release
-
-on:
-  release:
-    types: [published, edited]
-
-  workflow_dispatch:
-    inputs:
-      no_cache:
-        type: boolean
-        description: 'Build from scratch, without using cached layers'
-
-env:
-  IMAGE_NAME: auto-gpt
-  DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER }}/auto-gpt
-
-jobs:
-  build:
-    if: startsWith(github.ref, 'refs/tags/autogpt-')
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-
-      - name: Log in to Docker hub
-        uses: docker/login-action@v3
-        with:
-          username: ${{ secrets.DOCKER_USER }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
-
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-
-      # slashes are not allowed in image tags, but can appear in git branch or tag names
-      - id: sanitize_tag
-        name: Sanitize image tag
-        run: |
-          tag=${raw_tag//\//-}
-          echo tag=${tag#autogpt-} >> $GITHUB_OUTPUT
-        env:
-          raw_tag: ${{ github.ref_name }}
-
-      - id: build
-        name: Build image
-        uses: docker/build-push-action@v6
-        with:
-          context: classic/
-          file: Dockerfile.autogpt
-          build-args: BUILD_TYPE=release
-          load: true # save to docker images
-          # push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
-          tags: >
-            ${{ env.IMAGE_NAME }},
-            ${{ env.DEPLOY_IMAGE_NAME }}:latest,
-            ${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
-          labels: GIT_REVISION=${{ github.sha }}
-
-          # cache layers in GitHub Actions cache to speed up builds
-          cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release
-          cache-to: type=gha,scope=autogpt-docker-release,mode=max
-
-      - name: Push image to Docker Hub
-        run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}
-
-      - name: Generate build report
-        env:
-          event_name: ${{ github.event_name }}
-          event_ref: ${{ github.event.ref }}
-          event_ref_type: ${{ github.event.ref}}
-          inputs_no_cache: ${{ inputs.no_cache }}
-
-          prod_branch: master
-          dev_branch: dev
-          repository: ${{ github.repository }}
-          base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}
-
-          ref_type: ${{ github.ref_type }}
-          current_ref: ${{ github.ref_name }}
-          commit_hash: ${{ github.sha }}
-          source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
-
-          github_context_json: ${{ toJSON(github) }}
-          job_env_json: ${{ toJSON(env) }}
-          vars_json: ${{ toJSON(vars) }}
-
-        run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY
-        continue-on-error: true
.github/workflows/classic-frontend-ci.yml (vendored, deleted, 60 lines)

@@ -1,60 +0,0 @@
-name: Classic - Frontend CI/CD
-
-on:
-  push:
-    branches:
-      - master
-      - dev
-      - 'ci-test*' # This will match any branch that starts with "ci-test"
-    paths:
-      - 'classic/frontend/**'
-      - '.github/workflows/classic-frontend-ci.yml'
-  pull_request:
-    paths:
-      - 'classic/frontend/**'
-      - '.github/workflows/classic-frontend-ci.yml'
-
-jobs:
-  build:
-    permissions:
-      contents: write
-      pull-requests: write
-    runs-on: ubuntu-latest
-    env:
-      BUILD_BRANCH: ${{ format('classic-frontend-build/{0}', github.ref_name) }}
-
-    steps:
-      - name: Checkout Repo
-        uses: actions/checkout@v4
-
-      - name: Setup Flutter
-        uses: subosito/flutter-action@v2
-        with:
-          flutter-version: '3.13.2'
-
-      - name: Build Flutter to Web
-        run: |
-          cd classic/frontend
-          flutter build web --base-href /app/
-
-      # - name: Commit and Push to ${{ env.BUILD_BRANCH }}
-      #   if: github.event_name == 'push'
-      #   run: |
-      #     git config --local user.email "action@github.com"
-      #     git config --local user.name "GitHub Action"
-      #     git add classic/frontend/build/web
-      #     git checkout -B ${{ env.BUILD_BRANCH }}
-      #     git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
-      #     git push -f origin ${{ env.BUILD_BRANCH }}
-
-      - name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
-        if: github.event_name == 'push'
-        uses: peter-evans/create-pull-request@v7
-        with:
-          add-paths: classic/frontend/build/web
-          base: ${{ github.ref_name }}
-          branch: ${{ env.BUILD_BRANCH }}
-          delete-branch: true
-          title: "Update frontend build in `${{ github.ref_name }}`"
-          body: "This PR updates the frontend build based on commit ${{ github.sha }}."
-          commit-message: "Update frontend build based on commit ${{ github.sha }}"
@@ -1,4 +1,4 @@
-name: Repo - Close stale issues
+name: 'Close stale issues'
 on:
   schedule:
     - cron: '30 1 * * *'
98
.github/workflows/codeql.yml
vendored
98
.github/workflows/codeql.yml
vendored
@@ -1,98 +0,0 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
  push:
    branches: [ "master", "release-*", "dev" ]
  pull_request:
    branches: [ "master", "release-*", "dev" ]
  merge_group:
  schedule:
    - cron: '15 4 * * 0'

jobs:
  analyze:
    name: Analyze (${{ matrix.language }})
    # Runner size impacts CodeQL analysis time. To learn more, please see:
    #   - https://gh.io/recommended-hardware-resources-for-running-codeql
    #   - https://gh.io/supported-runners-and-hardware-resources
    #   - https://gh.io/using-larger-runners (GitHub.com only)
    # Consider using larger runners or machines with greater resources for possible analysis time improvements.
    runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
    permissions:
      # required for all workflows
      security-events: write

      # required to fetch internal or private CodeQL packs
      packages: read

      # only required for workflows in private repositories
      actions: read
      contents: read

    strategy:
      fail-fast: false
      matrix:
        include:
          - language: typescript
            build-mode: none
          - language: python
            build-mode: none
        # CodeQL supports the following values keywords for 'language': 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift'
        # Use `c-cpp` to analyze code written in C, C++ or both
        # Use 'java-kotlin' to analyze code written in Java, Kotlin or both
        # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
        # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
        # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
        # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
        # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v3
        with:
          languages: ${{ matrix.language }}
          build-mode: ${{ matrix.build-mode }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.
          config: |
            paths-ignore:
              - classic/frontend/build/**

          # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
          # queries: security-extended,security-and-quality

      # If the analyze step fails for one of the languages you are analyzing with
      # "We were unable to automatically build your code", modify the matrix above
      # to set the build mode to "manual" for that language. Then modify this step
      # to build your code.
      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
      - if: matrix.build-mode == 'manual'
        shell: bash
        run: |
          echo 'If you are using a "manual" build mode for one or more of the' \
            'languages you are analyzing, replace this with the commands to build' \
            'your code, for example:'
          echo '  make bootstrap'
          echo '  make release'
          exit 1

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v3
        with:
          category: "/language:${{matrix.language}}"
@@ -1,25 +1,25 @@
name: Classic - AutoGPT CI
name: Forge CI

on:
  push:
    branches: [ master, dev, ci-test* ]
    branches: [ master, development, ci-test* ]
    paths:
      - '.github/workflows/classic-autogpt-ci.yml'
      - 'classic/original_autogpt/**'
      - '.github/workflows/forge-ci.yml'
      - 'forge/**'
  pull_request:
    branches: [ master, dev, release-* ]
    branches: [ master, development, release-* ]
    paths:
      - '.github/workflows/classic-autogpt-ci.yml'
      - 'classic/original_autogpt/**'
      - '.github/workflows/forge-ci.yml'
      - 'forge/**'

concurrency:
  group: ${{ format('classic-autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
  group: ${{ format('forge-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

defaults:
  run:
    shell: bash
    working-directory: classic/original_autogpt
    working-directory: forge

jobs:
  test:
@@ -66,27 +66,18 @@ jobs:
          fetch-depth: 0
          submodules: true

      - name: Configure git user Auto-GPT-Bot
        run: |
          git config --global user.name "Auto-GPT-Bot"
          git config --global user.email "github-bot@agpt.co"

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - id: get_date
        name: Get date
        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

      - name: Set up Python dependency cache
        # On Windows, unpacking cached dependencies takes longer than just installing them
        if: runner.os != 'Windows'
        uses: actions/cache@v4
        with:
          path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
          key: poetry-${{ runner.os }}-${{ hashFiles('classic/original_autogpt/poetry.lock') }}
          key: poetry-${{ runner.os }}-${{ hashFiles('forge/poetry.lock') }}

      - name: Install Poetry (Unix)
        if: runner.os != 'Windows'
@@ -113,9 +104,9 @@
      - name: Run pytest with coverage
        run: |
          poetry run pytest -vv \
            --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
            --numprocesses=logical --durations=10 \
            tests/unit tests/integration
            --cov=forge --cov-branch --cov-report term-missing --cov-report xml \
            --durations=10 \
            forge
        env:
          CI: true
          PLAIN_OUTPUT: True
@@ -128,11 +119,11 @@
        uses: codecov/codecov-action@v4
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          flags: autogpt-agent,${{ runner.os }}
          flags: forge,${{ runner.os }}

      - name: Upload logs to artifact
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: test-logs
          path: classic/original_autogpt/logs/
          path: forge/logs/
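The dependency-cache key above combines the runner OS with `hashFiles('forge/poetry.lock')`, so the cache is invalidated whenever the lockfile changes. A rough Python sketch of that keying scheme (GitHub documents `hashFiles` as SHA-256 based; the helper below is an illustration, not the actions runtime):

```python
import hashlib
import platform

def cache_key(lockfile_path: str) -> str:
    """Build a poetry cache key shaped like poetry-<OS>-<sha256(lockfile)>."""
    digest = hashlib.sha256()
    with open(lockfile_path, "rb") as f:
        digest.update(f.read())
    return f"poetry-{platform.system()}-{digest.hexdigest()}"

# Any edit to poetry.lock changes the digest, forcing a fresh cache entry.
# print(cache_key("forge/poetry.lock"))
```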
60 .github/workflows/frontend-ci.yml vendored Normal file
@@ -0,0 +1,60 @@
name: Frontend CI/CD

on:
  push:
    branches:
      - master
      - development
      - 'ci-test*' # This will match any branch that starts with "ci-test"
    paths:
      - 'frontend/**'
      - '.github/workflows/frontend-ci.yml'
  pull_request:
    paths:
      - 'frontend/**'
      - '.github/workflows/frontend-ci.yml'

jobs:
  build:
    permissions:
      contents: write
      pull-requests: write
    runs-on: ubuntu-latest
    env:
      BUILD_BRANCH: ${{ format('frontend-build/{0}', github.ref_name) }}

    steps:
      - name: Checkout Repo
        uses: actions/checkout@v4

      - name: Setup Flutter
        uses: subosito/flutter-action@v2
        with:
          flutter-version: '3.13.2'

      - name: Build Flutter to Web
        run: |
          cd frontend
          flutter build web --base-href /app/

      # - name: Commit and Push to ${{ env.BUILD_BRANCH }}
      #   if: github.event_name == 'push'
      #   run: |
      #     git config --local user.email "action@github.com"
      #     git config --local user.name "GitHub Action"
      #     git add frontend/build/web
      #     git checkout -B ${{ env.BUILD_BRANCH }}
      #     git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
      #     git push -f origin ${{ env.BUILD_BRANCH }}

      - name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
        if: github.event_name == 'push'
        uses: peter-evans/create-pull-request@v6
        with:
          add-paths: frontend/build/web
          base: ${{ github.ref_name }}
          branch: ${{ env.BUILD_BRANCH }}
          delete-branch: true
          title: "Update frontend build in `${{ github.ref_name }}`"
          body: "This PR updates the frontend build based on commit ${{ github.sha }}."
          commit-message: "Update frontend build based on commit ${{ github.sha }}"
133 .github/workflows/hackathon.yml vendored Normal file
@@ -0,0 +1,133 @@
name: Hackathon

on:
  workflow_dispatch:
    inputs:
      agents:
        description: "Agents to run (comma-separated)"
        required: false
        default: "autogpt" # Default agents if none are specified

jobs:
  matrix-setup:
    runs-on: ubuntu-latest
    # Service containers to run with `matrix-setup`
    services:
      # Label used to access the service container
      postgres:
        # Docker Hub image
        image: postgres
        # Provide the password for postgres
        env:
          POSTGRES_PASSWORD: postgres
        # Set health checks to wait until postgres has started
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          # Maps tcp port 5432 on service container to the host
          - 5432:5432
    outputs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
      env-name: ${{ steps.set-matrix.outputs.env-name }}
    steps:
      - id: set-matrix
        run: |
          if [ "${{ github.event_name }}" == "schedule" ]; then
            echo "::set-output name=env-name::production"
            echo "::set-output name=matrix::[ 'irrelevant']"
          elif [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
            IFS=',' read -ra matrix_array <<< "${{ github.event.inputs.agents }}"
            matrix_string="[ \"$(echo "${matrix_array[@]}" | sed 's/ /", "/g')\" ]"
            echo "::set-output name=env-name::production"
            echo "::set-output name=matrix::$matrix_string"
          else
            echo "::set-output name=env-name::testing"
            echo "::set-output name=matrix::[ 'irrelevant' ]"
          fi

  tests:
    environment:
      name: "${{ needs.matrix-setup.outputs.env-name }}"
    needs: matrix-setup
    env:
      min-python-version: "3.10"
    name: "${{ matrix.agent-name }}"
    runs-on: ubuntu-latest
    services:
      # Label used to access the service container
      postgres:
        # Docker Hub image
        image: postgres
        # Provide the password for postgres
        env:
          POSTGRES_PASSWORD: postgres
        # Set health checks to wait until postgres has started
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          # Maps tcp port 5432 on service container to the host
          - 5432:5432
    timeout-minutes: 50
    strategy:
      fail-fast: false
      matrix:
        agent-name: ${{fromJson(needs.matrix-setup.outputs.matrix)}}
    steps:
      - name: Print Environment Name
        run: |
          echo "Matrix Setup Environment Name: ${{ needs.matrix-setup.outputs.env-name }}"

      - name: Check Docker Container
        id: check
        run: docker ps

      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      - name: Set up Python ${{ env.min-python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.min-python-version }}

      - id: get_date
        name: Get date
        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

      - name: Install Poetry
        run: |
          curl -sSL https://install.python-poetry.org | python -

      - name: Install Node.js
        uses: actions/setup-node@v4
        with:
          node-version: v18.15

      - name: Run benchmark
        run: |
          link=$(jq -r '.["github_repo_url"]' arena/$AGENT_NAME.json)
          branch=$(jq -r '.["branch_to_benchmark"]' arena/$AGENT_NAME.json)
          git clone "$link" -b "$branch" "$AGENT_NAME"
          cd $AGENT_NAME
          cp ./$AGENT_NAME/.env.example ./$AGENT_NAME/.env || echo "file not found"
          ./run agent start $AGENT_NAME
          cd ../benchmark
          poetry install
          poetry run agbenchmark --no-dep
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          SERP_API_KEY: ${{ secrets.SERP_API_KEY }}
          SERPAPI_API_KEY: ${{ secrets.SERP_API_KEY }}
          WEAVIATE_API_KEY: ${{ secrets.WEAVIATE_API_KEY }}
          WEAVIATE_URL: ${{ secrets.WEAVIATE_URL }}
          GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
          GOOGLE_CUSTOM_SEARCH_ENGINE_ID: ${{ secrets.GOOGLE_CUSTOM_SEARCH_ENGINE_ID }}
          AGENT_NAME: ${{ matrix.agent-name }}
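The `set-matrix` step above still uses the deprecated `::set-output` workflow command; current runners expect outputs to be appended to the file named by `$GITHUB_OUTPUT`. Its `IFS`/`sed` pipeline also hand-rolls a JSON array from the comma-separated `agents` input. A small Python sketch of both ideas, the modern output mechanism and a safer way to build the array (the sample input string is illustrative):

```python
import json
import os

def set_output(name: str, value: str) -> None:
    """Append a step output the way modern runners expect (replaces ::set-output)."""
    with open(os.environ["GITHUB_OUTPUT"], "a") as f:
        f.write(f"{name}={value}\n")

agents_input = "autogpt, forge"  # stand-in for github.event.inputs.agents
matrix = [name.strip() for name in agents_input.split(",") if name.strip()]

# json.dumps handles quoting/escaping that the sed one-liner only approximates.
# set_output("matrix", json.dumps(matrix))   # -> matrix=["autogpt", "forge"]
```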
68 .github/workflows/ossf-scorecard.yml vendored Normal file
@@ -0,0 +1,68 @@
name: "Security - OSSF Scorecard"
on:
  # For Branch-Protection check. Only the default branch is supported. See
  # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
  branch_protection_rule:
  # To guarantee Maintained check is occasionally updated. See
  # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
  schedule:
    - cron: '28 12 * * 2'
  push:
    branches: [ "master" ]

# Declare default permissions as read only.
permissions: read-all

jobs:
  analysis:
    name: Scorecard analysis
    runs-on: ubuntu-latest
    permissions:
      # Needed to upload the results to code-scanning dashboard.
      security-events: write
      # Needed to publish results and get a badge (see publish_results below).
      id-token: write
      # Uncomment the permissions below if installing in a private repository.
      # contents: read
      # actions: read

    steps:
      - name: "Checkout code"
        uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0
        with:
          persist-credentials: false

      - name: "Run analysis"
        uses: ossf/scorecard-action@e38b1902ae4f44df626f11ba0734b14fb91f8f86 # v2.1.2
        with:
          results_file: results.sarif
          results_format: sarif
          # (Optional) "write" PAT token. Uncomment the `repo_token` line below if:
          # - you want to enable the Branch-Protection check on a *public* repository, or
          # - you are installing Scorecard on a *private* repository
          # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat.
          # repo_token: ${{ secrets.SCORECARD_TOKEN }}

          # Public repositories:
          #   - Publish results to OpenSSF REST API for easy access by consumers
          #   - Allows the repository to include the Scorecard badge.
          #   - See https://github.com/ossf/scorecard-action#publishing-results.
          # For private repositories:
          #   - `publish_results` will always be set to `false`, regardless
          #     of the value entered here.
          publish_results: true

      # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
      # format to the repository Actions tab.
      - name: "Upload artifact"
        uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8 # v3.1.0
        with:
          name: SARIF file
          path: results.sarif
          retention-days: 5

      # Upload the results to GitHub's code scanning dashboard.
      - name: "Upload to code-scanning"
        uses: github/codeql-action/upload-sarif@17573ee1cc1b9d061760f3a006fc4aac4f944fd5 # v2.2.4
        with:
          sarif_file: results.sarif
@@ -1,49 +0,0 @@
name: AutoGPT Platform - Deploy Prod Environment

on:
  release:
    types: [published]

permissions:
  contents: 'read'
  id-token: 'write'

jobs:
  migrate:
    environment: production
    name: Run migrations for AutoGPT Platform
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install Python dependencies
        run: |
          python -m pip install --upgrade pip
          pip install prisma

      - name: Run Backend Migrations
        working-directory: ./autogpt_platform/backend
        run: |
          python -m prisma migrate deploy
        env:
          DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}


  trigger:
    needs: migrate
    runs-on: ubuntu-latest
    steps:
      - name: Trigger deploy workflow
        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ secrets.DEPLOY_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
          event-type: build_deploy_prod
          client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "repository": "${{ github.repository }}"}'
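The `trigger` job uses `peter-evans/repository-dispatch`, a thin wrapper around GitHub's repository dispatch REST endpoint. A hedged Python sketch of the underlying call that action makes (endpoint and payload shape follow the public API docs; token and repo values are placeholders):

```python
import requests

def trigger_deploy(token: str, repo: str, ref: str, sha: str) -> None:
    """Fire a repository_dispatch event, as the deploy workflows above do."""
    response = requests.post(
        f"https://api.github.com/repos/{repo}/dispatches",
        headers={
            "Accept": "application/vnd.github+json",
            "Authorization": f"Bearer {token}",
        },
        json={
            "event_type": "build_deploy_prod",
            # Becomes github.event.client_payload in the receiving workflow.
            "client_payload": {"ref": ref, "sha": sha, "repository": repo},
        },
        timeout=10,
    )
    response.raise_for_status()  # the API returns 204 No Content on success
```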
@@ -1,50 +0,0 @@
name: AutoGPT Platform - Deploy Dev Environment

on:
  push:
    branches: [ dev ]
    paths:
      - 'autogpt_platform/**'

permissions:
  contents: 'read'
  id-token: 'write'

jobs:
  migrate:
    environment: develop
    name: Run migrations for AutoGPT Platform
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install Python dependencies
        run: |
          python -m pip install --upgrade pip
          pip install prisma

      - name: Run Backend Migrations
        working-directory: ./autogpt_platform/backend
        run: |
          python -m prisma migrate deploy
        env:
          DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}

  trigger:
    needs: migrate
    runs-on: ubuntu-latest
    steps:
      - name: Trigger deploy workflow
        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ secrets.DEPLOY_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
          event-type: build_deploy_dev
          client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "repository": "${{ github.repository }}"}'
147 .github/workflows/platform-backend-ci.yml vendored
@@ -1,147 +0,0 @@
name: AutoGPT Platform - Backend CI

on:
  push:
    branches: [master, dev, ci-test*]
    paths:
      - ".github/workflows/platform-backend-ci.yml"
      - "autogpt_platform/backend/**"
      - "autogpt_platform/autogpt_libs/**"
  pull_request:
    branches: [master, dev, release-*]
    paths:
      - ".github/workflows/platform-backend-ci.yml"
      - "autogpt_platform/backend/**"
      - "autogpt_platform/autogpt_libs/**"
  merge_group:

concurrency:
  group: ${{ format('backend-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

defaults:
  run:
    shell: bash
    working-directory: autogpt_platform/backend

jobs:
  test:
    permissions:
      contents: read
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10"]
    runs-on: ubuntu-latest

    services:
      redis:
        image: bitnami/redis:6.2
        env:
          REDIS_PASSWORD: testpassword
        ports:
          - 6379:6379

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Setup Supabase
        uses: supabase/setup-cli@v1
        with:
          version: latest

      - id: get_date
        name: Get date
        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

      - name: Set up Python dependency cache
        uses: actions/cache@v4
        with:
          path: ~/.cache/pypoetry
          key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}

      - name: Install Poetry (Unix)
        run: |
          curl -sSL https://install.python-poetry.org | python3 -

          if [ "${{ runner.os }}" = "macOS" ]; then
            PATH="$HOME/.local/bin:$PATH"
            echo "$HOME/.local/bin" >> $GITHUB_PATH
          fi

      - name: Check poetry.lock
        run: |
          poetry lock

          if ! git diff --quiet poetry.lock; then
            echo "Error: poetry.lock not up to date."
            echo
            git diff poetry.lock
            exit 1
          fi

      - name: Install Python dependencies
        run: poetry install

      - name: Generate Prisma Client
        run: poetry run prisma generate

      - id: supabase
        name: Start Supabase
        working-directory: .
        run: |
          supabase init
          supabase start --exclude postgres-meta,realtime,storage-api,imgproxy,inbucket,studio,edge-runtime,logflare,vector,supavisor
          supabase status -o env | sed 's/="/=/; s/"$//' >> $GITHUB_OUTPUT
        # outputs:
        # DB_URL, API_URL, GRAPHQL_URL, ANON_KEY, SERVICE_ROLE_KEY, JWT_SECRET

      - name: Run Database Migrations
        run: poetry run prisma migrate dev --name updates
        env:
          DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}

      - id: lint
        name: Run Linter
        run: poetry run lint

      - name: Run pytest with coverage
        run: |
          if [[ "${{ runner.debug }}" == "1" ]]; then
            poetry run pytest -s -vv -o log_cli=true -o log_cli_level=DEBUG test
          else
            poetry run pytest -s -vv test
          fi
        if: success() || (failure() && steps.lint.outcome == 'failure')
        env:
          LOG_LEVEL: ${{ runner.debug && 'DEBUG' || 'INFO' }}
          DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
          SUPABASE_URL: ${{ steps.supabase.outputs.API_URL }}
          SUPABASE_SERVICE_ROLE_KEY: ${{ steps.supabase.outputs.SERVICE_ROLE_KEY }}
          SUPABASE_JWT_SECRET: ${{ steps.supabase.outputs.JWT_SECRET }}
          REDIS_HOST: 'localhost'
          REDIS_PORT: '6379'
          REDIS_PASSWORD: 'testpassword'

    env:
      CI: true
      PLAIN_OUTPUT: True
      RUN_ENV: local
      PORT: 8080
      OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}

    # - name: Upload coverage reports to Codecov
    #   uses: codecov/codecov-action@v4
    #   with:
    #     token: ${{ secrets.CODECOV_TOKEN }}
    #     flags: backend,${{ runner.os }}
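The `Start Supabase` step turns `supabase status -o env` output (lines like `DB_URL="postgres://..."`) into step outputs by stripping the quotes with `sed 's/="/=/; s/"$//'` and appending the result to `$GITHUB_OUTPUT`. The same transformation in Python, for clarity (the sample line is illustrative):

```python
import re

def env_line_to_output(line: str) -> str:
    """Strip the quotes from KEY="value" so it parses as a step output."""
    # Equivalent to: sed 's/="/=/; s/"$//'
    return re.sub(r'"$', "", line.replace('="', "=", 1))

assert env_line_to_output('DB_URL="postgresql://localhost:54322/postgres"') == \
    "DB_URL=postgresql://localhost:54322/postgres"
```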
101 .github/workflows/platform-frontend-ci.yml vendored
@@ -1,101 +0,0 @@
name: AutoGPT Platform - Frontend CI

on:
  push:
    branches: [master, dev]
    paths:
      - ".github/workflows/platform-frontend-ci.yml"
      - "autogpt_platform/frontend/**"
  pull_request:
    paths:
      - ".github/workflows/platform-frontend-ci.yml"
      - "autogpt_platform/frontend/**"
  merge_group:

defaults:
  run:
    shell: bash
    working-directory: autogpt_platform/frontend

jobs:
  lint:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "21"

      - name: Install dependencies
        run: |
          yarn install --frozen-lockfile

      - name: Run lint
        run: |
          yarn lint

  test:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        browser: [chromium, webkit]

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          submodules: recursive

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "21"

      - name: Free Disk Space (Ubuntu)
        uses: jlumbroso/free-disk-space@main
        with:
          large-packages: false # slow
          docker-images: false # limited benefit

      - name: Copy default supabase .env
        run: |
          cp ../supabase/docker/.env.example ../.env

      - name: Copy backend .env
        run: |
          cp ../backend/.env.example ../backend/.env

      - name: Run docker compose
        run: |
          docker compose -f ../docker-compose.yml up -d

      - name: Install dependencies
        run: |
          yarn install --frozen-lockfile

      - name: Setup Builder .env
        run: |
          cp .env.example .env

      - name: Install Browser '${{ matrix.browser }}'
        run: yarn playwright install --with-deps ${{ matrix.browser }}

      - name: Run tests
        run: |
          yarn test --project=${{ matrix.browser }}

      - name: Print Docker Compose logs in debug mode
        if: runner.debug
        run: |
          docker compose -f ../docker-compose.yml logs

      - uses: actions/upload-artifact@v4
        if: ${{ !cancelled() }}
        with:
          name: playwright-report-${{ matrix.browser }}
          path: playwright-report/
          retention-days: 30
@@ -1,12 +1,12 @@
name: Repo - Pull Request auto-label
name: "Pull Request auto-label"

on:
  # So that PRs touching the same files as the push are updated
  push:
    branches: [ master, dev, release-* ]
    branches: [ master, development, release-* ]
    paths-ignore:
      - 'classic/forge/tests/vcr_cassettes'
      - 'classic/benchmark/reports/**'
      - 'autogpt/tests/vcr_cassettes'
      - 'benchmark/reports/**'
  # So that the `dirtyLabel` is removed if conflicts are resolved
  # We recommend `pull_request_target` so that github secrets are available.
  # In `pull_request` we wouldn't be able to change labels of fork PRs
@@ -1,27 +1,27 @@
name: Classic - Python checks
name: Python checks

on:
  push:
    branches: [ master, dev, ci-test* ]
    branches: [ master, development, ci-test* ]
    paths:
      - '.github/workflows/classic-python-checks-ci.yml'
      - 'classic/original_autogpt/**'
      - 'classic/forge/**'
      - 'classic/benchmark/**'
      - '.github/workflows/lint-ci.yml'
      - 'autogpt/**'
      - 'forge/**'
      - 'benchmark/**'
      - '**.py'
      - '!classic/forge/tests/vcr_cassettes'
      - '!autogpt/tests/vcr_cassettes'
  pull_request:
    branches: [ master, dev, release-* ]
    branches: [ master, development, release-* ]
    paths:
      - '.github/workflows/classic-python-checks-ci.yml'
      - 'classic/original_autogpt/**'
      - 'classic/forge/**'
      - 'classic/benchmark/**'
      - '.github/workflows/lint-ci.yml'
      - 'autogpt/**'
      - 'forge/**'
      - 'benchmark/**'
      - '**.py'
      - '!classic/forge/tests/vcr_cassettes'
      - '!autogpt/tests/vcr_cassettes'

concurrency:
  group: ${{ format('classic-python-checks-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
  group: ${{ format('lint-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

defaults:
@@ -40,18 +40,18 @@ jobs:
        uses: dorny/paths-filter@v3
        with:
          filters: |
            original_autogpt:
              - classic/original_autogpt/autogpt/**
              - classic/original_autogpt/tests/**
              - classic/original_autogpt/poetry.lock
            autogpt:
              - autogpt/autogpt/**
              - autogpt/tests/**
              - autogpt/poetry.lock
            forge:
              - classic/forge/forge/**
              - classic/forge/tests/**
              - classic/forge/poetry.lock
              - forge/forge/**
              - forge/tests/**
              - forge/poetry.lock
            benchmark:
              - classic/benchmark/agbenchmark/**
              - classic/benchmark/tests/**
              - classic/benchmark/poetry.lock
              - benchmark/agbenchmark/**
              - benchmark/tests/**
              - benchmark/poetry.lock
    outputs:
      changed-parts: ${{ steps.changes-in.outputs.changes }}

@@ -89,23 +89,23 @@ jobs:
      # Install dependencies

      - name: Install Python dependencies
        run: poetry -C classic/${{ matrix.sub-package }} install
        run: poetry -C ${{ matrix.sub-package }} install

      # Lint

      - name: Lint (isort)
        run: poetry run isort --check .
        working-directory: classic/${{ matrix.sub-package }}
        working-directory: ${{ matrix.sub-package }}

      - name: Lint (Black)
        if: success() || failure()
        run: poetry run black --check .
        working-directory: classic/${{ matrix.sub-package }}
        working-directory: ${{ matrix.sub-package }}

      - name: Lint (Flake8)
        if: success() || failure()
        run: poetry run flake8 .
        working-directory: classic/${{ matrix.sub-package }}
        working-directory: ${{ matrix.sub-package }}

  types:
    needs: get-changed-parts
@@ -141,11 +141,11 @@ jobs:
      # Install dependencies

      - name: Install Python dependencies
        run: poetry -C classic/${{ matrix.sub-package }} install
        run: poetry -C ${{ matrix.sub-package }} install

      # Typecheck

      - name: Typecheck
        if: success() || failure()
        run: poetry run pyright
        working-directory: classic/${{ matrix.sub-package }}
        working-directory: ${{ matrix.sub-package }}
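`dorny/paths-filter` exposes a `changes` output that is a JSON array of the filter names whose patterns matched, which this workflow feeds through `fromJson` into the `sub-package` job matrix. A Python sketch of that fan-out logic (filter patterns here are heavily simplified prefixes, not the real glob set):

```python
import json
from fnmatch import fnmatch

# Simplified stand-ins for the paths-filter definitions above
# (fnmatch's `*` crosses `/`, so one pattern per part suffices here).
FILTERS = {
    "autogpt": "autogpt/*",
    "forge": "forge/*",
    "benchmark": "benchmark/*",
}

def changed_parts(changed_files: list[str]) -> str:
    """Return a JSON array of matched filter names, like the `changes` output."""
    matched = [
        name for name, pattern in FILTERS.items()
        if any(fnmatch(path, pattern) for path in changed_files)
    ]
    return json.dumps(matched)

# Each entry becomes one matrix.sub-package job via fromJson(...) above.
print(changed_parts(["forge/forge/agent.py"]))  # -> ["forge"]
```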
@@ -1,21 +0,0 @@
name: Repo - Enforce dev as base branch
on:
  pull_request_target:
    branches: [ master ]
    types: [ opened ]

jobs:
  check_pr_target:
    runs-on: ubuntu-latest
    permissions:
      pull-requests: write
    steps:
      - name: Check if PR is from dev or hotfix
        if: ${{ !(startsWith(github.event.pull_request.head.ref, 'hotfix/') || github.event.pull_request.head.ref == 'dev') }}
        run: |
          gh pr comment ${{ github.event.number }} --repo "$REPO" \
            --body $'This PR targets the `master` branch but does not come from `dev` or a `hotfix/*` branch.\n\nAutomatically setting the base branch to `dev`.'
          gh pr edit ${{ github.event.number }} --base dev --repo "$REPO"
        env:
          GITHUB_TOKEN: ${{ github.token }}
          REPO: ${{ github.repository }}
2 .github/workflows/repo-stats.yml vendored
@@ -1,4 +1,4 @@
name: Repo - Github Stats
name: github-repo-stats

on:
  schedule:
32 .github/workflows/repo-workflow-checker.yml vendored
@@ -1,32 +0,0 @@
name: Repo - PR Status Checker
on:
  pull_request:
    types: [opened, synchronize, reopened]
  merge_group:

jobs:
  status-check:
    name: Check PR Status
    runs-on: ubuntu-latest
    steps:
      # - name: Wait some time for all actions to start
      #   run: sleep 30
      - uses: actions/checkout@v4
      # with:
      #   fetch-depth: 0
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install requests
      - name: Check PR Status
        run: |
          echo "Current directory before running Python script:"
          pwd
          echo "Attempting to run Python script:"
          python .github/workflows/scripts/check_actions_status.py
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
116 .github/workflows/scripts/check_actions_status.py vendored
@@ -1,116 +0,0 @@
import json
import os
import requests
import sys
import time
from typing import Dict, List, Tuple

CHECK_INTERVAL = 30


def get_environment_variables() -> Tuple[str, str, str, str, str]:
    """Retrieve and return necessary environment variables."""
    try:
        with open(os.environ["GITHUB_EVENT_PATH"]) as f:
            event = json.load(f)

        # Handle both PR and merge group events
        if "pull_request" in event:
            sha = event["pull_request"]["head"]["sha"]
        else:
            sha = os.environ["GITHUB_SHA"]

        return (
            os.environ["GITHUB_API_URL"],
            os.environ["GITHUB_REPOSITORY"],
            sha,
            os.environ["GITHUB_TOKEN"],
            os.environ["GITHUB_RUN_ID"],
        )
    except KeyError as e:
        print(f"Error: Missing required environment variable or event data: {e}")
        sys.exit(1)


def make_api_request(url: str, headers: Dict[str, str]) -> Dict:
    """Make an API request and return the JSON response."""
    try:
        print("Making API request to:", url)
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()
        return response.json()
    except requests.RequestException as e:
        print(f"Error: API request failed. {e}")
        sys.exit(1)


def process_check_runs(check_runs: List[Dict]) -> Tuple[bool, bool]:
    """Process check runs and return their status."""
    runs_in_progress = False
    all_others_passed = True

    for run in check_runs:
        if str(run["name"]) != "Check PR Status":
            status = run["status"]
            conclusion = run["conclusion"]

            if status == "completed":
                if conclusion not in ["success", "skipped", "neutral"]:
                    all_others_passed = False
                    print(
                        f"Check run {run['name']} (ID: {run['id']}) has conclusion: {conclusion}"
                    )
            else:
                runs_in_progress = True
                print(f"Check run {run['name']} (ID: {run['id']}) is still {status}.")
                all_others_passed = False
        else:
            print(
                f"Skipping check run {run['name']} (ID: {run['id']}) as it is the current run."
            )

    return runs_in_progress, all_others_passed


def main():
    api_url, repo, sha, github_token, current_run_id = get_environment_variables()

    endpoint = f"{api_url}/repos/{repo}/commits/{sha}/check-runs"
    headers = {
        "Accept": "application/vnd.github.v3+json",
    }
    if github_token:
        headers["Authorization"] = f"token {github_token}"

    print(f"Current run ID: {current_run_id}")

    while True:
        data = make_api_request(endpoint, headers)

        check_runs = data["check_runs"]

        print("Processing check runs...")

        print(check_runs)

        runs_in_progress, all_others_passed = process_check_runs(check_runs)

        if not runs_in_progress:
            break

        print(
            "Some check runs are still in progress. "
            f"Waiting {CHECK_INTERVAL} seconds before checking again..."
        )
        time.sleep(CHECK_INTERVAL)

    if all_others_passed:
        print("All other completed check runs have passed. This check passes.")
        sys.exit(0)
    else:
        print("Some check runs have failed or have not completed. This check fails.")
        sys.exit(1)


if __name__ == "__main__":
    main()
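For reference, `process_check_runs` consumes the `check_runs` array from GitHub's `GET /repos/{owner}/{repo}/commits/{ref}/check-runs` endpoint. A minimal usage sketch of the function defined above, with a hand-built payload in that shape (values are made up; only the fields the script reads are included):

```python
sample_check_runs = [
    # The script skips its own check run by name.
    {"id": 1, "name": "Check PR Status", "status": "in_progress", "conclusion": None},
    {"id": 2, "name": "Forge CI", "status": "completed", "conclusion": "success"},
    {"id": 3, "name": "Python checks", "status": "completed", "conclusion": "skipped"},
]

in_progress, others_passed = process_check_runs(sample_check_runs)
print(in_progress, others_passed)  # -> False True: only the current run is pending
```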
16 .gitignore vendored
@@ -1,7 +1,7 @@
## Original ignores
.github_access_token
classic/original_autogpt/keys.py
classic/original_autogpt/*.json
autogpt/keys.py
autogpt/*.json
auto_gpt_workspace/*
*.mpeg
.env
@@ -32,6 +32,7 @@ dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
@@ -157,11 +158,11 @@ openai/
CURRENT_BULLETIN.md

# AgBenchmark
classic/benchmark/agbenchmark/reports/
agbenchmark/reports/

# Nodejs
package-lock.json

package.json

# Allow for locally private items
# private
@@ -169,10 +170,3 @@ pri*
# ignore
ig*
.github_access_token
LICENSE.rtf
autogpt_platform/backend/settings.py
/.auth
/autogpt_platform/frontend/.auth

*.ign.*
.test-contents
7 .gitmodules vendored
@@ -1,6 +1,3 @@
[submodule "classic/forge/tests/vcr_cassettes"]
    path = classic/forge/tests/vcr_cassettes
[submodule "autogpt/tests/vcr_cassettes"]
    path = autogpt/tests/vcr_cassettes
    url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
[submodule "autogpt_platform/supabase"]
    path = autogpt_platform/supabase
    url = https://github.com/supabase/supabase.git
@@ -10,132 +10,28 @@ repos:
      - id: check-symlinks
      - id: debug-statements

  - repo: https://github.com/Yelp/detect-secrets
    rev: v1.5.0
    hooks:
      - id: detect-secrets
        name: Detect secrets
        description: Detects high entropy strings that are likely to be passwords.
        files: ^autogpt_platform/
        stages: [push]

  - repo: local
    # For proper type checking, all dependencies need to be up-to-date.
    # It's also a good idea to check that poetry.lock is consistent with pyproject.toml.
    hooks:
      - id: poetry-install
        name: Check & Install dependencies - AutoGPT Platform - Backend
        alias: poetry-install-platform-backend
        entry: poetry -C autogpt_platform/backend install
        # include autogpt_libs source (since it's a path dependency)
        files: ^autogpt_platform/(backend|autogpt_libs)/poetry\.lock$
        types: [file]
        language: system
        pass_filenames: false

      - id: poetry-install
        name: Check & Install dependencies - AutoGPT Platform - Libs
        alias: poetry-install-platform-libs
        entry: poetry -C autogpt_platform/autogpt_libs install
        files: ^autogpt_platform/autogpt_libs/poetry\.lock$
        types: [file]
        language: system
        pass_filenames: false

      - id: poetry-install
        name: Check & Install dependencies - Classic - AutoGPT
        alias: poetry-install-classic-autogpt
        entry: poetry -C classic/original_autogpt install
        # include forge source (since it's a path dependency)
        files: ^classic/(original_autogpt|forge)/poetry\.lock$
        types: [file]
        language: system
        pass_filenames: false

      - id: poetry-install
        name: Check & Install dependencies - Classic - Forge
        alias: poetry-install-classic-forge
        entry: poetry -C classic/forge install
        files: ^classic/forge/poetry\.lock$
        types: [file]
        language: system
        pass_filenames: false

      - id: poetry-install
        name: Check & Install dependencies - Classic - Benchmark
        alias: poetry-install-classic-benchmark
        entry: poetry -C classic/benchmark install
        files: ^classic/benchmark/poetry\.lock$
        types: [file]
        language: system
        pass_filenames: false

  - repo: local
    # For proper type checking, Prisma client must be up-to-date.
    hooks:
      - id: prisma-generate
        name: Prisma Generate - AutoGPT Platform - Backend
        alias: prisma-generate-platform-backend
        entry: bash -c 'cd autogpt_platform/backend && poetry run prisma generate'
        # include everything that triggers poetry install + the prisma schema
        files: ^autogpt_platform/((backend|autogpt_libs)/poetry\.lock|backend/schema.prisma)$
        types: [file]
        language: system
        pass_filenames: false

  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.7.2
    hooks:
      - id: ruff
        name: Lint (Ruff) - AutoGPT Platform - Backend
        alias: ruff-lint-platform-backend
        files: ^autogpt_platform/backend/
        args: [--fix]

      - id: ruff
        name: Lint (Ruff) - AutoGPT Platform - Libs
        alias: ruff-lint-platform-libs
        files: ^autogpt_platform/autogpt_libs/
        args: [--fix]

      - id: ruff-format
        name: Format (Ruff) - AutoGPT Platform - Libs
        alias: ruff-lint-platform-libs
        files: ^autogpt_platform/autogpt_libs/

  - repo: local
    # isort needs the context of which packages are installed to function, so we
    # can't use a vendored isort pre-commit hook (which runs in its own isolated venv).
    hooks:
      - id: isort
        name: Lint (isort) - AutoGPT Platform - Backend
        alias: isort-platform-backend
        entry: poetry -P autogpt_platform/backend run isort -p backend
        files: ^autogpt_platform/backend/
      - id: isort-autogpt
        name: Lint (isort) - AutoGPT
        entry: poetry -C autogpt run isort
        files: ^autogpt/
        types: [file, python]
        language: system

      - id: isort
        name: Lint (isort) - Classic - AutoGPT
        alias: isort-classic-autogpt
        entry: poetry -P classic/original_autogpt run isort -p autogpt
        files: ^classic/original_autogpt/
      - id: isort-forge
        name: Lint (isort) - Forge
        entry: poetry -C forge run isort
        files: ^forge/
        types: [file, python]
        language: system

      - id: isort
        name: Lint (isort) - Classic - Forge
        alias: isort-classic-forge
        entry: poetry -P classic/forge run isort -p forge
        files: ^classic/forge/
        types: [file, python]
        language: system

      - id: isort
        name: Lint (isort) - Classic - Benchmark
        alias: isort-classic-benchmark
        entry: poetry -P classic/benchmark run isort -p agbenchmark
        files: ^classic/benchmark/
      - id: isort-benchmark
        name: Lint (isort) - Benchmark
        entry: poetry -C benchmark run isort
        files: ^benchmark/
        types: [file, python]
        language: system

@@ -145,7 +41,8 @@ repos:
    # everything in .gitignore, so it works fine without any config or arguments.
    hooks:
      - id: black
        name: Format (Black)
        name: Lint (Black)
        language_version: python3.10

  - repo: https://github.com/PyCQA/flake8
    rev: 7.0.0
@@ -153,106 +50,78 @@ repos:
    # them separately.
    hooks:
      - id: flake8
        name: Lint (Flake8) - Classic - AutoGPT
        alias: flake8-classic-autogpt
        files: ^classic/original_autogpt/(autogpt|scripts|tests)/
        args: [--config=classic/original_autogpt/.flake8]
        name: Lint (Flake8) - AutoGPT
        alias: flake8-autogpt
        files: ^autogpt/(autogpt|scripts|tests)/
        args: [--config=autogpt/.flake8]

      - id: flake8
        name: Lint (Flake8) - Classic - Forge
        alias: flake8-classic-forge
        files: ^classic/forge/(forge|tests)/
        args: [--config=classic/forge/.flake8]
        name: Lint (Flake8) - Forge
        alias: flake8-forge
        files: ^forge/(forge|tests)/
        args: [--config=forge/.flake8]

      - id: flake8
        name: Lint (Flake8) - Classic - Benchmark
        alias: flake8-classic-benchmark
        files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
        args: [--config=classic/benchmark/.flake8]
        name: Lint (Flake8) - Benchmark
        alias: flake8-benchmark
        files: ^benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
        args: [--config=benchmark/.flake8]

  - repo: local
    # To have watertight type checking, we check *all* the files in an affected
    # project. To trigger on poetry.lock we also reset the file `types` filter.
    hooks:
      - id: pyright
        name: Typecheck - AutoGPT Platform - Backend
        alias: pyright-platform-backend
        entry: poetry -C autogpt_platform/backend run pyright
        name: Typecheck - AutoGPT
        alias: pyright-autogpt
        entry: poetry -C autogpt run pyright
        args: [-p, autogpt, autogpt]
        # include forge source (since it's a path dependency) but exclude *_test.py files:
        files: ^autogpt_platform/(backend/((backend|test)/|(\w+\.py|poetry\.lock)$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
        files: ^(autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
        types: [file]
        language: system
        pass_filenames: false

      - id: pyright
        name: Typecheck - AutoGPT Platform - Libs
        alias: pyright-platform-libs
        entry: poetry -C autogpt_platform/autogpt_libs run pyright
        files: ^autogpt_platform/autogpt_libs/(autogpt_libs/|poetry\.lock$)
        name: Typecheck - Forge
        alias: pyright-forge
        entry: poetry -C forge run pyright
        args: [-p, forge, forge]
        files: ^forge/(forge/|poetry\.lock$)
        types: [file]
        language: system
        pass_filenames: false

      - id: pyright
        name: Typecheck - Classic - AutoGPT
        alias: pyright-classic-autogpt
        entry: poetry -C classic/original_autogpt run pyright
        # include forge source (since it's a path dependency) but exclude *_test.py files:
        files: ^(classic/original_autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
        types: [file]
        language: system
        pass_filenames: false

      - id: pyright
        name: Typecheck - Classic - Forge
        alias: pyright-classic-forge
        entry: poetry -C classic/forge run pyright
        files: ^classic/forge/(forge/|poetry\.lock$)
        types: [file]
        language: system
        pass_filenames: false

      - id: pyright
        name: Typecheck - Classic - Benchmark
        alias: pyright-classic-benchmark
        entry: poetry -C classic/benchmark run pyright
        files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
        name: Typecheck - Benchmark
        alias: pyright-benchmark
        entry: poetry -C benchmark run pyright
        args: [-p, benchmark, benchmark]
        files: ^benchmark/(agbenchmark|tests)/
        types: [file]
        language: system
        pass_filenames: false

  - repo: local
    hooks:
      - id: pytest
        name: Run tests - AutoGPT Platform - Backend
        alias: pytest-platform-backend
        entry: bash -c 'cd autogpt_platform/backend && poetry run pytest'
        # include autogpt_libs source (since it's a path dependency) but exclude *_test.py files:
        files: ^autogpt_platform/(backend/((backend|test)/|poetry\.lock$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
        language: system
        pass_filenames: false

      - id: pytest
        name: Run tests - Classic - AutoGPT (excl. slow tests)
        alias: pytest-classic-autogpt
        entry: bash -c 'cd classic/original_autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
      - id: pytest-autogpt
        name: Run tests - AutoGPT (excl. slow tests)
        entry: bash -c 'cd autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
        # include forge source (since it's a path dependency) but exclude *_test.py files:
        files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
        files: ^(autogpt/((autogpt|tests)/|poetry\.lock$)|forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
        language: system
        pass_filenames: false

      - id: pytest
        name: Run tests - Classic - Forge (excl. slow tests)
        alias: pytest-classic-forge
        entry: bash -c 'cd classic/forge && poetry run pytest --cov=forge -m "not slow"'
        files: ^classic/forge/(forge/|tests/|poetry\.lock$)
      - id: pytest-forge
        name: Run tests - Forge (excl. slow tests)
        entry: bash -c 'cd forge && poetry run pytest --cov=forge -m "not slow"'
        files: ^forge/(forge/|tests/|poetry\.lock$)
        language: system
        pass_filenames: false

      - id: pytest
        name: Run tests - Classic - Benchmark
        alias: pytest-classic-benchmark
        entry: bash -c 'cd classic/benchmark && poetry run pytest --cov=benchmark'
        files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
      - id: pytest-benchmark
        name: Run tests - Benchmark
        entry: bash -c 'cd benchmark && poetry run pytest --cov=benchmark'
        files: ^benchmark/(agbenchmark/|tests/|poetry\.lock$)
        language: system
        pass_filenames: false
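Several hooks above gate on dense `files` regexes; for example, the "Typecheck - AutoGPT" hook matches project sources plus the forge path dependency while excluding `*_test.py` files. A quick Python check of which paths that pattern accepts (the pattern is copied from the hook; the paths are examples):

```python
import re

# The `files` pattern from the "Typecheck - AutoGPT" hook above.
FILES = re.compile(
    r"^(autogpt/((autogpt|scripts|tests)/|poetry\.lock$)"
    r"|forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)"
)

for path in [
    "autogpt/autogpt/app/main.py",   # matches: project source
    "forge/forge/llm.py",            # matches: path-dependency source
    "forge/forge/llm_test.py",       # no match: the (?<!_test) lookbehind excludes it
    "docs/index.md",                 # no match: outside both projects
]:
    print(path, bool(FILES.match(path)))
```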
62 .vscode/all-projects.code-workspace vendored
@@ -1,62 +0,0 @@
{
  "folders": [
    {
      "name": "frontend",
      "path": "../autogpt_platform/frontend"
    },
    {
      "name": "backend",
      "path": "../autogpt_platform/backend"
    },
    {
      "name": "market",
      "path": "../autogpt_platform/market"
    },
    {
      "name": "lib",
      "path": "../autogpt_platform/autogpt_libs"
    },
    {
      "name": "infra",
      "path": "../autogpt_platform/infra"
    },
    {
      "name": "docs",
      "path": "../docs"
    },

    {
      "name": "classic - autogpt",
      "path": "../classic/original_autogpt"
    },
    {
      "name": "classic - benchmark",
      "path": "../classic/benchmark"
    },
    {
      "name": "classic - forge",
      "path": "../classic/forge"
    },
    {
      "name": "classic - frontend",
      "path": "../classic/frontend"
    },
    {
      "name": "[root]",
      "path": ".."
    }
  ],
  "settings": {
    "python.analysis.typeCheckingMode": "basic"
  },
  "extensions": {
    "recommendations": [
      "charliermarsh.ruff",
      "dart-code.flutter",
      "ms-python.black-formatter",
      "ms-python.vscode-pylance",
      "prisma.prisma",
      "qwtel.sqlite-viewer"
    ]
  }
}
67 .vscode/launch.json vendored
@@ -1,67 +0,0 @@
{
  "version": "0.2.0",
  "configurations": [
    {
      "name": "Frontend: Server Side",
      "type": "node-terminal",
      "request": "launch",
      "cwd": "${workspaceFolder}/autogpt_platform/frontend",
      "command": "yarn dev"
    },
    {
      "name": "Frontend: Client Side",
      "type": "msedge",
      "request": "launch",
      "url": "http://localhost:3000"
    },
    {
      "name": "Frontend: Full Stack",
      "type": "node-terminal",

      "request": "launch",
      "command": "yarn dev",
      "cwd": "${workspaceFolder}/autogpt_platform/frontend",
      "serverReadyAction": {
        "pattern": "- Local:.+(https?://.+)",
        "uriFormat": "%s",
        "action": "debugWithEdge"
      }
    },
    {
      "name": "Backend",
      "type": "debugpy",
      "request": "launch",
      "module": "backend.app",
      // "env": {
      //   "ENV": "dev"
      // },
      "envFile": "${workspaceFolder}/backend/.env",
      "justMyCode": false,
      "cwd": "${workspaceFolder}/autogpt_platform/backend"
    },
    {
      "name": "Marketplace",
      "type": "debugpy",
      "request": "launch",
      "module": "autogpt_platform.market.main",
      "env": {
        "ENV": "dev"
      },
      "envFile": "${workspaceFolder}/market/.env",
      "justMyCode": false,
      "cwd": "${workspaceFolder}/market"
    }
  ],
  "compounds": [
    {
      "name": "Everything",
      "configurations": ["Backend", "Frontend: Full Stack"],
      // "preLaunchTask": "${defaultBuildTask}",
      "stopAll": true,
      "presentation": {
        "hidden": false,
        "order": 0
      }
    }
  ]
}
@@ -1,5 +1,5 @@
# AutoGPT Contribution Guide
If you are reading this, you are probably looking for the full **[contribution guide]**,
If you are reading this, you are probably looking for our **[contribution guide]**,
which is part of our [wiki].

Also check out our [🚀 Roadmap][roadmap] for information about our priorities and associated tasks.
@@ -10,25 +10,20 @@ Also check out our [🚀 Roadmap][roadmap] for information about our priorities
[roadmap]: https://github.com/Significant-Gravitas/AutoGPT/discussions/6971
[kanban board]: https://github.com/orgs/Significant-Gravitas/projects/1

## Contributing to the AutoGPT Platform Folder
All contributions to [the autogpt_platform folder](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform) will be under our [Contribution License Agreement](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform/Contributor%20License%20Agreement%20(CLA).md). By making a pull request contributing to this folder, you agree to the terms of our CLA for your contribution. All contributions to other folders will be under the MIT license.

## In short
1. Avoid duplicate work, issues, PRs etc.
2. We encourage you to collaborate with fellow community members on some of our bigger
   [todo's][roadmap]!
   * We highly recommend posting your idea and discussing it in the [dev channel].
3. Create a draft PR when starting work on bigger changes.
4. Adhere to the [Code Guidelines]
4. Create a draft PR when starting work on bigger changes.
3. Please also consider contributing something other than code; see the
   [contribution guide] for options.
5. Clearly explain your changes when submitting a PR.
6. Don't submit broken code: test/validate your changes.
7. Avoid making unnecessary changes, especially if they're purely based on your personal
   preferences. Doing so is the maintainers' job. ;-)
8. Please also consider contributing something other than code; see the
   [contribution guide] for options.

[dev channel]: https://discord.com/channels/1092243196446249134/1095817829405704305
[code guidelines]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing#code-guidelines

If you wish to get involved with the project (beyond just contributing PRs), please read the
wiki page about [Catalyzing](https://github.com/Significant-Gravitas/AutoGPT/wiki/Catalyzing).
@@ -29,7 +29,7 @@ ENV PATH="$POETRY_HOME/bin:$PATH"
RUN poetry config installer.max-workers 10

WORKDIR /app/autogpt
COPY original_autogpt/pyproject.toml original_autogpt/poetry.lock ./
COPY autogpt/pyproject.toml autogpt/poetry.lock ./

# Include forge so it can be used as a path dependency
COPY forge/ ../forge
@@ -42,19 +42,20 @@ ENTRYPOINT ["poetry", "run", "autogpt"]
CMD []

# dev build -> include everything
FROM autogpt-base AS autogpt-dev
FROM autogpt-base as autogpt-dev
RUN poetry install --no-cache --no-root \
  && rm -rf $(poetry env info --path)/src
ONBUILD COPY original_autogpt/ ./
ONBUILD RUN mkdir -p ./data
ONBUILD COPY autogpt/ ./

# release build -> include bare minimum
FROM autogpt-base AS autogpt-release
FROM autogpt-base as autogpt-release
RUN poetry install --no-cache --no-root --without dev \
  && rm -rf $(poetry env info --path)/src
ONBUILD COPY original_autogpt/ ./autogpt
ONBUILD COPY original_autogpt/README.md ./README.md
ONBUILD RUN mkdir -p ./data
ONBUILD COPY autogpt/autogpt/ ./autogpt
ONBUILD COPY autogpt/scripts/ ./scripts
ONBUILD COPY autogpt/plugins/ ./plugins
ONBUILD COPY autogpt/README.md ./README.md
ONBUILD RUN mkdir ./data

FROM autogpt-${BUILD_TYPE} AS autogpt
RUN poetry install --only-root
8
LICENSE
@@ -1,13 +1,7 @@
All portions of this repository are under one of two licenses. The majority of the AutoGPT repository is under the MIT License below. The autogpt_platform folder is under the
Polyform Shield License.

MIT License

Copyright (c) 2023 Toran Bruce Richards

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
@@ -15,11 +9,9 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
@@ -2,11 +2,11 @@

> For the complete getting started [tutorial series](https://aiedge.medium.com/autogpt-forge-e3de53cc58ec) <- click here

Welcome to the Quickstart Guide! This guide will walk you through setting up, building, and running your own AutoGPT agent. Whether you're a seasoned AI developer or just starting out, this guide will provide you with the steps to jumpstart your journey in AI development with AutoGPT.
Welcome to the Quickstart Guide! This guide will walk you through the process of setting up and running your own AutoGPT agent. Whether you're a seasoned AI developer or just starting out, this guide will provide you with the necessary steps to jumpstart your journey in the world of AI development with AutoGPT.

## System Requirements

This project supports Linux (Debian-based), Mac, and Windows Subsystem for Linux (WSL). If you use a Windows system, you must install WSL. You can find the installation instructions for WSL [here](https://learn.microsoft.com/en-us/windows/wsl/).
This project supports Linux (Debian based), Mac, and Windows Subsystem for Linux (WSL). If you are using a Windows system, you will need to install WSL. You can find the installation instructions for WSL [here](https://learn.microsoft.com/en-us/windows/wsl/).

## Getting Setup
@@ -14,40 +14,40 @@ This project supports Linux (Debian-based), Mac, and Windows Subsystem for Linux
   To fork the repository, follow these steps:
   - Navigate to the main page of the repository.

   - In the top-right corner of the page, click Fork.

   - On the next page, select your GitHub account to create the fork.
   - On the next page, select your GitHub account to create the fork under.
   - Wait for the forking process to complete. You now have a copy of the repository in your GitHub account.

2. **Clone the Repository**
   To clone the repository, you need to have Git installed on your system. If you don't have Git installed, download it from [here](https://git-scm.com/downloads). Once you have Git installed, follow these steps:
   To clone the repository, you need to have Git installed on your system. If you don't have Git installed, you can download it from [here](https://git-scm.com/downloads). Once you have Git installed, follow these steps:
   - Open your terminal.
   - Navigate to the directory where you want to clone the repository.
   - Run the git clone command for the fork you just created

   - Then open your project in your IDE

4. **Setup the Project**
   Next, we need to set up the required dependencies. We have a tool to help you perform all the tasks on the repo.
   Next we need to setup the required dependencies. We have a tool for helping you do all the tasks you need to on the repo.
   It can be accessed by running the `run` command by typing `./run` in the terminal.

   The first command you need to use is `./run setup`. This will guide you through setting up your system.
   Initially, you will get instructions for installing Flutter and Chrome and setting up your GitHub access token like the following image:
   The first command you need to use is `./run setup` This will guide you through the process of setting up your system.
   Initially you will get instructions for installing flutter, chrome and setting up your github access token like the following image:

### For Windows Users

If you're a Windows user and experience issues after installing WSL, follow the steps below to resolve them.

#### Update WSL
Run the following command in Powershell or Command Prompt:
Run the following command in Powershell or Command Prompt to:
1. Enable the optional WSL and Virtual Machine Platform components.
2. Download and install the latest Linux kernel.
3. Set WSL 2 as the default.
@@ -73,17 +73,17 @@ dos2unix ./run
After executing the above commands, running `./run setup` should work successfully.

#### Store Project Files within the WSL File System
If you continue to experience issues, consider storing your project files within the WSL file system instead of the Windows file system. This method avoids path translations and permissions issues and provides a more consistent development environment.
If you continue to experience issues, consider storing your project files within the WSL file system instead of the Windows file system. This method avoids issues related to path translations and permissions and provides a more consistent development environment.

You can keep running the command to get feedback on where you are up to with your setup.
When setup has been completed, the command will return an output like this:

## Creating Your Agent

After completing the setup, the next step is to create your agent template.
Execute the command `./run agent create YOUR_AGENT_NAME`, where `YOUR_AGENT_NAME` should be replaced with your chosen name.
Execute the command `./run agent create YOUR_AGENT_NAME`, where `YOUR_AGENT_NAME` should be replaced with a name of your choosing.

Tips for naming your agent:
* Give it its own unique name, or name it after yourself
@@ -91,7 +91,7 @@ Tips for naming your agent:

Examples: `SwiftyosAssistant`, `PwutsPRAgent`, `MySuperAgent`

## Running your Agent

@@ -99,23 +99,23 @@ Your agent can be started using the command: `./run agent start YOUR_AGENT_NAME`

This starts the agent on the URL: `http://localhost:8000/`

The front end can be accessed from `http://localhost:8000/`; first, you must log in using either a Google account or your GitHub account.
The frontend can be accessed from `http://localhost:8000/`, you will first need to login using either a google account or your github account.

Upon logging in, you will get a page that looks something like this: your task history down the left-hand side of the page, and the 'chat' window to send tasks to your agent.
Upon logging in you will get a page that looks something like this. With your task history down the left hand side of the page and the 'chat' window to send tasks to your agent.

When you have finished with your agent or just need to restart it, use Ctrl-C to end the session. Then, you can re-run the start command.
When you have finished with your agent, or if you just need to restart it, use Ctl-C to end the session then you can re-run the start command.

If you are having issues and want to ensure the agent has been stopped, there is a `./run agent stop` command, which will kill the process using port 8000, which should be the agent.
If you are having issues and want to ensure the agent has been stopped there is a `./run agent stop` command which will kill the process using port 8000, which should be the agent.

## Benchmarking your Agent

The benchmarking system can also be accessed using the CLI:
The benchmarking system can also be accessed using the cli too:

```bash
agpt % ./run benchmark
@@ -160,10 +160,10 @@ The benchmark has been split into different categories of skills you can test yo
./run benchmark tests list
```

Finally, you can run the benchmark with
Finally you can run the benchmark with

```bash
./run benchmark start YOUR_AGENT_NAME
101
README.md
@@ -1,71 +1,17 @@
# AutoGPT: Build, Deploy, and Run AI Agents
# AutoGPT: build & use AI agents

[](https://discord.gg/autogpt)
[](https://twitter.com/Auto_GPT)
[](https://opensource.org/licenses/MIT)

**AutoGPT** is a powerful platform that allows you to create, deploy, and manage continuous AI agents that automate complex workflows.
**AutoGPT** is a generalist LLM based AI agent that can autonomously accomplish minor tasks.

## Hosting Options
- Download to self-host
- [Join the Waitlist](https://bit.ly/3ZDijAI) for the cloud-hosted beta
**Examples**:

## How to Setup for Self-Hosting
> [!NOTE]
> Setting up and hosting the AutoGPT Platform yourself is a technical process.
> If you'd rather something that just works, we recommend [joining the waitlist](https://bit.ly/3ZDijAI) for the cloud-hosted beta.
- Look up and summarize this research paper
- Write marketing copy for food supplements
- Write a blog post detailing the news in AI

https://github.com/user-attachments/assets/d04273a5-b36a-4a37-818e-f631ce72d603

This tutorial assumes you have Docker, VSCode, git and npm installed.

### 🧱 AutoGPT Frontend

The AutoGPT frontend is where users interact with our powerful AI automation platform. It offers multiple ways to engage with and leverage our AI agents. This is the interface where you'll bring your AI automation ideas to life:

**Agent Builder:** For those who want to customize, our intuitive, low-code interface allows you to design and configure your own AI agents.

**Workflow Management:** Build, modify, and optimize your automation workflows with ease. You build your agent by connecting blocks, where each block performs a single action.

**Deployment Controls:** Manage the lifecycle of your agents, from testing to production.

**Ready-to-Use Agents:** Don't want to build? Simply select from our library of pre-configured agents and put them to work immediately.

**Agent Interaction:** Whether you've built your own or are using pre-configured agents, easily run and interact with them through our user-friendly interface.

**Monitoring and Analytics:** Keep track of your agents' performance and gain insights to continually improve your automation processes.

[Read this guide](https://docs.agpt.co/platform/new_blocks/) to learn how to build your own custom blocks.

### 💽 AutoGPT Server

The AutoGPT Server is the powerhouse of our platform. This is where your agents run. Once deployed, agents can be triggered by external sources and can operate continuously. It contains all the essential components that make AutoGPT run smoothly.

**Source Code:** The core logic that drives our agents and automation processes.

**Infrastructure:** Robust systems that ensure reliable and scalable performance.

**Marketplace:** A comprehensive marketplace where you can find and deploy a wide range of pre-built agents.

### 🐙 Example Agents

Here are two examples of what you can do with AutoGPT:

1. **Generate Viral Videos from Trending Topics**
   - This agent reads topics on Reddit.
   - It identifies trending topics.
   - It then automatically creates a short-form video based on the content.

2. **Identify Top Quotes from Videos for Social Media**
   - This agent subscribes to your YouTube channel.
   - When you post a new video, it transcribes it.
   - It uses AI to identify the most impactful quotes to generate a summary.
   - Then, it writes a post to automatically publish to your social media.

These examples show just a glimpse of what you can achieve with AutoGPT! You can create customized workflows to build agents for any use case.

---
### Mission and Licensing
Our mission is to provide the tools, so that you can focus on what matters:

- 🏗️ **Building** - Lay the foundation for something amazing.
@@ -77,29 +23,19 @@ Be part of the revolution! **AutoGPT** is here to stay, at the forefront of AI i
**📖 [Documentation](https://docs.agpt.co)**
 | 
**🚀 [Contributing](CONTRIBUTING.md)**
 | 
**🛠️ [Build your own Agent - Quickstart](QUICKSTART.md)**

**Licensing:**

MIT License: The majority of the AutoGPT repository is under the MIT License.

Polyform Shield License: This license applies to the autogpt_platform folder.

For more information, see https://agpt.co/blog/introducing-the-autogpt-platform

---
## 🤖 AutoGPT Classic
> Below is information about the classic version of AutoGPT.

**🛠️ [Build your own Agent - Quickstart](classic/FORGE-QUICKSTART.md)**
## 🧱 Building blocks

### 🏗️ Forge

**Forge your own agent!** – Forge is a ready-to-go toolkit to build your own agent application. It handles most of the boilerplate code, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from [`forge`](/classic/forge/) can also be used individually to speed up development and reduce boilerplate in your agent project.
**Forge your own agent!** – Forge is a ready-to-go template for your agent application. All the boilerplate code is already handled, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from the [`forge.sdk`](/forge/forge/sdk) can also be used individually to speed up development and reduce boilerplate in your agent project.

🚀 [**Getting Started with Forge**](https://github.com/Significant-Gravitas/AutoGPT/blob/master/classic/forge/tutorials/001_getting_started.md) –
🚀 [**Getting Started with Forge**](https://github.com/Significant-Gravitas/AutoGPT/blob/master/forge/tutorials/001_getting_started.md) –
This guide will walk you through the process of creating your own agent and using the benchmark and user interface.

📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/forge) about Forge
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/forge) about Forge

### 🎯 Benchmark

@@ -109,7 +45,7 @@ This guide will walk you through the process of creating your own agent and usin

📦 [`agbenchmark`](https://pypi.org/project/agbenchmark/) on Pypi
 | 
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/benchmark) about the Benchmark
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/blob/master/benchmark) about the Benchmark

### 💻 UI

@@ -119,7 +55,7 @@ This guide will walk you through the process of creating your own agent and usin

The frontend works out-of-the-box with all agents in the repo. Just use the [CLI] to run your agent of choice!

📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/frontend) about the Frontend
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/frontend) about the Frontend

### ⌨️ CLI

@@ -158,8 +94,6 @@ To maintain a uniform standard and ensure seamless compatibility with many curre

---

## Stars stats

<p align="center">
<a href="https://star-history.com/#Significant-Gravitas/AutoGPT">
<picture>
@@ -169,10 +103,3 @@ To maintain a uniform standard and ensure seamless compatibility with many curre
</picture>
</a>
</p>

## ⚡ Contributors

<a href="https://github.com/Significant-Gravitas/AutoGPT/graphs/contributors" alt="View Contributors">
<img src="https://contrib.rocks/image?repo=Significant-Gravitas/AutoGPT&max=1000&columns=10" alt="Contributors" />
</a>

89
SECURITY.md
@@ -1,47 +1,66 @@
# Security Policy

## Reporting Security Issues
- [**Using AutoGPT Securely**](#using-AutoGPT-securely)
  - [Restrict Workspace](#restrict-workspace)
  - [Untrusted inputs](#untrusted-inputs)
  - [Data privacy](#data-privacy)
  - [Untrusted environments or networks](#untrusted-environments-or-networks)
  - [Multi-Tenant environments](#multi-tenant-environments)
- [**Reporting a Vulnerability**](#reporting-a-vulnerability)

We take the security of our project seriously. If you believe you have found a security vulnerability, please report it to us privately. **Please do not report security vulnerabilities through public GitHub issues, discussions, or pull requests.**
## Using AutoGPT Securely

> **Important Note**: Any code within the `classic/` folder is considered legacy, unsupported, and out of scope for security reports. We will not address security vulnerabilities in this deprecated code.
### Restrict Workspace

Instead, please report them via:
- [GitHub Security Advisory](https://github.com/Significant-Gravitas/AutoGPT/security/advisories/new)
<!--- [Huntr.dev](https://huntr.com/repos/significant-gravitas/autogpt) - where you may be eligible for a bounty-->
Since agents can read and write files, it is important to keep them restricted to a specific workspace. This happens by default *unless* RESTRICT_TO_WORKSPACE is set to False.

### Reporting Process
1. **Submit Report**: Use one of the above channels to submit your report
2. **Response Time**: Our team will acknowledge receipt of your report within 14 business days.
3. **Collaboration**: We will collaborate with you to understand and validate the issue
4. **Resolution**: We will work on a fix and coordinate the release process
Disabling RESTRICT_TO_WORKSPACE can increase security risks. However, if you still need to disable it, consider running AutoGPT inside a [sandbox](https://developers.google.com/code-sandboxing), to mitigate some of these risks.

### Disclosure Policy
- Please provide detailed reports with reproducible steps
- Include the version/commit hash where you discovered the vulnerability
- Allow us a 90-day security fix window before any public disclosure
- Share any potential mitigations or workarounds if known
### Untrusted inputs

## Supported Versions
Only the following versions are eligible for security updates:
When handling untrusted inputs, it's crucial to isolate the execution and carefully pre-process inputs to mitigate script injection risks.

| Version | Supported |
|---------|-----------|
| Latest release on master branch | ✅ |
| Development commits (pre-master) | ✅ |
| Classic folder (deprecated) | ❌ |
| All other versions | ❌ |
For maximum security when handling untrusted inputs, you may need to employ the following:

## Security Best Practices
When using this project:
1. Always use the latest stable version
2. Review security advisories before updating
3. Follow our security documentation and guidelines
4. Keep your dependencies up to date
5. Do not use code from the `classic/` folder as it is deprecated and unsupported
* Sandboxing: Isolate the process.
* Updates: Keep your libraries (including AutoGPT) updated with the latest security patches.
* Input Sanitation: Before feeding data to the model, sanitize inputs rigorously. This involves techniques such as:
  * Validation: Enforce strict rules on allowed characters and data types.
  * Filtering: Remove potentially malicious scripts or code fragments.
  * Encoding: Convert special characters into safe representations.
  * Verification: Run tooling that identifies potential script injections (e.g. [models that detect prompt injection attempts](https://python.langchain.com/docs/guides/safety/hugging_face_prompt_injection)).

## Past Security Advisories
For a list of past security advisories, please visit our [Security Advisory Page](https://github.com/Significant-Gravitas/AutoGPT/security/advisories) and [Huntr Disclosures Page](https://huntr.com/repos/significant-gravitas/autogpt).
### Data privacy

---
Last updated: November 2024
To protect sensitive data from potential leaks or unauthorized access, it is crucial to sandbox the agent execution. This means running it in a secure, isolated environment, which helps mitigate many attack vectors.

### Untrusted environments or networks

Since AutoGPT performs network calls to the OpenAI API, it is important to always run it with trusted environments and networks. Running it on untrusted environments can expose your API KEY to attackers.
Additionally, running it on an untrusted network can expose your data to potential network attacks.

However, even when running on trusted networks, it is important to always encrypt sensitive data while sending it over the network.

### Multi-Tenant environments

If you intend to run multiple AutoGPT brains in parallel, it is your responsibility to ensure the models do not interact or access each other's data.

The primary areas of concern are tenant isolation, resource allocation, model sharing and hardware attacks.

- Tenant Isolation: you must make sure that the tenants run separately to prevent unwanted access to the data from other tenants. Keeping model network traffic separate is also important because you not only prevent unauthorized access to data, but also prevent malicious users or tenants sending prompts to execute under another tenant’s identity.

- Resource Allocation: a denial of service caused by one tenant can affect the overall system health. Implement safeguards like rate limits, access controls, and health monitoring.

- Data Sharing: in a multi-tenant design with data sharing, ensure tenants and users understand the security risks and sandbox agent execution to mitigate risks.

- Hardware Attacks: the hardware (GPUs or TPUs) can also be attacked. [Research](https://scholar.google.com/scholar?q=gpu+side+channel) has shown that side channel attacks on GPUs are possible, which can make data leak from other brains or processes running on the same system at the same time.

## Reporting a Vulnerability

Beware that none of the topics under [Using AutoGPT Securely](#using-AutoGPT-securely) are considered vulnerabilities on AutoGPT.

However, if you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.

Please disclose it as a private [security advisory](https://github.com/Significant-Gravitas/AutoGPT/security/advisories/new).

A team of volunteers on a reasonable-effort basis maintains this project. As such, please give us at least 90 days to work on a fix before public exposure.

Binary file not shown.
@@ -1,2 +1,2 @@
[run]
[run]
relative_files = true
@@ -1,5 +1,5 @@
# To boot the app run the following:
# docker compose run auto-gpt
# docker-compose run auto-gpt
version: '3.9'

services:
@@ -11,15 +11,12 @@
## GROQ_API_KEY - Groq API Key (Example: gsk_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx)
# GROQ_API_KEY=

## LLAMAFILE_API_BASE - Llamafile API base URL
# LLAMAFILE_API_BASE=http://localhost:8080/v1

## TELEMETRY_OPT_IN - Share telemetry on errors and other issues with the AutoGPT team, e.g. through Sentry.
## This helps us to spot and solve problems earlier & faster. (Default: DISABLED)
# TELEMETRY_OPT_IN=true

## COMPONENT_CONFIG_FILE - Path to the json config file (Default: None)
# COMPONENT_CONFIG_FILE=
## EXECUTE_LOCAL_COMMANDS - Allow local command execution (Default: False)
# EXECUTE_LOCAL_COMMANDS=False

### Workspace ###

@@ -47,6 +44,9 @@

### Miscellaneous ###

## USER_AGENT - Define the user-agent used by the requests library to browse website (string)
# USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"

## AUTHORISE COMMAND KEY - Key to authorise commands
# AUTHORISE_COMMAND_KEY=y

@@ -96,21 +96,59 @@
## EMBEDDING_MODEL - Model to use for creating embeddings
# EMBEDDING_MODEL=text-embedding-3-small

################################################################################
### SHELL EXECUTION
################################################################################

## SHELL_COMMAND_CONTROL - Whether to use "allowlist" or "denylist" to determine what shell commands can be executed (Default: denylist)
# SHELL_COMMAND_CONTROL=denylist

## ONLY if SHELL_COMMAND_CONTROL is set to denylist:
## SHELL_DENYLIST - List of shell commands that ARE NOT allowed to be executed by AutoGPT (Default: sudo,su)
# SHELL_DENYLIST=sudo,su

## ONLY if SHELL_COMMAND_CONTROL is set to allowlist:
## SHELL_ALLOWLIST - List of shell commands that ARE allowed to be executed by AutoGPT (Default: None)
# SHELL_ALLOWLIST=

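The allowlist/denylist semantics documented above can be summarized with a minimal Python sketch. This is illustrative only: `command_allowed` is a hypothetical helper, not AutoGPT's actual implementation.

```python
import shlex

def command_allowed(command_line: str, control: str, listed: set[str]) -> bool:
    """Gate a shell command the way SHELL_COMMAND_CONTROL describes."""
    executable = shlex.split(command_line)[0]
    if control == "allowlist":
        # Allowlist mode: only explicitly listed executables may run
        return executable in listed
    # Default denylist mode: listed executables are blocked, everything else runs
    return executable not in listed

# With the default SHELL_COMMAND_CONTROL=denylist and SHELL_DENYLIST=sudo,su:
assert command_allowed("ls -la", "denylist", {"sudo", "su"})
assert not command_allowed("sudo apt install x", "denylist", {"sudo", "su"})
```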
################################################################################
### IMAGE GENERATION PROVIDER
################################################################################

### Common

## IMAGE_PROVIDER - Image provider (Default: dalle)
# IMAGE_PROVIDER=dalle

## IMAGE_SIZE - Image size (Default: 256)
# IMAGE_SIZE=256

### Huggingface (IMAGE_PROVIDER=huggingface)

## HUGGINGFACE_IMAGE_MODEL - Text-to-image model from Huggingface (Default: CompVis/stable-diffusion-v1-4)
# HUGGINGFACE_IMAGE_MODEL=CompVis/stable-diffusion-v1-4

## HUGGINGFACE_API_TOKEN - HuggingFace API token (Default: None)
# HUGGINGFACE_API_TOKEN=

### Stable Diffusion (IMAGE_PROVIDER=sdwebui)

## SD_WEBUI_AUTH - Stable Diffusion Web UI username:password pair (Default: None)
# SD_WEBUI_AUTH=

## SD_WEBUI_URL - Stable Diffusion Web UI API URL (Default: http://localhost:7860)
# SD_WEBUI_URL=http://localhost:7860

################################################################################
### AUDIO TO TEXT PROVIDER
################################################################################

## AUDIO_TO_TEXT_PROVIDER - Audio-to-text provider (Default: huggingface)
# AUDIO_TO_TEXT_PROVIDER=huggingface

## HUGGINGFACE_AUDIO_TO_TEXT_MODEL - The model for HuggingFace to use (Default: CompVis/stable-diffusion-v1-4)
# HUGGINGFACE_AUDIO_TO_TEXT_MODEL=CompVis/stable-diffusion-v1-4

################################################################################
### GITHUB
################################################################################
@@ -125,6 +163,18 @@
### WEB BROWSING
################################################################################

## HEADLESS_BROWSER - Whether to run the browser in headless mode (default: True)
# HEADLESS_BROWSER=True

## USE_WEB_BROWSER - Sets the web-browser driver to use with selenium (default: chrome)
# USE_WEB_BROWSER=chrome

## BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunks to summarize (Default: 3000)
# BROWSE_CHUNK_MAX_LENGTH=3000

## BROWSE_SPACY_LANGUAGE_MODEL - [spaCy language model](https://spacy.io/usage/models) to use when creating chunks. (Default: en_core_web_sm)
# BROWSE_SPACY_LANGUAGE_MODEL=en_core_web_sm

## GOOGLE_API_KEY - Google API key (Default: None)
# GOOGLE_API_KEY=

@@ -148,6 +198,13 @@
## ELEVENLABS_VOICE_ID - Eleven Labs voice ID (Example: None)
# ELEVENLABS_VOICE_ID=

################################################################################
### CHAT MESSAGES
################################################################################

## CHAT_MESSAGES_ENABLED - Enable chat messages (Default: False)
# CHAT_MESSAGES_ENABLED=False

################################################################################
### LOGGING
################################################################################
5
autogpt/.gitattributes
vendored
Normal file
@@ -0,0 +1,5 @@
# Exclude VCR cassettes from stats
tests/vcr_cassettes/**/**.y*ml linguist-generated

# Mark documentation as such
docs/**.md linguist-documentation
@@ -1,6 +1,6 @@
## Original ignores
classic/original_autogpt/keys.py
classic/original_autogpt/*.json
autogpt/keys.py
autogpt/*.json
*.mpeg
.env
azure.yaml
@@ -68,6 +68,10 @@ Options:
                                  continuous mode
  --speak                         Enable Speak Mode
  --debug                         Enable Debug Mode
  -b, --browser-name TEXT         Specifies which web-browser to use when
                                  using selenium to scrape the web.
  --allow-downloads               Dangerous: Allows AutoGPT to download files
                                  natively.
  --skip-news                     Specifies whether to suppress the output of
                                  latest news on startup.
  --install-plugin-deps           Installs external dependencies for 3rd party
@@ -86,7 +90,6 @@ Options:
  --override-directives           If specified, --constraint, --resource and
                                  --best-practice will override the AI's
                                  directives instead of being appended to them
  --component-config-file TEXT    Path to the json configuration file.
  --help                          Show this message and exit.
```
</details>
@@ -108,6 +111,10 @@ Usage: python -m autogpt serve [OPTIONS]

Options:
  --debug                  Enable Debug Mode
  -b, --browser-name TEXT  Specifies which web-browser to use when using
                           selenium to scrape the web.
  --allow-downloads        Dangerous: Allows AutoGPT to download files
                           natively.
  --install-plugin-deps    Installs external dependencies for 3rd party
                           plugins.
  --help                   Show this message and exit.
@@ -120,9 +127,9 @@ by default on `http://localhost:8000`.
For more comprehensive instructions, see the [user guide][docs/usage].

[docs]: https://docs.agpt.co/autogpt
[docs/setup]: https://docs.agpt.co/classic/original_autogpt/setup
[docs/usage]: https://docs.agpt.co/classic/original_autogpt/usage
[docs/plugins]: https://docs.agpt.co/classic/original_autogpt/plugins
[docs/setup]: https://docs.agpt.co/autogpt/setup
[docs/usage]: https://docs.agpt.co/autogpt/usage
[docs/plugins]: https://docs.agpt.co/autogpt/plugins

## 📚 Resources
* 📔 AutoGPT [project wiki](https://github.com/Significant-Gravitas/AutoGPT/wiki)
@@ -2,17 +2,17 @@ from typing import Optional

from forge.config.ai_directives import AIDirectives
from forge.config.ai_profile import AIProfile
from forge.config.config import Config
from forge.file_storage.base import FileStorage
from forge.llm.providers import MultiProvider

from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
from autogpt.app.config import AppConfig


def create_agent(
    agent_id: str,
    task: str,
    app_config: AppConfig,
    app_config: Config,
    file_storage: FileStorage,
    llm_provider: MultiProvider,
    ai_profile: Optional[AIProfile] = None,
@@ -38,7 +38,7 @@ def create_agent(

def configure_agent_with_state(
    state: AgentSettings,
    app_config: AppConfig,
    app_config: Config,
    file_storage: FileStorage,
    llm_provider: MultiProvider,
) -> Agent:
@@ -51,7 +51,7 @@ def configure_agent_with_state(


def _configure_agent(
    app_config: AppConfig,
    app_config: Config,
    llm_provider: MultiProvider,
    file_storage: FileStorage,
    agent_id: str = "",
@@ -80,7 +80,7 @@ def _configure_agent(
        settings=agent_state,
        llm_provider=llm_provider,
        file_storage=file_storage,
        app_config=app_config,
        legacy_config=app_config,
    )


@@ -89,7 +89,7 @@ def create_agent_state(
    task: str,
    ai_profile: AIProfile,
    directives: AIDirectives,
    app_config: AppConfig,
    app_config: Config,
) -> AgentSettings:
    return AgentSettings(
        agent_id=agent_id,
@@ -104,5 +104,5 @@ def create_agent_state(
            allow_fs_access=not app_config.restrict_to_workspace,
            use_functions_api=app_config.openai_functions,
        ),
        history=Agent.default_settings.history.model_copy(deep=True),
        history=Agent.default_settings.history.copy(deep=True),
    )
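The paired lines in this hunk, and in the modules that follow, are the Pydantic v1 to v2 migration; each rename follows the same fixed pattern. A minimal side-by-side sketch of the two public APIs (plain Pydantic, not AutoGPT code):

```python
from pydantic import BaseModel

class Settings(BaseModel):
    name: str = "agent"

s = Settings()

# Pydantic v1 (removed/deprecated)  ->  Pydantic v2 replacement
# s.dict()                          ->  s.model_dump()
# s.json()                          ->  s.model_dump_json()
# s.copy(deep=True)                 ->  s.model_copy(deep=True)
# Settings.parse_obj(data)          ->  Settings.model_validate(data)
# Settings.schema()                 ->  Settings.model_json_schema()

clone = s.model_copy(deep=True)            # deep copy, as in the history/settings hunks
data = s.model_dump()                      # plain dict, as in the `**... # HACK` expansion
assert Settings.model_validate(data) == s  # round-trips cleanly
```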
@@ -6,7 +6,7 @@ from forge.file_storage.base import FileStorage

if TYPE_CHECKING:
    from autogpt.agents.agent import Agent
    from autogpt.app.config import AppConfig
    from forge.config.config import Config
    from forge.llm.providers import MultiProvider

from .configurators import _configure_agent
@@ -16,7 +16,7 @@ from .profile_generator import generate_agent_profile_for_task
async def generate_agent_for_task(
    agent_id: str,
    task: str,
    app_config: AppConfig,
    app_config: Config,
    file_storage: FileStorage,
    llm_provider: MultiProvider,
) -> Agent:
@@ -3,6 +3,7 @@ import logging

from forge.config.ai_directives import AIDirectives
from forge.config.ai_profile import AIProfile
from forge.config.config import Config
from forge.llm.prompting import ChatPrompt, LanguageModelClassification, PromptStrategy
from forge.llm.providers import MultiProvider
from forge.llm.providers.schema import (
@@ -13,13 +14,11 @@ from forge.llm.providers.schema import (
from forge.models.config import SystemConfiguration, UserConfigurable
from forge.models.json_schema import JSONSchema

from autogpt.app.config import AppConfig

logger = logging.getLogger(__name__)


class AgentProfileGeneratorConfiguration(SystemConfiguration):
    llm_classification: LanguageModelClassification = UserConfigurable(
    model_classification: LanguageModelClassification = UserConfigurable(
        default=LanguageModelClassification.SMART_MODEL
    )
    _example_call: object = {
@@ -137,7 +136,7 @@ class AgentProfileGeneratorConfiguration(SystemConfiguration):
                required=True,
            ),
        },
        ).model_dump()
        ).dict()
    )


@@ -148,21 +147,21 @@ class AgentProfileGenerator(PromptStrategy):

    def __init__(
        self,
        llm_classification: LanguageModelClassification,
        model_classification: LanguageModelClassification,
        system_prompt: str,
        user_prompt_template: str,
        create_agent_function: dict,
    ):
        self._llm_classification = llm_classification
        self._model_classification = model_classification
        self._system_prompt_message = system_prompt
        self._user_prompt_template = user_prompt_template
        self._create_agent_function = CompletionModelFunction.model_validate(
        self._create_agent_function = CompletionModelFunction.parse_obj(
            create_agent_function
        )

    @property
    def llm_classification(self) -> LanguageModelClassification:
        return self._llm_classification
    def model_classification(self) -> LanguageModelClassification:
        return self._model_classification

    def build_prompt(self, user_objective: str = "", **kwargs) -> ChatPrompt:
        system_message = ChatMessage.system(self._system_prompt_message)
@@ -213,7 +212,7 @@ class AgentProfileGenerator(PromptStrategy):

async def generate_agent_profile_for_task(
    task: str,
    app_config: AppConfig,
    app_config: Config,
    llm_provider: MultiProvider,
) -> tuple[AIProfile, AIDirectives]:
    """Generates an AIConfig object from the given string.
@@ -222,7 +221,7 @@ async def generate_agent_profile_for_task(
        AIConfig: The AIConfig object tailored to the user's input
    """
    agent_profile_generator = AgentProfileGenerator(
        **AgentProfileGenerator.default_configuration.model_dump()  # HACK
        **AgentProfileGenerator.default_configuration.dict()  # HACK
    )

    prompt = agent_profile_generator.build_prompt(task)
@@ -26,12 +26,12 @@ class MyAgent(Agent):
        settings: AgentSettings,
        llm_provider: MultiProvider,
        file_storage: FileStorage,
        app_config: AppConfig,
        legacy_config: Config,
    ):
        # Call the parent constructor to bring in the default components
        super().__init__(settings, llm_provider, file_storage, app_config)
        super().__init__(settings, llm_provider, file_storage, legacy_config)
        # Add your custom component
        self.my_component = MyComponent()
```

For more customization, you can override the `propose_action` and `execute` or even subclass `BaseAgent` directly. This way you can have full control over the agent's components and behavior. Have a look at the [implementation of Agent](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/original_autogpt/agents/agent.py) for more details.
For more customization, you can override the `propose_action` and `execute` or even subclass `BaseAgent` directly. This way you can have full control over the agent's components and behavior. Have a look at the [implementation of Agent](https://github.com/Significant-Gravitas/AutoGPT/tree/master/autogpt/autogpt/agents/agent.py) for more details.
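A minimal sketch of what such an override can look like, based on the async `propose_action` signature visible in `agent.py` below; the print line and the direct `use_tool` access are illustrative assumptions, not project code:

```python
class MyAgent(Agent):
    async def propose_action(self) -> OneShotAgentActionProposal:
        # Let the default component pipeline build the proposal first
        proposal = await super().propose_action()
        # Inspect (or adjust) the proposal before it is executed;
        # `use_tool` is the field populated by the one-shot prompt strategy.
        print(f"Proposed tool call: {proposal.use_tool}")
        return proposal
```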
@@ -18,11 +18,7 @@ from forge.components.action_history import (
    ActionHistoryComponent,
    EpisodicActionHistory,
)
from forge.components.action_history.action_history import ActionHistoryConfiguration
from forge.components.code_executor.code_executor import (
    CodeExecutorComponent,
    CodeExecutorConfiguration,
)
from forge.components.code_executor.code_executor import CodeExecutorComponent
from forge.components.context.context import AgentContext, ContextComponent
from forge.components.file_manager import FileManagerComponent
from forge.components.git_operations import GitOperationsComponent
@@ -62,7 +58,7 @@ from .prompt_strategies.one_shot import (
)

if TYPE_CHECKING:
    from autogpt.app.config import AppConfig
    from forge.config.config import Config

logger = logging.getLogger(__name__)

@@ -95,14 +91,12 @@ class Agent(BaseAgent[OneShotAgentActionProposal], Configurable[AgentSettings]):
        settings: AgentSettings,
        llm_provider: MultiProvider,
        file_storage: FileStorage,
        app_config: AppConfig,
        legacy_config: Config,
    ):
        super().__init__(settings)

        self.llm_provider = llm_provider
        prompt_config = OneShotAgentPromptStrategy.default_configuration.model_copy(
            deep=True
        )
        prompt_config = OneShotAgentPromptStrategy.default_configuration.copy(deep=True)
        prompt_config.use_functions_api = (
            settings.config.use_functions_api
            # Anthropic currently doesn't support tools + prefilling :(
@@ -113,41 +107,33 @@ class Agent(BaseAgent[OneShotAgentActionProposal], Configurable[AgentSettings]):

        # Components
        self.system = SystemComponent()
        self.history = (
            ActionHistoryComponent(
                settings.history,
                lambda x: self.llm_provider.count_tokens(x, self.llm.name),
                llm_provider,
                ActionHistoryConfiguration(
                    llm_name=app_config.fast_llm, max_tokens=self.send_token_limit
                ),
            )
            .run_after(WatchdogComponent)
            .run_after(SystemComponent)
        )
        if not app_config.noninteractive_mode:
            self.user_interaction = UserInteractionComponent()
        self.file_manager = FileManagerComponent(file_storage, settings)
        self.history = ActionHistoryComponent(
            settings.history,
            self.send_token_limit,
            lambda x: self.llm_provider.count_tokens(x, self.llm.name),
            legacy_config,
            llm_provider,
        ).run_after(WatchdogComponent)
        self.user_interaction = UserInteractionComponent(legacy_config)
        self.file_manager = FileManagerComponent(settings, file_storage)
        self.code_executor = CodeExecutorComponent(
            self.file_manager.workspace,
            CodeExecutorConfiguration(
                docker_container_name=f"{settings.agent_id}_sandbox"
            ),
            settings,
            legacy_config,
        )
        self.git_ops = GitOperationsComponent()
        self.image_gen = ImageGeneratorComponent(self.file_manager.workspace)
        self.web_search = WebSearchComponent()
        self.web_selenium = WebSeleniumComponent(
            llm_provider,
            app_config.app_data_dir,
        self.git_ops = GitOperationsComponent(legacy_config)
        self.image_gen = ImageGeneratorComponent(
            self.file_manager.workspace, legacy_config
        )
        self.web_search = WebSearchComponent(legacy_config)
        self.web_selenium = WebSeleniumComponent(legacy_config, llm_provider, self.llm)
        self.context = ContextComponent(self.file_manager.workspace, settings.context)
        self.watchdog = WatchdogComponent(settings.config, settings.history).run_after(
            ContextComponent
        )

        self.event_history = settings.history
        self.app_config = app_config
        self.legacy_config = legacy_config

    async def propose_action(self) -> OneShotAgentActionProposal:
        """Proposes the next action to execute, based on the task and current state.
@@ -162,7 +148,7 @@ class Agent(BaseAgent[OneShotAgentActionProposal], Configurable[AgentSettings]):
        constraints = await self.run_pipeline(DirectiveProvider.get_constraints)
        best_practices = await self.run_pipeline(DirectiveProvider.get_best_practices)

        directives = self.state.directives.model_copy(deep=True)
        directives = self.state.directives.copy(deep=True)
        directives.resources += resources
        directives.constraints += constraints
        directives.best_practices += best_practices
@@ -174,19 +160,13 @@ class Agent(BaseAgent[OneShotAgentActionProposal], Configurable[AgentSettings]):
        # Get messages
        messages = await self.run_pipeline(MessageProvider.get_messages)

        include_os_info = (
            self.code_executor.config.execute_local_commands
            if hasattr(self, "code_executor")
            else False
        )

        prompt: ChatPrompt = self.prompt_strategy.build_prompt(
            messages=messages,
            task=self.state.task,
            ai_profile=self.state.ai_profile,
            ai_directives=directives,
            commands=function_specs_from_commands(self.commands),
            include_os_info=include_os_info,
            include_os_info=self.legacy_config.execute_local_commands,
        )

        logger.debug(f"Executing prompt:\n{dump_prompt(prompt)}")
@@ -297,7 +277,7 @@ class Agent(BaseAgent[OneShotAgentActionProposal], Configurable[AgentSettings]):
            command
            for command in self.commands
            if not any(
                name in self.app_config.disabled_commands for name in command.names
                name in self.legacy_config.disabled_commands for name in command.names
            )
        ]

@@ -28,13 +28,15 @@ _RESPONSE_INTERFACE_NAME = "AssistantResponse"

class AssistantThoughts(ModelWithSummary):
    observations: str = Field(
        description="Relevant observations from your last action (if any)"
        ..., description="Relevant observations from your last action (if any)"
    )
    text: str = Field(description="Thoughts")
    reasoning: str = Field(description="Reasoning behind the thoughts")
    self_criticism: str = Field(description="Constructive self-criticism")
    plan: list[str] = Field(description="Short list that conveys the long-term plan")
    speak: str = Field(description="Summary of thoughts, to say to user")
    text: str = Field(..., description="Thoughts")
    reasoning: str = Field(..., description="Reasoning behind the thoughts")
    self_criticism: str = Field(..., description="Constructive self-criticism")
    plan: list[str] = Field(
        ..., description="Short list that conveys the long-term plan"
    )
    speak: str = Field(..., description="Summary of thoughts, to say to user")

    def summary(self) -> str:
        return self.text
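The hunk above drops the explicit `...` from each `Field()`. In Pydantic (v1 and v2 alike) a field with no default is required either way, so the two spellings behave the same. A quick sketch with plain Pydantic, for illustration only:

```python
from pydantic import BaseModel, Field, ValidationError

class Thoughts(BaseModel):
    text: str = Field(description="Thoughts")       # required: no default given
    speak: str = Field(..., description="Summary")  # required: explicit ellipsis

try:
    Thoughts(text="only text")  # `speak` is missing
except ValidationError as e:
    print(len(e.errors()), "validation error")  # -> 1 validation error
```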
@@ -94,13 +96,11 @@ class OneShotAgentPromptStrategy(PromptStrategy):
        logger: Logger,
    ):
        self.config = configuration
        self.response_schema = JSONSchema.from_dict(
            OneShotAgentActionProposal.model_json_schema()
        )
        self.response_schema = JSONSchema.from_dict(OneShotAgentActionProposal.schema())
        self.logger = logger

    @property
    def llm_classification(self) -> LanguageModelClassification:
    def model_classification(self) -> LanguageModelClassification:
        return LanguageModelClassification.FAST_MODEL  # FIXME: dynamic switching

    def build_prompt(
@@ -182,7 +182,7 @@ class OneShotAgentPromptStrategy(PromptStrategy):
        )

    def response_format_instruction(self, use_functions_api: bool) -> tuple[str, str]:
        response_schema = self.response_schema.model_copy(deep=True)
        response_schema = self.response_schema.copy(deep=True)
        assert response_schema.properties
        if use_functions_api and "use_tool" in response_schema.properties:
            del response_schema.properties["use_tool"]
@@ -274,8 +274,5 @@ class OneShotAgentPromptStrategy(PromptStrategy):
            raise InvalidAgentResponseError("Assistant did not use a tool")
        assistant_reply_dict["use_tool"] = response.tool_calls[0].function

        parsed_response = OneShotAgentActionProposal.model_validate(
            assistant_reply_dict
        )
        parsed_response.raw_message = response.copy()
        parsed_response = OneShotAgentActionProposal.parse_obj(assistant_reply_dict)
        return parsed_response
@@ -23,6 +23,7 @@ from forge.agent_protocol.models import (
    TaskRequestBody,
    TaskStepsListResponse,
)
from forge.config.config import Config
from forge.file_storage import FileStorage
from forge.llm.providers import ModelProviderBudget, MultiProvider
from forge.models.action import ActionErrorResult, ActionSuccessResult
@@ -34,7 +35,6 @@ from sentry_sdk import set_user

from autogpt.agent_factory.configurators import configure_agent_with_state, create_agent
from autogpt.agents.agent_manager import AgentManager
from autogpt.app.config import AppConfig
from autogpt.app.utils import is_port_free

logger = logging.getLogger(__name__)
@@ -45,7 +45,7 @@ class AgentProtocolServer:

    def __init__(
        self,
        app_config: AppConfig,
        app_config: Config,
        database: AgentDB,
        file_storage: FileStorage,
        llm_provider: MultiProvider,
@@ -97,9 +97,7 @@ class AgentProtocolServer:
        app.include_router(router, prefix="/ap/v1")
        script_dir = os.path.dirname(os.path.realpath(__file__))
        frontend_path = (
            pathlib.Path(script_dir)
            .joinpath("../../../classic/frontend/build/web")
            .resolve()
            pathlib.Path(script_dir).joinpath("../../../frontend/build/web").resolve()
        )

        if os.path.exists(frontend_path):
@@ -316,7 +314,7 @@ class AgentProtocolServer:
            ""
            if tool_result is None
            else (
                orjson.loads(tool_result.model_dump_json())
                orjson.loads(tool_result.json())
                if not isinstance(tool_result, ActionErrorResult)
                else {
                    "error": str(tool_result.error),
@@ -329,7 +327,7 @@ class AgentProtocolServer:
                if last_proposal and tool_result
                else {}
            ),
            **assistant_response.model_dump(),
            **assistant_response.dict(),
        }

        task_cumulative_cost = agent.llm_provider.get_incurred_cost()
@@ -453,9 +451,7 @@ class AgentProtocolServer:
        """
        task_llm_budget = self._task_budgets[task.task_id]

        task_llm_provider_config = self.llm_provider._configuration.model_copy(
            deep=True
        )
        task_llm_provider_config = self.llm_provider._configuration.copy(deep=True)
        _extra_request_headers = task_llm_provider_config.extra_request_headers
        _extra_request_headers["AP-TaskID"] = task.task_id
        if step_id:
@@ -463,7 +459,7 @@ class AgentProtocolServer:
        if task.additional_input and (user_id := task.additional_input.get("user_id")):
            _extra_request_headers["AutoGPT-UserID"] = user_id

        settings = self.llm_provider._settings.model_copy()
        settings = self.llm_provider._settings.copy()
        settings.budget = task_llm_budget
        settings.configuration = task_llm_provider_config
        task_llm_provider = self.llm_provider.__class__(
@@ -28,6 +28,24 @@ def cli(ctx: click.Context):
    help="Defines the number of times to run in continuous mode",
)
@click.option("--speak", is_flag=True, help="Enable Speak Mode")
@click.option(
    "-b",
    "--browser-name",
    help="Specifies which web-browser to use when using selenium to scrape the web.",
)
@click.option(
    "--allow-downloads",
    is_flag=True,
    help="Dangerous: Allows AutoGPT to download files natively.",
)
@click.option(
    # TODO: this is a hidden option for now, necessary for integration testing.
    # We should make this public once we're ready to roll out agent specific workspaces.
    "--workspace-directory",
    "-w",
    type=click.Path(file_okay=False),
    hidden=True,
)
@click.option(
    "--install-plugin-deps",
    is_flag=True,
@@ -110,15 +128,13 @@ def cli(ctx: click.Context):
    ),
    type=click.Choice([i.value for i in LogFormatName]),
)
@click.option(
    "--component-config-file",
    help="Path to a json configuration file",
    type=click.Path(exists=True, dir_okay=False, resolve_path=True, path_type=Path),
)
def run(
    continuous: bool,
    continuous_limit: Optional[int],
    speak: bool,
    browser_name: Optional[str],
    allow_downloads: bool,
    workspace_directory: Optional[Path],
    install_plugin_deps: bool,
    skip_news: bool,
    skip_reprompt: bool,
@@ -132,7 +148,6 @@ def run(
    log_level: Optional[str],
    log_format: Optional[str],
    log_file_format: Optional[str],
    component_config_file: Optional[Path],
) -> None:
    """
    Sets up and runs an agent, based on the task specified by the user, or resumes an
@@ -150,7 +165,10 @@ def run(
        log_level=log_level,
        log_format=log_format,
        log_file_format=log_file_format,
        browser_name=browser_name,
        allow_downloads=allow_downloads,
        skip_news=skip_news,
        workspace_directory=workspace_directory,
        install_plugin_deps=install_plugin_deps,
        override_ai_name=ai_name,
        override_ai_role=ai_role,
@@ -158,11 +176,20 @@ def run(
        constraints=list(constraint),
        best_practices=list(best_practice),
        override_directives=override_directives,
        component_config_file=component_config_file,
    )


@cli.command()
@click.option(
    "-b",
    "--browser-name",
    help="Specifies which web-browser to use when using selenium to scrape the web.",
)
@click.option(
    "--allow-downloads",
    is_flag=True,
    help="Dangerous: Allows AutoGPT to download files natively.",
)
@click.option(
    "--install-plugin-deps",
    is_flag=True,
@@ -190,6 +217,8 @@ def run(
    type=click.Choice([i.value for i in LogFormatName]),
)
def serve(
    browser_name: Optional[str],
    allow_downloads: bool,
    install_plugin_deps: bool,
    debug: bool,
    log_level: Optional[str],
@@ -208,6 +237,8 @@ def serve(
        log_level=log_level,
        log_format=log_format,
        log_file_format=log_file_format,
        browser_name=browser_name,
        allow_downloads=allow_downloads,
        install_plugin_deps=install_plugin_deps,
    )

@@ -5,18 +5,20 @@ import logging
 from typing import Literal, Optional

 import click
 from colorama import Back, Style
+from forge.config.config import GPT_3_MODEL, Config
 from forge.llm.providers import ModelName, MultiProvider

-from autogpt.app.config import GPT_3_MODEL, AppConfig
-
 logger = logging.getLogger(__name__)


 async def apply_overrides_to_config(
-    config: AppConfig,
+    config: Config,
     continuous: bool = False,
     continuous_limit: Optional[int] = None,
     skip_reprompt: bool = False,
+    browser_name: Optional[str] = None,
+    allow_downloads: bool = False,
+    skip_news: bool = False,
 ) -> None:
     """Updates the config object with the given arguments.
@@ -31,6 +33,8 @@ async def apply_overrides_to_config(
         log_level (int): The global log level for the application.
         log_format (str): The format for the log(s).
         log_file_format (str): Override the format for the log file.
+        browser_name (str): The name of the browser to use for scraping the web.
+        allow_downloads (bool): Whether to allow AutoGPT to download files natively.
         skips_news (bool): Whether to suppress the output of latest news on startup.
     """
     config.continuous_mode = False
@@ -51,33 +55,44 @@ async def apply_overrides_to_config(
         raise click.UsageError("--continuous-limit can only be used with --continuous")

     # Check availability of configured LLMs; fallback to other LLM if unavailable
-    config.fast_llm, config.smart_llm = await check_models(
-        (config.fast_llm, "fast_llm"), (config.smart_llm, "smart_llm")
-    )
+    config.fast_llm = await check_model(config.fast_llm, "fast_llm")
+    config.smart_llm = await check_model(config.smart_llm, "smart_llm")

     if skip_reprompt:
         config.skip_reprompt = True

+    if browser_name:
+        config.selenium_web_browser = browser_name
+
+    if allow_downloads:
+        logger.warning(
+            msg=f"{Back.LIGHTYELLOW_EX}"
+            "AutoGPT will now be able to download and save files to your machine."
+            f"{Back.RESET}"
+            " It is recommended that you monitor any files it downloads carefully.",
+        )
+        logger.warning(
+            msg=f"{Back.RED + Style.BRIGHT}"
+            "NEVER OPEN FILES YOU AREN'T SURE OF!"
+            f"{Style.RESET_ALL}",
+        )
+        config.allow_downloads = True
+
+    if skip_news:
+        config.skip_news = True
+

-async def check_models(
-    *models: tuple[ModelName, Literal["smart_llm", "fast_llm"]]
-) -> tuple[ModelName, ...]:
+async def check_model(
+    model_name: ModelName, model_type: Literal["smart_llm", "fast_llm"]
+) -> ModelName:
     """Check if model is available for use. If not, return gpt-3.5-turbo."""
     multi_provider = MultiProvider()
-    available_models = await multi_provider.get_available_chat_models()
+    models = await multi_provider.get_available_chat_models()

-    checked_models: list[ModelName] = []
-    for model, model_type in models:
-        if any(model == m.name for m in available_models):
-            checked_models.append(model)
-        else:
-            logger.warning(
-                f"You don't have access to {model}. "
-                f"Setting {model_type} to {GPT_3_MODEL}."
-            )
-            checked_models.append(GPT_3_MODEL)
+    if any(model_name == m.name for m in models):
+        return model_name

-    return tuple(checked_models)
+    logger.warning(
+        f"You don't have access to {model_name}. Setting {model_type} to {GPT_3_MODEL}."
+    )
+    return GPT_3_MODEL
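Both sides of the hunk above perform the same availability check with a gpt-3.5-turbo fallback; the removed version batches both models into one call, the restored one checks a single model at a time. A runnable sketch of the single-model variant, with a stub in place of MultiProvider.get_available_chat_models() (the real method returns model-info objects; plain names are used here for brevity):

import asyncio
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
GPT_3_MODEL = "gpt-3.5-turbo"  # fallback, as in the diff


async def get_available_chat_models() -> list[str]:
    # Stub for MultiProvider.get_available_chat_models()
    return ["gpt-3.5-turbo", "gpt-4-turbo"]


async def check_model(model_name: str, model_type: str) -> str:
    """Return model_name if available, else fall back to GPT_3_MODEL."""
    available_models = await get_available_chat_models()
    if model_name in available_models:
        return model_name
    logger.warning(
        f"You don't have access to {model_name}. Setting {model_type} to {GPT_3_MODEL}."
    )
    return GPT_3_MODEL


print(asyncio.run(check_model("gpt-5", "smart_llm")))  # -> gpt-3.5-turbo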
@@ -21,6 +21,7 @@ from forge.components.code_executor.code_executor import (
 )
 from forge.config.ai_directives import AIDirectives
 from forge.config.ai_profile import AIProfile
+from forge.config.config import Config, ConfigBuilder, assert_config_has_openai_api_key
 from forge.file_storage import FileStorageBackendName, get_storage
 from forge.llm.providers import MultiProvider
 from forge.logging.config import configure_logging
@@ -33,11 +34,6 @@ from forge.utils.exceptions import AgentTerminated, InvalidAgentResponseError
 from autogpt.agent_factory.configurators import configure_agent_with_state, create_agent
 from autogpt.agents.agent_manager import AgentManager
 from autogpt.agents.prompt_strategies.one_shot import AssistantThoughts
-from autogpt.app.config import (
-    AppConfig,
-    ConfigBuilder,
-    assert_config_has_required_llm_api_keys,
-)

 if TYPE_CHECKING:
     from autogpt.agents.agent import Agent
@@ -66,7 +62,10 @@ async def run_auto_gpt(
     log_level: Optional[str] = None,
     log_format: Optional[str] = None,
     log_file_format: Optional[str] = None,
+    browser_name: Optional[str] = None,
+    allow_downloads: bool = False,
+    skip_news: bool = False,
     workspace_directory: Optional[Path] = None,
     install_plugin_deps: bool = False,
     override_ai_name: Optional[str] = None,
     override_ai_role: Optional[str] = None,
@@ -74,7 +73,6 @@ async def run_auto_gpt(
     constraints: Optional[list[str]] = None,
     best_practices: Optional[list[str]] = None,
     override_directives: bool = False,
-    component_config_file: Optional[Path] = None,
 ):
     # Set up configuration
     config = ConfigBuilder.build_config_from_env()
@@ -100,13 +98,16 @@ async def run_auto_gpt(
         tts_config=config.tts_config,
     )

-    await assert_config_has_required_llm_api_keys(config)
+    # TODO: fill in llm values here
+    assert_config_has_openai_api_key(config)

     await apply_overrides_to_config(
         config=config,
         continuous=continuous,
         continuous_limit=continuous_limit,
         skip_reprompt=skip_reprompt,
+        browser_name=browser_name,
+        allow_downloads=allow_downloads,
+        skip_news=skip_news,
     )
@@ -131,12 +132,15 @@ async def run_auto_gpt(
         print_python_version_info(logger)
         print_attribute("Smart LLM", config.smart_llm)
         print_attribute("Fast LLM", config.fast_llm)
+        print_attribute("Browser", config.selenium_web_browser)
         if config.continuous_mode:
             print_attribute("Continuous Mode", "ENABLED", title_color=Fore.YELLOW)
             if continuous_limit:
                 print_attribute("Continuous Limit", config.continuous_limit)
         if config.tts_config.speak_mode:
             print_attribute("Speak Mode", "ENABLED")
+        if config.allow_downloads:
+            print_attribute("Native Downloading", "ENABLED")
         if we_are_running_in_a_docker_container() or is_docker_available():
             print_attribute("Code Execution", "ENABLED")
         else:
@@ -323,14 +327,6 @@ async def run_auto_gpt(
     #         )
     #     ).add_done_callback(update_agent_directives)

-    # Load component configuration from file
-    if _config_file := component_config_file or config.component_config_file:
-        try:
-            logger.info(f"Loading component configuration from {_config_file}")
-            agent.load_component_configs(_config_file.read_text())
-        except Exception as e:
-            logger.error(f"Could not load component configuration: {e}")
-
     #################
     # Run the Agent #
     #################
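The removed block above loads a JSON component configuration and applies it to the agent, a feature the older branch predates. A sketch of that load-and-apply shape, with load_component_configs as a hypothetical stand-in for the Agent method:

import json
import logging
from pathlib import Path

logger = logging.getLogger(__name__)


def load_component_configs(serialized: str) -> None:
    # Hypothetical stand-in for Agent.load_component_configs()
    configs = json.loads(serialized)
    logger.info(f"Applying configuration for components: {list(configs)}")


config_file = Path("component_config.json")  # hypothetical path
if config_file.exists():
    try:
        logger.info(f"Loading component configuration from {config_file}")
        load_component_configs(config_file.read_text())
    except Exception as e:
        logger.error(f"Could not load component configuration: {e}")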
@@ -357,6 +353,8 @@ async def run_auto_gpt_server(
    log_level: Optional[str] = None,
    log_format: Optional[str] = None,
    log_file_format: Optional[str] = None,
+    browser_name: Optional[str] = None,
+    allow_downloads: bool = False,
    install_plugin_deps: bool = False,
 ):
    from .agent_protocol_server import AgentProtocolServer
@@ -382,10 +380,13 @@ async def run_auto_gpt_server(
         tts_config=config.tts_config,
     )

-    await assert_config_has_required_llm_api_keys(config)
+    # TODO: fill in llm values here
+    assert_config_has_openai_api_key(config)

     await apply_overrides_to_config(
         config=config,
+        browser_name=browser_name,
+        allow_downloads=allow_downloads,
     )

     llm_provider = _configure_llm_provider(config)
@@ -410,7 +411,7 @@ async def run_auto_gpt_server(
     )


-def _configure_llm_provider(config: AppConfig) -> MultiProvider:
+def _configure_llm_provider(config: Config) -> MultiProvider:
     multi_provider = MultiProvider()
     for model in [config.smart_llm, config.fast_llm]:
         # Ensure model providers for configured LLMs are available
@@ -450,15 +451,15 @@ async def run_interaction_loop(
         None
     """
     # These contain both application config and agent config, so grab them here.
-    app_config = agent.app_config
+    legacy_config = agent.legacy_config
     ai_profile = agent.state.ai_profile
     logger = logging.getLogger(__name__)

     cycle_budget = cycles_remaining = _get_cycle_budget(
-        app_config.continuous_mode, app_config.continuous_limit
+        legacy_config.continuous_mode, legacy_config.continuous_limit
     )
     spinner = Spinner(
-        "Thinking...", plain_output=app_config.logging.plain_console_output
+        "Thinking...", plain_output=legacy_config.logging.plain_console_output
     )
     stop_reason = None
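_get_cycle_budget, called above, is untouched by this diff; judging from the call site it yields one cycle in interactive mode, the given limit in continuous mode, and an unbounded budget when continuous mode has no limit. A sketch under those assumptions:

import math


def _get_cycle_budget(continuous_mode: bool, continuous_limit: int | None) -> int | float:
    # Interactive mode: pause for user feedback after every cycle.
    if not continuous_mode:
        return 1
    # Continuous mode: run up to the limit, or forever if no limit is set.
    return continuous_limit if continuous_limit else math.inf


cycle_budget = cycles_remaining = _get_cycle_budget(True, None)
print(cycle_budget)  # inf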
@@ -507,25 +508,22 @@ async def run_interaction_loop(
         ########
         handle_stop_signal()
         # Have the agent determine the next action to take.
-        if not (_ep := agent.event_history.current_episode) or _ep.result:
-            with spinner:
-                try:
-                    action_proposal = await agent.propose_action()
-                except InvalidAgentResponseError as e:
-                    logger.warning(f"The agent's thoughts could not be parsed: {e}")
-                    consecutive_failures += 1
-                    if consecutive_failures >= 3:
-                        logger.error(
-                            "The agent failed to output valid thoughts"
-                            f" {consecutive_failures} times in a row. Terminating..."
-                        )
-                        raise AgentTerminated(
-                            "The agent failed to output valid thoughts"
-                            f" {consecutive_failures} times in a row."
-                        )
-                    continue
-        else:
-            action_proposal = _ep.action
+        with spinner:
+            try:
+                action_proposal = await agent.propose_action()
+            except InvalidAgentResponseError as e:
+                logger.warning(f"The agent's thoughts could not be parsed: {e}")
+                consecutive_failures += 1
+                if consecutive_failures >= 3:
+                    logger.error(
+                        "The agent failed to output valid thoughts"
+                        f" {consecutive_failures} times in a row. Terminating..."
+                    )
+                    raise AgentTerminated(
+                        "The agent failed to output valid thoughts"
+                        f" {consecutive_failures} times in a row."
+                    )
+                continue

         consecutive_failures = 0
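The restored block above is the older, always-propose version of this step; the removed lines additionally resumed an unfinished episode from the event history before proposing anew. A standalone sketch of the retry-with-cap pattern both variants share, with stubbed exception types and agent call:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class InvalidAgentResponseError(Exception):
    """Stand-in for forge.utils.exceptions.InvalidAgentResponseError."""


class AgentTerminated(Exception):
    """Stand-in for forge.utils.exceptions.AgentTerminated."""


def propose_action(attempt: int) -> str:
    # Stub: fail twice, then produce a parseable proposal.
    if attempt < 2:
        raise InvalidAgentResponseError("unparseable thoughts")
    return "finish"


consecutive_failures = 0
while True:
    try:
        action_proposal = propose_action(consecutive_failures)
    except InvalidAgentResponseError as e:
        logger.warning(f"The agent's thoughts could not be parsed: {e}")
        consecutive_failures += 1
        if consecutive_failures >= 3:
            raise AgentTerminated(
                f"The agent failed to output valid thoughts {consecutive_failures} times in a row."
            )
        continue
    consecutive_failures = 0
    break  # the real loop goes on to execute the proposed action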
@@ -536,7 +534,7 @@ async def run_interaction_loop(
             update_user(
                 ai_profile,
                 action_proposal,
-                speak_mode=app_config.tts_config.speak_mode,
+                speak_mode=legacy_config.tts_config.speak_mode,
             )

             ##################
@@ -545,7 +543,7 @@ async def run_interaction_loop(
         handle_stop_signal()
         if cycles_remaining == 1:  # Last cycle
             feedback_type, feedback, new_cycles_remaining = await get_user_feedback(
-                app_config,
+                legacy_config,
                 ai_profile,
             )
|
||||
|
||||
|
||||
async def get_user_feedback(
|
||||
config: AppConfig,
|
||||
config: Config,
|
||||
ai_profile: AIProfile,
|
||||
) -> tuple[UserFeedback, str, int | None]:
|
||||
"""Gets the user's feedback on the assistant's reply.
|
||||
@@ -4,10 +4,9 @@ from typing import Optional

 from forge.config.ai_directives import AIDirectives
 from forge.config.ai_profile import AIProfile
+from forge.config.config import Config
 from forge.logging.utils import print_attribute

-from autogpt.app.config import AppConfig
-
 from .input import clean_input

 logger = logging.getLogger(__name__)
@@ -47,7 +46,7 @@ def apply_overrides_to_ai_settings(
 async def interactively_revise_ai_settings(
     ai_profile: AIProfile,
     directives: AIDirectives,
-    app_config: AppConfig,
+    app_config: Config,
 ):
     """Interactively revise the AI settings.
@@ -22,7 +22,7 @@ logger = logging.getLogger(__name__)
 def get_bulletin_from_web():
     try:
         response = requests.get(
-            "https://raw.githubusercontent.com/Significant-Gravitas/AutoGPT/master/classic/original_autogpt/BULLETIN.md"  # noqa: E501
+            "https://raw.githubusercontent.com/Significant-Gravitas/AutoGPT/master/autogpt/BULLETIN.md"  # noqa: E501
         )
         if response.status_code == 200:
             return response.text
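A standalone sketch of the bulletin fetch as restored above. The timeout and the exception tail are additions of this sketch (the hunk shows neither), included so the example cannot hang:

import requests


def get_bulletin_from_web() -> str:
    try:
        response = requests.get(
            "https://raw.githubusercontent.com/Significant-Gravitas/AutoGPT"
            "/master/autogpt/BULLETIN.md",
            timeout=10,  # not in the diff; added so the call cannot hang
        )
        if response.status_code == 200:
            return response.text
    except requests.exceptions.RequestException:
        pass
    return ""


print(get_bulletin_from_web()[:80])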
@@ -45,7 +45,7 @@ def vcs_state_diverges_from_master() -> bool:
     """
     Returns whether a git repo is present and contains changes that are not in `master`.
     """
-    paths_we_care_about = "classic/original_autogpt/classic/original_autogpt/**/*.py"
+    paths_we_care_about = "autogpt/autogpt/**/*.py"
     try:
         repo = Repo(search_parent_directories=True)
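A sketch of the divergence check this function performs, built on GitPython. The hunk shows only the path glob and the Repo lookup; the dirty-working-tree check and the master...HEAD comparison below are assumptions about the surrounding body:

from git import InvalidGitRepositoryError, Repo


def vcs_state_diverges_from_master(paths_we_care_about: str) -> bool:
    """True if a git repo is present and has changes to the given paths not in master."""
    try:
        repo = Repo(search_parent_directories=True)
        # Uncommitted changes to the paths we care about?
        if repo.is_dirty(path=paths_we_care_about):
            return True
        # Commits on HEAD that master does not have, touching those paths?
        diff = repo.git.diff("master...HEAD", "--name-only", "--", paths_we_care_about)
        return bool(diff.strip())
    except InvalidGitRepositoryError:
        return False  # not running from a git checkout


print(vcs_state_diverges_from_master("autogpt/autogpt/**/*.py"))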
@@ -14,7 +14,7 @@ services:
     ports:
       - "8000:8000"
     volumes:
-      - ./:/app/classic/original_autogpt/
+      - ./:/app/autogpt/
       - ./docker-compose.yml:/app/docker-compose.yml:ro
       # - ./Dockerfile:/app/Dockerfile:ro
     profiles: ["exclude-from-up"]
@@ -33,8 +33,8 @@ services:
     entrypoint: ["poetry", "run"]
     command: ["pytest", "-v"]
     volumes:
-      - ./autogpt:/app/classic/original_autogpt/autogpt
-      - ./tests:/app/classic/original_autogpt/tests
+      - ./autogpt:/app/autogpt/autogpt
+      - ./tests:/app/autogpt/tests
     depends_on:
       - minio
     profiles: ["exclude-from-up"]
Binary image changed (Before: 33 KiB, After: 33 KiB)
1428 classic/original_autogpt/poetry.lock → autogpt/poetry.lock (generated)
File diff suppressed because it is too large
@@ -20,24 +20,44 @@ serve = "autogpt.app.cli:serve"

[tool.poetry.dependencies]
python = "^3.10"
anthropic = "^0.25.1"
autogpt-forge = { path = "../forge", develop = true }
# autogpt-forge = {git = "https://github.com/Significant-Gravitas/AutoGPT.git", subdirectory = "forge"}
beautifulsoup4 = "^4.12.2"
charset-normalizer = "^3.1.0"
click = "*"
colorama = "^0.4.6"
distro = "^1.8.0"
en-core-web-sm = { url = "https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.7.1/en_core_web_sm-3.7.1-py3-none-any.whl" }
fastapi = "^0.109.1"
gitpython = "^3.1.32"
ftfy = "^6.1.1"
google-api-python-client = "*"
hypercorn = "^0.14.4"
inflection = "*"
jsonschema = "*"
numpy = "*"
openai = "^1.7.2"
orjson = "^3.8.10"
pydantic = "^2.7.2"
Pillow = "*"
pydantic = "*"
python-docx = "*"
python-dotenv = "^1.0.0"
pyyaml = "^6.0"
readability-lxml = "^0.8.1"
requests = "*"
sentry-sdk = "^1.40.4"
spacy = "^3.7.4"
tenacity = "^8.2.2"

# OpenAI and Generic plugins import
openapi-python-client = "^0.14.0"

# Benchmarking
agbenchmark = { path = "../benchmark", optional = true }
# agbenchmark = {git = "https://github.com/Significant-Gravitas/AutoGPT.git", subdirectory = "benchmark", optional = true}
psycopg2-binary = "^2.9.9"
multidict = "6.0.5"
cx-freeze = "7.0.0"

[tool.poetry.extras]
benchmark = ["agbenchmark"]
@@ -45,28 +65,27 @@ benchmark = ["agbenchmark"]

[tool.poetry.group.dev.dependencies]
black = "^23.12.1"
flake8 = "^7.0.0"
gitpython = "^3.1.32"
isort = "^5.13.1"
pre-commit = "*"
pyright = "^1.1.364"

# Type stubs
types-beautifulsoup4 = "*"
types-colorama = "*"
types-Markdown = "*"
types-Pillow = "*"

# Testing
asynctest = "*"
coverage = "*"
pytest = "*"
pytest-asyncio = "*"
pytest-benchmark = "*"
pytest-cov = "*"
pytest-integration = "*"
pytest-mock = "*"
pytest-recording = "*"
pytest-xdist = "*"

[tool.poetry.group.build]
optional = true

[tool.poetry.group.build.dependencies]
cx-freeze = { git = "https://github.com/ntindle/cx_Freeze.git", rev = "main" }
# HACK: switch to cx-freeze release package after #2442 and #2472 are merged: https://github.com/marcelotduarte/cx_Freeze/pulls?q=is:pr+%232442+OR+%232472+
# cx-freeze = { version = "^7.2.0", optional = true }
vcrpy = { git = "https://github.com/Significant-Gravitas/vcrpy.git", rev = "master" }


[build-system]

@@ -88,4 +107,8 @@ skip_glob = ["data"]
 [tool.pyright]
 pythonVersion = "3.10"
 exclude = ["data/**", "**/node_modules", "**/__pycache__", "**/.*"]
-ignore = ["../classic/forge/**"]
+ignore = ["../forge/**"]
+
+
+[tool.pytest.ini_options]
+markers = ["slow", "requires_openai_api_key", "requires_huggingface_api_key"]
Some files were not shown because too many files have changed in this diff.